Dataset schema (each record below repeats these five columns, in order):

  code                      string   length 87 to 55.2k
  code_codestyle            int64    0 to 349
  style_context             string   length 135 to 49.1k
  style_context_codestyle   int64    0 to 349
  label                     int64    0 to 1
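A minimal sketch of loading a dump with this schema via the `datasets` library. The repo id below is a placeholder, and reading `label` as a same-style indicator for the two code columns is an assumption inferred from the column names, not something the dump states:

from datasets import load_dataset

# Placeholder repo id -- substitute the actual Hub path of this dump.
ds = load_dataset("your-org/code-style-pairs", split="train")

print(ds.column_names)
# ['code', 'code_codestyle', 'style_context', 'style_context_codestyle', 'label']

row = ds[0]
# Assumption: label marks whether `code` and `style_context` share a code style.
print(row["code_codestyle"], row["style_context_codestyle"], row["label"])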
import argparse
from pathlib import Path
from typing import Dict, OrderedDict, Tuple

import torch
from audiocraft.models import MusicGen

from transformers import (
    AutoFeatureExtractor,
    AutoTokenizer,
    EncodecModel,
    MusicgenDecoderConfig,
    MusicgenForConditionalGeneration,
    MusicgenProcessor,
    T5EncoderModel,
)
from transformers.models.musicgen.modeling_musicgen import MusicgenForCausalLM
from transformers.utils import logging


logging.set_verbosity_info()
logger = logging.get_logger(__name__)

EXPECTED_MISSING_KEYS = ["model.decoder.embed_positions.weights"]


def rename_keys(name):
    """Map an audiocraft state-dict key onto the transformers naming scheme."""
    if "emb" in name:
        name = name.replace("emb", "model.decoder.embed_tokens")
    if "transformer" in name:
        name = name.replace("transformer", "model.decoder")
    if "cross_attention" in name:
        name = name.replace("cross_attention", "encoder_attn")
    if "linear1" in name:
        name = name.replace("linear1", "fc1")
    if "linear2" in name:
        name = name.replace("linear2", "fc2")
    if "norm1" in name:
        name = name.replace("norm1", "self_attn_layer_norm")
    if "norm_cross" in name:
        name = name.replace("norm_cross", "encoder_attn_layer_norm")
    if "norm2" in name:
        name = name.replace("norm2", "final_layer_norm")
    if "out_norm" in name:
        name = name.replace("out_norm", "model.decoder.layer_norm")
    if "linears" in name:
        name = name.replace("linears", "lm_heads")
    if "condition_provider.conditioners.description.output_proj" in name:
        name = name.replace("condition_provider.conditioners.description.output_proj", "enc_to_dec_proj")
    return name


def rename_state_dict(state_dict: OrderedDict, hidden_size: int) -> Tuple[Dict, Dict]:
    """Rename all keys, split the fused QKV projection, and pull out the enc-dec projection weights."""
    keys = list(state_dict.keys())
    enc_dec_proj_state_dict = {}
    for key in keys:
        val = state_dict.pop(key)
        key = rename_keys(key)
        if "in_proj_weight" in key:
            # split fused qkv proj
            state_dict[key.replace("in_proj_weight", "q_proj.weight")] = val[:hidden_size, :]
            state_dict[key.replace("in_proj_weight", "k_proj.weight")] = val[hidden_size : 2 * hidden_size, :]
            state_dict[key.replace("in_proj_weight", "v_proj.weight")] = val[-hidden_size:, :]
        elif "enc_to_dec_proj" in key:
            enc_dec_proj_state_dict[key[len("enc_to_dec_proj.") :]] = val
        else:
            state_dict[key] = val
    return state_dict, enc_dec_proj_state_dict


def decoder_config_from_checkpoint(checkpoint: str) -> MusicgenDecoderConfig:
    if checkpoint == "small":
        # default config values
        hidden_size = 1024
        num_hidden_layers = 24
        num_attention_heads = 16
    elif checkpoint == "medium":
        hidden_size = 1536
        num_hidden_layers = 48
        num_attention_heads = 24
    elif checkpoint == "large":
        hidden_size = 2048
        num_hidden_layers = 48
        num_attention_heads = 32
    else:
        raise ValueError(f"Checkpoint should be one of `['small', 'medium', 'large']`, got {checkpoint}.")
    config = MusicgenDecoderConfig(
        hidden_size=hidden_size,
        ffn_dim=hidden_size * 4,
        num_hidden_layers=num_hidden_layers,
        num_attention_heads=num_attention_heads,
    )
    return config


@torch.no_grad()
def convert_musicgen_checkpoint(checkpoint, pytorch_dump_folder=None, repo_id=None, device="cpu"):
    fairseq_model = MusicGen.get_pretrained(checkpoint, device=device)
    decoder_config = decoder_config_from_checkpoint(checkpoint)

    decoder_state_dict = fairseq_model.lm.state_dict()
    decoder_state_dict, enc_dec_proj_state_dict = rename_state_dict(
        decoder_state_dict, hidden_size=decoder_config.hidden_size
    )

    text_encoder = T5EncoderModel.from_pretrained("t5-base")
    audio_encoder = EncodecModel.from_pretrained("facebook/encodec_32khz")
    decoder = MusicgenForCausalLM(decoder_config).eval()

    # load all decoder weights - expect that we'll be missing embeddings and enc-dec projection
    missing_keys, unexpected_keys = decoder.load_state_dict(decoder_state_dict, strict=False)

    for key in missing_keys.copy():
        if key.startswith(("text_encoder", "audio_encoder")) or key in EXPECTED_MISSING_KEYS:
            missing_keys.remove(key)

    if len(missing_keys) > 0:
        raise ValueError(f"Missing key(s) in state_dict: {missing_keys}")

    if len(unexpected_keys) > 0:
        raise ValueError(f"Unexpected key(s) in state_dict: {unexpected_keys}")

    # init the composite model
    model = MusicgenForConditionalGeneration(text_encoder=text_encoder, audio_encoder=audio_encoder, decoder=decoder)

    # load the pre-trained enc-dec projection (from the decoder state dict)
    model.enc_to_dec_proj.load_state_dict(enc_dec_proj_state_dict)

    # check we can do a forward pass
    input_ids = torch.arange(0, 8, dtype=torch.long).reshape(2, -1)
    decoder_input_ids = input_ids.reshape(2 * 4, -1)

    with torch.no_grad():
        logits = model(input_ids=input_ids, decoder_input_ids=decoder_input_ids).logits

    if logits.shape != (8, 1, 2048):
        raise ValueError("Incorrect shape for logits")

    # now construct the processor
    tokenizer = AutoTokenizer.from_pretrained("t5-base")
    feature_extractor = AutoFeatureExtractor.from_pretrained("facebook/encodec_32khz", padding_side="left")

    processor = MusicgenProcessor(feature_extractor=feature_extractor, tokenizer=tokenizer)

    # set the appropriate bos/pad token ids
    model.generation_config.decoder_start_token_id = 2048
    model.generation_config.pad_token_id = 2048

    # set other default generation config params
    model.generation_config.max_length = int(30 * audio_encoder.config.frame_rate)
    model.generation_config.do_sample = True
    model.generation_config.guidance_scale = 3.0

    if pytorch_dump_folder is not None:
        Path(pytorch_dump_folder).mkdir(exist_ok=True)
        logger.info(f"Saving model {checkpoint} to {pytorch_dump_folder}")
        model.save_pretrained(pytorch_dump_folder)
        processor.save_pretrained(pytorch_dump_folder)

    if repo_id:
        logger.info(f"Pushing model {checkpoint} to {repo_id}")
        model.push_to_hub(repo_id)
        processor.push_to_hub(repo_id)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--checkpoint",
        default="small",
        type=str,
        help="Checkpoint size of the MusicGen model you'd like to convert. Can be one of: `['small', 'medium', 'large']`.",
    )
    parser.add_argument(
        "--pytorch_dump_folder",
        required=True,
        default=None,
        type=str,
        help="Path to the output PyTorch model directory.",
    )
    parser.add_argument(
        "--push_to_hub", default=None, type=str, help="Where to upload the converted model on the 🤗 hub."
    )
    parser.add_argument(
        "--device", default="cpu", type=str, help="Torch device to run the conversion, either cpu or cuda."
    )

    args = parser.parse_args()
    convert_musicgen_checkpoint(args.checkpoint, args.pytorch_dump_folder, args.push_to_hub)
code_codestyle: 348
import argparse

import intel_extension_for_pytorch as ipex
import torch

from diffusers import DPMSolverMultistepScheduler, StableDiffusionPipeline


parser = argparse.ArgumentParser("Stable Diffusion script with intel optimization", add_help=False)
parser.add_argument("--dpm", action="store_true", help="Enable DPMSolver or not")
parser.add_argument("--steps", default=None, type=int, help="Num inference steps")
args = parser.parse_args()

device = "cpu"
prompt = "a lovely <dicoo> in red dress and hat, in the snowly and brightly night, with many brighly buildings"

model_id = "path-to-your-trained-model"
pipe = StableDiffusionPipeline.from_pretrained(model_id)
if args.dpm:
    pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
pipe = pipe.to(device)

# to channels last
pipe.unet = pipe.unet.to(memory_format=torch.channels_last)
pipe.vae = pipe.vae.to(memory_format=torch.channels_last)
pipe.text_encoder = pipe.text_encoder.to(memory_format=torch.channels_last)
if pipe.requires_safety_checker:
    pipe.safety_checker = pipe.safety_checker.to(memory_format=torch.channels_last)

# optimize with ipex
sample = torch.randn(2, 4, 64, 64)
timestep = torch.rand(1) * 999
encoder_hidden_status = torch.randn(2, 77, 768)
input_example = (sample, timestep, encoder_hidden_status)
try:
    pipe.unet = ipex.optimize(pipe.unet.eval(), dtype=torch.bfloat16, inplace=True, sample_input=input_example)
except Exception:
    pipe.unet = ipex.optimize(pipe.unet.eval(), dtype=torch.bfloat16, inplace=True)
pipe.vae = ipex.optimize(pipe.vae.eval(), dtype=torch.bfloat16, inplace=True)
pipe.text_encoder = ipex.optimize(pipe.text_encoder.eval(), dtype=torch.bfloat16, inplace=True)
if pipe.requires_safety_checker:
    pipe.safety_checker = ipex.optimize(pipe.safety_checker.eval(), dtype=torch.bfloat16, inplace=True)

# compute
seed = 666
generator = torch.Generator(device).manual_seed(seed)
generate_kwargs = {"generator": generator}
if args.steps is not None:
    generate_kwargs["num_inference_steps"] = args.steps

with torch.cpu.amp.autocast(enabled=True, dtype=torch.bfloat16):
    image = pipe(prompt, **generate_kwargs).images[0]

# save image
image.save("generated.png")
style_context_codestyle: 348
label: 1
import json import os import shutil import tempfile import unittest import numpy as np import pytest from transformers import CLIPTokenizer, CLIPTokenizerFast from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES from transformers.testing_utils import require_vision from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available if is_vision_available(): from PIL import Image from transformers import CLIPImageProcessor, CLIPProcessor @require_vision class __snake_case ( unittest.TestCase ): def UpperCAmelCase__ ( self ) -> Optional[Any]: '''simple docstring''' UpperCAmelCase : Optional[int] =tempfile.mkdtemp() # fmt: off UpperCAmelCase : Dict =['''l''', '''o''', '''w''', '''e''', '''r''', '''s''', '''t''', '''i''', '''d''', '''n''', '''lo''', '''l</w>''', '''w</w>''', '''r</w>''', '''t</w>''', '''low</w>''', '''er</w>''', '''lowest</w>''', '''newer</w>''', '''wider''', '''<unk>''', '''<|startoftext|>''', '''<|endoftext|>'''] # fmt: on UpperCAmelCase : Optional[int] =dict(zip(snake_case__ , range(len(snake_case__ ) ) ) ) UpperCAmelCase : Optional[Any] =['''#version: 0.2''', '''l o''', '''lo w</w>''', '''e r</w>''', ''''''] UpperCAmelCase : List[Any] ={'''unk_token''': '''<unk>'''} UpperCAmelCase : Optional[Any] =os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] ) UpperCAmelCase : List[str] =os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''merges_file'''] ) with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp: fp.write(json.dumps(snake_case__ ) + '''\n''' ) with open(self.merges_file , '''w''' , encoding='''utf-8''' ) as fp: fp.write('''\n'''.join(snake_case__ ) ) UpperCAmelCase : str ={ '''do_resize''': True, '''size''': 20, '''do_center_crop''': True, '''crop_size''': 18, '''do_normalize''': True, '''image_mean''': [0.4814_5466, 0.457_8275, 0.4082_1073], '''image_std''': [0.2686_2954, 0.2613_0258, 0.2757_7711], } UpperCAmelCase : Union[str, Any] =os.path.join(self.tmpdirname , snake_case__ ) with open(self.image_processor_file , '''w''' , encoding='''utf-8''' ) as fp: json.dump(snake_case__ , snake_case__ ) def UpperCAmelCase__ ( self , **snake_case__ ) -> List[Any]: '''simple docstring''' return CLIPTokenizer.from_pretrained(self.tmpdirname , **snake_case__ ) def UpperCAmelCase__ ( self , **snake_case__ ) -> Tuple: '''simple docstring''' return CLIPTokenizerFast.from_pretrained(self.tmpdirname , **snake_case__ ) def UpperCAmelCase__ ( self , **snake_case__ ) -> List[str]: '''simple docstring''' return CLIPImageProcessor.from_pretrained(self.tmpdirname , **snake_case__ ) def UpperCAmelCase__ ( self ) -> str: '''simple docstring''' shutil.rmtree(self.tmpdirname ) def UpperCAmelCase__ ( self ) -> Optional[int]: '''simple docstring''' UpperCAmelCase : Union[str, Any] =[np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta )] UpperCAmelCase : Any =[Image.fromarray(np.moveaxis(snake_case__ , 0 , -1 ) ) for x in image_inputs] return image_inputs def UpperCAmelCase__ ( self ) -> Dict: '''simple docstring''' UpperCAmelCase : int =self.get_tokenizer() UpperCAmelCase : int =self.get_rust_tokenizer() UpperCAmelCase : Tuple =self.get_image_processor() UpperCAmelCase : Optional[int] =CLIPProcessor(tokenizer=snake_case__ , image_processor=snake_case__ ) processor_slow.save_pretrained(self.tmpdirname ) UpperCAmelCase : Union[str, Any] =CLIPProcessor.from_pretrained(self.tmpdirname , use_fast=snake_case__ ) UpperCAmelCase : int =CLIPProcessor(tokenizer=snake_case__ , image_processor=snake_case__ ) 
processor_fast.save_pretrained(self.tmpdirname ) UpperCAmelCase : Dict =CLIPProcessor.from_pretrained(self.tmpdirname ) self.assertEqual(processor_slow.tokenizer.get_vocab() , tokenizer_slow.get_vocab() ) self.assertEqual(processor_fast.tokenizer.get_vocab() , tokenizer_fast.get_vocab() ) self.assertEqual(tokenizer_slow.get_vocab() , tokenizer_fast.get_vocab() ) self.assertIsInstance(processor_slow.tokenizer , snake_case__ ) self.assertIsInstance(processor_fast.tokenizer , snake_case__ ) self.assertEqual(processor_slow.image_processor.to_json_string() , image_processor.to_json_string() ) self.assertEqual(processor_fast.image_processor.to_json_string() , image_processor.to_json_string() ) self.assertIsInstance(processor_slow.image_processor , snake_case__ ) self.assertIsInstance(processor_fast.image_processor , snake_case__ ) def UpperCAmelCase__ ( self ) -> int: '''simple docstring''' UpperCAmelCase : Optional[int] =CLIPProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() ) processor.save_pretrained(self.tmpdirname ) UpperCAmelCase : Optional[Any] =self.get_tokenizer(bos_token='''(BOS)''' , eos_token='''(EOS)''' ) UpperCAmelCase : int =self.get_image_processor(do_normalize=snake_case__ , padding_value=1.0 ) UpperCAmelCase : Union[str, Any] =CLIPProcessor.from_pretrained( self.tmpdirname , bos_token='''(BOS)''' , eos_token='''(EOS)''' , do_normalize=snake_case__ , padding_value=1.0 ) self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() ) self.assertIsInstance(processor.tokenizer , snake_case__ ) self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() ) self.assertIsInstance(processor.image_processor , snake_case__ ) def UpperCAmelCase__ ( self ) -> str: '''simple docstring''' UpperCAmelCase : Optional[Any] =self.get_image_processor() UpperCAmelCase : Optional[Any] =self.get_tokenizer() UpperCAmelCase : Dict =CLIPProcessor(tokenizer=snake_case__ , image_processor=snake_case__ ) UpperCAmelCase : Dict =self.prepare_image_inputs() UpperCAmelCase : str =image_processor(snake_case__ , return_tensors='''np''' ) UpperCAmelCase : int =processor(images=snake_case__ , return_tensors='''np''' ) for key in input_image_proc.keys(): self.assertAlmostEqual(input_image_proc[key].sum() , input_processor[key].sum() , delta=1e-2 ) def UpperCAmelCase__ ( self ) -> List[str]: '''simple docstring''' UpperCAmelCase : Optional[Any] =self.get_image_processor() UpperCAmelCase : int =self.get_tokenizer() UpperCAmelCase : Dict =CLIPProcessor(tokenizer=snake_case__ , image_processor=snake_case__ ) UpperCAmelCase : int ='''lower newer''' UpperCAmelCase : Tuple =processor(text=snake_case__ ) UpperCAmelCase : Optional[Any] =tokenizer(snake_case__ ) for key in encoded_tok.keys(): self.assertListEqual(encoded_tok[key] , encoded_processor[key] ) def UpperCAmelCase__ ( self ) -> Optional[Any]: '''simple docstring''' UpperCAmelCase : int =self.get_image_processor() UpperCAmelCase : Dict =self.get_tokenizer() UpperCAmelCase : Any =CLIPProcessor(tokenizer=snake_case__ , image_processor=snake_case__ ) UpperCAmelCase : str ='''lower newer''' UpperCAmelCase : Optional[int] =self.prepare_image_inputs() UpperCAmelCase : Tuple =processor(text=snake_case__ , images=snake_case__ ) self.assertListEqual(list(inputs.keys() ) , ['''input_ids''', '''attention_mask''', '''pixel_values'''] ) # test if it raises when no input is passed with pytest.raises(snake_case__ ): processor() def UpperCAmelCase__ ( self ) -> Any: '''simple 
docstring''' UpperCAmelCase : int =self.get_image_processor() UpperCAmelCase : Optional[int] =self.get_tokenizer() UpperCAmelCase : List[str] =CLIPProcessor(tokenizer=snake_case__ , image_processor=snake_case__ ) UpperCAmelCase : List[Any] =[[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]] UpperCAmelCase : Optional[Any] =processor.batch_decode(snake_case__ ) UpperCAmelCase : Union[str, Any] =tokenizer.batch_decode(snake_case__ ) self.assertListEqual(snake_case__ , snake_case__ ) def UpperCAmelCase__ ( self ) -> int: '''simple docstring''' UpperCAmelCase : int =self.get_image_processor() UpperCAmelCase : Optional[int] =self.get_tokenizer() UpperCAmelCase : Optional[int] =CLIPProcessor(tokenizer=snake_case__ , image_processor=snake_case__ ) UpperCAmelCase : Dict ='''lower newer''' UpperCAmelCase : Any =self.prepare_image_inputs() UpperCAmelCase : Optional[Any] =processor(text=snake_case__ , images=snake_case__ ) self.assertListEqual(list(inputs.keys() ) , processor.model_input_names )
code_codestyle: 348
NUMBERS_PLUS_LETTER = "Input must be a string of 8 numbers plus letter"
LOOKUP_LETTERS = "TRWAGMYFPDXBNJZSQVHLCKE"


def is_spain_national_id(spanish_id: str) -> bool:
    """Validate a Spanish national id (DNI): 8 digits plus a checksum letter."""
    if not isinstance(spanish_id, str):
        msg = f"Expected string as input, found {type(spanish_id).__name__}"
        raise TypeError(msg)

    spanish_id_clean = spanish_id.replace("-", "").upper()
    if len(spanish_id_clean) != 9:
        raise ValueError(NUMBERS_PLUS_LETTER)

    try:
        number = int(spanish_id_clean[0:8])
        letter = spanish_id_clean[8]
    except ValueError as ex:
        raise ValueError(NUMBERS_PLUS_LETTER) from ex

    if letter.isdigit():
        raise ValueError(NUMBERS_PLUS_LETTER)

    return letter == LOOKUP_LETTERS[number % 23]


if __name__ == "__main__":
    import doctest

    doctest.testmod()
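A quick check of the validator above, using the well-known sample DNI 12345678Z (12345678 % 23 == 14 and LOOKUP_LETTERS[14] == "Z"):

assert is_spain_national_id("12345678Z")  # checksum letter matches
assert is_spain_national_id("12345678-Z")  # dashes are stripped before validation
assert not is_spain_national_id("12345678A")  # wrong checksum letter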
style_context_codestyle: 348
label: 1
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

MRA_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "uw-madison/mra-base-512-4": "https://huggingface.co/uw-madison/mra-base-512-4/resolve/main/config.json",
}


class MraConfig(PretrainedConfig):
    model_type = "mra"

    def __init__(
        self,
        vocab_size=50265,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=1,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        position_embedding_type="absolute",
        block_per_row=4,
        approx_mode="full",
        initial_prior_first_n_blocks=0,
        initial_prior_diagonal_n_blocks=0,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.block_per_row = block_per_row
        self.approx_mode = approx_mode
        self.initial_prior_first_n_blocks = initial_prior_first_n_blocks
        self.initial_prior_diagonal_n_blocks = initial_prior_diagonal_n_blocks
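For illustration, a short sketch instantiating the config above. The overridden values are arbitrary, and the top-level imports assume a transformers release that ships MRA (v4.31 or later):

from transformers import MraConfig, MraModel

# Arbitrary small config for illustration; unspecified fields keep the defaults above.
config = MraConfig(hidden_size=256, num_hidden_layers=4, num_attention_heads=4, intermediate_size=1024)
model = MraModel(config)
print(model.config.block_per_row)  # 4, the default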
code_codestyle: 348
def logical_left_shift(number: int, shift_amount: int) -> str:
    """Shift `number` left by `shift_amount` bits and return the binary string."""
    if number < 0 or shift_amount < 0:
        raise ValueError("both inputs must be positive integers")

    binary_number = str(bin(number))
    binary_number += "0" * shift_amount
    return binary_number


def logical_right_shift(number: int, shift_amount: int) -> str:
    """Shift `number` right by `shift_amount` bits, filling with zeros."""
    if number < 0 or shift_amount < 0:
        raise ValueError("both inputs must be positive integers")

    binary_number = str(bin(number))[2:]
    if shift_amount >= len(binary_number):
        return "0b0"
    shifted_binary_number = binary_number[: len(binary_number) - shift_amount]
    return "0b" + shifted_binary_number


def arithmetic_right_shift(number: int, shift_amount: int) -> str:
    """Shift `number` right by `shift_amount` bits, replicating the sign bit."""
    if number >= 0:
        # Get binary representation of positive number
        binary_number = "0" + str(bin(number)).strip("-")[2:]
    else:
        # Get binary (2's complement) representation of negative number
        binary_number_length = len(bin(number)[3:])
        # Find 2's complement of number
        binary_number = bin(abs(number) - (1 << binary_number_length))[3:]
        binary_number = "1" + "0" * (binary_number_length - len(binary_number)) + binary_number

    if shift_amount >= len(binary_number):
        return "0b" + binary_number[0] * len(binary_number)
    return (
        "0b"
        + binary_number[0] * shift_amount
        + binary_number[: len(binary_number) - shift_amount]
    )


if __name__ == "__main__":
    import doctest

    doctest.testmod()
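A few sample calls for the three helpers above, with outputs worked out from their definitions:

print(logical_left_shift(20, 2))  # '0b1010000'  (20 << 2 == 80)
print(logical_right_shift(1000, 6))  # '0b1111'  (1000 >> 6 == 15)
print(arithmetic_right_shift(-8, 2))  # '0b11110'  (-8 >> 2 == -2 in 5-bit two's complement)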
style_context_codestyle: 348
label: 1
import os import zipfile import pytest from datasets.utils.extract import ( BzipaExtractor, Extractor, GzipExtractor, LzaExtractor, SevenZipExtractor, TarExtractor, XzExtractor, ZipExtractor, ZstdExtractor, ) from .utils import require_lza, require_pyazr, require_zstandard @pytest.mark.parametrize( '''compression_format, is_archive''' , [ ('''7z''', True), ('''bz2''', False), ('''gzip''', False), ('''lz4''', False), ('''tar''', True), ('''xz''', False), ('''zip''', True), ('''zstd''', False), ] , ) def lowerCAmelCase_ ( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , )-> List[Any]: '''simple docstring''' UpperCAmelCase : Any ={ '''7z''': (seven_zip_file, SevenZipExtractor), '''bz2''': (bza_file, BzipaExtractor), '''gzip''': (gz_file, GzipExtractor), '''lz4''': (lza_file, LzaExtractor), '''tar''': (tar_file, TarExtractor), '''xz''': (xz_file, XzExtractor), '''zip''': (zip_file, ZipExtractor), '''zstd''': (zstd_file, ZstdExtractor), } UpperCAmelCase , UpperCAmelCase : Optional[int] =input_paths_and_base_extractors[compression_format] if input_path is None: UpperCAmelCase : int =f'''for \'{compression_format}\' compression_format, ''' if compression_format == "7z": reason += require_pyazr.kwargs["reason"] elif compression_format == "lz4": reason += require_lza.kwargs["reason"] elif compression_format == "zstd": reason += require_zstandard.kwargs["reason"] pytest.skip(__lowerCAmelCase ) assert base_extractor.is_extractable(__lowerCAmelCase ) UpperCAmelCase : str =tmp_path / ('''extracted''' if is_archive else '''extracted.txt''') base_extractor.extract(__lowerCAmelCase , __lowerCAmelCase ) if is_archive: assert output_path.is_dir() for file_path in output_path.iterdir(): assert file_path.name == text_file.name UpperCAmelCase : List[str] =file_path.read_text(encoding='''utf-8''' ) else: UpperCAmelCase : Optional[Any] =output_path.read_text(encoding='''utf-8''' ) UpperCAmelCase : List[str] =text_file.read_text(encoding='''utf-8''' ) assert extracted_file_content == expected_file_content @pytest.mark.parametrize( '''compression_format, is_archive''' , [ ('''7z''', True), ('''bz2''', False), ('''gzip''', False), ('''lz4''', False), ('''tar''', True), ('''xz''', False), ('''zip''', True), ('''zstd''', False), ] , ) def lowerCAmelCase_ ( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , )-> str: '''simple docstring''' UpperCAmelCase : Any ={ '''7z''': seven_zip_file, '''bz2''': bza_file, '''gzip''': gz_file, '''lz4''': lza_file, '''tar''': tar_file, '''xz''': xz_file, '''zip''': zip_file, '''zstd''': zstd_file, } UpperCAmelCase : Optional[int] =input_paths[compression_format] if input_path is None: UpperCAmelCase : Any =f'''for \'{compression_format}\' compression_format, ''' if compression_format == "7z": reason += require_pyazr.kwargs["reason"] elif compression_format == "lz4": reason += require_lza.kwargs["reason"] elif compression_format == "zstd": reason += require_zstandard.kwargs["reason"] pytest.skip(__lowerCAmelCase ) UpperCAmelCase : Dict =Extractor.infer_extractor_format(__lowerCAmelCase ) assert extractor_format is not None UpperCAmelCase : List[Any] =tmp_path / ('''extracted''' if is_archive else '''extracted.txt''') 
Extractor.extract(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) if is_archive: assert output_path.is_dir() for file_path in output_path.iterdir(): assert file_path.name == text_file.name UpperCAmelCase : Tuple =file_path.read_text(encoding='''utf-8''' ) else: UpperCAmelCase : List[str] =output_path.read_text(encoding='''utf-8''' ) UpperCAmelCase : List[Any] =text_file.read_text(encoding='''utf-8''' ) assert extracted_file_content == expected_file_content @pytest.fixture def lowerCAmelCase_ ( __lowerCAmelCase , __lowerCAmelCase )-> Union[str, Any]: '''simple docstring''' import tarfile UpperCAmelCase : Optional[int] =tmp_path / '''data_dot_dot''' directory.mkdir() UpperCAmelCase : Tuple =directory / '''tar_file_with_dot_dot.tar''' with tarfile.TarFile(__lowerCAmelCase , '''w''' ) as f: f.add(__lowerCAmelCase , arcname=os.path.join('''..''' , text_file.name ) ) return path @pytest.fixture def lowerCAmelCase_ ( __lowerCAmelCase )-> int: '''simple docstring''' import tarfile UpperCAmelCase : List[str] =tmp_path / '''data_sym_link''' directory.mkdir() UpperCAmelCase : str =directory / '''tar_file_with_sym_link.tar''' os.symlink('''..''' , directory / '''subdir''' , target_is_directory=__lowerCAmelCase ) with tarfile.TarFile(__lowerCAmelCase , '''w''' ) as f: f.add(str(directory / '''subdir''' ) , arcname='''subdir''' ) # str required by os.readlink on Windows and Python < 3.8 return path @pytest.mark.parametrize( '''insecure_tar_file, error_log''' , [('''tar_file_with_dot_dot''', '''illegal path'''), ('''tar_file_with_sym_link''', '''Symlink''')] , ) def lowerCAmelCase_ ( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )-> Tuple: '''simple docstring''' UpperCAmelCase : Dict ={ '''tar_file_with_dot_dot''': tar_file_with_dot_dot, '''tar_file_with_sym_link''': tar_file_with_sym_link, } UpperCAmelCase : Tuple =insecure_tar_files[insecure_tar_file] UpperCAmelCase : int =tmp_path / '''extracted''' TarExtractor.extract(__lowerCAmelCase , __lowerCAmelCase ) assert caplog.text for record in caplog.records: assert record.levelname == "ERROR" assert error_log in record.msg def lowerCAmelCase_ ( __lowerCAmelCase )-> str: '''simple docstring''' UpperCAmelCase : Dict =tmpdir / '''not_a_zip_file''' # From: https://github.com/python/cpython/pull/5053 UpperCAmelCase : Union[str, Any] =( b'''\x89PNG\r\n\x1a\n\x00\x00\x00\rIHDR\x00\x00\x00\x01\x00\x00''' b'''\x00\x02\x08\x06\x00\x00\x00\x99\x81\xb6\'\x00\x00\x00\x15I''' b'''DATx\x01\x01\n\x00\xf5\xff\x00PK\x05\x06\x00PK\x06\x06\x07''' b'''\xac\x01N\xc6|a\r\x00\x00\x00\x00IEND\xaeB`\x82''' ) with not_a_zip_file.open('''wb''' ) as f: f.write(__lowerCAmelCase ) assert zipfile.is_zipfile(str(__lowerCAmelCase ) ) # is a false positive for `zipfile` assert not ZipExtractor.is_extractable(__lowerCAmelCase ) # but we're right
code_codestyle: 348
from dataclasses import asdict, dataclass
from typing import Optional

from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

# TODO Update this
ESM_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/esm-1b": "https://huggingface.co/facebook/esm-1b/resolve/main/config.json",
    # See all ESM models at https://huggingface.co/models?filter=esm
}


class EsmConfig(PretrainedConfig):
    model_type = "esm"

    def __init__(
        self,
        vocab_size=None,
        mask_token_id=None,
        pad_token_id=None,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=1026,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        position_embedding_type="absolute",
        use_cache=True,
        emb_layer_norm_before=None,
        token_dropout=False,
        is_folding_model=False,
        esmfold_config=None,
        vocab_list=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, mask_token_id=mask_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.emb_layer_norm_before = emb_layer_norm_before
        self.token_dropout = token_dropout
        self.is_folding_model = is_folding_model
        if is_folding_model:
            if esmfold_config is None:
                logger.info("No esmfold_config supplied for folding model, using default values.")
                esmfold_config = EsmFoldConfig()
            elif isinstance(esmfold_config, dict):
                esmfold_config = EsmFoldConfig(**esmfold_config)
            self.esmfold_config = esmfold_config
            if vocab_list is None:
                logger.warning("No vocab_list supplied for folding model, assuming the ESM-2 vocabulary!")
                self.vocab_list = get_default_vocab_list()
            else:
                self.vocab_list = vocab_list
        else:
            self.esmfold_config = None
            self.vocab_list = None
        if self.esmfold_config is not None and getattr(self.esmfold_config, "use_esm_attn_map", False):
            raise ValueError("The HuggingFace port of ESMFold does not support use_esm_attn_map at this time!")

    def to_dict(self):
        output = super().to_dict()
        if isinstance(self.esmfold_config, EsmFoldConfig):
            output["esmfold_config"] = self.esmfold_config.to_dict()
        return output


@dataclass
class EsmFoldConfig:
    esm_type: str = None
    fp16_esm: bool = True
    use_esm_attn_map: bool = False
    esm_ablate_pairwise: bool = False
    esm_ablate_sequence: bool = False
    esm_input_dropout: float = 0

    embed_aa: bool = True
    bypass_lm: bool = False

    lddt_head_hid_dim: int = 128
    trunk: "TrunkConfig" = None

    def __post_init__(self):
        if self.trunk is None:
            self.trunk = TrunkConfig()
        elif isinstance(self.trunk, dict):
            self.trunk = TrunkConfig(**self.trunk)

    def to_dict(self):
        output = asdict(self)
        output["trunk"] = self.trunk.to_dict()
        return output


@dataclass
class TrunkConfig:
    num_blocks: int = 48
    sequence_state_dim: int = 1024
    pairwise_state_dim: int = 128
    sequence_head_width: int = 32
    pairwise_head_width: int = 32
    position_bins: int = 32
    dropout: float = 0
    layer_drop: float = 0
    cpu_grad_checkpoint: bool = False
    max_recycles: int = 4
    chunk_size: Optional[int] = 128
    structure_module: "StructureModuleConfig" = None

    def __post_init__(self):
        if self.structure_module is None:
            self.structure_module = StructureModuleConfig()
        elif isinstance(self.structure_module, dict):
            self.structure_module = StructureModuleConfig(**self.structure_module)

        if self.max_recycles <= 0:
            raise ValueError(f"`max_recycles` should be positive, got {self.max_recycles}.")
        if self.sequence_state_dim % self.sequence_head_width != 0:
            raise ValueError(
                "`sequence_state_dim` should be a round multiple of `sequence_head_width`, got"
                f" {self.sequence_state_dim} and {self.sequence_head_width}."
            )
        if self.pairwise_state_dim % self.pairwise_head_width != 0:
            raise ValueError(
                "`pairwise_state_dim` should be a round multiple of `pairwise_head_width`, got"
                f" {self.pairwise_state_dim} and {self.pairwise_head_width}."
            )

        sequence_num_heads = self.sequence_state_dim // self.sequence_head_width
        pairwise_num_heads = self.pairwise_state_dim // self.pairwise_head_width

        if self.sequence_state_dim != sequence_num_heads * self.sequence_head_width:
            raise ValueError(
                "`sequence_state_dim` should be equal to `sequence_num_heads * sequence_head_width`, got"
                f" {self.sequence_state_dim} != {sequence_num_heads} * {self.sequence_head_width}."
            )
        if self.pairwise_state_dim != pairwise_num_heads * self.pairwise_head_width:
            raise ValueError(
                "`pairwise_state_dim` should be equal to `pairwise_num_heads * pairwise_head_width`, got"
                f" {self.pairwise_state_dim} != {pairwise_num_heads} * {self.pairwise_head_width}."
            )
        if self.pairwise_state_dim % 2 != 0:
            raise ValueError(f"`pairwise_state_dim` should be even, got {self.pairwise_state_dim}.")

        if self.dropout >= 0.4:
            raise ValueError(f"`dropout` should not be greater than 0.4, got {self.dropout}.")

    def to_dict(self):
        output = asdict(self)
        output["structure_module"] = self.structure_module.to_dict()
        return output


@dataclass
class StructureModuleConfig:
    sequence_dim: int = 384
    pairwise_dim: int = 128
    ipa_dim: int = 16
    resnet_dim: int = 128
    num_heads_ipa: int = 12
    num_qk_points: int = 4
    num_v_points: int = 8
    dropout_rate: float = 0.1
    num_blocks: int = 8
    num_transition_layers: int = 1
    num_resnet_blocks: int = 2
    num_angles: int = 7
    trans_scale_factor: int = 10
    epsilon: float = 1e-8
    inf: float = 1e5

    def to_dict(self):
        return asdict(self)


def get_default_vocab_list():
    return (
        "<cls>", "<pad>", "<eos>", "<unk>",
        "L", "A", "G", "V", "S", "E", "R", "T", "I", "D", "P", "K",
        "Q", "N", "F", "Y", "M", "H", "W", "C", "X", "B", "U", "Z",
        "O", ".", "-", "<null_1>", "<mask>",
    )
style_context_codestyle: 348
label: 1
from __future__ import annotations


def slowsort(sequence: list, start: int | None = None, end: int | None = None) -> None:
    """Sort sequence[start..end] (inclusive) in place, slowsort-style."""
    if start is None:
        start = 0

    if end is None:
        end = len(sequence) - 1

    if start >= end:
        return

    mid = (start + end) // 2

    slowsort(sequence, start, mid)
    slowsort(sequence, mid + 1, end)

    if sequence[end] < sequence[mid]:
        sequence[end], sequence[mid] = sequence[mid], sequence[end]

    slowsort(sequence, start, end - 1)


if __name__ == "__main__":
    from doctest import testmod

    testmod()
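A quick demonstration of the in-place sort, including the optional sub-range arguments:

seq = [5, 2, 4, 1, 3]
slowsort(seq)
print(seq)  # [1, 2, 3, 4, 5]

partial = [5, 2, 4, 1, 3]
slowsort(partial, 0, 2)  # only indices 0..2 are sorted
print(partial)  # [2, 4, 5, 1, 3]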
code_codestyle: 348
import torch

from diffusers import KDPM2DiscreteScheduler
from diffusers.utils import torch_device

from .test_schedulers import SchedulerCommonTest


class KDPM2DiscreteSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (KDPM2DiscreteScheduler,)
    num_inference_steps = 10

    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 1100,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
        }

        config.update(**kwargs)
        return config

    def test_timesteps(self):
        for timesteps in [10, 50, 100, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_betas(self):
        for beta_start, beta_end in zip([0.00001, 0.0001, 0.001], [0.0002, 0.002, 0.02]):
            self.check_over_configs(beta_start=beta_start, beta_end=beta_end)

    def test_schedules(self):
        for schedule in ["linear", "scaled_linear"]:
            self.check_over_configs(beta_schedule=schedule)

    def test_prediction_type(self):
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)

    def test_full_loop_with_v_prediction(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(prediction_type="v_prediction")
        scheduler = scheduler_class(**scheduler_config)

        scheduler.set_timesteps(self.num_inference_steps)

        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma
        sample = sample.to(torch_device)

        for i, t in enumerate(scheduler.timesteps):
            sample = scheduler.scale_model_input(sample, t)

            model_output = model(sample, t)

            output = scheduler.step(model_output, t, sample)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        if torch_device in ["cpu", "mps"]:
            assert abs(result_sum.item() - 4.6934e-07) < 1e-2
            assert abs(result_mean.item() - 6.1112e-10) < 1e-3
        else:
            # CUDA
            assert abs(result_sum.item() - 4.693428650170972e-07) < 1e-2
            assert abs(result_mean.item() - 0.0002) < 1e-3

    def test_full_loop_no_noise(self):
        if torch_device == "mps":
            return
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        scheduler.set_timesteps(self.num_inference_steps)

        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma
        sample = sample.to(torch_device)

        for i, t in enumerate(scheduler.timesteps):
            sample = scheduler.scale_model_input(sample, t)

            model_output = model(sample, t)

            output = scheduler.step(model_output, t, sample)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        if torch_device in ["cpu", "mps"]:
            assert abs(result_sum.item() - 20.4125) < 1e-2
            assert abs(result_mean.item() - 0.0266) < 1e-3
        else:
            # CUDA
            assert abs(result_sum.item() - 20.4125) < 1e-2
            assert abs(result_mean.item() - 0.0266) < 1e-3

    def test_full_loop_device(self):
        if torch_device == "mps":
            return
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        scheduler.set_timesteps(self.num_inference_steps, device=torch_device)

        model = self.dummy_model()
        sample = self.dummy_sample_deter.to(torch_device) * scheduler.init_noise_sigma

        for t in scheduler.timesteps:
            sample = scheduler.scale_model_input(sample, t)

            model_output = model(sample, t)

            output = scheduler.step(model_output, t, sample)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        if str(torch_device).startswith("cpu"):
            # The following sum varies between 148 and 156 on mps. Why?
            assert abs(result_sum.item() - 20.4125) < 1e-2
            assert abs(result_mean.item() - 0.0266) < 1e-3
        else:
            # CUDA
            assert abs(result_sum.item() - 20.4125) < 1e-2
            assert abs(result_mean.item() - 0.0266) < 1e-3
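For context, a short sketch of plugging this scheduler into a diffusers pipeline; the checkpoint id is illustrative and any Stable Diffusion checkpoint works the same way:

from diffusers import DiffusionPipeline, KDPM2DiscreteScheduler

pipe = DiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5")
pipe.scheduler = KDPM2DiscreteScheduler.from_config(pipe.scheduler.config)
image = pipe("a photo of an astronaut riding a horse").images[0]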
style_context_codestyle: 348
label: 1
from dataclasses import dataclass from typing import Dict, Optional, Union import torch import torch.nn.functional as F from torch import nn from ..configuration_utils import ConfigMixin, register_to_config from ..utils import BaseOutput from .attention import BasicTransformerBlock from .attention_processor import AttentionProcessor, AttnProcessor from .embeddings import TimestepEmbedding, Timesteps from .modeling_utils import ModelMixin @dataclass class __snake_case ( lowerCamelCase__ ): __lowerCamelCase : torch.FloatTensor class __snake_case ( lowerCamelCase__ , lowerCamelCase__ ): @register_to_config def __init__( self , snake_case__ = 32 , snake_case__ = 64 , snake_case__ = 20 , snake_case__ = 768 , snake_case__=77 , snake_case__=4 , snake_case__ = 0.0 , snake_case__ = "silu" , snake_case__ = None , snake_case__ = None , snake_case__ = "linear" , snake_case__ = "prd" , snake_case__ = None , snake_case__ = None , snake_case__ = None , ) -> Tuple: '''simple docstring''' super().__init__() UpperCAmelCase : List[Any] =num_attention_heads UpperCAmelCase : Tuple =attention_head_dim UpperCAmelCase : Union[str, Any] =num_attention_heads * attention_head_dim UpperCAmelCase : int =additional_embeddings UpperCAmelCase : List[str] =time_embed_dim or inner_dim UpperCAmelCase : List[str] =embedding_proj_dim or embedding_dim UpperCAmelCase : Any =clip_embed_dim or embedding_dim UpperCAmelCase : Dict =Timesteps(snake_case__ , snake_case__ , 0 ) UpperCAmelCase : Tuple =TimestepEmbedding(snake_case__ , snake_case__ , out_dim=snake_case__ , act_fn=snake_case__ ) UpperCAmelCase : Union[str, Any] =nn.Linear(snake_case__ , snake_case__ ) if embedding_proj_norm_type is None: UpperCAmelCase : Dict =None elif embedding_proj_norm_type == "layer": UpperCAmelCase : int =nn.LayerNorm(snake_case__ ) else: raise ValueError(f'''unsupported embedding_proj_norm_type: {embedding_proj_norm_type}''' ) UpperCAmelCase : Optional[Any] =nn.Linear(snake_case__ , snake_case__ ) if encoder_hid_proj_type is None: UpperCAmelCase : Optional[int] =None elif encoder_hid_proj_type == "linear": UpperCAmelCase : Any =nn.Linear(snake_case__ , snake_case__ ) else: raise ValueError(f'''unsupported encoder_hid_proj_type: {encoder_hid_proj_type}''' ) UpperCAmelCase : Dict =nn.Parameter(torch.zeros(1 , num_embeddings + additional_embeddings , snake_case__ ) ) if added_emb_type == "prd": UpperCAmelCase : Dict =nn.Parameter(torch.zeros(1 , 1 , snake_case__ ) ) elif added_emb_type is None: UpperCAmelCase : int =None else: raise ValueError( f'''`added_emb_type`: {added_emb_type} is not supported. Make sure to choose one of `\'prd\'` or `None`.''' ) UpperCAmelCase : List[str] =nn.ModuleList( [ BasicTransformerBlock( snake_case__ , snake_case__ , snake_case__ , dropout=snake_case__ , activation_fn='''gelu''' , attention_bias=snake_case__ , ) for d in range(snake_case__ ) ] ) if norm_in_type == "layer": UpperCAmelCase : Optional[Any] =nn.LayerNorm(snake_case__ ) elif norm_in_type is None: UpperCAmelCase : Union[str, Any] =None else: raise ValueError(f'''Unsupported norm_in_type: {norm_in_type}.''' ) UpperCAmelCase : str =nn.LayerNorm(snake_case__ ) UpperCAmelCase : List[str] =nn.Linear(snake_case__ , snake_case__ ) UpperCAmelCase : Union[str, Any] =torch.full( [num_embeddings + additional_embeddings, num_embeddings + additional_embeddings] , -1_0000.0 ) causal_attention_mask.triu_(1 ) UpperCAmelCase : Optional[Any] =causal_attention_mask[None, ...] 
self.register_buffer('''causal_attention_mask''' , snake_case__ , persistent=snake_case__ ) UpperCAmelCase : int =nn.Parameter(torch.zeros(1 , snake_case__ ) ) UpperCAmelCase : Union[str, Any] =nn.Parameter(torch.zeros(1 , snake_case__ ) ) @property # Copied from diffusers.models.unet_2d_condition.UNet2DConditionModel.attn_processors def UpperCAmelCase__ ( self ) -> Dict[str, AttentionProcessor]: '''simple docstring''' UpperCAmelCase : Dict ={} def fn_recursive_add_processors(snake_case__ , snake_case__ , snake_case__ ): if hasattr(snake_case__ , '''set_processor''' ): UpperCAmelCase : Tuple =module.processor for sub_name, child in module.named_children(): fn_recursive_add_processors(f'''{name}.{sub_name}''' , snake_case__ , snake_case__ ) return processors for name, module in self.named_children(): fn_recursive_add_processors(snake_case__ , snake_case__ , snake_case__ ) return processors def UpperCAmelCase__ ( self , snake_case__ ) -> Dict: '''simple docstring''' UpperCAmelCase : int =len(self.attn_processors.keys() ) if isinstance(snake_case__ , snake_case__ ) and len(snake_case__ ) != count: raise ValueError( f'''A dict of processors was passed, but the number of processors {len(snake_case__ )} does not match the''' f''' number of attention layers: {count}. Please make sure to pass {count} processor classes.''' ) def fn_recursive_attn_processor(snake_case__ , snake_case__ , snake_case__ ): if hasattr(snake_case__ , '''set_processor''' ): if not isinstance(snake_case__ , snake_case__ ): module.set_processor(snake_case__ ) else: module.set_processor(processor.pop(f'''{name}.processor''' ) ) for sub_name, child in module.named_children(): fn_recursive_attn_processor(f'''{name}.{sub_name}''' , snake_case__ , snake_case__ ) for name, module in self.named_children(): fn_recursive_attn_processor(snake_case__ , snake_case__ , snake_case__ ) def UpperCAmelCase__ ( self ) -> Optional[Any]: '''simple docstring''' self.set_attn_processor(AttnProcessor() ) def UpperCAmelCase__ ( self , snake_case__ , snake_case__ , snake_case__ , snake_case__ = None , snake_case__ = None , snake_case__ = True , ) -> Any: '''simple docstring''' UpperCAmelCase : List[str] =hidden_states.shape[0] UpperCAmelCase : int =timestep if not torch.is_tensor(snake_case__ ): UpperCAmelCase : Dict =torch.tensor([timesteps] , dtype=torch.long , device=hidden_states.device ) elif torch.is_tensor(snake_case__ ) and len(timesteps.shape ) == 0: UpperCAmelCase : Optional[int] =timesteps[None].to(hidden_states.device ) # broadcast to batch dimension in a way that's compatible with ONNX/Core ML UpperCAmelCase : Tuple =timesteps * torch.ones(snake_case__ , dtype=timesteps.dtype , device=timesteps.device ) UpperCAmelCase : Union[str, Any] =self.time_proj(snake_case__ ) # timesteps does not contain any weights and will always return f32 tensors # but time_embedding might be fp16, so we need to cast here. 
UpperCAmelCase : Any =timesteps_projected.to(dtype=self.dtype ) UpperCAmelCase : Any =self.time_embedding(snake_case__ ) if self.embedding_proj_norm is not None: UpperCAmelCase : Any =self.embedding_proj_norm(snake_case__ ) UpperCAmelCase : Dict =self.embedding_proj(snake_case__ ) if self.encoder_hidden_states_proj is not None and encoder_hidden_states is not None: UpperCAmelCase : Tuple =self.encoder_hidden_states_proj(snake_case__ ) elif self.encoder_hidden_states_proj is not None and encoder_hidden_states is None: raise ValueError('''`encoder_hidden_states_proj` requires `encoder_hidden_states` to be set''' ) UpperCAmelCase : int =self.proj_in(snake_case__ ) UpperCAmelCase : Dict =self.positional_embedding.to(hidden_states.dtype ) UpperCAmelCase : List[Any] =[] UpperCAmelCase : List[Any] =0 if encoder_hidden_states is not None: additional_embeds.append(snake_case__ ) additional_embeddings_len += encoder_hidden_states.shape[1] if len(proj_embeddings.shape ) == 2: UpperCAmelCase : List[str] =proj_embeddings[:, None, :] if len(hidden_states.shape ) == 2: UpperCAmelCase : Dict =hidden_states[:, None, :] UpperCAmelCase : int =additional_embeds + [ proj_embeddings, time_embeddings[:, None, :], hidden_states, ] if self.prd_embedding is not None: UpperCAmelCase : Optional[Any] =self.prd_embedding.to(hidden_states.dtype ).expand(snake_case__ , -1 , -1 ) additional_embeds.append(snake_case__ ) UpperCAmelCase : int =torch.cat( snake_case__ , dim=1 , ) # Allow positional_embedding to not include the `addtional_embeddings` and instead pad it with zeros for these additional tokens UpperCAmelCase : Any =additional_embeddings_len + proj_embeddings.shape[1] + 1 if positional_embeddings.shape[1] < hidden_states.shape[1]: UpperCAmelCase : List[Any] =F.pad( snake_case__ , ( 0, 0, additional_embeddings_len, self.prd_embedding.shape[1] if self.prd_embedding is not None else 0, ) , value=0.0 , ) UpperCAmelCase : Tuple =hidden_states + positional_embeddings if attention_mask is not None: UpperCAmelCase : List[str] =(1 - attention_mask.to(hidden_states.dtype )) * -1_0000.0 UpperCAmelCase : Tuple =F.pad(snake_case__ , (0, self.additional_embeddings) , value=0.0 ) UpperCAmelCase : Dict =(attention_mask[:, None, :] + self.causal_attention_mask).to(hidden_states.dtype ) UpperCAmelCase : Optional[int] =attention_mask.repeat_interleave(self.config.num_attention_heads , dim=0 ) if self.norm_in is not None: UpperCAmelCase : Optional[Any] =self.norm_in(snake_case__ ) for block in self.transformer_blocks: UpperCAmelCase : Optional[int] =block(snake_case__ , attention_mask=snake_case__ ) UpperCAmelCase : Optional[int] =self.norm_out(snake_case__ ) if self.prd_embedding is not None: UpperCAmelCase : List[Any] =hidden_states[:, -1] else: UpperCAmelCase : Dict =hidden_states[:, additional_embeddings_len:] UpperCAmelCase : List[str] =self.proj_to_clip_embeddings(snake_case__ ) if not return_dict: return (predicted_image_embedding,) return PriorTransformerOutput(predicted_image_embedding=snake_case__ ) def UpperCAmelCase__ ( self , snake_case__ ) -> Union[str, Any]: '''simple docstring''' UpperCAmelCase : str =(prior_latents * self.clip_std) + self.clip_mean return prior_latents
code_codestyle: 348
import unittest

from transformers import is_flax_available
from transformers.testing_utils import require_flax, require_sentencepiece, require_tokenizers, require_torch, slow


if is_flax_available():
    import optax
    from flax.training.common_utils import onehot

    from transformers import AutoTokenizer, FlaxMT5ForConditionalGeneration
    from transformers.models.t5.modeling_flax_t5 import shift_tokens_right


@require_torch
@require_sentencepiece
@require_tokenizers
@require_flax
class MT5IntegrationTest(unittest.TestCase):
    @slow
    def test_small_integration_test(self):
        model = FlaxMT5ForConditionalGeneration.from_pretrained("google/mt5-small")
        tokenizer = AutoTokenizer.from_pretrained("google/mt5-small")

        input_ids = tokenizer("Hello there", return_tensors="np").input_ids
        labels = tokenizer("Hi I am", return_tensors="np").input_ids

        decoder_input_ids = shift_tokens_right(labels, model.config.pad_token_id, model.config.decoder_start_token_id)

        logits = model(input_ids, decoder_input_ids=decoder_input_ids).logits
        loss = optax.softmax_cross_entropy(logits, onehot(labels, logits.shape[-1])).mean()

        mtf_score = -(labels.shape[-1] * loss.item())

        EXPECTED_SCORE = -84.9127
        self.assertTrue(abs(mtf_score - EXPECTED_SCORE) < 1e-4)
style_context_codestyle: 348
label: 1
import unittest

from transformers import is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device


if is_torch_available():
    from transformers import AutoModelForSeq2SeqLM, AutoTokenizer


@require_torch
@require_sentencepiece
@require_tokenizers
class MT5IntegrationTest(unittest.TestCase):
    @slow
    def test_small_integration_test(self):
        model = AutoModelForSeq2SeqLM.from_pretrained("google/mt5-small", return_dict=True).to(torch_device)
        tokenizer = AutoTokenizer.from_pretrained("google/mt5-small")

        input_ids = tokenizer("Hello there", return_tensors="pt").input_ids
        labels = tokenizer("Hi I am", return_tensors="pt").input_ids

        loss = model(input_ids.to(torch_device), labels=labels.to(torch_device)).loss
        mtf_score = -(labels.shape[-1] * loss.item())

        EXPECTED_SCORE = -84.9127
        self.assertTrue(abs(mtf_score - EXPECTED_SCORE) < 1e-4)
code_codestyle: 348
import unittest

import numpy as np

from diffusers import LMSDiscreteScheduler, OnnxStableDiffusionInpaintPipeline
from diffusers.utils.testing_utils import (
    is_onnx_available,
    load_image,
    nightly,
    require_onnxruntime,
    require_torch_gpu,
)

from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin


if is_onnx_available():
    import onnxruntime as ort


class OnnxStableDiffusionPipelineFastTests(OnnxPipelineTesterMixin, unittest.TestCase):
    # FIXME: add fast tests
    pass


@nightly
@require_onnxruntime
@require_torch_gpu
class OnnxStableDiffusionInpaintPipelineIntegrationTests(unittest.TestCase):
    @property
    def gpu_provider(self):
        return (
            "CUDAExecutionProvider",
            {
                "gpu_mem_limit": "15000000000",  # 15GB
                "arena_extend_strategy": "kSameAsRequested",
            },
        )

    @property
    def gpu_options(self):
        options = ort.SessionOptions()
        options.enable_mem_pattern = False
        return options

    def test_inference_default_pndm(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/in_paint/overture-creations-5sI6fQgYIuo.png"
        )
        mask_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/in_paint/overture-creations-5sI6fQgYIuo_mask.png"
        )
        pipe = OnnxStableDiffusionInpaintPipeline.from_pretrained(
            "runwayml/stable-diffusion-inpainting",
            revision="onnx",
            safety_checker=None,
            feature_extractor=None,
            provider=self.gpu_provider,
            sess_options=self.gpu_options,
        )
        pipe.set_progress_bar_config(disable=None)

        prompt = "A red cat sitting on a park bench"

        generator = np.random.RandomState(0)
        output = pipe(
            prompt=prompt,
            image=init_image,
            mask_image=mask_image,
            guidance_scale=7.5,
            num_inference_steps=10,
            generator=generator,
            output_type="np",
        )

        images = output.images
        image_slice = images[0, 255:258, 255:258, -1]

        assert images.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.2514, 0.3007, 0.3517, 0.1790, 0.2382, 0.3167, 0.1944, 0.2273, 0.2464])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3

    def test_inference_k_lms(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/in_paint/overture-creations-5sI6fQgYIuo.png"
        )
        mask_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/in_paint/overture-creations-5sI6fQgYIuo_mask.png"
        )
        lms_scheduler = LMSDiscreteScheduler.from_pretrained(
            "runwayml/stable-diffusion-inpainting", subfolder="scheduler", revision="onnx"
        )
        pipe = OnnxStableDiffusionInpaintPipeline.from_pretrained(
            "runwayml/stable-diffusion-inpainting",
            revision="onnx",
            scheduler=lms_scheduler,
            safety_checker=None,
            feature_extractor=None,
            provider=self.gpu_provider,
            sess_options=self.gpu_options,
        )
        pipe.set_progress_bar_config(disable=None)

        prompt = "A red cat sitting on a park bench"

        generator = np.random.RandomState(0)
        output = pipe(
            prompt=prompt,
            image=init_image,
            mask_image=mask_image,
            guidance_scale=7.5,
            num_inference_steps=20,
            generator=generator,
            output_type="np",
        )

        images = output.images
        image_slice = images[0, 255:258, 255:258, -1]

        assert images.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.0086, 0.0077, 0.0083, 0.0093, 0.0107, 0.0139, 0.0094, 0.0097, 0.0125])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3
style_context_codestyle: 348
label: 1
import unittest from transformers import TrOCRConfig from transformers.testing_utils import is_torch_available, require_torch, torch_device from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers.models.trocr.modeling_trocr import TrOCRDecoder, TrOCRForCausalLM @require_torch class __snake_case : def __init__( self , snake_case__ , snake_case__=99 , snake_case__=13 , snake_case__=16 , snake_case__=7 , snake_case__=True , snake_case__=True , snake_case__=True , snake_case__=False , snake_case__=True , snake_case__=2 , snake_case__=32 , snake_case__=4 , snake_case__=4 , snake_case__=30 , snake_case__=0 , snake_case__=1 , snake_case__=2 , snake_case__=None , ) -> Dict: '''simple docstring''' UpperCAmelCase : int =parent UpperCAmelCase : Union[str, Any] =batch_size UpperCAmelCase : List[Any] =decoder_seq_length # For common tests UpperCAmelCase : int =self.decoder_seq_length UpperCAmelCase : Union[str, Any] =is_training UpperCAmelCase : Tuple =use_attention_mask UpperCAmelCase : int =use_labels UpperCAmelCase : Union[str, Any] =vocab_size UpperCAmelCase : List[str] =d_model UpperCAmelCase : List[str] =d_model UpperCAmelCase : Dict =decoder_layers UpperCAmelCase : str =decoder_layers UpperCAmelCase : List[str] =decoder_ffn_dim UpperCAmelCase : int =decoder_attention_heads UpperCAmelCase : str =decoder_attention_heads UpperCAmelCase : Union[str, Any] =eos_token_id UpperCAmelCase : str =bos_token_id UpperCAmelCase : Union[str, Any] =pad_token_id UpperCAmelCase : Tuple =decoder_start_token_id UpperCAmelCase : Tuple =use_cache UpperCAmelCase : Optional[Any] =max_position_embeddings UpperCAmelCase : str =None UpperCAmelCase : Union[str, Any] =decoder_seq_length UpperCAmelCase : Optional[int] =2 UpperCAmelCase : Optional[int] =1 def UpperCAmelCase__ ( self ) -> Any: '''simple docstring''' UpperCAmelCase : Dict =ids_tensor([self.batch_size, self.decoder_seq_length] , self.vocab_size ) UpperCAmelCase : List[Any] =None if self.use_attention_mask: UpperCAmelCase : List[Any] =ids_tensor([self.batch_size, self.decoder_seq_length] , vocab_size=2 ) UpperCAmelCase : Optional[Any] =None if self.use_labels: UpperCAmelCase : Optional[Any] =ids_tensor([self.batch_size, self.decoder_seq_length] , self.vocab_size ) UpperCAmelCase : Optional[Any] =TrOCRConfig( vocab_size=self.vocab_size , d_model=self.d_model , decoder_layers=self.decoder_layers , decoder_ffn_dim=self.decoder_ffn_dim , decoder_attention_heads=self.decoder_attention_heads , eos_token_id=self.eos_token_id , bos_token_id=self.bos_token_id , use_cache=self.use_cache , pad_token_id=self.pad_token_id , decoder_start_token_id=self.decoder_start_token_id , max_position_embeddings=self.max_position_embeddings , ) return (config, input_ids, attention_mask, lm_labels) def UpperCAmelCase__ ( self , snake_case__ , snake_case__ , snake_case__ , snake_case__ , ) -> str: '''simple docstring''' UpperCAmelCase : List[str] =True UpperCAmelCase : Tuple =TrOCRDecoder(config=snake_case__ ).to(snake_case__ ).eval() UpperCAmelCase : Optional[int] =input_ids[:2] input_ids[input_ids == 0] += 1 # first forward pass UpperCAmelCase : List[Any] =model(snake_case__ , use_cache=snake_case__ ) UpperCAmelCase : Optional[int] =model(snake_case__ ) UpperCAmelCase : Dict =model(snake_case__ , use_cache=snake_case__ ) self.parent.assertTrue(len(snake_case__ 
) == len(snake_case__ ) ) self.parent.assertTrue(len(snake_case__ ) == len(snake_case__ ) + 1 ) UpperCAmelCase : int =outputs['''past_key_values'''] # create hypothetical next token and extent to next_input_ids UpperCAmelCase : List[Any] =ids_tensor((2, 1) , config.vocab_size - 1 ) + 1 # append to next input_ids and UpperCAmelCase : List[Any] =torch.cat([input_ids, next_tokens] , dim=-1 ) UpperCAmelCase : Union[str, Any] =model(snake_case__ )['''last_hidden_state'''] UpperCAmelCase : Dict =model(snake_case__ , past_key_values=snake_case__ )['''last_hidden_state'''] # select random slice UpperCAmelCase : Union[str, Any] =ids_tensor((1,) , output_from_past.shape[-1] ).item() UpperCAmelCase : List[str] =output_from_no_past[:, next_input_ids.shape[-1] - 1, random_slice_idx].detach() UpperCAmelCase : Tuple =output_from_past[:, 0, random_slice_idx].detach() # test that outputs are equal for slice assert torch.allclose(snake_case__ , snake_case__ , atol=1e-3 ) def UpperCAmelCase__ ( self ) -> int: '''simple docstring''' UpperCAmelCase : str =self.prepare_config_and_inputs() UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase : Any =config_and_inputs UpperCAmelCase : int ={'''input_ids''': input_ids, '''attention_mask''': attention_mask} return config, inputs_dict @require_torch class __snake_case ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , unittest.TestCase ): __lowerCamelCase : List[str] = (TrOCRDecoder, TrOCRForCausalLM) if is_torch_available() else () __lowerCamelCase : int = (TrOCRForCausalLM,) if is_torch_available() else () __lowerCamelCase : Tuple = {"""text-generation""": TrOCRForCausalLM} if is_torch_available() else {} __lowerCamelCase : Dict = True __lowerCamelCase : Optional[int] = False def UpperCAmelCase__ ( self ) -> List[Any]: '''simple docstring''' UpperCAmelCase : Any =TrOCRStandaloneDecoderModelTester(self , is_training=snake_case__ ) UpperCAmelCase : Tuple =ConfigTester(self , config_class=snake_case__ ) def UpperCAmelCase__ ( self ) -> List[Any]: '''simple docstring''' pass def UpperCAmelCase__ ( self ) -> Any: '''simple docstring''' pass def UpperCAmelCase__ ( self ) -> Any: '''simple docstring''' pass def UpperCAmelCase__ ( self ) -> Union[str, Any]: '''simple docstring''' self.config_tester.run_common_tests() def UpperCAmelCase__ ( self ) -> List[str]: '''simple docstring''' UpperCAmelCase : Union[str, Any] =self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_decoder_model_past(*snake_case__ ) def UpperCAmelCase__ ( self ) -> List[str]: '''simple docstring''' return @unittest.skip('''The model doesn\'t support left padding''' ) # and it's not used enough to be worth fixing :) def UpperCAmelCase__ ( self ) -> Any: '''simple docstring''' pass
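# The decoder-past check above asserts that cached incremental decoding matches
# a full forward pass. A hedged, generic sketch of the same property with any
# Hugging Face causal LM ("gpt2" here is only a stand-in, not the TrOCR model):
import torch
from transformers import AutoModelForCausalLM

toy_model = AutoModelForCausalLM.from_pretrained("gpt2").eval()
toy_ids = torch.tensor([[10, 20, 30]])
# full pass over all tokens vs. one cached step over the last token
full_logits = toy_model(toy_ids).logits[:, -1]
past = toy_model(toy_ids[:, :-1], use_cache=True).past_key_values
cached_logits = toy_model(toy_ids[:, -1:], past_key_values=past).logits[:, -1]
assert torch.allclose(full_logits, cached_logits, atol=1e-3)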
from unittest import TestCase from datasets import Dataset from minhash_deduplication import deduplicate_dataset, make_duplicate_clusters def lowerCAmelCase_ ( )-> int: '''simple docstring''' UpperCAmelCase : str ={ '''repo_name''': ['''test_repo1''', '''test_repo2''', '''test_repo3'''], '''path''': ['''test_1.py''', '''test_2.py''', '''unit_test.py'''], '''content''': ['''a ''' * 20, '''a ''' * 30, '''b ''' * 7], } UpperCAmelCase : Union[str, Any] =Dataset.from_dict(__lowerCAmelCase ) return dataset class __snake_case ( lowerCamelCase__ ): def UpperCAmelCase__ ( self ) -> Tuple: '''simple docstring''' UpperCAmelCase : List[str] =get_dataset() UpperCAmelCase : Optional[int] =make_duplicate_clusters(snake_case__ , 0.85 ) self.assertEqual(len(duplicate_clusters[0] ) , 2 ) def UpperCAmelCase__ ( self ) -> Tuple: '''simple docstring''' UpperCAmelCase : str =get_dataset() UpperCAmelCase , UpperCAmelCase : Tuple =deduplicate_dataset(snake_case__ ) self.assertEqual(len(snake_case__ ) , 2 ) print(snake_case__ ) self.assertEqual(duplicate_clusters[0][0]['''copies'''] , 2 ) self.assertEqual(duplicate_clusters[0][0]['''is_extreme'''] , snake_case__ )
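# A usage sketch inferred from the tests above (assumes the minhash_deduplication
# module from the codeparrot research example is importable; column names mirror
# get_dataset, and the default Jaccard threshold is assumed):
from datasets import Dataset
from minhash_deduplication import deduplicate_dataset

toy_ds = Dataset.from_dict(
    {
        "repo_name": ["r1", "r2", "r3"],
        "path": ["a.py", "b.py", "c.py"],
        "content": ["x " * 20, "x " * 30, "y " * 7],
    }
)
ds_dedup, clusters = deduplicate_dataset(toy_ds)
print(len(ds_dedup), clusters)  # the two near-duplicate files collapse into one cluster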
import colorsys from PIL import Image # type: ignore def lowerCAmelCase_ ( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )-> float: '''simple docstring''' UpperCAmelCase : str =x UpperCAmelCase : str =y for step in range(__lowerCAmelCase ): # noqa: B007 UpperCAmelCase : Union[str, Any] =a * a - b * b + x UpperCAmelCase : Optional[Any] =2 * a * b + y UpperCAmelCase : Dict =a_new # divergence happens for all complex number with an absolute value # greater than 4 if a * a + b * b > 4: break return step / (max_step - 1) def lowerCAmelCase_ ( __lowerCAmelCase )-> tuple: '''simple docstring''' if distance == 1: return (0, 0, 0) else: return (2_55, 2_55, 2_55) def lowerCAmelCase_ ( __lowerCAmelCase )-> tuple: '''simple docstring''' if distance == 1: return (0, 0, 0) else: return tuple(round(i * 2_55 ) for i in colorsys.hsv_to_rgb(__lowerCAmelCase , 1 , 1 ) ) def lowerCAmelCase_ ( __lowerCAmelCase = 8_00 , __lowerCAmelCase = 6_00 , __lowerCAmelCase = -0.6 , __lowerCAmelCase = 0 , __lowerCAmelCase = 3.2 , __lowerCAmelCase = 50 , __lowerCAmelCase = True , )-> Image.Image: '''simple docstring''' UpperCAmelCase : List[str] =Image.new('''RGB''' , (image_width, image_height) ) UpperCAmelCase : List[str] =img.load() # loop through the image-coordinates for image_x in range(__lowerCAmelCase ): for image_y in range(__lowerCAmelCase ): # determine the figure-coordinates based on the image-coordinates UpperCAmelCase : Any =figure_width / image_width * image_height UpperCAmelCase : int =figure_center_x + (image_x / image_width - 0.5) * figure_width UpperCAmelCase : int =figure_center_y + (image_y / image_height - 0.5) * figure_height UpperCAmelCase : Optional[Any] =get_distance(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) # color the corresponding pixel based on the selected coloring-function if use_distance_color_coding: UpperCAmelCase : Tuple =get_color_coded_rgb(__lowerCAmelCase ) else: UpperCAmelCase : Tuple =get_black_and_white_rgb(__lowerCAmelCase ) return img if __name__ == "__main__": import doctest doctest.testmod() # colored version, full figure __snake_case = get_image() # uncomment for colored version, different section, zoomed in # img = get_image(figure_center_x = -0.6, figure_center_y = -0.4, # figure_width = 0.8) # uncomment for black and white version, full figure # img = get_image(use_distance_color_coding = False) # uncomment to save the image # img.save("mandelbrot.png") img.show()
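# For clarity, the escape-time iteration above (the obfuscated get_distance)
# rewritten with descriptive names: a point that never escapes maps to 1.0,
# a point that diverges immediately maps to 0.0.
def escape_time(x: float, y: float, max_step: int = 50) -> float:
    a, b = x, y
    for step in range(max_step):  # noqa: B007
        # z -> z^2 + c, with z = a + bi and c = x + yi
        a, b = a * a - b * b + x, 2 * a * b + y
        if a * a + b * b > 4:  # |z| > 2 guarantees divergence
            break
    return step / (max_step - 1)

assert escape_time(0.0, 0.0) == 1.0  # the origin lies inside the Mandelbrot set
assert escape_time(2.0, 2.0) == 0.0  # diverges on the first step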
from typing import Callable, List, Optional, Tuple, Union import torch from transformers import CLIPTextModel, CLIPTokenizer from ...configuration_utils import ConfigMixin, register_to_config from ...models import ModelMixin, TransformeraDModel, VQModel from ...schedulers import VQDiffusionScheduler from ...utils import logging from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput __snake_case = logging.get_logger(__name__) # pylint: disable=invalid-name class __snake_case ( lowerCamelCase__ , lowerCamelCase__ ): @register_to_config def __init__( self , snake_case__ , snake_case__ = None , snake_case__ = None ) -> str: '''simple docstring''' super().__init__() UpperCAmelCase : Optional[Any] =learnable if self.learnable: assert hidden_size is not None, "learnable=True requires `hidden_size` to be set" assert length is not None, "learnable=True requires `length` to be set" UpperCAmelCase : Any =torch.zeros(snake_case__ , snake_case__ ) else: UpperCAmelCase : Union[str, Any] =None UpperCAmelCase : Optional[int] =torch.nn.Parameter(snake_case__ ) class __snake_case ( lowerCamelCase__ ): __lowerCamelCase : VQModel __lowerCamelCase : CLIPTextModel __lowerCamelCase : CLIPTokenizer __lowerCamelCase : TransformeraDModel __lowerCamelCase : LearnedClassifierFreeSamplingEmbeddings __lowerCamelCase : VQDiffusionScheduler def __init__( self , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , ) -> int: '''simple docstring''' super().__init__() self.register_modules( vqvae=snake_case__ , transformer=snake_case__ , text_encoder=snake_case__ , tokenizer=snake_case__ , scheduler=snake_case__ , learned_classifier_free_sampling_embeddings=snake_case__ , ) def UpperCAmelCase__ ( self , snake_case__ , snake_case__ , snake_case__ ) -> Optional[int]: '''simple docstring''' UpperCAmelCase : int =len(snake_case__ ) if isinstance(snake_case__ , snake_case__ ) else 1 # get prompt text embeddings UpperCAmelCase : Optional[int] =self.tokenizer( snake_case__ , padding='''max_length''' , max_length=self.tokenizer.model_max_length , return_tensors='''pt''' , ) UpperCAmelCase : int =text_inputs.input_ids if text_input_ids.shape[-1] > self.tokenizer.model_max_length: UpperCAmelCase : List[str] =self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :] ) logger.warning( '''The following part of your input was truncated because CLIP can only handle sequences up to''' f''' {self.tokenizer.model_max_length} tokens: {removed_text}''' ) UpperCAmelCase : Optional[Any] =text_input_ids[:, : self.tokenizer.model_max_length] UpperCAmelCase : List[Any] =self.text_encoder(text_input_ids.to(self.device ) )[0] # NOTE: This additional step of normalizing the text embeddings is from VQ-Diffusion. # While CLIP does normalize the pooled output of the text transformer when combining # the image and text embeddings, CLIP does not directly normalize the last hidden state. # # CLIP normalizing the pooled output. 
# https://github.com/huggingface/transformers/blob/d92e22d1f28324f513f3080e5c47c071a3916721/src/transformers/models/clip/modeling_clip.py#L1052-L1053 UpperCAmelCase : int =prompt_embeds / prompt_embeds.norm(dim=-1 , keepdim=snake_case__ ) # duplicate text embeddings for each generation per prompt UpperCAmelCase : int =prompt_embeds.repeat_interleave(snake_case__ , dim=0 ) if do_classifier_free_guidance: if self.learned_classifier_free_sampling_embeddings.learnable: UpperCAmelCase : Optional[int] =self.learned_classifier_free_sampling_embeddings.embeddings UpperCAmelCase : str =negative_prompt_embeds.unsqueeze(0 ).repeat(snake_case__ , 1 , 1 ) else: UpperCAmelCase : str =[''''''] * batch_size UpperCAmelCase : Tuple =text_input_ids.shape[-1] UpperCAmelCase : Optional[Any] =self.tokenizer( snake_case__ , padding='''max_length''' , max_length=snake_case__ , truncation=snake_case__ , return_tensors='''pt''' , ) UpperCAmelCase : Optional[Any] =self.text_encoder(uncond_input.input_ids.to(self.device ) )[0] # See comment for normalizing text embeddings UpperCAmelCase : Optional[int] =negative_prompt_embeds / negative_prompt_embeds.norm(dim=-1 , keepdim=snake_case__ ) # duplicate unconditional embeddings for each generation per prompt, using mps friendly method UpperCAmelCase : Optional[Any] =negative_prompt_embeds.shape[1] UpperCAmelCase : Union[str, Any] =negative_prompt_embeds.repeat(1 , snake_case__ , 1 ) UpperCAmelCase : Optional[Any] =negative_prompt_embeds.view(batch_size * num_images_per_prompt , snake_case__ , -1 ) # For classifier free guidance, we need to do two forward passes. # Here we concatenate the unconditional and text embeddings into a single batch # to avoid doing two forward passes UpperCAmelCase : int =torch.cat([negative_prompt_embeds, prompt_embeds] ) return prompt_embeds @torch.no_grad() def __call__( self , snake_case__ , snake_case__ = 100 , snake_case__ = 5.0 , snake_case__ = 1.0 , snake_case__ = 1 , snake_case__ = None , snake_case__ = None , snake_case__ = "pil" , snake_case__ = True , snake_case__ = None , snake_case__ = 1 , ) -> Union[ImagePipelineOutput, Tuple]: '''simple docstring''' if isinstance(snake_case__ , snake_case__ ): UpperCAmelCase : Optional[int] =1 elif isinstance(snake_case__ , snake_case__ ): UpperCAmelCase : Tuple =len(snake_case__ ) else: raise ValueError(f'''`prompt` has to be of type `str` or `list` but is {type(snake_case__ )}''' ) UpperCAmelCase : Tuple =batch_size * num_images_per_prompt UpperCAmelCase : List[str] =guidance_scale > 1.0 UpperCAmelCase : List[Any] =self._encode_prompt(snake_case__ , snake_case__ , snake_case__ ) if (callback_steps is None) or ( callback_steps is not None and (not isinstance(snake_case__ , snake_case__ ) or callback_steps <= 0) ): raise ValueError( f'''`callback_steps` has to be a positive integer but is {callback_steps} of type''' f''' {type(snake_case__ )}.''' ) # get the initial completely masked latents unless the user supplied it UpperCAmelCase : int =(batch_size, self.transformer.num_latent_pixels) if latents is None: UpperCAmelCase : Union[str, Any] =self.transformer.num_vector_embeds - 1 UpperCAmelCase : str =torch.full(snake_case__ , snake_case__ ).to(self.device ) else: if latents.shape != latents_shape: raise ValueError(f'''Unexpected latents shape, got {latents.shape}, expected {latents_shape}''' ) if (latents < 0).any() or (latents >= self.transformer.num_vector_embeds).any(): raise ValueError( '''Unexpected latents value(s). All latents be valid embedding indices i.e. 
in the range 0,''' f''' {self.transformer.num_vector_embeds - 1} (inclusive).''' ) UpperCAmelCase : Any =latents.to(self.device ) # set timesteps self.scheduler.set_timesteps(snake_case__ , device=self.device ) UpperCAmelCase : Any =self.scheduler.timesteps.to(self.device ) UpperCAmelCase : Optional[int] =latents for i, t in enumerate(self.progress_bar(snake_case__ ) ): # expand the sample if we are doing classifier free guidance UpperCAmelCase : Optional[Any] =torch.cat([sample] * 2 ) if do_classifier_free_guidance else sample # predict the un-noised image # model_output == `log_p_x_0` UpperCAmelCase : Optional[int] =self.transformer(snake_case__ , encoder_hidden_states=snake_case__ , timestep=snake_case__ ).sample if do_classifier_free_guidance: UpperCAmelCase , UpperCAmelCase : str =model_output.chunk(2 ) UpperCAmelCase : Optional[int] =model_output_uncond + guidance_scale * (model_output_text - model_output_uncond) model_output -= torch.logsumexp(snake_case__ , dim=1 , keepdim=snake_case__ ) UpperCAmelCase : Tuple =self.truncate(snake_case__ , snake_case__ ) # remove `log(0)`'s (`-inf`s) UpperCAmelCase : Optional[Any] =model_output.clamp(-70 ) # compute the previous noisy sample x_t -> x_t-1 UpperCAmelCase : int =self.scheduler.step(snake_case__ , timestep=snake_case__ , sample=snake_case__ , generator=snake_case__ ).prev_sample # call the callback, if provided if callback is not None and i % callback_steps == 0: callback(snake_case__ , snake_case__ , snake_case__ ) UpperCAmelCase : Optional[int] =self.vqvae.config.vq_embed_dim UpperCAmelCase : Optional[Any] =(batch_size, self.transformer.height, self.transformer.width, embedding_channels) UpperCAmelCase : Dict =self.vqvae.quantize.get_codebook_entry(snake_case__ , shape=snake_case__ ) UpperCAmelCase : Tuple =self.vqvae.decode(snake_case__ , force_not_quantize=snake_case__ ).sample UpperCAmelCase : Union[str, Any] =(image / 2 + 0.5).clamp(0 , 1 ) UpperCAmelCase : Any =image.cpu().permute(0 , 2 , 3 , 1 ).numpy() if output_type == "pil": UpperCAmelCase : List[str] =self.numpy_to_pil(snake_case__ ) if not return_dict: return (image,) return ImagePipelineOutput(images=snake_case__ ) def UpperCAmelCase__ ( self , snake_case__ , snake_case__ ) -> torch.FloatTensor: '''simple docstring''' UpperCAmelCase , UpperCAmelCase : int =torch.sort(snake_case__ , 1 , descending=snake_case__ ) UpperCAmelCase : Union[str, Any] =torch.exp(snake_case__ ) UpperCAmelCase : Union[str, Any] =sorted_p_x_0.cumsum(dim=1 ) < truncation_rate # Ensure that at least the largest probability is not zeroed out UpperCAmelCase : Optional[Any] =torch.full_like(keep_mask[:, 0:1, :] , snake_case__ ) UpperCAmelCase : Tuple =torch.cat((all_true, keep_mask) , dim=1 ) UpperCAmelCase : int =keep_mask[:, :-1, :] UpperCAmelCase : int =keep_mask.gather(1 , indices.argsort(1 ) ) UpperCAmelCase : Dict =log_p_x_0.clone() UpperCAmelCase : List[Any] =-torch.inf # -inf = log(0) return rv
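# A hypothetical end-to-end sketch of this pipeline (assumes the
# microsoft/vq-diffusion-ithq checkpoint that VQDiffusionPipeline targets,
# and that the deobfuscated class name is VQDiffusionPipeline):
import torch
from diffusers import VQDiffusionPipeline

vq_pipe = VQDiffusionPipeline.from_pretrained("microsoft/vq-diffusion-ithq")
vq_pipe = vq_pipe.to("cuda" if torch.cuda.is_available() else "cpu")
sample = vq_pipe("teddy bear playing in the pool", num_inference_steps=100).images[0]
sample.save("vq_diffusion_sample.png")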
from typing import List from .keymap import KEYMAP, get_character def lowerCAmelCase_ ( __lowerCAmelCase )-> List[Any]: '''simple docstring''' def decorator(__lowerCAmelCase ): UpperCAmelCase : Tuple =getattr(__lowerCAmelCase , '''handle_key''' , [] ) handle += [key] setattr(__lowerCAmelCase , '''handle_key''' , __lowerCAmelCase ) return func return decorator def lowerCAmelCase_ ( *__lowerCAmelCase )-> Dict: '''simple docstring''' def decorator(__lowerCAmelCase ): UpperCAmelCase : Any =getattr(__lowerCAmelCase , '''handle_key''' , [] ) handle += keys setattr(__lowerCAmelCase , '''handle_key''' , __lowerCAmelCase ) return func return decorator class __snake_case ( lowerCamelCase__ ): def __new__( cls , snake_case__ , snake_case__ , snake_case__ ) -> List[str]: '''simple docstring''' UpperCAmelCase : Optional[int] =super().__new__(cls , snake_case__ , snake_case__ , snake_case__ ) if not hasattr(snake_case__ , '''key_handler''' ): setattr(snake_case__ , '''key_handler''' , {} ) setattr(snake_case__ , '''handle_input''' , KeyHandler.handle_input ) for value in attrs.values(): UpperCAmelCase : str =getattr(snake_case__ , '''handle_key''' , [] ) for key in handled_keys: UpperCAmelCase : Any =value return new_cls @staticmethod def UpperCAmelCase__ ( cls ) -> int: '''simple docstring''' UpperCAmelCase : str =get_character() if char != KEYMAP["undefined"]: UpperCAmelCase : Dict =ord(snake_case__ ) UpperCAmelCase : Optional[int] =cls.key_handler.get(snake_case__ ) if handler: UpperCAmelCase : Optional[int] =char return handler(cls ) else: return None def lowerCAmelCase_ ( cls )-> Optional[Any]: '''simple docstring''' return KeyHandler(cls.__name__ , cls.__bases__ , cls.__dict__.copy() )
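# A self-contained sketch of the same registration pattern with readable names
# (hypothetical names; mirrors the decorator + metaclass above): the metaclass
# walks the class attributes and indexes every method tagged with a key.
class KeyRegistry(type):
    def __new__(mcs, name, bases, attrs):
        new_cls = super().__new__(mcs, name, bases, attrs)
        new_cls.key_handler = {}
        for value in attrs.values():
            for key in getattr(value, "handle_key", []):
                new_cls.key_handler[key] = value
        return new_cls

def mark(key):
    def decorator(func):
        func.handle_key = getattr(func, "handle_key", []) + [key]
        return func
    return decorator

class Menu(metaclass=KeyRegistry):
    @mark(ord("q"))
    def on_quit(self):
        return "quit"

assert Menu.key_handler[ord("q")](Menu()) == "quit"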
import unittest import numpy as np import torch from diffusers import KarrasVePipeline, KarrasVeScheduler, UNetaDModel from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device enable_full_determinism() class __snake_case ( unittest.TestCase ): @property def UpperCAmelCase__ ( self ) -> int: '''simple docstring''' torch.manual_seed(0 ) UpperCAmelCase : Any =UNetaDModel( block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=3 , out_channels=3 , down_block_types=('''DownBlock2D''', '''AttnDownBlock2D''') , up_block_types=('''AttnUpBlock2D''', '''UpBlock2D''') , ) return model def UpperCAmelCase__ ( self ) -> Union[str, Any]: '''simple docstring''' UpperCAmelCase : Tuple =self.dummy_uncond_unet UpperCAmelCase : Optional[int] =KarrasVeScheduler() UpperCAmelCase : List[Any] =KarrasVePipeline(unet=snake_case__ , scheduler=snake_case__ ) pipe.to(snake_case__ ) pipe.set_progress_bar_config(disable=snake_case__ ) UpperCAmelCase : List[str] =torch.manual_seed(0 ) UpperCAmelCase : List[str] =pipe(num_inference_steps=2 , generator=snake_case__ , output_type='''numpy''' ).images UpperCAmelCase : str =torch.manual_seed(0 ) UpperCAmelCase : str =pipe(num_inference_steps=2 , generator=snake_case__ , output_type='''numpy''' , return_dict=snake_case__ )[0] UpperCAmelCase : Any =image[0, -3:, -3:, -1] UpperCAmelCase : List[str] =image_from_tuple[0, -3:, -3:, -1] assert image.shape == (1, 32, 32, 3) UpperCAmelCase : int =np.array([0.0, 1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2 assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2 @slow @require_torch class __snake_case ( unittest.TestCase ): def UpperCAmelCase__ ( self ) -> List[Any]: '''simple docstring''' UpperCAmelCase : Tuple ='''google/ncsnpp-celebahq-256''' UpperCAmelCase : int =UNetaDModel.from_pretrained(snake_case__ ) UpperCAmelCase : Dict =KarrasVeScheduler() UpperCAmelCase : Union[str, Any] =KarrasVePipeline(unet=snake_case__ , scheduler=snake_case__ ) pipe.to(snake_case__ ) pipe.set_progress_bar_config(disable=snake_case__ ) UpperCAmelCase : Any =torch.manual_seed(0 ) UpperCAmelCase : Tuple =pipe(num_inference_steps=20 , generator=snake_case__ , output_type='''numpy''' ).images UpperCAmelCase : Optional[int] =image[0, -3:, -3:, -1] assert image.shape == (1, 256, 256, 3) UpperCAmelCase : Tuple =np.array([0.578, 0.5811, 0.5924, 0.5809, 0.587, 0.5886, 0.5861, 0.5802, 0.586] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
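# A hedged reproduction of the slow test above outside unittest (assumes the
# google/ncsnpp-celebahq-256 checkpoint and a diffusers version that still
# ships KarrasVePipeline; UNet2DModel is the deobfuscated UNetaDModel):
import torch
from diffusers import KarrasVePipeline, KarrasVeScheduler, UNet2DModel

karras_unet = UNet2DModel.from_pretrained("google/ncsnpp-celebahq-256")
karras_pipe = KarrasVePipeline(unet=karras_unet, scheduler=KarrasVeScheduler())
karras_image = karras_pipe(num_inference_steps=20, generator=torch.manual_seed(0)).images[0]
karras_image.save("karras_ve_sample.png")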
import inspect import unittest import torch import torch.nn as nn from accelerate.hooks import ( AlignDevicesHook, ModelHook, SequentialHook, add_hook_to_module, attach_align_device_hook, remove_hook_from_module, remove_hook_from_submodules, ) from accelerate.test_utils import require_multi_gpu class __snake_case ( nn.Module ): def __init__( self ) -> List[Any]: '''simple docstring''' super().__init__() UpperCAmelCase : List[Any] =nn.Linear(3 , 4 ) UpperCAmelCase : Any =nn.BatchNormad(4 ) UpperCAmelCase : List[str] =nn.Linear(4 , 5 ) def UpperCAmelCase__ ( self , snake_case__ ) -> Optional[int]: '''simple docstring''' return self.lineara(self.batchnorm(self.lineara(snake_case__ ) ) ) class __snake_case ( lowerCamelCase__ ): def UpperCAmelCase__ ( self , snake_case__ , *snake_case__ , **snake_case__ ) -> Any: '''simple docstring''' return (args[0] + 1,) + args[1:], kwargs class __snake_case ( lowerCamelCase__ ): def UpperCAmelCase__ ( self , snake_case__ , snake_case__ ) -> int: '''simple docstring''' return output + 1 class __snake_case ( unittest.TestCase ): def UpperCAmelCase__ ( self ) -> Tuple: '''simple docstring''' UpperCAmelCase : Optional[int] =ModelForTest() UpperCAmelCase : Tuple =ModelHook() add_hook_to_module(snake_case__ , snake_case__ ) self.assertEqual(test_model._hf_hook , snake_case__ ) self.assertTrue(hasattr(snake_case__ , '''_old_forward''' ) ) # Check adding the hook did not change the name or the signature self.assertEqual(test_model.forward.__name__ , '''forward''' ) self.assertListEqual(list(inspect.signature(test_model.forward ).parameters ) , ['''x'''] ) remove_hook_from_module(snake_case__ ) self.assertFalse(hasattr(snake_case__ , '''_hf_hook''' ) ) self.assertFalse(hasattr(snake_case__ , '''_old_forward''' ) ) def UpperCAmelCase__ ( self ) -> Tuple: '''simple docstring''' UpperCAmelCase : List[str] =ModelForTest() UpperCAmelCase : Optional[int] =ModelHook() add_hook_to_module(snake_case__ , snake_case__ ) add_hook_to_module(snake_case__ , snake_case__ , append=snake_case__ ) self.assertEqual(isinstance(test_model._hf_hook , snake_case__ ) , snake_case__ ) self.assertEqual(len(test_model._hf_hook.hooks ) , 2 ) self.assertTrue(hasattr(snake_case__ , '''_old_forward''' ) ) # Check adding the hook did not change the name or the signature self.assertEqual(test_model.forward.__name__ , '''forward''' ) self.assertListEqual(list(inspect.signature(test_model.forward ).parameters ) , ['''x'''] ) remove_hook_from_module(snake_case__ ) self.assertFalse(hasattr(snake_case__ , '''_hf_hook''' ) ) self.assertFalse(hasattr(snake_case__ , '''_old_forward''' ) ) def UpperCAmelCase__ ( self ) -> int: '''simple docstring''' UpperCAmelCase : Dict =ModelForTest() UpperCAmelCase : Tuple =torch.randn(2 , 3 ) UpperCAmelCase : Union[str, Any] =test_model(x + 1 ) UpperCAmelCase : Optional[int] =test_model(x + 2 ) UpperCAmelCase : Union[str, Any] =PreForwardHook() add_hook_to_module(snake_case__ , snake_case__ ) UpperCAmelCase : Union[str, Any] =test_model(snake_case__ ) self.assertTrue(torch.allclose(snake_case__ , snake_case__ , atol=1e-5 ) ) # Attaching a hook to a model when it already has one replaces, does not chain UpperCAmelCase : Dict =PreForwardHook() add_hook_to_module(snake_case__ , snake_case__ ) UpperCAmelCase : Optional[Any] =test_model(snake_case__ ) self.assertTrue(torch.allclose(snake_case__ , snake_case__ , atol=1e-5 ) ) # You need to use the sequential hook to chain two or more hooks UpperCAmelCase : int =SequentialHook(PreForwardHook() , PreForwardHook() ) 
add_hook_to_module(snake_case__ , snake_case__ ) UpperCAmelCase : List[str] =test_model(snake_case__ ) assert torch.allclose(snake_case__ , snake_case__ , atol=1e-5 ) def UpperCAmelCase__ ( self ) -> Optional[int]: '''simple docstring''' UpperCAmelCase : Tuple =ModelForTest() UpperCAmelCase : Any =torch.randn(2 , 3 ) UpperCAmelCase : int =test_model(snake_case__ ) UpperCAmelCase : List[str] =PostForwardHook() add_hook_to_module(snake_case__ , snake_case__ ) UpperCAmelCase : Optional[int] =test_model(snake_case__ ) self.assertTrue(torch.allclose(snake_case__ , output + 1 , atol=1e-5 ) ) # Attaching a hook to a model when it already has one replaces, does not chain UpperCAmelCase : List[Any] =PostForwardHook() add_hook_to_module(snake_case__ , snake_case__ ) UpperCAmelCase : List[Any] =test_model(snake_case__ ) self.assertTrue(torch.allclose(snake_case__ , output + 1 , atol=1e-5 ) ) # You need to use the sequential hook to chain two or more hooks UpperCAmelCase : Optional[Any] =SequentialHook(PostForwardHook() , PostForwardHook() ) add_hook_to_module(snake_case__ , snake_case__ ) UpperCAmelCase : List[Any] =test_model(snake_case__ ) assert torch.allclose(snake_case__ , output + 2 , atol=1e-5 ) def UpperCAmelCase__ ( self ) -> Dict: '''simple docstring''' UpperCAmelCase : Tuple =ModelForTest() UpperCAmelCase : str =torch.randn(2 , 3 ) UpperCAmelCase : List[Any] =test_model(snake_case__ ) UpperCAmelCase : int =PostForwardHook() add_hook_to_module(snake_case__ , snake_case__ ) UpperCAmelCase : Any =test_model(snake_case__ ) self.assertTrue(torch.allclose(snake_case__ , output + 1 ) ) self.assertTrue(outputa.requires_grad ) UpperCAmelCase : str =True UpperCAmelCase : str =test_model(snake_case__ ) self.assertFalse(outputa.requires_grad ) @require_multi_gpu def UpperCAmelCase__ ( self ) -> List[str]: '''simple docstring''' UpperCAmelCase : Optional[int] =ModelForTest() # Everything is on CPU self.assertEqual(model.lineara.weight.device , torch.device('''cpu''' ) ) self.assertEqual(model.batchnorm.weight.device , torch.device('''cpu''' ) ) self.assertEqual(model.lineara.weight.device , torch.device('''cpu''' ) ) # This will move each submodule on different devices add_hook_to_module(model.lineara , AlignDevicesHook(execution_device=0 ) ) add_hook_to_module(model.batchnorm , AlignDevicesHook(execution_device=0 ) ) add_hook_to_module(model.lineara , AlignDevicesHook(execution_device=1 ) ) self.assertEqual(model.lineara.weight.device , torch.device(0 ) ) self.assertEqual(model.batchnorm.weight.device , torch.device(0 ) ) self.assertEqual(model.batchnorm.running_mean.device , torch.device(0 ) ) self.assertEqual(model.lineara.weight.device , torch.device(1 ) ) # We can still make a forward pass. The input does not need to be on any particular device UpperCAmelCase : Optional[int] =torch.randn(2 , 3 ) UpperCAmelCase : Optional[int] =model(snake_case__ ) self.assertEqual(output.device , torch.device(1 ) ) # We can add a general hook to put back output on same device as input. 
add_hook_to_module(snake_case__ , AlignDevicesHook(io_same_device=snake_case__ ) ) UpperCAmelCase : str =torch.randn(2 , 3 ).to(0 ) UpperCAmelCase : List[Any] =model(snake_case__ ) self.assertEqual(output.device , torch.device(0 ) ) def UpperCAmelCase__ ( self ) -> Optional[Any]: '''simple docstring''' UpperCAmelCase : int =ModelForTest() # Everything is on CPU self.assertEqual(model.lineara.weight.device , torch.device('''cpu''' ) ) self.assertEqual(model.batchnorm.weight.device , torch.device('''cpu''' ) ) self.assertEqual(model.lineara.weight.device , torch.device('''cpu''' ) ) # This will move each submodule on different devices UpperCAmelCase : str ={'''execution_device''': 0 if torch.cuda.is_available() else '''cpu''', '''offload''': True} add_hook_to_module(model.lineara , AlignDevicesHook(**snake_case__ ) ) add_hook_to_module(model.batchnorm , AlignDevicesHook(**snake_case__ ) ) add_hook_to_module(model.lineara , AlignDevicesHook(**snake_case__ ) ) # Parameters have been offloaded, so on the meta device self.assertEqual(model.lineara.weight.device , torch.device('''meta''' ) ) self.assertEqual(model.batchnorm.weight.device , torch.device('''meta''' ) ) self.assertEqual(model.lineara.weight.device , torch.device('''meta''' ) ) # Buffers are not included in the offload by default, so are on the execution device UpperCAmelCase : Any =torch.device(hook_kwargs['''execution_device'''] ) self.assertEqual(model.batchnorm.running_mean.device , snake_case__ ) UpperCAmelCase : Union[str, Any] =torch.randn(2 , 3 ) UpperCAmelCase : Tuple =model(snake_case__ ) self.assertEqual(output.device , snake_case__ ) # Removing hooks loads back the weights in the model. remove_hook_from_module(model.lineara ) remove_hook_from_module(model.batchnorm ) remove_hook_from_module(model.lineara ) self.assertEqual(model.lineara.weight.device , torch.device('''cpu''' ) ) self.assertEqual(model.batchnorm.weight.device , torch.device('''cpu''' ) ) self.assertEqual(model.lineara.weight.device , torch.device('''cpu''' ) ) # Now test with buffers included in the offload UpperCAmelCase : Any ={ '''execution_device''': 0 if torch.cuda.is_available() else '''cpu''', '''offload''': True, '''offload_buffers''': True, } add_hook_to_module(model.lineara , AlignDevicesHook(**snake_case__ ) ) add_hook_to_module(model.batchnorm , AlignDevicesHook(**snake_case__ ) ) add_hook_to_module(model.lineara , AlignDevicesHook(**snake_case__ ) ) # Parameters have been offloaded, so on the meta device, buffers included self.assertEqual(model.lineara.weight.device , torch.device('''meta''' ) ) self.assertEqual(model.batchnorm.weight.device , torch.device('''meta''' ) ) self.assertEqual(model.lineara.weight.device , torch.device('''meta''' ) ) self.assertEqual(model.batchnorm.running_mean.device , torch.device('''meta''' ) ) UpperCAmelCase : int =torch.randn(2 , 3 ) UpperCAmelCase : List[str] =model(snake_case__ ) self.assertEqual(output.device , snake_case__ ) # Removing hooks loads back the weights in the model. 
remove_hook_from_module(model.lineara ) remove_hook_from_module(model.batchnorm ) remove_hook_from_module(model.lineara ) self.assertEqual(model.lineara.weight.device , torch.device('''cpu''' ) ) self.assertEqual(model.batchnorm.weight.device , torch.device('''cpu''' ) ) self.assertEqual(model.lineara.weight.device , torch.device('''cpu''' ) ) def UpperCAmelCase__ ( self ) -> Any: '''simple docstring''' UpperCAmelCase : Dict =ModelForTest() # Everything is on CPU self.assertEqual(model.lineara.weight.device , torch.device('''cpu''' ) ) self.assertEqual(model.batchnorm.weight.device , torch.device('''cpu''' ) ) self.assertEqual(model.lineara.weight.device , torch.device('''cpu''' ) ) # This will move each submodule on different devices UpperCAmelCase : List[str] =0 if torch.cuda.is_available() else '''cpu''' attach_align_device_hook(snake_case__ , execution_device=snake_case__ , offload=snake_case__ ) # Parameters have been offloaded, so on the meta device self.assertEqual(model.lineara.weight.device , torch.device('''meta''' ) ) self.assertEqual(model.batchnorm.weight.device , torch.device('''meta''' ) ) self.assertEqual(model.lineara.weight.device , torch.device('''meta''' ) ) # Buffers are not included in the offload by default, so are on the execution device UpperCAmelCase : Tuple =torch.device(snake_case__ ) self.assertEqual(model.batchnorm.running_mean.device , snake_case__ ) UpperCAmelCase : Dict =torch.randn(2 , 3 ) UpperCAmelCase : Union[str, Any] =model(snake_case__ ) self.assertEqual(output.device , snake_case__ ) # Removing hooks loads back the weights in the model. remove_hook_from_submodules(snake_case__ ) self.assertEqual(model.lineara.weight.device , torch.device('''cpu''' ) ) self.assertEqual(model.batchnorm.weight.device , torch.device('''cpu''' ) ) self.assertEqual(model.lineara.weight.device , torch.device('''cpu''' ) ) # Now test with buffers included in the offload attach_align_device_hook(snake_case__ , execution_device=snake_case__ , offload=snake_case__ , offload_buffers=snake_case__ ) # Parameters have been offloaded, so on the meta device, buffers included self.assertEqual(model.lineara.weight.device , torch.device('''meta''' ) ) self.assertEqual(model.batchnorm.weight.device , torch.device('''meta''' ) ) self.assertEqual(model.lineara.weight.device , torch.device('''meta''' ) ) self.assertEqual(model.batchnorm.running_mean.device , torch.device('''meta''' ) ) UpperCAmelCase : List[Any] =torch.randn(2 , 3 ) UpperCAmelCase : str =model(snake_case__ ) self.assertEqual(output.device , snake_case__ ) # Removing hooks loads back the weights in the model. 
remove_hook_from_submodules(snake_case__ ) self.assertEqual(model.lineara.weight.device , torch.device('''cpu''' ) ) self.assertEqual(model.batchnorm.weight.device , torch.device('''cpu''' ) ) self.assertEqual(model.lineara.weight.device , torch.device('''cpu''' ) ) def UpperCAmelCase__ ( self ) -> str: '''simple docstring''' UpperCAmelCase : Tuple =ModelForTest() # Everything is on CPU self.assertEqual(model.lineara.weight.device , torch.device('''cpu''' ) ) self.assertEqual(model.batchnorm.weight.device , torch.device('''cpu''' ) ) self.assertEqual(model.lineara.weight.device , torch.device('''cpu''' ) ) # This will move each submodule on different devices UpperCAmelCase : Optional[int] =0 if torch.cuda.is_available() else '''cpu''' attach_align_device_hook( snake_case__ , execution_device=snake_case__ , offload=snake_case__ , weights_map=model.state_dict() ) # Parameters have been offloaded, so on the meta device self.assertEqual(model.lineara.weight.device , torch.device('''meta''' ) ) self.assertEqual(model.batchnorm.weight.device , torch.device('''meta''' ) ) self.assertEqual(model.lineara.weight.device , torch.device('''meta''' ) ) # Buffers are not included in the offload by default, so are on the execution device UpperCAmelCase : Optional[int] =torch.device(snake_case__ ) self.assertEqual(model.batchnorm.running_mean.device , snake_case__ ) UpperCAmelCase : List[str] =torch.randn(2 , 3 ) UpperCAmelCase : Union[str, Any] =model(snake_case__ ) self.assertEqual(output.device , snake_case__ ) # Removing hooks loads back the weights in the model. remove_hook_from_submodules(snake_case__ ) self.assertEqual(model.lineara.weight.device , torch.device('''cpu''' ) ) self.assertEqual(model.batchnorm.weight.device , torch.device('''cpu''' ) ) self.assertEqual(model.lineara.weight.device , torch.device('''cpu''' ) ) # Now test with buffers included in the offload attach_align_device_hook( snake_case__ , execution_device=snake_case__ , offload=snake_case__ , weights_map=model.state_dict() , offload_buffers=snake_case__ , ) # Parameters have been offloaded, so on the meta device, buffers included self.assertEqual(model.lineara.weight.device , torch.device('''meta''' ) ) self.assertEqual(model.batchnorm.weight.device , torch.device('''meta''' ) ) self.assertEqual(model.lineara.weight.device , torch.device('''meta''' ) ) self.assertEqual(model.batchnorm.running_mean.device , torch.device('''meta''' ) ) UpperCAmelCase : Optional[int] =torch.randn(2 , 3 ) UpperCAmelCase : Dict =model(snake_case__ ) self.assertEqual(output.device , snake_case__ ) # Removing hooks loads back the weights in the model. remove_hook_from_submodules(snake_case__ ) self.assertEqual(model.lineara.weight.device , torch.device('''cpu''' ) ) self.assertEqual(model.batchnorm.weight.device , torch.device('''cpu''' ) ) self.assertEqual(model.lineara.weight.device , torch.device('''cpu''' ) )
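# A minimal self-contained sketch of the hook mechanism these tests exercise:
# a pre-forward hook rewrites the positional arguments before the wrapped
# module's original forward runs.
import torch
import torch.nn as nn
from accelerate.hooks import ModelHook, add_hook_to_module

class AddOnePreHook(ModelHook):
    def pre_forward(self, module, *args, **kwargs):
        # shift the first input by one, pass everything else through
        return (args[0] + 1,) + args[1:], kwargs

hooked_layer = nn.Linear(3, 3)
add_hook_to_module(hooked_layer, AddOnePreHook())
sample_x = torch.randn(2, 3)
expected = nn.functional.linear(sample_x + 1, hooked_layer.weight, hooked_layer.bias)
assert torch.allclose(hooked_layer(sample_x), expected, atol=1e-5)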
import qiskit def lowerCAmelCase_ ( __lowerCAmelCase , __lowerCAmelCase )-> qiskit.result.counts.Counts: '''simple docstring''' UpperCAmelCase : Union[str, Any] =qiskit.Aer.get_backend('''aer_simulator''' ) UpperCAmelCase : List[str] =qiskit.QuantumCircuit(4 , 2 ) # encode inputs in qubits 0 and 1 if bita == 1: qc_ha.x(0 ) if bita == 1: qc_ha.x(1 ) qc_ha.barrier() # use cnots to write XOR of the inputs on qubit2 qc_ha.cx(0 , 2 ) qc_ha.cx(1 , 2 ) # use ccx / toffoli gate to write AND of the inputs on qubit3 qc_ha.ccx(0 , 1 , 3 ) qc_ha.barrier() # extract outputs qc_ha.measure(2 , 0 ) # extract XOR value qc_ha.measure(3 , 1 ) # extract AND value # Execute the circuit on the qasm simulator UpperCAmelCase : Dict =qiskit.execute(__lowerCAmelCase , __lowerCAmelCase , shots=10_00 ) # Return the histogram data of the results of the experiment return job.result().get_counts(__lowerCAmelCase ) if __name__ == "__main__": __snake_case = half_adder(1, 1) print(f'Half Adder Output Qubit Counts: {counts}')
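# A quick truth-table check of half_adder (assumes a qiskit version that still
# provides qiskit.Aer and qiskit.execute, as the circuit above does). Qiskit
# prints classical bits high-to-low, so each counts key reads "carry sum":
for bit_a in (0, 1):
    for bit_b in (0, 1):
        result_counts = half_adder(bit_a, bit_b)
        assert max(result_counts, key=result_counts.get) == f"{bit_a & bit_b}{bit_a ^ bit_b}"
        print(bit_a, bit_b, result_counts)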
import pyarrow.parquet as pq import pytest from datasets import Audio, Dataset, DatasetDict, Features, NamedSplit, Sequence, Value, config from datasets.features.image import Image from datasets.io.parquet import ParquetDatasetReader, ParquetDatasetWriter, get_writer_batch_size from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases def lowerCAmelCase_ ( __lowerCAmelCase , __lowerCAmelCase )-> Any: '''simple docstring''' assert isinstance(__lowerCAmelCase , __lowerCAmelCase ) assert dataset.num_rows == 4 assert dataset.num_columns == 3 assert dataset.column_names == ["col_1", "col_2", "col_3"] for feature, expected_dtype in expected_features.items(): assert dataset.features[feature].dtype == expected_dtype @pytest.mark.parametrize('''keep_in_memory''' , [False, True] ) def lowerCAmelCase_ ( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )-> Dict: '''simple docstring''' UpperCAmelCase : Dict =tmp_path / '''cache''' UpperCAmelCase : Any ={'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''} with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase(): UpperCAmelCase : List[Any] =ParquetDatasetReader(__lowerCAmelCase , cache_dir=__lowerCAmelCase , keep_in_memory=__lowerCAmelCase ).read() _check_parquet_dataset(__lowerCAmelCase , __lowerCAmelCase ) @pytest.mark.parametrize( '''features''' , [ None, {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}, {'''col_1''': '''string''', '''col_2''': '''string''', '''col_3''': '''string'''}, {'''col_1''': '''int32''', '''col_2''': '''int32''', '''col_3''': '''int32'''}, {'''col_1''': '''float32''', '''col_2''': '''float32''', '''col_3''': '''float32'''}, ] , ) def lowerCAmelCase_ ( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )-> Dict: '''simple docstring''' UpperCAmelCase : Optional[Any] =tmp_path / '''cache''' UpperCAmelCase : Tuple ={'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''} UpperCAmelCase : List[Any] =features.copy() if features else default_expected_features UpperCAmelCase : List[str] =( Features({feature: Value(__lowerCAmelCase ) for feature, dtype in features.items()} ) if features is not None else None ) UpperCAmelCase : Optional[int] =ParquetDatasetReader(__lowerCAmelCase , features=__lowerCAmelCase , cache_dir=__lowerCAmelCase ).read() _check_parquet_dataset(__lowerCAmelCase , __lowerCAmelCase ) @pytest.mark.parametrize('''split''' , [None, NamedSplit('''train''' ), '''train''', '''test'''] ) def lowerCAmelCase_ ( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )-> Optional[Any]: '''simple docstring''' UpperCAmelCase : Union[str, Any] =tmp_path / '''cache''' UpperCAmelCase : Optional[Any] ={'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''} UpperCAmelCase : Optional[int] =ParquetDatasetReader(__lowerCAmelCase , cache_dir=__lowerCAmelCase , split=__lowerCAmelCase ).read() _check_parquet_dataset(__lowerCAmelCase , __lowerCAmelCase ) assert dataset.split == split if split else "train" @pytest.mark.parametrize('''path_type''' , [str, list] ) def lowerCAmelCase_ ( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )-> List[Any]: '''simple docstring''' if issubclass(__lowerCAmelCase , __lowerCAmelCase ): UpperCAmelCase : Tuple =parquet_path elif issubclass(__lowerCAmelCase , __lowerCAmelCase ): UpperCAmelCase : Union[str, Any] =[parquet_path] UpperCAmelCase : str =tmp_path / '''cache''' UpperCAmelCase : List[Any] ={'''col_1''': 
'''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''} UpperCAmelCase : List[str] =ParquetDatasetReader(__lowerCAmelCase , cache_dir=__lowerCAmelCase ).read() _check_parquet_dataset(__lowerCAmelCase , __lowerCAmelCase ) def lowerCAmelCase_ ( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase=("train",) )-> List[str]: '''simple docstring''' assert isinstance(__lowerCAmelCase , __lowerCAmelCase ) for split in splits: UpperCAmelCase : Dict =dataset_dict[split] assert dataset.num_rows == 4 assert dataset.num_columns == 3 assert dataset.column_names == ["col_1", "col_2", "col_3"] for feature, expected_dtype in expected_features.items(): assert dataset.features[feature].dtype == expected_dtype @pytest.mark.parametrize('''keep_in_memory''' , [False, True] ) def lowerCAmelCase_ ( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )-> List[str]: '''simple docstring''' UpperCAmelCase : int =tmp_path / '''cache''' UpperCAmelCase : List[Any] ={'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''} with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase(): UpperCAmelCase : str =ParquetDatasetReader( {'''train''': parquet_path} , cache_dir=__lowerCAmelCase , keep_in_memory=__lowerCAmelCase ).read() _check_parquet_datasetdict(__lowerCAmelCase , __lowerCAmelCase ) @pytest.mark.parametrize( '''features''' , [ None, {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}, {'''col_1''': '''string''', '''col_2''': '''string''', '''col_3''': '''string'''}, {'''col_1''': '''int32''', '''col_2''': '''int32''', '''col_3''': '''int32'''}, {'''col_1''': '''float32''', '''col_2''': '''float32''', '''col_3''': '''float32'''}, ] , ) def lowerCAmelCase_ ( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )-> Optional[int]: '''simple docstring''' UpperCAmelCase : Any =tmp_path / '''cache''' UpperCAmelCase : int ={'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''} UpperCAmelCase : int =features.copy() if features else default_expected_features UpperCAmelCase : str =( Features({feature: Value(__lowerCAmelCase ) for feature, dtype in features.items()} ) if features is not None else None ) UpperCAmelCase : List[str] =ParquetDatasetReader({'''train''': parquet_path} , features=__lowerCAmelCase , cache_dir=__lowerCAmelCase ).read() _check_parquet_datasetdict(__lowerCAmelCase , __lowerCAmelCase ) @pytest.mark.parametrize('''split''' , [None, NamedSplit('''train''' ), '''train''', '''test'''] ) def lowerCAmelCase_ ( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )-> int: '''simple docstring''' if split: UpperCAmelCase : Any ={split: parquet_path} else: UpperCAmelCase : Optional[Any] ='''train''' UpperCAmelCase : List[str] ={'''train''': parquet_path, '''test''': parquet_path} UpperCAmelCase : List[Any] =tmp_path / '''cache''' UpperCAmelCase : Optional[Any] ={'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''} UpperCAmelCase : List[str] =ParquetDatasetReader(__lowerCAmelCase , cache_dir=__lowerCAmelCase ).read() _check_parquet_datasetdict(__lowerCAmelCase , __lowerCAmelCase , splits=list(path.keys() ) ) assert all(dataset[split].split == split for split in path.keys() ) def lowerCAmelCase_ ( __lowerCAmelCase , __lowerCAmelCase )-> Optional[int]: '''simple docstring''' UpperCAmelCase : str =ParquetDatasetWriter(__lowerCAmelCase , tmp_path / '''foo.parquet''' ) assert writer.write() > 0 UpperCAmelCase : Union[str, Any] =pq.ParquetFile(tmp_path / 
'''foo.parquet''' ) UpperCAmelCase : Any =pf.read() assert dataset.data.table == output_table def lowerCAmelCase_ ( __lowerCAmelCase , __lowerCAmelCase )-> Optional[int]: '''simple docstring''' UpperCAmelCase : Optional[Any] =str(shared_datadir / '''test_image_rgb.jpg''' ) UpperCAmelCase : Any ={'''image''': [image_path]} UpperCAmelCase : List[str] =Features({'''image''': Image()} ) UpperCAmelCase : Tuple =Dataset.from_dict(__lowerCAmelCase , features=__lowerCAmelCase ) UpperCAmelCase : int =ParquetDatasetWriter(__lowerCAmelCase , tmp_path / '''foo.parquet''' ) assert writer.write() > 0 UpperCAmelCase : Optional[int] =Dataset.from_parquet(str(tmp_path / '''foo.parquet''' ) ) assert dataset.features == reloaded_dataset.features UpperCAmelCase : Optional[int] =ParquetDatasetReader(str(tmp_path / '''foo.parquet''' ) , streaming=__lowerCAmelCase ).read() assert dataset.features == reloaded_iterable_dataset.features @pytest.mark.parametrize( '''feature, expected''' , [ (Features({'''foo''': Value('''int32''' )} ), None), (Features({'''image''': Image(), '''foo''': Value('''int32''' )} ), config.PARQUET_ROW_GROUP_SIZE_FOR_IMAGE_DATASETS), (Features({'''nested''': Sequence(Audio() )} ), config.PARQUET_ROW_GROUP_SIZE_FOR_AUDIO_DATASETS), ] , ) def lowerCAmelCase_ ( __lowerCAmelCase , __lowerCAmelCase )-> List[Any]: '''simple docstring''' assert get_writer_batch_size(__lowerCAmelCase ) == expected
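# A hypothetical round-trip sketch of the reader/writer pair under test, with
# the write() > 0 contract taken from the tests above:
from datasets import Dataset
from datasets.io.parquet import ParquetDatasetReader, ParquetDatasetWriter

roundtrip_ds = Dataset.from_dict({"col_1": ["a", "b"], "col_2": [1, 2]})
assert ParquetDatasetWriter(roundtrip_ds, "roundtrip.parquet").write() > 0
reloaded_ds = ParquetDatasetReader("roundtrip.parquet").read()
assert reloaded_ds.column_names == roundtrip_ds.column_names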
from __future__ import annotations import unittest from transformers import BlenderbotConfig, BlenderbotTokenizer, is_tf_available from transformers.testing_utils import require_tf, require_tokenizers, slow from transformers.utils import cached_property from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import TFAutoModelForSeqaSeqLM, TFBlenderbotForConditionalGeneration, TFBlenderbotModel @require_tf class __snake_case : __lowerCamelCase : str = BlenderbotConfig __lowerCamelCase : Optional[Any] = {} __lowerCamelCase : Optional[int] = """gelu""" def __init__( self , snake_case__ , snake_case__=13 , snake_case__=7 , snake_case__=True , snake_case__=False , snake_case__=99 , snake_case__=32 , snake_case__=2 , snake_case__=4 , snake_case__=37 , snake_case__=0.1 , snake_case__=0.1 , snake_case__=20 , snake_case__=2 , snake_case__=1 , snake_case__=0 , ) -> Optional[Any]: '''simple docstring''' UpperCAmelCase : Union[str, Any] =parent UpperCAmelCase : Optional[int] =batch_size UpperCAmelCase : Dict =seq_length UpperCAmelCase : Optional[Any] =is_training UpperCAmelCase : List[str] =use_labels UpperCAmelCase : List[Any] =vocab_size UpperCAmelCase : Optional[int] =hidden_size UpperCAmelCase : Tuple =num_hidden_layers UpperCAmelCase : Any =num_attention_heads UpperCAmelCase : Optional[int] =intermediate_size UpperCAmelCase : str =hidden_dropout_prob UpperCAmelCase : Optional[int] =attention_probs_dropout_prob UpperCAmelCase : str =max_position_embeddings UpperCAmelCase : List[Any] =eos_token_id UpperCAmelCase : Optional[int] =pad_token_id UpperCAmelCase : Tuple =bos_token_id def UpperCAmelCase__ ( self ) -> Optional[Any]: '''simple docstring''' UpperCAmelCase : List[Any] =ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size ) UpperCAmelCase : List[Any] =tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ) , 1 ) UpperCAmelCase : Tuple =tf.concat([input_ids, eos_tensor] , axis=1 ) UpperCAmelCase : str =ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) UpperCAmelCase : Optional[Any] =self.config_cls( vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , **self.config_updates , ) UpperCAmelCase : List[str] =prepare_blenderbot_inputs_dict(snake_case__ , snake_case__ , snake_case__ ) return config, inputs_dict def UpperCAmelCase__ ( self , snake_case__ , snake_case__ ) -> int: '''simple docstring''' UpperCAmelCase : Union[str, Any] =TFBlenderbotModel(config=snake_case__ ).get_decoder() UpperCAmelCase : Any =inputs_dict['''input_ids'''] UpperCAmelCase : str =input_ids[:1, :] UpperCAmelCase : Tuple =inputs_dict['''attention_mask'''][:1, :] UpperCAmelCase : Tuple =inputs_dict['''head_mask'''] UpperCAmelCase : List[Any] =1 # first forward pass UpperCAmelCase : List[str] =model(snake_case__ , attention_mask=snake_case__ , head_mask=snake_case__ , 
use_cache=snake_case__ ) UpperCAmelCase , UpperCAmelCase : str =outputs.to_tuple() # create hypothetical next token and extent to next_input_ids UpperCAmelCase : Union[str, Any] =ids_tensor((self.batch_size, 3) , config.vocab_size ) UpperCAmelCase : List[Any] =tf.cast(ids_tensor((self.batch_size, 3) , 2 ) , tf.inta ) # append to next input_ids and UpperCAmelCase : Tuple =tf.concat([input_ids, next_tokens] , axis=-1 ) UpperCAmelCase : int =tf.concat([attention_mask, next_attn_mask] , axis=-1 ) UpperCAmelCase : Optional[int] =model(snake_case__ , attention_mask=snake_case__ )[0] UpperCAmelCase : str =model(snake_case__ , attention_mask=snake_case__ , past_key_values=snake_case__ )[0] self.parent.assertEqual(next_tokens.shape[1] , output_from_past.shape[1] ) # select random slice UpperCAmelCase : List[Any] =int(ids_tensor((1,) , output_from_past.shape[-1] ) ) UpperCAmelCase : List[Any] =output_from_no_past[:, -3:, random_slice_idx] UpperCAmelCase : Dict =output_from_past[:, :, random_slice_idx] # test that outputs are equal for slice tf.debugging.assert_near(snake_case__ , snake_case__ , rtol=1e-3 ) def lowerCAmelCase_ ( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase=None , __lowerCAmelCase=None , __lowerCAmelCase=None , __lowerCAmelCase=None , __lowerCAmelCase=None , )-> str: '''simple docstring''' if attention_mask is None: UpperCAmelCase : int =tf.cast(tf.math.not_equal(__lowerCAmelCase , config.pad_token_id ) , tf.inta ) if decoder_attention_mask is None: UpperCAmelCase : Tuple =tf.concat( [ tf.ones(decoder_input_ids[:, :1].shape , dtype=tf.inta ), tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:] , config.pad_token_id ) , tf.inta ), ] , axis=-1 , ) if head_mask is None: UpperCAmelCase : str =tf.ones((config.encoder_layers, config.encoder_attention_heads) ) if decoder_head_mask is None: UpperCAmelCase : Union[str, Any] =tf.ones((config.decoder_layers, config.decoder_attention_heads) ) if cross_attn_head_mask is None: UpperCAmelCase : int =tf.ones((config.decoder_layers, config.decoder_attention_heads) ) return { "input_ids": input_ids, "decoder_input_ids": decoder_input_ids, "attention_mask": attention_mask, "decoder_attention_mask": decoder_attention_mask, "head_mask": head_mask, "decoder_head_mask": decoder_head_mask, "cross_attn_head_mask": cross_attn_head_mask, } @require_tf class __snake_case ( lowerCamelCase__ , lowerCamelCase__ , unittest.TestCase ): __lowerCamelCase : List[str] = (TFBlenderbotForConditionalGeneration, TFBlenderbotModel) if is_tf_available() else () __lowerCamelCase : Dict = (TFBlenderbotForConditionalGeneration,) if is_tf_available() else () __lowerCamelCase : Dict = ( { """conversational""": TFBlenderbotForConditionalGeneration, """feature-extraction""": TFBlenderbotModel, """summarization""": TFBlenderbotForConditionalGeneration, """text2text-generation""": TFBlenderbotForConditionalGeneration, """translation""": TFBlenderbotForConditionalGeneration, } if is_tf_available() else {} ) __lowerCamelCase : Union[str, Any] = True __lowerCamelCase : Union[str, Any] = False __lowerCamelCase : Union[str, Any] = False def UpperCAmelCase__ ( self ) -> Dict: '''simple docstring''' UpperCAmelCase : List[str] =TFBlenderbotModelTester(self ) UpperCAmelCase : List[Any] =ConfigTester(self , config_class=snake_case__ ) def UpperCAmelCase__ ( self ) -> Optional[Any]: '''simple docstring''' self.config_tester.run_common_tests() def UpperCAmelCase__ ( self ) -> List[Any]: '''simple docstring''' UpperCAmelCase : int 
=self.model_tester.prepare_config_and_inputs_for_common() self.model_tester.check_decoder_model_past_large_inputs(*snake_case__ ) @require_tokenizers @require_tf class __snake_case ( unittest.TestCase ): __lowerCamelCase : List[str] = ["""My friends are cool but they eat too many carbs."""] __lowerCamelCase : Dict = """facebook/blenderbot-400M-distill""" @cached_property def UpperCAmelCase__ ( self ) -> int: '''simple docstring''' return BlenderbotTokenizer.from_pretrained(self.model_name ) @cached_property def UpperCAmelCase__ ( self ) -> int: '''simple docstring''' UpperCAmelCase : int =TFAutoModelForSeqaSeqLM.from_pretrained(self.model_name ) return model @slow def UpperCAmelCase__ ( self ) -> Any: '''simple docstring''' UpperCAmelCase : Optional[int] =self.tokenizer(self.src_text , return_tensors='''tf''' ) UpperCAmelCase : Optional[int] =self.model.generate( model_inputs.input_ids , ) UpperCAmelCase : str =self.tokenizer.batch_decode(generated_ids.numpy() , skip_special_tokens=snake_case__ )[0] assert ( generated_words == " That's unfortunate. Are they trying to lose weight or are they just trying to be healthier?" )
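# A hedged reproduction of the slow integration test above (assumes the
# facebook/blenderbot-400M-distill checkpoint and a TensorFlow install;
# TFAutoModelForSeq2SeqLM is the deobfuscated TFAutoModelForSeqaSeqLM):
from transformers import BlenderbotTokenizer, TFAutoModelForSeq2SeqLM

bb_tok = BlenderbotTokenizer.from_pretrained("facebook/blenderbot-400M-distill")
bb_model = TFAutoModelForSeq2SeqLM.from_pretrained("facebook/blenderbot-400M-distill")
bb_inputs = bb_tok(["My friends are cool but they eat too many carbs."], return_tensors="tf")
bb_ids = bb_model.generate(bb_inputs.input_ids)
print(bb_tok.batch_decode(bb_ids.numpy(), skip_special_tokens=True)[0])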
import argparse import json from pathlib import Path import requests import torch from huggingface_hub import cached_download, hf_hub_download, hf_hub_url from PIL import Image from transformers import DetaConfig, DetaForObjectDetection, DetaImageProcessor, SwinConfig from transformers.utils import logging logging.set_verbosity_info() __snake_case = logging.get_logger(__name__) def lowerCAmelCase_ ( __lowerCAmelCase )-> Optional[int]: '''simple docstring''' UpperCAmelCase : Any =SwinConfig( embed_dim=1_92 , depths=(2, 2, 18, 2) , num_heads=(6, 12, 24, 48) , window_size=12 , out_features=['''stage2''', '''stage3''', '''stage4'''] , ) UpperCAmelCase : List[str] =DetaConfig( backbone_config=__lowerCAmelCase , num_queries=9_00 , encoder_ffn_dim=20_48 , decoder_ffn_dim=20_48 , num_feature_levels=5 , assign_first_stage=__lowerCAmelCase , with_box_refine=__lowerCAmelCase , two_stage=__lowerCAmelCase , ) # set labels UpperCAmelCase : Optional[int] ='''huggingface/label-files''' if "o365" in model_name: UpperCAmelCase : Tuple =3_66 UpperCAmelCase : str ='''object365-id2label.json''' else: UpperCAmelCase : str =91 UpperCAmelCase : Tuple ='''coco-detection-id2label.json''' UpperCAmelCase : Optional[Any] =num_labels UpperCAmelCase : Optional[Any] =json.load(open(cached_download(hf_hub_url(__lowerCAmelCase , __lowerCAmelCase , repo_type='''dataset''' ) ) , '''r''' ) ) UpperCAmelCase : List[str] ={int(__lowerCAmelCase ): v for k, v in idalabel.items()} UpperCAmelCase : List[str] =idalabel UpperCAmelCase : int ={v: k for k, v in idalabel.items()} return config def lowerCAmelCase_ ( __lowerCAmelCase )-> Union[str, Any]: '''simple docstring''' UpperCAmelCase : Union[str, Any] =[] # stem # fmt: off rename_keys.append(('''backbone.0.body.patch_embed.proj.weight''', '''model.backbone.model.embeddings.patch_embeddings.projection.weight''') ) rename_keys.append(('''backbone.0.body.patch_embed.proj.bias''', '''model.backbone.model.embeddings.patch_embeddings.projection.bias''') ) rename_keys.append(('''backbone.0.body.patch_embed.norm.weight''', '''model.backbone.model.embeddings.norm.weight''') ) rename_keys.append(('''backbone.0.body.patch_embed.norm.bias''', '''model.backbone.model.embeddings.norm.bias''') ) # stages for i in range(len(config.backbone_config.depths ) ): for j in range(config.backbone_config.depths[i] ): rename_keys.append((f'''backbone.0.body.layers.{i}.blocks.{j}.norm1.weight''', f'''model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_before.weight''') ) rename_keys.append((f'''backbone.0.body.layers.{i}.blocks.{j}.norm1.bias''', f'''model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_before.bias''') ) rename_keys.append((f'''backbone.0.body.layers.{i}.blocks.{j}.attn.relative_position_bias_table''', f'''model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_bias_table''') ) rename_keys.append((f'''backbone.0.body.layers.{i}.blocks.{j}.attn.relative_position_index''', f'''model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_index''') ) rename_keys.append((f'''backbone.0.body.layers.{i}.blocks.{j}.attn.proj.weight''', f'''model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.weight''') ) rename_keys.append((f'''backbone.0.body.layers.{i}.blocks.{j}.attn.proj.bias''', f'''model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.bias''') ) rename_keys.append((f'''backbone.0.body.layers.{i}.blocks.{j}.norm2.weight''', 
f'''model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_after.weight''') ) rename_keys.append((f'''backbone.0.body.layers.{i}.blocks.{j}.norm2.bias''', f'''model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_after.bias''') ) rename_keys.append((f'''backbone.0.body.layers.{i}.blocks.{j}.mlp.fc1.weight''', f'''model.backbone.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.weight''') ) rename_keys.append((f'''backbone.0.body.layers.{i}.blocks.{j}.mlp.fc1.bias''', f'''model.backbone.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.bias''') ) rename_keys.append((f'''backbone.0.body.layers.{i}.blocks.{j}.mlp.fc2.weight''', f'''model.backbone.model.encoder.layers.{i}.blocks.{j}.output.dense.weight''') ) rename_keys.append((f'''backbone.0.body.layers.{i}.blocks.{j}.mlp.fc2.bias''', f'''model.backbone.model.encoder.layers.{i}.blocks.{j}.output.dense.bias''') ) if i < 3: rename_keys.append((f'''backbone.0.body.layers.{i}.downsample.reduction.weight''', f'''model.backbone.model.encoder.layers.{i}.downsample.reduction.weight''') ) rename_keys.append((f'''backbone.0.body.layers.{i}.downsample.norm.weight''', f'''model.backbone.model.encoder.layers.{i}.downsample.norm.weight''') ) rename_keys.append((f'''backbone.0.body.layers.{i}.downsample.norm.bias''', f'''model.backbone.model.encoder.layers.{i}.downsample.norm.bias''') ) rename_keys.append(('''backbone.0.body.norm1.weight''', '''model.backbone.model.hidden_states_norms.stage2.weight''') ) rename_keys.append(('''backbone.0.body.norm1.bias''', '''model.backbone.model.hidden_states_norms.stage2.bias''') ) rename_keys.append(('''backbone.0.body.norm2.weight''', '''model.backbone.model.hidden_states_norms.stage3.weight''') ) rename_keys.append(('''backbone.0.body.norm2.bias''', '''model.backbone.model.hidden_states_norms.stage3.bias''') ) rename_keys.append(('''backbone.0.body.norm3.weight''', '''model.backbone.model.hidden_states_norms.stage4.weight''') ) rename_keys.append(('''backbone.0.body.norm3.bias''', '''model.backbone.model.hidden_states_norms.stage4.bias''') ) # transformer encoder for i in range(config.encoder_layers ): rename_keys.append((f'''transformer.encoder.layers.{i}.self_attn.sampling_offsets.weight''', f'''model.encoder.layers.{i}.self_attn.sampling_offsets.weight''') ) rename_keys.append((f'''transformer.encoder.layers.{i}.self_attn.sampling_offsets.bias''', f'''model.encoder.layers.{i}.self_attn.sampling_offsets.bias''') ) rename_keys.append((f'''transformer.encoder.layers.{i}.self_attn.attention_weights.weight''', f'''model.encoder.layers.{i}.self_attn.attention_weights.weight''') ) rename_keys.append((f'''transformer.encoder.layers.{i}.self_attn.attention_weights.bias''', f'''model.encoder.layers.{i}.self_attn.attention_weights.bias''') ) rename_keys.append((f'''transformer.encoder.layers.{i}.self_attn.value_proj.weight''', f'''model.encoder.layers.{i}.self_attn.value_proj.weight''') ) rename_keys.append((f'''transformer.encoder.layers.{i}.self_attn.value_proj.bias''', f'''model.encoder.layers.{i}.self_attn.value_proj.bias''') ) rename_keys.append((f'''transformer.encoder.layers.{i}.self_attn.output_proj.weight''', f'''model.encoder.layers.{i}.self_attn.output_proj.weight''') ) rename_keys.append((f'''transformer.encoder.layers.{i}.self_attn.output_proj.bias''', f'''model.encoder.layers.{i}.self_attn.output_proj.bias''') ) rename_keys.append((f'''transformer.encoder.layers.{i}.norm1.weight''', f'''model.encoder.layers.{i}.self_attn_layer_norm.weight''') ) 
rename_keys.append((f'''transformer.encoder.layers.{i}.norm1.bias''', f'''model.encoder.layers.{i}.self_attn_layer_norm.bias''') ) rename_keys.append((f'''transformer.encoder.layers.{i}.linear1.weight''', f'''model.encoder.layers.{i}.fc1.weight''') ) rename_keys.append((f'''transformer.encoder.layers.{i}.linear1.bias''', f'''model.encoder.layers.{i}.fc1.bias''') ) rename_keys.append((f'''transformer.encoder.layers.{i}.linear2.weight''', f'''model.encoder.layers.{i}.fc2.weight''') ) rename_keys.append((f'''transformer.encoder.layers.{i}.linear2.bias''', f'''model.encoder.layers.{i}.fc2.bias''') ) rename_keys.append((f'''transformer.encoder.layers.{i}.norm2.weight''', f'''model.encoder.layers.{i}.final_layer_norm.weight''') ) rename_keys.append((f'''transformer.encoder.layers.{i}.norm2.bias''', f'''model.encoder.layers.{i}.final_layer_norm.bias''') ) # transformer decoder for i in range(config.decoder_layers ): rename_keys.append((f'''transformer.decoder.layers.{i}.cross_attn.sampling_offsets.weight''', f'''model.decoder.layers.{i}.encoder_attn.sampling_offsets.weight''') ) rename_keys.append((f'''transformer.decoder.layers.{i}.cross_attn.sampling_offsets.bias''', f'''model.decoder.layers.{i}.encoder_attn.sampling_offsets.bias''') ) rename_keys.append((f'''transformer.decoder.layers.{i}.cross_attn.attention_weights.weight''', f'''model.decoder.layers.{i}.encoder_attn.attention_weights.weight''') ) rename_keys.append((f'''transformer.decoder.layers.{i}.cross_attn.attention_weights.bias''', f'''model.decoder.layers.{i}.encoder_attn.attention_weights.bias''') ) rename_keys.append((f'''transformer.decoder.layers.{i}.cross_attn.value_proj.weight''', f'''model.decoder.layers.{i}.encoder_attn.value_proj.weight''') ) rename_keys.append((f'''transformer.decoder.layers.{i}.cross_attn.value_proj.bias''', f'''model.decoder.layers.{i}.encoder_attn.value_proj.bias''') ) rename_keys.append((f'''transformer.decoder.layers.{i}.cross_attn.output_proj.weight''', f'''model.decoder.layers.{i}.encoder_attn.output_proj.weight''') ) rename_keys.append((f'''transformer.decoder.layers.{i}.cross_attn.output_proj.bias''', f'''model.decoder.layers.{i}.encoder_attn.output_proj.bias''') ) rename_keys.append((f'''transformer.decoder.layers.{i}.norm1.weight''', f'''model.decoder.layers.{i}.encoder_attn_layer_norm.weight''') ) rename_keys.append((f'''transformer.decoder.layers.{i}.norm1.bias''', f'''model.decoder.layers.{i}.encoder_attn_layer_norm.bias''') ) rename_keys.append((f'''transformer.decoder.layers.{i}.self_attn.out_proj.weight''', f'''model.decoder.layers.{i}.self_attn.out_proj.weight''') ) rename_keys.append((f'''transformer.decoder.layers.{i}.self_attn.out_proj.bias''', f'''model.decoder.layers.{i}.self_attn.out_proj.bias''') ) rename_keys.append((f'''transformer.decoder.layers.{i}.norm2.weight''', f'''model.decoder.layers.{i}.self_attn_layer_norm.weight''') ) rename_keys.append((f'''transformer.decoder.layers.{i}.norm2.bias''', f'''model.decoder.layers.{i}.self_attn_layer_norm.bias''') ) rename_keys.append((f'''transformer.decoder.layers.{i}.linear1.weight''', f'''model.decoder.layers.{i}.fc1.weight''') ) rename_keys.append((f'''transformer.decoder.layers.{i}.linear1.bias''', f'''model.decoder.layers.{i}.fc1.bias''') ) rename_keys.append((f'''transformer.decoder.layers.{i}.linear2.weight''', f'''model.decoder.layers.{i}.fc2.weight''') ) rename_keys.append((f'''transformer.decoder.layers.{i}.linear2.bias''', f'''model.decoder.layers.{i}.fc2.bias''') ) 
rename_keys.append((f'''transformer.decoder.layers.{i}.norm3.weight''', f'''model.decoder.layers.{i}.final_layer_norm.weight''') ) rename_keys.append((f'''transformer.decoder.layers.{i}.norm3.bias''', f'''model.decoder.layers.{i}.final_layer_norm.bias''') ) # fmt: on return rename_keys def lowerCAmelCase_ ( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )-> Any: '''simple docstring''' UpperCAmelCase : Dict =dct.pop(__lowerCAmelCase ) UpperCAmelCase : str =val def lowerCAmelCase_ ( __lowerCAmelCase , __lowerCAmelCase )-> int: '''simple docstring''' UpperCAmelCase : Dict =[int(backbone_config.embed_dim * 2**i ) for i in range(len(backbone_config.depths ) )] for i in range(len(backbone_config.depths ) ): UpperCAmelCase : str =num_features[i] for j in range(backbone_config.depths[i] ): # fmt: off # read in weights + bias of input projection layer (in original implementation, this is a single matrix + bias) UpperCAmelCase : Dict =state_dict.pop(f'''backbone.0.body.layers.{i}.blocks.{j}.attn.qkv.weight''' ) UpperCAmelCase : Tuple =state_dict.pop(f'''backbone.0.body.layers.{i}.blocks.{j}.attn.qkv.bias''' ) # next, add query, keys and values (in that order) to the state dict UpperCAmelCase : List[str] =in_proj_weight[:dim, :] UpperCAmelCase : int =in_proj_bias[: dim] UpperCAmelCase : Optional[int] =in_proj_weight[ dim : dim * 2, : ] UpperCAmelCase : Dict =in_proj_bias[ dim : dim * 2 ] UpperCAmelCase : Dict =in_proj_weight[ -dim :, : ] UpperCAmelCase : Any =in_proj_bias[-dim :] # fmt: on def lowerCAmelCase_ ( __lowerCAmelCase , __lowerCAmelCase )-> Union[str, Any]: '''simple docstring''' UpperCAmelCase : Optional[int] =config.d_model for i in range(config.decoder_layers ): # read in weights + bias of input projection layer of self-attention UpperCAmelCase : List[Any] =state_dict.pop(f'''transformer.decoder.layers.{i}.self_attn.in_proj_weight''' ) UpperCAmelCase : List[Any] =state_dict.pop(f'''transformer.decoder.layers.{i}.self_attn.in_proj_bias''' ) # next, add query, keys and values (in that order) to the state dict UpperCAmelCase : int =in_proj_weight[:hidden_size, :] UpperCAmelCase : int =in_proj_bias[:hidden_size] UpperCAmelCase : List[Any] =in_proj_weight[ hidden_size : hidden_size * 2, : ] UpperCAmelCase : int =in_proj_bias[hidden_size : hidden_size * 2] UpperCAmelCase : Optional[int] =in_proj_weight[-hidden_size:, :] UpperCAmelCase : str =in_proj_bias[-hidden_size:] def lowerCAmelCase_ ( )-> Optional[Any]: '''simple docstring''' UpperCAmelCase : str ='''http://images.cocodataset.org/val2017/000000039769.jpg''' UpperCAmelCase : int =Image.open(requests.get(__lowerCAmelCase , stream=__lowerCAmelCase ).raw ) return im @torch.no_grad() def lowerCAmelCase_ ( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )-> Tuple: '''simple docstring''' UpperCAmelCase : int =get_deta_config(__lowerCAmelCase ) # load original state dict if model_name == "deta-swin-large": UpperCAmelCase : Union[str, Any] =hf_hub_download(repo_id='''nielsr/deta-checkpoints''' , filename='''adet_swin_ft.pth''' ) elif model_name == "deta-swin-large-o365": UpperCAmelCase : Any =hf_hub_download(repo_id='''jozhang97/deta-swin-l-o365''' , filename='''deta_swin_pt_o365.pth''' ) else: raise ValueError(f'''Model name {model_name} not supported''' ) UpperCAmelCase : Any =torch.load(__lowerCAmelCase , map_location='''cpu''' )['''model'''] # original state dict for name, param in state_dict.items(): print(__lowerCAmelCase , param.shape ) # rename keys UpperCAmelCase : Any =create_rename_keys(__lowerCAmelCase ) for src, dest 
in rename_keys: rename_key(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) read_in_swin_q_k_v(__lowerCAmelCase , config.backbone_config ) read_in_decoder_q_k_v(__lowerCAmelCase , __lowerCAmelCase ) # fix some prefixes for key in state_dict.copy().keys(): if "transformer.decoder.class_embed" in key or "transformer.decoder.bbox_embed" in key: UpperCAmelCase : Tuple =state_dict.pop(__lowerCAmelCase ) UpperCAmelCase : str =val if "input_proj" in key: UpperCAmelCase : int =state_dict.pop(__lowerCAmelCase ) UpperCAmelCase : List[Any] =val if "level_embed" in key or "pos_trans" in key or "pix_trans" in key or "enc_output" in key: UpperCAmelCase : Union[str, Any] =state_dict.pop(__lowerCAmelCase ) UpperCAmelCase : List[str] =val # finally, create HuggingFace model and load state dict UpperCAmelCase : int =DetaForObjectDetection(__lowerCAmelCase ) model.load_state_dict(__lowerCAmelCase ) model.eval() UpperCAmelCase : Union[str, Any] ='''cuda''' if torch.cuda.is_available() else '''cpu''' model.to(__lowerCAmelCase ) # load image processor UpperCAmelCase : str =DetaImageProcessor(format='''coco_detection''' ) # verify our conversion on image UpperCAmelCase : Union[str, Any] =prepare_img() UpperCAmelCase : Optional[int] =processor(images=__lowerCAmelCase , return_tensors='''pt''' ) UpperCAmelCase : str =encoding['''pixel_values'''] UpperCAmelCase : List[str] =model(pixel_values.to(__lowerCAmelCase ) ) # verify logits print('''Logits:''' , outputs.logits[0, :3, :3] ) print('''Boxes:''' , outputs.pred_boxes[0, :3, :3] ) if model_name == "deta-swin-large": UpperCAmelCase : Dict =torch.tensor( [[-7.6308, -2.8485, -5.3737], [-7.2037, -4.5505, -4.8027], [-7.2943, -4.2611, -4.6617]] ) UpperCAmelCase : Optional[Any] =torch.tensor([[0.4987, 0.4969, 0.9999], [0.2549, 0.5498, 0.4805], [0.5498, 0.2757, 0.0569]] ) elif model_name == "deta-swin-large-o365": UpperCAmelCase : Dict =torch.tensor( [[-8.0122, -3.5720, -4.9717], [-8.1547, -3.6886, -4.6389], [-7.6610, -3.6194, -5.0134]] ) UpperCAmelCase : str =torch.tensor([[0.2523, 0.5549, 0.4881], [0.7715, 0.4149, 0.4601], [0.5503, 0.2753, 0.0575]] ) assert torch.allclose(outputs.logits[0, :3, :3] , expected_logits.to(__lowerCAmelCase ) , atol=1e-4 ) assert torch.allclose(outputs.pred_boxes[0, :3, :3] , expected_boxes.to(__lowerCAmelCase ) , atol=1e-4 ) print('''Everything ok!''' ) if pytorch_dump_folder_path: # Save model and processor logger.info(f'''Saving PyTorch model and processor to {pytorch_dump_folder_path}...''' ) Path(__lowerCAmelCase ).mkdir(exist_ok=__lowerCAmelCase ) model.save_pretrained(__lowerCAmelCase ) processor.save_pretrained(__lowerCAmelCase ) # Push to hub if push_to_hub: print('''Pushing model and processor to hub...''' ) model.push_to_hub(f'''jozhang97/{model_name}''' ) processor.push_to_hub(f'''jozhang97/{model_name}''' ) if __name__ == "__main__": __snake_case = argparse.ArgumentParser() parser.add_argument( '''--model_name''', type=str, default='''deta-swin-large''', choices=['''deta-swin-large''', '''deta-swin-large-o365'''], help='''Name of the model you\'d like to convert.''', ) parser.add_argument( '''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the folder to output PyTorch model.''', ) parser.add_argument( '''--push_to_hub''', action='''store_true''', help='''Whether or not to push the converted model to the 🤗 hub.''' ) __snake_case = parser.parse_args() convert_deta_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
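# Hedged usage sketch (not part of the conversion script above): assuming the converted
# checkpoint was pushed to the hub under jozhang97/deta-swin-large as done in
# convert_deta_checkpoint, it can be loaded back for inference like any other
# Transformers detection model.
import requests
import torch
from PIL import Image
from transformers import DetaForObjectDetection, DetaImageProcessor

processor = DetaImageProcessor.from_pretrained("jozhang97/deta-swin-large")
model = DetaForObjectDetection.from_pretrained("jozhang97/deta-swin-large")

image = Image.open(requests.get("http://images.cocodataset.org/val2017/000000039769.jpg", stream=True).raw)
inputs = processor(images=image, return_tensors="pt")
with torch.no_grad():
    outputs = model(**inputs)

# keep detections above a confidence threshold and map label ids back to class names
results = processor.post_process_object_detection(
    outputs, threshold=0.5, target_sizes=[image.size[::-1]]
)[0]
for score, label, box in zip(results["scores"], results["labels"], results["boxes"]):
    print(model.config.id2label[label.item()], round(score.item(), 3), [round(c, 1) for c in box.tolist()])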
import functools import operator from ...configuration_utils import PretrainedConfig from ...utils import logging __snake_case = logging.get_logger(__name__) __snake_case = { '''asapp/sew-d-tiny-100k''': '''https://huggingface.co/asapp/sew-d-tiny-100k/resolve/main/config.json''', # See all SEW-D models at https://huggingface.co/models?filter=sew-d } class __snake_case ( lowerCamelCase__ ): __lowerCamelCase : Optional[Any] = """sew-d""" def __init__( self , snake_case__=32 , snake_case__=768 , snake_case__=12 , snake_case__=12 , snake_case__=3072 , snake_case__=2 , snake_case__=512 , snake_case__=256 , snake_case__=True , snake_case__=True , snake_case__=("p2c", "c2p") , snake_case__="layer_norm" , snake_case__="gelu_python" , snake_case__=0.1 , snake_case__=0.1 , snake_case__=0.1 , snake_case__=0.0 , snake_case__=0.1 , snake_case__=0.02 , snake_case__=1e-7 , snake_case__=1e-5 , snake_case__="group" , snake_case__="gelu" , snake_case__=(64, 128, 128, 128, 128, 256, 256, 256, 256, 512, 512, 512, 512) , snake_case__=(5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1) , snake_case__=(10, 3, 1, 3, 1, 3, 1, 3, 1, 2, 1, 2, 1) , snake_case__=False , snake_case__=128 , snake_case__=16 , snake_case__=True , snake_case__=0.05 , snake_case__=10 , snake_case__=2 , snake_case__=0.0 , snake_case__=10 , snake_case__=0 , snake_case__="mean" , snake_case__=False , snake_case__=False , snake_case__=256 , snake_case__=0 , snake_case__=1 , snake_case__=2 , **snake_case__ , ) -> int: '''simple docstring''' super().__init__(**snake_case__ , pad_token_id=snake_case__ , bos_token_id=snake_case__ , eos_token_id=snake_case__ ) UpperCAmelCase : Union[str, Any] =hidden_size UpperCAmelCase : Union[str, Any] =feat_extract_norm UpperCAmelCase : Optional[Any] =feat_extract_activation UpperCAmelCase : List[str] =list(snake_case__ ) UpperCAmelCase : int =list(snake_case__ ) UpperCAmelCase : List[str] =list(snake_case__ ) UpperCAmelCase : str =conv_bias UpperCAmelCase : Tuple =num_conv_pos_embeddings UpperCAmelCase : Dict =num_conv_pos_embedding_groups UpperCAmelCase : str =len(self.conv_dim ) UpperCAmelCase : Dict =num_hidden_layers UpperCAmelCase : Optional[int] =intermediate_size UpperCAmelCase : List[Any] =squeeze_factor UpperCAmelCase : str =max_position_embeddings UpperCAmelCase : int =position_buckets UpperCAmelCase : Optional[int] =share_att_key UpperCAmelCase : Optional[int] =relative_attention UpperCAmelCase : Tuple =norm_rel_ebd UpperCAmelCase : List[Any] =list(snake_case__ ) UpperCAmelCase : Dict =hidden_act UpperCAmelCase : Optional[int] =num_attention_heads UpperCAmelCase : Any =hidden_dropout UpperCAmelCase : str =attention_dropout UpperCAmelCase : Union[str, Any] =activation_dropout UpperCAmelCase : str =feat_proj_dropout UpperCAmelCase : Union[str, Any] =final_dropout UpperCAmelCase : Optional[int] =layer_norm_eps UpperCAmelCase : str =feature_layer_norm_eps UpperCAmelCase : str =initializer_range UpperCAmelCase : Any =vocab_size if ( (len(self.conv_stride ) != self.num_feat_extract_layers) or (len(self.conv_kernel ) != self.num_feat_extract_layers) or (len(self.conv_dim ) != self.num_feat_extract_layers) ): raise ValueError( '''Configuration for convolutional layers is incorrect.''' '''It is required that `len(config.conv_dim)` == `len(config.conv_stride)` == `len(config.conv_kernel)`,''' f'''but is `len(config.conv_dim) = {len(self.conv_dim )}`, `len(config.conv_stride)''' f'''= {len(self.conv_stride )}`, `len(config.conv_kernel) = {len(self.conv_kernel )}`.''' ) # fine-tuning config parameters for SpecAugment: 
https://arxiv.org/abs/1904.08779 UpperCAmelCase : Union[str, Any] =apply_spec_augment UpperCAmelCase : Optional[Any] =mask_time_prob UpperCAmelCase : Tuple =mask_time_length UpperCAmelCase : str =mask_time_min_masks UpperCAmelCase : Optional[int] =mask_feature_prob UpperCAmelCase : Optional[Any] =mask_feature_length UpperCAmelCase : List[Any] =mask_feature_min_masks # ctc loss UpperCAmelCase : str =ctc_loss_reduction UpperCAmelCase : Optional[int] =ctc_zero_infinity # sequence classification UpperCAmelCase : Union[str, Any] =use_weighted_layer_sum UpperCAmelCase : int =classifier_proj_size @property def UpperCAmelCase__ ( self ) -> List[Any]: '''simple docstring''' return functools.reduce(operator.mul , self.conv_stride , 1 )
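# Hedged usage sketch: assuming this class corresponds to transformers' SEWDConfig
# (model_type "sew-d"), a randomly initialised model can be built straight from the
# defaults declared in __init__ above.
from transformers import SEWDConfig, SEWDModel

config = SEWDConfig()
model = SEWDModel(config)  # random weights, no checkpoint download

# the property at the end multiplies the conv strides together; with the default
# strides (5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1) that is 5 * 2**6 = 320, i.e. the
# feature encoder emits one frame per 320 input audio samples.
print(config.inputs_to_logits_ratio)  # 320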
from numpy import exp, pi, sqrt


def gaussian(x, mu: float = 0.0, sigma: float = 1.0) -> float:
    """Evaluate the Gaussian (normal) probability density function at x."""
    return 1 / sqrt(2 * pi * sigma**2) * exp(-((x - mu) ** 2) / (2 * sigma**2))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
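# Quick numeric check (hedged example): at the mean the standard normal density is
# 1 / sqrt(2 * pi) ~= 0.39894, and the curve is symmetric about mu.
print(gaussian(0))                 # ~0.398942
print(gaussian(1), gaussian(-1))   # both ~0.241971
print(gaussian(5, mu=5, sigma=2))  # peak of N(5, 2) is 1 / (2 * sqrt(2 * pi)) ~= 0.199471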
import os
from argparse import ArgumentParser
from typing import List

import torch.utils.data

from datasets import Dataset, IterableDataset
from datasets.distributed import split_dataset_by_node

NUM_SHARDS = 4
NUM_ITEMS_PER_SHARD = 3


class FailedTestError(RuntimeError):
    pass


def gen(shards: List[str]):
    for shard in shards:
        for i in range(NUM_ITEMS_PER_SHARD):
            yield {"i": i, "shard": shard}


def main():
    rank = int(os.environ["RANK"])
    world_size = int(os.environ["WORLD_SIZE"])

    parser = ArgumentParser()
    parser.add_argument("--streaming", type=bool)
    parser.add_argument("--local_rank", type=int)
    parser.add_argument("--num_workers", type=int, default=0)
    args = parser.parse_args()
    streaming = args.streaming
    num_workers = args.num_workers

    gen_kwargs = {"shards": [f"shard_{shard_idx}" for shard_idx in range(NUM_SHARDS)]}
    ds = IterableDataset.from_generator(gen, gen_kwargs=gen_kwargs)
    if not streaming:
        ds = Dataset.from_list(list(ds))

    ds = split_dataset_by_node(ds, rank=rank, world_size=world_size)
    dataloader = torch.utils.data.DataLoader(ds, num_workers=num_workers)

    full_size = NUM_SHARDS * NUM_ITEMS_PER_SHARD
    expected_local_size = full_size // world_size
    expected_local_size += int(rank < (full_size % world_size))

    local_size = sum(1 for _ in dataloader)
    if local_size != expected_local_size:
        raise FailedTestError(f"local_size {local_size} != expected_local_size {expected_local_size}")


if __name__ == "__main__":
    main()
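# Hedged standalone sketch of the primitive exercised above: split_dataset_by_node
# hands each rank a disjoint slice whose sizes sum to the full dataset size, which
# is exactly the invariant the FailedTestError check verifies.
from datasets import Dataset
from datasets.distributed import split_dataset_by_node

ds = Dataset.from_dict({"i": list(range(NUM_SHARDS * NUM_ITEMS_PER_SHARD))})
sizes = [len(split_dataset_by_node(ds, rank=r, world_size=3)) for r in range(3)]
print(sizes, sum(sizes) == len(ds))  # e.g. [4, 4, 4] True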
from argparse import ArgumentParser, Namespace from ..utils import logging from . import BaseTransformersCLICommand def lowerCAmelCase_ ( __lowerCAmelCase )-> int: '''simple docstring''' return ConvertCommand( args.model_type , args.tf_checkpoint , args.pytorch_dump_output , args.config , args.finetuning_task_name ) __snake_case = ''' transformers can only be used from the commandline to convert TensorFlow models in PyTorch, In that case, it requires TensorFlow to be installed. Please see https://www.tensorflow.org/install/ for installation instructions. ''' class __snake_case ( lowerCamelCase__ ): @staticmethod def UpperCAmelCase__ ( snake_case__ ) -> List[str]: '''simple docstring''' UpperCAmelCase : Tuple =parser.add_parser( '''convert''' , help='''CLI tool to run convert model from original author checkpoints to Transformers PyTorch checkpoints.''' , ) train_parser.add_argument('''--model_type''' , type=snake_case__ , required=snake_case__ , help='''Model\'s type.''' ) train_parser.add_argument( '''--tf_checkpoint''' , type=snake_case__ , required=snake_case__ , help='''TensorFlow checkpoint path or folder.''' ) train_parser.add_argument( '''--pytorch_dump_output''' , type=snake_case__ , required=snake_case__ , help='''Path to the PyTorch saved model output.''' ) train_parser.add_argument('''--config''' , type=snake_case__ , default='''''' , help='''Configuration file path or folder.''' ) train_parser.add_argument( '''--finetuning_task_name''' , type=snake_case__ , default=snake_case__ , help='''Optional fine-tuning task name if the TF model was a finetuned model.''' , ) train_parser.set_defaults(func=snake_case__ ) def __init__( self , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , *snake_case__ , ) -> List[str]: '''simple docstring''' UpperCAmelCase : List[str] =logging.get_logger('''transformers-cli/converting''' ) self._logger.info(f'''Loading model {model_type}''' ) UpperCAmelCase : List[Any] =model_type UpperCAmelCase : Optional[int] =tf_checkpoint UpperCAmelCase : Union[str, Any] =pytorch_dump_output UpperCAmelCase : Optional[int] =config UpperCAmelCase : Dict =finetuning_task_name def UpperCAmelCase__ ( self ) -> List[str]: '''simple docstring''' if self._model_type == "albert": try: from ..models.albert.convert_albert_original_tf_checkpoint_to_pytorch import ( convert_tf_checkpoint_to_pytorch, ) except ImportError: raise ImportError(snake_case__ ) convert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output ) elif self._model_type == "bert": try: from ..models.bert.convert_bert_original_tf_checkpoint_to_pytorch import ( convert_tf_checkpoint_to_pytorch, ) except ImportError: raise ImportError(snake_case__ ) convert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output ) elif self._model_type == "funnel": try: from ..models.funnel.convert_funnel_original_tf_checkpoint_to_pytorch import ( convert_tf_checkpoint_to_pytorch, ) except ImportError: raise ImportError(snake_case__ ) convert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output ) elif self._model_type == "t5": try: from ..models.ta.convert_ta_original_tf_checkpoint_to_pytorch import convert_tf_checkpoint_to_pytorch except ImportError: raise ImportError(snake_case__ ) convert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output ) elif self._model_type == "gpt": from ..models.openai.convert_openai_original_tf_checkpoint_to_pytorch import ( 
convert_openai_checkpoint_to_pytorch, ) convert_openai_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output ) elif self._model_type == "transfo_xl": try: from ..models.transfo_xl.convert_transfo_xl_original_tf_checkpoint_to_pytorch import ( convert_transfo_xl_checkpoint_to_pytorch, ) except ImportError: raise ImportError(snake_case__ ) if "ckpt" in self._tf_checkpoint.lower(): UpperCAmelCase : Optional[int] =self._tf_checkpoint UpperCAmelCase : Optional[int] ='''''' else: UpperCAmelCase : int =self._tf_checkpoint UpperCAmelCase : Union[str, Any] ='''''' convert_transfo_xl_checkpoint_to_pytorch( snake_case__ , self._config , self._pytorch_dump_output , snake_case__ ) elif self._model_type == "gpt2": try: from ..models.gpta.convert_gpta_original_tf_checkpoint_to_pytorch import ( convert_gpta_checkpoint_to_pytorch, ) except ImportError: raise ImportError(snake_case__ ) convert_gpta_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output ) elif self._model_type == "xlnet": try: from ..models.xlnet.convert_xlnet_original_tf_checkpoint_to_pytorch import ( convert_xlnet_checkpoint_to_pytorch, ) except ImportError: raise ImportError(snake_case__ ) convert_xlnet_checkpoint_to_pytorch( self._tf_checkpoint , self._config , self._pytorch_dump_output , self._finetuning_task_name ) elif self._model_type == "xlm": from ..models.xlm.convert_xlm_original_pytorch_checkpoint_to_pytorch import ( convert_xlm_checkpoint_to_pytorch, ) convert_xlm_checkpoint_to_pytorch(self._tf_checkpoint , self._pytorch_dump_output ) elif self._model_type == "lxmert": from ..models.lxmert.convert_lxmert_original_tf_checkpoint_to_pytorch import ( convert_lxmert_checkpoint_to_pytorch, ) convert_lxmert_checkpoint_to_pytorch(self._tf_checkpoint , self._pytorch_dump_output ) elif self._model_type == "rembert": from ..models.rembert.convert_rembert_tf_checkpoint_to_pytorch import ( convert_rembert_tf_checkpoint_to_pytorch, ) convert_rembert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output ) else: raise ValueError( '''--model_type should be selected in the list [bert, gpt, gpt2, t5, transfo_xl, xlnet, xlm, lxmert]''' )
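# Hedged CLI sketch: with the subcommand registered above, a TensorFlow BERT
# checkpoint would be converted from the shell roughly like this (all paths are
# placeholders, not real files):
#
#   transformers-cli convert --model_type bert \
#       --tf_checkpoint ./bert_model.ckpt \
#       --config ./bert_config.json \
#       --pytorch_dump_output ./pytorch_model.bin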
from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_tf_available, is_tokenizers_available, is_torch_available, ) __snake_case = {'''configuration_opt''': ['''OPT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''OPTConfig''']} try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __snake_case = [ '''OPT_PRETRAINED_MODEL_ARCHIVE_LIST''', '''OPTForCausalLM''', '''OPTModel''', '''OPTPreTrainedModel''', '''OPTForSequenceClassification''', '''OPTForQuestionAnswering''', ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __snake_case = ['''TFOPTForCausalLM''', '''TFOPTModel''', '''TFOPTPreTrainedModel'''] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __snake_case = [ '''FlaxOPTForCausalLM''', '''FlaxOPTModel''', '''FlaxOPTPreTrainedModel''', ] if TYPE_CHECKING: from .configuration_opt import OPT_PRETRAINED_CONFIG_ARCHIVE_MAP, OPTConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_opt import ( OPT_PRETRAINED_MODEL_ARCHIVE_LIST, OPTForCausalLM, OPTForQuestionAnswering, OPTForSequenceClassification, OPTModel, OPTPreTrainedModel, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_opt import TFOPTForCausalLM, TFOPTModel, TFOPTPreTrainedModel try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_opt import FlaxOPTForCausalLM, FlaxOPTModel, FlaxOPTPreTrainedModel else: import sys __snake_case = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
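# Hedged illustration of the lazy-import pattern set up above: the package module is
# replaced by a _LazyModule, so importing transformers itself is cheap and the
# torch-backed submodule only loads on first attribute access.
import transformers

config = transformers.OPTConfig()            # resolves configuration_opt only
model = transformers.OPTForCausalLM(config)  # this first access loads modeling_opt (and torch)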
import argparse import json import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import ViTImageProcessor, ViTMSNConfig, ViTMSNModel from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD torch.set_grad_enabled(False) def lowerCAmelCase_ ( __lowerCAmelCase , __lowerCAmelCase=False )-> str: '''simple docstring''' UpperCAmelCase : Optional[int] =[] for i in range(config.num_hidden_layers ): # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms rename_keys.append((f'''module.blocks.{i}.norm1.weight''', f'''vit.encoder.layer.{i}.layernorm_before.weight''') ) rename_keys.append((f'''module.blocks.{i}.norm1.bias''', f'''vit.encoder.layer.{i}.layernorm_before.bias''') ) rename_keys.append( (f'''module.blocks.{i}.attn.proj.weight''', f'''vit.encoder.layer.{i}.attention.output.dense.weight''') ) rename_keys.append((f'''module.blocks.{i}.attn.proj.bias''', f'''vit.encoder.layer.{i}.attention.output.dense.bias''') ) rename_keys.append((f'''module.blocks.{i}.norm2.weight''', f'''vit.encoder.layer.{i}.layernorm_after.weight''') ) rename_keys.append((f'''module.blocks.{i}.norm2.bias''', f'''vit.encoder.layer.{i}.layernorm_after.bias''') ) rename_keys.append((f'''module.blocks.{i}.mlp.fc1.weight''', f'''vit.encoder.layer.{i}.intermediate.dense.weight''') ) rename_keys.append((f'''module.blocks.{i}.mlp.fc1.bias''', f'''vit.encoder.layer.{i}.intermediate.dense.bias''') ) rename_keys.append((f'''module.blocks.{i}.mlp.fc2.weight''', f'''vit.encoder.layer.{i}.output.dense.weight''') ) rename_keys.append((f'''module.blocks.{i}.mlp.fc2.bias''', f'''vit.encoder.layer.{i}.output.dense.bias''') ) # projection layer + position embeddings rename_keys.extend( [ ('''module.cls_token''', '''vit.embeddings.cls_token'''), ('''module.patch_embed.proj.weight''', '''vit.embeddings.patch_embeddings.projection.weight'''), ('''module.patch_embed.proj.bias''', '''vit.embeddings.patch_embeddings.projection.bias'''), ('''module.pos_embed''', '''vit.embeddings.position_embeddings'''), ] ) if base_model: # layernorm + pooler rename_keys.extend( [ ('''module.norm.weight''', '''layernorm.weight'''), ('''module.norm.bias''', '''layernorm.bias'''), ] ) # if just the base model, we should remove "vit" from all keys that start with "vit" UpperCAmelCase : Optional[Any] =[(pair[0], pair[1][4:]) if pair[1].startswith('''vit''' ) else pair for pair in rename_keys] else: # layernorm + classification head rename_keys.extend( [ ('''norm.weight''', '''vit.layernorm.weight'''), ('''norm.bias''', '''vit.layernorm.bias'''), ('''head.weight''', '''classifier.weight'''), ('''head.bias''', '''classifier.bias'''), ] ) return rename_keys def lowerCAmelCase_ ( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase=False )-> Dict: '''simple docstring''' for i in range(config.num_hidden_layers ): if base_model: UpperCAmelCase : Union[str, Any] ='''''' else: UpperCAmelCase : int ='''vit.''' # read in weights + bias of input projection layer (in timm, this is a single matrix + bias) UpperCAmelCase : Any =state_dict.pop(f'''module.blocks.{i}.attn.qkv.weight''' ) UpperCAmelCase : Union[str, Any] =state_dict.pop(f'''module.blocks.{i}.attn.qkv.bias''' ) # next, add query, keys and values (in that order) to the state dict UpperCAmelCase : Dict =in_proj_weight[ : config.hidden_size, : ] UpperCAmelCase : Union[str, Any] =in_proj_bias[: config.hidden_size] UpperCAmelCase : Any =in_proj_weight[ config.hidden_size : config.hidden_size * 2, : ] 
UpperCAmelCase : Optional[int] =in_proj_bias[ config.hidden_size : config.hidden_size * 2 ] UpperCAmelCase : Optional[Any] =in_proj_weight[ -config.hidden_size :, : ] UpperCAmelCase : Dict =in_proj_bias[-config.hidden_size :] def lowerCAmelCase_ ( __lowerCAmelCase )-> Tuple: '''simple docstring''' UpperCAmelCase : Optional[int] =['''head.weight''', '''head.bias'''] for k in ignore_keys: state_dict.pop(__lowerCAmelCase , __lowerCAmelCase ) def lowerCAmelCase_ ( __lowerCAmelCase )-> str: '''simple docstring''' UpperCAmelCase : Any =[ '''module.fc.fc1.weight''', '''module.fc.fc1.bias''', '''module.fc.bn1.weight''', '''module.fc.bn1.bias''', '''module.fc.bn1.running_mean''', '''module.fc.bn1.running_var''', '''module.fc.bn1.num_batches_tracked''', '''module.fc.fc2.weight''', '''module.fc.fc2.bias''', '''module.fc.bn2.weight''', '''module.fc.bn2.bias''', '''module.fc.bn2.running_mean''', '''module.fc.bn2.running_var''', '''module.fc.bn2.num_batches_tracked''', '''module.fc.fc3.weight''', '''module.fc.fc3.bias''', ] for k in ignore_keys: state_dict.pop(__lowerCAmelCase , __lowerCAmelCase ) def lowerCAmelCase_ ( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )-> Tuple: '''simple docstring''' UpperCAmelCase : List[str] =dct.pop(__lowerCAmelCase ) UpperCAmelCase : str =val def lowerCAmelCase_ ( __lowerCAmelCase , __lowerCAmelCase )-> Union[str, Any]: '''simple docstring''' UpperCAmelCase : Any =ViTMSNConfig() UpperCAmelCase : Dict =10_00 UpperCAmelCase : Any ='''datasets/huggingface/label-files''' UpperCAmelCase : Dict ='''imagenet-1k-id2label.json''' UpperCAmelCase : str =json.load(open(hf_hub_download(__lowerCAmelCase , __lowerCAmelCase ) , '''r''' ) ) UpperCAmelCase : Dict ={int(__lowerCAmelCase ): v for k, v in idalabel.items()} UpperCAmelCase : List[Any] =idalabel UpperCAmelCase : List[Any] ={v: k for k, v in idalabel.items()} if "s16" in checkpoint_url: UpperCAmelCase : Tuple =3_84 UpperCAmelCase : int =15_36 UpperCAmelCase : int =6 elif "l16" in checkpoint_url: UpperCAmelCase : Optional[Any] =10_24 UpperCAmelCase : Optional[Any] =40_96 UpperCAmelCase : List[str] =24 UpperCAmelCase : List[str] =16 UpperCAmelCase : List[Any] =0.1 elif "b4" in checkpoint_url: UpperCAmelCase : List[Any] =4 elif "l7" in checkpoint_url: UpperCAmelCase : Optional[int] =7 UpperCAmelCase : str =10_24 UpperCAmelCase : Any =40_96 UpperCAmelCase : Optional[int] =24 UpperCAmelCase : List[str] =16 UpperCAmelCase : Tuple =0.1 UpperCAmelCase : List[str] =ViTMSNModel(__lowerCAmelCase ) UpperCAmelCase : Dict =torch.hub.load_state_dict_from_url(__lowerCAmelCase , map_location='''cpu''' )['''target_encoder'''] UpperCAmelCase : Optional[Any] =ViTImageProcessor(size=config.image_size ) remove_projection_head(__lowerCAmelCase ) UpperCAmelCase : Union[str, Any] =create_rename_keys(__lowerCAmelCase , base_model=__lowerCAmelCase ) for src, dest in rename_keys: rename_key(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) read_in_q_k_v(__lowerCAmelCase , __lowerCAmelCase , base_model=__lowerCAmelCase ) model.load_state_dict(__lowerCAmelCase ) model.eval() UpperCAmelCase : Any ='''http://images.cocodataset.org/val2017/000000039769.jpg''' UpperCAmelCase : Union[str, Any] =Image.open(requests.get(__lowerCAmelCase , stream=__lowerCAmelCase ).raw ) UpperCAmelCase : List[Any] =ViTImageProcessor( size=config.image_size , image_mean=__lowerCAmelCase , image_std=__lowerCAmelCase ) UpperCAmelCase : Optional[Any] =image_processor(images=__lowerCAmelCase , return_tensors='''pt''' ) # forward pass torch.manual_seed(2 ) UpperCAmelCase 
: Tuple =model(**__lowerCAmelCase ) UpperCAmelCase : Tuple =outputs.last_hidden_state # The following Colab Notebook was used to generate these outputs: # https://colab.research.google.com/gist/sayakpaul/3672419a04f5997827503fd84079bdd1/scratchpad.ipynb if "s16" in checkpoint_url: UpperCAmelCase : str =torch.tensor([[-1.0915, -1.4876, -1.1809]] ) elif "b16" in checkpoint_url: UpperCAmelCase : Union[str, Any] =torch.tensor([[14.2889, -18.9045, 11.7281]] ) elif "l16" in checkpoint_url: UpperCAmelCase : List[str] =torch.tensor([[41.5028, -22.8681, 45.6475]] ) elif "b4" in checkpoint_url: UpperCAmelCase : Union[str, Any] =torch.tensor([[-4.3868, 5.2932, -0.4137]] ) else: UpperCAmelCase : int =torch.tensor([[-0.1792, -0.6465, 2.4263]] ) # verify logits assert torch.allclose(last_hidden_state[:, 0, :3] , __lowerCAmelCase , atol=1e-4 ) print(f'''Saving model to {pytorch_dump_folder_path}''' ) model.save_pretrained(__lowerCAmelCase ) print(f'''Saving image processor to {pytorch_dump_folder_path}''' ) image_processor.save_pretrained(__lowerCAmelCase ) if __name__ == "__main__": __snake_case = argparse.ArgumentParser() # Required parameters parser.add_argument( '''--checkpoint_url''', default='''https://dl.fbaipublicfiles.com/msn/vits16_800ep.pth.tar''', type=str, help='''URL of the checkpoint you\'d like to convert.''', ) parser.add_argument( '''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.''' ) __snake_case = parser.parse_args() convert_vit_msn_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
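# Hedged inference sketch for a converted checkpoint. "facebook/vit-msn-small" is an
# assumption (the official MSN releases on the hub); a locally converted model can be
# loaded by pointing from_pretrained at pytorch_dump_folder_path instead.
import requests
import torch
from PIL import Image
from transformers import ViTImageProcessor, ViTMSNModel

processor = ViTImageProcessor.from_pretrained("facebook/vit-msn-small")
model = ViTMSNModel.from_pretrained("facebook/vit-msn-small")

image = Image.open(requests.get("http://images.cocodataset.org/val2017/000000039769.jpg", stream=True).raw)
inputs = processor(images=image, return_tensors="pt")
with torch.no_grad():
    features = model(**inputs).last_hidden_state
print(features.shape)  # (1, num_patches + 1, hidden_size)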
import tempfile import unittest import numpy as np import transformers from transformers import GPTaTokenizer, GPTJConfig, is_flax_available, is_torch_available from transformers.testing_utils import is_pt_flax_cross_test, require_flax, tooslow from ...generation.test_flax_utils import FlaxGenerationTesterMixin from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask if is_flax_available(): import jax import jax.numpy as jnp from transformers.modeling_flax_pytorch_utils import ( convert_pytorch_state_dict_to_flax, load_flax_weights_in_pytorch_model, ) from transformers.models.gptj.modeling_flax_gptj import FlaxGPTJForCausalLM, FlaxGPTJModel if is_torch_available(): import torch class __snake_case : def __init__( self , snake_case__ , snake_case__=14 , snake_case__=7 , snake_case__=True , snake_case__=True , snake_case__=False , snake_case__=True , snake_case__=99 , snake_case__=32 , snake_case__=4 , snake_case__=4 , snake_case__=4 , snake_case__=37 , snake_case__="gelu" , snake_case__=0.1 , snake_case__=0.1 , snake_case__=512 , snake_case__=0.02 , ) -> str: '''simple docstring''' UpperCAmelCase : str =parent UpperCAmelCase : Tuple =batch_size UpperCAmelCase : Optional[int] =seq_length UpperCAmelCase : Optional[int] =is_training UpperCAmelCase : Tuple =use_input_mask UpperCAmelCase : List[Any] =use_token_type_ids UpperCAmelCase : Optional[Any] =use_labels UpperCAmelCase : Union[str, Any] =vocab_size UpperCAmelCase : List[Any] =hidden_size UpperCAmelCase : Optional[int] =rotary_dim UpperCAmelCase : Union[str, Any] =num_hidden_layers UpperCAmelCase : List[Any] =num_attention_heads UpperCAmelCase : Dict =intermediate_size UpperCAmelCase : Union[str, Any] =hidden_act UpperCAmelCase : Any =hidden_dropout_prob UpperCAmelCase : Dict =attention_probs_dropout_prob UpperCAmelCase : Union[str, Any] =max_position_embeddings UpperCAmelCase : str =initializer_range UpperCAmelCase : Optional[int] =None UpperCAmelCase : List[Any] =vocab_size - 1 UpperCAmelCase : Optional[Any] =vocab_size - 1 UpperCAmelCase : List[Any] =vocab_size - 1 def UpperCAmelCase__ ( self ) -> Optional[int]: '''simple docstring''' UpperCAmelCase : List[str] =ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) UpperCAmelCase : List[Any] =None if self.use_input_mask: UpperCAmelCase : Optional[Any] =random_attention_mask([self.batch_size, self.seq_length] ) UpperCAmelCase : Dict =GPTJConfig( vocab_size=self.vocab_size , n_embd=self.hidden_size , n_layer=self.num_hidden_layers , n_head=self.num_attention_heads , n_positions=self.max_position_embeddings , use_cache=snake_case__ , bos_token_id=self.bos_token_id , eos_token_id=self.eos_token_id , pad_token_id=self.pad_token_id , rotary_dim=self.rotary_dim , ) return (config, input_ids, input_mask) def UpperCAmelCase__ ( self ) -> Dict: '''simple docstring''' UpperCAmelCase : Tuple =self.prepare_config_and_inputs() UpperCAmelCase , UpperCAmelCase , UpperCAmelCase : Union[str, Any] =config_and_inputs UpperCAmelCase : Tuple ={'''input_ids''': input_ids, '''attention_mask''': attention_mask} return config, inputs_dict def UpperCAmelCase__ ( self , snake_case__ , snake_case__ , snake_case__ , snake_case__ ) -> List[Any]: '''simple docstring''' UpperCAmelCase : Any =20 UpperCAmelCase : Any =model_class_name(snake_case__ ) UpperCAmelCase : str =model.init_cache(input_ids.shape[0] , snake_case__ ) UpperCAmelCase : Any =jnp.ones((input_ids.shape[0], max_decoder_length) , dtype='''i4''' ) UpperCAmelCase : Optional[Any] =jnp.broadcast_to( 
jnp.arange(input_ids.shape[-1] - 1 )[None, :] , (input_ids.shape[0], input_ids.shape[-1] - 1) ) UpperCAmelCase : Optional[Any] =model( input_ids[:, :-1] , attention_mask=snake_case__ , past_key_values=snake_case__ , position_ids=snake_case__ , ) UpperCAmelCase : List[str] =jnp.array(input_ids.shape[0] * [[input_ids.shape[-1] - 1]] , dtype='''i4''' ) UpperCAmelCase : Optional[Any] =model( input_ids[:, -1:] , attention_mask=snake_case__ , past_key_values=outputs_cache.past_key_values , position_ids=snake_case__ , ) UpperCAmelCase : List[Any] =model(snake_case__ ) UpperCAmelCase : Any =np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) ) self.parent.assertTrue(diff < 1e-3 , msg=f'''Max diff is {diff}''' ) def UpperCAmelCase__ ( self , snake_case__ , snake_case__ , snake_case__ , snake_case__ ) -> List[Any]: '''simple docstring''' UpperCAmelCase : Dict =20 UpperCAmelCase : Dict =model_class_name(snake_case__ ) UpperCAmelCase : Tuple =jnp.concatenate( [attention_mask, jnp.zeros((attention_mask.shape[0], max_decoder_length - attention_mask.shape[1]) )] , axis=-1 , ) UpperCAmelCase : Dict =model.init_cache(input_ids.shape[0] , snake_case__ ) UpperCAmelCase : int =jnp.broadcast_to( jnp.arange(input_ids.shape[-1] - 1 )[None, :] , (input_ids.shape[0], input_ids.shape[-1] - 1) ) UpperCAmelCase : Optional[Any] =model( input_ids[:, :-1] , attention_mask=snake_case__ , past_key_values=snake_case__ , position_ids=snake_case__ , ) UpperCAmelCase : Any =jnp.array(input_ids.shape[0] * [[input_ids.shape[-1] - 1]] , dtype='''i4''' ) UpperCAmelCase : str =model( input_ids[:, -1:] , past_key_values=outputs_cache.past_key_values , attention_mask=snake_case__ , position_ids=snake_case__ , ) UpperCAmelCase : Any =model(snake_case__ , attention_mask=snake_case__ ) UpperCAmelCase : Dict =np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) ) self.parent.assertTrue(diff < 1e-3 , msg=f'''Max diff is {diff}''' ) @require_flax class __snake_case ( lowerCamelCase__ , lowerCamelCase__ , unittest.TestCase ): __lowerCamelCase : Tuple = (FlaxGPTJModel, FlaxGPTJForCausalLM) if is_flax_available() else () __lowerCamelCase : Optional[Any] = (FlaxGPTJForCausalLM,) if is_flax_available() else () def UpperCAmelCase__ ( self ) -> int: '''simple docstring''' UpperCAmelCase : Union[str, Any] =FlaxGPTJModelTester(self ) def UpperCAmelCase__ ( self ) -> str: '''simple docstring''' for model_class_name in self.all_model_classes: UpperCAmelCase , UpperCAmelCase , UpperCAmelCase : Dict =self.model_tester.prepare_config_and_inputs() self.model_tester.check_use_cache_forward(snake_case__ , snake_case__ , snake_case__ , snake_case__ ) def UpperCAmelCase__ ( self ) -> Dict: '''simple docstring''' for model_class_name in self.all_model_classes: UpperCAmelCase , UpperCAmelCase , UpperCAmelCase : int =self.model_tester.prepare_config_and_inputs() self.model_tester.check_use_cache_forward_with_attn_mask( snake_case__ , snake_case__ , snake_case__ , snake_case__ ) @tooslow def UpperCAmelCase__ ( self ) -> Union[str, Any]: '''simple docstring''' UpperCAmelCase : Tuple =GPTaTokenizer.from_pretrained('''gpt2''' , pad_token='''<|endoftext|>''' , padding_side='''left''' ) UpperCAmelCase : Optional[Any] =tokenizer(['''Hello this is a long string''', '''Hey'''] , return_tensors='''np''' , padding=snake_case__ , truncation=snake_case__ ) UpperCAmelCase : Optional[int] =FlaxGPTJForCausalLM.from_pretrained('''EleutherAI/gpt-j-6B''' ) UpperCAmelCase : str =False UpperCAmelCase : Union[str, Any] 
=model.config.eos_token_id UpperCAmelCase : List[Any] =jax.jit(model.generate ) UpperCAmelCase : Dict =jit_generate( inputs['''input_ids'''] , attention_mask=inputs['''attention_mask'''] , pad_token_id=tokenizer.pad_token_id ).sequences UpperCAmelCase : Any =tokenizer.batch_decode(snake_case__ , skip_special_tokens=snake_case__ ) UpperCAmelCase : Tuple =[ '''Hello this is a long string of text.\n\nI\'m trying to get the text of the''', '''Hey, I\'m a little late to the party. I\'m going to''', ] self.assertListEqual(snake_case__ , snake_case__ ) @is_pt_flax_cross_test def UpperCAmelCase__ ( self ) -> List[Any]: '''simple docstring''' UpperCAmelCase , UpperCAmelCase : List[str] =self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: with self.subTest(model_class.__name__ ): # prepare inputs UpperCAmelCase : Union[str, Any] =self._prepare_for_class(snake_case__ , snake_case__ ) UpperCAmelCase : List[str] ={k: torch.tensor(v.tolist() ) for k, v in prepared_inputs_dict.items()} # load corresponding PyTorch class UpperCAmelCase : Any =model_class.__name__[4:] # Skip the "Flax" at the beginning UpperCAmelCase : Any =getattr(snake_case__ , snake_case__ ) UpperCAmelCase , UpperCAmelCase : Union[str, Any] =pt_inputs['''input_ids'''].shape UpperCAmelCase : Tuple =np.random.randint(0 , seq_length - 1 , size=(batch_size,) ) for batch_idx, start_index in enumerate(snake_case__ ): UpperCAmelCase : int =0 UpperCAmelCase : Optional[int] =1 UpperCAmelCase : Optional[int] =0 UpperCAmelCase : Union[str, Any] =1 UpperCAmelCase : List[str] =pt_model_class(snake_case__ ).eval() UpperCAmelCase : Optional[int] =model_class(snake_case__ , dtype=jnp.floataa ) UpperCAmelCase : Any =convert_pytorch_state_dict_to_flax(pt_model.state_dict() , snake_case__ ) UpperCAmelCase : Union[str, Any] =fx_state with torch.no_grad(): UpperCAmelCase : Any =pt_model(**snake_case__ ).to_tuple() UpperCAmelCase : Dict =fx_model(**snake_case__ ).to_tuple() self.assertEqual(len(snake_case__ ) , len(snake_case__ ) , '''Output lengths differ between Flax and PyTorch''' ) for fx_output, pt_output in zip(snake_case__ , snake_case__ ): self.assert_almost_equals(fx_output[:, -1] , pt_output[:, -1].numpy() , 4e-2 ) with tempfile.TemporaryDirectory() as tmpdirname: pt_model.save_pretrained(snake_case__ ) UpperCAmelCase : str =model_class.from_pretrained(snake_case__ , from_pt=snake_case__ ) UpperCAmelCase : int =fx_model_loaded(**snake_case__ ).to_tuple() self.assertEqual( len(snake_case__ ) , len(snake_case__ ) , '''Output lengths differ between Flax and PyTorch''' ) for fx_output_loaded, pt_output in zip(snake_case__ , snake_case__ ): self.assert_almost_equals(fx_output_loaded[:, -1] , pt_output[:, -1].numpy() , 4e-2 ) @is_pt_flax_cross_test def UpperCAmelCase__ ( self ) -> Optional[Any]: '''simple docstring''' UpperCAmelCase , UpperCAmelCase : Any =self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: with self.subTest(model_class.__name__ ): # prepare inputs UpperCAmelCase : Union[str, Any] =self._prepare_for_class(snake_case__ , snake_case__ ) UpperCAmelCase : Union[str, Any] ={k: torch.tensor(v.tolist() ) for k, v in prepared_inputs_dict.items()} # load corresponding PyTorch class UpperCAmelCase : int =model_class.__name__[4:] # Skip the "Flax" at the beginning UpperCAmelCase : int =getattr(snake_case__ , snake_case__ ) UpperCAmelCase : Dict =pt_model_class(snake_case__ ).eval() UpperCAmelCase : str =model_class(snake_case__ , dtype=jnp.floataa ) 
UpperCAmelCase : Optional[Any] =load_flax_weights_in_pytorch_model(snake_case__ , fx_model.params ) UpperCAmelCase , UpperCAmelCase : Optional[int] =pt_inputs['''input_ids'''].shape UpperCAmelCase : Optional[int] =np.random.randint(0 , seq_length - 1 , size=(batch_size,) ) for batch_idx, start_index in enumerate(snake_case__ ): UpperCAmelCase : str =0 UpperCAmelCase : Any =1 UpperCAmelCase : List[Any] =0 UpperCAmelCase : Tuple =1 # make sure weights are tied in PyTorch pt_model.tie_weights() with torch.no_grad(): UpperCAmelCase : Optional[Any] =pt_model(**snake_case__ ).to_tuple() UpperCAmelCase : List[Any] =fx_model(**snake_case__ ).to_tuple() self.assertEqual(len(snake_case__ ) , len(snake_case__ ) , '''Output lengths differ between Flax and PyTorch''' ) for fx_output, pt_output in zip(snake_case__ , snake_case__ ): self.assert_almost_equals(fx_output[:, -1] , pt_output[:, -1].numpy() , 4e-2 ) with tempfile.TemporaryDirectory() as tmpdirname: fx_model.save_pretrained(snake_case__ ) UpperCAmelCase : Tuple =pt_model_class.from_pretrained(snake_case__ , from_flax=snake_case__ ) with torch.no_grad(): UpperCAmelCase : Any =pt_model_loaded(**snake_case__ ).to_tuple() self.assertEqual( len(snake_case__ ) , len(snake_case__ ) , '''Output lengths differ between Flax and PyTorch''' ) for fx_output, pt_output in zip(snake_case__ , snake_case__ ): self.assert_almost_equals(fx_output[:, -1] , pt_output[:, -1].numpy() , 4e-2 ) @tooslow def UpperCAmelCase__ ( self ) -> List[str]: '''simple docstring''' for model_class_name in self.all_model_classes: UpperCAmelCase : str =model_class_name.from_pretrained('''EleutherAI/gpt-j-6B''' ) UpperCAmelCase : Tuple =model(np.ones((1, 1) ) ) self.assertIsNotNone(snake_case__ )
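# Hedged minimal recreation of the cache-equivalence property tested above: decoding
# the final token with past_key_values must match a single full forward pass. The
# config values are toy numbers chosen so this runs quickly on CPU.
import jax.numpy as jnp
import numpy as np
from transformers import GPTJConfig
from transformers.models.gptj.modeling_flax_gptj import FlaxGPTJModel

config = GPTJConfig(vocab_size=99, n_positions=64, n_embd=32, n_layer=2, n_head=4, rotary_dim=4)
model = FlaxGPTJModel(config)
input_ids = jnp.array(np.random.randint(0, 99, size=(1, 8)), dtype="i4")
attention_mask = jnp.ones((1, 8), dtype="i4")

full = model(input_ids, attention_mask=attention_mask)[0]

cache = model.init_cache(1, 8)
prefix = model(
    input_ids[:, :-1],
    attention_mask=attention_mask,
    past_key_values=cache,
    position_ids=jnp.arange(7)[None, :],
)
last = model(
    input_ids[:, -1:],
    attention_mask=attention_mask,
    past_key_values=prefix.past_key_values,
    position_ids=jnp.array([[7]], dtype="i4"),
)
print(np.max(np.abs(full[:, -1] - last[0][:, -1])))  # should be ~0 (well below 1e-3)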
import heapq as hq
import math
from collections.abc import Iterator


class Vertex:
    """A graph vertex with a key for Prim's algorithm."""

    def __init__(self, id_):
        self.id = str(id_)
        self.key = None
        self.pi = None
        self.neighbors = []
        self.edges = {}  # {vertex:distance}

    def __lt__(self, other):
        return self.key < other.key

    def __repr__(self):
        return self.id

    def add_neighbor(self, vertex):
        self.neighbors.append(vertex)

    def add_edge(self, vertex, weight):
        self.edges[vertex.id] = weight


def connect(graph, a, b, edge):
    # add the neighbors:
    graph[a - 1].add_neighbor(graph[b - 1])
    graph[b - 1].add_neighbor(graph[a - 1])
    # add the edges:
    graph[a - 1].add_edge(graph[b - 1], edge)
    graph[b - 1].add_edge(graph[a - 1], edge)


def prim(graph: list, root: Vertex) -> list:
    a = []
    for u in graph:
        u.key = math.inf
        u.pi = None
    root.key = 0
    q = graph[:]
    while q:
        u = min(q)
        q.remove(u)
        for v in u.neighbors:
            if (v in q) and (u.edges[v.id] < v.key):
                v.pi = u
                v.key = u.edges[v.id]
    for i in range(1, len(graph)):
        a.append((int(graph[i].id) + 1, int(graph[i].pi.id) + 1))
    return a


def prim_heap(graph: list, root: Vertex) -> Iterator[tuple]:
    for u in graph:
        u.key = math.inf
        u.pi = None
    root.key = 0

    h = list(graph)
    hq.heapify(h)

    while h:
        u = hq.heappop(h)
        for v in u.neighbors:
            if (v in h) and (u.edges[v.id] < v.key):
                v.pi = u
                v.key = u.edges[v.id]
                hq.heapify(h)

    for i in range(1, len(graph)):
        yield (int(graph[i].id) + 1, int(graph[i].pi.id) + 1)


def test_vector() -> None:
    """Doctest-driven check of prim and prim_heap."""


if __name__ == "__main__":
    import doctest

    doctest.testmod()
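# Hedged usage sketch on a 3-vertex triangle with edge weights 1, 2 and 3:
# Prim's algorithm from vertex 1 keeps the two cheapest edges.
graph = [Vertex(n) for n in range(3)]
connect(graph, 1, 2, 1)
connect(graph, 2, 3, 2)
connect(graph, 1, 3, 3)
print(prim(graph, graph[0]))             # [(2, 1), (3, 2)]
print(list(prim_heap(graph, graph[0])))  # same tree from the heap-based variant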
from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available __snake_case = { '''configuration_bloom''': ['''BLOOM_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''BloomConfig''', '''BloomOnnxConfig'''], } try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __snake_case = ['''BloomTokenizerFast'''] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __snake_case = [ '''BLOOM_PRETRAINED_MODEL_ARCHIVE_LIST''', '''BloomForCausalLM''', '''BloomModel''', '''BloomPreTrainedModel''', '''BloomForSequenceClassification''', '''BloomForTokenClassification''', '''BloomForQuestionAnswering''', ] if TYPE_CHECKING: from .configuration_bloom import BLOOM_PRETRAINED_CONFIG_ARCHIVE_MAP, BloomConfig, BloomOnnxConfig try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_bloom_fast import BloomTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_bloom import ( BLOOM_PRETRAINED_MODEL_ARCHIVE_LIST, BloomForCausalLM, BloomForQuestionAnswering, BloomForSequenceClassification, BloomForTokenClassification, BloomModel, BloomPreTrainedModel, ) else: import sys __snake_case = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
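# Hedged usage sketch: the fast tokenizer exported above can be exercised without
# downloading any model weights ("bigscience/bloom-560m" is assumed here as a small
# public checkpoint carrying this tokenizer).
from transformers import BloomTokenizerFast

tokenizer = BloomTokenizerFast.from_pretrained("bigscience/bloom-560m")
ids = tokenizer("Hello world")["input_ids"]
print(ids, "->", tokenizer.decode(ids))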
def binomial_coefficient(n: int, k: int) -> int:
    result = 1  # to keep the calculated value
    # since C(n, k) = C(n, n - k)
    if k > (n - k):
        k = n - k
    # calculate C(n, k)
    for i in range(k):
        result *= n - i
        result //= i + 1
    return result


def catalan_number(node_count: int) -> int:
    """Return the number of possible binary search trees for node_count nodes."""
    return binomial_coefficient(2 * node_count, node_count) // (node_count + 1)


def factorial(n: int) -> int:
    if n < 0:
        raise ValueError("factorial() not defined for negative values")
    result = 1
    for i in range(1, n + 1):
        result *= i
    return result


def binary_tree_count(node_count: int) -> int:
    """Return the number of possible labelled binary trees for node_count nodes."""
    return catalan_number(node_count) * factorial(node_count)


if __name__ == "__main__":
    node_count = int(input("Enter the number of nodes: ").strip() or 0)
    if node_count <= 0:
        raise ValueError("We need some nodes to work with.")
    print(
        f"Given {node_count} nodes, there are {binary_tree_count(node_count)} "
        f"binary trees and {catalan_number(node_count)} binary search trees."
    )
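# Worked check for node_count = 3: C(6, 3) = 20, Catalan(3) = 20 // 4 = 5 binary
# search trees, and 5 * 3! = 30 distinct labelled binary trees.
print(binomial_coefficient(6, 3))  # 20
print(catalan_number(3))           # 5
print(factorial(3))                # 6
print(binary_tree_count(3))        # 30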
import os from typing import Dict, List, Tuple, TypeVar, Union __snake_case = TypeVar('''T''') __snake_case = Union[List[T], Tuple[T, ...]] __snake_case = Union[T, List[T], Dict[str, T]] __snake_case = Union[str, bytes, os.PathLike]
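# Hedged illustration (assuming the aliases above correspond to datasets' typing
# aliases T, ListLike, NestedDataStructureLike and PathLike): annotations of these
# shapes all satisfy them.
batch: Union[List[int], Tuple[int, ...]] = (1, 2, 3)                # a ListLike
nested: Union[str, List[str], Dict[str, str]] = {"train": "a.txt"}  # a NestedDataStructureLike
archive: Union[str, bytes, os.PathLike] = "data/archive.zip"        # a PathLike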
from sklearn.metrics import f1_score, matthews_corrcoef

import datasets

from .record_evaluation import evaluate as evaluate_record


_CITATION = """\
@article{wang2019superglue,
  title={SuperGLUE: A Stickier Benchmark for General-Purpose Language Understanding Systems},
  author={Wang, Alex and Pruksachatkun, Yada and Nangia, Nikita and Singh, Amanpreet and Michael, Julian and Hill, Felix and Levy, Omer and Bowman, Samuel R},
  journal={arXiv preprint arXiv:1905.00537},
  year={2019}
}
"""

_DESCRIPTION = """\
SuperGLUE (https://super.gluebenchmark.com/) is a new benchmark styled after
GLUE with a new set of more difficult language understanding tasks, improved
resources, and a new public leaderboard.
"""

_KWARGS_DESCRIPTION = """
Compute SuperGLUE evaluation metric associated to each SuperGLUE dataset.
Args:
    predictions: list of predictions to score. Depending on the SuperGLUE subset:
        - for 'record': list of question-answer dictionaries with the following keys:
            - 'idx': index of the question as specified by the dataset
            - 'prediction_text': the predicted answer text
        - for 'multirc': list of question-answer dictionaries with the following keys:
            - 'idx': index of the question-answer pair as specified by the dataset
            - 'prediction': the predicted answer label
        - otherwise: list of predicted labels
    references: list of reference labels. Depending on the SuperGLUE subset:
        - for 'record': list of question-answers dictionaries with the following keys:
            - 'idx': index of the question as specified by the dataset
            - 'answers': list of possible answers
        - otherwise: list of reference labels
Returns: depending on the SuperGLUE subset:
    - for 'record':
        - 'exact_match': Exact match between answer and gold answer
        - 'f1': F1 score
    - for 'multirc':
        - 'exact_match': Exact match between answer and gold answer
        - 'f1_m': Per-question macro-F1 score
        - 'f1_a': Average F1 score over all answers
    - for 'axb':
        - 'matthews_correlation': Matthews Correlation
    - for 'cb':
        - 'accuracy': Accuracy
        - 'f1': F1 score
    - for all others:
        - 'accuracy': Accuracy
Examples:

    >>> super_glue_metric = datasets.load_metric('super_glue', 'copa')  # any of ["copa", "rte", "wic", "wsc", "wsc.fixed", "boolq", "axg"]
    >>> predictions = [0, 1]
    >>> references = [0, 1]
    >>> results = super_glue_metric.compute(predictions=predictions, references=references)
    >>> print(results)
    {'accuracy': 1.0}

    >>> super_glue_metric = datasets.load_metric('super_glue', 'cb')
    >>> predictions = [0, 1]
    >>> references = [0, 1]
    >>> results = super_glue_metric.compute(predictions=predictions, references=references)
    >>> print(results)
    {'accuracy': 1.0, 'f1': 1.0}

    >>> super_glue_metric = datasets.load_metric('super_glue', 'record')
    >>> predictions = [{'idx': {'passage': 0, 'query': 0}, 'prediction_text': 'answer'}]
    >>> references = [{'idx': {'passage': 0, 'query': 0}, 'answers': ['answer', 'another_answer']}]
    >>> results = super_glue_metric.compute(predictions=predictions, references=references)
    >>> print(results)
    {'exact_match': 1.0, 'f1': 1.0}

    >>> super_glue_metric = datasets.load_metric('super_glue', 'multirc')
    >>> predictions = [{'idx': {'answer': 0, 'paragraph': 0, 'question': 0}, 'prediction': 0}, {'idx': {'answer': 1, 'paragraph': 2, 'question': 3}, 'prediction': 1}]
    >>> references = [0, 1]
    >>> results = super_glue_metric.compute(predictions=predictions, references=references)
    >>> print(results)
    {'exact_match': 1.0, 'f1_m': 1.0, 'f1_a': 1.0}

    >>> super_glue_metric = datasets.load_metric('super_glue', 'axb')
    >>> references = [0, 1]
    >>> predictions = [0, 1]
    >>> results = super_glue_metric.compute(predictions=predictions, references=references)
    >>> print(results)
    {'matthews_correlation': 1.0}
"""


def simple_accuracy(preds, labels):
    return float((preds == labels).mean())


def acc_and_f1(preds, labels, f1_avg="binary"):
    acc = simple_accuracy(preds, labels)
    f1 = float(f1_score(y_true=labels, y_pred=preds, average=f1_avg))
    return {
        "accuracy": acc,
        "f1": f1,
    }


def evaluate_multirc(ids_preds, labels):
    # Group predictions by question, then compute per-question macro-F1 and exact match.
    question_map = {}
    for id_pred, label in zip(ids_preds, labels):
        question_id = f"{id_pred['idx']['paragraph']}-{id_pred['idx']['question']}"
        pred = id_pred["prediction"]
        if question_id in question_map:
            question_map[question_id].append((pred, label))
        else:
            question_map[question_id] = [(pred, label)]
    f1s, ems = [], []
    for question, preds_labels in question_map.items():
        question_preds, question_labels = zip(*preds_labels)
        f1 = f1_score(y_true=question_labels, y_pred=question_preds, average="macro")
        f1s.append(f1)
        em = int(sum(pred == label for pred, label in preds_labels) == len(preds_labels))
        ems.append(em)
    f1_m = float(sum(f1s) / len(f1s))
    em = sum(ems) / len(ems)
    f1_a = float(f1_score(y_true=labels, y_pred=[id_pred["prediction"] for id_pred in ids_preds]))
    return {"exact_match": em, "f1_m": f1_m, "f1_a": f1_a}


@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class SuperGlue(datasets.Metric):
    def _info(self):
        if self.config_name not in [
            "boolq",
            "cb",
            "copa",
            "multirc",
            "record",
            "rte",
            "wic",
            "wsc",
            "wsc.fixed",
            "axb",
            "axg",
        ]:
            raise KeyError(
                "You should supply a configuration name selected in "
                '["boolq", "cb", "copa", "multirc", "record", "rte", "wic", "wsc", "wsc.fixed", "axb", "axg",]'
            )
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(self._get_feature_types()),
            codebase_urls=[],
            reference_urls=[],
            format="numpy" if not self.config_name == "record" and not self.config_name == "multirc" else None,
        )

    def _get_feature_types(self):
        if self.config_name == "record":
            return {
                "predictions": {
                    "idx": {
                        "passage": datasets.Value("int64"),
                        "query": datasets.Value("int64"),
                    },
                    "prediction_text": datasets.Value("string"),
                },
                "references": {
                    "idx": {
                        "passage": datasets.Value("int64"),
                        "query": datasets.Value("int64"),
                    },
                    "answers": datasets.Sequence(datasets.Value("string")),
                },
            }
        elif self.config_name == "multirc":
            return {
                "predictions": {
                    "idx": {
                        "answer": datasets.Value("int64"),
                        "paragraph": datasets.Value("int64"),
                        "question": datasets.Value("int64"),
                    },
                    "prediction": datasets.Value("int64"),
                },
                "references": datasets.Value("int64"),
            }
        else:
            return {
                "predictions": datasets.Value("int64"),
                "references": datasets.Value("int64"),
            }

    def _compute(self, predictions, references):
        if self.config_name == "axb":
            return {"matthews_correlation": matthews_corrcoef(references, predictions)}
        elif self.config_name == "cb":
            return acc_and_f1(predictions, references, f1_avg="macro")
        elif self.config_name == "record":
            dataset = [
                {
                    "qas": [
                        {"id": ref["idx"]["query"], "answers": [{"text": ans} for ans in ref["answers"]]}
                        for ref in references
                    ]
                }
            ]
            predictions = {pred["idx"]["query"]: pred["prediction_text"] for pred in predictions}
            return evaluate_record(dataset, predictions)[0]
        elif self.config_name == "multirc":
            return evaluate_multirc(predictions, references)
        elif self.config_name in ["copa", "rte", "wic", "wsc", "wsc.fixed", "boolq", "axg"]:
            return {"accuracy": simple_accuracy(predictions, references)}
        else:
            raise KeyError(
                "You should supply a configuration name selected in "
                '["boolq", "cb", "copa", "multirc", "record", "rte", "wic", "wsc", "wsc.fixed", "axb", "axg",]'
            )
import os
from shutil import copyfile
from typing import List, Optional, Tuple

from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging


if is_sentencepiece_available():
    from .tokenization_big_bird import BigBirdTokenizer
else:
    BigBirdTokenizer = None

logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "spiece.model", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "google/bigbird-roberta-base": "https://huggingface.co/google/bigbird-roberta-base/resolve/main/spiece.model",
        "google/bigbird-roberta-large": (
            "https://huggingface.co/google/bigbird-roberta-large/resolve/main/spiece.model"
        ),
        "google/bigbird-base-trivia-itc": (
            "https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/spiece.model"
        ),
    },
    "tokenizer_file": {
        "google/bigbird-roberta-base": (
            "https://huggingface.co/google/bigbird-roberta-base/resolve/main/tokenizer.json"
        ),
        "google/bigbird-roberta-large": (
            "https://huggingface.co/google/bigbird-roberta-large/resolve/main/tokenizer.json"
        ),
        "google/bigbird-base-trivia-itc": (
            "https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/tokenizer.json"
        ),
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "google/bigbird-roberta-base": 4096,
    "google/bigbird-roberta-large": 4096,
    "google/bigbird-base-trivia-itc": 4096,
}

SPIECE_UNDERLINE = "▁"


class BigBirdTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = BigBirdTokenizer
    model_input_names = ["input_ids", "attention_mask"]
    prefix_tokens: List[int] = []

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        unk_token="<unk>",
        bos_token="<s>",
        eos_token="</s>",
        pad_token="<pad>",
        sep_token="[SEP]",
        mask_token="[MASK]",
        cls_token="[CLS]",
        **kwargs,
    ):
        bos_token = AddedToken(bos_token, lstrip=False, rstrip=False) if isinstance(bos_token, str) else bos_token
        eos_token = AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token
        unk_token = AddedToken(unk_token, lstrip=False, rstrip=False) if isinstance(unk_token, str) else unk_token
        pad_token = AddedToken(pad_token, lstrip=False, rstrip=False) if isinstance(pad_token, str) else pad_token
        cls_token = AddedToken(cls_token, lstrip=False, rstrip=False) if isinstance(cls_token, str) else cls_token
        sep_token = AddedToken(sep_token, lstrip=False, rstrip=False) if isinstance(sep_token, str) else sep_token
        # Mask token behaves like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            **kwargs,
        )
        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return cls + token_ids_0 + sep
        return cls + token_ids_0 + sep + token_ids_1 + sep

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            if token_ids_1 is not None:
                raise ValueError(
                    "You should not supply a second sequence if the provided sequence of "
                    "ids is already formatted with special tokens for the model."
                )
            return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_0]

        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
                "tokenizer."
            )
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        return (out_vocab_file,)
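A minimal usage sketch for the fast tokenizer above (illustrative only; assumes the `transformers` package is installed and the checkpoint can be fetched from the Hub):

from transformers import BigBirdTokenizerFast

# Downloads spiece.model / tokenizer.json on first use (network access assumed).
tokenizer = BigBirdTokenizerFast.from_pretrained("google/bigbird-roberta-base")
encoded = tokenizer("BigBird handles long sequences.")
# build_inputs_with_special_tokens wraps the ids in [CLS] ... [SEP]
print(encoded["input_ids"])
print(tokenizer.decode(encoded["input_ids"]))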
from __future__ import annotations


def median_of_two_arrays(nums1: list[float], nums2: list[float]) -> float:
    """Return the median of the merged, sorted contents of the two arrays."""
    all_numbers = sorted(nums1 + nums2)
    div, mod = divmod(len(all_numbers), 2)
    if mod == 1:
        return all_numbers[div]
    else:
        return (all_numbers[div] + all_numbers[div - 1]) / 2


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    array_1 = [float(x) for x in input("Enter the elements of first array: ").split()]
    array_2 = [float(x) for x in input("Enter the elements of second array: ").split()]
    print(f"The median of two arrays is: {median_of_two_arrays(array_1, array_2)}")
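Two worked examples for the function above (illustrative additions, not part of the original file):

# Odd combined length: the middle element of the merged, sorted list.
assert median_of_two_arrays([1.0, 3.0], [2.0]) == 2.0
# Even combined length: the mean of the two middle elements.
assert median_of_two_arrays([-1.0, 3.0], [-2.0, -3.0]) == -1.5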
from collections.abc import Callable
from math import pi, sqrt
from random import uniform
from statistics import mean


def pi_estimator(iterations: int) -> None:
    """Estimate pi by sampling random points and counting how many land in the unit circle."""

    def is_in_circle(x: float, y: float) -> bool:
        distance_from_centre = sqrt((x**2) + (y**2))
        # Our circle has a radius of 1, so a distance
        # greater than 1 would land outside the circle.
        return distance_from_centre <= 1

    # The proportion of guesses that landed in the circle
    proportion = mean(
        int(is_in_circle(uniform(-1.0, 1.0), uniform(-1.0, 1.0))) for _ in range(iterations)
    )
    # The ratio of the area for circle to square is pi/4.
    pi_estimate = proportion * 4
    print(f"The estimated value of pi is {pi_estimate}")
    print(f"The numpy value of pi is {pi}")
    print(f"The total error is {abs(pi - pi_estimate)}")


def area_under_curve_estimator(
    iterations: int,
    function_to_integrate: Callable[[float], float],
    min_value: float = 0.0,
    max_value: float = 1.0,
) -> float:
    """Monte Carlo estimate of the definite integral of a function over [min_value, max_value]."""
    return mean(
        function_to_integrate(uniform(min_value, max_value)) for _ in range(iterations)
    ) * (max_value - min_value)


def area_under_line_estimator_check(iterations: int, min_value: float = 0.0, max_value: float = 1.0) -> None:
    def identity_function(x: float) -> float:
        return x

    estimated_value = area_under_curve_estimator(iterations, identity_function, min_value, max_value)
    expected_value = (max_value * max_value - min_value * min_value) / 2
    print("******************")
    print(f"Estimating area under y=x where x varies from {min_value} to {max_value}")
    print(f"Estimated value is {estimated_value}")
    print(f"Expected value is {expected_value}")
    print(f"Total error is {abs(estimated_value - expected_value)}")
    print("******************")


def pi_estimator_using_area_under_curve(iterations: int) -> None:
    def function_to_integrate(x: float) -> float:
        return sqrt(4.0 - x * x)

    estimated_value = area_under_curve_estimator(iterations, function_to_integrate, 0.0, 2.0)
    print("******************")
    print("Estimating pi using area_under_curve_estimator")
    print(f"Estimated value is {estimated_value}")
    print(f"Expected value is {pi}")
    print(f"Total error is {abs(estimated_value - pi)}")
    print("******************")


if __name__ == "__main__":
    import doctest

    doctest.testmod()
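A quick way to exercise the three estimators above (illustrative; larger iteration counts tighten the estimates at the cost of runtime):

pi_estimator(100_000)
area_under_line_estimator_check(100_000)
pi_estimator_using_area_under_curve(100_000)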
from collections import OrderedDict
from typing import Mapping

from packaging import version

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging


logger = logging.get_logger(__name__)

LEVIT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/levit-128S": "https://huggingface.co/facebook/levit-128S/resolve/main/config.json",
    # See all LeViT models at https://huggingface.co/models?filter=levit
}


class LevitConfig(PretrainedConfig):
    model_type = "levit"

    def __init__(
        self,
        image_size=224,
        num_channels=3,
        kernel_size=3,
        stride=2,
        padding=1,
        patch_size=16,
        hidden_sizes=[128, 256, 384],
        num_attention_heads=[4, 8, 12],
        depths=[4, 4, 4],
        key_dim=[16, 16, 16],
        drop_path_rate=0,
        mlp_ratio=[2, 2, 2],
        attention_ratio=[2, 2, 2],
        initializer_range=0.02,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.image_size = image_size
        self.num_channels = num_channels
        self.kernel_size = kernel_size
        self.stride = stride
        self.padding = padding
        self.hidden_sizes = hidden_sizes
        self.num_attention_heads = num_attention_heads
        self.depths = depths
        self.key_dim = key_dim
        self.drop_path_rate = drop_path_rate
        self.patch_size = patch_size
        self.attention_ratio = attention_ratio
        self.mlp_ratio = mlp_ratio
        self.initializer_range = initializer_range
        self.down_ops = [
            ["Subsample", key_dim[0], hidden_sizes[0] // key_dim[0], 4, 2, 2],
            ["Subsample", key_dim[0], hidden_sizes[1] // key_dim[0], 4, 2, 2],
        ]


class LevitOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4
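A small sketch showing what the default configuration produces (illustrative; assumes `transformers` exposes LevitConfig):

from transformers import LevitConfig

config = LevitConfig()
print(config.hidden_sizes)  # [128, 256, 384]
# down_ops[0] == ['Subsample', 16, 128 // 16, 4, 2, 2] == ['Subsample', 16, 8, 4, 2, 2]
print(config.down_ops[0])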
from __future__ import annotations

import unittest

import numpy as np

from transformers import BlipTextConfig
from transformers.testing_utils import require_tf, slow
from transformers.utils import is_tf_available

from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask


if is_tf_available():
    import tensorflow as tf

    from transformers import TFBlipTextModel
    from transformers.models.blip.modeling_tf_blip import TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST


class BlipTextModelTester:
    def __init__(
        self,
        parent,
        batch_size=12,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        projection_dim=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        dropout=0.1,
        attention_dropout=0.1,
        max_position_embeddings=512,
        initializer_range=0.02,
        bos_token_id=0,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.projection_dim = projection_dim
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.scope = scope
        self.bos_token_id = bos_token_id

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])
        if input_mask is not None:
            input_mask = input_mask.numpy()
            batch_size, seq_length = input_mask.shape
            rnd_start_indices = np.random.randint(1, seq_length - 1, size=(batch_size,))
            for batch_idx, start_index in enumerate(rnd_start_indices):
                input_mask[batch_idx, :start_index] = 1
                input_mask[batch_idx, start_index:] = 0
        config = self.get_config()
        return config, input_ids, tf.convert_to_tensor(input_mask)

    def get_config(self):
        return BlipTextConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            projection_dim=self.projection_dim,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            dropout=self.dropout,
            attention_dropout=self.attention_dropout,
            max_position_embeddings=self.max_position_embeddings,
            initializer_range=self.initializer_range,
            bos_token_id=self.bos_token_id,
        )

    def create_and_check_model(self, config, input_ids, input_mask):
        model = TFBlipTextModel(config=config)
        result = model(input_ids, attention_mask=input_mask, training=False)
        result = model(input_ids, training=False)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, input_mask = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict


@require_tf
class BlipTextModelTest(TFModelTesterMixin, unittest.TestCase):
    all_model_classes = (TFBlipTextModel,) if is_tf_available() else ()
    test_pruning = False
    test_head_masking = False
    test_onnx = False

    def setUp(self):
        self.model_tester = BlipTextModelTester(self)
        self.config_tester = ConfigTester(self, config_class=BlipTextConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_training(self):
        pass

    def test_training_gradient_checkpointing(self):
        pass

    @unittest.skip(reason="Blip does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="BlipTextModel has no base class and is not available in MODEL_MAPPING")
    def test_save_load_fast_init_from_base(self):
        pass

    @unittest.skip(reason="BlipTextModel has no base class and is not available in MODEL_MAPPING")
    def test_save_load_fast_init_to_base(self):
        pass

    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFBlipTextModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    def test_pt_tf_model_equivalence(self, allow_missing_keys=True):
        super().test_pt_tf_model_equivalence(allow_missing_keys=allow_missing_keys)
from copy import deepcopy

import torch
import torch.nn.functional as F
from torch.optim import AdamW
from torch.optim.lr_scheduler import LambdaLR
from torch.utils.data import DataLoader

from accelerate.accelerator import Accelerator
from accelerate.state import GradientState
from accelerate.test_utils import RegressionDataset, RegressionModel
from accelerate.utils import DistributedType, is_torch_version, set_seed


def check_model_parameters(model_a, model_b, did_step, iteration):
    for param, grad_param in zip(model_a.parameters(), model_b.parameters()):
        if not param.requires_grad:
            continue
        if not did_step:
            # Grads should not be in sync
            assert (
                torch.allclose(param.grad, grad_param.grad) is False
            ), f"Gradients in sync when they should not be at iteration {iteration}:\nmodel_a grad ({param.grad}) == model_b grad ({grad_param.grad})"
        else:
            # Grads should be in sync
            assert (
                torch.allclose(param.grad, grad_param.grad) is True
            ), f"Gradients not in sync when they should be at iteration {iteration}:\nmodel_a grad ({param.grad}) != model_b grad ({grad_param.grad})"


def step_model(model, input, target, accelerator, do_backward=False):
    model.train()
    output = model(input)
    loss = F.mse_loss(output, target.to(output.device))
    if not do_backward:
        loss /= accelerator.gradient_accumulation_steps
        loss.backward()
    else:
        accelerator.backward(loss)


def get_training_setup(accelerator, sched=False):
    set_seed(42)
    model = RegressionModel()
    ddp_model = deepcopy(model)
    dset = RegressionDataset(length=80)
    dataloader = DataLoader(dset, batch_size=16)
    model.to(accelerator.device)
    if sched:
        opt = AdamW(params=model.parameters(), lr=1e-3)
        ddp_opt = AdamW(params=ddp_model.parameters(), lr=1e-3)
        sched = LambdaLR(opt, lr_lambda=lambda epoch: epoch**0.65)
        ddp_sched = LambdaLR(ddp_opt, lr_lambda=lambda epoch: epoch**0.65)
    # Make a copy of `model`
    if sched:
        ddp_model, ddp_opt, ddp_sched, dataloader = accelerator.prepare(ddp_model, ddp_opt, ddp_sched, dataloader)
    else:
        ddp_model, dataloader = accelerator.prepare(ddp_model, dataloader)
    if sched:
        return (model, opt, sched, dataloader, ddp_model, ddp_opt, ddp_sched)
    return model, ddp_model, dataloader


def test_noop_sync(accelerator):
    model, ddp_model, dataloader = get_training_setup(accelerator)
    # Use a single batch
    ddp_input, ddp_target = next(iter(dataloader)).values()
    for iteration in range(3):
        # Gather the distributed inputs and targets for the base model
        input, target = accelerator.gather((ddp_input, ddp_target))
        input, target = input.to(accelerator.device), target.to(accelerator.device)
        # Perform our initial ground truth step in non "DDP"
        step_model(model, input, target, accelerator)
        # Do "gradient accumulation" (noop)
        if iteration % 2 == 0:
            # Accumulate grads locally
            with accelerator.no_sync(ddp_model):
                step_model(ddp_model, ddp_input, ddp_target, accelerator)
        else:
            # Sync grads
            step_model(ddp_model, ddp_input, ddp_target, accelerator)

        # Since `no_sync` is a noop, `ddp_model` and `model` grads should always be in sync
        check_model_parameters(model, ddp_model, True, iteration)
        for param, ddp_param in zip(model.parameters(), ddp_model.parameters()):
            if not param.requires_grad:
                continue
            assert torch.allclose(
                param.grad, ddp_param.grad
            ), f"Gradients not in sync when they should be:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})"

        # Shuffle ddp_input on each iteration
        torch.manual_seed(1337 + iteration)
        ddp_input = ddp_input[torch.randperm(len(ddp_input))]


def test_distributed_sync(accelerator):
    model, ddp_model, dataloader = get_training_setup(accelerator)
    # Use a single batch
    ddp_input, ddp_target = next(iter(dataloader)).values()
    for iteration in range(3):
        # Gather the distributed inputs and targets for the base model
        input, target = accelerator.gather((ddp_input, ddp_target))
        input, target = input.to(accelerator.device), target.to(accelerator.device)
        # Perform our initial ground truth step in non "DDP"
        step_model(model, input, target, accelerator)
        # Do "gradient accumulation" (noop)
        if iteration % 2 == 0:
            # Accumulate grads locally
            with accelerator.no_sync(ddp_model):
                step_model(ddp_model, ddp_input, ddp_target, accelerator)
        else:
            # Sync grads
            step_model(ddp_model, ddp_input, ddp_target, accelerator)

        # DDP model and model should only be in sync when not (iteration % 2 == 0)
        for param, ddp_param in zip(model.parameters(), ddp_model.parameters()):
            if not param.requires_grad:
                continue
            if iteration % 2 == 0:
                # Grads should not be in sync
                assert (
                    torch.allclose(param.grad, ddp_param.grad) is False
                ), f"Gradients in sync when they should not be:\nModel grad ({param.grad}) == DDP grad ({ddp_param.grad})"
            else:
                # Grads should be in sync
                assert (
                    torch.allclose(param.grad, ddp_param.grad) is True
                ), f"Gradients not in sync when they should be:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})"

        # Shuffle ddp_input on each iteration
        torch.manual_seed(1337 + iteration)
        ddp_input = ddp_input[torch.randperm(len(ddp_input))]


def test_gradient_accumulation(split_batches=False, dispatch_batches=False):
    accelerator = Accelerator(
        split_batches=split_batches, dispatch_batches=dispatch_batches, gradient_accumulation_steps=2
    )
    # Test that context manager behaves properly
    model, ddp_model, dataloader = get_training_setup(accelerator)
    for iteration, batch in enumerate(dataloader):
        ddp_input, ddp_target = batch.values()
        # Gather the distributed inputs and targets for the base model
        input, target = accelerator.gather((ddp_input, ddp_target))
        input, target = input.to(accelerator.device), target.to(accelerator.device)
        # Perform our initial ground truth step in non "DDP"
        step_model(model, input, target, accelerator, False)
        # Do "gradient accumulation" (noop)
        with accelerator.accumulate(ddp_model):
            step_model(ddp_model, ddp_input, ddp_target, accelerator)
        # DDP model and model should only be in sync when not (iteration % 2 == 0)
        for param, ddp_param in zip(model.parameters(), ddp_model.parameters()):
            if not param.requires_grad:
                continue
            if ((iteration + 1) % 2 == 0) or (iteration == len(dataloader) - 1):
                # Grads should be in sync
                assert (
                    torch.allclose(param.grad, ddp_param.grad) is True
                ), f"Gradients not in sync when they should be at iteration {iteration}:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})"
            else:
                # Grads should not be in sync
                assert (
                    torch.allclose(param.grad, ddp_param.grad) is False
                ), f"Gradients in sync when they should not be at iteration {iteration}:\nModel grad ({param.grad}) == DDP grad ({ddp_param.grad})"

        # Shuffle ddp_input on each iteration
        torch.manual_seed(1337 + iteration)
        ddp_input = ddp_input[torch.randperm(len(ddp_input))]
    GradientState._reset_state()


def test_gradient_accumulation_with_opt_and_scheduler(split_batches=False, dispatch_batches=False):
    accelerator = Accelerator(
        split_batches=split_batches, dispatch_batches=dispatch_batches, gradient_accumulation_steps=2
    )
    # Test that context manager behaves properly
    model, opt, sched, dataloader, ddp_model, ddp_opt, ddp_sched = get_training_setup(accelerator, True)
    for iteration, batch in enumerate(dataloader):
        ddp_input, ddp_target = batch.values()
        # Gather the distributed inputs and targets for the base model
        input, target = accelerator.gather((ddp_input, ddp_target))
        input, target = input.to(accelerator.device), target.to(accelerator.device)
        # Perform our initial ground truth step in non "DDP"
        model.train()
        ddp_model.train()
        step_model(model, input, target, accelerator, False)
        opt.step()
        if ((iteration + 1) % 2 == 0) or ((iteration + 1) == len(dataloader)):
            if split_batches:
                sched.step()
            else:
                for _ in range(accelerator.num_processes):
                    sched.step()
        opt.zero_grad()
        # Perform gradient accumulation under wrapper
        with accelerator.accumulate(ddp_model):
            step_model(ddp_model, ddp_input, ddp_target, accelerator)
            ddp_opt.step()
            ddp_sched.step()
            ddp_opt.zero_grad()

        # Learning rates should be the same
        assert (
            opt.param_groups[0]["lr"] == ddp_opt.param_groups[0]["lr"]
        ), f"Learning rates found in each optimizer did not align\nopt: {opt.param_groups[0]['lr']}\nDDP opt: {ddp_opt.param_groups[0]['lr']}\n"
        did_step = (((iteration + 1) % 2) == 0) or ((iteration + 1) == len(dataloader))
        if accelerator.num_processes > 1:
            check_model_parameters(model, ddp_model, did_step, iteration)
        # Shuffle ddp_input on each iteration
        torch.manual_seed(1337 + iteration)
    GradientState._reset_state()


def test_dataloader_break():
    accelerator = Accelerator()
    first_dset = RegressionDataset(length=80)
    first_dataloader = DataLoader(first_dset, batch_size=16)
    second_dset = RegressionDataset(length=96)
    second_dataloader = DataLoader(second_dset, batch_size=16)
    first_dataloader, second_dataloader = accelerator.prepare(first_dataloader, second_dataloader)
    assert accelerator.gradient_state.active_dataloader is None
    for iteration, _ in enumerate(first_dataloader):
        assert id(accelerator.gradient_state.active_dataloader) == id(first_dataloader)
        if iteration < len(first_dataloader) - 1:
            assert not accelerator.gradient_state.end_of_dataloader
            if iteration == 1:
                for batch_num, _ in enumerate(second_dataloader):
                    assert id(accelerator.gradient_state.active_dataloader) == id(second_dataloader)
                    if batch_num < len(second_dataloader) - 1:
                        assert not accelerator.gradient_state.end_of_dataloader
                    else:
                        assert accelerator.gradient_state.end_of_dataloader
        else:
            assert accelerator.gradient_state.end_of_dataloader
    assert accelerator.gradient_state.active_dataloader is None


def main():
    accelerator = Accelerator()
    state = accelerator.state
    if state.local_process_index == 0:
        print("**Test `accumulate` gradient accumulation with dataloader break**")
    test_dataloader_break()
    if state.distributed_type == DistributedType.NO:
        if state.local_process_index == 0:
            print("**Test NOOP `no_sync` context manager**")
        test_noop_sync(accelerator)
    if state.distributed_type in (DistributedType.MULTI_GPU, DistributedType.MULTI_CPU):
        if state.local_process_index == 0:
            print("**Test Distributed `no_sync` context manager**")
        test_distributed_sync(accelerator)
    if state.distributed_type == DistributedType.MULTI_GPU:
        for split_batch in [True, False]:
            for dispatch_batches in [True, False]:
                if state.local_process_index == 0:
                    print(
                        "**Test `accumulate` gradient accumulation, ",
                        f"`split_batches={split_batch}` and `dispatch_batches={dispatch_batches}`**",
                    )
                test_gradient_accumulation(split_batch, dispatch_batches)
    # Currently will break on torch 2.0 +, need to investigate why
    if is_torch_version("<", "2.0") or state.distributed_type == DistributedType.NO:
        if state.local_process_index == 0:
            print(
                "**Test `accumulate` gradient accumulation with optimizer and scheduler, ",
                "`split_batches=False`, `dispatch_batches=False`**",
            )
        test_gradient_accumulation_with_opt_and_scheduler()
    if state.distributed_type == DistributedType.MULTI_GPU:
        for split_batch in [True, False]:
            for dispatch_batches in [True, False]:
                if not split_batch and not dispatch_batches:
                    continue
                if state.local_process_index == 0:
                    print(
                        "**Test `accumulate` gradient accumulation with optimizer and scheduler, ",
                        f"`split_batches={split_batch}` and `dispatch_batches={dispatch_batches}`**",
                    )
                test_gradient_accumulation_with_opt_and_scheduler(split_batch, dispatch_batches)


def _mp_fn(index):
    main()


if __name__ == "__main__":
    main()
import torch
import torch.nn as nn

from transformers import CLIPConfig, CLIPVisionModel, PreTrainedModel

from ...utils import logging


logger = logging.get_logger(__name__)


def cosine_distance(image_embeds, text_embeds):
    normalized_image_embeds = nn.functional.normalize(image_embeds)
    normalized_text_embeds = nn.functional.normalize(text_embeds)
    return torch.mm(normalized_image_embeds, normalized_text_embeds.t())


class StableDiffusionSafetyChecker(PreTrainedModel):
    config_class = CLIPConfig
    _no_split_modules = ["CLIPEncoderLayer"]

    def __init__(self, config: CLIPConfig):
        super().__init__(config)
        self.vision_model = CLIPVisionModel(config.vision_config)
        self.visual_projection = nn.Linear(config.vision_config.hidden_size, config.projection_dim, bias=False)
        self.concept_embeds = nn.Parameter(torch.ones(17, config.projection_dim), requires_grad=False)
        self.special_care_embeds = nn.Parameter(torch.ones(3, config.projection_dim), requires_grad=False)
        self.concept_embeds_weights = nn.Parameter(torch.ones(17), requires_grad=False)
        self.special_care_embeds_weights = nn.Parameter(torch.ones(3), requires_grad=False)

    @torch.no_grad()
    def forward(self, clip_input, images):
        pooled_output = self.vision_model(clip_input)[1]  # pooled_output
        image_embeds = self.visual_projection(pooled_output)

        # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
        special_cos_dist = cosine_distance(image_embeds, self.special_care_embeds).cpu().float().numpy()
        cos_dist = cosine_distance(image_embeds, self.concept_embeds).cpu().float().numpy()

        result = []
        batch_size = image_embeds.shape[0]
        for i in range(batch_size):
            result_img = {"special_scores": {}, "special_care": [], "concept_scores": {}, "bad_concepts": []}

            # increase this value to create a stronger `nfsw` filter
            # at the cost of increasing the possibility of filtering benign images
            adjustment = 0.0

            for concept_idx in range(len(special_cos_dist[0])):
                concept_cos = special_cos_dist[i][concept_idx]
                concept_threshold = self.special_care_embeds_weights[concept_idx].item()
                result_img["special_scores"][concept_idx] = round(concept_cos - concept_threshold + adjustment, 3)
                if result_img["special_scores"][concept_idx] > 0:
                    result_img["special_care"].append({concept_idx, result_img["special_scores"][concept_idx]})
                    adjustment = 0.01

            for concept_idx in range(len(cos_dist[0])):
                concept_cos = cos_dist[i][concept_idx]
                concept_threshold = self.concept_embeds_weights[concept_idx].item()
                result_img["concept_scores"][concept_idx] = round(concept_cos - concept_threshold + adjustment, 3)
                if result_img["concept_scores"][concept_idx] > 0:
                    result_img["bad_concepts"].append(concept_idx)

            result.append(result_img)

        has_nsfw_concepts = [len(res["bad_concepts"]) > 0 for res in result]
        return images, has_nsfw_concepts

    @torch.no_grad()
    def forward_onnx(self, clip_input: torch.FloatTensor, images: torch.FloatTensor):
        pooled_output = self.vision_model(clip_input)[1]  # pooled_output
        image_embeds = self.visual_projection(pooled_output)

        special_cos_dist = cosine_distance(image_embeds, self.special_care_embeds)
        cos_dist = cosine_distance(image_embeds, self.concept_embeds)

        # increase this value to create a stronger `nsfw` filter
        # at the cost of increasing the possibility of filtering benign images
        adjustment = 0.0

        special_scores = special_cos_dist - self.special_care_embeds_weights + adjustment
        # special_scores = special_scores.round(decimals=3)
        special_care = torch.any(special_scores > 0, dim=1)
        special_adjustment = special_care * 0.01
        special_adjustment = special_adjustment.unsqueeze(1).expand(-1, cos_dist.shape[1])

        concept_scores = (cos_dist - self.concept_embeds_weights) + special_adjustment
        # concept_scores = concept_scores.round(decimals=3)
        has_nsfw_concepts = torch.any(concept_scores > 0, dim=1)

        return images, has_nsfw_concepts
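A tiny numeric check of cosine_distance above (illustrative, not part of the original file): rows are L2-normalized before the matrix product, so the result is a cosine-similarity matrix.

import torch

a = torch.tensor([[1.0, 0.0]])
b = torch.tensor([[0.0, 1.0], [2.0, 0.0]])
print(cosine_distance(a, b))  # tensor([[0., 1.]])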
from typing import Optional, Union

import numpy as np

from ...image_processing_utils import BaseImageProcessor, BatchFeature
from ...image_transforms import get_image_size, pad, rescale, to_channel_dimension_format
from ...image_utils import ChannelDimension, ImageInput, make_list_of_images, to_numpy_array, valid_images
from ...utils import TensorType, logging


logger = logging.get_logger(__name__)


class Swin2SRImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        do_pad: bool = True,
        pad_size: int = 8,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_pad = do_pad
        self.pad_size = pad_size

    def rescale(self, image: np.ndarray, scale: float, data_format=None, **kwargs) -> np.ndarray:
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def pad(self, image: np.ndarray, size: int, data_format=None):
        # Pad the bottom and right of the image so both dimensions become multiples of `size`.
        old_height, old_width = get_image_size(image)
        pad_height = (old_height // size + 1) * size - old_height
        pad_width = (old_width // size + 1) * size - old_width
        return pad(image, ((0, pad_height), (0, pad_width)), mode="symmetric", data_format=data_format)

    def preprocess(
        self,
        images: ImageInput,
        do_rescale: Optional[bool] = None,
        rescale_factor: Optional[float] = None,
        do_pad: Optional[bool] = None,
        pad_size: Optional[int] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: Union[str, ChannelDimension] = ChannelDimension.FIRST,
        **kwargs,
    ):
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_pad = do_pad if do_pad is not None else self.do_pad
        pad_size = pad_size if pad_size is not None else self.pad_size

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )
        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]
        if do_pad:
            images = [self.pad(image, size=pad_size) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
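To make the padding arithmetic concrete, a short sketch (illustrative; exact channel-dimension handling follows transformers' image utilities): a 21 x 17 image with pad_size=8 grows to 24 x 24, since (21 // 8 + 1) * 8 - 21 == 3 and (17 // 8 + 1) * 8 - 17 == 7. Note a dimension already divisible by 8 still gains a full extra block of 8 under this formula.

import numpy as np

processor = Swin2SRImageProcessor(pad_size=8)
image = np.zeros((3, 21, 17), dtype=np.float32)  # channels-first
padded = processor.pad(image, size=8)
print(padded.shape)  # expected (3, 24, 24)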
import argparse

import intel_extension_for_pytorch as ipex
import torch

from diffusers import DPMSolverMultistepScheduler, StableDiffusionPipeline


parser = argparse.ArgumentParser("Stable Diffusion script with intel optimization", add_help=False)
parser.add_argument("--dpm", action="store_true", help="Enable DPMSolver or not")
parser.add_argument("--steps", default=None, type=int, help="Num inference steps")
args = parser.parse_args()

device = "cpu"
prompt = "a lovely <dicoo> in red dress and hat, in the snowly and brightly night, with many brighly buildings"

model_id = "path-to-your-trained-model"
pipe = StableDiffusionPipeline.from_pretrained(model_id)
if args.dpm:
    pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
pipe = pipe.to(device)

# to channels last
pipe.unet = pipe.unet.to(memory_format=torch.channels_last)
pipe.vae = pipe.vae.to(memory_format=torch.channels_last)
pipe.text_encoder = pipe.text_encoder.to(memory_format=torch.channels_last)
if pipe.requires_safety_checker:
    pipe.safety_checker = pipe.safety_checker.to(memory_format=torch.channels_last)

# optimize with ipex
sample = torch.randn(2, 4, 64, 64)
timestep = torch.rand(1) * 999
encoder_hidden_status = torch.randn(2, 77, 768)
input_example = (sample, timestep, encoder_hidden_status)
try:
    pipe.unet = ipex.optimize(pipe.unet.eval(), dtype=torch.bfloat16, inplace=True, sample_input=input_example)
except Exception:
    pipe.unet = ipex.optimize(pipe.unet.eval(), dtype=torch.bfloat16, inplace=True)
pipe.vae = ipex.optimize(pipe.vae.eval(), dtype=torch.bfloat16, inplace=True)
pipe.text_encoder = ipex.optimize(pipe.text_encoder.eval(), dtype=torch.bfloat16, inplace=True)
if pipe.requires_safety_checker:
    pipe.safety_checker = ipex.optimize(pipe.safety_checker.eval(), dtype=torch.bfloat16, inplace=True)

# compute
seed = 666
generator = torch.Generator(device).manual_seed(seed)
generate_kwargs = {"generator": generator}
if args.steps is not None:
    generate_kwargs["num_inference_steps"] = args.steps

with torch.cpu.amp.autocast(enabled=True, dtype=torch.bfloat16):
    image = pipe(prompt, **generate_kwargs).images[0]

# save image
image.save("generated.png")
# this script reports modified .py files under the desired list of top-level sub-dirs passed as a list of arguments, e.g.:
# python ./utils/get_modified_files.py utils src tests examples
#
# it uses git to find the forking point and which files were modified - i.e. files not under git won't be considered
# since the output of this script is fed into Makefile commands it doesn't print a newline after the results

import re
import subprocess
import sys


fork_point_sha = subprocess.check_output("git merge-base main HEAD".split()).decode("utf-8")
modified_files = (
    subprocess.check_output(f"git diff --diff-filter=d --name-only {fork_point_sha}".split()).decode("utf-8").split()
)

joined_dirs = "|".join(sys.argv[1:])
regex = re.compile(rf"^({joined_dirs}).*?\.py$")

relevant_modified_files = [x for x in modified_files if regex.match(x)]
print(" ".join(relevant_modified_files), end="")
ERROR_MSG = "Input must be a string of 8 numbers plus letter"
LOOKUP_LETTERS = "TRWAGMYFPDXBNJZSQVHLCKE"


def is_spain_national_id(spanish_id: str) -> bool:
    """Validate a Spanish DNI: 8 digits plus a control letter derived from number % 23."""
    if not isinstance(spanish_id, str):
        msg = f"Expected string as input, found {type(spanish_id).__name__}"
        raise TypeError(msg)

    spanish_id_clean = spanish_id.replace("-", "").upper()
    if len(spanish_id_clean) != 9:
        raise ValueError(ERROR_MSG)

    try:
        number = int(spanish_id_clean[0:8])
        letter = spanish_id_clean[8]
    except ValueError as ex:
        raise ValueError(ERROR_MSG) from ex

    if letter.isdigit():
        raise ValueError(ERROR_MSG)

    return letter == LOOKUP_LETTERS[number % 23]


if __name__ == "__main__":
    import doctest

    doctest.testmod()
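A worked example (illustrative, not in the original file): 12345678 % 23 == 14 and LOOKUP_LETTERS[14] == 'Z', so the canonical sample DNI validates.

assert is_spain_national_id("12345678Z") is True
assert is_spain_national_id("12345678-Z") is True  # hyphens are stripped before checking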
from typing import Dict, List, Optional, Union

import numpy as np

from .feature_extraction_utils import BatchFeature, FeatureExtractionMixin
from .utils import PaddingStrategy, TensorType, is_tf_tensor, is_torch_tensor, logging, to_numpy


logger = logging.get_logger(__name__)


class SequenceFeatureExtractor(FeatureExtractionMixin):
    def __init__(self, feature_size: int, sampling_rate: int, padding_value: float, **kwargs):
        self.feature_size = feature_size
        self.sampling_rate = sampling_rate
        self.padding_value = padding_value

        self.padding_side = kwargs.pop("padding_side", "right")
        self.return_attention_mask = kwargs.pop("return_attention_mask", True)

        super().__init__(**kwargs)

    def pad(
        self,
        processed_features: Union[
            BatchFeature,
            List[BatchFeature],
            Dict[str, BatchFeature],
            Dict[str, List[BatchFeature]],
            List[Dict[str, BatchFeature]],
        ],
        padding: Union[bool, str, PaddingStrategy] = True,
        max_length: Optional[int] = None,
        truncation: bool = False,
        pad_to_multiple_of: Optional[int] = None,
        return_attention_mask: Optional[bool] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
    ) -> BatchFeature:
        # If we have a list of dicts, convert it to a dict of lists so this
        # method can be used as a collate_fn in a PyTorch DataLoader.
        if isinstance(processed_features, (list, tuple)) and isinstance(processed_features[0], (dict, BatchFeature)):
            processed_features = {
                key: [example[key] for example in processed_features] for key in processed_features[0].keys()
            }

        # The model's main input name, usually `input_values`, has to be passed for padding
        if self.model_input_names[0] not in processed_features:
            raise ValueError(
                "You should supply an instance of `transformers.BatchFeature` or list of `transformers.BatchFeature`"
                f" to this method that includes {self.model_input_names[0]}, but you provided"
                f" {list(processed_features.keys())}"
            )

        required_input = processed_features[self.model_input_names[0]]
        return_attention_mask = (
            return_attention_mask if return_attention_mask is not None else self.return_attention_mask
        )

        if len(required_input) == 0:
            if return_attention_mask:
                processed_features["attention_mask"] = []
            return processed_features

        # If we have PyTorch/TF tensors or lists as inputs, we cast them as Numpy arrays
        # and rebuild them afterwards if no return_tensors is specified
        # Note that we lose the specific device the tensor may be on for PyTorch
        first_element = required_input[0]
        if isinstance(first_element, (list, tuple)):
            # first_element might be an empty list/tuple in some edge cases so we grab the first non empty element.
            index = 0
            while len(required_input[index]) == 0:
                index += 1
            if index < len(required_input):
                first_element = required_input[index][0]

        if return_tensors is None:
            if is_tf_tensor(first_element):
                return_tensors = "tf"
            elif is_torch_tensor(first_element):
                return_tensors = "pt"
            elif isinstance(first_element, (int, float, list, tuple, np.ndarray)):
                return_tensors = "np"
            else:
                raise ValueError(
                    f"type of {first_element} unknown: {type(first_element)}. "
                    "Should be one of a python, numpy, pytorch or tensorflow object."
                )

        for key, value in processed_features.items():
            if isinstance(value[0], (int, float)):
                processed_features[key] = to_numpy(value)
            else:
                processed_features[key] = [to_numpy(v) for v in value]

        # Convert padding_strategy in PaddingStrategy
        padding_strategy = self._get_padding_strategies(padding=padding, max_length=max_length)

        required_input = processed_features[self.model_input_names[0]]
        batch_size = len(required_input)
        if not all(len(v) == batch_size for v in processed_features.values()):
            raise ValueError("Some items in the output dictionary have a different batch size than others.")

        truncated_inputs = []
        for i in range(batch_size):
            inputs = {k: v[i] for k, v in processed_features.items()}
            # truncation
            inputs_slice = self._truncate(
                inputs,
                max_length=max_length,
                pad_to_multiple_of=pad_to_multiple_of,
                truncation=truncation,
            )
            truncated_inputs.append(inputs_slice)

        if padding_strategy == PaddingStrategy.LONGEST:
            # make sure that `max_length` cannot be longer than the longest truncated length
            max_length = max(len(input_slice[self.model_input_names[0]]) for input_slice in truncated_inputs)
            padding_strategy = PaddingStrategy.MAX_LENGTH

        batch_outputs = {}
        for i in range(batch_size):
            # padding
            outputs = self._pad(
                truncated_inputs[i],
                max_length=max_length,
                padding_strategy=padding_strategy,
                pad_to_multiple_of=pad_to_multiple_of,
                return_attention_mask=return_attention_mask,
            )
            for key, value in outputs.items():
                if key not in batch_outputs:
                    batch_outputs[key] = []
                if value.dtype is np.dtype(np.float64):
                    value = value.astype(np.float32)
                batch_outputs[key].append(value)

        return BatchFeature(batch_outputs, tensor_type=return_tensors)

    def _pad(
        self,
        processed_features: Union[Dict[str, np.ndarray], BatchFeature],
        max_length: Optional[int] = None,
        padding_strategy: PaddingStrategy = PaddingStrategy.DO_NOT_PAD,
        pad_to_multiple_of: Optional[int] = None,
        return_attention_mask: Optional[bool] = None,
    ) -> dict:
        required_input = processed_features[self.model_input_names[0]]

        if padding_strategy == PaddingStrategy.LONGEST:
            max_length = len(required_input)

        if max_length is not None and pad_to_multiple_of is not None and (max_length % pad_to_multiple_of != 0):
            max_length = ((max_length // pad_to_multiple_of) + 1) * pad_to_multiple_of

        needs_to_be_padded = padding_strategy != PaddingStrategy.DO_NOT_PAD and len(required_input) < max_length

        if return_attention_mask and "attention_mask" not in processed_features:
            processed_features["attention_mask"] = np.ones(len(required_input), dtype=np.int32)

        if needs_to_be_padded:
            difference = max_length - len(required_input)
            if self.padding_side == "right":
                if return_attention_mask:
                    processed_features["attention_mask"] = np.pad(
                        processed_features["attention_mask"], (0, difference)
                    )
                padding_shape = ((0, difference), (0, 0)) if self.feature_size > 1 else (0, difference)
                processed_features[self.model_input_names[0]] = np.pad(
                    required_input, padding_shape, "constant", constant_values=self.padding_value
                )
            elif self.padding_side == "left":
                if return_attention_mask:
                    processed_features["attention_mask"] = np.pad(
                        processed_features["attention_mask"], (difference, 0)
                    )
                padding_shape = ((difference, 0), (0, 0)) if self.feature_size > 1 else (difference, 0)
                processed_features[self.model_input_names[0]] = np.pad(
                    required_input, padding_shape, "constant", constant_values=self.padding_value
                )
            else:
                raise ValueError("Invalid padding strategy:" + str(self.padding_side))

        return processed_features

    def _truncate(
        self,
        processed_features: Union[Dict[str, np.ndarray], BatchFeature],
        max_length: Optional[int] = None,
        pad_to_multiple_of: Optional[int] = None,
        truncation: Optional[bool] = None,
    ):
        if not truncation:
            return processed_features
        elif truncation and max_length is None:
            raise ValueError("When setting ``truncation=True``, make sure that ``max_length`` is defined.")

        required_input = processed_features[self.model_input_names[0]]

        # find `max_length` that fits `pad_to_multiple_of`
        if max_length is not None and pad_to_multiple_of is not None and (max_length % pad_to_multiple_of != 0):
            max_length = ((max_length // pad_to_multiple_of) + 1) * pad_to_multiple_of

        needs_to_be_truncated = len(required_input) > max_length
        if needs_to_be_truncated:
            processed_features[self.model_input_names[0]] = processed_features[self.model_input_names[0]][:max_length]
            if "attention_mask" in processed_features:
                processed_features["attention_mask"] = processed_features["attention_mask"][:max_length]

        return processed_features

    def _get_padding_strategies(self, padding=False, max_length=None):
        if padding is not False:
            if padding is True:
                padding_strategy = PaddingStrategy.LONGEST  # Default to pad to the longest sequence in the batch
            elif not isinstance(padding, PaddingStrategy):
                padding_strategy = PaddingStrategy(padding)
            elif isinstance(padding, PaddingStrategy):
                padding_strategy = padding
        else:
            padding_strategy = PaddingStrategy.DO_NOT_PAD

        # Set max length if needed
        if max_length is None:
            if padding_strategy == PaddingStrategy.MAX_LENGTH:
                raise ValueError(
                    f"When setting ``padding={PaddingStrategy.MAX_LENGTH}``, make sure that max_length is defined"
                )

        # Test if we have a padding value
        if padding_strategy != PaddingStrategy.DO_NOT_PAD and (self.padding_value is None):
            raise ValueError(
                "Asking to pad but the feature_extractor does not have a padding value. Please select a value to use"
                " as `padding_value`. For example: `feature_extractor.padding_value = 0.0`."
            )

        return padding_strategy
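A sketch of how a concrete subclass exercises pad (illustrative; Wav2Vec2FeatureExtractor is one such subclass in transformers, and the exact constructor defaults are assumptions here):

from transformers import Wav2Vec2FeatureExtractor

fe = Wav2Vec2FeatureExtractor(feature_size=1, sampling_rate=16000, padding_value=0.0)
batch = [{"input_values": [0.1, 0.2, 0.3]}, {"input_values": [0.4]}]
padded = fe.pad(batch, padding="longest", return_attention_mask=True, return_tensors="np")
print(padded["input_values"].shape)  # (2, 3): the shorter item is right-padded with 0.0
print(padded["attention_mask"])      # [[1 1 1], [1 0 0]]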
def logical_left_shift(number: int, shift_amount: int) -> str:
    """Append zeros on the right: equivalent to number << shift_amount."""
    if number < 0 or shift_amount < 0:
        raise ValueError("both inputs must be positive integers")

    binary_number = str(bin(number))
    binary_number += "0" * shift_amount
    return binary_number


def logical_right_shift(number: int, shift_amount: int) -> str:
    """Drop bits on the right: equivalent to number >> shift_amount for non-negative numbers."""
    if number < 0 or shift_amount < 0:
        raise ValueError("both inputs must be positive integers")

    binary_number = str(bin(number))[2:]
    if shift_amount >= len(binary_number):
        return "0b0"
    shifted_binary_number = binary_number[: len(binary_number) - shift_amount]
    return "0b" + shifted_binary_number


def arithmetic_right_shift(number: int, shift_amount: int) -> str:
    """Shift right while replicating the sign bit (two's complement for negative numbers)."""
    if number >= 0:
        # Get binary representation of positive number
        binary_number = "0" + str(bin(number)).strip("-")[2:]
    else:
        # Get binary (2's complement) representation of negative number
        binary_number_length = len(bin(number)[3:])  # Find 2's complement of number
        binary_number = bin(abs(number) - (1 << binary_number_length))[3:]
        binary_number = (
            "1" + "0" * (binary_number_length - len(binary_number)) + binary_number
        )

    if shift_amount >= len(binary_number):
        return "0b" + binary_number[0] * len(binary_number)
    return (
        "0b"
        + binary_number[0] * shift_amount
        + binary_number[: len(binary_number) - shift_amount]
    )


if __name__ == "__main__":
    import doctest

    doctest.testmod()
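Worked examples for the three shifts (illustrative additions, not part of the original file):

print(logical_left_shift(5, 2))       # 0b10100  (5 << 2 == 20)
print(logical_right_shift(20, 2))     # 0b101    (20 >> 2 == 5)
print(arithmetic_right_shift(-8, 1))  # 0b11100  (the sign bit is replicated)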
import argparse import hashlib import os import urllib import warnings import torch from torch import nn from tqdm import tqdm from transformers import WhisperConfig, WhisperForConditionalGeneration __snake_case = { '''tiny.en''': '''https://openaipublic.azureedge.net/main/whisper/models/d3dd57d32accea0b295c96e26691aa14d8822fac7d9d27d5dc00b4ca2826dd03/tiny.en.pt''', '''tiny''': '''https://openaipublic.azureedge.net/main/whisper/models/65147644a518d12f04e32d6f3b26facc3f8dd46e5390956a9424a650c0ce22b9/tiny.pt''', '''base.en''': '''https://openaipublic.azureedge.net/main/whisper/models/25a8566e1d0c1e2231d1c762132cd20e0f96a85d16145c3a00adf5d1ac670ead/base.en.pt''', '''base''': '''https://openaipublic.azureedge.net/main/whisper/models/ed3a0b6b1c0edf879ad9b11b1af5a0e6ab5db9205f891f668f8b0e6c6326e34e/base.pt''', '''small.en''': '''https://openaipublic.azureedge.net/main/whisper/models/f953ad0fd29cacd07d5a9eda5624af0f6bcf2258be67c92b79389873d91e0872/small.en.pt''', '''small''': '''https://openaipublic.azureedge.net/main/whisper/models/9ecf779972d90ba49c06d968637d720dd632c55bbf19d441fb42bf17a411e794/small.pt''', '''medium.en''': '''https://openaipublic.azureedge.net/main/whisper/models/d7440d1dc186f76616474e0ff0b3b6b879abc9d1a4926b7adfa41db2d497ab4f/medium.en.pt''', '''medium''': '''https://openaipublic.azureedge.net/main/whisper/models/345ae4da62f9b3d59415adc60127b97c714f32e89e936602e85993674d08dcb1/medium.pt''', '''large''': '''https://openaipublic.azureedge.net/main/whisper/models/e4b87e7e0bf463eb8e6956e646f1e277e901512310def2c24bf0e11bd3c28e9a/large.pt''', '''large-v2''': '''https://openaipublic.azureedge.net/main/whisper/models/81f7c96c852ee8fc832187b0132e569d6c3065a3252ed18e56effd0b6a73e524/large-v2.pt''', } def lowerCAmelCase_ ( __lowerCAmelCase )-> Optional[int]: '''simple docstring''' UpperCAmelCase : List[Any] =['''layers''', '''blocks'''] for k in ignore_keys: state_dict.pop(__lowerCAmelCase , __lowerCAmelCase ) __snake_case = { '''blocks''': '''layers''', '''mlp.0''': '''fc1''', '''mlp.2''': '''fc2''', '''mlp_ln''': '''final_layer_norm''', '''.attn.query''': '''.self_attn.q_proj''', '''.attn.key''': '''.self_attn.k_proj''', '''.attn.value''': '''.self_attn.v_proj''', '''.attn_ln''': '''.self_attn_layer_norm''', '''.attn.out''': '''.self_attn.out_proj''', '''.cross_attn.query''': '''.encoder_attn.q_proj''', '''.cross_attn.key''': '''.encoder_attn.k_proj''', '''.cross_attn.value''': '''.encoder_attn.v_proj''', '''.cross_attn_ln''': '''.encoder_attn_layer_norm''', '''.cross_attn.out''': '''.encoder_attn.out_proj''', '''decoder.ln.''': '''decoder.layer_norm.''', '''encoder.ln.''': '''encoder.layer_norm.''', '''token_embedding''': '''embed_tokens''', '''encoder.positional_embedding''': '''encoder.embed_positions.weight''', '''decoder.positional_embedding''': '''decoder.embed_positions.weight''', '''ln_post''': '''layer_norm''', } def lowerCAmelCase_ ( __lowerCAmelCase )-> Optional[Any]: '''simple docstring''' UpperCAmelCase : Any =list(s_dict.keys() ) for key in keys: UpperCAmelCase : str =key for k, v in WHISPER_MAPPING.items(): if k in key: UpperCAmelCase : Optional[Any] =new_key.replace(__lowerCAmelCase , __lowerCAmelCase ) print(f'''{key} -> {new_key}''' ) UpperCAmelCase : List[str] =s_dict.pop(__lowerCAmelCase ) return s_dict def lowerCAmelCase_ ( __lowerCAmelCase )-> List[Any]: '''simple docstring''' UpperCAmelCase , UpperCAmelCase : List[Any] =emb.weight.shape UpperCAmelCase : Optional[int] =nn.Linear(__lowerCAmelCase , __lowerCAmelCase , bias=__lowerCAmelCase ) UpperCAmelCase : 
Union[str, Any] =emb.weight.data return lin_layer def lowerCAmelCase_ ( __lowerCAmelCase , __lowerCAmelCase )-> bytes: '''simple docstring''' os.makedirs(__lowerCAmelCase , exist_ok=__lowerCAmelCase ) UpperCAmelCase : int =os.path.basename(__lowerCAmelCase ) UpperCAmelCase : Dict =url.split('''/''' )[-2] UpperCAmelCase : str =os.path.join(__lowerCAmelCase , __lowerCAmelCase ) if os.path.exists(__lowerCAmelCase ) and not os.path.isfile(__lowerCAmelCase ): raise RuntimeError(f'''{download_target} exists and is not a regular file''' ) if os.path.isfile(__lowerCAmelCase ): UpperCAmelCase : List[Any] =open(__lowerCAmelCase , '''rb''' ).read() if hashlib.shaaaa(__lowerCAmelCase ).hexdigest() == expected_shaaaa: return model_bytes else: warnings.warn(f'''{download_target} exists, but the SHA256 checksum does not match; re-downloading the file''' ) with urllib.request.urlopen(__lowerCAmelCase ) as source, open(__lowerCAmelCase , '''wb''' ) as output: with tqdm( total=int(source.info().get('''Content-Length''' ) ) , ncols=80 , unit='''iB''' , unit_scale=__lowerCAmelCase , unit_divisor=10_24 ) as loop: while True: UpperCAmelCase : Dict =source.read(81_92 ) if not buffer: break output.write(__lowerCAmelCase ) loop.update(len(__lowerCAmelCase ) ) UpperCAmelCase : Tuple =open(__lowerCAmelCase , '''rb''' ).read() if hashlib.shaaaa(__lowerCAmelCase ).hexdigest() != expected_shaaaa: raise RuntimeError( '''Model has been downloaded but the SHA256 checksum does not not match. Please retry loading the model.''' ) return model_bytes def lowerCAmelCase_ ( __lowerCAmelCase , __lowerCAmelCase )-> str: '''simple docstring''' if ".pt" not in checkpoint_path: UpperCAmelCase : Tuple =_download(_MODELS[checkpoint_path] ) else: UpperCAmelCase : str =torch.load(__lowerCAmelCase , map_location='''cpu''' ) UpperCAmelCase : Any =original_checkpoint['''dims'''] UpperCAmelCase : int =original_checkpoint['''model_state_dict'''] UpperCAmelCase : Optional[Any] =state_dict['''decoder.token_embedding.weight'''] remove_ignore_keys_(__lowerCAmelCase ) rename_keys(__lowerCAmelCase ) UpperCAmelCase : Any =True UpperCAmelCase : Tuple =state_dict['''decoder.layers.0.fc1.weight'''].shape[0] UpperCAmelCase : int =WhisperConfig( vocab_size=dimensions['''n_vocab'''] , encoder_ffn_dim=__lowerCAmelCase , decoder_ffn_dim=__lowerCAmelCase , num_mel_bins=dimensions['''n_mels'''] , d_model=dimensions['''n_audio_state'''] , max_target_positions=dimensions['''n_text_ctx'''] , encoder_layers=dimensions['''n_audio_layer'''] , encoder_attention_heads=dimensions['''n_audio_head'''] , decoder_layers=dimensions['''n_text_layer'''] , decoder_attention_heads=dimensions['''n_text_state'''] , max_source_positions=dimensions['''n_audio_ctx'''] , ) UpperCAmelCase : List[Any] =WhisperForConditionalGeneration(__lowerCAmelCase ) UpperCAmelCase , UpperCAmelCase : List[Any] =model.model.load_state_dict(__lowerCAmelCase , strict=__lowerCAmelCase ) if len(__lowerCAmelCase ) > 0 and not set(__lowerCAmelCase ) <= { "encoder.embed_positions.weights", "decoder.embed_positions.weights", }: raise ValueError( '''Only `encoder.embed_positions.weights` and `decoder.embed_positions.weights` are allowed to be missing,''' f''' but all the following weights are missing {missing}''' ) if tie_embeds: UpperCAmelCase : Any =make_linear_from_emb(model.model.decoder.embed_tokens ) else: UpperCAmelCase : Dict =proj_out_weights model.save_pretrained(__lowerCAmelCase ) if __name__ == "__main__": __snake_case = argparse.ArgumentParser() # # Required parameters 
parser.add_argument('''--checkpoint_path''', type=str, help='''Path to the downloaded checkpoints''')
parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
args = parser.parse_args()

convert_openai_whisper_to_tfms(args.checkpoint_path, args.pytorch_dump_folder_path)
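# Usage sketch for the converter above (the script file name is an assumption;
# the flags follow the argparse interface just defined). Passing a bare size
# such as "tiny" makes the script fetch the weights from _MODELS first:
#
#   python convert_openai_to_hf.py --checkpoint_path tiny --pytorch_dump_folder_path ./whisper-tiny-hf
#
# Assuming that run succeeded, the dump folder is a regular transformers
# checkpoint and can be reloaded directly:
from transformers import WhisperForConditionalGeneration

model = WhisperForConditionalGeneration.from_pretrained("./whisper-tiny-hf")
print(model.config.max_source_positions)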
from dataclasses import asdict, dataclass from typing import Optional from ...configuration_utils import PretrainedConfig from ...utils import logging __snake_case = logging.get_logger(__name__) # TODO Update this __snake_case = { '''facebook/esm-1b''': '''https://huggingface.co/facebook/esm-1b/resolve/main/config.json''', # See all ESM models at https://huggingface.co/models?filter=esm } class __snake_case ( lowerCamelCase__ ): __lowerCamelCase : Tuple = """esm""" def __init__( self , snake_case__=None , snake_case__=None , snake_case__=None , snake_case__=768 , snake_case__=12 , snake_case__=12 , snake_case__=3072 , snake_case__=0.1 , snake_case__=0.1 , snake_case__=1026 , snake_case__=0.02 , snake_case__=1e-12 , snake_case__="absolute" , snake_case__=True , snake_case__=None , snake_case__=False , snake_case__=False , snake_case__=None , snake_case__=None , **snake_case__ , ) -> Union[str, Any]: '''simple docstring''' super().__init__(pad_token_id=snake_case__ , mask_token_id=snake_case__ , **snake_case__ ) UpperCAmelCase : List[str] =vocab_size UpperCAmelCase : str =hidden_size UpperCAmelCase : List[Any] =num_hidden_layers UpperCAmelCase : Optional[Any] =num_attention_heads UpperCAmelCase : str =intermediate_size UpperCAmelCase : Any =hidden_dropout_prob UpperCAmelCase : int =attention_probs_dropout_prob UpperCAmelCase : Dict =max_position_embeddings UpperCAmelCase : List[str] =initializer_range UpperCAmelCase : Union[str, Any] =layer_norm_eps UpperCAmelCase : Dict =position_embedding_type UpperCAmelCase : Optional[Any] =use_cache UpperCAmelCase : int =emb_layer_norm_before UpperCAmelCase : List[str] =token_dropout UpperCAmelCase : Optional[Any] =is_folding_model if is_folding_model: if esmfold_config is None: logger.info('''No esmfold_config supplied for folding model, using default values.''' ) UpperCAmelCase : Optional[Any] =EsmFoldConfig() elif isinstance(snake_case__ , snake_case__ ): UpperCAmelCase : Optional[int] =EsmFoldConfig(**snake_case__ ) UpperCAmelCase : Tuple =esmfold_config if vocab_list is None: logger.warning('''No vocab_list supplied for folding model, assuming the ESM-2 vocabulary!''' ) UpperCAmelCase : Any =get_default_vocab_list() else: UpperCAmelCase : Tuple =vocab_list else: UpperCAmelCase : Optional[int] =None UpperCAmelCase : Union[str, Any] =None if self.esmfold_config is not None and getattr(self.esmfold_config , '''use_esm_attn_map''' , snake_case__ ): raise ValueError('''The HuggingFace port of ESMFold does not support use_esm_attn_map at this time!''' ) def UpperCAmelCase__ ( self ) -> List[Any]: '''simple docstring''' UpperCAmelCase : Union[str, Any] =super().to_dict() if isinstance(self.esmfold_config , snake_case__ ): UpperCAmelCase : str =self.esmfold_config.to_dict() return output @dataclass class __snake_case : __lowerCamelCase : str = None __lowerCamelCase : bool = True __lowerCamelCase : bool = False __lowerCamelCase : bool = False __lowerCamelCase : bool = False __lowerCamelCase : float = 0 __lowerCamelCase : bool = True __lowerCamelCase : bool = False __lowerCamelCase : int = 128 __lowerCamelCase : "TrunkConfig" = None def UpperCAmelCase__ ( self ) -> Union[str, Any]: '''simple docstring''' if self.trunk is None: UpperCAmelCase : str =TrunkConfig() elif isinstance(self.trunk , snake_case__ ): UpperCAmelCase : Optional[int] =TrunkConfig(**self.trunk ) def UpperCAmelCase__ ( self ) -> Any: '''simple docstring''' UpperCAmelCase : Optional[Any] =asdict(self ) UpperCAmelCase : Any =self.trunk.to_dict() return output @dataclass class __snake_case : 
__lowerCamelCase : int = 48 __lowerCamelCase : int = 1024 __lowerCamelCase : int = 128 __lowerCamelCase : int = 32 __lowerCamelCase : int = 32 __lowerCamelCase : int = 32 __lowerCamelCase : float = 0 __lowerCamelCase : float = 0 __lowerCamelCase : bool = False __lowerCamelCase : int = 4 __lowerCamelCase : Optional[int] = 128 __lowerCamelCase : "StructureModuleConfig" = None def UpperCAmelCase__ ( self ) -> List[str]: '''simple docstring''' if self.structure_module is None: UpperCAmelCase : Any =StructureModuleConfig() elif isinstance(self.structure_module , snake_case__ ): UpperCAmelCase : str =StructureModuleConfig(**self.structure_module ) if self.max_recycles <= 0: raise ValueError(f'''`max_recycles` should be positive, got {self.max_recycles}.''' ) if self.sequence_state_dim % self.sequence_head_width != 0: raise ValueError( '''`sequence_state_dim` should be a round multiple of `sequence_head_width`, got''' f''' {self.sequence_state_dim} and {self.sequence_head_width}.''' ) if self.pairwise_state_dim % self.pairwise_head_width != 0: raise ValueError( '''`pairwise_state_dim` should be a round multiple of `pairwise_head_width`, got''' f''' {self.pairwise_state_dim} and {self.pairwise_head_width}.''' ) UpperCAmelCase : Optional[int] =self.sequence_state_dim // self.sequence_head_width UpperCAmelCase : Any =self.pairwise_state_dim // self.pairwise_head_width if self.sequence_state_dim != sequence_num_heads * self.sequence_head_width: raise ValueError( '''`sequence_state_dim` should be equal to `sequence_num_heads * sequence_head_width, got''' f''' {self.sequence_state_dim} != {sequence_num_heads} * {self.sequence_head_width}.''' ) if self.pairwise_state_dim != pairwise_num_heads * self.pairwise_head_width: raise ValueError( '''`pairwise_state_dim` should be equal to `pairwise_num_heads * pairwise_head_width, got''' f''' {self.pairwise_state_dim} != {pairwise_num_heads} * {self.pairwise_head_width}.''' ) if self.pairwise_state_dim % 2 != 0: raise ValueError(f'''`pairwise_state_dim` should be even, got {self.pairwise_state_dim}.''' ) if self.dropout >= 0.4: raise ValueError(f'''`dropout` should not be greater than 0.4, got {self.dropout}.''' ) def UpperCAmelCase__ ( self ) -> List[Any]: '''simple docstring''' UpperCAmelCase : Union[str, Any] =asdict(self ) UpperCAmelCase : Tuple =self.structure_module.to_dict() return output @dataclass class __snake_case : __lowerCamelCase : int = 384 __lowerCamelCase : int = 128 __lowerCamelCase : int = 16 __lowerCamelCase : int = 128 __lowerCamelCase : int = 12 __lowerCamelCase : int = 4 __lowerCamelCase : int = 8 __lowerCamelCase : float = 0.1 __lowerCamelCase : int = 8 __lowerCamelCase : int = 1 __lowerCamelCase : int = 2 __lowerCamelCase : int = 7 __lowerCamelCase : int = 10 __lowerCamelCase : float = 1E-8 __lowerCamelCase : float = 1E5 def UpperCAmelCase__ ( self ) -> Union[str, Any]: '''simple docstring''' return asdict(self ) def lowerCAmelCase_ ( )-> Tuple: '''simple docstring''' return ( "<cls>", "<pad>", "<eos>", "<unk>", "L", "A", "G", "V", "S", "E", "R", "T", "I", "D", "P", "K", "Q", "N", "F", "Y", "M", "H", "W", "C", "X", "B", "U", "Z", "O", ".", "-", "<null_1>", "<mask>", )
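# Minimal sketch of how this configuration is exercised, assuming the mangled
# class names above correspond to transformers' EsmConfig / EsmFoldConfig (the
# hyperparameter values below are illustrative, not the ESM-1b defaults):
from transformers import EsmConfig

config = EsmConfig(
    vocab_size=33,
    hidden_size=768,
    num_hidden_layers=12,
    num_attention_heads=12,
    is_folding_model=False,
)
print(config.to_dict()["hidden_size"])  # 768; to_dict() also nests esmfold_config when present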
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ..models.auto import AutoModelForSeq2SeqLM, AutoTokenizer
from .base import PipelineTool


class TextSummarizationTool(PipelineTool):
    default_checkpoint = "philschmid/bart-large-cnn-samsum"
    description = (
        "This is a tool that summarizes an English text. It takes an input `text` containing the text to summarize, "
        "and returns a summary of the text."
    )
    name = "summarizer"
    pre_processor_class = AutoTokenizer
    model_class = AutoModelForSeq2SeqLM

    inputs = ["text"]
    outputs = ["text"]

    def encode(self, text):
        return self.pre_processor(text, return_tensors="pt", truncation=True)

    def forward(self, inputs):
        return self.model.generate(**inputs)[0]

    def decode(self, outputs):
        return self.pre_processor.decode(outputs, skip_special_tokens=True, clean_up_tokenization_spaces=True)
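# The encode/forward/decode hooks above are the pieces PipelineTool.__call__
# chains together; a rough manual equivalent with the same checkpoint is
# sketched below (the sample text is made up):
from transformers import AutoModelForSeq2SeqLM, AutoTokenizer

checkpoint = "philschmid/bart-large-cnn-samsum"
tokenizer = AutoTokenizer.from_pretrained(checkpoint)
model = AutoModelForSeq2SeqLM.from_pretrained(checkpoint)

inputs = tokenizer("A long chat transcript to be summarized ...", return_tensors="pt", truncation=True)
summary_ids = model.generate(**inputs)[0]
print(tokenizer.decode(summary_ids, skip_special_tokens=True, clean_up_tokenization_spaces=True))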
import torch from diffusers import KDPMaDiscreteScheduler from diffusers.utils import torch_device from .test_schedulers import SchedulerCommonTest class __snake_case ( lowerCamelCase__ ): __lowerCamelCase : Optional[int] = (KDPMaDiscreteScheduler,) __lowerCamelCase : List[str] = 10 def UpperCAmelCase__ ( self , **snake_case__ ) -> str: '''simple docstring''' UpperCAmelCase : int ={ '''num_train_timesteps''': 1100, '''beta_start''': 0.0001, '''beta_end''': 0.02, '''beta_schedule''': '''linear''', } config.update(**snake_case__ ) return config def UpperCAmelCase__ ( self ) -> Tuple: '''simple docstring''' for timesteps in [10, 50, 100, 1000]: self.check_over_configs(num_train_timesteps=snake_case__ ) def UpperCAmelCase__ ( self ) -> Optional[int]: '''simple docstring''' for beta_start, beta_end in zip([0.0_0001, 0.0001, 0.001] , [0.0002, 0.002, 0.02] ): self.check_over_configs(beta_start=snake_case__ , beta_end=snake_case__ ) def UpperCAmelCase__ ( self ) -> str: '''simple docstring''' for schedule in ["linear", "scaled_linear"]: self.check_over_configs(beta_schedule=snake_case__ ) def UpperCAmelCase__ ( self ) -> Dict: '''simple docstring''' for prediction_type in ["epsilon", "v_prediction"]: self.check_over_configs(prediction_type=snake_case__ ) def UpperCAmelCase__ ( self ) -> str: '''simple docstring''' UpperCAmelCase : Optional[Any] =self.scheduler_classes[0] UpperCAmelCase : Optional[int] =self.get_scheduler_config(prediction_type='''v_prediction''' ) UpperCAmelCase : Optional[Any] =scheduler_class(**snake_case__ ) scheduler.set_timesteps(self.num_inference_steps ) UpperCAmelCase : str =self.dummy_model() UpperCAmelCase : Optional[Any] =self.dummy_sample_deter * scheduler.init_noise_sigma UpperCAmelCase : Union[str, Any] =sample.to(snake_case__ ) for i, t in enumerate(scheduler.timesteps ): UpperCAmelCase : str =scheduler.scale_model_input(snake_case__ , snake_case__ ) UpperCAmelCase : Any =model(snake_case__ , snake_case__ ) UpperCAmelCase : Union[str, Any] =scheduler.step(snake_case__ , snake_case__ , snake_case__ ) UpperCAmelCase : int =output.prev_sample UpperCAmelCase : Dict =torch.sum(torch.abs(snake_case__ ) ) UpperCAmelCase : Optional[Any] =torch.mean(torch.abs(snake_case__ ) ) if torch_device in ["cpu", "mps"]: assert abs(result_sum.item() - 4.69_34e-07 ) < 1e-2 assert abs(result_mean.item() - 6.11_12e-10 ) < 1e-3 else: # CUDA assert abs(result_sum.item() - 4.6_93_42_86_50_17_09_72e-07 ) < 1e-2 assert abs(result_mean.item() - 0.0002 ) < 1e-3 def UpperCAmelCase__ ( self ) -> int: '''simple docstring''' if torch_device == "mps": return UpperCAmelCase : Any =self.scheduler_classes[0] UpperCAmelCase : Optional[int] =self.get_scheduler_config() UpperCAmelCase : Optional[Any] =scheduler_class(**snake_case__ ) scheduler.set_timesteps(self.num_inference_steps ) UpperCAmelCase : Optional[int] =self.dummy_model() UpperCAmelCase : Union[str, Any] =self.dummy_sample_deter * scheduler.init_noise_sigma UpperCAmelCase : str =sample.to(snake_case__ ) for i, t in enumerate(scheduler.timesteps ): UpperCAmelCase : Dict =scheduler.scale_model_input(snake_case__ , snake_case__ ) UpperCAmelCase : Union[str, Any] =model(snake_case__ , snake_case__ ) UpperCAmelCase : List[str] =scheduler.step(snake_case__ , snake_case__ , snake_case__ ) UpperCAmelCase : Optional[int] =output.prev_sample UpperCAmelCase : Any =torch.sum(torch.abs(snake_case__ ) ) UpperCAmelCase : Union[str, Any] =torch.mean(torch.abs(snake_case__ ) ) if torch_device in ["cpu", "mps"]: assert abs(result_sum.item() - 20.4125 ) < 1e-2 
assert abs(result_mean.item() - 0.0266 ) < 1e-3 else: # CUDA assert abs(result_sum.item() - 20.4125 ) < 1e-2 assert abs(result_mean.item() - 0.0266 ) < 1e-3 def UpperCAmelCase__ ( self ) -> str: '''simple docstring''' if torch_device == "mps": return UpperCAmelCase : List[Any] =self.scheduler_classes[0] UpperCAmelCase : Dict =self.get_scheduler_config() UpperCAmelCase : List[str] =scheduler_class(**snake_case__ ) scheduler.set_timesteps(self.num_inference_steps , device=snake_case__ ) UpperCAmelCase : int =self.dummy_model() UpperCAmelCase : Tuple =self.dummy_sample_deter.to(snake_case__ ) * scheduler.init_noise_sigma for t in scheduler.timesteps: UpperCAmelCase : Optional[Any] =scheduler.scale_model_input(snake_case__ , snake_case__ ) UpperCAmelCase : int =model(snake_case__ , snake_case__ ) UpperCAmelCase : str =scheduler.step(snake_case__ , snake_case__ , snake_case__ ) UpperCAmelCase : List[str] =output.prev_sample UpperCAmelCase : List[str] =torch.sum(torch.abs(snake_case__ ) ) UpperCAmelCase : Dict =torch.mean(torch.abs(snake_case__ ) ) if str(snake_case__ ).startswith('''cpu''' ): # The following sum varies between 148 and 156 on mps. Why? assert abs(result_sum.item() - 20.4125 ) < 1e-2 assert abs(result_mean.item() - 0.0266 ) < 1e-3 else: # CUDA assert abs(result_sum.item() - 20.4125 ) < 1e-2 assert abs(result_mean.item() - 0.0266 ) < 1e-3
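# The denoising loop the tests above exercise, shown standalone. This assumes
# the mangled scheduler name corresponds to diffusers' KDPM2DiscreteScheduler;
# the zero tensor stands in for a real UNet's noise prediction:
import torch
from diffusers import KDPM2DiscreteScheduler

scheduler = KDPM2DiscreteScheduler(
    num_train_timesteps=1100, beta_start=0.0001, beta_end=0.02, beta_schedule="linear"
)
scheduler.set_timesteps(10)

sample = torch.randn(1, 3, 8, 8) * scheduler.init_noise_sigma
for t in scheduler.timesteps:
    model_input = scheduler.scale_model_input(sample, t)
    noise_pred = torch.zeros_like(model_input)  # stand-in for model(model_input, t)
    sample = scheduler.step(noise_pred, t, sample).prev_sample
print(sample.shape)  # torch.Size([1, 3, 8, 8])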
from collections import OrderedDict from typing import Mapping from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging __snake_case = logging.get_logger(__name__) __snake_case = { '''kssteven/ibert-roberta-base''': '''https://huggingface.co/kssteven/ibert-roberta-base/resolve/main/config.json''', '''kssteven/ibert-roberta-large''': '''https://huggingface.co/kssteven/ibert-roberta-large/resolve/main/config.json''', '''kssteven/ibert-roberta-large-mnli''': ( '''https://huggingface.co/kssteven/ibert-roberta-large-mnli/resolve/main/config.json''' ), } class __snake_case ( lowerCamelCase__ ): __lowerCamelCase : Union[str, Any] = """ibert""" def __init__( self , snake_case__=3_0522 , snake_case__=768 , snake_case__=12 , snake_case__=12 , snake_case__=3072 , snake_case__="gelu" , snake_case__=0.1 , snake_case__=0.1 , snake_case__=512 , snake_case__=2 , snake_case__=0.02 , snake_case__=1e-12 , snake_case__=1 , snake_case__=0 , snake_case__=2 , snake_case__="absolute" , snake_case__=False , snake_case__="none" , **snake_case__ , ) -> List[Any]: '''simple docstring''' super().__init__(pad_token_id=snake_case__ , bos_token_id=snake_case__ , eos_token_id=snake_case__ , **snake_case__ ) UpperCAmelCase : Union[str, Any] =vocab_size UpperCAmelCase : Any =hidden_size UpperCAmelCase : List[Any] =num_hidden_layers UpperCAmelCase : Tuple =num_attention_heads UpperCAmelCase : Optional[Any] =hidden_act UpperCAmelCase : int =intermediate_size UpperCAmelCase : str =hidden_dropout_prob UpperCAmelCase : List[Any] =attention_probs_dropout_prob UpperCAmelCase : Union[str, Any] =max_position_embeddings UpperCAmelCase : Tuple =type_vocab_size UpperCAmelCase : Optional[int] =initializer_range UpperCAmelCase : int =layer_norm_eps UpperCAmelCase : Optional[int] =position_embedding_type UpperCAmelCase : Any =quant_mode UpperCAmelCase : List[str] =force_dequant class __snake_case ( lowerCamelCase__ ): @property def UpperCAmelCase__ ( self ) -> Mapping[str, Mapping[int, str]]: '''simple docstring''' if self.task == "multiple-choice": UpperCAmelCase : str ={0: '''batch''', 1: '''choice''', 2: '''sequence'''} else: UpperCAmelCase : Optional[Any] ={0: '''batch''', 1: '''sequence'''} return OrderedDict( [ ('''input_ids''', dynamic_axis), ('''attention_mask''', dynamic_axis), ] )
import unittest from transformers import is_flax_available from transformers.testing_utils import require_flax, require_sentencepiece, require_tokenizers, require_torch, slow if is_flax_available(): import optax from flax.training.common_utils import onehot from transformers import AutoTokenizer, FlaxMTaForConditionalGeneration from transformers.models.ta.modeling_flax_ta import shift_tokens_right @require_torch @require_sentencepiece @require_tokenizers @require_flax class __snake_case ( unittest.TestCase ): @slow def UpperCAmelCase__ ( self ) -> Union[str, Any]: '''simple docstring''' UpperCAmelCase : Any =FlaxMTaForConditionalGeneration.from_pretrained('''google/mt5-small''' ) UpperCAmelCase : Tuple =AutoTokenizer.from_pretrained('''google/mt5-small''' ) UpperCAmelCase : List[str] =tokenizer('''Hello there''' , return_tensors='''np''' ).input_ids UpperCAmelCase : List[Any] =tokenizer('''Hi I am''' , return_tensors='''np''' ).input_ids UpperCAmelCase : Union[str, Any] =shift_tokens_right(snake_case__ , model.config.pad_token_id , model.config.decoder_start_token_id ) UpperCAmelCase : List[str] =model(snake_case__ , decoder_input_ids=snake_case__ ).logits UpperCAmelCase : Any =optax.softmax_cross_entropy(snake_case__ , onehot(snake_case__ , logits.shape[-1] ) ).mean() UpperCAmelCase : Union[str, Any] =-(labels.shape[-1] * loss.item()) UpperCAmelCase : List[str] =-84.9127 self.assertTrue(abs(mtf_score - EXPECTED_SCORE ) < 1e-4 )
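# Illustration of the decoder-input shift used in the test above. The semantics
# are assumed from the Flax T5 implementation: shift right, prepend
# decoder_start_token_id, and replace any -100 label padding with pad_token_id:
import numpy as np


def shift_right(input_ids, pad_token_id, decoder_start_token_id):
    shifted = np.zeros_like(input_ids)
    shifted[:, 1:] = input_ids[:, :-1]
    shifted[:, 0] = decoder_start_token_id
    return np.where(shifted == -100, pad_token_id, shifted)


print(shift_right(np.array([[5, 6, 7]]), pad_token_id=0, decoder_start_token_id=0))
# [[0 5 6]]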
import pandas as pd
from matplotlib import pyplot as plt
from sklearn.linear_model import LinearRegression

# Splitting the dataset into the Training set and Test set
from sklearn.model_selection import train_test_split

# Fitting Polynomial Regression to the dataset
from sklearn.preprocessing import PolynomialFeatures

# Importing the dataset
dataset = pd.read_csv(
    "https://s3.us-west-2.amazonaws.com/public.gamelab.fun/dataset/"
    "position_salaries.csv"
)
X = dataset.iloc[:, 1:2].values
y = dataset.iloc[:, 2].values

X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=0)

poly_reg = PolynomialFeatures(degree=4)
X_poly = poly_reg.fit_transform(X)
pol_reg = LinearRegression()
pol_reg.fit(X_poly, y)


# Visualizing the Polynomial Regression results
def viz_polynomial() -> None:
    plt.scatter(X, y, color="red")
    plt.plot(X, pol_reg.predict(poly_reg.fit_transform(X)), color="blue")
    plt.title("Truth or Bluff (Linear Regression)")
    plt.xlabel("Position level")
    plt.ylabel("Salary")
    plt.show()


if __name__ == "__main__":
    viz_polynomial()

    # Predicting a new result with Polynomial Regression
    pol_reg.predict(poly_reg.fit_transform([[5.5]]))
    # output should be 132148.43750003
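# What PolynomialFeatures(degree=4) actually feeds the linear model: for a
# single input feature x it emits [1, x, x**2, x**3, x**4]. A tiny check:
import numpy as np
from sklearn.preprocessing import PolynomialFeatures

print(PolynomialFeatures(degree=4).fit_transform(np.array([[2.0]])))
# [[ 1.  2.  4.  8. 16.]]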
import unittest import numpy as np from diffusers import LMSDiscreteScheduler, OnnxStableDiffusionInpaintPipeline from diffusers.utils.testing_utils import ( is_onnx_available, load_image, nightly, require_onnxruntime, require_torch_gpu, ) from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin if is_onnx_available(): import onnxruntime as ort class __snake_case ( lowerCamelCase__ , unittest.TestCase ): # FIXME: add fast tests pass @nightly @require_onnxruntime @require_torch_gpu class __snake_case ( unittest.TestCase ): @property def UpperCAmelCase__ ( self ) -> List[str]: '''simple docstring''' return ( "CUDAExecutionProvider", { "gpu_mem_limit": "15000000000", # 15GB "arena_extend_strategy": "kSameAsRequested", }, ) @property def UpperCAmelCase__ ( self ) -> int: '''simple docstring''' UpperCAmelCase : List[Any] =ort.SessionOptions() UpperCAmelCase : Optional[int] =False return options def UpperCAmelCase__ ( self ) -> Dict: '''simple docstring''' UpperCAmelCase : int =load_image( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main''' '''/in_paint/overture-creations-5sI6fQgYIuo.png''' ) UpperCAmelCase : Optional[Any] =load_image( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main''' '''/in_paint/overture-creations-5sI6fQgYIuo_mask.png''' ) UpperCAmelCase : List[str] =OnnxStableDiffusionInpaintPipeline.from_pretrained( '''runwayml/stable-diffusion-inpainting''' , revision='''onnx''' , safety_checker=snake_case__ , feature_extractor=snake_case__ , provider=self.gpu_provider , sess_options=self.gpu_options , ) pipe.set_progress_bar_config(disable=snake_case__ ) UpperCAmelCase : Dict ='''A red cat sitting on a park bench''' UpperCAmelCase : int =np.random.RandomState(0 ) UpperCAmelCase : Any =pipe( prompt=snake_case__ , image=snake_case__ , mask_image=snake_case__ , guidance_scale=7.5 , num_inference_steps=10 , generator=snake_case__ , output_type='''np''' , ) UpperCAmelCase : Dict =output.images UpperCAmelCase : Optional[int] =images[0, 255:258, 255:258, -1] assert images.shape == (1, 512, 512, 3) UpperCAmelCase : Tuple =np.array([0.2514, 0.3007, 0.3517, 0.1790, 0.2382, 0.3167, 0.1944, 0.2273, 0.2464] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3 def UpperCAmelCase__ ( self ) -> Optional[Any]: '''simple docstring''' UpperCAmelCase : List[str] =load_image( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main''' '''/in_paint/overture-creations-5sI6fQgYIuo.png''' ) UpperCAmelCase : Tuple =load_image( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main''' '''/in_paint/overture-creations-5sI6fQgYIuo_mask.png''' ) UpperCAmelCase : List[str] =LMSDiscreteScheduler.from_pretrained( '''runwayml/stable-diffusion-inpainting''' , subfolder='''scheduler''' , revision='''onnx''' ) UpperCAmelCase : int =OnnxStableDiffusionInpaintPipeline.from_pretrained( '''runwayml/stable-diffusion-inpainting''' , revision='''onnx''' , scheduler=snake_case__ , safety_checker=snake_case__ , feature_extractor=snake_case__ , provider=self.gpu_provider , sess_options=self.gpu_options , ) pipe.set_progress_bar_config(disable=snake_case__ ) UpperCAmelCase : Union[str, Any] ='''A red cat sitting on a park bench''' UpperCAmelCase : int =np.random.RandomState(0 ) UpperCAmelCase : str =pipe( prompt=snake_case__ , image=snake_case__ , mask_image=snake_case__ , guidance_scale=7.5 , num_inference_steps=20 , generator=snake_case__ , output_type='''np''' , ) UpperCAmelCase : 
Dict =output.images UpperCAmelCase : int =images[0, 255:258, 255:258, -1] assert images.shape == (1, 512, 512, 3) UpperCAmelCase : Union[str, Any] =np.array([0.0086, 0.0077, 0.0083, 0.0093, 0.0107, 0.0139, 0.0094, 0.0097, 0.0125] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
from collections.abc import Sequence


def max_subsequence_sum(nums: Sequence[int] | None = None) -> int:
    if nums is None or not nums:
        raise ValueError("Input sequence should not be empty")

    ans = nums[0]
    for i in range(1, len(nums)):
        num = nums[i]
        ans = max(ans, ans + num, num)

    return ans


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    # Try on a sample input from the user
    n = int(input("Enter number of elements : ").strip())
    array = list(map(int, input("\nEnter the numbers : ").strip().split()))[:n]
    print(max_subsequence_sum(array))
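# Quick sanity checks for max_subsequence_sum above ("subsequence" means the
# chosen elements need not be contiguous, so positives can be cherry-picked):
print(max_subsequence_sum([1, 2, 3, 4, -2]))  # 10 (picks 1 + 2 + 3 + 4)
print(max_subsequence_sum([-2, -3, -1, -4]))  # -1 (best single element)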
from unittest import TestCase

from datasets import Dataset

from minhash_deduplication import deduplicate_dataset, make_duplicate_clusters


def get_dataset():
    data_dict = {
        "repo_name": ["test_repo1", "test_repo2", "test_repo3"],
        "path": ["test_1.py", "test_2.py", "unit_test.py"],
        "content": ["a " * 20, "a " * 30, "b " * 7],
    }
    dataset = Dataset.from_dict(data_dict)
    return dataset


class MakeDuplicateClustersTest(TestCase):
    def test_make_duplicate_clusters(self):
        ds = get_dataset()
        duplicate_clusters = make_duplicate_clusters(ds, 0.85)
        self.assertEqual(len(duplicate_clusters[0]), 2)

    def test_deduplicate_dataset(self):
        ds = get_dataset()
        ds_filter, duplicate_clusters = deduplicate_dataset(ds)
        self.assertEqual(len(ds_filter), 2)
        print(duplicate_clusters)
        self.assertEqual(duplicate_clusters[0][0]["copies"], 2)
        self.assertEqual(duplicate_clusters[0][0]["is_extreme"], True)
import coval # From: git+https://github.com/ns-moosavi/coval.git # noqa: F401 from coval.conll import reader, util from coval.eval import evaluator import datasets __snake_case = datasets.logging.get_logger(__name__) __snake_case = '''\ @InProceedings{moosavi2019minimum, author = { Nafise Sadat Moosavi, Leo Born, Massimo Poesio and Michael Strube}, title = {Using Automatically Extracted Minimum Spans to Disentangle Coreference Evaluation from Boundary Detection}, year = {2019}, booktitle = {Proceedings of the 57th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers)}, publisher = {Association for Computational Linguistics}, address = {Florence, Italy}, } @inproceedings{10.3115/1072399.1072405, author = {Vilain, Marc and Burger, John and Aberdeen, John and Connolly, Dennis and Hirschman, Lynette}, title = {A Model-Theoretic Coreference Scoring Scheme}, year = {1995}, isbn = {1558604022}, publisher = {Association for Computational Linguistics}, address = {USA}, url = {https://doi.org/10.3115/1072399.1072405}, doi = {10.3115/1072399.1072405}, booktitle = {Proceedings of the 6th Conference on Message Understanding}, pages = {45–52}, numpages = {8}, location = {Columbia, Maryland}, series = {MUC6 ’95} } @INPROCEEDINGS{Bagga98algorithmsfor, author = {Amit Bagga and Breck Baldwin}, title = {Algorithms for Scoring Coreference Chains}, booktitle = {In The First International Conference on Language Resources and Evaluation Workshop on Linguistics Coreference}, year = {1998}, pages = {563--566} } @INPROCEEDINGS{Luo05oncoreference, author = {Xiaoqiang Luo}, title = {On coreference resolution performance metrics}, booktitle = {In Proc. of HLT/EMNLP}, year = {2005}, pages = {25--32}, publisher = {URL} } @inproceedings{moosavi-strube-2016-coreference, title = "Which Coreference Evaluation Metric Do You Trust? A Proposal for a Link-based Entity Aware Metric", author = "Moosavi, Nafise Sadat and Strube, Michael", booktitle = "Proceedings of the 54th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers)", month = aug, year = "2016", address = "Berlin, Germany", publisher = "Association for Computational Linguistics", url = "https://www.aclweb.org/anthology/P16-1060", doi = "10.18653/v1/P16-1060", pages = "632--642", } ''' __snake_case = '''\ CoVal is a coreference evaluation tool for the CoNLL and ARRAU datasets which implements of the common evaluation metrics including MUC [Vilain et al, 1995], B-cubed [Bagga and Baldwin, 1998], CEAFe [Luo et al., 2005], LEA [Moosavi and Strube, 2016] and the averaged CoNLL score (the average of the F1 values of MUC, B-cubed and CEAFe) [Denis and Baldridge, 2009a; Pradhan et al., 2011]. This wrapper of CoVal currently only work with CoNLL line format: The CoNLL format has one word per line with all the annotation for this word in column separated by spaces: Column Type Description 1 Document ID This is a variation on the document filename 2 Part number Some files are divided into multiple parts numbered as 000, 001, 002, ... etc. 3 Word number 4 Word itself This is the token as segmented/tokenized in the Treebank. Initially the *_skel file contain the placeholder [WORD] which gets replaced by the actual token from the Treebank which is part of the OntoNotes release. 5 Part-of-Speech 6 Parse bit This is the bracketed structure broken before the first open parenthesis in the parse, and the word/part-of-speech leaf replaced with a *. 
The full parse can be created by substituting the asterix with the "([pos] [word])" string (or leaf) and concatenating the items in the rows of that column. 7 Predicate lemma The predicate lemma is mentioned for the rows for which we have semantic role information. All other rows are marked with a "-" 8 Predicate Frameset ID This is the PropBank frameset ID of the predicate in Column 7. 9 Word sense This is the word sense of the word in Column 3. 10 Speaker/Author This is the speaker or author name where available. Mostly in Broadcast Conversation and Web Log data. 11 Named Entities These columns identifies the spans representing various named entities. 12:N Predicate Arguments There is one column each of predicate argument structure information for the predicate mentioned in Column 7. N Coreference Coreference chain information encoded in a parenthesis structure. More informations on the format can be found here (section "*_conll File Format"): http://www.conll.cemantix.org/2012/data.html Details on the evaluation on CoNLL can be found here: https://github.com/ns-moosavi/coval/blob/master/conll/README.md CoVal code was written by @ns-moosavi. Some parts are borrowed from https://github.com/clarkkev/deep-coref/blob/master/evaluation.py The test suite is taken from https://github.com/conll/reference-coreference-scorers/ Mention evaluation and the test suite are added by @andreasvc. Parsing CoNLL files is developed by Leo Born. ''' __snake_case = ''' Calculates coreference evaluation metrics. Args: predictions: list of sentences. Each sentence is a list of word predictions to score in the CoNLL format. Each prediction is a word with its annotations as a string made of columns joined with spaces. Only columns 4, 5, 6 and the last column are used (word, POS, Pars and coreference annotation) See the details on the format in the description of the metric. references: list of sentences. Each sentence is a list of word reference to score in the CoNLL format. Each reference is a word with its annotations as a string made of columns joined with spaces. Only columns 4, 5, 6 and the last column are used (word, POS, Pars and coreference annotation) See the details on the format in the description of the metric. keep_singletons: After extracting all mentions of key or system files, mentions whose corresponding coreference chain is of size one, are considered as singletons. The default evaluation mode will include singletons in evaluations if they are included in the key or the system files. By setting \'keep_singletons=False\', all singletons in the key and system files will be excluded from the evaluation. NP_only: Most of the recent coreference resolvers only resolve NP mentions and leave out the resolution of VPs. By setting the \'NP_only\' option, the scorer will only evaluate the resolution of NPs. min_span: By setting \'min_span\', the scorer reports the results based on automatically detected minimum spans. Minimum spans are determined using the MINA algorithm. Returns: \'mentions\': mentions \'muc\': MUC metric [Vilain et al, 1995] \'bcub\': B-cubed [Bagga and Baldwin, 1998] \'ceafe\': CEAFe [Luo et al., 2005] \'lea\': LEA [Moosavi and Strube, 2016] \'conll_score\': averaged CoNLL score (the average of the F1 values of MUC, B-cubed and CEAFe) Examples: >>> coval = datasets.load_metric(\'coval\') >>> words = [\'bc/cctv/00/cctv_0005 0 0 Thank VBP (TOP(S(VP* thank 01 1 Xu_li * (V*) * -\', ... \'bc/cctv/00/cctv_0005 0 1 you PRP (NP*) - - - Xu_li * (ARG1*) (ARG0*) (116)\', ... 
\'bc/cctv/00/cctv_0005 0 2 everyone NN (NP*) - - - Xu_li * (ARGM-DIS*) * (116)\', ... \'bc/cctv/00/cctv_0005 0 3 for IN (PP* - - - Xu_li * (ARG2* * -\', ... \'bc/cctv/00/cctv_0005 0 4 watching VBG (S(VP*)))) watch 01 1 Xu_li * *) (V*) -\', ... \'bc/cctv/00/cctv_0005 0 5 . . *)) - - - Xu_li * * * -\'] >>> references = [words] >>> predictions = [words] >>> results = coval.compute(predictions=predictions, references=references) >>> print(results) # doctest:+ELLIPSIS {\'mentions/recall\': 1.0,[...] \'conll_score\': 100.0} ''' def lowerCAmelCase_ ( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase=False , __lowerCAmelCase=False , __lowerCAmelCase=True , __lowerCAmelCase=False , __lowerCAmelCase="dummy_doc" )-> List[Any]: '''simple docstring''' UpperCAmelCase : Union[str, Any] ={doc: key_lines} UpperCAmelCase : Union[str, Any] ={doc: sys_lines} UpperCAmelCase : Optional[Any] ={} UpperCAmelCase : Dict =0 UpperCAmelCase : Tuple =0 UpperCAmelCase : Optional[int] =0 UpperCAmelCase : int =0 UpperCAmelCase : str =0 UpperCAmelCase : Dict =0 UpperCAmelCase , UpperCAmelCase : Optional[Any] =reader.get_doc_mentions(__lowerCAmelCase , key_doc_lines[doc] , __lowerCAmelCase ) key_singletons_num += singletons_num if NP_only or min_span: UpperCAmelCase : Optional[Any] =reader.set_annotated_parse_trees(__lowerCAmelCase , key_doc_lines[doc] , __lowerCAmelCase , __lowerCAmelCase ) UpperCAmelCase , UpperCAmelCase : List[Any] =reader.get_doc_mentions(__lowerCAmelCase , sys_doc_lines[doc] , __lowerCAmelCase ) sys_singletons_num += singletons_num if NP_only or min_span: UpperCAmelCase : Optional[int] =reader.set_annotated_parse_trees(__lowerCAmelCase , key_doc_lines[doc] , __lowerCAmelCase , __lowerCAmelCase ) if remove_nested: UpperCAmelCase , UpperCAmelCase : List[str] =reader.remove_nested_coref_mentions(__lowerCAmelCase , __lowerCAmelCase ) key_nested_coref_num += nested_mentions key_removed_nested_clusters += removed_clusters UpperCAmelCase , UpperCAmelCase : List[str] =reader.remove_nested_coref_mentions(__lowerCAmelCase , __lowerCAmelCase ) sys_nested_coref_num += nested_mentions sys_removed_nested_clusters += removed_clusters UpperCAmelCase : Dict =reader.get_mention_assignments(__lowerCAmelCase , __lowerCAmelCase ) UpperCAmelCase : Union[str, Any] =reader.get_mention_assignments(__lowerCAmelCase , __lowerCAmelCase ) UpperCAmelCase : List[str] =(key_clusters, sys_clusters, key_mention_sys_cluster, sys_mention_key_cluster) if remove_nested: logger.info( '''Number of removed nested coreferring mentions in the key ''' f'''annotation: {key_nested_coref_num}; and system annotation: {sys_nested_coref_num}''' ) logger.info( '''Number of resulting singleton clusters in the key ''' f'''annotation: {key_removed_nested_clusters}; and system annotation: {sys_removed_nested_clusters}''' ) if not keep_singletons: logger.info( f'''{key_singletons_num:d} and {sys_singletons_num:d} singletons are removed from the key and system ''' '''files, respectively''' ) return doc_coref_infos def lowerCAmelCase_ ( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )-> Any: '''simple docstring''' UpperCAmelCase : str =get_coref_infos(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) UpperCAmelCase : Union[str, Any] ={} UpperCAmelCase : Dict =0 UpperCAmelCase : Any =0 for name, metric in metrics: UpperCAmelCase , UpperCAmelCase , UpperCAmelCase : int 
=evaluator.evaluate_documents(__lowerCAmelCase , __lowerCAmelCase , beta=1 ) if name in ["muc", "bcub", "ceafe"]: conll += fa conll_subparts_num += 1 output_scores.update({f'''{name}/recall''': recall, f'''{name}/precision''': precision, f'''{name}/f1''': fa} ) logger.info( name.ljust(10 ) , f'''Recall: {recall * 1_00:.2f}''' , f''' Precision: {precision * 1_00:.2f}''' , f''' F1: {fa * 1_00:.2f}''' , ) if conll_subparts_num == 3: UpperCAmelCase : Union[str, Any] =(conll / 3) * 1_00 logger.info(f'''CoNLL score: {conll:.2f}''' ) output_scores.update({'''conll_score''': conll} ) return output_scores def lowerCAmelCase_ ( __lowerCAmelCase )-> Optional[Any]: '''simple docstring''' UpperCAmelCase : str =False for line in key_lines: if not line.startswith('''#''' ): if len(line.split() ) > 6: UpperCAmelCase : Union[str, Any] =line.split()[5] if not parse_col == "-": UpperCAmelCase : Tuple =True break else: break return has_gold_parse @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class __snake_case ( datasets.Metric ): def UpperCAmelCase__ ( self ) -> List[str]: '''simple docstring''' return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { '''predictions''': datasets.Sequence(datasets.Value('''string''' ) ), '''references''': datasets.Sequence(datasets.Value('''string''' ) ), } ) , codebase_urls=['''https://github.com/ns-moosavi/coval'''] , reference_urls=[ '''https://github.com/ns-moosavi/coval''', '''https://www.aclweb.org/anthology/P16-1060''', '''http://www.conll.cemantix.org/2012/data.html''', ] , ) def UpperCAmelCase__ ( self , snake_case__ , snake_case__ , snake_case__=True , snake_case__=False , snake_case__=False , snake_case__=False ) -> Tuple: '''simple docstring''' UpperCAmelCase : List[Any] =[ ('''mentions''', evaluator.mentions), ('''muc''', evaluator.muc), ('''bcub''', evaluator.b_cubed), ('''ceafe''', evaluator.ceafe), ('''lea''', evaluator.lea), ] if min_span: UpperCAmelCase : Dict =util.check_gold_parse_annotation(snake_case__ ) if not has_gold_parse: raise NotImplementedError('''References should have gold parse annotation to use \'min_span\'.''' ) # util.parse_key_file(key_file) # key_file = key_file + ".parsed" UpperCAmelCase : Any =evaluate( key_lines=snake_case__ , sys_lines=snake_case__ , metrics=snake_case__ , NP_only=snake_case__ , remove_nested=snake_case__ , keep_singletons=snake_case__ , min_span=snake_case__ , ) return score
from typing import Callable, List, Optional, Tuple, Union import torch from transformers import CLIPTextModel, CLIPTokenizer from ...configuration_utils import ConfigMixin, register_to_config from ...models import ModelMixin, TransformeraDModel, VQModel from ...schedulers import VQDiffusionScheduler from ...utils import logging from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput __snake_case = logging.get_logger(__name__) # pylint: disable=invalid-name class __snake_case ( lowerCamelCase__ , lowerCamelCase__ ): @register_to_config def __init__( self , snake_case__ , snake_case__ = None , snake_case__ = None ) -> str: '''simple docstring''' super().__init__() UpperCAmelCase : Optional[Any] =learnable if self.learnable: assert hidden_size is not None, "learnable=True requires `hidden_size` to be set" assert length is not None, "learnable=True requires `length` to be set" UpperCAmelCase : Any =torch.zeros(snake_case__ , snake_case__ ) else: UpperCAmelCase : Union[str, Any] =None UpperCAmelCase : Optional[int] =torch.nn.Parameter(snake_case__ ) class __snake_case ( lowerCamelCase__ ): __lowerCamelCase : VQModel __lowerCamelCase : CLIPTextModel __lowerCamelCase : CLIPTokenizer __lowerCamelCase : TransformeraDModel __lowerCamelCase : LearnedClassifierFreeSamplingEmbeddings __lowerCamelCase : VQDiffusionScheduler def __init__( self , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , ) -> int: '''simple docstring''' super().__init__() self.register_modules( vqvae=snake_case__ , transformer=snake_case__ , text_encoder=snake_case__ , tokenizer=snake_case__ , scheduler=snake_case__ , learned_classifier_free_sampling_embeddings=snake_case__ , ) def UpperCAmelCase__ ( self , snake_case__ , snake_case__ , snake_case__ ) -> Optional[int]: '''simple docstring''' UpperCAmelCase : int =len(snake_case__ ) if isinstance(snake_case__ , snake_case__ ) else 1 # get prompt text embeddings UpperCAmelCase : Optional[int] =self.tokenizer( snake_case__ , padding='''max_length''' , max_length=self.tokenizer.model_max_length , return_tensors='''pt''' , ) UpperCAmelCase : int =text_inputs.input_ids if text_input_ids.shape[-1] > self.tokenizer.model_max_length: UpperCAmelCase : List[str] =self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :] ) logger.warning( '''The following part of your input was truncated because CLIP can only handle sequences up to''' f''' {self.tokenizer.model_max_length} tokens: {removed_text}''' ) UpperCAmelCase : Optional[Any] =text_input_ids[:, : self.tokenizer.model_max_length] UpperCAmelCase : List[Any] =self.text_encoder(text_input_ids.to(self.device ) )[0] # NOTE: This additional step of normalizing the text embeddings is from VQ-Diffusion. # While CLIP does normalize the pooled output of the text transformer when combining # the image and text embeddings, CLIP does not directly normalize the last hidden state. # # CLIP normalizing the pooled output. 
# https://github.com/huggingface/transformers/blob/d92e22d1f28324f513f3080e5c47c071a3916721/src/transformers/models/clip/modeling_clip.py#L1052-L1053 UpperCAmelCase : int =prompt_embeds / prompt_embeds.norm(dim=-1 , keepdim=snake_case__ ) # duplicate text embeddings for each generation per prompt UpperCAmelCase : int =prompt_embeds.repeat_interleave(snake_case__ , dim=0 ) if do_classifier_free_guidance: if self.learned_classifier_free_sampling_embeddings.learnable: UpperCAmelCase : Optional[int] =self.learned_classifier_free_sampling_embeddings.embeddings UpperCAmelCase : str =negative_prompt_embeds.unsqueeze(0 ).repeat(snake_case__ , 1 , 1 ) else: UpperCAmelCase : str =[''''''] * batch_size UpperCAmelCase : Tuple =text_input_ids.shape[-1] UpperCAmelCase : Optional[Any] =self.tokenizer( snake_case__ , padding='''max_length''' , max_length=snake_case__ , truncation=snake_case__ , return_tensors='''pt''' , ) UpperCAmelCase : Optional[Any] =self.text_encoder(uncond_input.input_ids.to(self.device ) )[0] # See comment for normalizing text embeddings UpperCAmelCase : Optional[int] =negative_prompt_embeds / negative_prompt_embeds.norm(dim=-1 , keepdim=snake_case__ ) # duplicate unconditional embeddings for each generation per prompt, using mps friendly method UpperCAmelCase : Optional[Any] =negative_prompt_embeds.shape[1] UpperCAmelCase : Union[str, Any] =negative_prompt_embeds.repeat(1 , snake_case__ , 1 ) UpperCAmelCase : Optional[Any] =negative_prompt_embeds.view(batch_size * num_images_per_prompt , snake_case__ , -1 ) # For classifier free guidance, we need to do two forward passes. # Here we concatenate the unconditional and text embeddings into a single batch # to avoid doing two forward passes UpperCAmelCase : int =torch.cat([negative_prompt_embeds, prompt_embeds] ) return prompt_embeds @torch.no_grad() def __call__( self , snake_case__ , snake_case__ = 100 , snake_case__ = 5.0 , snake_case__ = 1.0 , snake_case__ = 1 , snake_case__ = None , snake_case__ = None , snake_case__ = "pil" , snake_case__ = True , snake_case__ = None , snake_case__ = 1 , ) -> Union[ImagePipelineOutput, Tuple]: '''simple docstring''' if isinstance(snake_case__ , snake_case__ ): UpperCAmelCase : Optional[int] =1 elif isinstance(snake_case__ , snake_case__ ): UpperCAmelCase : Tuple =len(snake_case__ ) else: raise ValueError(f'''`prompt` has to be of type `str` or `list` but is {type(snake_case__ )}''' ) UpperCAmelCase : Tuple =batch_size * num_images_per_prompt UpperCAmelCase : List[str] =guidance_scale > 1.0 UpperCAmelCase : List[Any] =self._encode_prompt(snake_case__ , snake_case__ , snake_case__ ) if (callback_steps is None) or ( callback_steps is not None and (not isinstance(snake_case__ , snake_case__ ) or callback_steps <= 0) ): raise ValueError( f'''`callback_steps` has to be a positive integer but is {callback_steps} of type''' f''' {type(snake_case__ )}.''' ) # get the initial completely masked latents unless the user supplied it UpperCAmelCase : int =(batch_size, self.transformer.num_latent_pixels) if latents is None: UpperCAmelCase : Union[str, Any] =self.transformer.num_vector_embeds - 1 UpperCAmelCase : str =torch.full(snake_case__ , snake_case__ ).to(self.device ) else: if latents.shape != latents_shape: raise ValueError(f'''Unexpected latents shape, got {latents.shape}, expected {latents_shape}''' ) if (latents < 0).any() or (latents >= self.transformer.num_vector_embeds).any(): raise ValueError( '''Unexpected latents value(s). All latents be valid embedding indices i.e. 
in the range 0,''' f''' {self.transformer.num_vector_embeds - 1} (inclusive).''' ) UpperCAmelCase : Any =latents.to(self.device ) # set timesteps self.scheduler.set_timesteps(snake_case__ , device=self.device ) UpperCAmelCase : Any =self.scheduler.timesteps.to(self.device ) UpperCAmelCase : Optional[int] =latents for i, t in enumerate(self.progress_bar(snake_case__ ) ): # expand the sample if we are doing classifier free guidance UpperCAmelCase : Optional[Any] =torch.cat([sample] * 2 ) if do_classifier_free_guidance else sample # predict the un-noised image # model_output == `log_p_x_0` UpperCAmelCase : Optional[int] =self.transformer(snake_case__ , encoder_hidden_states=snake_case__ , timestep=snake_case__ ).sample if do_classifier_free_guidance: UpperCAmelCase , UpperCAmelCase : str =model_output.chunk(2 ) UpperCAmelCase : Optional[int] =model_output_uncond + guidance_scale * (model_output_text - model_output_uncond) model_output -= torch.logsumexp(snake_case__ , dim=1 , keepdim=snake_case__ ) UpperCAmelCase : Tuple =self.truncate(snake_case__ , snake_case__ ) # remove `log(0)`'s (`-inf`s) UpperCAmelCase : Optional[Any] =model_output.clamp(-70 ) # compute the previous noisy sample x_t -> x_t-1 UpperCAmelCase : int =self.scheduler.step(snake_case__ , timestep=snake_case__ , sample=snake_case__ , generator=snake_case__ ).prev_sample # call the callback, if provided if callback is not None and i % callback_steps == 0: callback(snake_case__ , snake_case__ , snake_case__ ) UpperCAmelCase : Optional[int] =self.vqvae.config.vq_embed_dim UpperCAmelCase : Optional[Any] =(batch_size, self.transformer.height, self.transformer.width, embedding_channels) UpperCAmelCase : Dict =self.vqvae.quantize.get_codebook_entry(snake_case__ , shape=snake_case__ ) UpperCAmelCase : Tuple =self.vqvae.decode(snake_case__ , force_not_quantize=snake_case__ ).sample UpperCAmelCase : Union[str, Any] =(image / 2 + 0.5).clamp(0 , 1 ) UpperCAmelCase : Any =image.cpu().permute(0 , 2 , 3 , 1 ).numpy() if output_type == "pil": UpperCAmelCase : List[str] =self.numpy_to_pil(snake_case__ ) if not return_dict: return (image,) return ImagePipelineOutput(images=snake_case__ ) def UpperCAmelCase__ ( self , snake_case__ , snake_case__ ) -> torch.FloatTensor: '''simple docstring''' UpperCAmelCase , UpperCAmelCase : int =torch.sort(snake_case__ , 1 , descending=snake_case__ ) UpperCAmelCase : Union[str, Any] =torch.exp(snake_case__ ) UpperCAmelCase : Union[str, Any] =sorted_p_x_0.cumsum(dim=1 ) < truncation_rate # Ensure that at least the largest probability is not zeroed out UpperCAmelCase : Optional[Any] =torch.full_like(keep_mask[:, 0:1, :] , snake_case__ ) UpperCAmelCase : Tuple =torch.cat((all_true, keep_mask) , dim=1 ) UpperCAmelCase : int =keep_mask[:, :-1, :] UpperCAmelCase : int =keep_mask.gather(1 , indices.argsort(1 ) ) UpperCAmelCase : Dict =log_p_x_0.clone() UpperCAmelCase : List[Any] =-torch.inf # -inf = log(0) return rv
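# End-to-end sketch, assuming this pipeline mirrors diffusers'
# VQDiffusionPipeline and that the public microsoft/vq-diffusion-ithq weights
# are available; truncation_rate feeds the truncate() method defined above:
from diffusers import VQDiffusionPipeline

pipe = VQDiffusionPipeline.from_pretrained("microsoft/vq-diffusion-ithq")
output = pipe("teddy bear playing in the pool", num_inference_steps=50, truncation_rate=1.0)
output.images[0].save("teddy_bear.png")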
import unittest from transformers import SPIECE_UNDERLINE from transformers.models.speechta import SpeechTaTokenizer from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow from transformers.tokenization_utils import AddedToken from ...test_tokenization_common import TokenizerTesterMixin __snake_case = get_tests_dir('''fixtures/test_sentencepiece_bpe_char.model''') @require_sentencepiece @require_tokenizers class __snake_case ( lowerCamelCase__ , unittest.TestCase ): __lowerCamelCase : Optional[int] = SpeechTaTokenizer __lowerCamelCase : Dict = False __lowerCamelCase : Dict = True def UpperCAmelCase__ ( self ) -> List[str]: '''simple docstring''' super().setUp() # We have a SentencePiece fixture for testing UpperCAmelCase : Tuple =SpeechTaTokenizer(snake_case__ ) UpperCAmelCase : Any =AddedToken('''<mask>''' , lstrip=snake_case__ , rstrip=snake_case__ ) UpperCAmelCase : int =mask_token tokenizer.add_special_tokens({'''mask_token''': mask_token} ) tokenizer.add_tokens(['''<ctc_blank>'''] ) tokenizer.save_pretrained(self.tmpdirname ) def UpperCAmelCase__ ( self , snake_case__ ) -> List[Any]: '''simple docstring''' UpperCAmelCase : Tuple ='''this is a test''' UpperCAmelCase : Dict ='''this is a test''' return input_text, output_text def UpperCAmelCase__ ( self , snake_case__ , snake_case__=False , snake_case__=20 , snake_case__=5 ) -> Any: '''simple docstring''' UpperCAmelCase , UpperCAmelCase : str =self.get_input_output_texts(snake_case__ ) UpperCAmelCase : Optional[int] =tokenizer.encode(snake_case__ , add_special_tokens=snake_case__ ) UpperCAmelCase : str =tokenizer.decode(snake_case__ , clean_up_tokenization_spaces=snake_case__ ) return text, ids def UpperCAmelCase__ ( self ) -> Any: '''simple docstring''' UpperCAmelCase : Optional[int] ='''<pad>''' UpperCAmelCase : Optional[int] =1 self.assertEqual(self.get_tokenizer()._convert_token_to_id(snake_case__ ) , snake_case__ ) self.assertEqual(self.get_tokenizer()._convert_id_to_token(snake_case__ ) , snake_case__ ) def UpperCAmelCase__ ( self ) -> List[str]: '''simple docstring''' UpperCAmelCase : Optional[int] =list(self.get_tokenizer().get_vocab().keys() ) self.assertEqual(vocab_keys[0] , '''<s>''' ) self.assertEqual(vocab_keys[1] , '''<pad>''' ) self.assertEqual(vocab_keys[-4] , '''œ''' ) self.assertEqual(vocab_keys[-2] , '''<mask>''' ) self.assertEqual(vocab_keys[-1] , '''<ctc_blank>''' ) self.assertEqual(len(snake_case__ ) , 81 ) def UpperCAmelCase__ ( self ) -> Tuple: '''simple docstring''' self.assertEqual(self.get_tokenizer().vocab_size , 79 ) def UpperCAmelCase__ ( self ) -> List[Any]: '''simple docstring''' UpperCAmelCase : Dict =self.get_tokenizers(do_lower_case=snake_case__ ) for tokenizer in tokenizers: with self.subTest(f'''{tokenizer.__class__.__name__}''' ): UpperCAmelCase : Tuple =tokenizer.vocab_size UpperCAmelCase : Union[str, Any] =len(snake_case__ ) self.assertNotEqual(snake_case__ , 0 ) # We usually have added tokens from the start in tests because our vocab fixtures are # smaller than the original vocabs - let's not assert this # self.assertEqual(vocab_size, all_size) UpperCAmelCase : Tuple =['''aaaaa bbbbbb''', '''cccccccccdddddddd'''] UpperCAmelCase : Any =tokenizer.add_tokens(snake_case__ ) UpperCAmelCase : Union[str, Any] =tokenizer.vocab_size UpperCAmelCase : int =len(snake_case__ ) self.assertNotEqual(snake_case__ , 0 ) self.assertEqual(snake_case__ , snake_case__ ) self.assertEqual(snake_case__ , len(snake_case__ ) ) self.assertEqual(snake_case__ , all_size + 
len(snake_case__ ) ) UpperCAmelCase : int =tokenizer.encode('''aaaaa bbbbbb low cccccccccdddddddd l''' , add_special_tokens=snake_case__ ) self.assertGreaterEqual(len(snake_case__ ) , 4 ) self.assertGreater(tokens[0] , tokenizer.vocab_size - 1 ) self.assertGreater(tokens[-3] , tokenizer.vocab_size - 1 ) UpperCAmelCase : Optional[int] ={'''eos_token''': '''>>>>|||<||<<|<<''', '''pad_token''': '''<<<<<|||>|>>>>|>'''} UpperCAmelCase : Dict =tokenizer.add_special_tokens(snake_case__ ) UpperCAmelCase : Any =tokenizer.vocab_size UpperCAmelCase : List[Any] =len(snake_case__ ) self.assertNotEqual(snake_case__ , 0 ) self.assertEqual(snake_case__ , snake_case__ ) self.assertEqual(snake_case__ , len(snake_case__ ) ) self.assertEqual(snake_case__ , all_size_a + len(snake_case__ ) ) UpperCAmelCase : Tuple =tokenizer.encode( '''>>>>|||<||<<|<< aaaaabbbbbb low cccccccccdddddddd <<<<<|||>|>>>>|> l''' , add_special_tokens=snake_case__ ) self.assertGreaterEqual(len(snake_case__ ) , 6 ) self.assertGreater(tokens[0] , tokenizer.vocab_size - 1 ) self.assertGreater(tokens[0] , tokens[1] ) self.assertGreater(tokens[-3] , tokenizer.vocab_size - 1 ) self.assertGreater(tokens[-3] , tokens[-4] ) self.assertEqual(tokens[0] , tokenizer.eos_token_id ) self.assertEqual(tokens[-3] , tokenizer.pad_token_id ) def UpperCAmelCase__ ( self ) -> str: '''simple docstring''' pass def UpperCAmelCase__ ( self ) -> str: '''simple docstring''' pass def UpperCAmelCase__ ( self ) -> Optional[int]: '''simple docstring''' UpperCAmelCase : Optional[Any] =self.get_tokenizer() UpperCAmelCase : int =tokenizer.tokenize('''This is a test''' ) # fmt: off self.assertListEqual(snake_case__ , [SPIECE_UNDERLINE, '''T''', '''h''', '''i''', '''s''', SPIECE_UNDERLINE, '''i''', '''s''', SPIECE_UNDERLINE, '''a''', SPIECE_UNDERLINE, '''t''', '''e''', '''s''', '''t'''] ) # fmt: on self.assertListEqual( tokenizer.convert_tokens_to_ids(snake_case__ ) , [4, 32, 11, 10, 12, 4, 10, 12, 4, 7, 4, 6, 5, 12, 6] , ) UpperCAmelCase : Dict =tokenizer.tokenize('''I was born in 92000, and this is falsé.''' ) self.assertListEqual( snake_case__ , [SPIECE_UNDERLINE, '''I''', SPIECE_UNDERLINE, '''w''', '''a''', '''s''', SPIECE_UNDERLINE, '''b''', '''o''', '''r''', '''n''', SPIECE_UNDERLINE, '''i''', '''n''', SPIECE_UNDERLINE, '''92000''', ''',''', SPIECE_UNDERLINE, '''a''', '''n''', '''d''', SPIECE_UNDERLINE, '''t''', '''h''', '''i''', '''s''', SPIECE_UNDERLINE, '''i''', '''s''', SPIECE_UNDERLINE, '''f''', '''a''', '''l''', '''s''', '''é''', '''.'''] ) UpperCAmelCase : List[str] =tokenizer.convert_tokens_to_ids(snake_case__ ) # fmt: off self.assertListEqual(snake_case__ , [4, 30, 4, 20, 7, 12, 4, 25, 8, 13, 9, 4, 10, 9, 4, 3, 23, 4, 7, 9, 14, 4, 6, 11, 10, 12, 4, 10, 12, 4, 19, 7, 15, 12, 73, 26] ) # fmt: on UpperCAmelCase : str =tokenizer.convert_ids_to_tokens(snake_case__ ) self.assertListEqual( snake_case__ , [SPIECE_UNDERLINE, '''I''', SPIECE_UNDERLINE, '''w''', '''a''', '''s''', SPIECE_UNDERLINE, '''b''', '''o''', '''r''', '''n''', SPIECE_UNDERLINE, '''i''', '''n''', SPIECE_UNDERLINE, '''<unk>''', ''',''', SPIECE_UNDERLINE, '''a''', '''n''', '''d''', SPIECE_UNDERLINE, '''t''', '''h''', '''i''', '''s''', SPIECE_UNDERLINE, '''i''', '''s''', SPIECE_UNDERLINE, '''f''', '''a''', '''l''', '''s''', '''é''', '''.'''] ) @slow def UpperCAmelCase__ ( self ) -> int: '''simple docstring''' UpperCAmelCase : Union[str, Any] =[ '''Transformers (formerly known as pytorch-transformers and pytorch-pretrained-bert) provides ''' '''general-purpose architectures (BERT, GPT, RoBERTa, XLM, 
DistilBert, XLNet...) for Natural ''' '''Language Understanding (NLU) and Natural Language Generation (NLG) with over thirty-two pretrained ''' '''models in one hundred plus languages and deep interoperability between Jax, PyTorch and TensorFlow.''', '''BERT is designed to pre-train deep bidirectional representations from unlabeled text by jointly ''' '''conditioning on both left and right context in all layers.''', '''The quick brown fox jumps over the lazy dog.''', ] # fmt: off UpperCAmelCase : Tuple ={ '''input_ids''': [ [4, 32, 13, 7, 9, 12, 19, 8, 13, 18, 5, 13, 12, 4, 64, 19, 8, 13, 18, 5, 13, 15, 22, 4, 28, 9, 8, 20, 9, 4, 7, 12, 4, 24, 22, 6, 8, 13, 17, 11, 39, 6, 13, 7, 9, 12, 19, 8, 13, 18, 5, 13, 12, 4, 7, 9, 14, 4, 24, 22, 6, 8, 13, 17, 11, 39, 24, 13, 5, 6, 13, 7, 10, 9, 5, 14, 39, 25, 5, 13, 6, 63, 4, 24, 13, 8, 27, 10, 14, 5, 12, 4, 21, 5, 9, 5, 13, 7, 15, 39, 24, 16, 13, 24, 8, 12, 5, 4, 7, 13, 17, 11, 10, 6, 5, 17, 6, 16, 13, 5, 12, 4, 64, 40, 47, 54, 32, 23, 4, 53, 49, 32, 23, 4, 54, 8, 40, 47, 54, 32, 7, 23, 4, 69, 52, 43, 23, 4, 51, 10, 12, 6, 10, 15, 40, 5, 13, 6, 23, 4, 69, 52, 48, 5, 6, 26, 26, 26, 63, 4, 19, 8, 13, 4, 48, 7, 6, 16, 13, 7, 15, 4, 52, 7, 9, 21, 16, 7, 21, 5, 4, 61, 9, 14, 5, 13, 12, 6, 7, 9, 14, 10, 9, 21, 4, 64, 48, 52, 61, 63, 4, 7, 9, 14, 4, 48, 7, 6, 16, 13, 7, 15, 4, 52, 7, 9, 21, 16, 7, 21, 5, 4, 53, 5, 9, 5, 13, 7, 6, 10, 8, 9, 4, 64, 48, 52, 53, 63, 4, 20, 10, 6, 11, 4, 8, 27, 5, 13, 4, 6, 11, 10, 13, 6, 22, 39, 6, 20, 8, 4, 24, 13, 5, 6, 13, 7, 10, 9, 5, 14, 4, 18, 8, 14, 5, 15, 12, 4, 10, 9, 4, 8, 9, 5, 4, 11, 16, 9, 14, 13, 5, 14, 4, 24, 15, 16, 12, 4, 15, 7, 9, 21, 16, 7, 21, 5, 12, 4, 7, 9, 14, 4, 14, 5, 5, 24, 4, 10, 9, 6, 5, 13, 8, 24, 5, 13, 7, 25, 10, 15, 10, 6, 22, 4, 25, 5, 6, 20, 5, 5, 9, 4, 58, 7, 37, 23, 4, 49, 22, 32, 8, 13, 17, 11, 4, 7, 9, 14, 4, 32, 5, 9, 12, 8, 13, 55, 15, 8, 20, 26, 2], [4, 40, 47, 54, 32, 4, 10, 12, 4, 14, 5, 12, 10, 21, 9, 5, 14, 4, 6, 8, 4, 24, 13, 5, 39, 6, 13, 7, 10, 9, 4, 14, 5, 5, 24, 4, 25, 10, 14, 10, 13, 5, 17, 6, 10, 8, 9, 7, 15, 4, 13, 5, 24, 13, 5, 12, 5, 9, 6, 7, 6, 10, 8, 9, 12, 4, 19, 13, 8, 18, 4, 16, 9, 15, 7, 25, 5, 15, 5, 14, 4, 6, 5, 37, 6, 4, 25, 22, 4, 46, 8, 10, 9, 6, 15, 22, 4, 17, 8, 9, 14, 10, 6, 10, 8, 9, 10, 9, 21, 4, 8, 9, 4, 25, 8, 6, 11, 4, 15, 5, 19, 6, 4, 7, 9, 14, 4, 13, 10, 21, 11, 6, 4, 17, 8, 9, 6, 5, 37, 6, 4, 10, 9, 4, 7, 15, 15, 4, 15, 7, 22, 5, 13, 12, 26, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [4, 32, 11, 5, 4, 45, 16, 10, 17, 28, 4, 25, 13, 8, 20, 9, 4, 19, 8, 37, 4, 46, 16, 18, 24, 12, 4, 8, 27, 5, 13, 4, 6, 11, 5, 4, 15, 7, 57, 22, 4, 14, 8, 21, 26, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], ], '''attention_mask''': [ [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], ] } # fmt: on self.tokenizer_integration_test_util( expected_encoding=snake_case__ , model_name='''microsoft/speecht5_asr''' , revision='''c5ef64c71905caeccde0e4462ef3f9077224c524''' , sequences=snake_case__ , )
import unittest

import numpy as np
import torch

from diffusers import KarrasVePipeline, KarrasVeScheduler, UNet2DModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device


enable_full_determinism()


class KarrasVePipelineFastTests(unittest.TestCase):
    @property
    def dummy_uncond_unet(self):
        """Small randomly initialized UNet for fast tests."""
        torch.manual_seed(0)
        model = UNet2DModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=3,
            out_channels=3,
            down_block_types=("DownBlock2D", "AttnDownBlock2D"),
            up_block_types=("AttnUpBlock2D", "UpBlock2D"),
        )
        return model

    def test_inference(self):
        unet = self.dummy_uncond_unet
        scheduler = KarrasVeScheduler()

        pipe = KarrasVePipeline(unet=unet, scheduler=scheduler)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        generator = torch.manual_seed(0)
        image = pipe(num_inference_steps=2, generator=generator, output_type="numpy").images

        generator = torch.manual_seed(0)
        image_from_tuple = pipe(num_inference_steps=2, generator=generator, output_type="numpy", return_dict=False)[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.0, 1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2


@slow
@require_torch
class KarrasVePipelineIntegrationTests(unittest.TestCase):
    def test_inference(self):
        model_id = "google/ncsnpp-celebahq-256"
        model = UNet2DModel.from_pretrained(model_id)
        scheduler = KarrasVeScheduler()

        pipe = KarrasVePipeline(unet=model, scheduler=scheduler)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        generator = torch.manual_seed(0)
        image = pipe(num_inference_steps=20, generator=generator, output_type="numpy").images

        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 256, 256, 3)
        expected_slice = np.array([0.578, 0.5811, 0.5924, 0.5809, 0.587, 0.5886, 0.5861, 0.5802, 0.586])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
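# A minimal hedged usage sketch of the pipeline exercised by the slow test
# above, assuming the same `google/ncsnpp-celebahq-256` checkpoint; the output
# filename is illustrative, not part of the test suite.
import torch

from diffusers import KarrasVePipeline, KarrasVeScheduler, UNet2DModel

if __name__ == "__main__":
    unet = UNet2DModel.from_pretrained("google/ncsnpp-celebahq-256")
    pipe = KarrasVePipeline(unet=unet, scheduler=KarrasVeScheduler())
    pipe.to("cuda" if torch.cuda.is_available() else "cpu")
    # the default output_type returns PIL images
    image = pipe(num_inference_steps=20, generator=torch.manual_seed(0)).images[0]
    image.save("karras_ve_sample.png")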
from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available, is_vision_available, ) __snake_case = { '''configuration_mobilevit''': ['''MOBILEVIT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''MobileViTConfig''', '''MobileViTOnnxConfig'''], } try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __snake_case = ['''MobileViTFeatureExtractor'''] __snake_case = ['''MobileViTImageProcessor'''] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __snake_case = [ '''MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST''', '''MobileViTForImageClassification''', '''MobileViTForSemanticSegmentation''', '''MobileViTModel''', '''MobileViTPreTrainedModel''', ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __snake_case = [ '''TF_MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST''', '''TFMobileViTForImageClassification''', '''TFMobileViTForSemanticSegmentation''', '''TFMobileViTModel''', '''TFMobileViTPreTrainedModel''', ] if TYPE_CHECKING: from .configuration_mobilevit import MOBILEVIT_PRETRAINED_CONFIG_ARCHIVE_MAP, MobileViTConfig, MobileViTOnnxConfig try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .feature_extraction_mobilevit import MobileViTFeatureExtractor from .image_processing_mobilevit import MobileViTImageProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_mobilevit import ( MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST, MobileViTForImageClassification, MobileViTForSemanticSegmentation, MobileViTModel, MobileViTPreTrainedModel, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_mobilevit import ( TF_MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST, TFMobileViTForImageClassification, TFMobileViTForSemanticSegmentation, TFMobileViTModel, TFMobileViTPreTrainedModel, ) else: import sys __snake_case = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
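# A small hedged sketch of how the `_LazyModule` indirection used above
# behaves: importing the package submodule is cheap, and the heavy imports
# only happen on first attribute access. The timing printout is illustrative.
import importlib
import time

if __name__ == "__main__":
    start = time.perf_counter()
    mobilevit = importlib.import_module("transformers.models.mobilevit")
    print(f"module import took {time.perf_counter() - start:.4f}s")  # nothing loaded yet
    start = time.perf_counter()
    config_cls = mobilevit.MobileViTConfig  # triggers loading configuration_mobilevit
    print(f"first attribute access took {time.perf_counter() - start:.4f}s")
    print(config_cls().model_type)  # -> "mobilevit"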
import qiskit


def half_adder(bit0: int, bit1: int) -> qiskit.result.counts.Counts:
    simulator = qiskit.Aer.get_backend("aer_simulator")
    qc_ha = qiskit.QuantumCircuit(4, 2)
    # encode inputs in qubits 0 and 1
    if bit0 == 1:
        qc_ha.x(0)
    if bit1 == 1:
        qc_ha.x(1)
    qc_ha.barrier()
    # use CNOTs to write the XOR of the inputs on qubit 2
    qc_ha.cx(0, 2)
    qc_ha.cx(1, 2)
    # use a CCX / Toffoli gate to write the AND of the inputs on qubit 3
    qc_ha.ccx(0, 1, 3)
    qc_ha.barrier()
    # extract outputs
    qc_ha.measure(2, 0)  # extract XOR value (the sum bit)
    qc_ha.measure(3, 1)  # extract AND value (the carry bit)
    # execute the circuit on the qasm simulator
    job = qiskit.execute(qc_ha, simulator, shots=1000)
    # return the histogram data of the results of the experiment
    return job.result().get_counts(qc_ha)


if __name__ == "__main__":
    counts = half_adder(1, 1)
    print(f"Half Adder Output Qubit Counts: {counts}")
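# A hedged truth-table check for the half adder above; since each circuit is
# deterministic, every call should return a single count key "<carry><sum>",
# e.g. half_adder(1, 1) -> {'10': 1000}.
for bit0 in (0, 1):
    for bit1 in (0, 1):
        print(f"{bit0} + {bit1} -> {half_adder(bit0, bit1)}")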
import gc import unittest from diffusers import FlaxControlNetModel, FlaxStableDiffusionControlNetPipeline from diffusers.utils import is_flax_available, load_image, slow from diffusers.utils.testing_utils import require_flax if is_flax_available(): import jax import jax.numpy as jnp from flax.jax_utils import replicate from flax.training.common_utils import shard @slow @require_flax class __snake_case ( unittest.TestCase ): def UpperCAmelCase__ ( self ) -> List[Any]: '''simple docstring''' super().tearDown() gc.collect() def UpperCAmelCase__ ( self ) -> Union[str, Any]: '''simple docstring''' UpperCAmelCase , UpperCAmelCase : int =FlaxControlNetModel.from_pretrained( '''lllyasviel/sd-controlnet-canny''' , from_pt=snake_case__ , dtype=jnp.bfloataa ) UpperCAmelCase , UpperCAmelCase : Optional[Any] =FlaxStableDiffusionControlNetPipeline.from_pretrained( '''runwayml/stable-diffusion-v1-5''' , controlnet=snake_case__ , from_pt=snake_case__ , dtype=jnp.bfloataa ) UpperCAmelCase : int =controlnet_params UpperCAmelCase : Any ='''bird''' UpperCAmelCase : Any =jax.device_count() UpperCAmelCase : List[Any] =pipe.prepare_text_inputs([prompts] * num_samples ) UpperCAmelCase : Any =load_image( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/bird_canny.png''' ) UpperCAmelCase : Tuple =pipe.prepare_image_inputs([canny_image] * num_samples ) UpperCAmelCase : Union[str, Any] =jax.random.PRNGKey(0 ) UpperCAmelCase : List[Any] =jax.random.split(snake_case__ , jax.device_count() ) UpperCAmelCase : Optional[Any] =replicate(snake_case__ ) UpperCAmelCase : str =shard(snake_case__ ) UpperCAmelCase : Dict =shard(snake_case__ ) UpperCAmelCase : Dict =pipe( prompt_ids=snake_case__ , image=snake_case__ , params=snake_case__ , prng_seed=snake_case__ , num_inference_steps=50 , jit=snake_case__ , ).images assert images.shape == (jax.device_count(), 1, 768, 512, 3) UpperCAmelCase : Optional[Any] =images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:] ) UpperCAmelCase : Dict =images[0, 253:256, 253:256, -1] UpperCAmelCase : List[str] =jnp.asarray(jax.device_get(image_slice.flatten() ) ) UpperCAmelCase : Tuple =jnp.array( [0.16_7969, 0.11_6699, 0.08_1543, 0.15_4297, 0.13_2812, 0.10_8887, 0.16_9922, 0.16_9922, 0.20_5078] ) print(f'''output_slice: {output_slice}''' ) assert jnp.abs(output_slice - expected_slice ).max() < 1e-2 def UpperCAmelCase__ ( self ) -> Optional[int]: '''simple docstring''' UpperCAmelCase , UpperCAmelCase : Any =FlaxControlNetModel.from_pretrained( '''lllyasviel/sd-controlnet-openpose''' , from_pt=snake_case__ , dtype=jnp.bfloataa ) UpperCAmelCase , UpperCAmelCase : Optional[int] =FlaxStableDiffusionControlNetPipeline.from_pretrained( '''runwayml/stable-diffusion-v1-5''' , controlnet=snake_case__ , from_pt=snake_case__ , dtype=jnp.bfloataa ) UpperCAmelCase : Union[str, Any] =controlnet_params UpperCAmelCase : Optional[Any] ='''Chef in the kitchen''' UpperCAmelCase : List[Any] =jax.device_count() UpperCAmelCase : Tuple =pipe.prepare_text_inputs([prompts] * num_samples ) UpperCAmelCase : Any =load_image( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/pose.png''' ) UpperCAmelCase : Dict =pipe.prepare_image_inputs([pose_image] * num_samples ) UpperCAmelCase : Optional[int] =jax.random.PRNGKey(0 ) UpperCAmelCase : Optional[Any] =jax.random.split(snake_case__ , jax.device_count() ) UpperCAmelCase : int =replicate(snake_case__ ) UpperCAmelCase : str =shard(snake_case__ ) UpperCAmelCase : 
Optional[int] =shard(snake_case__ ) UpperCAmelCase : Tuple =pipe( prompt_ids=snake_case__ , image=snake_case__ , params=snake_case__ , prng_seed=snake_case__ , num_inference_steps=50 , jit=snake_case__ , ).images assert images.shape == (jax.device_count(), 1, 768, 512, 3) UpperCAmelCase : List[Any] =images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:] ) UpperCAmelCase : Tuple =images[0, 253:256, 253:256, -1] UpperCAmelCase : Any =jnp.asarray(jax.device_get(image_slice.flatten() ) ) UpperCAmelCase : Optional[int] =jnp.array( [[0.27_1484, 0.26_1719, 0.27_5391, 0.27_7344, 0.27_9297, 0.29_1016, 0.29_4922, 0.30_2734, 0.30_2734]] ) print(f'''output_slice: {output_slice}''' ) assert jnp.abs(output_slice - expected_slice ).max() < 1e-2
from __future__ import annotations import unittest from transformers import BlenderbotConfig, BlenderbotTokenizer, is_tf_available from transformers.testing_utils import require_tf, require_tokenizers, slow from transformers.utils import cached_property from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import TFAutoModelForSeqaSeqLM, TFBlenderbotForConditionalGeneration, TFBlenderbotModel @require_tf class __snake_case : __lowerCamelCase : str = BlenderbotConfig __lowerCamelCase : Optional[Any] = {} __lowerCamelCase : Optional[int] = """gelu""" def __init__( self , snake_case__ , snake_case__=13 , snake_case__=7 , snake_case__=True , snake_case__=False , snake_case__=99 , snake_case__=32 , snake_case__=2 , snake_case__=4 , snake_case__=37 , snake_case__=0.1 , snake_case__=0.1 , snake_case__=20 , snake_case__=2 , snake_case__=1 , snake_case__=0 , ) -> Optional[Any]: '''simple docstring''' UpperCAmelCase : Union[str, Any] =parent UpperCAmelCase : Optional[int] =batch_size UpperCAmelCase : Dict =seq_length UpperCAmelCase : Optional[Any] =is_training UpperCAmelCase : List[str] =use_labels UpperCAmelCase : List[Any] =vocab_size UpperCAmelCase : Optional[int] =hidden_size UpperCAmelCase : Tuple =num_hidden_layers UpperCAmelCase : Any =num_attention_heads UpperCAmelCase : Optional[int] =intermediate_size UpperCAmelCase : str =hidden_dropout_prob UpperCAmelCase : Optional[int] =attention_probs_dropout_prob UpperCAmelCase : str =max_position_embeddings UpperCAmelCase : List[Any] =eos_token_id UpperCAmelCase : Optional[int] =pad_token_id UpperCAmelCase : Tuple =bos_token_id def UpperCAmelCase__ ( self ) -> Optional[Any]: '''simple docstring''' UpperCAmelCase : List[Any] =ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size ) UpperCAmelCase : List[Any] =tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ) , 1 ) UpperCAmelCase : Tuple =tf.concat([input_ids, eos_tensor] , axis=1 ) UpperCAmelCase : str =ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) UpperCAmelCase : Optional[Any] =self.config_cls( vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , **self.config_updates , ) UpperCAmelCase : List[str] =prepare_blenderbot_inputs_dict(snake_case__ , snake_case__ , snake_case__ ) return config, inputs_dict def UpperCAmelCase__ ( self , snake_case__ , snake_case__ ) -> int: '''simple docstring''' UpperCAmelCase : Union[str, Any] =TFBlenderbotModel(config=snake_case__ ).get_decoder() UpperCAmelCase : Any =inputs_dict['''input_ids'''] UpperCAmelCase : str =input_ids[:1, :] UpperCAmelCase : Tuple =inputs_dict['''attention_mask'''][:1, :] UpperCAmelCase : Tuple =inputs_dict['''head_mask'''] UpperCAmelCase : List[Any] =1 # first forward pass UpperCAmelCase : List[str] =model(snake_case__ , attention_mask=snake_case__ , head_mask=snake_case__ , 
use_cache=snake_case__ ) UpperCAmelCase , UpperCAmelCase : str =outputs.to_tuple() # create hypothetical next token and extent to next_input_ids UpperCAmelCase : Union[str, Any] =ids_tensor((self.batch_size, 3) , config.vocab_size ) UpperCAmelCase : List[Any] =tf.cast(ids_tensor((self.batch_size, 3) , 2 ) , tf.inta ) # append to next input_ids and UpperCAmelCase : Tuple =tf.concat([input_ids, next_tokens] , axis=-1 ) UpperCAmelCase : int =tf.concat([attention_mask, next_attn_mask] , axis=-1 ) UpperCAmelCase : Optional[int] =model(snake_case__ , attention_mask=snake_case__ )[0] UpperCAmelCase : str =model(snake_case__ , attention_mask=snake_case__ , past_key_values=snake_case__ )[0] self.parent.assertEqual(next_tokens.shape[1] , output_from_past.shape[1] ) # select random slice UpperCAmelCase : List[Any] =int(ids_tensor((1,) , output_from_past.shape[-1] ) ) UpperCAmelCase : List[Any] =output_from_no_past[:, -3:, random_slice_idx] UpperCAmelCase : Dict =output_from_past[:, :, random_slice_idx] # test that outputs are equal for slice tf.debugging.assert_near(snake_case__ , snake_case__ , rtol=1e-3 ) def lowerCAmelCase_ ( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase=None , __lowerCAmelCase=None , __lowerCAmelCase=None , __lowerCAmelCase=None , __lowerCAmelCase=None , )-> str: '''simple docstring''' if attention_mask is None: UpperCAmelCase : int =tf.cast(tf.math.not_equal(__lowerCAmelCase , config.pad_token_id ) , tf.inta ) if decoder_attention_mask is None: UpperCAmelCase : Tuple =tf.concat( [ tf.ones(decoder_input_ids[:, :1].shape , dtype=tf.inta ), tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:] , config.pad_token_id ) , tf.inta ), ] , axis=-1 , ) if head_mask is None: UpperCAmelCase : str =tf.ones((config.encoder_layers, config.encoder_attention_heads) ) if decoder_head_mask is None: UpperCAmelCase : Union[str, Any] =tf.ones((config.decoder_layers, config.decoder_attention_heads) ) if cross_attn_head_mask is None: UpperCAmelCase : int =tf.ones((config.decoder_layers, config.decoder_attention_heads) ) return { "input_ids": input_ids, "decoder_input_ids": decoder_input_ids, "attention_mask": attention_mask, "decoder_attention_mask": decoder_attention_mask, "head_mask": head_mask, "decoder_head_mask": decoder_head_mask, "cross_attn_head_mask": cross_attn_head_mask, } @require_tf class __snake_case ( lowerCamelCase__ , lowerCamelCase__ , unittest.TestCase ): __lowerCamelCase : List[str] = (TFBlenderbotForConditionalGeneration, TFBlenderbotModel) if is_tf_available() else () __lowerCamelCase : Dict = (TFBlenderbotForConditionalGeneration,) if is_tf_available() else () __lowerCamelCase : Dict = ( { """conversational""": TFBlenderbotForConditionalGeneration, """feature-extraction""": TFBlenderbotModel, """summarization""": TFBlenderbotForConditionalGeneration, """text2text-generation""": TFBlenderbotForConditionalGeneration, """translation""": TFBlenderbotForConditionalGeneration, } if is_tf_available() else {} ) __lowerCamelCase : Union[str, Any] = True __lowerCamelCase : Union[str, Any] = False __lowerCamelCase : Union[str, Any] = False def UpperCAmelCase__ ( self ) -> Dict: '''simple docstring''' UpperCAmelCase : List[str] =TFBlenderbotModelTester(self ) UpperCAmelCase : List[Any] =ConfigTester(self , config_class=snake_case__ ) def UpperCAmelCase__ ( self ) -> Optional[Any]: '''simple docstring''' self.config_tester.run_common_tests() def UpperCAmelCase__ ( self ) -> List[Any]: '''simple docstring''' UpperCAmelCase : int 
=self.model_tester.prepare_config_and_inputs_for_common() self.model_tester.check_decoder_model_past_large_inputs(*snake_case__ ) @require_tokenizers @require_tf class __snake_case ( unittest.TestCase ): __lowerCamelCase : List[str] = ["""My friends are cool but they eat too many carbs."""] __lowerCamelCase : Dict = """facebook/blenderbot-400M-distill""" @cached_property def UpperCAmelCase__ ( self ) -> int: '''simple docstring''' return BlenderbotTokenizer.from_pretrained(self.model_name ) @cached_property def UpperCAmelCase__ ( self ) -> int: '''simple docstring''' UpperCAmelCase : int =TFAutoModelForSeqaSeqLM.from_pretrained(self.model_name ) return model @slow def UpperCAmelCase__ ( self ) -> Any: '''simple docstring''' UpperCAmelCase : Optional[int] =self.tokenizer(self.src_text , return_tensors='''tf''' ) UpperCAmelCase : Optional[int] =self.model.generate( model_inputs.input_ids , ) UpperCAmelCase : str =self.tokenizer.batch_decode(generated_ids.numpy() , skip_special_tokens=snake_case__ )[0] assert ( generated_words == " That's unfortunate. Are they trying to lose weight or are they just trying to be healthier?" )
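# A standalone hedged sketch of the generation path checked by the integration
# test above, using the same facebook/blenderbot-400M-distill checkpoint; per
# that test, the reply should be the "That's unfortunate. ..." sentence.
from transformers import BlenderbotTokenizer, TFAutoModelForSeq2SeqLM

if __name__ == "__main__":
    tokenizer = BlenderbotTokenizer.from_pretrained("facebook/blenderbot-400M-distill")
    model = TFAutoModelForSeq2SeqLM.from_pretrained("facebook/blenderbot-400M-distill")
    inputs = tokenizer(["My friends are cool but they eat too many carbs."], return_tensors="tf")
    generated_ids = model.generate(inputs.input_ids)
    print(tokenizer.batch_decode(generated_ids.numpy(), skip_special_tokens=True)[0])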
import argparse import collections import json from pathlib import Path import requests import torch import yaml from huggingface_hub import hf_hub_download from PIL import Image from transformers import ( MobileViTImageProcessor, MobileViTVaConfig, MobileViTVaForImageClassification, MobileViTVaForSemanticSegmentation, ) from transformers.utils import logging logging.set_verbosity_info() __snake_case = logging.get_logger(__name__) def lowerCAmelCase_ ( __lowerCAmelCase )-> Optional[int]: '''simple docstring''' print('''Loading config file...''' ) def flatten_yaml_as_dict(__lowerCAmelCase , __lowerCAmelCase="" , __lowerCAmelCase="." ): UpperCAmelCase : Optional[Any] =[] for k, v in d.items(): UpperCAmelCase : str =parent_key + sep + k if parent_key else k if isinstance(__lowerCAmelCase , collections.abc.MutableMapping ): items.extend(flatten_yaml_as_dict(__lowerCAmelCase , __lowerCAmelCase , sep=__lowerCAmelCase ).items() ) else: items.append((new_key, v) ) return dict(__lowerCAmelCase ) UpperCAmelCase : Optional[Any] =argparse.Namespace() with open(__lowerCAmelCase , '''r''' ) as yaml_file: try: UpperCAmelCase : Optional[int] =yaml.load(__lowerCAmelCase , Loader=yaml.FullLoader ) UpperCAmelCase : Any =flatten_yaml_as_dict(__lowerCAmelCase ) for k, v in flat_cfg.items(): setattr(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) except yaml.YAMLError as exc: logger.error('''Error while loading config file: {}. Error message: {}'''.format(__lowerCAmelCase , str(__lowerCAmelCase ) ) ) return config def lowerCAmelCase_ ( __lowerCAmelCase , __lowerCAmelCase )-> Optional[int]: '''simple docstring''' UpperCAmelCase : Any =MobileViTVaConfig() UpperCAmelCase : Dict =False # dataset if task_name.startswith('''imagenet1k_''' ): UpperCAmelCase : List[str] =10_00 if int(task_name.strip().split('''_''' )[-1] ) == 3_84: UpperCAmelCase : Tuple =3_84 else: UpperCAmelCase : Dict =2_56 UpperCAmelCase : Optional[int] ='''imagenet-1k-id2label.json''' elif task_name.startswith('''imagenet21k_to_1k_''' ): UpperCAmelCase : Optional[Any] =2_10_00 if int(task_name.strip().split('''_''' )[-1] ) == 3_84: UpperCAmelCase : Union[str, Any] =3_84 else: UpperCAmelCase : Tuple =2_56 UpperCAmelCase : Tuple ='''imagenet-22k-id2label.json''' elif task_name.startswith('''ade20k_''' ): UpperCAmelCase : Optional[int] =1_51 UpperCAmelCase : int =5_12 UpperCAmelCase : List[Any] ='''ade20k-id2label.json''' UpperCAmelCase : List[Any] =True elif task_name.startswith('''voc_''' ): UpperCAmelCase : List[str] =21 UpperCAmelCase : Optional[int] =5_12 UpperCAmelCase : Optional[Any] ='''pascal-voc-id2label.json''' UpperCAmelCase : List[Any] =True # orig_config UpperCAmelCase : Dict =load_orig_config_file(__lowerCAmelCase ) assert getattr(__lowerCAmelCase , '''model.classification.name''' , -1 ) == "mobilevit_v2", "Invalid model" UpperCAmelCase : Tuple =getattr(__lowerCAmelCase , '''model.classification.mitv2.width_multiplier''' , 1.0 ) assert ( getattr(__lowerCAmelCase , '''model.classification.mitv2.attn_norm_layer''' , -1 ) == "layer_norm_2d" ), "Norm layers other than layer_norm_2d is not supported" UpperCAmelCase : Union[str, Any] =getattr(__lowerCAmelCase , '''model.classification.activation.name''' , '''swish''' ) # config.image_size == getattr(orig_config, 'sampler.bs.crop_size_width', 256) if is_segmentation_model: UpperCAmelCase : List[Any] =getattr(__lowerCAmelCase , '''model.segmentation.output_stride''' , 16 ) if "_deeplabv3" in task_name: UpperCAmelCase : List[Any] =getattr(__lowerCAmelCase , 
'''model.segmentation.deeplabv3.aspp_rates''' , [12, 24, 36] ) UpperCAmelCase : str =getattr(__lowerCAmelCase , '''model.segmentation.deeplabv3.aspp_out_channels''' , 5_12 ) UpperCAmelCase : Any =getattr(__lowerCAmelCase , '''model.segmentation.deeplabv3.aspp_dropout''' , 0.1 ) # id2label UpperCAmelCase : str ='''huggingface/label-files''' UpperCAmelCase : List[Any] =json.load(open(hf_hub_download(__lowerCAmelCase , __lowerCAmelCase , repo_type='''dataset''' ) , '''r''' ) ) UpperCAmelCase : Optional[int] ={int(__lowerCAmelCase ): v for k, v in idalabel.items()} UpperCAmelCase : List[Any] =idalabel UpperCAmelCase : int ={v: k for k, v in idalabel.items()} return config def lowerCAmelCase_ ( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )-> Optional[int]: '''simple docstring''' UpperCAmelCase : int =dct.pop(__lowerCAmelCase ) UpperCAmelCase : Union[str, Any] =val def lowerCAmelCase_ ( __lowerCAmelCase , __lowerCAmelCase=False )-> Optional[Any]: '''simple docstring''' if base_model: UpperCAmelCase : Tuple ='''''' else: UpperCAmelCase : Optional[Any] ='''mobilevitv2.''' UpperCAmelCase : List[str] =[] for k in state_dict.keys(): if k[:8] == "encoder.": UpperCAmelCase : Optional[int] =k[8:] else: UpperCAmelCase : Optional[int] =k if ".block." in k: UpperCAmelCase : Tuple =k_new.replace('''.block.''' , '''.''' ) if ".conv." in k: UpperCAmelCase : Optional[int] =k_new.replace('''.conv.''' , '''.convolution.''' ) if ".norm." in k: UpperCAmelCase : Dict =k_new.replace('''.norm.''' , '''.normalization.''' ) if "conv_1." in k: UpperCAmelCase : Optional[Any] =k_new.replace('''conv_1.''' , f'''{model_prefix}conv_stem.''' ) for i in [1, 2]: if f'''layer_{i}.''' in k: UpperCAmelCase : Optional[Any] =k_new.replace(f'''layer_{i}.''' , f'''{model_prefix}encoder.layer.{i-1}.layer.''' ) if ".exp_1x1." in k: UpperCAmelCase : Any =k_new.replace('''.exp_1x1.''' , '''.expand_1x1.''' ) if ".red_1x1." in k: UpperCAmelCase : Optional[Any] =k_new.replace('''.red_1x1.''' , '''.reduce_1x1.''' ) for i in [3, 4, 5]: if f'''layer_{i}.0.''' in k: UpperCAmelCase : Tuple =k_new.replace(f'''layer_{i}.0.''' , f'''{model_prefix}encoder.layer.{i-1}.downsampling_layer.''' ) if f'''layer_{i}.1.local_rep.0.''' in k: UpperCAmelCase : Optional[int] =k_new.replace(f'''layer_{i}.1.local_rep.0.''' , f'''{model_prefix}encoder.layer.{i-1}.conv_kxk.''' ) if f'''layer_{i}.1.local_rep.1.''' in k: UpperCAmelCase : List[Any] =k_new.replace(f'''layer_{i}.1.local_rep.1.''' , f'''{model_prefix}encoder.layer.{i-1}.conv_1x1.''' ) for i in [3, 4, 5]: if i == 3: UpperCAmelCase : Dict =[0, 1] elif i == 4: UpperCAmelCase : Tuple =[0, 1, 2, 3] elif i == 5: UpperCAmelCase : Dict =[0, 1, 2] for j in j_in: if f'''layer_{i}.1.global_rep.{j}.''' in k: UpperCAmelCase : int =k_new.replace( f'''layer_{i}.1.global_rep.{j}.''' , f'''{model_prefix}encoder.layer.{i-1}.transformer.layer.{j}.''' ) if f'''layer_{i}.1.global_rep.{j+1}.''' in k: UpperCAmelCase : Dict =k_new.replace( f'''layer_{i}.1.global_rep.{j+1}.''' , f'''{model_prefix}encoder.layer.{i-1}.layernorm.''' ) if f'''layer_{i}.1.conv_proj.''' in k: UpperCAmelCase : Optional[int] =k_new.replace(f'''layer_{i}.1.conv_proj.''' , f'''{model_prefix}encoder.layer.{i-1}.conv_projection.''' ) if "pre_norm_attn.0." in k: UpperCAmelCase : Optional[int] =k_new.replace('''pre_norm_attn.0.''' , '''layernorm_before.''' ) if "pre_norm_attn.1." in k: UpperCAmelCase : List[str] =k_new.replace('''pre_norm_attn.1.''' , '''attention.''' ) if "pre_norm_ffn.0." 
in k: UpperCAmelCase : Union[str, Any] =k_new.replace('''pre_norm_ffn.0.''' , '''layernorm_after.''' ) if "pre_norm_ffn.1." in k: UpperCAmelCase : List[Any] =k_new.replace('''pre_norm_ffn.1.''' , '''ffn.conv1.''' ) if "pre_norm_ffn.3." in k: UpperCAmelCase : List[Any] =k_new.replace('''pre_norm_ffn.3.''' , '''ffn.conv2.''' ) if "classifier.1." in k: UpperCAmelCase : int =k_new.replace('''classifier.1.''' , '''classifier.''' ) if "seg_head." in k: UpperCAmelCase : List[str] =k_new.replace('''seg_head.''' , '''segmentation_head.''' ) if ".aspp_layer." in k: UpperCAmelCase : Union[str, Any] =k_new.replace('''.aspp_layer.''' , '''.''' ) if ".aspp_pool." in k: UpperCAmelCase : List[Any] =k_new.replace('''.aspp_pool.''' , '''.''' ) rename_keys.append((k, k_new) ) return rename_keys def lowerCAmelCase_ ( __lowerCAmelCase )-> List[str]: '''simple docstring''' UpperCAmelCase : Optional[Any] =[] for k in state_dict.keys(): if k.startswith('''seg_head.aux_head.''' ): keys_to_ignore.append(__lowerCAmelCase ) for k in keys_to_ignore: state_dict.pop(__lowerCAmelCase , __lowerCAmelCase ) def lowerCAmelCase_ ( )-> str: '''simple docstring''' UpperCAmelCase : Tuple ='''http://images.cocodataset.org/val2017/000000039769.jpg''' # url = "https://cdn.britannica.com/86/141086-050-9D7C75EE/Gulfstream-G450-business-jet-passengers.jpg" UpperCAmelCase : List[str] =Image.open(requests.get(__lowerCAmelCase , stream=__lowerCAmelCase ).raw ) return im @torch.no_grad() def lowerCAmelCase_ ( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )-> List[Any]: '''simple docstring''' UpperCAmelCase : Any =get_mobilevitva_config(__lowerCAmelCase , __lowerCAmelCase ) # load original state_dict UpperCAmelCase : Tuple =torch.load(__lowerCAmelCase , map_location='''cpu''' ) # load huggingface model if task_name.startswith('''ade20k_''' ) or task_name.startswith('''voc_''' ): UpperCAmelCase : Union[str, Any] =MobileViTVaForSemanticSegmentation(__lowerCAmelCase ).eval() UpperCAmelCase : Any =False else: UpperCAmelCase : int =MobileViTVaForImageClassification(__lowerCAmelCase ).eval() UpperCAmelCase : Optional[int] =False # remove and rename some keys of load the original model UpperCAmelCase : Union[str, Any] =checkpoint remove_unused_keys(__lowerCAmelCase ) UpperCAmelCase : Union[str, Any] =create_rename_keys(__lowerCAmelCase , base_model=__lowerCAmelCase ) for rename_key_src, rename_key_dest in rename_keys: rename_key(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) # load modified state_dict model.load_state_dict(__lowerCAmelCase ) # Check outputs on an image, prepared by MobileViTImageProcessor UpperCAmelCase : Any =MobileViTImageProcessor(crop_size=config.image_size , size=config.image_size + 32 ) UpperCAmelCase : List[str] =image_processor(images=prepare_img() , return_tensors='''pt''' ) UpperCAmelCase : List[str] =model(**__lowerCAmelCase ) # verify classification model if task_name.startswith('''imagenet''' ): UpperCAmelCase : Dict =outputs.logits UpperCAmelCase : int =logits.argmax(-1 ).item() print('''Predicted class:''' , model.config.idalabel[predicted_class_idx] ) if task_name.startswith('''imagenet1k_256''' ) and config.width_multiplier == 1.0: # expected_logits for base variant UpperCAmelCase : Tuple =torch.tensor([-1.6_3_3_6e0_0, -7.3_2_0_4e-0_2, -5.1_8_8_3e-0_1] ) assert torch.allclose(logits[0, :3] , __lowerCAmelCase , atol=1e-4 ) Path(__lowerCAmelCase ).mkdir(exist_ok=__lowerCAmelCase ) print(f'''Saving model {task_name} to {pytorch_dump_folder_path}''' ) 
model.save_pretrained(__lowerCAmelCase ) print(f'''Saving image processor to {pytorch_dump_folder_path}''' ) image_processor.save_pretrained(__lowerCAmelCase ) if __name__ == "__main__": __snake_case = argparse.ArgumentParser() # Required parameters parser.add_argument( '''--task''', default='''imagenet1k_256''', type=str, help=( '''Name of the task for which the MobileViTV2 model you\'d like to convert is trained on . ''' ''' Classification (ImageNet-1k) - MobileViTV2 (256x256) : imagenet1k_256 - MobileViTV2 (Trained on 256x256 and Finetuned on 384x384) : imagenet1k_384 - MobileViTV2 (Trained on ImageNet-21k and Finetuned on ImageNet-1k 256x256) : imagenet21k_to_1k_256 - MobileViTV2 (Trained on ImageNet-21k, Finetuned on ImageNet-1k 256x256, and Finetuned on ImageNet-1k 384x384) : imagenet21k_to_1k_384 Segmentation - ADE20K Dataset : ade20k_deeplabv3 - Pascal VOC 2012 Dataset: voc_deeplabv3 ''' ), choices=[ '''imagenet1k_256''', '''imagenet1k_384''', '''imagenet21k_to_1k_256''', '''imagenet21k_to_1k_384''', '''ade20k_deeplabv3''', '''voc_deeplabv3''', ], ) parser.add_argument( '''--orig_checkpoint_path''', required=True, type=str, help='''Path to the original state dict (.pt file).''' ) parser.add_argument('''--orig_config_path''', required=True, type=str, help='''Path to the original config file.''') parser.add_argument( '''--pytorch_dump_folder_path''', required=True, type=str, help='''Path to the output PyTorch model directory.''' ) __snake_case = parser.parse_args() convert_mobilevitva_checkpoint( args.task, args.orig_checkpoint_path, args.orig_config_path, args.pytorch_dump_folder_path )
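# A hedged example invocation of the conversion script above; the script
# filename and the checkpoint/config paths are placeholders, only the flag
# names come from the argument parser defined above.
#
#   python convert_mobilevitv2_to_pytorch.py \
#       --task imagenet1k_256 \
#       --orig_checkpoint_path ./mobilevitv2-1.0.pt \
#       --orig_config_path ./mobilevitv2-1.0.yaml \
#       --pytorch_dump_folder_path ./mobilevitv2-1.0-hf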
import functools import operator from ...configuration_utils import PretrainedConfig from ...utils import logging __snake_case = logging.get_logger(__name__) __snake_case = { '''asapp/sew-d-tiny-100k''': '''https://huggingface.co/asapp/sew-d-tiny-100k/resolve/main/config.json''', # See all SEW-D models at https://huggingface.co/models?filter=sew-d } class __snake_case ( lowerCamelCase__ ): __lowerCamelCase : Optional[Any] = """sew-d""" def __init__( self , snake_case__=32 , snake_case__=768 , snake_case__=12 , snake_case__=12 , snake_case__=3072 , snake_case__=2 , snake_case__=512 , snake_case__=256 , snake_case__=True , snake_case__=True , snake_case__=("p2c", "c2p") , snake_case__="layer_norm" , snake_case__="gelu_python" , snake_case__=0.1 , snake_case__=0.1 , snake_case__=0.1 , snake_case__=0.0 , snake_case__=0.1 , snake_case__=0.02 , snake_case__=1e-7 , snake_case__=1e-5 , snake_case__="group" , snake_case__="gelu" , snake_case__=(64, 128, 128, 128, 128, 256, 256, 256, 256, 512, 512, 512, 512) , snake_case__=(5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1) , snake_case__=(10, 3, 1, 3, 1, 3, 1, 3, 1, 2, 1, 2, 1) , snake_case__=False , snake_case__=128 , snake_case__=16 , snake_case__=True , snake_case__=0.05 , snake_case__=10 , snake_case__=2 , snake_case__=0.0 , snake_case__=10 , snake_case__=0 , snake_case__="mean" , snake_case__=False , snake_case__=False , snake_case__=256 , snake_case__=0 , snake_case__=1 , snake_case__=2 , **snake_case__ , ) -> int: '''simple docstring''' super().__init__(**snake_case__ , pad_token_id=snake_case__ , bos_token_id=snake_case__ , eos_token_id=snake_case__ ) UpperCAmelCase : Union[str, Any] =hidden_size UpperCAmelCase : Union[str, Any] =feat_extract_norm UpperCAmelCase : Optional[Any] =feat_extract_activation UpperCAmelCase : List[str] =list(snake_case__ ) UpperCAmelCase : int =list(snake_case__ ) UpperCAmelCase : List[str] =list(snake_case__ ) UpperCAmelCase : str =conv_bias UpperCAmelCase : Tuple =num_conv_pos_embeddings UpperCAmelCase : Dict =num_conv_pos_embedding_groups UpperCAmelCase : str =len(self.conv_dim ) UpperCAmelCase : Dict =num_hidden_layers UpperCAmelCase : Optional[int] =intermediate_size UpperCAmelCase : List[Any] =squeeze_factor UpperCAmelCase : str =max_position_embeddings UpperCAmelCase : int =position_buckets UpperCAmelCase : Optional[int] =share_att_key UpperCAmelCase : Optional[int] =relative_attention UpperCAmelCase : Tuple =norm_rel_ebd UpperCAmelCase : List[Any] =list(snake_case__ ) UpperCAmelCase : Dict =hidden_act UpperCAmelCase : Optional[int] =num_attention_heads UpperCAmelCase : Any =hidden_dropout UpperCAmelCase : str =attention_dropout UpperCAmelCase : Union[str, Any] =activation_dropout UpperCAmelCase : str =feat_proj_dropout UpperCAmelCase : Union[str, Any] =final_dropout UpperCAmelCase : Optional[int] =layer_norm_eps UpperCAmelCase : str =feature_layer_norm_eps UpperCAmelCase : str =initializer_range UpperCAmelCase : Any =vocab_size if ( (len(self.conv_stride ) != self.num_feat_extract_layers) or (len(self.conv_kernel ) != self.num_feat_extract_layers) or (len(self.conv_dim ) != self.num_feat_extract_layers) ): raise ValueError( '''Configuration for convolutional layers is incorrect.''' '''It is required that `len(config.conv_dim)` == `len(config.conv_stride)` == `len(config.conv_kernel)`,''' f'''but is `len(config.conv_dim) = {len(self.conv_dim )}`, `len(config.conv_stride)''' f'''= {len(self.conv_stride )}`, `len(config.conv_kernel) = {len(self.conv_kernel )}`.''' ) # fine-tuning config parameters for SpecAugment: 
https://arxiv.org/abs/1904.08779 UpperCAmelCase : Union[str, Any] =apply_spec_augment UpperCAmelCase : Optional[Any] =mask_time_prob UpperCAmelCase : Tuple =mask_time_length UpperCAmelCase : str =mask_time_min_masks UpperCAmelCase : Optional[int] =mask_feature_prob UpperCAmelCase : Optional[Any] =mask_feature_length UpperCAmelCase : List[Any] =mask_feature_min_masks # ctc loss UpperCAmelCase : str =ctc_loss_reduction UpperCAmelCase : Optional[int] =ctc_zero_infinity # sequence classification UpperCAmelCase : Union[str, Any] =use_weighted_layer_sum UpperCAmelCase : int =classifier_proj_size @property def UpperCAmelCase__ ( self ) -> List[Any]: '''simple docstring''' return functools.reduce(operator.mul , self.conv_stride , 1 )
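# A minimal hedged sketch instantiating the configuration above with its
# defaults; `inputs_to_logits_ratio` is assumed to be the name of the final
# property, which equals the product of `conv_stride` (5 * 2**6 = 320 here).
from transformers import SEWDConfig, SEWDModel

if __name__ == "__main__":
    config = SEWDConfig()  # all defaults, e.g. hidden_size=768, 12 layers
    print(config.inputs_to_logits_ratio)  # -> 320 audio samples per logit frame
    model = SEWDModel(config)  # randomly initialized, useful only for shape checks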
import argparse import os import evaluate import torch from datasets import load_dataset from torch.optim import AdamW from torch.utils.data import DataLoader from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed from accelerate import Accelerator, DistributedType from accelerate.local_sgd import LocalSGD ######################################################################## # This is a fully working simple example to use Accelerate # with LocalSGD, which is a method to synchronize model # parameters every K batches. It is different, but complementary # to gradient accumulation. # # This example trains a Bert base model on GLUE MRPC # in any of the following settings (with the same script): # - single CPU or single GPU # - multi GPUS (using PyTorch distributed mode) # - (multi) TPUs # - fp16 (mixed-precision) or fp32 (normal precision) # # To run it in each of these various modes, follow the instructions # in the readme for examples: # https://github.com/huggingface/accelerate/tree/main/examples # ######################################################################## __snake_case = 16 __snake_case = 32 def lowerCAmelCase_ ( __lowerCAmelCase , __lowerCAmelCase = 16 )-> List[str]: '''simple docstring''' UpperCAmelCase : int =AutoTokenizer.from_pretrained('''bert-base-cased''' ) UpperCAmelCase : List[str] =load_dataset('''glue''' , '''mrpc''' ) def tokenize_function(__lowerCAmelCase ): # max_length=None => use the model max length (it's actually the default) UpperCAmelCase : Optional[Any] =tokenizer(examples['''sentence1'''] , examples['''sentence2'''] , truncation=__lowerCAmelCase , max_length=__lowerCAmelCase ) return outputs # Apply the method we just defined to all the examples in all the splits of the dataset # starting with the main process first: with accelerator.main_process_first(): UpperCAmelCase : Tuple =datasets.map( __lowerCAmelCase , batched=__lowerCAmelCase , remove_columns=['''idx''', '''sentence1''', '''sentence2'''] , ) # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the # transformers library UpperCAmelCase : List[str] =tokenized_datasets.rename_column('''label''' , '''labels''' ) def collate_fn(__lowerCAmelCase ): # On TPU it's best to pad everything to the same length or training will be very slow. UpperCAmelCase : Tuple =1_28 if accelerator.distributed_type == DistributedType.TPU else None # When using mixed precision we want round multiples of 8/16 if accelerator.mixed_precision == "fp8": UpperCAmelCase : Union[str, Any] =16 elif accelerator.mixed_precision != "no": UpperCAmelCase : Any =8 else: UpperCAmelCase : int =None return tokenizer.pad( __lowerCAmelCase , padding='''longest''' , max_length=__lowerCAmelCase , pad_to_multiple_of=__lowerCAmelCase , return_tensors='''pt''' , ) # Instantiate dataloaders. 
UpperCAmelCase : Optional[Any] =DataLoader( tokenized_datasets['''train'''] , shuffle=__lowerCAmelCase , collate_fn=__lowerCAmelCase , batch_size=__lowerCAmelCase ) UpperCAmelCase : str =DataLoader( tokenized_datasets['''validation'''] , shuffle=__lowerCAmelCase , collate_fn=__lowerCAmelCase , batch_size=__lowerCAmelCase ) return train_dataloader, eval_dataloader # For testing only if os.environ.get('''TESTING_MOCKED_DATALOADERS''', None) == "1": from accelerate.test_utils.training import mocked_dataloaders __snake_case = mocked_dataloaders # noqa: F811 def lowerCAmelCase_ ( __lowerCAmelCase , __lowerCAmelCase )-> Optional[int]: '''simple docstring''' if os.environ.get('''TESTING_MOCKED_DATALOADERS''' , __lowerCAmelCase ) == "1": UpperCAmelCase : Dict =2 # New Code # UpperCAmelCase : Any =int(args.gradient_accumulation_steps ) UpperCAmelCase : Tuple =int(args.local_sgd_steps ) # Initialize accelerator UpperCAmelCase : List[str] =Accelerator( cpu=args.cpu , mixed_precision=args.mixed_precision , gradient_accumulation_steps=__lowerCAmelCase ) if accelerator.distributed_type not in [DistributedType.NO, DistributedType.MULTI_CPU, DistributedType.MULTI_GPU]: raise NotImplementedError('''LocalSGD is supported only for CPUs and GPUs (no DeepSpeed or MegatronLM)''' ) # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs UpperCAmelCase : Optional[int] =config['''lr'''] UpperCAmelCase : int =int(config['''num_epochs'''] ) UpperCAmelCase : int =int(config['''seed'''] ) UpperCAmelCase : Any =int(config['''batch_size'''] ) UpperCAmelCase : Union[str, Any] =evaluate.load('''glue''' , '''mrpc''' ) set_seed(__lowerCAmelCase ) UpperCAmelCase , UpperCAmelCase : Optional[int] =get_dataloaders(__lowerCAmelCase , __lowerCAmelCase ) # Instantiate the model (we build the model here so that the seed also control new weights initialization) UpperCAmelCase : str =AutoModelForSequenceClassification.from_pretrained('''bert-base-cased''' , return_dict=__lowerCAmelCase ) # We could avoid this line since the accelerator is set with `device_placement=True` (default value). # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that). UpperCAmelCase : Union[str, Any] =model.to(accelerator.device ) # Instantiate optimizer UpperCAmelCase : Optional[int] =AdamW(params=model.parameters() , lr=__lowerCAmelCase ) # Instantiate scheduler UpperCAmelCase : List[str] =get_linear_schedule_with_warmup( optimizer=__lowerCAmelCase , num_warmup_steps=1_00 , num_training_steps=(len(__lowerCAmelCase ) * num_epochs) , ) # Prepare everything # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the # prepare method. UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase : Any =accelerator.prepare( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) # Now we train the model for epoch in range(__lowerCAmelCase ): model.train() with LocalSGD( accelerator=__lowerCAmelCase , model=__lowerCAmelCase , local_sgd_steps=__lowerCAmelCase , enabled=local_sgd_steps is not None ) as local_sgd: for step, batch in enumerate(__lowerCAmelCase ): # We could avoid this line since we set the accelerator with `device_placement=True`. 
batch.to(accelerator.device ) # New code # # We use the new `accumulate` context manager to perform gradient accumulation # We also currently do not support TPUs nor advise it as bugs were found on the XLA side when running our tests. with accelerator.accumulate(__lowerCAmelCase ): UpperCAmelCase : List[str] =model(**__lowerCAmelCase ) UpperCAmelCase : List[str] =output.loss accelerator.backward(__lowerCAmelCase ) optimizer.step() lr_scheduler.step() optimizer.zero_grad() # LocalSGD-specific line local_sgd.step() model.eval() for step, batch in enumerate(__lowerCAmelCase ): # We could avoid this line since we set the accelerator with `device_placement=True`. batch.to(accelerator.device ) with torch.no_grad(): UpperCAmelCase : Union[str, Any] =model(**__lowerCAmelCase ) UpperCAmelCase : int =outputs.logits.argmax(dim=-1 ) UpperCAmelCase , UpperCAmelCase : int =accelerator.gather_for_metrics((predictions, batch['''labels''']) ) metric.add_batch( predictions=__lowerCAmelCase , references=__lowerCAmelCase , ) UpperCAmelCase : str =metric.compute() # Use accelerator.print to print only on the main process. accelerator.print(f'''epoch {epoch}:''' , __lowerCAmelCase ) def lowerCAmelCase_ ( )-> Optional[Any]: '''simple docstring''' UpperCAmelCase : Optional[int] =argparse.ArgumentParser(description='''Simple example of training script.''' ) parser.add_argument( '''--mixed_precision''' , type=__lowerCAmelCase , default=__lowerCAmelCase , choices=['''no''', '''fp16''', '''bf16''', '''fp8'''] , help='''Whether to use mixed precision. Choose''' '''between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10.''' '''and an Nvidia Ampere GPU.''' , ) # New Code # parser.add_argument( '''--gradient_accumulation_steps''' , type=__lowerCAmelCase , default=1 , help='''The number of minibatches to be ran before gradients are accumulated.''' , ) parser.add_argument( '''--local_sgd_steps''' , type=__lowerCAmelCase , default=8 , help='''Number of local SGD steps or None to disable local SGD''' ) parser.add_argument('''--cpu''' , action='''store_true''' , help='''If passed, will train on the CPU.''' ) UpperCAmelCase : Union[str, Any] =parser.parse_args() UpperCAmelCase : Optional[int] ={'''lr''': 2e-5, '''num_epochs''': 3, '''seed''': 42, '''batch_size''': 16} training_function(__lowerCAmelCase , __lowerCAmelCase ) if __name__ == "__main__": main()
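# A hedged launch example for the LocalSGD training script above; the script
# path is illustrative, the flags come from its argument parser.
#
#   accelerate launch local_sgd.py \
#       --mixed_precision fp16 \
#       --gradient_accumulation_steps 2 \
#       --local_sgd_steps 8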
import os
from argparse import ArgumentParser
from typing import List

import torch.utils.data
from datasets import Dataset, IterableDataset
from datasets.distributed import split_dataset_by_node

NUM_SHARDS = 4
NUM_ITEMS_PER_SHARD = 3


class FailedTestError(RuntimeError):
    pass


def gen(shards: List[str]):
    for shard in shards:
        for i in range(NUM_ITEMS_PER_SHARD):
            yield {"i": i, "shard": shard}


def main():
    rank = int(os.environ["RANK"])
    world_size = int(os.environ["WORLD_SIZE"])

    parser = ArgumentParser()
    parser.add_argument("--streaming", type=bool)
    parser.add_argument("--local_rank", type=int)
    parser.add_argument("--num_workers", type=int, default=0)
    args = parser.parse_args()
    streaming = args.streaming
    num_workers = args.num_workers

    gen_kwargs = {"shards": [f"shard_{shard_idx}" for shard_idx in range(NUM_SHARDS)]}
    ds = IterableDataset.from_generator(gen, gen_kwargs=gen_kwargs)
    if not streaming:
        ds = Dataset.from_list(list(ds))

    ds = split_dataset_by_node(ds, rank=rank, world_size=world_size)
    dataloader = torch.utils.data.DataLoader(ds, num_workers=num_workers)

    full_size = NUM_SHARDS * NUM_ITEMS_PER_SHARD
    expected_local_size = full_size // world_size
    expected_local_size += int(rank < (full_size % world_size))

    local_size = sum(1 for _ in dataloader)
    if local_size != expected_local_size:
        raise FailedTestError(f"local_size {local_size} != expected_local_size {expected_local_size}")


if __name__ == "__main__":
    main()
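# A hedged launch example for the distributed test above; torchrun sets the
# RANK and WORLD_SIZE environment variables the script reads. The script name
# and process count are illustrative.
#
#   torchrun --nproc_per_node=2 test_split_dataset_by_node.py --streaming True --num_workers 2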
RED = 0  # The first color of the flag.
WHITE = 1  # The second color of the flag.
BLUE = 2  # The third color of the flag.
colors = (RED, WHITE, BLUE)


def dutch_national_flag_sort(sequence: list) -> list:
    """Sort a sequence of 0s, 1s and 2s in place in a single pass."""
    if not sequence:
        return []
    if len(sequence) == 1:
        return list(sequence)
    low = 0
    high = len(sequence) - 1
    mid = 0
    while mid <= high:
        if sequence[mid] == colors[0]:
            sequence[low], sequence[mid] = sequence[mid], sequence[low]
            low += 1
            mid += 1
        elif sequence[mid] == colors[1]:
            mid += 1
        elif sequence[mid] == colors[2]:
            sequence[mid], sequence[high] = sequence[high], sequence[mid]
            high -= 1
        else:
            msg = f"The elements inside the sequence must contain only {colors} values"
            raise ValueError(msg)
    return sequence


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    user_input = input("Enter numbers separated by commas:\n").strip()
    unsorted = [int(item.strip()) for item in user_input.split(",")]
    print(f"{dutch_national_flag_sort(unsorted)}")
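# A quick hedged sanity check for the sort above; the input list is illustrative.
assert dutch_national_flag_sort([2, 0, 1, 0, 2, 1]) == [0, 0, 1, 1, 2, 2]
assert dutch_national_flag_sort([]) == []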
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_flax_available,
    is_tf_available,
    is_tokenizers_available,
    is_torch_available,
)


_import_structure = {"configuration_opt": ["OPT_PRETRAINED_CONFIG_ARCHIVE_MAP", "OPTConfig"]}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_opt"] = [
        "OPT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "OPTForCausalLM",
        "OPTModel",
        "OPTPreTrainedModel",
        "OPTForSequenceClassification",
        "OPTForQuestionAnswering",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_opt"] = ["TFOPTForCausalLM", "TFOPTModel", "TFOPTPreTrainedModel"]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_opt"] = [
        "FlaxOPTForCausalLM",
        "FlaxOPTModel",
        "FlaxOPTPreTrainedModel",
    ]


if TYPE_CHECKING:
    from .configuration_opt import OPT_PRETRAINED_CONFIG_ARCHIVE_MAP, OPTConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_opt import (
            OPT_PRETRAINED_MODEL_ARCHIVE_LIST,
            OPTForCausalLM,
            OPTForQuestionAnswering,
            OPTForSequenceClassification,
            OPTModel,
            OPTPreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_opt import TFOPTForCausalLM, TFOPTModel, TFOPTPreTrainedModel

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_opt import FlaxOPTForCausalLM, FlaxOPTModel, FlaxOPTPreTrainedModel

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
import argparse
import json
from pathlib import Path

import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image

from transformers import (
    MobileViTConfig,
    MobileViTForImageClassification,
    MobileViTForSemanticSegmentation,
    MobileViTImageProcessor,
)
from transformers.utils import logging


logging.set_verbosity_info()
logger = logging.get_logger(__name__)


def get_mobilevit_config(mobilevit_name):
    config = MobileViTConfig()

    # size of the architecture
    if "mobilevit_s" in mobilevit_name:
        config.hidden_sizes = [144, 192, 240]
        config.neck_hidden_sizes = [16, 32, 64, 96, 128, 160, 640]
    elif "mobilevit_xs" in mobilevit_name:
        config.hidden_sizes = [96, 120, 144]
        config.neck_hidden_sizes = [16, 32, 48, 64, 80, 96, 384]
    elif "mobilevit_xxs" in mobilevit_name:
        config.hidden_sizes = [64, 80, 96]
        config.neck_hidden_sizes = [16, 16, 24, 48, 64, 80, 320]
        config.hidden_dropout_prob = 0.05
        config.expand_ratio = 2.0

    if mobilevit_name.startswith("deeplabv3_"):
        config.image_size = 512
        config.output_stride = 16
        config.num_labels = 21
        filename = "pascal-voc-id2label.json"
    else:
        config.num_labels = 1000
        filename = "imagenet-1k-id2label.json"

    repo_id = "huggingface/label-files"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}

    return config


def rename_key(name, base_model=False):
    for i in range(1, 6):
        if f"layer_{i}." in name:
            name = name.replace(f"layer_{i}.", f"encoder.layer.{i - 1}.")

    if "conv_1." in name:
        name = name.replace("conv_1.", "conv_stem.")
    if ".block." in name:
        name = name.replace(".block.", ".")
    if "exp_1x1" in name:
        name = name.replace("exp_1x1", "expand_1x1")
    if "red_1x1" in name:
        name = name.replace("red_1x1", "reduce_1x1")
    if ".local_rep.conv_3x3." in name:
        name = name.replace(".local_rep.conv_3x3.", ".conv_kxk.")
    if ".local_rep.conv_1x1." in name:
        name = name.replace(".local_rep.conv_1x1.", ".conv_1x1.")
    if ".norm." in name:
        name = name.replace(".norm.", ".normalization.")
    if ".conv." in name:
        name = name.replace(".conv.", ".convolution.")
    if ".conv_proj." in name:
        name = name.replace(".conv_proj.", ".conv_projection.")

    # transformer blocks
    for i in range(0, 2):
        for j in range(0, 4):
            if f".{i}.{j}." in name:
                name = name.replace(f".{i}.{j}.", f".{i}.layer.{j}.")

    # mobilenetv2 blocks
    for i in range(2, 6):
        for j in range(0, 4):
            if f".{i}.{j}." in name:
                name = name.replace(f".{i}.{j}.", f".{i}.")
                if "expand_1x1" in name:
                    name = name.replace("expand_1x1", "downsampling_layer.expand_1x1")
                if "conv_3x3" in name:
                    name = name.replace("conv_3x3", "downsampling_layer.conv_3x3")
                if "reduce_1x1" in name:
                    name = name.replace("reduce_1x1", "downsampling_layer.reduce_1x1")

    for i in range(2, 5):
        if f".global_rep.{i}.weight" in name:
            name = name.replace(f".global_rep.{i}.weight", ".layernorm.weight")
        if f".global_rep.{i}.bias" in name:
            name = name.replace(f".global_rep.{i}.bias", ".layernorm.bias")

    if ".global_rep." in name:
        name = name.replace(".global_rep.", ".transformer.")
    if ".pre_norm_mha.0." in name:
        name = name.replace(".pre_norm_mha.0.", ".layernorm_before.")
    if ".pre_norm_mha.1.out_proj." in name:
        name = name.replace(".pre_norm_mha.1.out_proj.", ".attention.output.dense.")
    if ".pre_norm_ffn.0." in name:
        name = name.replace(".pre_norm_ffn.0.", ".layernorm_after.")
    if ".pre_norm_ffn.1." in name:
        name = name.replace(".pre_norm_ffn.1.", ".intermediate.dense.")
    if ".pre_norm_ffn.4." in name:
        name = name.replace(".pre_norm_ffn.4.", ".output.dense.")
    if ".transformer." in name:
        name = name.replace(".transformer.", ".transformer.layer.")

    if ".aspp_layer." in name:
        name = name.replace(".aspp_layer.", ".")
    if ".aspp_pool." in name:
        name = name.replace(".aspp_pool.", ".")
    if "seg_head." in name:
        name = name.replace("seg_head.", "segmentation_head.")
    if "segmentation_head.classifier.classifier." in name:
        name = name.replace("segmentation_head.classifier.classifier.", "segmentation_head.classifier.")

    if "classifier.fc." in name:
        name = name.replace("classifier.fc.", "classifier.")
    elif (not base_model) and ("segmentation_head." not in name):
        name = "mobilevit." + name

    return name


def convert_state_dict(orig_state_dict, model, base_model=False):
    if base_model:
        model_prefix = ""
    else:
        model_prefix = "mobilevit."

    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)

        if key[:8] == "encoder.":
            key = key[8:]

        if "qkv" in key:
            # split the fused query/key/value projection into separate tensors
            key_split = key.split(".")
            layer_num = int(key_split[0][6:]) - 1
            transformer_num = int(key_split[3])
            layer = model.get_submodule(f"{model_prefix}encoder.layer.{layer_num}")
            dim = layer.transformer.layer[transformer_num].attention.attention.all_head_size
            prefix = (
                f"{model_prefix}encoder.layer.{layer_num}.transformer.layer.{transformer_num}.attention.attention."
            )
            if "weight" in key:
                orig_state_dict[prefix + "query.weight"] = val[:dim, :]
                orig_state_dict[prefix + "key.weight"] = val[dim : dim * 2, :]
                orig_state_dict[prefix + "value.weight"] = val[-dim:, :]
            else:
                orig_state_dict[prefix + "query.bias"] = val[:dim]
                orig_state_dict[prefix + "key.bias"] = val[dim : dim * 2]
                orig_state_dict[prefix + "value.bias"] = val[-dim:]
        else:
            orig_state_dict[rename_key(key, base_model)] = val

    return orig_state_dict


def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im


@torch.no_grad()
def convert_mobilevit_checkpoint(mobilevit_name, checkpoint_path, pytorch_dump_folder_path, push_to_hub=False):
    """
    Copy/paste/tweak the original model's weights into our MobileViT structure.
    """
    config = get_mobilevit_config(mobilevit_name)

    # load original state_dict
    state_dict = torch.load(checkpoint_path, map_location="cpu")

    # load 🤗 model
    if mobilevit_name.startswith("deeplabv3_"):
        model = MobileViTForSemanticSegmentation(config).eval()
    else:
        model = MobileViTForImageClassification(config).eval()

    new_state_dict = convert_state_dict(state_dict, model)
    model.load_state_dict(new_state_dict)

    # Check outputs on an image, prepared by MobileViTImageProcessor
    image_processor = MobileViTImageProcessor(crop_size=config.image_size, size=config.image_size + 32)
    encoding = image_processor(images=prepare_img(), return_tensors="pt")
    outputs = model(**encoding)
    logits = outputs.logits

    if mobilevit_name.startswith("deeplabv3_"):
        assert logits.shape == (1, 21, 32, 32)

        if mobilevit_name == "deeplabv3_mobilevit_s":
            expected_logits = torch.tensor(
                [
                    [[6.2065, 6.1292, 6.2070], [6.1079, 6.1254, 6.1747], [6.0042, 6.1071, 6.1034]],
                    [[-6.9253, -6.8653, -7.0398], [-7.3218, -7.3983, -7.3670], [-7.1961, -7.2482, -7.1569]],
                    [[-4.4723, -4.4348, -4.3769], [-5.3629, -5.4632, -5.4598], [-5.1587, -5.3402, -5.5059]],
                ]
            )
        elif mobilevit_name == "deeplabv3_mobilevit_xs":
            expected_logits = torch.tensor(
                [
                    [[5.4449, 5.5733, 5.6314], [5.1815, 5.3930, 5.5963], [5.1656, 5.4333, 5.4853]],
                    [[-9.4423, -9.7766, -9.6714], [-9.1581, -9.5720, -9.5519], [-9.1006, -9.6458, -9.5703]],
                    [[-7.7721, -7.3716, -7.1583], [-8.4599, -8.0624, -7.7944], [-8.4172, -7.8366, -7.5025]],
                ]
            )
        elif mobilevit_name == "deeplabv3_mobilevit_xxs":
            expected_logits = torch.tensor(
                [
                    [[6.9811, 6.9743, 7.3123], [7.1777, 7.1931, 7.3938], [7.5633, 7.8050, 7.8901]],
                    [[-10.5536, -10.2332, -10.2924], [-10.2336, -9.8624, -9.5964], [-10.8840, -10.8158, -10.6659]],
                    [[-3.4938, -3.0631, -2.8620], [-3.4205, -2.8135, -2.6875], [-3.4179, -2.7945, -2.8750]],
                ]
            )
        else:
            raise ValueError(f"Unknown mobilevit_name: {mobilevit_name}")

        assert torch.allclose(logits[0, :3, :3, :3], expected_logits, atol=1e-4)
    else:
        assert logits.shape == (1, 1000)

        if mobilevit_name == "mobilevit_s":
            expected_logits = torch.tensor([-0.9866, 0.2392, -1.1241])
        elif mobilevit_name == "mobilevit_xs":
            expected_logits = torch.tensor([-2.4761, -0.9399, -1.9587])
        elif mobilevit_name == "mobilevit_xxs":
            expected_logits = torch.tensor([-1.9364, -1.2327, -0.4653])
        else:
            raise ValueError(f"Unknown mobilevit_name: {mobilevit_name}")

        assert torch.allclose(logits[0, :3], expected_logits, atol=1e-4)

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model {mobilevit_name} to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        model_mapping = {
            "mobilevit_s": "mobilevit-small",
            "mobilevit_xs": "mobilevit-x-small",
            "mobilevit_xxs": "mobilevit-xx-small",
            "deeplabv3_mobilevit_s": "deeplabv3-mobilevit-small",
            "deeplabv3_mobilevit_xs": "deeplabv3-mobilevit-x-small",
            "deeplabv3_mobilevit_xxs": "deeplabv3-mobilevit-xx-small",
        }

        print("Pushing to the hub...")
        model_name = model_mapping[mobilevit_name]
        image_processor.push_to_hub(model_name, organization="apple")
        model.push_to_hub(model_name, organization="apple")


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--mobilevit_name",
        default="mobilevit_s",
        type=str,
        help=(
            "Name of the MobileViT model you'd like to convert. Should be one of 'mobilevit_s', 'mobilevit_xs',"
            " 'mobilevit_xxs', 'deeplabv3_mobilevit_s', 'deeplabv3_mobilevit_xs', 'deeplabv3_mobilevit_xxs'."
        ),
    )
    parser.add_argument(
        "--checkpoint_path", required=True, type=str, help="Path to the original state dict (.pt file)."
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", required=True, type=str, help="Path to the output PyTorch model directory."
    )
    parser.add_argument(
        "--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub."
    )

    args = parser.parse_args()
    convert_mobilevit_checkpoint(
        args.mobilevit_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub
    )
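
A minimal usage sketch for the converter above; the checkpoint path and output directory are hypothetical placeholders, not files shipped with the script.

# Hypothetical call into the converter defined above; "./mobilevit_s.pt" and
# "./mobilevit-small" are placeholder paths you must supply yourself.
convert_mobilevit_checkpoint(
    mobilevit_name="mobilevit_s",
    checkpoint_path="./mobilevit_s.pt",
    pytorch_dump_folder_path="./mobilevit-small",
    push_to_hub=False,
)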
import tempfile
import unittest

import numpy as np

import transformers
from transformers import GPT2Tokenizer, GPTJConfig, is_flax_available, is_torch_available
from transformers.testing_utils import is_pt_flax_cross_test, require_flax, tooslow

from ...generation.test_flax_utils import FlaxGenerationTesterMixin
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask


if is_flax_available():
    import jax
    import jax.numpy as jnp

    from transformers.modeling_flax_pytorch_utils import (
        convert_pytorch_state_dict_to_flax,
        load_flax_weights_in_pytorch_model,
    )
    from transformers.models.gptj.modeling_flax_gptj import FlaxGPTJForCausalLM, FlaxGPTJModel

if is_torch_available():
    import torch


class FlaxGPTJModelTester:
    def __init__(
        self,
        parent,
        batch_size=14,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=False,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        rotary_dim=4,
        num_hidden_layers=4,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        initializer_range=0.02,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.rotary_dim = rotary_dim
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.scope = None
        self.bos_token_id = vocab_size - 1
        self.eos_token_id = vocab_size - 1
        self.pad_token_id = vocab_size - 1

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        config = GPTJConfig(
            vocab_size=self.vocab_size,
            n_embd=self.hidden_size,
            n_layer=self.num_hidden_layers,
            n_head=self.num_attention_heads,
            n_positions=self.max_position_embeddings,
            use_cache=False,
            bos_token_id=self.bos_token_id,
            eos_token_id=self.eos_token_id,
            pad_token_id=self.pad_token_id,
            rotary_dim=self.rotary_dim,
        )

        return (config, input_ids, input_mask)

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, attention_mask = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": attention_mask}
        return config, inputs_dict

    def check_use_cache_forward(self, model_class_name, config, input_ids, attention_mask):
        max_decoder_length = 20
        model = model_class_name(config)

        past_key_values = model.init_cache(input_ids.shape[0], max_decoder_length)
        attention_mask = jnp.ones((input_ids.shape[0], max_decoder_length), dtype="i4")

        position_ids = jnp.broadcast_to(
            jnp.arange(input_ids.shape[-1] - 1)[None, :], (input_ids.shape[0], input_ids.shape[-1] - 1)
        )
        outputs_cache = model(
            input_ids[:, :-1],
            attention_mask=attention_mask,
            past_key_values=past_key_values,
            position_ids=position_ids,
        )

        position_ids = jnp.array(input_ids.shape[0] * [[input_ids.shape[-1] - 1]], dtype="i4")
        outputs_cache_next = model(
            input_ids[:, -1:],
            attention_mask=attention_mask,
            past_key_values=outputs_cache.past_key_values,
            position_ids=position_ids,
        )

        outputs = model(input_ids)

        diff = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5])))
        self.parent.assertTrue(diff < 1e-3, msg=f"Max diff is {diff}")

    def check_use_cache_forward_with_attn_mask(self, model_class_name, config, input_ids, attention_mask):
        max_decoder_length = 20
        model = model_class_name(config)

        attention_mask_cache = jnp.concatenate(
            [attention_mask, jnp.zeros((attention_mask.shape[0], max_decoder_length - attention_mask.shape[1]))],
            axis=-1,
        )

        past_key_values = model.init_cache(input_ids.shape[0], max_decoder_length)
        position_ids = jnp.broadcast_to(
            jnp.arange(input_ids.shape[-1] - 1)[None, :], (input_ids.shape[0], input_ids.shape[-1] - 1)
        )

        outputs_cache = model(
            input_ids[:, :-1],
            attention_mask=attention_mask_cache,
            past_key_values=past_key_values,
            position_ids=position_ids,
        )
        position_ids = jnp.array(input_ids.shape[0] * [[input_ids.shape[-1] - 1]], dtype="i4")
        outputs_cache_next = model(
            input_ids[:, -1:],
            past_key_values=outputs_cache.past_key_values,
            attention_mask=attention_mask_cache,
            position_ids=position_ids,
        )

        outputs = model(input_ids, attention_mask=attention_mask)

        diff = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5])))
        self.parent.assertTrue(diff < 1e-3, msg=f"Max diff is {diff}")


@require_flax
class FlaxGPTJModelTest(FlaxModelTesterMixin, FlaxGenerationTesterMixin, unittest.TestCase):
    all_model_classes = (FlaxGPTJModel, FlaxGPTJForCausalLM) if is_flax_available() else ()
    all_generative_model_classes = (FlaxGPTJForCausalLM,) if is_flax_available() else ()

    def setUp(self):
        self.model_tester = FlaxGPTJModelTester(self)

    def test_use_cache_forward(self):
        for model_class_name in self.all_model_classes:
            config, input_ids, attention_mask = self.model_tester.prepare_config_and_inputs()
            self.model_tester.check_use_cache_forward(model_class_name, config, input_ids, attention_mask)

    def test_use_cache_forward_with_attn_mask(self):
        for model_class_name in self.all_model_classes:
            config, input_ids, attention_mask = self.model_tester.prepare_config_and_inputs()
            self.model_tester.check_use_cache_forward_with_attn_mask(
                model_class_name, config, input_ids, attention_mask
            )

    @tooslow
    def test_batch_generation(self):
        tokenizer = GPT2Tokenizer.from_pretrained("gpt2", pad_token="<|endoftext|>", padding_side="left")
        inputs = tokenizer(["Hello this is a long string", "Hey"], return_tensors="np", padding=True, truncation=True)

        model = FlaxGPTJForCausalLM.from_pretrained("EleutherAI/gpt-j-6B")
        model.do_sample = False
        model.config.pad_token_id = model.config.eos_token_id

        jit_generate = jax.jit(model.generate)

        output_sequences = jit_generate(
            inputs["input_ids"], attention_mask=inputs["attention_mask"], pad_token_id=tokenizer.pad_token_id
        ).sequences

        output_string = tokenizer.batch_decode(output_sequences, skip_special_tokens=True)

        expected_string = [
            "Hello this is a long string of text.\n\nI'm trying to get the text of the",
            "Hey, I'm a little late to the party. I'm going to",
        ]

        self.assertListEqual(output_string, expected_string)

    @is_pt_flax_cross_test
    def test_equivalence_pt_to_flax(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                # prepare inputs
                prepared_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
                pt_inputs = {k: torch.tensor(v.tolist()) for k, v in prepared_inputs_dict.items()}

                # load corresponding PyTorch class
                pt_model_class_name = model_class.__name__[4:]  # Skip the "Flax" at the beginning
                pt_model_class = getattr(transformers, pt_model_class_name)

                batch_size, seq_length = pt_inputs["input_ids"].shape
                rnd_start_indices = np.random.randint(0, seq_length - 1, size=(batch_size,))
                for batch_idx, start_index in enumerate(rnd_start_indices):
                    pt_inputs["attention_mask"][batch_idx, :start_index] = 0
                    pt_inputs["attention_mask"][batch_idx, start_index:] = 1
                    prepared_inputs_dict["attention_mask"][batch_idx, :start_index] = 0
                    prepared_inputs_dict["attention_mask"][batch_idx, start_index:] = 1
                pt_model = pt_model_class(config).eval()
                fx_model = model_class(config, dtype=jnp.float32)

                fx_state = convert_pytorch_state_dict_to_flax(pt_model.state_dict(), fx_model)
                fx_model.params = fx_state

                with torch.no_grad():
                    pt_outputs = pt_model(**pt_inputs).to_tuple()

                fx_outputs = fx_model(**prepared_inputs_dict).to_tuple()
                self.assertEqual(len(fx_outputs), len(pt_outputs), "Output lengths differ between Flax and PyTorch")
                for fx_output, pt_output in zip(fx_outputs, pt_outputs):
                    self.assert_almost_equals(fx_output[:, -1], pt_output[:, -1].numpy(), 4e-2)

                with tempfile.TemporaryDirectory() as tmpdirname:
                    pt_model.save_pretrained(tmpdirname)
                    fx_model_loaded = model_class.from_pretrained(tmpdirname, from_pt=True)

                fx_outputs_loaded = fx_model_loaded(**prepared_inputs_dict).to_tuple()
                self.assertEqual(
                    len(fx_outputs_loaded), len(pt_outputs), "Output lengths differ between Flax and PyTorch"
                )
                for fx_output_loaded, pt_output in zip(fx_outputs_loaded, pt_outputs):
                    self.assert_almost_equals(fx_output_loaded[:, -1], pt_output[:, -1].numpy(), 4e-2)

    @is_pt_flax_cross_test
    def test_equivalence_flax_to_pt(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                # prepare inputs
                prepared_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
                pt_inputs = {k: torch.tensor(v.tolist()) for k, v in prepared_inputs_dict.items()}

                # load corresponding PyTorch class
                pt_model_class_name = model_class.__name__[4:]  # Skip the "Flax" at the beginning
                pt_model_class = getattr(transformers, pt_model_class_name)

                pt_model = pt_model_class(config).eval()
                fx_model = model_class(config, dtype=jnp.float32)

                pt_model = load_flax_weights_in_pytorch_model(pt_model, fx_model.params)

                batch_size, seq_length = pt_inputs["input_ids"].shape
                rnd_start_indices = np.random.randint(0, seq_length - 1, size=(batch_size,))
                for batch_idx, start_index in enumerate(rnd_start_indices):
                    pt_inputs["attention_mask"][batch_idx, :start_index] = 0
                    pt_inputs["attention_mask"][batch_idx, start_index:] = 1
                    prepared_inputs_dict["attention_mask"][batch_idx, :start_index] = 0
                    prepared_inputs_dict["attention_mask"][batch_idx, start_index:] = 1

                # make sure weights are tied in PyTorch
                pt_model.tie_weights()

                with torch.no_grad():
                    pt_outputs = pt_model(**pt_inputs).to_tuple()

                fx_outputs = fx_model(**prepared_inputs_dict).to_tuple()
                self.assertEqual(len(fx_outputs), len(pt_outputs), "Output lengths differ between Flax and PyTorch")
                for fx_output, pt_output in zip(fx_outputs, pt_outputs):
                    self.assert_almost_equals(fx_output[:, -1], pt_output[:, -1].numpy(), 4e-2)

                with tempfile.TemporaryDirectory() as tmpdirname:
                    fx_model.save_pretrained(tmpdirname)
                    pt_model_loaded = pt_model_class.from_pretrained(tmpdirname, from_flax=True)

                with torch.no_grad():
                    pt_outputs_loaded = pt_model_loaded(**pt_inputs).to_tuple()

                self.assertEqual(
                    len(fx_outputs), len(pt_outputs_loaded), "Output lengths differ between Flax and PyTorch"
                )
                for fx_output, pt_output in zip(fx_outputs, pt_outputs_loaded):
                    self.assert_almost_equals(fx_output[:, -1], pt_output[:, -1].numpy(), 4e-2)

    @tooslow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("EleutherAI/gpt-j-6B")
            outputs = model(np.ones((1, 1)))
            self.assertIsNotNone(outputs)
def mf_knapsack(i, wt, val, j):
    """
    Memory-function (top-down) knapsack: only the needed subproblems are
    computed, using the global table `f` pre-filled with -1.
    """
    global f  # a global dp table for knapsack
    if f[i][j] < 0:
        if j < wt[i - 1]:
            val = mf_knapsack(i - 1, wt, val, j)
        else:
            val = max(
                mf_knapsack(i - 1, wt, val, j),
                mf_knapsack(i - 1, wt, val, j - wt[i - 1]) + val[i - 1],
            )
        f[i][j] = val
    return f[i][j]


def knapsack(w, wt, val, n):
    """Bottom-up 0/1 knapsack; returns the optimal value and the full dp table."""
    dp = [[0] * (w + 1) for _ in range(n + 1)]

    for i in range(1, n + 1):
        for w_ in range(1, w + 1):
            if wt[i - 1] <= w_:
                dp[i][w_] = max(val[i - 1] + dp[i - 1][w_ - wt[i - 1]], dp[i - 1][w_])
            else:
                dp[i][w_] = dp[i - 1][w_]

    return dp[n][w], dp


def knapsack_with_example_solution(w, wt, val):
    """
    Solves the integer-weights knapsack problem and returns one of the several
    possible optimal subsets.
    """
    if not (isinstance(wt, (list, tuple)) and isinstance(val, (list, tuple))):
        raise ValueError("Both the weights and values vectors must be either lists or tuples")

    num_items = len(wt)
    if num_items != len(val):
        msg = (
            "The number of weights must be the same as the number of values.\n"
            f"But got {num_items} weights and {len(val)} values"
        )
        raise ValueError(msg)
    for i in range(num_items):
        if not isinstance(wt[i], int):
            msg = f"All weights must be integers but got weight of type {type(wt[i])} at index {i}"
            raise TypeError(msg)

    optimal_val, dp_table = knapsack(w, wt, val, num_items)
    example_optional_set: set = set()
    _construct_solution(dp_table, wt, num_items, w, example_optional_set)

    return optimal_val, example_optional_set


def _construct_solution(dp, wt, i, j, optimal_set):
    # for item i at capacity j to be part of an optimal subset, the optimal
    # value at (i, j) must differ from the optimal value at (i - 1, j)
    if i > 0 and j > 0:
        if dp[i - 1][j] == dp[i][j]:
            _construct_solution(dp, wt, i - 1, j, optimal_set)
        else:
            optimal_set.add(i)
            _construct_solution(dp, wt, i - 1, j - wt[i - 1], optimal_set)


if __name__ == "__main__":
    val = [3, 2, 4, 4]
    wt = [4, 3, 2, 3]
    n = 4
    w = 6
    f = [[0] * (w + 1)] + [[0] + [-1] * (w + 1) for _ in range(n + 1)]
    optimal_solution, _ = knapsack(w, wt, val, n)
    print(optimal_solution)
    print(mf_knapsack(n, wt, val, w))  # switched the n and w

    # testing the dynamic programming problem with example
    # the optimal subset for the above example are items 3 and 4
    optimal_solution, optimal_subset = knapsack_with_example_solution(w, wt, val)
    assert optimal_solution == 8
    assert optimal_subset == {3, 4}
    print("optimal_value = ", optimal_solution)
    print("An optimal subset corresponding to the optimal value", optimal_subset)
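
A quick usage sketch with the classic textbook instance (values and weights below are illustrative, not part of the original file):

# Three items: (weight 10, value 60), (20, 100), (30, 120), capacity 50.
values = [60, 100, 120]
weights = [10, 20, 30]
best_value, chosen_items = knapsack_with_example_solution(50, weights, values)
print(best_value)    # 220 (items 2 and 3)
print(chosen_items)  # {2, 3} -- the reconstructed subset is 1-indexed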
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available


_import_structure = {
    "configuration_bloom": ["BLOOM_PRETRAINED_CONFIG_ARCHIVE_MAP", "BloomConfig", "BloomOnnxConfig"],
}

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_bloom_fast"] = ["BloomTokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_bloom"] = [
        "BLOOM_PRETRAINED_MODEL_ARCHIVE_LIST",
        "BloomForCausalLM",
        "BloomModel",
        "BloomPreTrainedModel",
        "BloomForSequenceClassification",
        "BloomForTokenClassification",
        "BloomForQuestionAnswering",
    ]

if TYPE_CHECKING:
    from .configuration_bloom import BLOOM_PRETRAINED_CONFIG_ARCHIVE_MAP, BloomConfig, BloomOnnxConfig

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_bloom_fast import BloomTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_bloom import (
            BLOOM_PRETRAINED_MODEL_ARCHIVE_LIST,
            BloomForCausalLM,
            BloomForQuestionAnswering,
            BloomForSequenceClassification,
            BloomForTokenClassification,
            BloomModel,
            BloomPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
import operator


def strand_sort(arr: list, reverse: bool = False, solution: list | None = None) -> list:
    """
    Strand sort: repeatedly extract an increasing "strand" from the input and
    merge it into the solution list.

    :param arr: unordered input list
    :param reverse: descending order if True, ascending otherwise
    :return: sorted list
    """
    _operator = operator.lt if reverse else operator.gt
    solution = solution or []

    if not arr:
        return solution

    sublist = [arr.pop(0)]
    for i, item in enumerate(arr):
        if _operator(item, sublist[-1]):
            sublist.append(item)
            arr.pop(i)

    # merging sublist into solution list
    if not solution:
        solution.extend(sublist)
    else:
        while sublist:
            item = sublist.pop(0)
            for i, xx in enumerate(solution):
                if not _operator(item, xx):
                    solution.insert(i, item)
                    break
            else:
                solution.append(item)

    strand_sort(arr, reverse, solution)
    return solution


if __name__ == "__main__":
    assert strand_sort([4, 3, 5, 1, 2]) == [1, 2, 3, 4, 5]
    assert strand_sort([4, 3, 5, 1, 2], reverse=True) == [5, 4, 3, 2, 1]
import os
from typing import Dict, List, Tuple, TypeVar, Union


T = TypeVar("T")

ListLike = Union[List[T], Tuple[T, ...]]
NestedDataStructureLike = Union[T, List[T], Dict[str, T]]
PathLike = Union[str, bytes, os.PathLike]
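
As an illustration of how these aliases read at a call site, here is a hypothetical helper (not part of the module) annotated with `NestedDataStructureLike`:

# Hypothetical consumer of the aliases above: accepts a scalar, a list,
# or a dict of ints and counts the leaves.
def count_leaves(data: NestedDataStructureLike[int]) -> int:
    if isinstance(data, dict):
        return sum(count_leaves(v) for v in data.values())
    if isinstance(data, list):
        return sum(count_leaves(v) for v in data)
    return 1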
from typing import List, Optional, Union

from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType


class Pix2StructProcessor(ProcessorMixin):
    r"""
    Constructs a Pix2Struct processor which wraps a T5 tokenizer and a Pix2Struct image processor into a single
    processor.
    """

    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "Pix2StructImageProcessor"
    tokenizer_class = ("T5Tokenizer", "T5TokenizerFast")

    def __init__(self, image_processor, tokenizer):
        tokenizer.return_token_type_ids = False
        super().__init__(image_processor, tokenizer)

    def __call__(
        self,
        images=None,
        text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None,
        add_special_tokens: bool = True,
        padding: Union[bool, str, PaddingStrategy] = False,
        truncation: Union[bool, str, TruncationStrategy] = None,
        max_length: Optional[int] = None,
        max_patches: Optional[int] = 2048,
        stride: int = 0,
        pad_to_multiple_of: Optional[int] = None,
        return_attention_mask: Optional[bool] = None,
        return_overflowing_tokens: bool = False,
        return_special_tokens_mask: bool = False,
        return_offsets_mapping: bool = False,
        return_token_type_ids: bool = False,
        return_length: bool = False,
        verbose: bool = True,
        return_tensors: Optional[Union[str, TensorType]] = None,
        **kwargs,
    ) -> BatchEncoding:
        if images is None and text is None:
            raise ValueError("You have to specify either images or text.")

        # Get only text
        if images is None and not self.image_processor.is_vqa:
            self.current_processor = self.tokenizer
            text_encoding = self.tokenizer(
                text=text,
                add_special_tokens=add_special_tokens,
                padding=padding,
                truncation=truncation,
                max_length=max_length,
                stride=stride,
                pad_to_multiple_of=pad_to_multiple_of,
                return_attention_mask=return_attention_mask,
                return_overflowing_tokens=return_overflowing_tokens,
                return_special_tokens_mask=return_special_tokens_mask,
                return_offsets_mapping=return_offsets_mapping,
                return_token_type_ids=return_token_type_ids,
                return_length=return_length,
                verbose=verbose,
                return_tensors=return_tensors,
                **kwargs,
            )
            return text_encoding

        if not self.image_processor.is_vqa:
            # add pixel_values
            encoding_image_processor = self.image_processor(
                images, return_tensors=return_tensors, max_patches=max_patches, **kwargs
            )
        else:
            # add pixel_values and bbox
            encoding_image_processor = self.image_processor(
                images, return_tensors=return_tensors, max_patches=max_patches, header_text=text, **kwargs
            )

        if text is not None and not self.image_processor.is_vqa:
            text_encoding = self.tokenizer(
                text=text,
                add_special_tokens=add_special_tokens,
                padding=padding,
                truncation=truncation,
                max_length=max_length,
                stride=stride,
                pad_to_multiple_of=pad_to_multiple_of,
                return_attention_mask=return_attention_mask,
                return_overflowing_tokens=return_overflowing_tokens,
                return_special_tokens_mask=return_special_tokens_mask,
                return_offsets_mapping=return_offsets_mapping,
                return_token_type_ids=return_token_type_ids,
                return_length=return_length,
                verbose=verbose,
                return_tensors=return_tensors,
                **kwargs,
            )

            if "attention_mask" in text_encoding:
                text_encoding["decoder_attention_mask"] = text_encoding.pop("attention_mask")
            if "input_ids" in text_encoding:
                text_encoding["decoder_input_ids"] = text_encoding.pop("input_ids")
        else:
            text_encoding = None

        if text_encoding is not None:
            encoding_image_processor.update(text_encoding)

        return encoding_image_processor

    def batch_decode(self, *args, **kwargs):
        """Forwards all arguments to the tokenizer's batch_decode."""
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        """Forwards all arguments to the tokenizer's decode."""
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))
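
A minimal usage sketch for the processor; the checkpoint id and image path below are assumptions for illustration, not part of this module:

# Usage sketch: load a pretrained Pix2Struct processor and preprocess an image.
from PIL import Image
from transformers import Pix2StructProcessor

processor = Pix2StructProcessor.from_pretrained("google/pix2struct-textcaps-base")
inputs = processor(images=Image.open("example.jpg"), return_tensors="pt")
# Passing `text=` as well would add the renamed decoder_input_ids /
# decoder_attention_mask fields produced in __call__ above.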
import os
from shutil import copyfile
from typing import List, Optional, Tuple

from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging


if is_sentencepiece_available():
    from .tokenization_big_bird import BigBirdTokenizer
else:
    BigBirdTokenizer = None

logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "spiece.model", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "google/bigbird-roberta-base": "https://huggingface.co/google/bigbird-roberta-base/resolve/main/spiece.model",
        "google/bigbird-roberta-large": (
            "https://huggingface.co/google/bigbird-roberta-large/resolve/main/spiece.model"
        ),
        "google/bigbird-base-trivia-itc": (
            "https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/spiece.model"
        ),
    },
    "tokenizer_file": {
        "google/bigbird-roberta-base": (
            "https://huggingface.co/google/bigbird-roberta-base/resolve/main/tokenizer.json"
        ),
        "google/bigbird-roberta-large": (
            "https://huggingface.co/google/bigbird-roberta-large/resolve/main/tokenizer.json"
        ),
        "google/bigbird-base-trivia-itc": (
            "https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/tokenizer.json"
        ),
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "google/bigbird-roberta-base": 4096,
    "google/bigbird-roberta-large": 4096,
    "google/bigbird-base-trivia-itc": 4096,
}

SPIECE_UNDERLINE = "▁"


class BigBirdTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = BigBirdTokenizer
    model_input_names = ["input_ids", "attention_mask"]
    prefix_tokens: List[int] = []

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        unk_token="<unk>",
        bos_token="<s>",
        eos_token="</s>",
        pad_token="<pad>",
        sep_token="[SEP]",
        mask_token="[MASK]",
        cls_token="[CLS]",
        **kwargs,
    ):
        bos_token = AddedToken(bos_token, lstrip=False, rstrip=False) if isinstance(bos_token, str) else bos_token
        eos_token = AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token
        unk_token = AddedToken(unk_token, lstrip=False, rstrip=False) if isinstance(unk_token, str) else unk_token
        pad_token = AddedToken(pad_token, lstrip=False, rstrip=False) if isinstance(pad_token, str) else pad_token
        cls_token = AddedToken(cls_token, lstrip=False, rstrip=False) if isinstance(cls_token, str) else cls_token
        sep_token = AddedToken(sep_token, lstrip=False, rstrip=False) if isinstance(sep_token, str) else sep_token

        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            **kwargs,
        )

        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return cls + token_ids_0 + sep
        return cls + token_ids_0 + sep + token_ids_1 + sep

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            if token_ids_1 is not None:
                raise ValueError(
                    "You should not supply a second sequence if the provided sequence of "
                    "ids is already formatted with special tokens for the model."
                )
            return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_0]

        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
                "tokenizer."
            )

        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)

        return (out_vocab_file,)
def different_signs(num1: int, num2: int) -> bool:
    """
    Return True iff the two integers have opposite signs: in two's complement,
    the sign bit of num1 ^ num2 is set exactly when the operands' sign bits differ.
    Note that `^` binds tighter than `<`, so this parses as (num1 ^ num2) < 0.
    """
    return num1 ^ num2 < 0


if __name__ == "__main__":
    import doctest

    doctest.testmod()
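
A few quick checks of the XOR sign-bit trick (illustrative values only):

assert different_signs(1, -1) is True
assert different_signs(-10, -5) is False
assert different_signs(0, 5) is False  # zero is treated as non-negative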
from collections.abc import Callable
from math import pi, sqrt
from random import uniform
from statistics import mean


def pi_estimator(iterations: int) -> None:
    """
    An implementation of the Monte Carlo method used to find pi:
    sample uniformly in the unit square and count hits inside the unit circle.
    """

    def is_in_circle(x: float, y: float) -> bool:
        distance_from_centre = sqrt((x**2) + (y**2))
        # Our circle has a radius of 1, so a distance
        # greater than 1 would land outside the circle.
        return distance_from_centre <= 1

    # The proportion of guesses that landed in the circle
    proportion = mean(
        int(is_in_circle(uniform(-1.0, 1.0), uniform(-1.0, 1.0))) for _ in range(iterations)
    )
    # The ratio of the area for circle to square is pi/4.
    pi_estimate = proportion * 4
    print(f"The estimated value of pi is {pi_estimate}")
    print(f"The value of math.pi is {pi}")
    print(f"The total error is {abs(pi - pi_estimate)}")


def area_under_curve_estimator(
    iterations: int,
    function_to_integrate: Callable[[float], float],
    min_value: float = 0.0,
    max_value: float = 1.0,
) -> float:
    """Monte Carlo estimate of the integral of `function_to_integrate` on [min_value, max_value]."""
    return mean(
        function_to_integrate(uniform(min_value, max_value)) for _ in range(iterations)
    ) * (max_value - min_value)


def area_under_line_estimator_check(iterations: int, min_value: float = 0.0, max_value: float = 1.0) -> None:
    def identity_function(x: float) -> float:
        return x

    estimated_value = area_under_curve_estimator(iterations, identity_function, min_value, max_value)
    expected_value = (max_value * max_value - min_value * min_value) / 2

    print("******************")
    print(f"Estimating area under y=x where x varies from {min_value} to {max_value}")
    print(f"Estimated value is {estimated_value}")
    print(f"Expected value is {expected_value}")
    print(f"Total error is {abs(estimated_value - expected_value)}")
    print("******************")


def pi_estimator_using_area_under_curve(iterations: int) -> None:
    def function_to_integrate(x: float) -> float:
        return sqrt(4.0 - x * x)

    estimated_value = area_under_curve_estimator(iterations, function_to_integrate, 0.0, 2.0)

    print("******************")
    print("Estimating pi using area_under_curve_estimator")
    print(f"Estimated value is {estimated_value}")
    print(f"Expected value is {pi}")
    print(f"Total error is {abs(estimated_value - pi)}")
    print("******************")


if __name__ == "__main__":
    import doctest

    doctest.testmod()
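
An example run of the estimators above (results vary run to run; the error shrinks roughly as 1/sqrt(iterations)):

pi_estimator(100_000)
area_under_line_estimator_check(100_000)
pi_estimator_using_area_under_curve(100_000)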
import logging
from dataclasses import dataclass, field
from pathlib import Path
from typing import Optional, Union

from .generation.configuration_utils import GenerationConfig
from .training_args import TrainingArguments
from .utils import add_start_docstrings


logger = logging.getLogger(__name__)


@dataclass
@add_start_docstrings(TrainingArguments.__doc__)
class Seq2SeqTrainingArguments(TrainingArguments):
    sortish_sampler: bool = field(default=False, metadata={"help": "Whether to use SortishSampler or not."})
    predict_with_generate: bool = field(
        default=False, metadata={"help": "Whether to use generate to calculate generative metrics (ROUGE, BLEU)."}
    )
    generation_max_length: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "The `max_length` to use on each evaluation loop when `predict_with_generate=True`. Will default "
                "to the `max_length` value of the model configuration."
            )
        },
    )
    generation_num_beams: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "The `num_beams` to use on each evaluation loop when `predict_with_generate=True`. Will default "
                "to the `num_beams` value of the model configuration."
            )
        },
    )
    generation_config: Optional[Union[str, Path, GenerationConfig]] = field(
        default=None,
        metadata={
            "help": "Model id, file path or url pointing to a GenerationConfig json file, to use during prediction."
        },
    )

    def to_dict(self):
        """
        Serializes this instance, replacing any nested `GenerationConfig` by its dict representation.
        """
        d = super().to_dict()
        for k, v in d.items():
            if isinstance(v, GenerationConfig):
                d[k] = v.to_dict()
        return d
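
A usage sketch showing how these fields are typically set for generation-based evaluation; the output directory is a placeholder:

# Usage sketch (hypothetical output_dir; field values mirror the dataclass above).
from transformers import Seq2SeqTrainingArguments

training_args = Seq2SeqTrainingArguments(
    output_dir="./seq2seq-out",
    predict_with_generate=True,   # run generate() during eval/predict
    generation_max_length=128,
    generation_num_beams=4,
)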
from __future__ import annotations

import unittest

import numpy as np

from transformers import BlipTextConfig
from transformers.testing_utils import require_tf, slow
from transformers.utils import is_tf_available

from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask


if is_tf_available():
    import tensorflow as tf

    from transformers import TFBlipTextModel
    from transformers.models.blip.modeling_tf_blip import TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST


class BlipTextModelTester:
    def __init__(
        self,
        parent,
        batch_size=12,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        projection_dim=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        dropout=0.1,
        attention_dropout=0.1,
        max_position_embeddings=512,
        initializer_range=0.02,
        bos_token_id=0,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.projection_dim = projection_dim
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.scope = scope
        self.bos_token_id = bos_token_id

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        if input_mask is not None:
            input_mask = input_mask.numpy()
            batch_size, seq_length = input_mask.shape
            rnd_start_indices = np.random.randint(1, seq_length - 1, size=(batch_size,))
            for batch_idx, start_index in enumerate(rnd_start_indices):
                input_mask[batch_idx, :start_index] = 1
                input_mask[batch_idx, start_index:] = 0

        config = self.get_config()

        return config, input_ids, tf.convert_to_tensor(input_mask)

    def get_config(self):
        return BlipTextConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            projection_dim=self.projection_dim,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            dropout=self.dropout,
            attention_dropout=self.attention_dropout,
            max_position_embeddings=self.max_position_embeddings,
            initializer_range=self.initializer_range,
            bos_token_id=self.bos_token_id,
        )

    def create_and_check_model(self, config, input_ids, input_mask):
        model = TFBlipTextModel(config=config)
        result = model(input_ids, attention_mask=input_mask, training=False)
        result = model(input_ids, training=False)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, input_mask = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict


@require_tf
class BlipTextModelTest(TFModelTesterMixin, unittest.TestCase):
    all_model_classes = (TFBlipTextModel,) if is_tf_available() else ()
    fx_compatible = False
    test_pruning = False
    test_head_masking = False

    def setUp(self):
        self.model_tester = BlipTextModelTester(self)
        self.config_tester = ConfigTester(self, config_class=BlipTextConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_training(self):
        pass

    def test_training_gradient_checkpointing(self):
        pass

    @unittest.skip(reason="Blip does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="BlipTextModel has no base class and is not available in MODEL_MAPPING")
    def test_save_load_fast_init_from_base(self):
        pass

    @unittest.skip(reason="BlipTextModel has no base class and is not available in MODEL_MAPPING")
    def test_save_load_fast_init_to_base(self):
        pass

    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFBlipTextModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    def test_pt_tf_model_equivalence(self, allow_missing_keys=True):
        super().test_pt_tf_model_equivalence(allow_missing_keys=allow_missing_keys)
class MaxFenwickTree:
    """
    Fenwick-style (binary indexed) tree supporting point updates and
    range-maximum queries over half-open intervals [left, right).
    """

    def __init__(self, size: int) -> None:
        self.size = size
        self.arr = [0] * size
        self.tree = [0] * size

    @staticmethod
    def get_next(index: int) -> int:
        return index | (index + 1)

    @staticmethod
    def get_prev(index: int) -> int:
        return (index & (index + 1)) - 1

    def update(self, index: int, value: int) -> None:
        self.arr[index] = value
        while index < self.size:
            current_left_border = self.get_prev(index) + 1
            if current_left_border == index:
                # the node covers only itself
                self.tree[index] = value
            else:
                # the node covers [current_left_border, index]; recompute its maximum
                self.tree[index] = max(value, self.query(current_left_border, index))
            index = self.get_next(index)

    def query(self, left: int, right: int) -> int:
        right -= 1  # because right is exclusive
        result = 0
        while left <= right:
            current_left = self.get_prev(right)
            if left <= current_left:
                result = max(result, self.tree[right])
                right = current_left
            else:
                result = max(result, self.arr[right])
                right -= 1
        return result


if __name__ == "__main__":
    import doctest

    doctest.testmod()
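
A short usage sketch for the tree above (values are illustrative):

tree = MaxFenwickTree(5)
tree.update(0, 10)
tree.update(3, 7)
print(tree.query(0, 4))  # 10 -- maximum over indices 0..3
print(tree.query(1, 4))  # 7  -- index 0 is excluded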
import torch
import torch.nn as nn

from transformers import CLIPConfig, CLIPVisionModel, PreTrainedModel

from ...utils import logging


logger = logging.get_logger(__name__)


def cosine_distance(image_embeds, text_embeds):
    normalized_image_embeds = nn.functional.normalize(image_embeds)
    normalized_text_embeds = nn.functional.normalize(text_embeds)
    return torch.mm(normalized_image_embeds, normalized_text_embeds.t())


class StableDiffusionSafetyChecker(PreTrainedModel):
    config_class = CLIPConfig

    _no_split_modules = ["CLIPEncoderLayer"]

    def __init__(self, config: CLIPConfig):
        super().__init__(config)

        self.vision_model = CLIPVisionModel(config.vision_config)
        self.visual_projection = nn.Linear(config.vision_config.hidden_size, config.projection_dim, bias=False)

        self.concept_embeds = nn.Parameter(torch.ones(17, config.projection_dim), requires_grad=False)
        self.special_care_embeds = nn.Parameter(torch.ones(3, config.projection_dim), requires_grad=False)

        self.concept_embeds_weights = nn.Parameter(torch.ones(17), requires_grad=False)
        self.special_care_embeds_weights = nn.Parameter(torch.ones(3), requires_grad=False)

    @torch.no_grad()
    def forward(self, clip_input, images):
        pooled_output = self.vision_model(clip_input)[1]  # pooled_output
        image_embeds = self.visual_projection(pooled_output)

        # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
        special_cos_dist = cosine_distance(image_embeds, self.special_care_embeds).cpu().float().numpy()
        cos_dist = cosine_distance(image_embeds, self.concept_embeds).cpu().float().numpy()

        result = []
        batch_size = image_embeds.shape[0]
        for i in range(batch_size):
            result_img = {"special_scores": {}, "special_care": [], "concept_scores": {}, "bad_concepts": []}

            # increase this value to create a stronger `nsfw` filter
            # at the cost of increasing the possibility of filtering benign images
            adjustment = 0.0

            for concept_idx in range(len(special_cos_dist[0])):
                concept_cos = special_cos_dist[i][concept_idx]
                concept_threshold = self.special_care_embeds_weights[concept_idx].item()
                result_img["special_scores"][concept_idx] = round(concept_cos - concept_threshold + adjustment, 3)
                if result_img["special_scores"][concept_idx] > 0:
                    result_img["special_care"].append({concept_idx, result_img["special_scores"][concept_idx]})
                    adjustment = 0.01

            for concept_idx in range(len(cos_dist[0])):
                concept_cos = cos_dist[i][concept_idx]
                concept_threshold = self.concept_embeds_weights[concept_idx].item()
                result_img["concept_scores"][concept_idx] = round(concept_cos - concept_threshold + adjustment, 3)
                if result_img["concept_scores"][concept_idx] > 0:
                    result_img["bad_concepts"].append(concept_idx)

            result.append(result_img)

        has_nsfw_concepts = [len(res["bad_concepts"]) > 0 for res in result]

        return images, has_nsfw_concepts

    @torch.no_grad()
    def forward_onnx(self, clip_input: torch.FloatTensor, images: torch.FloatTensor):
        pooled_output = self.vision_model(clip_input)[1]  # pooled_output
        image_embeds = self.visual_projection(pooled_output)

        special_cos_dist = cosine_distance(image_embeds, self.special_care_embeds)
        cos_dist = cosine_distance(image_embeds, self.concept_embeds)

        # increase this value to create a stronger `nsfw` filter
        # at the cost of increasing the possibility of filtering benign images
        adjustment = 0.0

        special_scores = special_cos_dist - self.special_care_embeds_weights + adjustment
        # special_scores = special_scores.round(decimals=3)
        special_care = torch.any(special_scores > 0, dim=1)
        special_adjustment = special_care * 0.01
        special_adjustment = special_adjustment.unsqueeze(1).expand(-1, cos_dist.shape[1])

        concept_scores = (cos_dist - self.concept_embeds_weights) + special_adjustment
        # concept_scores = concept_scores.round(decimals=3)
        has_nsfw_concepts = torch.any(concept_scores > 0, dim=1)

        return images, has_nsfw_concepts
from __future__ import annotations


def merge(input_list: list, low: int, mid: int, high: int) -> list:
    """
    Merge the two sorted halves input_list[low:mid] and input_list[mid:high + 1]
    back into input_list in order.
    """
    result = []
    left, right = input_list[low:mid], input_list[mid : high + 1]
    while left and right:
        result.append((left if left[0] <= right[0] else right).pop(0))
    input_list[low : high + 1] = result + left + right
    return input_list


def iter_merge_sort(input_list: list) -> list:
    """
    Return a sorted copy of the input list, using iterative (bottom-up) merge sort.
    """
    if len(input_list) <= 1:
        return input_list
    input_list = list(input_list)

    # iteration for two-way merging
    p = 2
    while p <= len(input_list):
        # getting low, high and middle value for merge-sort of single list
        for i in range(0, len(input_list), p):
            low = i
            high = i + p - 1
            mid = (low + high + 1) // 2
            input_list = merge(input_list, low, mid, high)
        # final merge of last two parts
        if p * 2 >= len(input_list):
            mid = i
            input_list = merge(input_list, 0, mid, len(input_list) - 1)
            break
        p *= 2

    return input_list


if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    if user_input == "":
        unsorted = []
    else:
        unsorted = [int(item.strip()) for item in user_input.split(",")]
    print(iter_merge_sort(unsorted))
import argparse

import intel_extension_for_pytorch as ipex
import torch

from diffusers import DPMSolverMultistepScheduler, StableDiffusionPipeline


parser = argparse.ArgumentParser("Stable Diffusion script with intel optimization", add_help=False)
parser.add_argument("--dpm", action="store_true", help="Enable DPMSolver or not")
parser.add_argument("--steps", default=None, type=int, help="Num inference steps")
args = parser.parse_args()

device = "cpu"
prompt = "a lovely <dicoo> in red dress and hat, in the snowly and brightly night, with many brighly buildings"

model_id = "path-to-your-trained-model"
pipe = StableDiffusionPipeline.from_pretrained(model_id)
if args.dpm:
    pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
pipe = pipe.to(device)

# to channels last
pipe.unet = pipe.unet.to(memory_format=torch.channels_last)
pipe.vae = pipe.vae.to(memory_format=torch.channels_last)
pipe.text_encoder = pipe.text_encoder.to(memory_format=torch.channels_last)
if pipe.requires_safety_checker:
    pipe.safety_checker = pipe.safety_checker.to(memory_format=torch.channels_last)

# optimize with ipex
sample = torch.randn(2, 4, 64, 64)
timestep = torch.rand(1) * 999
encoder_hidden_status = torch.randn(2, 77, 768)
input_example = (sample, timestep, encoder_hidden_status)
try:
    pipe.unet = ipex.optimize(pipe.unet.eval(), dtype=torch.bfloat16, inplace=True, sample_input=input_example)
except Exception:
    pipe.unet = ipex.optimize(pipe.unet.eval(), dtype=torch.bfloat16, inplace=True)
pipe.vae = ipex.optimize(pipe.vae.eval(), dtype=torch.bfloat16, inplace=True)
pipe.text_encoder = ipex.optimize(pipe.text_encoder.eval(), dtype=torch.bfloat16, inplace=True)
if pipe.requires_safety_checker:
    pipe.safety_checker = ipex.optimize(pipe.safety_checker.eval(), dtype=torch.bfloat16, inplace=True)

# compute
seed = 666
generator = torch.Generator(device).manual_seed(seed)
generate_kwargs = {"generator": generator}
if args.steps is not None:
    generate_kwargs["num_inference_steps"] = args.steps

with torch.cpu.amp.autocast(enabled=True, dtype=torch.bfloat16):
    image = pipe(prompt, **generate_kwargs).images[0]

# save image
image.save("generated.png")
import math
from typing import Optional

import numpy as np

from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

ENCODEC_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/encodec_24khz": "https://huggingface.co/facebook/encodec_24khz/resolve/main/config.json",
    "facebook/encodec_48khz": "https://huggingface.co/facebook/encodec_48khz/resolve/main/config.json",
}


class EncodecConfig(PretrainedConfig):
    model_type = "encodec"

    def __init__(
        self,
        target_bandwidths=[1.5, 3.0, 6.0, 12.0, 24.0],
        sampling_rate=24_000,
        audio_channels=1,
        normalize=False,
        chunk_length_s=None,
        overlap=None,
        hidden_size=128,
        num_filters=32,
        num_residual_layers=1,
        upsampling_ratios=[8, 5, 4, 2],
        norm_type="weight_norm",
        kernel_size=7,
        last_kernel_size=7,
        residual_kernel_size=3,
        dilation_growth_rate=2,
        use_causal_conv=True,
        pad_mode="reflect",
        compress=2,
        num_lstm_layers=2,
        trim_right_ratio=1.0,
        codebook_size=1024,
        codebook_dim=None,
        use_conv_shortcut=True,
        **kwargs,
    ):
        self.target_bandwidths = target_bandwidths
        self.sampling_rate = sampling_rate
        self.audio_channels = audio_channels
        self.normalize = normalize
        self.chunk_length_s = chunk_length_s
        self.overlap = overlap
        self.hidden_size = hidden_size
        self.num_filters = num_filters
        self.num_residual_layers = num_residual_layers
        self.upsampling_ratios = upsampling_ratios
        self.norm_type = norm_type
        self.kernel_size = kernel_size
        self.last_kernel_size = last_kernel_size
        self.residual_kernel_size = residual_kernel_size
        self.dilation_growth_rate = dilation_growth_rate
        self.use_causal_conv = use_causal_conv
        self.pad_mode = pad_mode
        self.compress = compress
        self.num_lstm_layers = num_lstm_layers
        self.trim_right_ratio = trim_right_ratio
        self.codebook_size = codebook_size
        self.codebook_dim = codebook_dim if codebook_dim is not None else hidden_size
        self.use_conv_shortcut = use_conv_shortcut

        if self.norm_type not in ["weight_norm", "time_group_norm"]:
            raise ValueError(
                f'self.norm_type must be one of `"weight_norm"`, `"time_group_norm"`), got {self.norm_type}'
            )

        super().__init__(**kwargs)

    # This is a property because you might want to change the chunk_length_s on the fly
    @property
    def chunk_length(self) -> Optional[int]:
        if self.chunk_length_s is None:
            return None
        else:
            return int(self.chunk_length_s * self.sampling_rate)

    @property
    def chunk_stride(self) -> Optional[int]:
        if self.chunk_length_s is None or self.overlap is None:
            return None
        else:
            return max(1, int((1.0 - self.overlap) * self.chunk_length))

    @property
    def frame_rate(self) -> int:
        hop_length = np.prod(self.upsampling_ratios)
        return math.ceil(self.sampling_rate / hop_length)

    @property
    def num_quantizers(self) -> int:
        return int(1000 * self.target_bandwidths[-1] // (self.frame_rate * 10))
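
A quick sketch of how the derived properties follow from the fields above (values shown are the defaults):

config = EncodecConfig(sampling_rate=24_000, upsampling_ratios=[8, 5, 4, 2])
print(config.frame_rate)  # ceil(24000 / (8 * 5 * 4 * 2)) = 75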
NUMBERS_PLUS_LETTER = "Input must be a string of 8 numbers plus letter"
LOOKUP_LETTERS = "TRWAGMYFPDXBNJZSQVHLCKE"


def is_spain_national_id(spanish_id: str) -> bool:
    """
    Validate a Spanish national ID (DNI): 8 digits plus a check letter,
    where the letter is LOOKUP_LETTERS[number % 23].
    """
    if not isinstance(spanish_id, str):
        msg = f"Expected string as input, found {type(spanish_id).__name__}"
        raise TypeError(msg)

    spanish_id_clean = spanish_id.replace("-", "").upper()
    if len(spanish_id_clean) != 9:
        raise ValueError(NUMBERS_PLUS_LETTER)

    try:
        number = int(spanish_id_clean[0:8])
        letter = spanish_id_clean[8]
    except ValueError as ex:
        raise ValueError(NUMBERS_PLUS_LETTER) from ex

    if letter.isdigit():
        raise ValueError(NUMBERS_PLUS_LETTER)

    return letter == LOOKUP_LETTERS[number % 23]


if __name__ == "__main__":
    import doctest

    doctest.testmod()
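
Two quick checks of the validator (illustrative sample IDs; 12345678 % 23 == 14, and LOOKUP_LETTERS[14] is "Z"):

assert is_spain_national_id("12345678Z")
assert not is_spain_national_id("12345678T")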
348
1
import heapq


def greedy_min_vertex_cover(graph: dict) -> set[int]:
    """Greedy APX algorithm for the minimum vertex cover problem."""
    queue: list[list] = []

    # For each node, push [-(degree), (node, adjacency_list)] onto the queue.
    # heapq is a min priority queue, so negating the degree makes it behave
    # like a max priority queue keyed on node degree.
    for key, value in graph.items():  # O(log(n))
        heapq.heappush(queue, [-1 * len(value), (key, value)])

    # chosen_vertices = set of chosen vertices
    chosen_vertices = set()

    # While the queue isn't empty and there are still edges
    # (queue[0][0] is the negated degree of the node with max degree).
    while queue and queue[0][0] != 0:
        # Extract the vertex with max degree from the queue and add it
        # to the cover.
        argmax = heapq.heappop(queue)[1][0]
        chosen_vertices.add(argmax)

        # Remove all arcs adjacent to argmax.
        for elem in queue:
            # If v has no adjacent nodes, skip it.
            if elem[0] == 0:
                continue
            # If argmax is reachable from elem, remove argmax from elem's
            # adjacency list and update elem's (negated) degree.
            if argmax in elem[1][1]:
                index = elem[1][1].index(argmax)
                del elem[1][1][index]
                elem[0] += 1

        # Re-order the queue.
        heapq.heapify(queue)
    return chosen_vertices


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    graph = {0: [1, 3], 1: [0, 3], 2: [0, 3, 4], 3: [0, 1, 2], 4: [2, 3]}
    print(f"Minimum vertex cover:\n{greedy_min_vertex_cover(graph)}")
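# Worked example for the greedy cover above: the algorithm repeatedly pops a
# remaining node of maximum degree and deletes its incident edges until no
# edges are left. The result is a heuristic cover, not necessarily optimal.
demo = {0: [1, 2], 1: [0, 2], 2: [0, 1, 3], 3: [2]}
print(greedy_min_vertex_cover(demo))  # {0, 2}: node 2 first, then 0 for edge 0-1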
348
def logical_left_shift(number: int, shift_amount: int) -> str:
    """Shift a positive number left: append shift_amount zero bits."""
    if number < 0 or shift_amount < 0:
        raise ValueError("both inputs must be positive integers")

    binary_number = str(bin(number))
    binary_number += "0" * shift_amount
    return binary_number


def logical_right_shift(number: int, shift_amount: int) -> str:
    """Shift a positive number right: drop the shift_amount lowest bits."""
    if number < 0 or shift_amount < 0:
        raise ValueError("both inputs must be positive integers")

    binary_number = str(bin(number))[2:]
    if shift_amount >= len(binary_number):
        return "0b0"
    shifted_binary_number = binary_number[: len(binary_number) - shift_amount]
    return "0b" + shifted_binary_number


def arithmetic_right_shift(number: int, shift_amount: int) -> str:
    """Right shift that preserves the sign bit (two's complement)."""
    if number >= 0:  # Get binary representation of positive number
        binary_number = "0" + str(bin(number)).strip("-")[2:]
    else:  # Get binary (2's complement) representation of negative number
        binary_number_length = len(bin(number)[3:])  # Find 2's complement of number
        binary_number = bin(abs(number) - (1 << binary_number_length))[3:]
        binary_number = (
            "1" + "0" * (binary_number_length - len(binary_number)) + binary_number
        )

    if shift_amount >= len(binary_number):
        return "0b" + binary_number[0] * len(binary_number)
    return (
        "0b"
        + binary_number[0] * shift_amount
        + binary_number[: len(binary_number) - shift_amount]
    )


if __name__ == "__main__":
    import doctest

    doctest.testmod()
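# Usage sketch for the three string-based shifts above. The logical shifts
# append/drop bits of the magnitude, while the arithmetic right shift
# replicates the sign bit of the two's-complement form.
print(logical_left_shift(0b1101, 2))    # "0b110100"  (13 << 2 == 52)
print(logical_right_shift(0b1101, 2))   # "0b11"      (13 >> 2 == 3)
print(arithmetic_right_shift(-8, 2))    # "0b11110"   (-8 >> 2 == -2 in 5 bits)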
348
1
from abc import ABC, abstractmethod from argparse import ArgumentParser class __snake_case ( lowerCamelCase__ ): @staticmethod @abstractmethod def UpperCAmelCase__ ( snake_case__ ) -> Optional[int]: '''simple docstring''' raise NotImplementedError() @abstractmethod def UpperCAmelCase__ ( self ) -> int: '''simple docstring''' raise NotImplementedError()
348
from dataclasses import asdict, dataclass from typing import Optional from ...configuration_utils import PretrainedConfig from ...utils import logging __snake_case = logging.get_logger(__name__) # TODO Update this __snake_case = { '''facebook/esm-1b''': '''https://huggingface.co/facebook/esm-1b/resolve/main/config.json''', # See all ESM models at https://huggingface.co/models?filter=esm } class __snake_case ( lowerCamelCase__ ): __lowerCamelCase : Tuple = """esm""" def __init__( self , snake_case__=None , snake_case__=None , snake_case__=None , snake_case__=768 , snake_case__=12 , snake_case__=12 , snake_case__=3072 , snake_case__=0.1 , snake_case__=0.1 , snake_case__=1026 , snake_case__=0.02 , snake_case__=1e-12 , snake_case__="absolute" , snake_case__=True , snake_case__=None , snake_case__=False , snake_case__=False , snake_case__=None , snake_case__=None , **snake_case__ , ) -> Union[str, Any]: '''simple docstring''' super().__init__(pad_token_id=snake_case__ , mask_token_id=snake_case__ , **snake_case__ ) UpperCAmelCase : List[str] =vocab_size UpperCAmelCase : str =hidden_size UpperCAmelCase : List[Any] =num_hidden_layers UpperCAmelCase : Optional[Any] =num_attention_heads UpperCAmelCase : str =intermediate_size UpperCAmelCase : Any =hidden_dropout_prob UpperCAmelCase : int =attention_probs_dropout_prob UpperCAmelCase : Dict =max_position_embeddings UpperCAmelCase : List[str] =initializer_range UpperCAmelCase : Union[str, Any] =layer_norm_eps UpperCAmelCase : Dict =position_embedding_type UpperCAmelCase : Optional[Any] =use_cache UpperCAmelCase : int =emb_layer_norm_before UpperCAmelCase : List[str] =token_dropout UpperCAmelCase : Optional[Any] =is_folding_model if is_folding_model: if esmfold_config is None: logger.info('''No esmfold_config supplied for folding model, using default values.''' ) UpperCAmelCase : Optional[Any] =EsmFoldConfig() elif isinstance(snake_case__ , snake_case__ ): UpperCAmelCase : Optional[int] =EsmFoldConfig(**snake_case__ ) UpperCAmelCase : Tuple =esmfold_config if vocab_list is None: logger.warning('''No vocab_list supplied for folding model, assuming the ESM-2 vocabulary!''' ) UpperCAmelCase : Any =get_default_vocab_list() else: UpperCAmelCase : Tuple =vocab_list else: UpperCAmelCase : Optional[int] =None UpperCAmelCase : Union[str, Any] =None if self.esmfold_config is not None and getattr(self.esmfold_config , '''use_esm_attn_map''' , snake_case__ ): raise ValueError('''The HuggingFace port of ESMFold does not support use_esm_attn_map at this time!''' ) def UpperCAmelCase__ ( self ) -> List[Any]: '''simple docstring''' UpperCAmelCase : Union[str, Any] =super().to_dict() if isinstance(self.esmfold_config , snake_case__ ): UpperCAmelCase : str =self.esmfold_config.to_dict() return output @dataclass class __snake_case : __lowerCamelCase : str = None __lowerCamelCase : bool = True __lowerCamelCase : bool = False __lowerCamelCase : bool = False __lowerCamelCase : bool = False __lowerCamelCase : float = 0 __lowerCamelCase : bool = True __lowerCamelCase : bool = False __lowerCamelCase : int = 128 __lowerCamelCase : "TrunkConfig" = None def UpperCAmelCase__ ( self ) -> Union[str, Any]: '''simple docstring''' if self.trunk is None: UpperCAmelCase : str =TrunkConfig() elif isinstance(self.trunk , snake_case__ ): UpperCAmelCase : Optional[int] =TrunkConfig(**self.trunk ) def UpperCAmelCase__ ( self ) -> Any: '''simple docstring''' UpperCAmelCase : Optional[Any] =asdict(self ) UpperCAmelCase : Any =self.trunk.to_dict() return output @dataclass class __snake_case : 
__lowerCamelCase : int = 48 __lowerCamelCase : int = 1024 __lowerCamelCase : int = 128 __lowerCamelCase : int = 32 __lowerCamelCase : int = 32 __lowerCamelCase : int = 32 __lowerCamelCase : float = 0 __lowerCamelCase : float = 0 __lowerCamelCase : bool = False __lowerCamelCase : int = 4 __lowerCamelCase : Optional[int] = 128 __lowerCamelCase : "StructureModuleConfig" = None def UpperCAmelCase__ ( self ) -> List[str]: '''simple docstring''' if self.structure_module is None: UpperCAmelCase : Any =StructureModuleConfig() elif isinstance(self.structure_module , snake_case__ ): UpperCAmelCase : str =StructureModuleConfig(**self.structure_module ) if self.max_recycles <= 0: raise ValueError(f'''`max_recycles` should be positive, got {self.max_recycles}.''' ) if self.sequence_state_dim % self.sequence_state_dim != 0: raise ValueError( '''`sequence_state_dim` should be a round multiple of `sequence_state_dim`, got''' f''' {self.sequence_state_dim} and {self.sequence_state_dim}.''' ) if self.pairwise_state_dim % self.pairwise_state_dim != 0: raise ValueError( '''`pairwise_state_dim` should be a round multiple of `pairwise_state_dim`, got''' f''' {self.pairwise_state_dim} and {self.pairwise_state_dim}.''' ) UpperCAmelCase : Optional[int] =self.sequence_state_dim // self.sequence_head_width UpperCAmelCase : Any =self.pairwise_state_dim // self.pairwise_head_width if self.sequence_state_dim != sequence_num_heads * self.sequence_head_width: raise ValueError( '''`sequence_state_dim` should be equal to `sequence_num_heads * sequence_head_width, got''' f''' {self.sequence_state_dim} != {sequence_num_heads} * {self.sequence_head_width}.''' ) if self.pairwise_state_dim != pairwise_num_heads * self.pairwise_head_width: raise ValueError( '''`pairwise_state_dim` should be equal to `pairwise_num_heads * pairwise_head_width, got''' f''' {self.pairwise_state_dim} != {pairwise_num_heads} * {self.pairwise_head_width}.''' ) if self.pairwise_state_dim % 2 != 0: raise ValueError(f'''`pairwise_state_dim` should be even, got {self.pairwise_state_dim}.''' ) if self.dropout >= 0.4: raise ValueError(f'''`dropout` should not be greater than 0.4, got {self.dropout}.''' ) def UpperCAmelCase__ ( self ) -> List[Any]: '''simple docstring''' UpperCAmelCase : Union[str, Any] =asdict(self ) UpperCAmelCase : Tuple =self.structure_module.to_dict() return output @dataclass class __snake_case : __lowerCamelCase : int = 384 __lowerCamelCase : int = 128 __lowerCamelCase : int = 16 __lowerCamelCase : int = 128 __lowerCamelCase : int = 12 __lowerCamelCase : int = 4 __lowerCamelCase : int = 8 __lowerCamelCase : float = 0.1 __lowerCamelCase : int = 8 __lowerCamelCase : int = 1 __lowerCamelCase : int = 2 __lowerCamelCase : int = 7 __lowerCamelCase : int = 10 __lowerCamelCase : float = 1E-8 __lowerCamelCase : float = 1E5 def UpperCAmelCase__ ( self ) -> Union[str, Any]: '''simple docstring''' return asdict(self ) def lowerCAmelCase_ ( )-> Tuple: '''simple docstring''' return ( "<cls>", "<pad>", "<eos>", "<unk>", "L", "A", "G", "V", "S", "E", "R", "T", "I", "D", "P", "K", "Q", "N", "F", "Y", "M", "H", "W", "C", "X", "B", "U", "Z", "O", ".", "-", "<null_1>", "<mask>", )
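# Note on the two divisibility checks in the TrunkConfig above: as written
# they compare a value with itself (`sequence_state_dim % sequence_state_dim`
# and `pairwise_state_dim % pairwise_state_dim`), so they can never fail.
# Presumably the intent was to validate each state dim against the matching
# head width; a hedged sketch of that assumed intent (not upstream code):
def check_trunk_dims(
    sequence_state_dim: int,
    sequence_head_width: int,
    pairwise_state_dim: int,
    pairwise_head_width: int,
) -> None:
    if sequence_state_dim % sequence_head_width != 0:
        raise ValueError(
            "`sequence_state_dim` should be a round multiple of"
            f" `sequence_head_width`, got {sequence_state_dim} and {sequence_head_width}."
        )
    if pairwise_state_dim % pairwise_head_width != 0:
        raise ValueError(
            "`pairwise_state_dim` should be a round multiple of"
            f" `pairwise_head_width`, got {pairwise_state_dim} and {pairwise_head_width}."
        )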
348
1
import sys
import webbrowser

import requests
from bs4 import BeautifulSoup
from fake_useragent import UserAgent

if __name__ == "__main__":
    print("Googling.....")
    url = "https://www.google.com/search?q=" + " ".join(sys.argv[1:])
    res = requests.get(url, headers={"UserAgent": UserAgent().random})
    # res.raise_for_status()
    with open("project1a.html", "wb") as out_file:  # only for knowing the class
        for data in res.iter_content(10_000):
            out_file.write(data)
    soup = BeautifulSoup(res.text, "html.parser")
    links = list(soup.select(".eZt8xd"))[:5]

    print(len(links))
    for link in links:
        if link.text == "Maps":
            webbrowser.open(link.get("href"))
        else:
            webbrowser.open(f"https://google.com{link.get('href')}")
348
import torch from diffusers import KDPMaDiscreteScheduler from diffusers.utils import torch_device from .test_schedulers import SchedulerCommonTest class __snake_case ( lowerCamelCase__ ): __lowerCamelCase : Optional[int] = (KDPMaDiscreteScheduler,) __lowerCamelCase : List[str] = 10 def UpperCAmelCase__ ( self , **snake_case__ ) -> str: '''simple docstring''' UpperCAmelCase : int ={ '''num_train_timesteps''': 1100, '''beta_start''': 0.0001, '''beta_end''': 0.02, '''beta_schedule''': '''linear''', } config.update(**snake_case__ ) return config def UpperCAmelCase__ ( self ) -> Tuple: '''simple docstring''' for timesteps in [10, 50, 100, 1000]: self.check_over_configs(num_train_timesteps=snake_case__ ) def UpperCAmelCase__ ( self ) -> Optional[int]: '''simple docstring''' for beta_start, beta_end in zip([0.0_0001, 0.0001, 0.001] , [0.0002, 0.002, 0.02] ): self.check_over_configs(beta_start=snake_case__ , beta_end=snake_case__ ) def UpperCAmelCase__ ( self ) -> str: '''simple docstring''' for schedule in ["linear", "scaled_linear"]: self.check_over_configs(beta_schedule=snake_case__ ) def UpperCAmelCase__ ( self ) -> Dict: '''simple docstring''' for prediction_type in ["epsilon", "v_prediction"]: self.check_over_configs(prediction_type=snake_case__ ) def UpperCAmelCase__ ( self ) -> str: '''simple docstring''' UpperCAmelCase : Optional[Any] =self.scheduler_classes[0] UpperCAmelCase : Optional[int] =self.get_scheduler_config(prediction_type='''v_prediction''' ) UpperCAmelCase : Optional[Any] =scheduler_class(**snake_case__ ) scheduler.set_timesteps(self.num_inference_steps ) UpperCAmelCase : str =self.dummy_model() UpperCAmelCase : Optional[Any] =self.dummy_sample_deter * scheduler.init_noise_sigma UpperCAmelCase : Union[str, Any] =sample.to(snake_case__ ) for i, t in enumerate(scheduler.timesteps ): UpperCAmelCase : str =scheduler.scale_model_input(snake_case__ , snake_case__ ) UpperCAmelCase : Any =model(snake_case__ , snake_case__ ) UpperCAmelCase : Union[str, Any] =scheduler.step(snake_case__ , snake_case__ , snake_case__ ) UpperCAmelCase : int =output.prev_sample UpperCAmelCase : Dict =torch.sum(torch.abs(snake_case__ ) ) UpperCAmelCase : Optional[Any] =torch.mean(torch.abs(snake_case__ ) ) if torch_device in ["cpu", "mps"]: assert abs(result_sum.item() - 4.69_34e-07 ) < 1e-2 assert abs(result_mean.item() - 6.11_12e-10 ) < 1e-3 else: # CUDA assert abs(result_sum.item() - 4.6_93_42_86_50_17_09_72e-07 ) < 1e-2 assert abs(result_mean.item() - 0.0002 ) < 1e-3 def UpperCAmelCase__ ( self ) -> int: '''simple docstring''' if torch_device == "mps": return UpperCAmelCase : Any =self.scheduler_classes[0] UpperCAmelCase : Optional[int] =self.get_scheduler_config() UpperCAmelCase : Optional[Any] =scheduler_class(**snake_case__ ) scheduler.set_timesteps(self.num_inference_steps ) UpperCAmelCase : Optional[int] =self.dummy_model() UpperCAmelCase : Union[str, Any] =self.dummy_sample_deter * scheduler.init_noise_sigma UpperCAmelCase : str =sample.to(snake_case__ ) for i, t in enumerate(scheduler.timesteps ): UpperCAmelCase : Dict =scheduler.scale_model_input(snake_case__ , snake_case__ ) UpperCAmelCase : Union[str, Any] =model(snake_case__ , snake_case__ ) UpperCAmelCase : List[str] =scheduler.step(snake_case__ , snake_case__ , snake_case__ ) UpperCAmelCase : Optional[int] =output.prev_sample UpperCAmelCase : Any =torch.sum(torch.abs(snake_case__ ) ) UpperCAmelCase : Union[str, Any] =torch.mean(torch.abs(snake_case__ ) ) if torch_device in ["cpu", "mps"]: assert abs(result_sum.item() - 20.4125 ) < 1e-2 
assert abs(result_mean.item() - 0.0266 ) < 1e-3 else: # CUDA assert abs(result_sum.item() - 20.4125 ) < 1e-2 assert abs(result_mean.item() - 0.0266 ) < 1e-3 def UpperCAmelCase__ ( self ) -> str: '''simple docstring''' if torch_device == "mps": return UpperCAmelCase : List[Any] =self.scheduler_classes[0] UpperCAmelCase : Dict =self.get_scheduler_config() UpperCAmelCase : List[str] =scheduler_class(**snake_case__ ) scheduler.set_timesteps(self.num_inference_steps , device=snake_case__ ) UpperCAmelCase : int =self.dummy_model() UpperCAmelCase : Tuple =self.dummy_sample_deter.to(snake_case__ ) * scheduler.init_noise_sigma for t in scheduler.timesteps: UpperCAmelCase : Optional[Any] =scheduler.scale_model_input(snake_case__ , snake_case__ ) UpperCAmelCase : int =model(snake_case__ , snake_case__ ) UpperCAmelCase : str =scheduler.step(snake_case__ , snake_case__ , snake_case__ ) UpperCAmelCase : List[str] =output.prev_sample UpperCAmelCase : List[str] =torch.sum(torch.abs(snake_case__ ) ) UpperCAmelCase : Dict =torch.mean(torch.abs(snake_case__ ) ) if str(snake_case__ ).startswith('''cpu''' ): # The following sum varies between 148 and 156 on mps. Why? assert abs(result_sum.item() - 20.4125 ) < 1e-2 assert abs(result_mean.item() - 0.0266 ) < 1e-3 else: # CUDA assert abs(result_sum.item() - 20.4125 ) < 1e-2 assert abs(result_mean.item() - 0.0266 ) < 1e-3
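# The three tests above share one denoising pattern. A minimal runnable
# sketch of that loop, assuming the public diffusers KDPM2DiscreteScheduler
# API exercised by the tests, with a zero tensor standing in for the UNet's
# noise prediction (a placeholder, not a real model):
import torch
from diffusers import KDPM2DiscreteScheduler

scheduler = KDPM2DiscreteScheduler(
    num_train_timesteps=1100, beta_start=0.0001, beta_end=0.02, beta_schedule="linear"
)
scheduler.set_timesteps(10)

sample = torch.randn(1, 3, 8, 8) * scheduler.init_noise_sigma
for t in scheduler.timesteps:
    model_input = scheduler.scale_model_input(sample, t)
    noise_pred = torch.zeros_like(model_input)  # stand-in for model(model_input, t)
    sample = scheduler.step(noise_pred, t, sample).prev_sample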
348
1
from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_tf_available, is_tokenizers_available, is_torch_available, ) __snake_case = {'''configuration_opt''': ['''OPT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''OPTConfig''']} try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __snake_case = [ '''OPT_PRETRAINED_MODEL_ARCHIVE_LIST''', '''OPTForCausalLM''', '''OPTModel''', '''OPTPreTrainedModel''', '''OPTForSequenceClassification''', '''OPTForQuestionAnswering''', ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __snake_case = ['''TFOPTForCausalLM''', '''TFOPTModel''', '''TFOPTPreTrainedModel'''] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __snake_case = [ '''FlaxOPTForCausalLM''', '''FlaxOPTModel''', '''FlaxOPTPreTrainedModel''', ] if TYPE_CHECKING: from .configuration_opt import OPT_PRETRAINED_CONFIG_ARCHIVE_MAP, OPTConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_opt import ( OPT_PRETRAINED_MODEL_ARCHIVE_LIST, OPTForCausalLM, OPTForQuestionAnswering, OPTForSequenceClassification, OPTModel, OPTPreTrainedModel, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_opt import TFOPTForCausalLM, TFOPTModel, TFOPTPreTrainedModel try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_opt import FlaxOPTForCausalLM, FlaxOPTModel, FlaxOPTPreTrainedModel else: import sys __snake_case = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
348
import unittest from transformers import is_flax_available from transformers.testing_utils import require_flax, require_sentencepiece, require_tokenizers, require_torch, slow if is_flax_available(): import optax from flax.training.common_utils import onehot from transformers import AutoTokenizer, FlaxMTaForConditionalGeneration from transformers.models.ta.modeling_flax_ta import shift_tokens_right @require_torch @require_sentencepiece @require_tokenizers @require_flax class __snake_case ( unittest.TestCase ): @slow def UpperCAmelCase__ ( self ) -> Union[str, Any]: '''simple docstring''' UpperCAmelCase : Any =FlaxMTaForConditionalGeneration.from_pretrained('''google/mt5-small''' ) UpperCAmelCase : Tuple =AutoTokenizer.from_pretrained('''google/mt5-small''' ) UpperCAmelCase : List[str] =tokenizer('''Hello there''' , return_tensors='''np''' ).input_ids UpperCAmelCase : List[Any] =tokenizer('''Hi I am''' , return_tensors='''np''' ).input_ids UpperCAmelCase : Union[str, Any] =shift_tokens_right(snake_case__ , model.config.pad_token_id , model.config.decoder_start_token_id ) UpperCAmelCase : List[str] =model(snake_case__ , decoder_input_ids=snake_case__ ).logits UpperCAmelCase : Any =optax.softmax_cross_entropy(snake_case__ , onehot(snake_case__ , logits.shape[-1] ) ).mean() UpperCAmelCase : Union[str, Any] =-(labels.shape[-1] * loss.item()) UpperCAmelCase : List[str] =-84.9127 self.assertTrue(abs(mtf_score - EXPECTED_SCORE ) < 1e-4 )
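# On the score computed in the test above: optax returns a *mean* per-token
# cross-entropy, so multiplying by the label length recovers the (negative)
# total sequence log-likelihood that the reference score uses. A tiny numeric
# illustration with made-up values:
mean_loss = 2.5   # hypothetical mean cross-entropy per token
num_tokens = 4    # labels.shape[-1]
print(-(num_tokens * mean_loss))  # -10.0, the total-sequence analogue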
348
1
def solution(pence: int = 200) -> int:
    """Project Euler 31: number of ways to make `pence` from UK coins."""
    coins = [1, 2, 5, 10, 20, 50, 100, 200]
    number_of_ways = [0] * (pence + 1)
    number_of_ways[0] = 1  # base case: 1 way to make 0 pence

    for coin in coins:
        for i in range(coin, pence + 1, 1):
            number_of_ways[i] += number_of_ways[i - coin]
    return number_of_ways[pence]


if __name__ == "__main__":
    assert solution(200) == 73682
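# Worked example of the DP above on a tiny instance: there are exactly four
# ways to make 5 pence (1+1+1+1+1, 1+1+1+2, 1+2+2, and the 5p coin); coins
# larger than the target contribute nothing because their inner range is empty.
assert solution(5) == 4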
348
import unittest import numpy as np from diffusers import LMSDiscreteScheduler, OnnxStableDiffusionInpaintPipeline from diffusers.utils.testing_utils import ( is_onnx_available, load_image, nightly, require_onnxruntime, require_torch_gpu, ) from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin if is_onnx_available(): import onnxruntime as ort class __snake_case ( lowerCamelCase__ , unittest.TestCase ): # FIXME: add fast tests pass @nightly @require_onnxruntime @require_torch_gpu class __snake_case ( unittest.TestCase ): @property def UpperCAmelCase__ ( self ) -> List[str]: '''simple docstring''' return ( "CUDAExecutionProvider", { "gpu_mem_limit": "15000000000", # 15GB "arena_extend_strategy": "kSameAsRequested", }, ) @property def UpperCAmelCase__ ( self ) -> int: '''simple docstring''' UpperCAmelCase : List[Any] =ort.SessionOptions() UpperCAmelCase : Optional[int] =False return options def UpperCAmelCase__ ( self ) -> Dict: '''simple docstring''' UpperCAmelCase : int =load_image( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main''' '''/in_paint/overture-creations-5sI6fQgYIuo.png''' ) UpperCAmelCase : Optional[Any] =load_image( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main''' '''/in_paint/overture-creations-5sI6fQgYIuo_mask.png''' ) UpperCAmelCase : List[str] =OnnxStableDiffusionInpaintPipeline.from_pretrained( '''runwayml/stable-diffusion-inpainting''' , revision='''onnx''' , safety_checker=snake_case__ , feature_extractor=snake_case__ , provider=self.gpu_provider , sess_options=self.gpu_options , ) pipe.set_progress_bar_config(disable=snake_case__ ) UpperCAmelCase : Dict ='''A red cat sitting on a park bench''' UpperCAmelCase : int =np.random.RandomState(0 ) UpperCAmelCase : Any =pipe( prompt=snake_case__ , image=snake_case__ , mask_image=snake_case__ , guidance_scale=7.5 , num_inference_steps=10 , generator=snake_case__ , output_type='''np''' , ) UpperCAmelCase : Dict =output.images UpperCAmelCase : Optional[int] =images[0, 255:258, 255:258, -1] assert images.shape == (1, 512, 512, 3) UpperCAmelCase : Tuple =np.array([0.2514, 0.3007, 0.3517, 0.1790, 0.2382, 0.3167, 0.1944, 0.2273, 0.2464] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3 def UpperCAmelCase__ ( self ) -> Optional[Any]: '''simple docstring''' UpperCAmelCase : List[str] =load_image( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main''' '''/in_paint/overture-creations-5sI6fQgYIuo.png''' ) UpperCAmelCase : Tuple =load_image( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main''' '''/in_paint/overture-creations-5sI6fQgYIuo_mask.png''' ) UpperCAmelCase : List[str] =LMSDiscreteScheduler.from_pretrained( '''runwayml/stable-diffusion-inpainting''' , subfolder='''scheduler''' , revision='''onnx''' ) UpperCAmelCase : int =OnnxStableDiffusionInpaintPipeline.from_pretrained( '''runwayml/stable-diffusion-inpainting''' , revision='''onnx''' , scheduler=snake_case__ , safety_checker=snake_case__ , feature_extractor=snake_case__ , provider=self.gpu_provider , sess_options=self.gpu_options , ) pipe.set_progress_bar_config(disable=snake_case__ ) UpperCAmelCase : Union[str, Any] ='''A red cat sitting on a park bench''' UpperCAmelCase : int =np.random.RandomState(0 ) UpperCAmelCase : str =pipe( prompt=snake_case__ , image=snake_case__ , mask_image=snake_case__ , guidance_scale=7.5 , num_inference_steps=20 , generator=snake_case__ , output_type='''np''' , ) UpperCAmelCase : 
Dict =output.images UpperCAmelCase : int =images[0, 255:258, 255:258, -1] assert images.shape == (1, 512, 512, 3) UpperCAmelCase : Union[str, Any] =np.array([0.0086, 0.0077, 0.0083, 0.0093, 0.0107, 0.0139, 0.0094, 0.0097, 0.0125] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
348
1
import os from shutil import copyfile from typing import List, Optional, Tuple from tokenizers import processors from ...tokenization_utils import AddedToken, BatchEncoding from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import is_sentencepiece_available, logging if is_sentencepiece_available(): from .tokenization_nllb import NllbTokenizer else: __snake_case = None __snake_case = logging.get_logger(__name__) __snake_case = {'''vocab_file''': '''sentencepiece.bpe.model''', '''tokenizer_file''': '''tokenizer.json'''} __snake_case = { '''vocab_file''': { '''facebook/nllb-200-distilled-600M''': ( '''https://huggingface.co/facebook/nllb-200-distilled-600M/resolve/main/sentencepiece.bpe.model''' ), }, '''tokenizer_file''': { '''facebook/nllb-200-distilled-600M''': ( '''https://huggingface.co/facebook/nllb-200-distilled-600M/resolve/main/tokenizer.json''' ), }, } __snake_case = { '''facebook/nllb-large-en-ro''': 10_24, '''facebook/nllb-200-distilled-600M''': 10_24, } # fmt: off __snake_case = ['''ace_Arab''', '''ace_Latn''', '''acm_Arab''', '''acq_Arab''', '''aeb_Arab''', '''afr_Latn''', '''ajp_Arab''', '''aka_Latn''', '''amh_Ethi''', '''apc_Arab''', '''arb_Arab''', '''ars_Arab''', '''ary_Arab''', '''arz_Arab''', '''asm_Beng''', '''ast_Latn''', '''awa_Deva''', '''ayr_Latn''', '''azb_Arab''', '''azj_Latn''', '''bak_Cyrl''', '''bam_Latn''', '''ban_Latn''', '''bel_Cyrl''', '''bem_Latn''', '''ben_Beng''', '''bho_Deva''', '''bjn_Arab''', '''bjn_Latn''', '''bod_Tibt''', '''bos_Latn''', '''bug_Latn''', '''bul_Cyrl''', '''cat_Latn''', '''ceb_Latn''', '''ces_Latn''', '''cjk_Latn''', '''ckb_Arab''', '''crh_Latn''', '''cym_Latn''', '''dan_Latn''', '''deu_Latn''', '''dik_Latn''', '''dyu_Latn''', '''dzo_Tibt''', '''ell_Grek''', '''eng_Latn''', '''epo_Latn''', '''est_Latn''', '''eus_Latn''', '''ewe_Latn''', '''fao_Latn''', '''pes_Arab''', '''fij_Latn''', '''fin_Latn''', '''fon_Latn''', '''fra_Latn''', '''fur_Latn''', '''fuv_Latn''', '''gla_Latn''', '''gle_Latn''', '''glg_Latn''', '''grn_Latn''', '''guj_Gujr''', '''hat_Latn''', '''hau_Latn''', '''heb_Hebr''', '''hin_Deva''', '''hne_Deva''', '''hrv_Latn''', '''hun_Latn''', '''hye_Armn''', '''ibo_Latn''', '''ilo_Latn''', '''ind_Latn''', '''isl_Latn''', '''ita_Latn''', '''jav_Latn''', '''jpn_Jpan''', '''kab_Latn''', '''kac_Latn''', '''kam_Latn''', '''kan_Knda''', '''kas_Arab''', '''kas_Deva''', '''kat_Geor''', '''knc_Arab''', '''knc_Latn''', '''kaz_Cyrl''', '''kbp_Latn''', '''kea_Latn''', '''khm_Khmr''', '''kik_Latn''', '''kin_Latn''', '''kir_Cyrl''', '''kmb_Latn''', '''kon_Latn''', '''kor_Hang''', '''kmr_Latn''', '''lao_Laoo''', '''lvs_Latn''', '''lij_Latn''', '''lim_Latn''', '''lin_Latn''', '''lit_Latn''', '''lmo_Latn''', '''ltg_Latn''', '''ltz_Latn''', '''lua_Latn''', '''lug_Latn''', '''luo_Latn''', '''lus_Latn''', '''mag_Deva''', '''mai_Deva''', '''mal_Mlym''', '''mar_Deva''', '''min_Latn''', '''mkd_Cyrl''', '''plt_Latn''', '''mlt_Latn''', '''mni_Beng''', '''khk_Cyrl''', '''mos_Latn''', '''mri_Latn''', '''zsm_Latn''', '''mya_Mymr''', '''nld_Latn''', '''nno_Latn''', '''nob_Latn''', '''npi_Deva''', '''nso_Latn''', '''nus_Latn''', '''nya_Latn''', '''oci_Latn''', '''gaz_Latn''', '''ory_Orya''', '''pag_Latn''', '''pan_Guru''', '''pap_Latn''', '''pol_Latn''', '''por_Latn''', '''prs_Arab''', '''pbt_Arab''', '''quy_Latn''', '''ron_Latn''', '''run_Latn''', '''rus_Cyrl''', '''sag_Latn''', '''san_Deva''', '''sat_Beng''', '''scn_Latn''', '''shn_Mymr''', '''sin_Sinh''', '''slk_Latn''', '''slv_Latn''', '''smo_Latn''', '''sna_Latn''', 
'''snd_Arab''', '''som_Latn''', '''sot_Latn''', '''spa_Latn''', '''als_Latn''', '''srd_Latn''', '''srp_Cyrl''', '''ssw_Latn''', '''sun_Latn''', '''swe_Latn''', '''swh_Latn''', '''szl_Latn''', '''tam_Taml''', '''tat_Cyrl''', '''tel_Telu''', '''tgk_Cyrl''', '''tgl_Latn''', '''tha_Thai''', '''tir_Ethi''', '''taq_Latn''', '''taq_Tfng''', '''tpi_Latn''', '''tsn_Latn''', '''tso_Latn''', '''tuk_Latn''', '''tum_Latn''', '''tur_Latn''', '''twi_Latn''', '''tzm_Tfng''', '''uig_Arab''', '''ukr_Cyrl''', '''umb_Latn''', '''urd_Arab''', '''uzn_Latn''', '''vec_Latn''', '''vie_Latn''', '''war_Latn''', '''wol_Latn''', '''xho_Latn''', '''ydd_Hebr''', '''yor_Latn''', '''yue_Hant''', '''zho_Hans''', '''zho_Hant''', '''zul_Latn'''] class __snake_case ( lowerCamelCase__ ): __lowerCamelCase : str = VOCAB_FILES_NAMES __lowerCamelCase : List[str] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES __lowerCamelCase : Any = PRETRAINED_VOCAB_FILES_MAP __lowerCamelCase : List[str] = ["""input_ids""", """attention_mask"""] __lowerCamelCase : Any = NllbTokenizer __lowerCamelCase : List[int] = [] __lowerCamelCase : List[int] = [] def __init__( self , snake_case__=None , snake_case__=None , snake_case__="<s>" , snake_case__="</s>" , snake_case__="</s>" , snake_case__="<s>" , snake_case__="<unk>" , snake_case__="<pad>" , snake_case__="<mask>" , snake_case__=None , snake_case__=None , snake_case__=None , snake_case__=False , **snake_case__ , ) -> Union[str, Any]: '''simple docstring''' UpperCAmelCase : Any =AddedToken(snake_case__ , lstrip=snake_case__ , rstrip=snake_case__ ) if isinstance(snake_case__ , snake_case__ ) else mask_token UpperCAmelCase : List[Any] =legacy_behaviour super().__init__( vocab_file=snake_case__ , tokenizer_file=snake_case__ , bos_token=snake_case__ , eos_token=snake_case__ , sep_token=snake_case__ , cls_token=snake_case__ , unk_token=snake_case__ , pad_token=snake_case__ , mask_token=snake_case__ , src_lang=snake_case__ , tgt_lang=snake_case__ , additional_special_tokens=snake_case__ , legacy_behaviour=snake_case__ , **snake_case__ , ) UpperCAmelCase : Optional[Any] =vocab_file UpperCAmelCase : Optional[Any] =False if not self.vocab_file else True UpperCAmelCase : Dict =FAIRSEQ_LANGUAGE_CODES.copy() if additional_special_tokens is not None: # Only add those special tokens if they are not already there. 
_additional_special_tokens.extend( [t for t in additional_special_tokens if t not in _additional_special_tokens] ) self.add_special_tokens({'''additional_special_tokens''': _additional_special_tokens} ) UpperCAmelCase : List[str] ={ lang_code: self.convert_tokens_to_ids(snake_case__ ) for lang_code in FAIRSEQ_LANGUAGE_CODES } UpperCAmelCase : int =src_lang if src_lang is not None else '''eng_Latn''' UpperCAmelCase : List[Any] =self.convert_tokens_to_ids(self._src_lang ) UpperCAmelCase : Optional[int] =tgt_lang self.set_src_lang_special_tokens(self._src_lang ) @property def UpperCAmelCase__ ( self ) -> str: '''simple docstring''' return self._src_lang @src_lang.setter def UpperCAmelCase__ ( self , snake_case__ ) -> None: '''simple docstring''' UpperCAmelCase : Any =new_src_lang self.set_src_lang_special_tokens(self._src_lang ) def UpperCAmelCase__ ( self , snake_case__ , snake_case__ = None ) -> List[int]: '''simple docstring''' if token_ids_a is None: return self.prefix_tokens + token_ids_a + self.suffix_tokens # We don't expect to process pairs, but leave the pair logic for API consistency return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens def UpperCAmelCase__ ( self , snake_case__ , snake_case__ = None ) -> List[int]: '''simple docstring''' UpperCAmelCase : List[str] =[self.sep_token_id] UpperCAmelCase : List[Any] =[self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0] def UpperCAmelCase__ ( self , snake_case__ , snake_case__ , snake_case__ , snake_case__ , **snake_case__ ) -> Dict: '''simple docstring''' if src_lang is None or tgt_lang is None: raise ValueError('''Translation requires a `src_lang` and a `tgt_lang` for this model''' ) UpperCAmelCase : Dict =src_lang UpperCAmelCase : List[str] =self(snake_case__ , add_special_tokens=snake_case__ , return_tensors=snake_case__ , **snake_case__ ) UpperCAmelCase : List[str] =self.convert_tokens_to_ids(snake_case__ ) UpperCAmelCase : str =tgt_lang_id return inputs def UpperCAmelCase__ ( self , snake_case__ , snake_case__ = "eng_Latn" , snake_case__ = None , snake_case__ = "fra_Latn" , **snake_case__ , ) -> BatchEncoding: '''simple docstring''' UpperCAmelCase : str =src_lang UpperCAmelCase : str =tgt_lang return super().prepare_seqaseq_batch(snake_case__ , snake_case__ , **snake_case__ ) def UpperCAmelCase__ ( self ) -> Union[str, Any]: '''simple docstring''' return self.set_src_lang_special_tokens(self.src_lang ) def UpperCAmelCase__ ( self ) -> Tuple: '''simple docstring''' return self.set_tgt_lang_special_tokens(self.tgt_lang ) def UpperCAmelCase__ ( self , snake_case__ ) -> None: '''simple docstring''' UpperCAmelCase : List[str] =self.convert_tokens_to_ids(snake_case__ ) if self.legacy_behaviour: UpperCAmelCase : Tuple =[] UpperCAmelCase : List[Any] =[self.eos_token_id, self.cur_lang_code] else: UpperCAmelCase : Any =[self.cur_lang_code] UpperCAmelCase : str =[self.eos_token_id] UpperCAmelCase : Any =self.convert_ids_to_tokens(self.prefix_tokens ) UpperCAmelCase : Optional[Any] =self.convert_ids_to_tokens(self.suffix_tokens ) UpperCAmelCase : Optional[int] =processors.TemplateProcessing( single=prefix_tokens_str + ['''$A'''] + suffix_tokens_str , pair=prefix_tokens_str + ['''$A''', '''$B'''] + suffix_tokens_str , special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str , self.prefix_tokens + self.suffix_tokens ) ) , ) def UpperCAmelCase__ ( self , snake_case__ ) -> None: '''simple docstring''' UpperCAmelCase : 
Dict =self.convert_tokens_to_ids(snake_case__ ) if self.legacy_behaviour: UpperCAmelCase : int =[] UpperCAmelCase : str =[self.eos_token_id, self.cur_lang_code] else: UpperCAmelCase : Optional[int] =[self.cur_lang_code] UpperCAmelCase : str =[self.eos_token_id] UpperCAmelCase : str =self.convert_ids_to_tokens(self.prefix_tokens ) UpperCAmelCase : List[Any] =self.convert_ids_to_tokens(self.suffix_tokens ) UpperCAmelCase : int =processors.TemplateProcessing( single=prefix_tokens_str + ['''$A'''] + suffix_tokens_str , pair=prefix_tokens_str + ['''$A''', '''$B'''] + suffix_tokens_str , special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str , self.prefix_tokens + self.suffix_tokens ) ) , ) def UpperCAmelCase__ ( self , snake_case__ , snake_case__ = None ) -> Tuple[str]: '''simple docstring''' if not self.can_save_slow_tokenizer: raise ValueError( '''Your fast tokenizer does not have the necessary information to save the vocabulary for a slow ''' '''tokenizer.''' ) if not os.path.isdir(snake_case__ ): logger.error(f'''Vocabulary path ({save_directory}) should be a directory.''' ) return UpperCAmelCase : Union[str, Any] =os.path.join( snake_case__ , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(snake_case__ ): copyfile(self.vocab_file , snake_case__ ) return (out_vocab_file,)
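# Usage sketch for the fast NLLB tokenizer above, assuming the published
# facebook/nllb-200-distilled-600M checkpoint. With the non-legacy behaviour
# shown in set_src_lang_special_tokens, the source language code is prepended
# and </s> closes the sequence (legacy behaviour instead appends both).
from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained(
    "facebook/nllb-200-distilled-600M", src_lang="eng_Latn", tgt_lang="fra_Latn"
)
enc = tok("UN Chief says there is no military solution in Syria")
print(tok.convert_ids_to_tokens(enc["input_ids"])[0])  # typically "eng_Latn"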
348
from unittest import TestCase from datasets import Dataset from minhash_deduplication import deduplicate_dataset, make_duplicate_clusters def lowerCAmelCase_ ( )-> int: '''simple docstring''' UpperCAmelCase : str ={ '''repo_name''': ['''test_repo1''', '''test_repo2''', '''test_repo3'''], '''path''': ['''test_1.py''', '''test_2.py''', '''unit_test.py'''], '''content''': ['''a ''' * 20, '''a ''' * 30, '''b ''' * 7], } UpperCAmelCase : Union[str, Any] =Dataset.from_dict(__lowerCAmelCase ) return dataset class __snake_case ( lowerCamelCase__ ): def UpperCAmelCase__ ( self ) -> Tuple: '''simple docstring''' UpperCAmelCase : List[str] =get_dataset() UpperCAmelCase : Optional[int] =make_duplicate_clusters(snake_case__ , 0.85 ) self.assertEqual(len(duplicate_clusters[0] ) , 2 ) def UpperCAmelCase__ ( self ) -> Tuple: '''simple docstring''' UpperCAmelCase : str =get_dataset() UpperCAmelCase , UpperCAmelCase : Tuple =deduplicate_dataset(snake_case__ ) self.assertEqual(len(snake_case__ ) , 2 ) print(snake_case__ ) self.assertEqual(duplicate_clusters[0][0]['''copies'''] , 2 ) self.assertEqual(duplicate_clusters[0][0]['''is_extreme'''] , snake_case__ )
348
1
import gc import unittest from transformers import CTRLConfig, is_torch_available from transformers.testing_utils import require_torch, slow, torch_device from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( CTRL_PRETRAINED_MODEL_ARCHIVE_LIST, CTRLForSequenceClassification, CTRLLMHeadModel, CTRLModel, ) class __snake_case : def __init__( self , snake_case__ , snake_case__=14 , snake_case__=7 , snake_case__=True , snake_case__=True , snake_case__=True , snake_case__=True , snake_case__=True , snake_case__=99 , snake_case__=32 , snake_case__=5 , snake_case__=4 , snake_case__=37 , snake_case__="gelu" , snake_case__=0.1 , snake_case__=0.1 , snake_case__=512 , snake_case__=16 , snake_case__=2 , snake_case__=0.02 , snake_case__=3 , snake_case__=4 , snake_case__=None , ) -> int: '''simple docstring''' UpperCAmelCase : Any =parent UpperCAmelCase : Tuple =batch_size UpperCAmelCase : List[Any] =seq_length UpperCAmelCase : int =is_training UpperCAmelCase : List[str] =use_token_type_ids UpperCAmelCase : Optional[int] =use_input_mask UpperCAmelCase : Tuple =use_labels UpperCAmelCase : Dict =use_mc_token_ids UpperCAmelCase : Tuple =vocab_size UpperCAmelCase : Union[str, Any] =hidden_size UpperCAmelCase : List[str] =num_hidden_layers UpperCAmelCase : Any =num_attention_heads UpperCAmelCase : Union[str, Any] =intermediate_size UpperCAmelCase : List[str] =hidden_act UpperCAmelCase : Any =hidden_dropout_prob UpperCAmelCase : List[str] =attention_probs_dropout_prob UpperCAmelCase : Tuple =max_position_embeddings UpperCAmelCase : Optional[Any] =type_vocab_size UpperCAmelCase : Dict =type_sequence_label_size UpperCAmelCase : Optional[int] =initializer_range UpperCAmelCase : List[str] =num_labels UpperCAmelCase : Optional[Any] =num_choices UpperCAmelCase : int =scope UpperCAmelCase : List[str] =self.vocab_size - 1 def UpperCAmelCase__ ( self ) -> int: '''simple docstring''' UpperCAmelCase : Optional[int] =ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) UpperCAmelCase : Optional[int] =None if self.use_input_mask: UpperCAmelCase : Tuple =random_attention_mask([self.batch_size, self.seq_length] ) UpperCAmelCase : Dict =None if self.use_token_type_ids: UpperCAmelCase : Dict =ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) UpperCAmelCase : List[str] =None if self.use_mc_token_ids: UpperCAmelCase : str =ids_tensor([self.batch_size, self.num_choices] , self.seq_length ) UpperCAmelCase : Any =None UpperCAmelCase : Optional[Any] =None UpperCAmelCase : Optional[Any] =None if self.use_labels: UpperCAmelCase : Union[str, Any] =ids_tensor([self.batch_size] , self.type_sequence_label_size ) UpperCAmelCase : Union[str, Any] =ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) UpperCAmelCase : List[str] =ids_tensor([self.batch_size] , self.num_choices ) UpperCAmelCase : Any =self.get_config() UpperCAmelCase : Optional[int] =ids_tensor([self.num_hidden_layers, self.num_attention_heads] , 2 ) return ( config, input_ids, input_mask, head_mask, token_type_ids, mc_token_ids, sequence_labels, token_labels, choice_labels, ) def UpperCAmelCase__ ( self ) -> Dict: '''simple docstring''' return CTRLConfig( vocab_size=self.vocab_size , n_embd=self.hidden_size , n_layer=self.num_hidden_layers , 
n_head=self.num_attention_heads , n_positions=self.max_position_embeddings , pad_token_id=self.pad_token_id , ) def UpperCAmelCase__ ( self , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , *snake_case__ ) -> str: '''simple docstring''' UpperCAmelCase : Union[str, Any] =CTRLModel(config=snake_case__ ) model.to(snake_case__ ) model.eval() model(snake_case__ , token_type_ids=snake_case__ , head_mask=snake_case__ ) model(snake_case__ , token_type_ids=snake_case__ ) UpperCAmelCase : str =model(snake_case__ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) self.parent.assertEqual(len(result.past_key_values ) , config.n_layer ) def UpperCAmelCase__ ( self , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , *snake_case__ ) -> Optional[int]: '''simple docstring''' UpperCAmelCase : int =CTRLLMHeadModel(snake_case__ ) model.to(snake_case__ ) model.eval() UpperCAmelCase : Optional[Any] =model(snake_case__ , token_type_ids=snake_case__ , labels=snake_case__ ) self.parent.assertEqual(result.loss.shape , () ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def UpperCAmelCase__ ( self ) -> Optional[Any]: '''simple docstring''' UpperCAmelCase : Dict =self.prepare_config_and_inputs() ( ( UpperCAmelCase ) , ( UpperCAmelCase ) , ( UpperCAmelCase ) , ( UpperCAmelCase ) , ( UpperCAmelCase ) , ( UpperCAmelCase ) , ( UpperCAmelCase ) , ( UpperCAmelCase ) , ( UpperCAmelCase ) , ) : str =config_and_inputs UpperCAmelCase : str ={'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''head_mask''': head_mask} return config, inputs_dict def UpperCAmelCase__ ( self , snake_case__ , snake_case__ , snake_case__ , snake_case__ , *snake_case__ ) -> Tuple: '''simple docstring''' UpperCAmelCase : Tuple =self.num_labels UpperCAmelCase : int =CTRLForSequenceClassification(snake_case__ ) model.to(snake_case__ ) model.eval() UpperCAmelCase : List[str] =ids_tensor([self.batch_size] , self.type_sequence_label_size ) UpperCAmelCase : Optional[Any] =model(snake_case__ , token_type_ids=snake_case__ , labels=snake_case__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) @require_torch class __snake_case ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , unittest.TestCase ): __lowerCamelCase : Tuple = (CTRLModel, CTRLLMHeadModel, CTRLForSequenceClassification) if is_torch_available() else () __lowerCamelCase : List[Any] = (CTRLLMHeadModel,) if is_torch_available() else () __lowerCamelCase : Tuple = ( { """feature-extraction""": CTRLModel, """text-classification""": CTRLForSequenceClassification, """text-generation""": CTRLLMHeadModel, """zero-shot""": CTRLForSequenceClassification, } if is_torch_available() else {} ) __lowerCamelCase : Optional[int] = True __lowerCamelCase : Optional[Any] = False __lowerCamelCase : List[str] = False def UpperCAmelCase__ ( self , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ ) -> Any: '''simple docstring''' if pipeline_test_casse_name == "ZeroShotClassificationPipelineTests": # Get `tokenizer does not have a padding token` error for both fast/slow tokenizers. # `CTRLConfig` was never used in pipeline tests, either because of a missing checkpoint or because a tiny # config could not be created. 
return True return False def UpperCAmelCase__ ( self ) -> List[Any]: '''simple docstring''' UpperCAmelCase : List[str] =CTRLModelTester(self ) UpperCAmelCase : Dict =ConfigTester(self , config_class=snake_case__ , n_embd=37 ) def UpperCAmelCase__ ( self ) -> int: '''simple docstring''' super().tearDown() # clean-up as much as possible GPU memory occupied by PyTorch gc.collect() torch.cuda.empty_cache() def UpperCAmelCase__ ( self ) -> int: '''simple docstring''' self.config_tester.run_common_tests() def UpperCAmelCase__ ( self ) -> Union[str, Any]: '''simple docstring''' UpperCAmelCase : int =self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_ctrl_model(*snake_case__ ) def UpperCAmelCase__ ( self ) -> Dict: '''simple docstring''' UpperCAmelCase : Optional[Any] =self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_lm_head_model(*snake_case__ ) @unittest.skip('''Will be fixed soon by reducing the size of the model used for common tests.''' ) def UpperCAmelCase__ ( self ) -> Tuple: '''simple docstring''' pass @slow def UpperCAmelCase__ ( self ) -> Tuple: '''simple docstring''' for model_name in CTRL_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: UpperCAmelCase : Optional[Any] =CTRLModel.from_pretrained(snake_case__ ) self.assertIsNotNone(snake_case__ ) @unittest.skip('''The model doesn\'t support left padding''' ) # and it's not used enough to be worth fixing :) def UpperCAmelCase__ ( self ) -> int: '''simple docstring''' pass @require_torch class __snake_case ( unittest.TestCase ): def UpperCAmelCase__ ( self ) -> Tuple: '''simple docstring''' super().tearDown() # clean-up as much as possible GPU memory occupied by PyTorch gc.collect() torch.cuda.empty_cache() @slow def UpperCAmelCase__ ( self ) -> Tuple: '''simple docstring''' UpperCAmelCase : List[str] =CTRLLMHeadModel.from_pretrained('''ctrl''' ) model.to(snake_case__ ) UpperCAmelCase : Optional[int] =torch.tensor( [[1_1859, 0, 1611, 8]] , dtype=torch.long , device=snake_case__ ) # Legal the president is UpperCAmelCase : List[Any] =[ 1_1859, 0, 1611, 8, 5, 150, 2_6449, 2, 19, 348, 469, 3, 2595, 48, 2_0740, 24_6533, 24_6533, 19, 30, 5, ] # Legal the president is a good guy and I don't want to lose my job. \n \n I have a UpperCAmelCase : Optional[Any] =model.generate(snake_case__ , do_sample=snake_case__ ) self.assertListEqual(output_ids[0].tolist() , snake_case__ )
348
from typing import Callable, List, Optional, Tuple, Union import torch from transformers import CLIPTextModel, CLIPTokenizer from ...configuration_utils import ConfigMixin, register_to_config from ...models import ModelMixin, TransformeraDModel, VQModel from ...schedulers import VQDiffusionScheduler from ...utils import logging from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput __snake_case = logging.get_logger(__name__) # pylint: disable=invalid-name class __snake_case ( lowerCamelCase__ , lowerCamelCase__ ): @register_to_config def __init__( self , snake_case__ , snake_case__ = None , snake_case__ = None ) -> str: '''simple docstring''' super().__init__() UpperCAmelCase : Optional[Any] =learnable if self.learnable: assert hidden_size is not None, "learnable=True requires `hidden_size` to be set" assert length is not None, "learnable=True requires `length` to be set" UpperCAmelCase : Any =torch.zeros(snake_case__ , snake_case__ ) else: UpperCAmelCase : Union[str, Any] =None UpperCAmelCase : Optional[int] =torch.nn.Parameter(snake_case__ ) class __snake_case ( lowerCamelCase__ ): __lowerCamelCase : VQModel __lowerCamelCase : CLIPTextModel __lowerCamelCase : CLIPTokenizer __lowerCamelCase : TransformeraDModel __lowerCamelCase : LearnedClassifierFreeSamplingEmbeddings __lowerCamelCase : VQDiffusionScheduler def __init__( self , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , ) -> int: '''simple docstring''' super().__init__() self.register_modules( vqvae=snake_case__ , transformer=snake_case__ , text_encoder=snake_case__ , tokenizer=snake_case__ , scheduler=snake_case__ , learned_classifier_free_sampling_embeddings=snake_case__ , ) def UpperCAmelCase__ ( self , snake_case__ , snake_case__ , snake_case__ ) -> Optional[int]: '''simple docstring''' UpperCAmelCase : int =len(snake_case__ ) if isinstance(snake_case__ , snake_case__ ) else 1 # get prompt text embeddings UpperCAmelCase : Optional[int] =self.tokenizer( snake_case__ , padding='''max_length''' , max_length=self.tokenizer.model_max_length , return_tensors='''pt''' , ) UpperCAmelCase : int =text_inputs.input_ids if text_input_ids.shape[-1] > self.tokenizer.model_max_length: UpperCAmelCase : List[str] =self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :] ) logger.warning( '''The following part of your input was truncated because CLIP can only handle sequences up to''' f''' {self.tokenizer.model_max_length} tokens: {removed_text}''' ) UpperCAmelCase : Optional[Any] =text_input_ids[:, : self.tokenizer.model_max_length] UpperCAmelCase : List[Any] =self.text_encoder(text_input_ids.to(self.device ) )[0] # NOTE: This additional step of normalizing the text embeddings is from VQ-Diffusion. # While CLIP does normalize the pooled output of the text transformer when combining # the image and text embeddings, CLIP does not directly normalize the last hidden state. # # CLIP normalizing the pooled output. 
# https://github.com/huggingface/transformers/blob/d92e22d1f28324f513f3080e5c47c071a3916721/src/transformers/models/clip/modeling_clip.py#L1052-L1053 UpperCAmelCase : int =prompt_embeds / prompt_embeds.norm(dim=-1 , keepdim=snake_case__ ) # duplicate text embeddings for each generation per prompt UpperCAmelCase : int =prompt_embeds.repeat_interleave(snake_case__ , dim=0 ) if do_classifier_free_guidance: if self.learned_classifier_free_sampling_embeddings.learnable: UpperCAmelCase : Optional[int] =self.learned_classifier_free_sampling_embeddings.embeddings UpperCAmelCase : str =negative_prompt_embeds.unsqueeze(0 ).repeat(snake_case__ , 1 , 1 ) else: UpperCAmelCase : str =[''''''] * batch_size UpperCAmelCase : Tuple =text_input_ids.shape[-1] UpperCAmelCase : Optional[Any] =self.tokenizer( snake_case__ , padding='''max_length''' , max_length=snake_case__ , truncation=snake_case__ , return_tensors='''pt''' , ) UpperCAmelCase : Optional[Any] =self.text_encoder(uncond_input.input_ids.to(self.device ) )[0] # See comment for normalizing text embeddings UpperCAmelCase : Optional[int] =negative_prompt_embeds / negative_prompt_embeds.norm(dim=-1 , keepdim=snake_case__ ) # duplicate unconditional embeddings for each generation per prompt, using mps friendly method UpperCAmelCase : Optional[Any] =negative_prompt_embeds.shape[1] UpperCAmelCase : Union[str, Any] =negative_prompt_embeds.repeat(1 , snake_case__ , 1 ) UpperCAmelCase : Optional[Any] =negative_prompt_embeds.view(batch_size * num_images_per_prompt , snake_case__ , -1 ) # For classifier free guidance, we need to do two forward passes. # Here we concatenate the unconditional and text embeddings into a single batch # to avoid doing two forward passes UpperCAmelCase : int =torch.cat([negative_prompt_embeds, prompt_embeds] ) return prompt_embeds @torch.no_grad() def __call__( self , snake_case__ , snake_case__ = 100 , snake_case__ = 5.0 , snake_case__ = 1.0 , snake_case__ = 1 , snake_case__ = None , snake_case__ = None , snake_case__ = "pil" , snake_case__ = True , snake_case__ = None , snake_case__ = 1 , ) -> Union[ImagePipelineOutput, Tuple]: '''simple docstring''' if isinstance(snake_case__ , snake_case__ ): UpperCAmelCase : Optional[int] =1 elif isinstance(snake_case__ , snake_case__ ): UpperCAmelCase : Tuple =len(snake_case__ ) else: raise ValueError(f'''`prompt` has to be of type `str` or `list` but is {type(snake_case__ )}''' ) UpperCAmelCase : Tuple =batch_size * num_images_per_prompt UpperCAmelCase : List[str] =guidance_scale > 1.0 UpperCAmelCase : List[Any] =self._encode_prompt(snake_case__ , snake_case__ , snake_case__ ) if (callback_steps is None) or ( callback_steps is not None and (not isinstance(snake_case__ , snake_case__ ) or callback_steps <= 0) ): raise ValueError( f'''`callback_steps` has to be a positive integer but is {callback_steps} of type''' f''' {type(snake_case__ )}.''' ) # get the initial completely masked latents unless the user supplied it UpperCAmelCase : int =(batch_size, self.transformer.num_latent_pixels) if latents is None: UpperCAmelCase : Union[str, Any] =self.transformer.num_vector_embeds - 1 UpperCAmelCase : str =torch.full(snake_case__ , snake_case__ ).to(self.device ) else: if latents.shape != latents_shape: raise ValueError(f'''Unexpected latents shape, got {latents.shape}, expected {latents_shape}''' ) if (latents < 0).any() or (latents >= self.transformer.num_vector_embeds).any(): raise ValueError( '''Unexpected latents value(s). All latents be valid embedding indices i.e. 
in the range 0,''' f''' {self.transformer.num_vector_embeds - 1} (inclusive).''' ) UpperCAmelCase : Any =latents.to(self.device ) # set timesteps self.scheduler.set_timesteps(snake_case__ , device=self.device ) UpperCAmelCase : Any =self.scheduler.timesteps.to(self.device ) UpperCAmelCase : Optional[int] =latents for i, t in enumerate(self.progress_bar(snake_case__ ) ): # expand the sample if we are doing classifier free guidance UpperCAmelCase : Optional[Any] =torch.cat([sample] * 2 ) if do_classifier_free_guidance else sample # predict the un-noised image # model_output == `log_p_x_0` UpperCAmelCase : Optional[int] =self.transformer(snake_case__ , encoder_hidden_states=snake_case__ , timestep=snake_case__ ).sample if do_classifier_free_guidance: UpperCAmelCase , UpperCAmelCase : str =model_output.chunk(2 ) UpperCAmelCase : Optional[int] =model_output_uncond + guidance_scale * (model_output_text - model_output_uncond) model_output -= torch.logsumexp(snake_case__ , dim=1 , keepdim=snake_case__ ) UpperCAmelCase : Tuple =self.truncate(snake_case__ , snake_case__ ) # remove `log(0)`'s (`-inf`s) UpperCAmelCase : Optional[Any] =model_output.clamp(-70 ) # compute the previous noisy sample x_t -> x_t-1 UpperCAmelCase : int =self.scheduler.step(snake_case__ , timestep=snake_case__ , sample=snake_case__ , generator=snake_case__ ).prev_sample # call the callback, if provided if callback is not None and i % callback_steps == 0: callback(snake_case__ , snake_case__ , snake_case__ ) UpperCAmelCase : Optional[int] =self.vqvae.config.vq_embed_dim UpperCAmelCase : Optional[Any] =(batch_size, self.transformer.height, self.transformer.width, embedding_channels) UpperCAmelCase : Dict =self.vqvae.quantize.get_codebook_entry(snake_case__ , shape=snake_case__ ) UpperCAmelCase : Tuple =self.vqvae.decode(snake_case__ , force_not_quantize=snake_case__ ).sample UpperCAmelCase : Union[str, Any] =(image / 2 + 0.5).clamp(0 , 1 ) UpperCAmelCase : Any =image.cpu().permute(0 , 2 , 3 , 1 ).numpy() if output_type == "pil": UpperCAmelCase : List[str] =self.numpy_to_pil(snake_case__ ) if not return_dict: return (image,) return ImagePipelineOutput(images=snake_case__ ) def UpperCAmelCase__ ( self , snake_case__ , snake_case__ ) -> torch.FloatTensor: '''simple docstring''' UpperCAmelCase , UpperCAmelCase : int =torch.sort(snake_case__ , 1 , descending=snake_case__ ) UpperCAmelCase : Union[str, Any] =torch.exp(snake_case__ ) UpperCAmelCase : Union[str, Any] =sorted_p_x_0.cumsum(dim=1 ) < truncation_rate # Ensure that at least the largest probability is not zeroed out UpperCAmelCase : Optional[Any] =torch.full_like(keep_mask[:, 0:1, :] , snake_case__ ) UpperCAmelCase : Tuple =torch.cat((all_true, keep_mask) , dim=1 ) UpperCAmelCase : int =keep_mask[:, :-1, :] UpperCAmelCase : int =keep_mask.gather(1 , indices.argsort(1 ) ) UpperCAmelCase : Dict =log_p_x_0.clone() UpperCAmelCase : List[Any] =-torch.inf # -inf = log(0) return rv
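# Standalone sketch of the `truncate` step above: sort the per-class
# log-probs, keep classes until the cumulative probability first reaches
# `truncation_rate` (always keeping the top class), and send the rest to
# log(0) = -inf. Toy values, shape (batch, classes, pixels):
import torch

log_p = torch.log(torch.tensor([[[0.5], [0.3], [0.15], [0.05]]]))
sorted_lp, idx = torch.sort(log_p, 1, descending=True)
keep = sorted_lp.exp().cumsum(dim=1) < 0.75          # truncation_rate = 0.75
keep = torch.cat((torch.ones_like(keep[:, :1, :]), keep[:, :-1, :]), dim=1)
keep = keep.gather(1, idx.argsort(1))                # back to original order
out = log_p.clone()
out[~keep] = -torch.inf
print(out.exp().squeeze(-1))                         # tensor([[0.5, 0.3, 0.0, 0.0]])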
348
1
from collections import OrderedDict from typing import TYPE_CHECKING, Any, Mapping, Optional, Union from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig, OnnxSeqaSeqConfigWithPast from ...utils import logging if TYPE_CHECKING: from ...feature_extraction_utils import FeatureExtractionMixin from ...tokenization_utils_base import PreTrainedTokenizerBase from ...utils import TensorType __snake_case = logging.get_logger(__name__) __snake_case = { '''openai/whisper-base''': '''https://huggingface.co/openai/whisper-base/resolve/main/config.json''', } # fmt: off __snake_case = [ 1, 2, 7, 8, 9, 10, 14, 25, 26, 27, 28, 29, 31, 58, 59, 60, 61, 62, 63, 90, 91, 92, 93, 3_57, 3_66, 4_38, 5_32, 6_85, 7_05, 7_96, 9_30, 10_58, 12_20, 12_67, 12_79, 13_03, 13_43, 13_77, 13_91, 16_35, 17_82, 18_75, 21_62, 23_61, 24_88, 34_67, 40_08, 42_11, 46_00, 48_08, 52_99, 58_55, 63_29, 72_03, 96_09, 99_59, 1_05_63, 1_07_86, 1_14_20, 1_17_09, 1_19_07, 1_31_63, 1_36_97, 1_37_00, 1_48_08, 1_53_06, 1_64_10, 1_67_91, 1_79_92, 1_92_03, 1_95_10, 2_07_24, 2_23_05, 2_29_35, 2_70_07, 3_01_09, 3_04_20, 3_34_09, 3_49_49, 4_02_83, 4_04_93, 4_05_49, 4_72_82, 4_91_46, 5_02_57, 5_03_59, 5_03_60, 5_03_61 ] __snake_case = [ 1, 2, 7, 8, 9, 10, 14, 25, 26, 27, 28, 29, 31, 58, 59, 60, 61, 62, 63, 90, 91, 92, 93, 3_59, 5_03, 5_22, 5_42, 8_73, 8_93, 9_02, 9_18, 9_22, 9_31, 13_50, 18_53, 19_82, 24_60, 26_27, 32_46, 32_53, 32_68, 35_36, 38_46, 39_61, 41_83, 46_67, 65_85, 66_47, 72_73, 90_61, 93_83, 1_04_28, 1_09_29, 1_19_38, 1_20_33, 1_23_31, 1_25_62, 1_37_93, 1_41_57, 1_46_35, 1_52_65, 1_56_18, 1_65_53, 1_66_04, 1_83_62, 1_89_56, 2_00_75, 2_16_75, 2_25_20, 2_61_30, 2_61_61, 2_64_35, 2_82_79, 2_94_64, 3_16_50, 3_23_02, 3_24_70, 3_68_65, 4_28_63, 4_74_25, 4_98_70, 5_02_54, 5_02_58, 5_03_60, 5_03_61, 5_03_62 ] class __snake_case ( lowerCamelCase__ ): __lowerCamelCase : List[str] = """whisper""" __lowerCamelCase : List[str] = ["""past_key_values"""] __lowerCamelCase : Dict = {"""num_attention_heads""": """encoder_attention_heads""", """hidden_size""": """d_model"""} def __init__( self , snake_case__=5_1865 , snake_case__=80 , snake_case__=6 , snake_case__=4 , snake_case__=6 , snake_case__=4 , snake_case__=1536 , snake_case__=1536 , snake_case__=0.0 , snake_case__=0.0 , snake_case__=5_0257 , snake_case__=True , snake_case__=True , snake_case__="gelu" , snake_case__=256 , snake_case__=0.0 , snake_case__=0.0 , snake_case__=0.0 , snake_case__=0.02 , snake_case__=False , snake_case__=1500 , snake_case__=448 , snake_case__=5_0256 , snake_case__=5_0256 , snake_case__=5_0256 , snake_case__=None , snake_case__=[220, 5_0256] , snake_case__=False , snake_case__=256 , snake_case__=False , snake_case__=0.05 , snake_case__=10 , snake_case__=2 , snake_case__=0.0 , snake_case__=10 , snake_case__=0 , snake_case__=7 , **snake_case__ , ) -> List[str]: '''simple docstring''' UpperCAmelCase : int =vocab_size UpperCAmelCase : Union[str, Any] =num_mel_bins UpperCAmelCase : Optional[int] =d_model UpperCAmelCase : Any =encoder_layers UpperCAmelCase : List[str] =encoder_attention_heads UpperCAmelCase : Optional[Any] =decoder_layers UpperCAmelCase : Optional[int] =decoder_attention_heads UpperCAmelCase : Union[str, Any] =decoder_ffn_dim UpperCAmelCase : int =encoder_ffn_dim UpperCAmelCase : Any =dropout UpperCAmelCase : Optional[Any] =attention_dropout UpperCAmelCase : Any =activation_dropout UpperCAmelCase : Dict =activation_function UpperCAmelCase : Optional[int] =init_std UpperCAmelCase : Optional[Any] =encoder_layerdrop UpperCAmelCase : str 
=decoder_layerdrop UpperCAmelCase : Union[str, Any] =use_cache UpperCAmelCase : Tuple =encoder_layers UpperCAmelCase : List[Any] =scale_embedding # scale factor will be sqrt(d_model) if True UpperCAmelCase : Optional[Any] =max_source_positions UpperCAmelCase : int =max_target_positions # Audio Classification-specific parameters. Feel free to ignore for other classes. UpperCAmelCase : Optional[int] =classifier_proj_size UpperCAmelCase : str =use_weighted_layer_sum # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779 UpperCAmelCase : Tuple =apply_spec_augment UpperCAmelCase : Optional[Any] =mask_time_prob UpperCAmelCase : str =mask_time_length UpperCAmelCase : str =mask_time_min_masks UpperCAmelCase : int =mask_feature_prob UpperCAmelCase : List[str] =mask_feature_length UpperCAmelCase : str =mask_feature_min_masks UpperCAmelCase : Union[str, Any] =median_filter_width super().__init__( pad_token_id=snake_case__ , bos_token_id=snake_case__ , eos_token_id=snake_case__ , is_encoder_decoder=snake_case__ , decoder_start_token_id=snake_case__ , suppress_tokens=snake_case__ , begin_suppress_tokens=snake_case__ , **snake_case__ , ) class __snake_case ( lowerCamelCase__ ): @property def UpperCAmelCase__ ( self ) -> Mapping[str, Mapping[int, str]]: '''simple docstring''' UpperCAmelCase : Union[str, Any] =OrderedDict( [ ('''input_features''', {0: '''batch''', 1: '''feature_size''', 2: '''encoder_sequence'''}), ] ) if self.use_past: UpperCAmelCase : Dict ={0: '''batch'''} else: UpperCAmelCase : Optional[int] ={0: '''batch''', 1: '''decoder_sequence'''} if self.use_past: self.fill_with_past_key_values_(snake_case__ , direction='''inputs''' ) return common_inputs def UpperCAmelCase__ ( self , snake_case__ , snake_case__ = -1 , snake_case__ = -1 , snake_case__ = False , snake_case__ = None , snake_case__ = 2_2050 , snake_case__ = 5.0 , snake_case__ = 220 , ) -> Mapping[str, Any]: '''simple docstring''' UpperCAmelCase : Union[str, Any] =OrderedDict() UpperCAmelCase : Any =OnnxConfig.generate_dummy_inputs( self , preprocessor=preprocessor.feature_extractor , batch_size=snake_case__ , framework=snake_case__ , sampling_rate=snake_case__ , time_duration=snake_case__ , frequency=snake_case__ , ) UpperCAmelCase : Optional[int] =encoder_inputs['''input_features'''].shape[2] UpperCAmelCase : List[str] =encoder_sequence_length // 2 if self.use_past else seq_length UpperCAmelCase : str =super().generate_dummy_inputs( preprocessor.tokenizer , snake_case__ , snake_case__ , snake_case__ , snake_case__ ) UpperCAmelCase : Union[str, Any] =encoder_inputs.pop('''input_features''' ) UpperCAmelCase : List[Any] =decoder_inputs.pop('''decoder_input_ids''' ) if "past_key_values" in decoder_inputs: UpperCAmelCase : Tuple =decoder_inputs.pop('''past_key_values''' ) return dummy_inputs @property def UpperCAmelCase__ ( self ) -> float: '''simple docstring''' return 1e-3
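# Illustrative usage (not part of the original file) — a minimal sketch of
# instantiating the Whisper configuration defined above. The overridden sizes
# are arbitrary assumptions for demonstration; every other value falls back to
# the defaults in the signature.
from transformers import WhisperConfig

config = WhisperConfig(encoder_layers=2, decoder_layers=2, d_model=256)
print(config.max_source_positions)  # 1500 mel frames seen by the encoder
# attribute_map above aliases num_attention_heads -> encoder_attention_heads
print(config.num_attention_heads)   # 4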
import unittest import numpy as np import torch from diffusers import KarrasVePipeline, KarrasVeScheduler, UNetaDModel from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device enable_full_determinism() class __snake_case ( unittest.TestCase ): @property def UpperCAmelCase__ ( self ) -> int: '''simple docstring''' torch.manual_seed(0 ) UpperCAmelCase : Any =UNetaDModel( block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=3 , out_channels=3 , down_block_types=('''DownBlock2D''', '''AttnDownBlock2D''') , up_block_types=('''AttnUpBlock2D''', '''UpBlock2D''') , ) return model def UpperCAmelCase__ ( self ) -> Union[str, Any]: '''simple docstring''' UpperCAmelCase : Tuple =self.dummy_uncond_unet UpperCAmelCase : Optional[int] =KarrasVeScheduler() UpperCAmelCase : List[Any] =KarrasVePipeline(unet=snake_case__ , scheduler=snake_case__ ) pipe.to(snake_case__ ) pipe.set_progress_bar_config(disable=snake_case__ ) UpperCAmelCase : List[str] =torch.manual_seed(0 ) UpperCAmelCase : List[str] =pipe(num_inference_steps=2 , generator=snake_case__ , output_type='''numpy''' ).images UpperCAmelCase : str =torch.manual_seed(0 ) UpperCAmelCase : str =pipe(num_inference_steps=2 , generator=snake_case__ , output_type='''numpy''' , return_dict=snake_case__ )[0] UpperCAmelCase : Any =image[0, -3:, -3:, -1] UpperCAmelCase : List[str] =image_from_tuple[0, -3:, -3:, -1] assert image.shape == (1, 32, 32, 3) UpperCAmelCase : int =np.array([0.0, 1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2 assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2 @slow @require_torch class __snake_case ( unittest.TestCase ): def UpperCAmelCase__ ( self ) -> List[Any]: '''simple docstring''' UpperCAmelCase : Tuple ='''google/ncsnpp-celebahq-256''' UpperCAmelCase : int =UNetaDModel.from_pretrained(snake_case__ ) UpperCAmelCase : Dict =KarrasVeScheduler() UpperCAmelCase : Union[str, Any] =KarrasVePipeline(unet=snake_case__ , scheduler=snake_case__ ) pipe.to(snake_case__ ) pipe.set_progress_bar_config(disable=snake_case__ ) UpperCAmelCase : Any =torch.manual_seed(0 ) UpperCAmelCase : Tuple =pipe(num_inference_steps=20 , generator=snake_case__ , output_type='''numpy''' ).images UpperCAmelCase : Optional[int] =image[0, -3:, -3:, -1] assert image.shape == (1, 256, 256, 3) UpperCAmelCase : Tuple =np.array([0.578, 0.5811, 0.5924, 0.5809, 0.587, 0.5886, 0.5861, 0.5802, 0.586] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
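# Illustrative usage (not part of the original test file) — a sketch of the
# pipeline that the slow test above exercises. The checkpoint name is taken
# from that test; KarrasVePipeline ships with the diffusers version these
# tests target, so treat this as a version-dependent example.
import torch
from diffusers import KarrasVePipeline, KarrasVeScheduler, UNet2DModel

unet = UNet2DModel.from_pretrained("google/ncsnpp-celebahq-256")
scheduler = KarrasVeScheduler()
pipe = KarrasVePipeline(unet=unet, scheduler=scheduler)
generator = torch.manual_seed(0)
image = pipe(num_inference_steps=20, generator=generator, output_type="numpy").images[0]
print(image.shape)  # (256, 256, 3)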
class Node:
    def __init__(self, name, val):
        self.name = name
        self.val = val

    def __str__(self):
        return f"{self.__class__.__name__}({self.name}, {self.val})"

    def __lt__(self, other):
        return self.val < other.val


class MinHeap:
    """Min-heap of Node objects with an index map so decrease_key runs in O(log n)."""

    def __init__(self, array):
        self.idx_of_element = {}
        self.heap_dict = {}
        self.heap = self.build_heap(array)

    def __getitem__(self, key):
        return self.get_value(key)

    def get_parent_idx(self, idx):
        return (idx - 1) // 2

    def get_left_child_idx(self, idx):
        return idx * 2 + 1

    def get_right_child_idx(self, idx):
        return idx * 2 + 2

    def get_value(self, key):
        return self.heap_dict[key]

    def build_heap(self, array):
        last_idx = len(array) - 1
        start_from = self.get_parent_idx(last_idx)
        for idx, i in enumerate(array):
            self.idx_of_element[i] = idx
            self.heap_dict[i.name] = i.val
        for i in range(start_from, -1, -1):
            self.sift_down(i, array)
        return array

    def sift_down(self, idx, array):
        while True:
            l = self.get_left_child_idx(idx)  # noqa: E741
            r = self.get_right_child_idx(idx)

            smallest = idx
            if l < len(array) and array[l] < array[idx]:
                smallest = l
            if r < len(array) and array[r] < array[smallest]:
                smallest = r

            if smallest != idx:
                array[smallest], array[idx] = array[idx], array[smallest]
                (
                    self.idx_of_element[array[smallest]],
                    self.idx_of_element[array[idx]],
                ) = (
                    self.idx_of_element[array[idx]],
                    self.idx_of_element[array[smallest]],
                )
                idx = smallest
            else:
                break

    def sift_up(self, idx):
        p = self.get_parent_idx(idx)
        while p >= 0 and self.heap[p] > self.heap[idx]:
            self.heap[p], self.heap[idx] = self.heap[idx], self.heap[p]
            self.idx_of_element[self.heap[p]], self.idx_of_element[self.heap[idx]] = (
                self.idx_of_element[self.heap[idx]],
                self.idx_of_element[self.heap[p]],
            )
            idx = p
            p = self.get_parent_idx(idx)

    def peek(self):
        return self.heap[0]

    def remove(self):
        self.heap[0], self.heap[-1] = self.heap[-1], self.heap[0]
        self.idx_of_element[self.heap[0]], self.idx_of_element[self.heap[-1]] = (
            self.idx_of_element[self.heap[-1]],
            self.idx_of_element[self.heap[0]],
        )
        x = self.heap.pop()
        del self.idx_of_element[x]
        self.sift_down(0, self.heap)
        return x

    def insert(self, node):
        self.heap.append(node)
        self.idx_of_element[node] = len(self.heap) - 1
        self.heap_dict[node.name] = node.val
        self.sift_up(len(self.heap) - 1)

    def is_empty(self):
        return len(self.heap) == 0

    def decrease_key(self, node, new_value):
        assert (
            self.heap[self.idx_of_element[node]].val > new_value
        ), "newValue must be less than current value"
        node.val = new_value
        self.heap_dict[node.name] = new_value
        self.sift_up(self.idx_of_element[node])


r = Node("R", -1)
b = Node("B", 6)
a = Node("A", 3)
x = Node("X", 1)
e = Node("E", 4)

# Use one of these two ways to generate Min-Heap

# Generating Min-Heap from array
my_min_heap = MinHeap([r, b, a, x, e])

# Generating Min-Heap by Insert method
# my_min_heap.insert(a)
# my_min_heap.insert(b)
# my_min_heap.insert(x)
# my_min_heap.insert(r)
# my_min_heap.insert(e)

# Before
print("Min Heap - before decrease key")
for i in my_min_heap.heap:
    print(i)

print("Min Heap - After decrease key of node [B -> -17]")
my_min_heap.decrease_key(b, -17)

# After
for i in my_min_heap.heap:
    print(i)

if __name__ == "__main__":
    import doctest

    doctest.testmod()
import qiskit


def half_adder(bit0: int, bit1: int) -> qiskit.result.counts.Counts:
    """Build and simulate a quantum half adder for two classical input bits."""
    simulator = qiskit.Aer.get_backend("aer_simulator")
    qc_ha = qiskit.QuantumCircuit(4, 2)
    # encode inputs in qubits 0 and 1
    if bit0 == 1:
        qc_ha.x(0)
    if bit1 == 1:
        qc_ha.x(1)
    qc_ha.barrier()
    # use cnots to write XOR of the inputs on qubit 2
    qc_ha.cx(0, 2)
    qc_ha.cx(1, 2)
    # use ccx / toffoli gate to write AND of the inputs on qubit 3
    qc_ha.ccx(0, 1, 3)
    qc_ha.barrier()
    # extract outputs
    qc_ha.measure(2, 0)  # extract XOR value
    qc_ha.measure(3, 1)  # extract AND value
    # Execute the circuit on the qasm simulator
    job = qiskit.execute(qc_ha, simulator, shots=1000)
    # Return the histogram data of the results of the experiment
    return job.result().get_counts(qc_ha)


if __name__ == "__main__":
    counts = half_adder(1, 1)
    print(f"Half Adder Output Qubit Counts: {counts}")
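# Illustrative check (not part of the original file) — exhaustively compare
# the circuit against the classical half-adder truth table, assuming the
# ideal (noise-free) aer_simulator so each run yields a single bitstring.
# Counts keys read "c1c0", i.e. AND (carry) followed by XOR (sum).
for bit0 in (0, 1):
    for bit1 in (0, 1):
        counts = half_adder(bit0, bit1)
        expected = f"{bit0 & bit1}{bit0 ^ bit1}"
        assert list(counts) == [expected], (bit0, bit1, counts)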
import torch import torch.nn as nn from transformers import CLIPConfig, CLIPVisionModel, PreTrainedModel from ...utils import logging __snake_case = logging.get_logger(__name__) def lowerCAmelCase_ ( __lowerCAmelCase , __lowerCAmelCase )-> str: '''simple docstring''' UpperCAmelCase : Dict =nn.functional.normalize(__lowerCAmelCase ) UpperCAmelCase : Tuple =nn.functional.normalize(__lowerCAmelCase ) return torch.mm(__lowerCAmelCase , normalized_text_embeds.t() ) class __snake_case ( lowerCamelCase__ ): __lowerCamelCase : List[str] = CLIPConfig __lowerCamelCase : List[Any] = ["""CLIPEncoderLayer"""] def __init__( self , snake_case__ ) -> Dict: '''simple docstring''' super().__init__(snake_case__ ) UpperCAmelCase : Dict =CLIPVisionModel(config.vision_config ) UpperCAmelCase : Optional[Any] =nn.Linear(config.vision_config.hidden_size , config.projection_dim , bias=snake_case__ ) UpperCAmelCase : int =nn.Parameter(torch.ones(17 , config.projection_dim ) , requires_grad=snake_case__ ) UpperCAmelCase : List[str] =nn.Parameter(torch.ones(3 , config.projection_dim ) , requires_grad=snake_case__ ) UpperCAmelCase : str =nn.Parameter(torch.ones(17 ) , requires_grad=snake_case__ ) UpperCAmelCase : Optional[int] =nn.Parameter(torch.ones(3 ) , requires_grad=snake_case__ ) @torch.no_grad() def UpperCAmelCase__ ( self , snake_case__ , snake_case__ ) -> Tuple: '''simple docstring''' UpperCAmelCase : Union[str, Any] =self.vision_model(snake_case__ )[1] # pooled_output UpperCAmelCase : Optional[Any] =self.visual_projection(snake_case__ ) # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16 UpperCAmelCase : List[str] =cosine_distance(snake_case__ , self.special_care_embeds ).cpu().float().numpy() UpperCAmelCase : Optional[Any] =cosine_distance(snake_case__ , self.concept_embeds ).cpu().float().numpy() UpperCAmelCase : Tuple =[] UpperCAmelCase : Dict =image_embeds.shape[0] for i in range(snake_case__ ): UpperCAmelCase : str ={'''special_scores''': {}, '''special_care''': [], '''concept_scores''': {}, '''bad_concepts''': []} # increase this value to create a stronger `nfsw` filter # at the cost of increasing the possibility of filtering benign images UpperCAmelCase : str =0.0 for concept_idx in range(len(special_cos_dist[0] ) ): UpperCAmelCase : Optional[Any] =special_cos_dist[i][concept_idx] UpperCAmelCase : Union[str, Any] =self.special_care_embeds_weights[concept_idx].item() UpperCAmelCase : str =round(concept_cos - concept_threshold + adjustment , 3 ) if result_img["special_scores"][concept_idx] > 0: result_img["special_care"].append({concept_idx, result_img['''special_scores'''][concept_idx]} ) UpperCAmelCase : int =0.01 for concept_idx in range(len(cos_dist[0] ) ): UpperCAmelCase : Any =cos_dist[i][concept_idx] UpperCAmelCase : Optional[int] =self.concept_embeds_weights[concept_idx].item() UpperCAmelCase : int =round(concept_cos - concept_threshold + adjustment , 3 ) if result_img["concept_scores"][concept_idx] > 0: result_img["bad_concepts"].append(snake_case__ ) result.append(snake_case__ ) UpperCAmelCase : Optional[int] =[len(res['''bad_concepts'''] ) > 0 for res in result] return images, has_nsfw_concepts @torch.no_grad() def UpperCAmelCase__ ( self , snake_case__ , snake_case__ ) -> Tuple: '''simple docstring''' UpperCAmelCase : Any =self.vision_model(snake_case__ )[1] # pooled_output UpperCAmelCase : List[str] =self.visual_projection(snake_case__ ) UpperCAmelCase : Any =cosine_distance(snake_case__ , self.special_care_embeds ) UpperCAmelCase : 
Optional[Any] =cosine_distance(snake_case__ , self.concept_embeds ) # increase this value to create a stronger `nsfw` filter # at the cost of increasing the possibility of filtering benign images UpperCAmelCase : Optional[Any] =0.0 UpperCAmelCase : Any =special_cos_dist - self.special_care_embeds_weights + adjustment # special_scores = special_scores.round(decimals=3) UpperCAmelCase : str =torch.any(special_scores > 0 , dim=1 ) UpperCAmelCase : List[Any] =special_care * 0.01 UpperCAmelCase : Union[str, Any] =special_adjustment.unsqueeze(1 ).expand(-1 , cos_dist.shape[1] ) UpperCAmelCase : List[Any] =(cos_dist - self.concept_embeds_weights) + special_adjustment # concept_scores = concept_scores.round(decimals=3) UpperCAmelCase : str =torch.any(concept_scores > 0 , dim=1 ) return images, has_nsfw_concepts
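# Illustrative check (not part of the original file) — a self-contained
# sanity test of the cosine_distance helper defined at the top of this file:
# identical unit vectors score 1.0, orthogonal ones score 0.0.
import torch
import torch.nn as nn


def cosine_distance(image_embeds, text_embeds):
    # same body as the module-level helper above
    normalized_image_embeds = nn.functional.normalize(image_embeds)
    normalized_text_embeds = nn.functional.normalize(text_embeds)
    return torch.mm(normalized_image_embeds, normalized_text_embeds.t())


a = torch.tensor([[1.0, 0.0], [0.0, 1.0]])
b = torch.tensor([[1.0, 0.0]])
print(cosine_distance(a, b))  # tensor([[1.], [0.]])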
from __future__ import annotations import unittest from transformers import BlenderbotConfig, BlenderbotTokenizer, is_tf_available from transformers.testing_utils import require_tf, require_tokenizers, slow from transformers.utils import cached_property from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import TFAutoModelForSeqaSeqLM, TFBlenderbotForConditionalGeneration, TFBlenderbotModel @require_tf class __snake_case : __lowerCamelCase : str = BlenderbotConfig __lowerCamelCase : Optional[Any] = {} __lowerCamelCase : Optional[int] = """gelu""" def __init__( self , snake_case__ , snake_case__=13 , snake_case__=7 , snake_case__=True , snake_case__=False , snake_case__=99 , snake_case__=32 , snake_case__=2 , snake_case__=4 , snake_case__=37 , snake_case__=0.1 , snake_case__=0.1 , snake_case__=20 , snake_case__=2 , snake_case__=1 , snake_case__=0 , ) -> Optional[Any]: '''simple docstring''' UpperCAmelCase : Union[str, Any] =parent UpperCAmelCase : Optional[int] =batch_size UpperCAmelCase : Dict =seq_length UpperCAmelCase : Optional[Any] =is_training UpperCAmelCase : List[str] =use_labels UpperCAmelCase : List[Any] =vocab_size UpperCAmelCase : Optional[int] =hidden_size UpperCAmelCase : Tuple =num_hidden_layers UpperCAmelCase : Any =num_attention_heads UpperCAmelCase : Optional[int] =intermediate_size UpperCAmelCase : str =hidden_dropout_prob UpperCAmelCase : Optional[int] =attention_probs_dropout_prob UpperCAmelCase : str =max_position_embeddings UpperCAmelCase : List[Any] =eos_token_id UpperCAmelCase : Optional[int] =pad_token_id UpperCAmelCase : Tuple =bos_token_id def UpperCAmelCase__ ( self ) -> Optional[Any]: '''simple docstring''' UpperCAmelCase : List[Any] =ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size ) UpperCAmelCase : List[Any] =tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ) , 1 ) UpperCAmelCase : Tuple =tf.concat([input_ids, eos_tensor] , axis=1 ) UpperCAmelCase : str =ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) UpperCAmelCase : Optional[Any] =self.config_cls( vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , **self.config_updates , ) UpperCAmelCase : List[str] =prepare_blenderbot_inputs_dict(snake_case__ , snake_case__ , snake_case__ ) return config, inputs_dict def UpperCAmelCase__ ( self , snake_case__ , snake_case__ ) -> int: '''simple docstring''' UpperCAmelCase : Union[str, Any] =TFBlenderbotModel(config=snake_case__ ).get_decoder() UpperCAmelCase : Any =inputs_dict['''input_ids'''] UpperCAmelCase : str =input_ids[:1, :] UpperCAmelCase : Tuple =inputs_dict['''attention_mask'''][:1, :] UpperCAmelCase : Tuple =inputs_dict['''head_mask'''] UpperCAmelCase : List[Any] =1 # first forward pass UpperCAmelCase : List[str] =model(snake_case__ , attention_mask=snake_case__ , head_mask=snake_case__ , 
use_cache=snake_case__ ) UpperCAmelCase , UpperCAmelCase : str =outputs.to_tuple() # create hypothetical next token and extent to next_input_ids UpperCAmelCase : Union[str, Any] =ids_tensor((self.batch_size, 3) , config.vocab_size ) UpperCAmelCase : List[Any] =tf.cast(ids_tensor((self.batch_size, 3) , 2 ) , tf.inta ) # append to next input_ids and UpperCAmelCase : Tuple =tf.concat([input_ids, next_tokens] , axis=-1 ) UpperCAmelCase : int =tf.concat([attention_mask, next_attn_mask] , axis=-1 ) UpperCAmelCase : Optional[int] =model(snake_case__ , attention_mask=snake_case__ )[0] UpperCAmelCase : str =model(snake_case__ , attention_mask=snake_case__ , past_key_values=snake_case__ )[0] self.parent.assertEqual(next_tokens.shape[1] , output_from_past.shape[1] ) # select random slice UpperCAmelCase : List[Any] =int(ids_tensor((1,) , output_from_past.shape[-1] ) ) UpperCAmelCase : List[Any] =output_from_no_past[:, -3:, random_slice_idx] UpperCAmelCase : Dict =output_from_past[:, :, random_slice_idx] # test that outputs are equal for slice tf.debugging.assert_near(snake_case__ , snake_case__ , rtol=1e-3 ) def lowerCAmelCase_ ( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase=None , __lowerCAmelCase=None , __lowerCAmelCase=None , __lowerCAmelCase=None , __lowerCAmelCase=None , )-> str: '''simple docstring''' if attention_mask is None: UpperCAmelCase : int =tf.cast(tf.math.not_equal(__lowerCAmelCase , config.pad_token_id ) , tf.inta ) if decoder_attention_mask is None: UpperCAmelCase : Tuple =tf.concat( [ tf.ones(decoder_input_ids[:, :1].shape , dtype=tf.inta ), tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:] , config.pad_token_id ) , tf.inta ), ] , axis=-1 , ) if head_mask is None: UpperCAmelCase : str =tf.ones((config.encoder_layers, config.encoder_attention_heads) ) if decoder_head_mask is None: UpperCAmelCase : Union[str, Any] =tf.ones((config.decoder_layers, config.decoder_attention_heads) ) if cross_attn_head_mask is None: UpperCAmelCase : int =tf.ones((config.decoder_layers, config.decoder_attention_heads) ) return { "input_ids": input_ids, "decoder_input_ids": decoder_input_ids, "attention_mask": attention_mask, "decoder_attention_mask": decoder_attention_mask, "head_mask": head_mask, "decoder_head_mask": decoder_head_mask, "cross_attn_head_mask": cross_attn_head_mask, } @require_tf class __snake_case ( lowerCamelCase__ , lowerCamelCase__ , unittest.TestCase ): __lowerCamelCase : List[str] = (TFBlenderbotForConditionalGeneration, TFBlenderbotModel) if is_tf_available() else () __lowerCamelCase : Dict = (TFBlenderbotForConditionalGeneration,) if is_tf_available() else () __lowerCamelCase : Dict = ( { """conversational""": TFBlenderbotForConditionalGeneration, """feature-extraction""": TFBlenderbotModel, """summarization""": TFBlenderbotForConditionalGeneration, """text2text-generation""": TFBlenderbotForConditionalGeneration, """translation""": TFBlenderbotForConditionalGeneration, } if is_tf_available() else {} ) __lowerCamelCase : Union[str, Any] = True __lowerCamelCase : Union[str, Any] = False __lowerCamelCase : Union[str, Any] = False def UpperCAmelCase__ ( self ) -> Dict: '''simple docstring''' UpperCAmelCase : List[str] =TFBlenderbotModelTester(self ) UpperCAmelCase : List[Any] =ConfigTester(self , config_class=snake_case__ ) def UpperCAmelCase__ ( self ) -> Optional[Any]: '''simple docstring''' self.config_tester.run_common_tests() def UpperCAmelCase__ ( self ) -> List[Any]: '''simple docstring''' UpperCAmelCase : int 
=self.model_tester.prepare_config_and_inputs_for_common() self.model_tester.check_decoder_model_past_large_inputs(*snake_case__ ) @require_tokenizers @require_tf class __snake_case ( unittest.TestCase ): __lowerCamelCase : List[str] = ["""My friends are cool but they eat too many carbs."""] __lowerCamelCase : Dict = """facebook/blenderbot-400M-distill""" @cached_property def UpperCAmelCase__ ( self ) -> int: '''simple docstring''' return BlenderbotTokenizer.from_pretrained(self.model_name ) @cached_property def UpperCAmelCase__ ( self ) -> int: '''simple docstring''' UpperCAmelCase : int =TFAutoModelForSeqaSeqLM.from_pretrained(self.model_name ) return model @slow def UpperCAmelCase__ ( self ) -> Any: '''simple docstring''' UpperCAmelCase : Optional[int] =self.tokenizer(self.src_text , return_tensors='''tf''' ) UpperCAmelCase : Optional[int] =self.model.generate( model_inputs.input_ids , ) UpperCAmelCase : str =self.tokenizer.batch_decode(generated_ids.numpy() , skip_special_tokens=snake_case__ )[0] assert ( generated_words == " That's unfortunate. Are they trying to lose weight or are they just trying to be healthier?" )
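# Illustrative usage (not part of the original test file) — a sketch of the
# generation flow the integration test above checks; the checkpoint name and
# prompt come from that test. Loading the 400M model requires a download.
from transformers import BlenderbotTokenizer, TFBlenderbotForConditionalGeneration

tokenizer = BlenderbotTokenizer.from_pretrained("facebook/blenderbot-400M-distill")
model = TFBlenderbotForConditionalGeneration.from_pretrained("facebook/blenderbot-400M-distill")
inputs = tokenizer(["My friends are cool but they eat too many carbs."], return_tensors="tf")
generated_ids = model.generate(inputs.input_ids)
print(tokenizer.batch_decode(generated_ids.numpy(), skip_special_tokens=True)[0])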
from __future__ import annotations import unittest from transformers import DebertaVaConfig, is_tf_available from transformers.testing_utils import require_tf, slow from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import ( TFDebertaVaForMaskedLM, TFDebertaVaForQuestionAnswering, TFDebertaVaForSequenceClassification, TFDebertaVaForTokenClassification, TFDebertaVaModel, ) class __snake_case : def __init__( self , snake_case__ , snake_case__=13 , snake_case__=7 , snake_case__=True , snake_case__=True , snake_case__=True , snake_case__=True , snake_case__=99 , snake_case__=32 , snake_case__=2 , snake_case__=4 , snake_case__=37 , snake_case__="gelu" , snake_case__=0.1 , snake_case__=0.1 , snake_case__=512 , snake_case__=16 , snake_case__=2 , snake_case__=0.02 , snake_case__=False , snake_case__=True , snake_case__="None" , snake_case__=3 , snake_case__=4 , snake_case__=None , ) -> Optional[int]: '''simple docstring''' UpperCAmelCase : List[Any] =parent UpperCAmelCase : str =batch_size UpperCAmelCase : int =seq_length UpperCAmelCase : int =is_training UpperCAmelCase : Optional[int] =use_input_mask UpperCAmelCase : List[Any] =use_token_type_ids UpperCAmelCase : List[str] =use_labels UpperCAmelCase : Optional[int] =vocab_size UpperCAmelCase : int =hidden_size UpperCAmelCase : str =num_hidden_layers UpperCAmelCase : Any =num_attention_heads UpperCAmelCase : Optional[Any] =intermediate_size UpperCAmelCase : Tuple =hidden_act UpperCAmelCase : Optional[int] =hidden_dropout_prob UpperCAmelCase : List[Any] =attention_probs_dropout_prob UpperCAmelCase : List[Any] =max_position_embeddings UpperCAmelCase : str =type_vocab_size UpperCAmelCase : Optional[Any] =type_sequence_label_size UpperCAmelCase : Dict =initializer_range UpperCAmelCase : List[str] =num_labels UpperCAmelCase : Optional[Any] =num_choices UpperCAmelCase : Dict =relative_attention UpperCAmelCase : Union[str, Any] =position_biased_input UpperCAmelCase : Any =pos_att_type UpperCAmelCase : Optional[int] =scope def UpperCAmelCase__ ( self ) -> Tuple: '''simple docstring''' UpperCAmelCase : Any =ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) UpperCAmelCase : List[Any] =None if self.use_input_mask: UpperCAmelCase : Any =random_attention_mask([self.batch_size, self.seq_length] ) UpperCAmelCase : Optional[int] =None if self.use_token_type_ids: UpperCAmelCase : int =ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) UpperCAmelCase : Optional[int] =None UpperCAmelCase : int =None UpperCAmelCase : List[str] =None if self.use_labels: UpperCAmelCase : Optional[int] =ids_tensor([self.batch_size] , self.type_sequence_label_size ) UpperCAmelCase : List[str] =ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) UpperCAmelCase : Tuple =DebertaVaConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , relative_attention=self.relative_attention , position_biased_input=self.position_biased_input , 
initializer_range=self.initializer_range , return_dict=snake_case__ , ) return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def UpperCAmelCase__ ( self , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ ) -> Any: '''simple docstring''' UpperCAmelCase : str =TFDebertaVaModel(config=snake_case__ ) UpperCAmelCase : Any ={'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids} UpperCAmelCase : List[str] =[input_ids, input_mask] UpperCAmelCase : Union[str, Any] =model(snake_case__ ) UpperCAmelCase : int =model(snake_case__ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def UpperCAmelCase__ ( self , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ ) -> List[str]: '''simple docstring''' UpperCAmelCase : Tuple =TFDebertaVaForMaskedLM(config=snake_case__ ) UpperCAmelCase : int ={ '''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids, } UpperCAmelCase : Optional[Any] =model(snake_case__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def UpperCAmelCase__ ( self , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ ) -> Any: '''simple docstring''' UpperCAmelCase : List[str] =self.num_labels UpperCAmelCase : List[Any] =TFDebertaVaForSequenceClassification(config=snake_case__ ) UpperCAmelCase : Any ={ '''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids, } UpperCAmelCase : str =model(snake_case__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def UpperCAmelCase__ ( self , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ ) -> Optional[Any]: '''simple docstring''' UpperCAmelCase : List[Any] =self.num_labels UpperCAmelCase : Optional[int] =TFDebertaVaForTokenClassification(config=snake_case__ ) UpperCAmelCase : Optional[Any] ={ '''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids, } UpperCAmelCase : Any =model(snake_case__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def UpperCAmelCase__ ( self , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ ) -> List[Any]: '''simple docstring''' UpperCAmelCase : Optional[Any] =TFDebertaVaForQuestionAnswering(config=snake_case__ ) UpperCAmelCase : Dict ={ '''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids, } UpperCAmelCase : Union[str, Any] =model(snake_case__ ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def UpperCAmelCase__ ( self ) -> Optional[Any]: '''simple docstring''' UpperCAmelCase : List[str] =self.prepare_config_and_inputs() ( ( UpperCAmelCase ) , ( UpperCAmelCase ) , ( UpperCAmelCase ) , ( UpperCAmelCase ) , ( UpperCAmelCase ) , ( UpperCAmelCase ) , ( UpperCAmelCase ) , ) : List[Any] =config_and_inputs UpperCAmelCase : Optional[Any] ={'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''attention_mask''': input_mask} return config, inputs_dict @require_tf class 
__snake_case ( lowerCamelCase__ , lowerCamelCase__ , unittest.TestCase ): __lowerCamelCase : Union[str, Any] = ( ( TFDebertaVaModel, TFDebertaVaForMaskedLM, TFDebertaVaForQuestionAnswering, TFDebertaVaForSequenceClassification, TFDebertaVaForTokenClassification, ) if is_tf_available() else () ) __lowerCamelCase : Union[str, Any] = ( { """feature-extraction""": TFDebertaVaModel, """fill-mask""": TFDebertaVaForMaskedLM, """question-answering""": TFDebertaVaForQuestionAnswering, """text-classification""": TFDebertaVaForSequenceClassification, """token-classification""": TFDebertaVaForTokenClassification, """zero-shot""": TFDebertaVaForSequenceClassification, } if is_tf_available() else {} ) __lowerCamelCase : Any = False __lowerCamelCase : Any = False def UpperCAmelCase__ ( self ) -> Dict: '''simple docstring''' UpperCAmelCase : str =TFDebertaVaModelTester(self ) UpperCAmelCase : Union[str, Any] =ConfigTester(self , config_class=snake_case__ , hidden_size=37 ) def UpperCAmelCase__ ( self ) -> Optional[int]: '''simple docstring''' self.config_tester.run_common_tests() def UpperCAmelCase__ ( self ) -> Union[str, Any]: '''simple docstring''' UpperCAmelCase : List[str] =self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*snake_case__ ) def UpperCAmelCase__ ( self ) -> int: '''simple docstring''' UpperCAmelCase : int =self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_lm(*snake_case__ ) def UpperCAmelCase__ ( self ) -> int: '''simple docstring''' UpperCAmelCase : List[str] =self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_question_answering(*snake_case__ ) def UpperCAmelCase__ ( self ) -> Tuple: '''simple docstring''' UpperCAmelCase : Tuple =self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_sequence_classification(*snake_case__ ) def UpperCAmelCase__ ( self ) -> Union[str, Any]: '''simple docstring''' UpperCAmelCase : str =self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_token_classification(*snake_case__ ) @slow def UpperCAmelCase__ ( self ) -> Optional[int]: '''simple docstring''' UpperCAmelCase : List[Any] =TFDebertaVaModel.from_pretrained('''kamalkraj/deberta-v2-xlarge''' ) self.assertIsNotNone(snake_case__ ) @require_tf class __snake_case ( unittest.TestCase ): @unittest.skip(reason='''Model not available yet''' ) def UpperCAmelCase__ ( self ) -> Dict: '''simple docstring''' pass @slow def UpperCAmelCase__ ( self ) -> List[str]: '''simple docstring''' UpperCAmelCase : Optional[Any] =TFDebertaVaModel.from_pretrained('''kamalkraj/deberta-v2-xlarge''' ) UpperCAmelCase : Any =tf.constant([[0, 3_1414, 232, 328, 740, 1140, 1_2695, 69, 4_6078, 1588, 2]] ) UpperCAmelCase : Tuple =tf.constant([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] ) UpperCAmelCase : Dict =model(snake_case__ , attention_mask=snake_case__ )[0] UpperCAmelCase : Optional[int] =tf.constant( [[[0.2356, 0.1948, 0.0369], [-0.1063, 0.3586, -0.5152], [-0.6399, -0.0259, -0.2525]]] ) tf.debugging.assert_near(output[:, 1:4, 1:4] , snake_case__ , atol=1e-4 )
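# Illustrative usage (not part of the original test file) — a sketch of the
# forward pass the integration test above performs; the checkpoint name comes
# from that test, and the hidden size of 1536 is the deberta-v2-xlarge value.
import tensorflow as tf
from transformers import TFDebertaV2Model

model = TFDebertaV2Model.from_pretrained("kamalkraj/deberta-v2-xlarge")
input_ids = tf.constant([[0, 31414, 232, 2]])
outputs = model(input_ids)
print(outputs.last_hidden_state.shape)  # (1, 4, 1536)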
import functools import operator from ...configuration_utils import PretrainedConfig from ...utils import logging __snake_case = logging.get_logger(__name__) __snake_case = { '''asapp/sew-d-tiny-100k''': '''https://huggingface.co/asapp/sew-d-tiny-100k/resolve/main/config.json''', # See all SEW-D models at https://huggingface.co/models?filter=sew-d } class __snake_case ( lowerCamelCase__ ): __lowerCamelCase : Optional[Any] = """sew-d""" def __init__( self , snake_case__=32 , snake_case__=768 , snake_case__=12 , snake_case__=12 , snake_case__=3072 , snake_case__=2 , snake_case__=512 , snake_case__=256 , snake_case__=True , snake_case__=True , snake_case__=("p2c", "c2p") , snake_case__="layer_norm" , snake_case__="gelu_python" , snake_case__=0.1 , snake_case__=0.1 , snake_case__=0.1 , snake_case__=0.0 , snake_case__=0.1 , snake_case__=0.02 , snake_case__=1e-7 , snake_case__=1e-5 , snake_case__="group" , snake_case__="gelu" , snake_case__=(64, 128, 128, 128, 128, 256, 256, 256, 256, 512, 512, 512, 512) , snake_case__=(5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1) , snake_case__=(10, 3, 1, 3, 1, 3, 1, 3, 1, 2, 1, 2, 1) , snake_case__=False , snake_case__=128 , snake_case__=16 , snake_case__=True , snake_case__=0.05 , snake_case__=10 , snake_case__=2 , snake_case__=0.0 , snake_case__=10 , snake_case__=0 , snake_case__="mean" , snake_case__=False , snake_case__=False , snake_case__=256 , snake_case__=0 , snake_case__=1 , snake_case__=2 , **snake_case__ , ) -> int: '''simple docstring''' super().__init__(**snake_case__ , pad_token_id=snake_case__ , bos_token_id=snake_case__ , eos_token_id=snake_case__ ) UpperCAmelCase : Union[str, Any] =hidden_size UpperCAmelCase : Union[str, Any] =feat_extract_norm UpperCAmelCase : Optional[Any] =feat_extract_activation UpperCAmelCase : List[str] =list(snake_case__ ) UpperCAmelCase : int =list(snake_case__ ) UpperCAmelCase : List[str] =list(snake_case__ ) UpperCAmelCase : str =conv_bias UpperCAmelCase : Tuple =num_conv_pos_embeddings UpperCAmelCase : Dict =num_conv_pos_embedding_groups UpperCAmelCase : str =len(self.conv_dim ) UpperCAmelCase : Dict =num_hidden_layers UpperCAmelCase : Optional[int] =intermediate_size UpperCAmelCase : List[Any] =squeeze_factor UpperCAmelCase : str =max_position_embeddings UpperCAmelCase : int =position_buckets UpperCAmelCase : Optional[int] =share_att_key UpperCAmelCase : Optional[int] =relative_attention UpperCAmelCase : Tuple =norm_rel_ebd UpperCAmelCase : List[Any] =list(snake_case__ ) UpperCAmelCase : Dict =hidden_act UpperCAmelCase : Optional[int] =num_attention_heads UpperCAmelCase : Any =hidden_dropout UpperCAmelCase : str =attention_dropout UpperCAmelCase : Union[str, Any] =activation_dropout UpperCAmelCase : str =feat_proj_dropout UpperCAmelCase : Union[str, Any] =final_dropout UpperCAmelCase : Optional[int] =layer_norm_eps UpperCAmelCase : str =feature_layer_norm_eps UpperCAmelCase : str =initializer_range UpperCAmelCase : Any =vocab_size if ( (len(self.conv_stride ) != self.num_feat_extract_layers) or (len(self.conv_kernel ) != self.num_feat_extract_layers) or (len(self.conv_dim ) != self.num_feat_extract_layers) ): raise ValueError( '''Configuration for convolutional layers is incorrect.''' '''It is required that `len(config.conv_dim)` == `len(config.conv_stride)` == `len(config.conv_kernel)`,''' f'''but is `len(config.conv_dim) = {len(self.conv_dim )}`, `len(config.conv_stride)''' f'''= {len(self.conv_stride )}`, `len(config.conv_kernel) = {len(self.conv_kernel )}`.''' ) # fine-tuning config parameters for SpecAugment: 
https://arxiv.org/abs/1904.08779 UpperCAmelCase : Union[str, Any] =apply_spec_augment UpperCAmelCase : Optional[Any] =mask_time_prob UpperCAmelCase : Tuple =mask_time_length UpperCAmelCase : str =mask_time_min_masks UpperCAmelCase : Optional[int] =mask_feature_prob UpperCAmelCase : Optional[Any] =mask_feature_length UpperCAmelCase : List[Any] =mask_feature_min_masks # ctc loss UpperCAmelCase : str =ctc_loss_reduction UpperCAmelCase : Optional[int] =ctc_zero_infinity # sequence classification UpperCAmelCase : Union[str, Any] =use_weighted_layer_sum UpperCAmelCase : int =classifier_proj_size @property def UpperCAmelCase__ ( self ) -> List[Any]: '''simple docstring''' return functools.reduce(operator.mul , self.conv_stride , 1 )
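# Illustrative usage (not part of the original file) — the property at the end
# of the class multiplies the convolutional strides together; with the default
# strides (5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1) one output frame covers
# 5 * 2**6 = 320 input samples. The property is exposed in transformers as
# inputs_to_logits_ratio.
from transformers import SEWDConfig

config = SEWDConfig()
print(config.inputs_to_logits_ratio)  # 320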
def solution(power: int = 1000) -> int:
    """Return the sum of the decimal digits of 2**power (Project Euler 16)."""
    n = 2**power
    r = 0
    while n:
        r, n = r + n % 10, n // 10
    return r


if __name__ == "__main__":
    print(solution(int(str(input()).strip())))
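# Worked example (not part of the original file): 2**15 = 32768 and
# 3 + 2 + 7 + 6 + 8 = 26; the default power of 1000 gives the known
# Project Euler 16 answer.
assert solution(15) == 26
assert solution() == 1366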
import os
from argparse import ArgumentParser
from typing import List

import torch.utils.data
from datasets import Dataset, IterableDataset
from datasets.distributed import split_dataset_by_node

NUM_SHARDS = 4
NUM_ITEMS_PER_SHARD = 3


class FailedTestError(RuntimeError):
    pass


def gen(shards: List[str]):
    for shard in shards:
        for i in range(NUM_ITEMS_PER_SHARD):
            yield {"i": i, "shard": shard}


def main():
    rank = int(os.environ["RANK"])
    world_size = int(os.environ["WORLD_SIZE"])

    parser = ArgumentParser()
    parser.add_argument("--streaming", type=bool)
    parser.add_argument("--local_rank", type=int)
    parser.add_argument("--num_workers", type=int, default=0)
    args = parser.parse_args()
    streaming = args.streaming
    num_workers = args.num_workers

    gen_kwargs = {"shards": [f"shard_{shard_idx}" for shard_idx in range(NUM_SHARDS)]}
    ds = IterableDataset.from_generator(gen, gen_kwargs=gen_kwargs)
    if not streaming:
        ds = Dataset.from_list(list(ds))

    ds = split_dataset_by_node(ds, rank=rank, world_size=world_size)
    dataloader = torch.utils.data.DataLoader(ds, num_workers=num_workers)

    full_size = NUM_SHARDS * NUM_ITEMS_PER_SHARD
    expected_local_size = full_size // world_size
    expected_local_size += int(rank < (full_size % world_size))

    local_size = sum(1 for _ in dataloader)
    if local_size != expected_local_size:
        raise FailedTestError(f"local_size {local_size} != expected_local_size {expected_local_size}")


if __name__ == "__main__":
    main()
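# Illustrative single-process usage (not part of the original script) of the
# split_dataset_by_node helper the script exercises: 8 rows split across a
# world of 2 give each rank 4 rows.
from datasets import Dataset
from datasets.distributed import split_dataset_by_node

ds = Dataset.from_list([{"i": i} for i in range(8)])
shard0 = split_dataset_by_node(ds, rank=0, world_size=2)
shard1 = split_dataset_by_node(ds, rank=1, world_size=2)
print(len(shard0), len(shard1))  # 4 4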
from typing import Optional

from .. import Features, NamedSplit
from ..packaged_modules.text.text import Text
from ..utils.typing import NestedDataStructureLike, PathLike
from .abc import AbstractDatasetReader


class TextDatasetReader(AbstractDatasetReader):
    def __init__(
        self,
        path_or_paths: NestedDataStructureLike[PathLike],
        split: Optional[NamedSplit] = None,
        features: Optional[Features] = None,
        cache_dir: str = None,
        keep_in_memory: bool = False,
        streaming: bool = False,
        num_proc: Optional[int] = None,
        **kwargs,
    ):
        super().__init__(
            path_or_paths,
            split=split,
            features=features,
            cache_dir=cache_dir,
            keep_in_memory=keep_in_memory,
            streaming=streaming,
            num_proc=num_proc,
            **kwargs,
        )
        path_or_paths = path_or_paths if isinstance(path_or_paths, dict) else {self.split: path_or_paths}
        self.builder = Text(
            cache_dir=cache_dir,
            data_files=path_or_paths,
            features=features,
            **kwargs,
        )

    def read(self):
        # Build iterable dataset
        if self.streaming:
            dataset = self.builder.as_streaming_dataset(split=self.split)
        # Build regular (map-style) dataset
        else:
            download_config = None
            download_mode = None
            verification_mode = None
            base_path = None

            self.builder.download_and_prepare(
                download_config=download_config,
                download_mode=download_mode,
                verification_mode=verification_mode,
                base_path=base_path,
                num_proc=self.num_proc,
            )
            dataset = self.builder.as_dataset(
                split=self.split, verification_mode=verification_mode, in_memory=self.keep_in_memory
            )
        return dataset
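# Illustrative usage (not part of the original file), assuming a hypothetical
# local file "data.txt" — this reader is the code path behind
# load_dataset("text", data_files="data.txt").
reader = TextDatasetReader("data.txt", keep_in_memory=True)
dataset = reader.read()
print(dataset[0]["text"])  # first line of the file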
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_flax_available,
    is_tf_available,
    is_tokenizers_available,
    is_torch_available,
)

_import_structure = {"configuration_opt": ["OPT_PRETRAINED_CONFIG_ARCHIVE_MAP", "OPTConfig"]}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_opt"] = [
        "OPT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "OPTForCausalLM",
        "OPTModel",
        "OPTPreTrainedModel",
        "OPTForSequenceClassification",
        "OPTForQuestionAnswering",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_opt"] = ["TFOPTForCausalLM", "TFOPTModel", "TFOPTPreTrainedModel"]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_opt"] = [
        "FlaxOPTForCausalLM",
        "FlaxOPTModel",
        "FlaxOPTPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_opt import OPT_PRETRAINED_CONFIG_ARCHIVE_MAP, OPTConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_opt import (
            OPT_PRETRAINED_MODEL_ARCHIVE_LIST,
            OPTForCausalLM,
            OPTForQuestionAnswering,
            OPTForSequenceClassification,
            OPTModel,
            OPTPreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_opt import TFOPTForCausalLM, TFOPTModel, TFOPTPreTrainedModel

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_opt import FlaxOPTForCausalLM, FlaxOPTModel, FlaxOPTPreTrainedModel

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
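# Illustrative usage (not part of the original file) — thanks to the lazy
# structure above, the torch-backed module is only imported on first access.
# The config sizes here are arbitrary assumptions, not a released checkpoint.
from transformers import OPTConfig, OPTModel

config = OPTConfig(hidden_size=64, num_hidden_layers=2, ffn_dim=128, num_attention_heads=4, word_embed_proj_dim=64)
model = OPTModel(config)
print(model.config.hidden_size)  # 64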
from typing import Dict, List, Optional, Union import numpy as np from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from ...image_transforms import ( center_crop, get_resize_output_image_size, normalize, rescale, resize, to_channel_dimension_format, ) from ...image_utils import ( IMAGENET_STANDARD_MEAN, IMAGENET_STANDARD_STD, ChannelDimension, ImageInput, PILImageResampling, make_list_of_images, to_numpy_array, valid_images, ) from ...utils import TensorType, is_vision_available, logging if is_vision_available(): import PIL __snake_case = logging.get_logger(__name__) class __snake_case ( lowerCamelCase__ ): __lowerCamelCase : List[str] = ["""pixel_values"""] def __init__( self , snake_case__ = True , snake_case__ = None , snake_case__ = None , snake_case__ = PILImageResampling.BILINEAR , snake_case__ = True , snake_case__ = 1 / 255 , snake_case__ = True , snake_case__ = None , snake_case__ = None , **snake_case__ , ) -> None: '''simple docstring''' super().__init__(**snake_case__ ) UpperCAmelCase : List[str] =size if size is not None else {'''shortest_edge''': 384} UpperCAmelCase : int =get_size_dict(snake_case__ , default_to_square=snake_case__ ) UpperCAmelCase : Optional[Any] =do_resize UpperCAmelCase : int =size # Default value set here for backwards compatibility where the value in config is None UpperCAmelCase : List[Any] =crop_pct if crop_pct is not None else 224 / 256 UpperCAmelCase : Union[str, Any] =resample UpperCAmelCase : List[Any] =do_rescale UpperCAmelCase : List[Any] =rescale_factor UpperCAmelCase : Optional[int] =do_normalize UpperCAmelCase : Tuple =image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN UpperCAmelCase : Optional[int] =image_std if image_std is not None else IMAGENET_STANDARD_STD def UpperCAmelCase__ ( self , snake_case__ , snake_case__ , snake_case__ , snake_case__ = PILImageResampling.BICUBIC , snake_case__ = None , **snake_case__ , ) -> np.ndarray: '''simple docstring''' UpperCAmelCase : str =get_size_dict(snake_case__ , default_to_square=snake_case__ ) if "shortest_edge" not in size: raise ValueError(f'''Size dictionary must contain \'shortest_edge\' key. 
Got {size.keys()}''' ) UpperCAmelCase : Dict =size['''shortest_edge'''] if shortest_edge < 384: # maintain same ratio, resizing shortest edge to shortest_edge/crop_pct UpperCAmelCase : Optional[int] =int(shortest_edge / crop_pct ) UpperCAmelCase : Tuple =get_resize_output_image_size(snake_case__ , size=snake_case__ , default_to_square=snake_case__ ) UpperCAmelCase : Optional[int] =resize(image=snake_case__ , size=snake_case__ , resample=snake_case__ , data_format=snake_case__ , **snake_case__ ) # then crop to (shortest_edge, shortest_edge) return center_crop(image=snake_case__ , size=(shortest_edge, shortest_edge) , data_format=snake_case__ , **snake_case__ ) else: # warping (no cropping) when evaluated at 384 or larger return resize( snake_case__ , size=(shortest_edge, shortest_edge) , resample=snake_case__ , data_format=snake_case__ , **snake_case__ ) def UpperCAmelCase__ ( self , snake_case__ , snake_case__ , snake_case__ = None , **snake_case__ , ) -> int: '''simple docstring''' return rescale(snake_case__ , scale=snake_case__ , data_format=snake_case__ , **snake_case__ ) def UpperCAmelCase__ ( self , snake_case__ , snake_case__ , snake_case__ , snake_case__ = None , **snake_case__ , ) -> np.ndarray: '''simple docstring''' return normalize(snake_case__ , mean=snake_case__ , std=snake_case__ , data_format=snake_case__ , **snake_case__ ) def UpperCAmelCase__ ( self , snake_case__ , snake_case__ = None , snake_case__ = None , snake_case__ = None , snake_case__ = None , snake_case__ = None , snake_case__ = None , snake_case__ = None , snake_case__ = None , snake_case__ = None , snake_case__ = None , snake_case__ = ChannelDimension.FIRST , **snake_case__ , ) -> PIL.Image.Image: '''simple docstring''' UpperCAmelCase : List[Any] =do_resize if do_resize is not None else self.do_resize UpperCAmelCase : Dict =crop_pct if crop_pct is not None else self.crop_pct UpperCAmelCase : str =resample if resample is not None else self.resample UpperCAmelCase : List[str] =do_rescale if do_rescale is not None else self.do_rescale UpperCAmelCase : Optional[int] =rescale_factor if rescale_factor is not None else self.rescale_factor UpperCAmelCase : int =do_normalize if do_normalize is not None else self.do_normalize UpperCAmelCase : Tuple =image_mean if image_mean is not None else self.image_mean UpperCAmelCase : Any =image_std if image_std is not None else self.image_std UpperCAmelCase : Dict =size if size is not None else self.size UpperCAmelCase : Any =get_size_dict(snake_case__ , default_to_square=snake_case__ ) UpperCAmelCase : Optional[Any] =make_list_of_images(snake_case__ ) if not valid_images(snake_case__ ): raise ValueError( '''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, ''' '''torch.Tensor, tf.Tensor or jax.ndarray.''' ) if do_resize and size is None or resample is None: raise ValueError('''Size and resample must be specified if do_resize is True.''' ) if do_resize and size["shortest_edge"] < 384 and crop_pct is None: raise ValueError('''crop_pct must be specified if size < 384.''' ) if do_rescale and rescale_factor is None: raise ValueError('''Rescale factor must be specified if do_rescale is True.''' ) if do_normalize and (image_mean is None or image_std is None): raise ValueError('''Image mean and std must be specified if do_normalize is True.''' ) # All transformations expect numpy arrays. 
UpperCAmelCase : Optional[int] =[to_numpy_array(snake_case__ ) for image in images] if do_resize: UpperCAmelCase : int =[self.resize(image=snake_case__ , size=snake_case__ , crop_pct=snake_case__ , resample=snake_case__ ) for image in images] if do_rescale: UpperCAmelCase : Optional[int] =[self.rescale(image=snake_case__ , scale=snake_case__ ) for image in images] if do_normalize: UpperCAmelCase : int =[self.normalize(image=snake_case__ , mean=snake_case__ , std=snake_case__ ) for image in images] UpperCAmelCase : List[str] =[to_channel_dimension_format(snake_case__ , snake_case__ ) for image in images] UpperCAmelCase : List[Any] ={'''pixel_values''': images} return BatchFeature(data=snake_case__ , tensor_type=snake_case__ )
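# Illustrative usage (not part of the original file) — the resize / crop_pct
# logic above matches transformers' ConvNeXT image processor, so this sketch
# assumes ConvNextImageProcessor is the public name of the class.
import numpy as np
from transformers import ConvNextImageProcessor

processor = ConvNextImageProcessor(size={"shortest_edge": 224})
image = np.random.randint(0, 256, (640, 480, 3), dtype=np.uint8)
batch = processor(image, return_tensors="np")
print(batch["pixel_values"].shape)  # (1, 3, 224, 224)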
import tempfile import unittest import numpy as np import transformers from transformers import GPTaTokenizer, GPTJConfig, is_flax_available, is_torch_available from transformers.testing_utils import is_pt_flax_cross_test, require_flax, tooslow from ...generation.test_flax_utils import FlaxGenerationTesterMixin from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask if is_flax_available(): import jax import jax.numpy as jnp from transformers.modeling_flax_pytorch_utils import ( convert_pytorch_state_dict_to_flax, load_flax_weights_in_pytorch_model, ) from transformers.models.gptj.modeling_flax_gptj import FlaxGPTJForCausalLM, FlaxGPTJModel if is_torch_available(): import torch class __snake_case : def __init__( self , snake_case__ , snake_case__=14 , snake_case__=7 , snake_case__=True , snake_case__=True , snake_case__=False , snake_case__=True , snake_case__=99 , snake_case__=32 , snake_case__=4 , snake_case__=4 , snake_case__=4 , snake_case__=37 , snake_case__="gelu" , snake_case__=0.1 , snake_case__=0.1 , snake_case__=512 , snake_case__=0.02 , ) -> str: '''simple docstring''' UpperCAmelCase : str =parent UpperCAmelCase : Tuple =batch_size UpperCAmelCase : Optional[int] =seq_length UpperCAmelCase : Optional[int] =is_training UpperCAmelCase : Tuple =use_input_mask UpperCAmelCase : List[Any] =use_token_type_ids UpperCAmelCase : Optional[Any] =use_labels UpperCAmelCase : Union[str, Any] =vocab_size UpperCAmelCase : List[Any] =hidden_size UpperCAmelCase : Optional[int] =rotary_dim UpperCAmelCase : Union[str, Any] =num_hidden_layers UpperCAmelCase : List[Any] =num_attention_heads UpperCAmelCase : Dict =intermediate_size UpperCAmelCase : Union[str, Any] =hidden_act UpperCAmelCase : Any =hidden_dropout_prob UpperCAmelCase : Dict =attention_probs_dropout_prob UpperCAmelCase : Union[str, Any] =max_position_embeddings UpperCAmelCase : str =initializer_range UpperCAmelCase : Optional[int] =None UpperCAmelCase : List[Any] =vocab_size - 1 UpperCAmelCase : Optional[Any] =vocab_size - 1 UpperCAmelCase : List[Any] =vocab_size - 1 def UpperCAmelCase__ ( self ) -> Optional[int]: '''simple docstring''' UpperCAmelCase : List[str] =ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) UpperCAmelCase : List[Any] =None if self.use_input_mask: UpperCAmelCase : Optional[Any] =random_attention_mask([self.batch_size, self.seq_length] ) UpperCAmelCase : Dict =GPTJConfig( vocab_size=self.vocab_size , n_embd=self.hidden_size , n_layer=self.num_hidden_layers , n_head=self.num_attention_heads , n_positions=self.max_position_embeddings , use_cache=snake_case__ , bos_token_id=self.bos_token_id , eos_token_id=self.eos_token_id , pad_token_id=self.pad_token_id , rotary_dim=self.rotary_dim , ) return (config, input_ids, input_mask) def UpperCAmelCase__ ( self ) -> Dict: '''simple docstring''' UpperCAmelCase : Tuple =self.prepare_config_and_inputs() UpperCAmelCase , UpperCAmelCase , UpperCAmelCase : Union[str, Any] =config_and_inputs UpperCAmelCase : Tuple ={'''input_ids''': input_ids, '''attention_mask''': attention_mask} return config, inputs_dict def UpperCAmelCase__ ( self , snake_case__ , snake_case__ , snake_case__ , snake_case__ ) -> List[Any]: '''simple docstring''' UpperCAmelCase : Any =20 UpperCAmelCase : Any =model_class_name(snake_case__ ) UpperCAmelCase : str =model.init_cache(input_ids.shape[0] , snake_case__ ) UpperCAmelCase : Any =jnp.ones((input_ids.shape[0], max_decoder_length) , dtype='''i4''' ) UpperCAmelCase : Optional[Any] =jnp.broadcast_to( 
jnp.arange(input_ids.shape[-1] - 1 )[None, :] , (input_ids.shape[0], input_ids.shape[-1] - 1) ) UpperCAmelCase : Optional[Any] =model( input_ids[:, :-1] , attention_mask=snake_case__ , past_key_values=snake_case__ , position_ids=snake_case__ , ) UpperCAmelCase : List[str] =jnp.array(input_ids.shape[0] * [[input_ids.shape[-1] - 1]] , dtype='''i4''' ) UpperCAmelCase : Optional[Any] =model( input_ids[:, -1:] , attention_mask=snake_case__ , past_key_values=outputs_cache.past_key_values , position_ids=snake_case__ , ) UpperCAmelCase : List[Any] =model(snake_case__ ) UpperCAmelCase : Any =np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) ) self.parent.assertTrue(diff < 1e-3 , msg=f'''Max diff is {diff}''' ) def UpperCAmelCase__ ( self , snake_case__ , snake_case__ , snake_case__ , snake_case__ ) -> List[Any]: '''simple docstring''' UpperCAmelCase : Dict =20 UpperCAmelCase : Dict =model_class_name(snake_case__ ) UpperCAmelCase : Tuple =jnp.concatenate( [attention_mask, jnp.zeros((attention_mask.shape[0], max_decoder_length - attention_mask.shape[1]) )] , axis=-1 , ) UpperCAmelCase : Dict =model.init_cache(input_ids.shape[0] , snake_case__ ) UpperCAmelCase : int =jnp.broadcast_to( jnp.arange(input_ids.shape[-1] - 1 )[None, :] , (input_ids.shape[0], input_ids.shape[-1] - 1) ) UpperCAmelCase : Optional[Any] =model( input_ids[:, :-1] , attention_mask=snake_case__ , past_key_values=snake_case__ , position_ids=snake_case__ , ) UpperCAmelCase : Any =jnp.array(input_ids.shape[0] * [[input_ids.shape[-1] - 1]] , dtype='''i4''' ) UpperCAmelCase : str =model( input_ids[:, -1:] , past_key_values=outputs_cache.past_key_values , attention_mask=snake_case__ , position_ids=snake_case__ , ) UpperCAmelCase : Any =model(snake_case__ , attention_mask=snake_case__ ) UpperCAmelCase : Dict =np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) ) self.parent.assertTrue(diff < 1e-3 , msg=f'''Max diff is {diff}''' ) @require_flax class __snake_case ( lowerCamelCase__ , lowerCamelCase__ , unittest.TestCase ): __lowerCamelCase : Tuple = (FlaxGPTJModel, FlaxGPTJForCausalLM) if is_flax_available() else () __lowerCamelCase : Optional[Any] = (FlaxGPTJForCausalLM,) if is_flax_available() else () def UpperCAmelCase__ ( self ) -> int: '''simple docstring''' UpperCAmelCase : Union[str, Any] =FlaxGPTJModelTester(self ) def UpperCAmelCase__ ( self ) -> str: '''simple docstring''' for model_class_name in self.all_model_classes: UpperCAmelCase , UpperCAmelCase , UpperCAmelCase : Dict =self.model_tester.prepare_config_and_inputs() self.model_tester.check_use_cache_forward(snake_case__ , snake_case__ , snake_case__ , snake_case__ ) def UpperCAmelCase__ ( self ) -> Dict: '''simple docstring''' for model_class_name in self.all_model_classes: UpperCAmelCase , UpperCAmelCase , UpperCAmelCase : int =self.model_tester.prepare_config_and_inputs() self.model_tester.check_use_cache_forward_with_attn_mask( snake_case__ , snake_case__ , snake_case__ , snake_case__ ) @tooslow def UpperCAmelCase__ ( self ) -> Union[str, Any]: '''simple docstring''' UpperCAmelCase : Tuple =GPTaTokenizer.from_pretrained('''gpt2''' , pad_token='''<|endoftext|>''' , padding_side='''left''' ) UpperCAmelCase : Optional[Any] =tokenizer(['''Hello this is a long string''', '''Hey'''] , return_tensors='''np''' , padding=snake_case__ , truncation=snake_case__ ) UpperCAmelCase : Optional[int] =FlaxGPTJForCausalLM.from_pretrained('''EleutherAI/gpt-j-6B''' ) UpperCAmelCase : str =False UpperCAmelCase : Union[str, Any] 
=model.config.eos_token_id UpperCAmelCase : List[Any] =jax.jit(model.generate ) UpperCAmelCase : Dict =jit_generate( inputs['''input_ids'''] , attention_mask=inputs['''attention_mask'''] , pad_token_id=tokenizer.pad_token_id ).sequences UpperCAmelCase : Any =tokenizer.batch_decode(snake_case__ , skip_special_tokens=snake_case__ ) UpperCAmelCase : Tuple =[ '''Hello this is a long string of text.\n\nI\'m trying to get the text of the''', '''Hey, I\'m a little late to the party. I\'m going to''', ] self.assertListEqual(snake_case__ , snake_case__ ) @is_pt_flax_cross_test def UpperCAmelCase__ ( self ) -> List[Any]: '''simple docstring''' UpperCAmelCase , UpperCAmelCase : List[str] =self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: with self.subTest(model_class.__name__ ): # prepare inputs UpperCAmelCase : Union[str, Any] =self._prepare_for_class(snake_case__ , snake_case__ ) UpperCAmelCase : List[str] ={k: torch.tensor(v.tolist() ) for k, v in prepared_inputs_dict.items()} # load corresponding PyTorch class UpperCAmelCase : Any =model_class.__name__[4:] # Skip the "Flax" at the beginning UpperCAmelCase : Any =getattr(snake_case__ , snake_case__ ) UpperCAmelCase , UpperCAmelCase : Union[str, Any] =pt_inputs['''input_ids'''].shape UpperCAmelCase : Tuple =np.random.randint(0 , seq_length - 1 , size=(batch_size,) ) for batch_idx, start_index in enumerate(snake_case__ ): UpperCAmelCase : int =0 UpperCAmelCase : Optional[int] =1 UpperCAmelCase : Optional[int] =0 UpperCAmelCase : Union[str, Any] =1 UpperCAmelCase : List[str] =pt_model_class(snake_case__ ).eval() UpperCAmelCase : Optional[int] =model_class(snake_case__ , dtype=jnp.floataa ) UpperCAmelCase : Any =convert_pytorch_state_dict_to_flax(pt_model.state_dict() , snake_case__ ) UpperCAmelCase : Union[str, Any] =fx_state with torch.no_grad(): UpperCAmelCase : Any =pt_model(**snake_case__ ).to_tuple() UpperCAmelCase : Dict =fx_model(**snake_case__ ).to_tuple() self.assertEqual(len(snake_case__ ) , len(snake_case__ ) , '''Output lengths differ between Flax and PyTorch''' ) for fx_output, pt_output in zip(snake_case__ , snake_case__ ): self.assert_almost_equals(fx_output[:, -1] , pt_output[:, -1].numpy() , 4e-2 ) with tempfile.TemporaryDirectory() as tmpdirname: pt_model.save_pretrained(snake_case__ ) UpperCAmelCase : str =model_class.from_pretrained(snake_case__ , from_pt=snake_case__ ) UpperCAmelCase : int =fx_model_loaded(**snake_case__ ).to_tuple() self.assertEqual( len(snake_case__ ) , len(snake_case__ ) , '''Output lengths differ between Flax and PyTorch''' ) for fx_output_loaded, pt_output in zip(snake_case__ , snake_case__ ): self.assert_almost_equals(fx_output_loaded[:, -1] , pt_output[:, -1].numpy() , 4e-2 ) @is_pt_flax_cross_test def UpperCAmelCase__ ( self ) -> Optional[Any]: '''simple docstring''' UpperCAmelCase , UpperCAmelCase : Any =self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: with self.subTest(model_class.__name__ ): # prepare inputs UpperCAmelCase : Union[str, Any] =self._prepare_for_class(snake_case__ , snake_case__ ) UpperCAmelCase : Union[str, Any] ={k: torch.tensor(v.tolist() ) for k, v in prepared_inputs_dict.items()} # load corresponding PyTorch class UpperCAmelCase : int =model_class.__name__[4:] # Skip the "Flax" at the beginning UpperCAmelCase : int =getattr(snake_case__ , snake_case__ ) UpperCAmelCase : Dict =pt_model_class(snake_case__ ).eval() UpperCAmelCase : str =model_class(snake_case__ , dtype=jnp.floataa ) 
UpperCAmelCase : Optional[Any] =load_flax_weights_in_pytorch_model(snake_case__ , fx_model.params ) UpperCAmelCase , UpperCAmelCase : Optional[int] =pt_inputs['''input_ids'''].shape UpperCAmelCase : Optional[int] =np.random.randint(0 , seq_length - 1 , size=(batch_size,) ) for batch_idx, start_index in enumerate(snake_case__ ): UpperCAmelCase : str =0 UpperCAmelCase : Any =1 UpperCAmelCase : List[Any] =0 UpperCAmelCase : Tuple =1 # make sure weights are tied in PyTorch pt_model.tie_weights() with torch.no_grad(): UpperCAmelCase : Optional[Any] =pt_model(**snake_case__ ).to_tuple() UpperCAmelCase : List[Any] =fx_model(**snake_case__ ).to_tuple() self.assertEqual(len(snake_case__ ) , len(snake_case__ ) , '''Output lengths differ between Flax and PyTorch''' ) for fx_output, pt_output in zip(snake_case__ , snake_case__ ): self.assert_almost_equals(fx_output[:, -1] , pt_output[:, -1].numpy() , 4e-2 ) with tempfile.TemporaryDirectory() as tmpdirname: fx_model.save_pretrained(snake_case__ ) UpperCAmelCase : Tuple =pt_model_class.from_pretrained(snake_case__ , from_flax=snake_case__ ) with torch.no_grad(): UpperCAmelCase : Any =pt_model_loaded(**snake_case__ ).to_tuple() self.assertEqual( len(snake_case__ ) , len(snake_case__ ) , '''Output lengths differ between Flax and PyTorch''' ) for fx_output, pt_output in zip(snake_case__ , snake_case__ ): self.assert_almost_equals(fx_output[:, -1] , pt_output[:, -1].numpy() , 4e-2 ) @tooslow def UpperCAmelCase__ ( self ) -> List[str]: '''simple docstring''' for model_class_name in self.all_model_classes: UpperCAmelCase : str =model_class_name.from_pretrained('''EleutherAI/gpt-j-6B''' ) UpperCAmelCase : Tuple =model(np.ones((1, 1) ) ) self.assertIsNotNone(snake_case__ )
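# Editor's note: illustrative sketch, not part of the dataset row. The two
# check_use_cache_forward helpers above validate incremental decoding with a
# KV cache: run the prefix (all tokens but the last) through the model with a
# freshly initialised cache, feed only the final token together with the
# returned past_key_values, and assert the logits match a single full forward
# pass. The position_ids plumbing is the subtle part; this mirrors the
# broadcasts with plain numpy so they can be checked in isolation.
import numpy as np

batch_size, seq_len = 2, 7
input_ids = np.arange(batch_size * seq_len).reshape(batch_size, seq_len)

# positions for the prefix pass: 0..seq_len-2, replicated across the batch
prefix_positions = np.broadcast_to(
    np.arange(seq_len - 1)[None, :], (batch_size, seq_len - 1)
)
# position for the single-token pass: seq_len-1 for every batch element
last_position = np.array(batch_size * [[seq_len - 1]], dtype="i4")

assert prefix_positions.shape == (batch_size, seq_len - 1)
assert last_position.shape == (batch_size, 1)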
348
1
import math __snake_case = 10 __snake_case = 7 __snake_case = BALLS_PER_COLOUR * NUM_COLOURS def lowerCAmelCase_ ( __lowerCAmelCase = 20 )-> str: '''simple docstring''' UpperCAmelCase : int =math.comb(__lowerCAmelCase , __lowerCAmelCase ) UpperCAmelCase : Dict =math.comb(NUM_BALLS - BALLS_PER_COLOUR , __lowerCAmelCase ) UpperCAmelCase : str =NUM_COLOURS * (1 - missing_colour / total) return f'''{result:.9f}''' if __name__ == "__main__": print(solution(20))
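# Editor's note: illustrative sketch, not part of the dataset row. The row
# above computes Project Euler 493: with 7 colours x 10 balls per colour and
# 20 balls drawn, linearity of expectation gives an expected number of
# distinct colours of 7 * (1 - C(60, 20) / C(70, 20)). Note the obfuscation
# in the row binds all three module constants to the same name and passes the
# same argument to math.comb twice, so as printed it would not run; this
# restates the intended computation with readable (editor-chosen) names.
import math

NUM_COLOURS = 7
BALLS_PER_COLOUR = 10
NUM_BALLS = NUM_COLOURS * BALLS_PER_COLOUR  # 70

def expected_distinct_colours(num_picked: int = 20) -> str:
    total = math.comb(NUM_BALLS, num_picked)
    # ways to draw with one fixed colour entirely absent
    missing_colour = math.comb(NUM_BALLS - BALLS_PER_COLOUR, num_picked)
    result = NUM_COLOURS * (1 - missing_colour / total)
    return f"{result:.9f}"

assert expected_distinct_colours(20).startswith("6.8187418")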
348
from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available __snake_case = { '''configuration_bloom''': ['''BLOOM_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''BloomConfig''', '''BloomOnnxConfig'''], } try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __snake_case = ['''BloomTokenizerFast'''] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __snake_case = [ '''BLOOM_PRETRAINED_MODEL_ARCHIVE_LIST''', '''BloomForCausalLM''', '''BloomModel''', '''BloomPreTrainedModel''', '''BloomForSequenceClassification''', '''BloomForTokenClassification''', '''BloomForQuestionAnswering''', ] if TYPE_CHECKING: from .configuration_bloom import BLOOM_PRETRAINED_CONFIG_ARCHIVE_MAP, BloomConfig, BloomOnnxConfig try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_bloom_fast import BloomTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_bloom import ( BLOOM_PRETRAINED_MODEL_ARCHIVE_LIST, BloomForCausalLM, BloomForQuestionAnswering, BloomForSequenceClassification, BloomForTokenClassification, BloomModel, BloomPreTrainedModel, ) else: import sys __snake_case = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
348
1
def lowerCAmelCase_ ( __lowerCAmelCase )-> int: '''simple docstring''' if not numbers: return 0 if not isinstance(__lowerCAmelCase , (list, tuple) ) or not all( isinstance(__lowerCAmelCase , __lowerCAmelCase ) for number in numbers ): raise ValueError('''numbers must be an iterable of integers''' ) UpperCAmelCase : Tuple =numbers[0] for i in range(1 , len(__lowerCAmelCase ) ): # update the maximum and minimum subarray products UpperCAmelCase : Union[str, Any] =numbers[i] if number < 0: UpperCAmelCase , UpperCAmelCase : Optional[int] =min_till_now, max_till_now UpperCAmelCase : Tuple =max(__lowerCAmelCase , max_till_now * number ) UpperCAmelCase : Union[str, Any] =min(__lowerCAmelCase , min_till_now * number ) # update the maximum product found till now UpperCAmelCase : Optional[int] =max(__lowerCAmelCase , __lowerCAmelCase ) return max_prod
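# Editor's note: illustrative sketch, not part of the dataset row. The row
# above is the classic maximum-product-subarray scan: track both the maximum
# and minimum product ending at each index, swapping them when the current
# number is negative (a negative flips which running product is extreme).
# Readable restatement with editor-chosen names, plus quick checks:
def max_product_subarray(numbers: list[int]) -> int:
    if not numbers:
        return 0
    max_till_now = min_till_now = max_prod = numbers[0]
    for number in numbers[1:]:
        if number < 0:
            max_till_now, min_till_now = min_till_now, max_till_now
        max_till_now = max(number, max_till_now * number)
        min_till_now = min(number, min_till_now * number)
        max_prod = max(max_prod, max_till_now)
    return max_prod

assert max_product_subarray([2, 3, -2, 4]) == 6
assert max_product_subarray([-2, 0, -1]) == 0
assert max_product_subarray([-4, -3]) == 12  # two negatives multiply to a positive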
348
import os from typing import Dict, List, Tuple, TypeVar, Union __snake_case = TypeVar('''T''') __snake_case = Union[List[T], Tuple[T, ...]] __snake_case = Union[T, List[T], Dict[str, T]] __snake_case = Union[str, bytes, os.PathLike]
348
1
from typing import List, Optional, TypeVar from .arrow_dataset import Dataset, _concatenate_map_style_datasets, _interleave_map_style_datasets from .dataset_dict import DatasetDict, IterableDatasetDict from .info import DatasetInfo from .iterable_dataset import IterableDataset, _concatenate_iterable_datasets, _interleave_iterable_datasets from .splits import NamedSplit from .utils import logging from .utils.py_utils import Literal __snake_case = logging.get_logger(__name__) __snake_case = TypeVar('''DatasetType''', Dataset, IterableDataset) def lowerCAmelCase_ ( __lowerCAmelCase , __lowerCAmelCase = None , __lowerCAmelCase = None , __lowerCAmelCase = None , __lowerCAmelCase = None , __lowerCAmelCase = "first_exhausted" , )-> DatasetType: '''simple docstring''' from .arrow_dataset import Dataset from .iterable_dataset import IterableDataset if not datasets: raise ValueError('''Unable to interleave an empty list of datasets.''' ) for i, dataset in enumerate(__lowerCAmelCase ): if not isinstance(__lowerCAmelCase , (Dataset, IterableDataset) ): if isinstance(__lowerCAmelCase , (DatasetDict, IterableDatasetDict) ): if not dataset: raise ValueError( f'''Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} ''' '''is an empty dataset dictionary.''' ) raise ValueError( f'''Dataset at position {i} has at least one split: {list(__lowerCAmelCase )}\n''' f'''Please pick one to interleave with the other datasets, for example: dataset[\'{next(iter(__lowerCAmelCase ) )}\']''' ) raise ValueError( f'''Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} is a {type(__lowerCAmelCase ).__name__}.''' ) if i == 0: UpperCAmelCase , UpperCAmelCase : Optional[int] =( (Dataset, IterableDataset) if isinstance(__lowerCAmelCase , __lowerCAmelCase ) else (IterableDataset, Dataset) ) elif not isinstance(__lowerCAmelCase , __lowerCAmelCase ): raise ValueError( f'''Unable to interleave a {dataset_type.__name__} (at position 0) with a {other_type.__name__} (at position {i}). Expected a list of Dataset objects or a list of IterableDataset objects.''' ) if stopping_strategy not in ["first_exhausted", "all_exhausted"]: raise ValueError(f'''{stopping_strategy} is not supported. 
Please enter a valid stopping_strategy.''' ) if dataset_type is Dataset: return _interleave_map_style_datasets( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , info=__lowerCAmelCase , split=__lowerCAmelCase , stopping_strategy=__lowerCAmelCase ) else: return _interleave_iterable_datasets( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , info=__lowerCAmelCase , split=__lowerCAmelCase , stopping_strategy=__lowerCAmelCase ) def lowerCAmelCase_ ( __lowerCAmelCase , __lowerCAmelCase = None , __lowerCAmelCase = None , __lowerCAmelCase = 0 , )-> DatasetType: '''simple docstring''' if not dsets: raise ValueError('''Unable to concatenate an empty list of datasets.''' ) for i, dataset in enumerate(__lowerCAmelCase ): if not isinstance(__lowerCAmelCase , (Dataset, IterableDataset) ): if isinstance(__lowerCAmelCase , (DatasetDict, IterableDatasetDict) ): if not dataset: raise ValueError( f'''Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} ''' '''is an empty dataset dictionary.''' ) raise ValueError( f'''Dataset at position {i} has at least one split: {list(__lowerCAmelCase )}\n''' f'''Please pick one to interleave with the other datasets, for example: dataset[\'{next(iter(__lowerCAmelCase ) )}\']''' ) raise ValueError( f'''Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} is a {type(__lowerCAmelCase ).__name__}.''' ) if i == 0: UpperCAmelCase , UpperCAmelCase : str =( (Dataset, IterableDataset) if isinstance(__lowerCAmelCase , __lowerCAmelCase ) else (IterableDataset, Dataset) ) elif not isinstance(__lowerCAmelCase , __lowerCAmelCase ): raise ValueError( f'''Unable to interleave a {dataset_type.__name__} (at position 0) with a {other_type.__name__} (at position {i}). Expected a list of Dataset objects or a list of IterableDataset objects.''' ) if dataset_type is Dataset: return _concatenate_map_style_datasets(__lowerCAmelCase , info=__lowerCAmelCase , split=__lowerCAmelCase , axis=__lowerCAmelCase ) else: return _concatenate_iterable_datasets(__lowerCAmelCase , info=__lowerCAmelCase , split=__lowerCAmelCase , axis=__lowerCAmelCase )
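# Editor's note: illustrative sketch, not part of the dataset row. The two
# dispatchers above back datasets.interleave_datasets / concatenate_datasets,
# routing to the map-style or iterable implementation after validating that
# all inputs share one type. Typical use of the public API that wraps them:
from datasets import Dataset, interleave_datasets

d1 = Dataset.from_dict({"text": ["a", "b", "c"]})
d2 = Dataset.from_dict({"text": ["x", "y"]})

# alternate between sources; "all_exhausted" keeps sampling (oversampling
# map-style sources if needed) until every source has been fully seen
mixed = interleave_datasets(
    [d1, d2],
    probabilities=[0.5, 0.5],
    seed=42,
    stopping_strategy="all_exhausted",
)
print(mixed["text"])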
348
import os from shutil import copyfile from typing import List, Optional, Tuple from ...tokenization_utils import AddedToken from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import is_sentencepiece_available, logging if is_sentencepiece_available(): from .tokenization_big_bird import BigBirdTokenizer else: __snake_case = None __snake_case = logging.get_logger(__name__) __snake_case = {'''vocab_file''': '''spiece.model''', '''tokenizer_file''': '''tokenizer.json'''} __snake_case = { '''vocab_file''': { '''google/bigbird-roberta-base''': '''https://huggingface.co/google/bigbird-roberta-base/resolve/main/spiece.model''', '''google/bigbird-roberta-large''': ( '''https://huggingface.co/google/bigbird-roberta-large/resolve/main/spiece.model''' ), '''google/bigbird-base-trivia-itc''': ( '''https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/spiece.model''' ), }, '''tokenizer_file''': { '''google/bigbird-roberta-base''': ( '''https://huggingface.co/google/bigbird-roberta-base/resolve/main/tokenizer.json''' ), '''google/bigbird-roberta-large''': ( '''https://huggingface.co/google/bigbird-roberta-large/resolve/main/tokenizer.json''' ), '''google/bigbird-base-trivia-itc''': ( '''https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/tokenizer.json''' ), }, } __snake_case = { '''google/bigbird-roberta-base''': 40_96, '''google/bigbird-roberta-large''': 40_96, '''google/bigbird-base-trivia-itc''': 40_96, } __snake_case = '''▁''' class __snake_case ( lowerCamelCase__ ): __lowerCamelCase : Dict = VOCAB_FILES_NAMES __lowerCamelCase : List[Any] = PRETRAINED_VOCAB_FILES_MAP __lowerCamelCase : Dict = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES __lowerCamelCase : List[str] = BigBirdTokenizer __lowerCamelCase : Any = ["""input_ids""", """attention_mask"""] __lowerCamelCase : List[int] = [] def __init__( self , snake_case__=None , snake_case__=None , snake_case__="<unk>" , snake_case__="<s>" , snake_case__="</s>" , snake_case__="<pad>" , snake_case__="[SEP]" , snake_case__="[MASK]" , snake_case__="[CLS]" , **snake_case__ , ) -> List[Any]: '''simple docstring''' UpperCAmelCase : Any =AddedToken(snake_case__ , lstrip=snake_case__ , rstrip=snake_case__ ) if isinstance(snake_case__ , snake_case__ ) else bos_token UpperCAmelCase : Optional[int] =AddedToken(snake_case__ , lstrip=snake_case__ , rstrip=snake_case__ ) if isinstance(snake_case__ , snake_case__ ) else eos_token UpperCAmelCase : List[str] =AddedToken(snake_case__ , lstrip=snake_case__ , rstrip=snake_case__ ) if isinstance(snake_case__ , snake_case__ ) else unk_token UpperCAmelCase : Union[str, Any] =AddedToken(snake_case__ , lstrip=snake_case__ , rstrip=snake_case__ ) if isinstance(snake_case__ , snake_case__ ) else pad_token UpperCAmelCase : int =AddedToken(snake_case__ , lstrip=snake_case__ , rstrip=snake_case__ ) if isinstance(snake_case__ , snake_case__ ) else cls_token UpperCAmelCase : str =AddedToken(snake_case__ , lstrip=snake_case__ , rstrip=snake_case__ ) if isinstance(snake_case__ , snake_case__ ) else sep_token # Mask token behave like a normal word, i.e. 
include the space before it UpperCAmelCase : List[Any] =AddedToken(snake_case__ , lstrip=snake_case__ , rstrip=snake_case__ ) if isinstance(snake_case__ , snake_case__ ) else mask_token super().__init__( snake_case__ , tokenizer_file=snake_case__ , bos_token=snake_case__ , eos_token=snake_case__ , unk_token=snake_case__ , sep_token=snake_case__ , pad_token=snake_case__ , cls_token=snake_case__ , mask_token=snake_case__ , **snake_case__ , ) UpperCAmelCase : Tuple =vocab_file UpperCAmelCase : Optional[int] =False if not self.vocab_file else True def UpperCAmelCase__ ( self , snake_case__ , snake_case__ = None ) -> List[int]: '''simple docstring''' UpperCAmelCase : int =[self.sep_token_id] UpperCAmelCase : Optional[int] =[self.cls_token_id] if token_ids_a is None: return cls + token_ids_a + sep return cls + token_ids_a + sep + token_ids_a + sep def UpperCAmelCase__ ( self , snake_case__ , snake_case__ = None , snake_case__ = False ) -> List[int]: '''simple docstring''' if already_has_special_tokens: if token_ids_a is not None: raise ValueError( '''You should not supply a second sequence if the provided sequence of ''' '''ids is already formatted with special tokens for the model.''' ) return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_a] if token_ids_a is None: return [1] + ([0] * len(snake_case__ )) + [1] return [1] + ([0] * len(snake_case__ )) + [1] + ([0] * len(snake_case__ )) + [1] def UpperCAmelCase__ ( self , snake_case__ , snake_case__ = None ) -> List[int]: '''simple docstring''' UpperCAmelCase : Optional[Any] =[self.sep_token_id] UpperCAmelCase : Optional[int] =[self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] def UpperCAmelCase__ ( self , snake_case__ , snake_case__ = None ) -> Tuple[str]: '''simple docstring''' if not self.can_save_slow_tokenizer: raise ValueError( '''Your fast tokenizer does not have the necessary information to save the vocabulary for a slow ''' '''tokenizer.''' ) if not os.path.isdir(snake_case__ ): logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' ) return UpperCAmelCase : Optional[int] =os.path.join( snake_case__ , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(snake_case__ ): copyfile(self.vocab_file , snake_case__ ) return (out_vocab_file,)
348
1
import os from argparse import ArgumentParser from typing import List import torch.utils.data from datasets import Dataset, IterableDataset from datasets.distributed import split_dataset_by_node __snake_case = 4 __snake_case = 3 class __snake_case ( lowerCamelCase__ ): pass def lowerCAmelCase_ ( __lowerCAmelCase )-> List[str]: '''simple docstring''' for shard in shards: for i in range(__lowerCAmelCase ): yield {"i": i, "shard": shard} def lowerCAmelCase_ ( )-> Optional[int]: '''simple docstring''' UpperCAmelCase : List[str] =int(os.environ['''RANK'''] ) UpperCAmelCase : Optional[Any] =int(os.environ['''WORLD_SIZE'''] ) UpperCAmelCase : List[Any] =ArgumentParser() parser.add_argument('''--streaming''' , type=__lowerCAmelCase ) parser.add_argument('''--local_rank''' , type=__lowerCAmelCase ) parser.add_argument('''--num_workers''' , type=__lowerCAmelCase , default=0 ) UpperCAmelCase : Any =parser.parse_args() UpperCAmelCase : List[str] =args.streaming UpperCAmelCase : Tuple =args.num_workers UpperCAmelCase : int ={'''shards''': [f'''shard_{shard_idx}''' for shard_idx in range(__lowerCAmelCase )]} UpperCAmelCase : Optional[int] =IterableDataset.from_generator(__lowerCAmelCase , gen_kwargs=__lowerCAmelCase ) if not streaming: UpperCAmelCase : List[Any] =Dataset.from_list(list(__lowerCAmelCase ) ) UpperCAmelCase : Dict =split_dataset_by_node(__lowerCAmelCase , rank=__lowerCAmelCase , world_size=__lowerCAmelCase ) UpperCAmelCase : List[Any] =torch.utils.data.DataLoader(__lowerCAmelCase , num_workers=__lowerCAmelCase ) UpperCAmelCase : Dict =NUM_SHARDS * NUM_ITEMS_PER_SHARD UpperCAmelCase : str =full_size // world_size expected_local_size += int(rank < (full_size % world_size) ) UpperCAmelCase : List[Any] =sum(1 for _ in dataloader ) if local_size != expected_local_size: raise FailedTestError(f'''local_size {local_size} != expected_local_size {expected_local_size}''' ) if __name__ == "__main__": main()
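# Editor's note: illustrative sketch, not part of the dataset row. The test
# above asserts that datasets.distributed.split_dataset_by_node gives each
# rank full_size // world_size items, plus one extra item for the first
# full_size % world_size ranks. Quick arithmetic check of that expectation:
def expected_local_size(full_size: int, world_size: int, rank: int) -> int:
    return full_size // world_size + int(rank < full_size % world_size)

# 12 items over 3 ranks: an even 4 each
assert [expected_local_size(12, 3, r) for r in range(3)] == [4, 4, 4]
# 13 items over 3 ranks: rank 0 absorbs the remainder
assert [expected_local_size(13, 3, r) for r in range(3)] == [5, 4, 4]
assert sum(expected_local_size(13, 3, r) for r in range(3)) == 13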
348
from collections.abc import Callable from math import pi, sqrt from random import uniform from statistics import mean def lowerCAmelCase_ ( __lowerCAmelCase )-> Optional[Any]: '''simple docstring''' def is_in_circle(__lowerCAmelCase , __lowerCAmelCase ) -> bool: UpperCAmelCase : List[Any] =sqrt((x**2) + (y**2) ) # Our circle has a radius of 1, so a distance # greater than 1 would land outside the circle. return distance_from_centre <= 1 # The proportion of guesses that landed in the circle UpperCAmelCase : List[Any] =mean( int(is_in_circle(uniform(-1.0 , 1.0 ) , uniform(-1.0 , 1.0 ) ) ) for _ in range(__lowerCAmelCase ) ) # The ratio of the area for circle to square is pi/4. UpperCAmelCase : Dict =proportion * 4 print(f'''The estimated value of pi is {pi_estimate}''' ) print(f'''The numpy value of pi is {pi}''' ) print(f'''The total error is {abs(pi - pi_estimate )}''' ) def lowerCAmelCase_ ( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = 0.0 , __lowerCAmelCase = 1.0 , )-> float: '''simple docstring''' return mean( function_to_integrate(uniform(__lowerCAmelCase , __lowerCAmelCase ) ) for _ in range(__lowerCAmelCase ) ) * (max_value - min_value) def lowerCAmelCase_ ( __lowerCAmelCase , __lowerCAmelCase = 0.0 , __lowerCAmelCase = 1.0 )-> None: '''simple docstring''' def identity_function(__lowerCAmelCase ) -> float: return x UpperCAmelCase : List[Any] =area_under_curve_estimator( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) UpperCAmelCase : Dict =(max_value * max_value - min_value * min_value) / 2 print('''******************''' ) print(f'''Estimating area under y=x where x varies from {min_value} to {max_value}''' ) print(f'''Estimated value is {estimated_value}''' ) print(f'''Expected value is {expected_value}''' ) print(f'''Total error is {abs(estimated_value - expected_value )}''' ) print('''******************''' ) def lowerCAmelCase_ ( __lowerCAmelCase )-> None: '''simple docstring''' def function_to_integrate(__lowerCAmelCase ) -> float: return sqrt(4.0 - x * x ) UpperCAmelCase : Dict =area_under_curve_estimator( __lowerCAmelCase , __lowerCAmelCase , 0.0 , 2.0 ) print('''******************''' ) print('''Estimating pi using area_under_curve_estimator''' ) print(f'''Estimated value is {estimated_value}''' ) print(f'''Expected value is {pi}''' ) print(f'''Total error is {abs(estimated_value - pi )}''' ) print('''******************''' ) if __name__ == "__main__": import doctest doctest.testmod()
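# Editor's note: illustrative sketch, not part of the dataset row. The row
# above estimates pi two ways: (1) the fraction of uniform points in
# [-1, 1]^2 landing inside the unit circle approaches area_circle /
# area_square = pi / 4, so pi ~= 4 * ratio; (2) integrating sqrt(4 - x^2)
# over [0, 2] (a quarter of a radius-2 circle) yields pi directly. Minimal
# restatement of the first estimator:
from random import uniform

def estimate_pi(samples: int = 100_000) -> float:
    hits = sum(
        uniform(-1.0, 1.0) ** 2 + uniform(-1.0, 1.0) ** 2 <= 1.0
        for _ in range(samples)
    )
    return 4.0 * hits / samples

print(estimate_pi())  # ~3.14 for large sample counts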
348
1
from collections.abc import Callable from math import pi, sqrt from random import uniform from statistics import mean def lowerCAmelCase_ ( __lowerCAmelCase )-> Optional[Any]: '''simple docstring''' def is_in_circle(__lowerCAmelCase , __lowerCAmelCase ) -> bool: UpperCAmelCase : List[Any] =sqrt((x**2) + (y**2) ) # Our circle has a radius of 1, so a distance # greater than 1 would land outside the circle. return distance_from_centre <= 1 # The proportion of guesses that landed in the circle UpperCAmelCase : List[Any] =mean( int(is_in_circle(uniform(-1.0 , 1.0 ) , uniform(-1.0 , 1.0 ) ) ) for _ in range(__lowerCAmelCase ) ) # The ratio of the area for circle to square is pi/4. UpperCAmelCase : Dict =proportion * 4 print(f'''The estimated value of pi is {pi_estimate}''' ) print(f'''The numpy value of pi is {pi}''' ) print(f'''The total error is {abs(pi - pi_estimate )}''' ) def lowerCAmelCase_ ( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = 0.0 , __lowerCAmelCase = 1.0 , )-> float: '''simple docstring''' return mean( function_to_integrate(uniform(__lowerCAmelCase , __lowerCAmelCase ) ) for _ in range(__lowerCAmelCase ) ) * (max_value - min_value) def lowerCAmelCase_ ( __lowerCAmelCase , __lowerCAmelCase = 0.0 , __lowerCAmelCase = 1.0 )-> None: '''simple docstring''' def identity_function(__lowerCAmelCase ) -> float: return x UpperCAmelCase : List[Any] =area_under_curve_estimator( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) UpperCAmelCase : Dict =(max_value * max_value - min_value * min_value) / 2 print('''******************''' ) print(f'''Estimating area under y=x where x varies from {min_value} to {max_value}''' ) print(f'''Estimated value is {estimated_value}''' ) print(f'''Expected value is {expected_value}''' ) print(f'''Total error is {abs(estimated_value - expected_value )}''' ) print('''******************''' ) def lowerCAmelCase_ ( __lowerCAmelCase )-> None: '''simple docstring''' def function_to_integrate(__lowerCAmelCase ) -> float: return sqrt(4.0 - x * x ) UpperCAmelCase : Dict =area_under_curve_estimator( __lowerCAmelCase , __lowerCAmelCase , 0.0 , 2.0 ) print('''******************''' ) print('''Estimating pi using area_under_curve_estimator''' ) print(f'''Estimated value is {estimated_value}''' ) print(f'''Expected value is {pi}''' ) print(f'''Total error is {abs(estimated_value - pi )}''' ) print('''******************''' ) if __name__ == "__main__": import doctest doctest.testmod()
348
from __future__ import annotations import unittest import numpy as np from transformers import BlipTextConfig from transformers.testing_utils import require_tf, slow from transformers.utils import is_tf_available from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask if is_tf_available(): import tensorflow as tf from transformers import TFBlipTextModel from transformers.models.blip.modeling_tf_blip import TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST class __snake_case : def __init__( self , snake_case__ , snake_case__=12 , snake_case__=7 , snake_case__=True , snake_case__=True , snake_case__=True , snake_case__=99 , snake_case__=32 , snake_case__=32 , snake_case__=2 , snake_case__=4 , snake_case__=37 , snake_case__=0.1 , snake_case__=0.1 , snake_case__=512 , snake_case__=0.02 , snake_case__=0 , snake_case__=None , ) -> Tuple: '''simple docstring''' UpperCAmelCase : List[Any] =parent UpperCAmelCase : Optional[int] =batch_size UpperCAmelCase : List[Any] =seq_length UpperCAmelCase : Optional[int] =is_training UpperCAmelCase : Union[str, Any] =use_input_mask UpperCAmelCase : Tuple =use_labels UpperCAmelCase : Union[str, Any] =vocab_size UpperCAmelCase : Tuple =hidden_size UpperCAmelCase : Dict =projection_dim UpperCAmelCase : Optional[int] =num_hidden_layers UpperCAmelCase : Dict =num_attention_heads UpperCAmelCase : int =intermediate_size UpperCAmelCase : Any =dropout UpperCAmelCase : Union[str, Any] =attention_dropout UpperCAmelCase : Union[str, Any] =max_position_embeddings UpperCAmelCase : List[str] =initializer_range UpperCAmelCase : str =scope UpperCAmelCase : str =bos_token_id def UpperCAmelCase__ ( self ) -> int: '''simple docstring''' UpperCAmelCase : int =ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) UpperCAmelCase : int =None if self.use_input_mask: UpperCAmelCase : Union[str, Any] =random_attention_mask([self.batch_size, self.seq_length] ) if input_mask is not None: UpperCAmelCase : Optional[int] =input_mask.numpy() UpperCAmelCase , UpperCAmelCase : List[Any] =input_mask.shape UpperCAmelCase : Optional[Any] =np.random.randint(1 , seq_length - 1 , size=(batch_size,) ) for batch_idx, start_index in enumerate(snake_case__ ): UpperCAmelCase : List[Any] =1 UpperCAmelCase : Tuple =0 UpperCAmelCase : List[Any] =self.get_config() return config, input_ids, tf.convert_to_tensor(snake_case__ ) def UpperCAmelCase__ ( self ) -> Union[str, Any]: '''simple docstring''' return BlipTextConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , projection_dim=self.projection_dim , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , dropout=self.dropout , attention_dropout=self.attention_dropout , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , bos_token_id=self.bos_token_id , ) def UpperCAmelCase__ ( self , snake_case__ , snake_case__ , snake_case__ ) -> Dict: '''simple docstring''' UpperCAmelCase : Tuple =TFBlipTextModel(config=snake_case__ ) UpperCAmelCase : List[Any] =model(snake_case__ , attention_mask=snake_case__ , training=snake_case__ ) UpperCAmelCase : str =model(snake_case__ , training=snake_case__ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) ) def UpperCAmelCase__ ( self ) -> Optional[Any]: 
'''simple docstring''' UpperCAmelCase : List[str] =self.prepare_config_and_inputs() UpperCAmelCase , UpperCAmelCase , UpperCAmelCase : List[Any] =config_and_inputs UpperCAmelCase : Optional[int] ={'''input_ids''': input_ids, '''attention_mask''': input_mask} return config, inputs_dict @require_tf class __snake_case ( lowerCamelCase__ , unittest.TestCase ): __lowerCamelCase : Optional[int] = (TFBlipTextModel,) if is_tf_available() else () __lowerCamelCase : Dict = False __lowerCamelCase : Optional[Any] = False __lowerCamelCase : Dict = False def UpperCAmelCase__ ( self ) -> Optional[Any]: '''simple docstring''' UpperCAmelCase : str =BlipTextModelTester(self ) UpperCAmelCase : Optional[int] =ConfigTester(self , config_class=snake_case__ , hidden_size=37 ) def UpperCAmelCase__ ( self ) -> Optional[int]: '''simple docstring''' self.config_tester.run_common_tests() def UpperCAmelCase__ ( self ) -> List[Any]: '''simple docstring''' UpperCAmelCase : Any =self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*snake_case__ ) def UpperCAmelCase__ ( self ) -> Optional[int]: '''simple docstring''' pass def UpperCAmelCase__ ( self ) -> Union[str, Any]: '''simple docstring''' pass @unittest.skip(reason='''Blip does not use inputs_embeds''' ) def UpperCAmelCase__ ( self ) -> str: '''simple docstring''' pass @unittest.skip(reason='''BlipTextModel has no base class and is not available in MODEL_MAPPING''' ) def UpperCAmelCase__ ( self ) -> Optional[int]: '''simple docstring''' pass @unittest.skip(reason='''BlipTextModel has no base class and is not available in MODEL_MAPPING''' ) def UpperCAmelCase__ ( self ) -> str: '''simple docstring''' pass @slow def UpperCAmelCase__ ( self ) -> Tuple: '''simple docstring''' for model_name in TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: UpperCAmelCase : Optional[Any] =TFBlipTextModel.from_pretrained(snake_case__ ) self.assertIsNotNone(snake_case__ ) def UpperCAmelCase__ ( self , snake_case__=True ) -> Any: '''simple docstring''' super().test_pt_tf_model_equivalence(allow_missing_keys=snake_case__ )
348
1
from dataclasses import dataclass, field from typing import ClassVar, Dict from ..features import Features, Sequence, Value from .base import TaskTemplate @dataclass(frozen=lowerCamelCase__ ) class __snake_case ( lowerCamelCase__ ): # `task` is not a ClassVar since we want it to be part of the `asdict` output for JSON serialization __lowerCamelCase : str = field(default="""question-answering-extractive""" , metadata={"""include_in_asdict_even_if_is_default""": True} ) __lowerCamelCase : ClassVar[Features] = Features({"""question""": Value("""string""" ), """context""": Value("""string""" )} ) __lowerCamelCase : ClassVar[Features] = Features( { """answers""": Sequence( { """text""": Value("""string""" ), """answer_start""": Value("""int32""" ), } ) } ) __lowerCamelCase : str = "question" __lowerCamelCase : str = "context" __lowerCamelCase : str = "answers" @property def UpperCAmelCase__ ( self ) -> Dict[str, str]: '''simple docstring''' return {self.question_column: "question", self.context_column: "context", self.answers_column: "answers"}
348
import torch import torch.nn as nn from transformers import CLIPConfig, CLIPVisionModel, PreTrainedModel from ...utils import logging __snake_case = logging.get_logger(__name__) def lowerCAmelCase_ ( __lowerCAmelCase , __lowerCAmelCase )-> str: '''simple docstring''' UpperCAmelCase : Dict =nn.functional.normalize(__lowerCAmelCase ) UpperCAmelCase : Tuple =nn.functional.normalize(__lowerCAmelCase ) return torch.mm(__lowerCAmelCase , normalized_text_embeds.t() ) class __snake_case ( lowerCamelCase__ ): __lowerCamelCase : List[str] = CLIPConfig __lowerCamelCase : List[Any] = ["""CLIPEncoderLayer"""] def __init__( self , snake_case__ ) -> Dict: '''simple docstring''' super().__init__(snake_case__ ) UpperCAmelCase : Dict =CLIPVisionModel(config.vision_config ) UpperCAmelCase : Optional[Any] =nn.Linear(config.vision_config.hidden_size , config.projection_dim , bias=snake_case__ ) UpperCAmelCase : int =nn.Parameter(torch.ones(17 , config.projection_dim ) , requires_grad=snake_case__ ) UpperCAmelCase : List[str] =nn.Parameter(torch.ones(3 , config.projection_dim ) , requires_grad=snake_case__ ) UpperCAmelCase : str =nn.Parameter(torch.ones(17 ) , requires_grad=snake_case__ ) UpperCAmelCase : Optional[int] =nn.Parameter(torch.ones(3 ) , requires_grad=snake_case__ ) @torch.no_grad() def UpperCAmelCase__ ( self , snake_case__ , snake_case__ ) -> Tuple: '''simple docstring''' UpperCAmelCase : Union[str, Any] =self.vision_model(snake_case__ )[1] # pooled_output UpperCAmelCase : Optional[Any] =self.visual_projection(snake_case__ ) # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16 UpperCAmelCase : List[str] =cosine_distance(snake_case__ , self.special_care_embeds ).cpu().float().numpy() UpperCAmelCase : Optional[Any] =cosine_distance(snake_case__ , self.concept_embeds ).cpu().float().numpy() UpperCAmelCase : Tuple =[] UpperCAmelCase : Dict =image_embeds.shape[0] for i in range(snake_case__ ): UpperCAmelCase : str ={'''special_scores''': {}, '''special_care''': [], '''concept_scores''': {}, '''bad_concepts''': []} # increase this value to create a stronger `nfsw` filter # at the cost of increasing the possibility of filtering benign images UpperCAmelCase : str =0.0 for concept_idx in range(len(special_cos_dist[0] ) ): UpperCAmelCase : Optional[Any] =special_cos_dist[i][concept_idx] UpperCAmelCase : Union[str, Any] =self.special_care_embeds_weights[concept_idx].item() UpperCAmelCase : str =round(concept_cos - concept_threshold + adjustment , 3 ) if result_img["special_scores"][concept_idx] > 0: result_img["special_care"].append({concept_idx, result_img['''special_scores'''][concept_idx]} ) UpperCAmelCase : int =0.01 for concept_idx in range(len(cos_dist[0] ) ): UpperCAmelCase : Any =cos_dist[i][concept_idx] UpperCAmelCase : Optional[int] =self.concept_embeds_weights[concept_idx].item() UpperCAmelCase : int =round(concept_cos - concept_threshold + adjustment , 3 ) if result_img["concept_scores"][concept_idx] > 0: result_img["bad_concepts"].append(snake_case__ ) result.append(snake_case__ ) UpperCAmelCase : Optional[int] =[len(res['''bad_concepts'''] ) > 0 for res in result] return images, has_nsfw_concepts @torch.no_grad() def UpperCAmelCase__ ( self , snake_case__ , snake_case__ ) -> Tuple: '''simple docstring''' UpperCAmelCase : Any =self.vision_model(snake_case__ )[1] # pooled_output UpperCAmelCase : List[str] =self.visual_projection(snake_case__ ) UpperCAmelCase : Any =cosine_distance(snake_case__ , self.special_care_embeds ) UpperCAmelCase : 
Optional[Any] =cosine_distance(snake_case__ , self.concept_embeds ) # increase this value to create a stronger `nsfw` filter # at the cost of increasing the possibility of filtering benign images UpperCAmelCase : Optional[Any] =0.0 UpperCAmelCase : Any =special_cos_dist - self.special_care_embeds_weights + adjustment # special_scores = special_scores.round(decimals=3) UpperCAmelCase : str =torch.any(special_scores > 0 , dim=1 ) UpperCAmelCase : List[Any] =special_care * 0.01 UpperCAmelCase : Union[str, Any] =special_adjustment.unsqueeze(1 ).expand(-1 , cos_dist.shape[1] ) UpperCAmelCase : List[Any] =(cos_dist - self.concept_embeds_weights) + special_adjustment # concept_scores = concept_scores.round(decimals=3) UpperCAmelCase : str =torch.any(concept_scores > 0 , dim=1 ) return images, has_nsfw_concepts
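# Editor's note: illustrative sketch, not part of the dataset row. The
# safety-checker helper above computes its "cosine distance" as a plain
# matrix product of L2-normalised embeddings, i.e. the pairwise cosine
# similarity between every image embedding and every concept embedding.
# Equivalence check against an explicit per-pair computation:
import torch
import torch.nn as nn

image_embeds = torch.randn(2, 8)    # (n_images, dim)
concept_embeds = torch.randn(5, 8)  # (n_concepts, dim)

fast = torch.mm(
    nn.functional.normalize(image_embeds),
    nn.functional.normalize(concept_embeds).t(),
)  # (2, 5)

slow = nn.functional.cosine_similarity(
    image_embeds[:, None, :], concept_embeds[None, :, :], dim=-1
)
assert torch.allclose(fast, slow, atol=1e-5)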
348
1
import gc import unittest import numpy as np import torch from transformers import CLIPTextConfig, CLIPTextModelWithProjection, CLIPTokenizer from diffusers import HeunDiscreteScheduler, PriorTransformer, ShapEPipeline from diffusers.pipelines.shap_e import ShapERenderer from diffusers.utils import load_numpy, slow from diffusers.utils.testing_utils import require_torch_gpu, torch_device from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference class __snake_case ( lowerCamelCase__ , unittest.TestCase ): __lowerCamelCase : Any = ShapEPipeline __lowerCamelCase : Optional[int] = ["""prompt"""] __lowerCamelCase : Optional[int] = ["""prompt"""] __lowerCamelCase : Optional[int] = [ """num_images_per_prompt""", """num_inference_steps""", """generator""", """latents""", """guidance_scale""", """frame_size""", """output_type""", """return_dict""", ] __lowerCamelCase : Dict = False @property def UpperCAmelCase__ ( self ) -> List[str]: '''simple docstring''' return 32 @property def UpperCAmelCase__ ( self ) -> str: '''simple docstring''' return 32 @property def UpperCAmelCase__ ( self ) -> int: '''simple docstring''' return self.time_input_dim * 4 @property def UpperCAmelCase__ ( self ) -> Tuple: '''simple docstring''' return 8 @property def UpperCAmelCase__ ( self ) -> Any: '''simple docstring''' UpperCAmelCase : Optional[int] =CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' ) return tokenizer @property def UpperCAmelCase__ ( self ) -> Union[str, Any]: '''simple docstring''' torch.manual_seed(0 ) UpperCAmelCase : Optional[int] =CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=self.text_embedder_hidden_size , projection_dim=self.text_embedder_hidden_size , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , ) return CLIPTextModelWithProjection(snake_case__ ) @property def UpperCAmelCase__ ( self ) -> Optional[int]: '''simple docstring''' torch.manual_seed(0 ) UpperCAmelCase : Any ={ '''num_attention_heads''': 2, '''attention_head_dim''': 16, '''embedding_dim''': self.time_input_dim, '''num_embeddings''': 32, '''embedding_proj_dim''': self.text_embedder_hidden_size, '''time_embed_dim''': self.time_embed_dim, '''num_layers''': 1, '''clip_embed_dim''': self.time_input_dim * 2, '''additional_embeddings''': 0, '''time_embed_act_fn''': '''gelu''', '''norm_in_type''': '''layer''', '''encoder_hid_proj_type''': None, '''added_emb_type''': None, } UpperCAmelCase : str =PriorTransformer(**snake_case__ ) return model @property def UpperCAmelCase__ ( self ) -> Union[str, Any]: '''simple docstring''' torch.manual_seed(0 ) UpperCAmelCase : Any ={ '''param_shapes''': ( (self.renderer_dim, 93), (self.renderer_dim, 8), (self.renderer_dim, 8), (self.renderer_dim, 8), ), '''d_latent''': self.time_input_dim, '''d_hidden''': self.renderer_dim, '''n_output''': 12, '''background''': ( 0.1, 0.1, 0.1, ), } UpperCAmelCase : Any =ShapERenderer(**snake_case__ ) return model def UpperCAmelCase__ ( self ) -> Tuple: '''simple docstring''' UpperCAmelCase : str =self.dummy_prior UpperCAmelCase : Optional[int] =self.dummy_text_encoder UpperCAmelCase : List[Any] =self.dummy_tokenizer UpperCAmelCase : Tuple =self.dummy_renderer UpperCAmelCase : Union[str, Any] =HeunDiscreteScheduler( beta_schedule='''exp''' , num_train_timesteps=1024 , prediction_type='''sample''' , use_karras_sigmas=snake_case__ , clip_sample=snake_case__ , clip_sample_range=1.0 , ) UpperCAmelCase : Optional[int] ={ '''prior''': 
prior, '''text_encoder''': text_encoder, '''tokenizer''': tokenizer, '''renderer''': renderer, '''scheduler''': scheduler, } return components def UpperCAmelCase__ ( self , snake_case__ , snake_case__=0 ) -> Optional[Any]: '''simple docstring''' if str(snake_case__ ).startswith('''mps''' ): UpperCAmelCase : Optional[int] =torch.manual_seed(snake_case__ ) else: UpperCAmelCase : Tuple =torch.Generator(device=snake_case__ ).manual_seed(snake_case__ ) UpperCAmelCase : Dict ={ '''prompt''': '''horse''', '''generator''': generator, '''num_inference_steps''': 1, '''frame_size''': 32, '''output_type''': '''np''', } return inputs def UpperCAmelCase__ ( self ) -> int: '''simple docstring''' UpperCAmelCase : List[str] ='''cpu''' UpperCAmelCase : Optional[Any] =self.get_dummy_components() UpperCAmelCase : List[str] =self.pipeline_class(**snake_case__ ) UpperCAmelCase : Union[str, Any] =pipe.to(snake_case__ ) pipe.set_progress_bar_config(disable=snake_case__ ) UpperCAmelCase : List[str] =pipe(**self.get_dummy_inputs(snake_case__ ) ) UpperCAmelCase : Tuple =output.images[0] UpperCAmelCase : Dict =image[0, -3:, -3:, -1] assert image.shape == (20, 32, 32, 3) UpperCAmelCase : List[Any] =np.array( [ 0.0003_9216, 0.0003_9216, 0.0003_9216, 0.0003_9216, 0.0003_9216, 0.0003_9216, 0.0003_9216, 0.0003_9216, 0.0003_9216, ] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2 def UpperCAmelCase__ ( self ) -> Tuple: '''simple docstring''' self._test_inference_batch_consistent(batch_sizes=[1, 2] ) def UpperCAmelCase__ ( self ) -> List[Any]: '''simple docstring''' UpperCAmelCase : Optional[int] =torch_device == '''cpu''' UpperCAmelCase : Optional[int] =True self._test_inference_batch_single_identical( batch_size=2 , test_max_difference=snake_case__ , relax_max_difference=snake_case__ , ) def UpperCAmelCase__ ( self ) -> str: '''simple docstring''' UpperCAmelCase : Any =self.get_dummy_components() UpperCAmelCase : int =self.pipeline_class(**snake_case__ ) UpperCAmelCase : Union[str, Any] =pipe.to(snake_case__ ) pipe.set_progress_bar_config(disable=snake_case__ ) UpperCAmelCase : Optional[int] =1 UpperCAmelCase : Any =2 UpperCAmelCase : Optional[int] =self.get_dummy_inputs(snake_case__ ) for key in inputs.keys(): if key in self.batch_params: UpperCAmelCase : List[str] =batch_size * [inputs[key]] UpperCAmelCase : List[Any] =pipe(**snake_case__ , num_images_per_prompt=snake_case__ )[0] assert images.shape[0] == batch_size * num_images_per_prompt @slow @require_torch_gpu class __snake_case ( unittest.TestCase ): def UpperCAmelCase__ ( self ) -> Dict: '''simple docstring''' super().tearDown() gc.collect() torch.cuda.empty_cache() def UpperCAmelCase__ ( self ) -> Dict: '''simple docstring''' UpperCAmelCase : Dict =load_numpy( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main''' '''/shap_e/test_shap_e_np_out.npy''' ) UpperCAmelCase : int =ShapEPipeline.from_pretrained('''openai/shap-e''' ) UpperCAmelCase : Optional[Any] =pipe.to(snake_case__ ) pipe.set_progress_bar_config(disable=snake_case__ ) UpperCAmelCase : Dict =torch.Generator(device=snake_case__ ).manual_seed(0 ) UpperCAmelCase : Optional[Any] =pipe( '''a shark''' , generator=snake_case__ , guidance_scale=15.0 , num_inference_steps=64 , frame_size=64 , output_type='''np''' , ).images[0] assert images.shape == (20, 64, 64, 3) assert_mean_pixel_difference(snake_case__ , snake_case__ )
348
import argparse import intel_extension_for_pytorch as ipex import torch from diffusers import DPMSolverMultistepScheduler, StableDiffusionPipeline __snake_case = argparse.ArgumentParser('''Stable Diffusion script with intel optimization''', add_help=False) parser.add_argument('''--dpm''', action='''store_true''', help='''Enable DPMSolver or not''') parser.add_argument('''--steps''', default=None, type=int, help='''Num inference steps''') __snake_case = parser.parse_args() __snake_case = '''cpu''' __snake_case = '''a lovely <dicoo> in red dress and hat, in the snowly and brightly night, with many brighly buildings''' __snake_case = '''path-to-your-trained-model''' __snake_case = StableDiffusionPipeline.from_pretrained(model_id) if args.dpm: __snake_case = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config) __snake_case = pipe.to(device) # to channels last __snake_case = pipe.unet.to(memory_format=torch.channels_last) __snake_case = pipe.vae.to(memory_format=torch.channels_last) __snake_case = pipe.text_encoder.to(memory_format=torch.channels_last) if pipe.requires_safety_checker: __snake_case = pipe.safety_checker.to(memory_format=torch.channels_last) # optimize with ipex __snake_case = torch.randn(2, 4, 64, 64) __snake_case = torch.rand(1) * 9_99 __snake_case = torch.randn(2, 77, 7_68) __snake_case = (sample, timestep, encoder_hidden_status) try: __snake_case = ipex.optimize(pipe.unet.eval(), dtype=torch.bfloataa, inplace=True, sample_input=input_example) except Exception: __snake_case = ipex.optimize(pipe.unet.eval(), dtype=torch.bfloataa, inplace=True) __snake_case = ipex.optimize(pipe.vae.eval(), dtype=torch.bfloataa, inplace=True) __snake_case = ipex.optimize(pipe.text_encoder.eval(), dtype=torch.bfloataa, inplace=True) if pipe.requires_safety_checker: __snake_case = ipex.optimize(pipe.safety_checker.eval(), dtype=torch.bfloataa, inplace=True) # compute __snake_case = 6_66 __snake_case = torch.Generator(device).manual_seed(seed) __snake_case = {'''generator''': generator} if args.steps is not None: __snake_case = args.steps with torch.cpu.amp.autocast(enabled=True, dtype=torch.bfloataa): __snake_case = pipe(prompt, **generate_kwargs).images[0] # save image image.save('''generated.png''')
348
1
import warnings from ...utils import logging from .image_processing_segformer import SegformerImageProcessor __snake_case = logging.get_logger(__name__) class __snake_case ( lowerCamelCase__ ): def __init__( self , *snake_case__ , **snake_case__ ) -> None: '''simple docstring''' warnings.warn( '''The class SegformerFeatureExtractor is deprecated and will be removed in version 5 of Transformers.''' ''' Please use SegformerImageProcessor instead.''' , snake_case__ , ) super().__init__(*snake_case__ , **snake_case__ )
348
__snake_case = '''Input must be a string of 8 numbers plus letter''' __snake_case = '''TRWAGMYFPDXBNJZSQVHLCKE''' def lowerCAmelCase_ ( __lowerCAmelCase )-> bool: '''simple docstring''' if not isinstance(__lowerCAmelCase , __lowerCAmelCase ): UpperCAmelCase : Optional[Any] =f'''Expected string as input, found {type(__lowerCAmelCase ).__name__}''' raise TypeError(__lowerCAmelCase ) UpperCAmelCase : List[Any] =spanish_id.replace('''-''' , '''''' ).upper() if len(__lowerCAmelCase ) != 9: raise ValueError(__lowerCAmelCase ) try: UpperCAmelCase : int =int(spanish_id_clean[0:8] ) UpperCAmelCase : Optional[int] =spanish_id_clean[8] except ValueError as ex: raise ValueError(__lowerCAmelCase ) from ex if letter.isdigit(): raise ValueError(__lowerCAmelCase ) return letter == LOOKUP_LETTERS[number % 23] if __name__ == "__main__": import doctest doctest.testmod()
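# Editor's note: illustrative sketch, not part of the dataset row. The
# validator above checks a Spanish DNI: the control letter must equal
# LOOKUP_LETTERS[number % 23]. Worked example for "12345678Z":
LOOKUP_LETTERS = "TRWAGMYFPDXBNJZSQVHLCKE"
number = 12345678
assert number % 23 == 14
assert LOOKUP_LETTERS[14] == "Z"  # so "12345678Z" is a valid DNI
assert LOOKUP_LETTERS[12345677 % 23] != "Z"  # a different number needs a different letter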
348
1
# DISCLAIMER: This file is strongly influenced by https://github.com/yang-song/score_sde_pytorch import math from typing import Union import torch from ..configuration_utils import ConfigMixin, register_to_config from ..utils import randn_tensor from .scheduling_utils import SchedulerMixin class __snake_case ( lowerCamelCase__ , lowerCamelCase__ ): __lowerCamelCase : Union[str, Any] = 1 @register_to_config def __init__( self , snake_case__=2000 , snake_case__=0.1 , snake_case__=20 , snake_case__=1e-3 ) -> int: '''simple docstring''' UpperCAmelCase : Any =None UpperCAmelCase : Optional[Any] =None UpperCAmelCase : Optional[int] =None def UpperCAmelCase__ ( self , snake_case__ , snake_case__ = None ) -> List[Any]: '''simple docstring''' UpperCAmelCase : Union[str, Any] =torch.linspace(1 , self.config.sampling_eps , snake_case__ , device=snake_case__ ) def UpperCAmelCase__ ( self , snake_case__ , snake_case__ , snake_case__ , snake_case__=None ) -> Any: '''simple docstring''' if self.timesteps is None: raise ValueError( '''`self.timesteps` is not set, you need to run \'set_timesteps\' after creating the scheduler''' ) # TODO(Patrick) better comments + non-PyTorch # postprocess model score UpperCAmelCase : Optional[Any] =( -0.25 * t**2 * (self.config.beta_max - self.config.beta_min) - 0.5 * t * self.config.beta_min ) UpperCAmelCase : List[Any] =torch.sqrt(1.0 - torch.exp(2.0 * log_mean_coeff ) ) UpperCAmelCase : Dict =std.flatten() while len(std.shape ) < len(score.shape ): UpperCAmelCase : Optional[Any] =std.unsqueeze(-1 ) UpperCAmelCase : Tuple =-score / std # compute UpperCAmelCase : Optional[Any] =-1.0 / len(self.timesteps ) UpperCAmelCase : Optional[Any] =self.config.beta_min + t * (self.config.beta_max - self.config.beta_min) UpperCAmelCase : Dict =beta_t.flatten() while len(beta_t.shape ) < len(x.shape ): UpperCAmelCase : List[Any] =beta_t.unsqueeze(-1 ) UpperCAmelCase : int =-0.5 * beta_t * x UpperCAmelCase : Optional[Any] =torch.sqrt(snake_case__ ) UpperCAmelCase : Optional[int] =drift - diffusion**2 * score UpperCAmelCase : Dict =x + drift * dt # add noise UpperCAmelCase : Dict =randn_tensor(x.shape , layout=x.layout , generator=snake_case__ , device=x.device , dtype=x.dtype ) UpperCAmelCase : Union[str, Any] =x_mean + diffusion * math.sqrt(-dt ) * noise return x, x_mean def __len__( self ) -> Dict: '''simple docstring''' return self.config.num_train_timesteps
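# Editor's note: illustrative sketch, not part of the dataset row. The
# scheduler above performs one Euler-Maruyama step of the reverse-time
# variance-preserving SDE (Song et al., "Score-Based Generative Modeling
# through Stochastic Differential Equations"). In its notation:
#
#   log_mean_coeff(t) = -1/4 * t^2 * (beta_max - beta_min) - 1/2 * t * beta_min
#   std(t)            = sqrt(1 - exp(2 * log_mean_coeff(t)))
#   score             = -model_output / std(t)           # rescale to a score
#   drift             = -1/2 * beta(t) * x - beta(t) * score
#   x_mean            = x + drift * dt                   # deterministic part
#   x                 = x_mean + sqrt(beta(t)) * sqrt(-dt) * noise
#
# dt is negative (time runs from 1 down to sampling_eps), hence sqrt(-dt).
# Tiny numeric check of the std formula at t = 1 with the default betas:
import math

beta_min, beta_max, t = 0.1, 20.0, 1.0
log_mean_coeff = -0.25 * t**2 * (beta_max - beta_min) - 0.5 * t * beta_min
std = math.sqrt(1.0 - math.exp(2.0 * log_mean_coeff))
assert abs(std - 1.0) < 1e-4  # at t = 1 the marginal is (almost) pure noise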
348
def lowerCAmelCase_ ( __lowerCAmelCase , __lowerCAmelCase )-> str: '''simple docstring''' if number < 0 or shift_amount < 0: raise ValueError('''both inputs must be positive integers''' ) UpperCAmelCase : Dict =str(bin(__lowerCAmelCase ) ) binary_number += "0" * shift_amount return binary_number def lowerCAmelCase_ ( __lowerCAmelCase , __lowerCAmelCase )-> str: '''simple docstring''' if number < 0 or shift_amount < 0: raise ValueError('''both inputs must be positive integers''' ) UpperCAmelCase : Any =str(bin(__lowerCAmelCase ) )[2:] if shift_amount >= len(__lowerCAmelCase ): return "0b0" UpperCAmelCase : Optional[Any] =binary_number[: len(__lowerCAmelCase ) - shift_amount] return "0b" + shifted_binary_number def lowerCAmelCase_ ( __lowerCAmelCase , __lowerCAmelCase )-> str: '''simple docstring''' if number >= 0: # Get binary representation of positive number UpperCAmelCase : Optional[Any] ='''0''' + str(bin(__lowerCAmelCase ) ).strip('''-''' )[2:] else: # Get binary (2's complement) representation of negative number UpperCAmelCase : int =len(bin(__lowerCAmelCase )[3:] ) # Find 2's complement of number UpperCAmelCase : Any =bin(abs(__lowerCAmelCase ) - (1 << binary_number_length) )[3:] UpperCAmelCase : Optional[Any] =( '''1''' + '''0''' * (binary_number_length - len(__lowerCAmelCase )) + binary_number ) if shift_amount >= len(__lowerCAmelCase ): return "0b" + binary_number[0] * len(__lowerCAmelCase ) return ( "0b" + binary_number[0] * shift_amount + binary_number[: len(__lowerCAmelCase ) - shift_amount] ) if __name__ == "__main__": import doctest doctest.testmod()
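# Editor's note: illustrative sketch, not part of the dataset row. The three
# functions above emulate <<, logical >>, and arithmetic >> on Python's
# "0b"-prefixed binary strings. Expected behaviour, with editor-chosen names
# standing in for the obfuscated ones:
#
#   logical_left_shift(5, 2)       -> "0b10100"  (5 << 2 == 20)
#   logical_right_shift(8, 2)      -> "0b10"     (8 >> 2 == 2)
#   logical_right_shift(8, 10)     -> "0b0"      (shifted past the bit width)
#   arithmetic_right_shift(-8, 2)  -> "0b11110"  (sign bit replicated on the
#                                     left; 5-bit two's complement for -2)
assert 5 << 2 == 20 and 8 >> 2 == 2 and -8 >> 2 == -2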
348
1
from __future__ import annotations def lowerCAmelCase_ ( __lowerCAmelCase )-> int: '''simple docstring''' for i in range(1 , len(matrix[0] ) ): matrix[0][i] += matrix[0][i - 1] # preprocessing the first column for i in range(1 , len(__lowerCAmelCase ) ): matrix[i][0] += matrix[i - 1][0] # updating the path cost for current position for i in range(1 , len(__lowerCAmelCase ) ): for j in range(1 , len(matrix[0] ) ): matrix[i][j] += min(matrix[i - 1][j] , matrix[i][j - 1] ) return matrix[-1][-1] if __name__ == "__main__": import doctest doctest.testmod()
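# Editor's note: illustrative sketch, not part of the dataset row. The row
# above is the classic minimum-path-sum DP on a grid (moves: right or down),
# computed in place: each cell becomes the cheapest cost of reaching it, and
# the answer is the bottom-right cell. Restatement with an editor-chosen
# name, plus a worked 3x3 example:
def min_path_sum(matrix: list[list[int]]) -> int:
    for i in range(1, len(matrix[0])):
        matrix[0][i] += matrix[0][i - 1]
    for i in range(1, len(matrix)):
        matrix[i][0] += matrix[i - 1][0]
    for i in range(1, len(matrix)):
        for j in range(1, len(matrix[0])):
            matrix[i][j] += min(matrix[i - 1][j], matrix[i][j - 1])
    return matrix[-1][-1]

# path 1 -> 3 -> 1 -> 1 -> 1 costs 7
assert min_path_sum([[1, 3, 1], [1, 5, 1], [4, 2, 1]]) == 7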
348
from dataclasses import asdict, dataclass from typing import Optional from ...configuration_utils import PretrainedConfig from ...utils import logging __snake_case = logging.get_logger(__name__) # TODO Update this __snake_case = { '''facebook/esm-1b''': '''https://huggingface.co/facebook/esm-1b/resolve/main/config.json''', # See all ESM models at https://huggingface.co/models?filter=esm } class __snake_case ( lowerCamelCase__ ): __lowerCamelCase : Tuple = """esm""" def __init__( self , snake_case__=None , snake_case__=None , snake_case__=None , snake_case__=768 , snake_case__=12 , snake_case__=12 , snake_case__=3072 , snake_case__=0.1 , snake_case__=0.1 , snake_case__=1026 , snake_case__=0.02 , snake_case__=1e-12 , snake_case__="absolute" , snake_case__=True , snake_case__=None , snake_case__=False , snake_case__=False , snake_case__=None , snake_case__=None , **snake_case__ , ) -> Union[str, Any]: '''simple docstring''' super().__init__(pad_token_id=snake_case__ , mask_token_id=snake_case__ , **snake_case__ ) UpperCAmelCase : List[str] =vocab_size UpperCAmelCase : str =hidden_size UpperCAmelCase : List[Any] =num_hidden_layers UpperCAmelCase : Optional[Any] =num_attention_heads UpperCAmelCase : str =intermediate_size UpperCAmelCase : Any =hidden_dropout_prob UpperCAmelCase : int =attention_probs_dropout_prob UpperCAmelCase : Dict =max_position_embeddings UpperCAmelCase : List[str] =initializer_range UpperCAmelCase : Union[str, Any] =layer_norm_eps UpperCAmelCase : Dict =position_embedding_type UpperCAmelCase : Optional[Any] =use_cache UpperCAmelCase : int =emb_layer_norm_before UpperCAmelCase : List[str] =token_dropout UpperCAmelCase : Optional[Any] =is_folding_model if is_folding_model: if esmfold_config is None: logger.info('''No esmfold_config supplied for folding model, using default values.''' ) UpperCAmelCase : Optional[Any] =EsmFoldConfig() elif isinstance(snake_case__ , snake_case__ ): UpperCAmelCase : Optional[int] =EsmFoldConfig(**snake_case__ ) UpperCAmelCase : Tuple =esmfold_config if vocab_list is None: logger.warning('''No vocab_list supplied for folding model, assuming the ESM-2 vocabulary!''' ) UpperCAmelCase : Any =get_default_vocab_list() else: UpperCAmelCase : Tuple =vocab_list else: UpperCAmelCase : Optional[int] =None UpperCAmelCase : Union[str, Any] =None if self.esmfold_config is not None and getattr(self.esmfold_config , '''use_esm_attn_map''' , snake_case__ ): raise ValueError('''The HuggingFace port of ESMFold does not support use_esm_attn_map at this time!''' ) def UpperCAmelCase__ ( self ) -> List[Any]: '''simple docstring''' UpperCAmelCase : Union[str, Any] =super().to_dict() if isinstance(self.esmfold_config , snake_case__ ): UpperCAmelCase : str =self.esmfold_config.to_dict() return output @dataclass class __snake_case : __lowerCamelCase : str = None __lowerCamelCase : bool = True __lowerCamelCase : bool = False __lowerCamelCase : bool = False __lowerCamelCase : bool = False __lowerCamelCase : float = 0 __lowerCamelCase : bool = True __lowerCamelCase : bool = False __lowerCamelCase : int = 128 __lowerCamelCase : "TrunkConfig" = None def UpperCAmelCase__ ( self ) -> Union[str, Any]: '''simple docstring''' if self.trunk is None: UpperCAmelCase : str =TrunkConfig() elif isinstance(self.trunk , snake_case__ ): UpperCAmelCase : Optional[int] =TrunkConfig(**self.trunk ) def UpperCAmelCase__ ( self ) -> Any: '''simple docstring''' UpperCAmelCase : Optional[Any] =asdict(self ) UpperCAmelCase : Any =self.trunk.to_dict() return output @dataclass class __snake_case : 
__lowerCamelCase : int = 48 __lowerCamelCase : int = 1024 __lowerCamelCase : int = 128 __lowerCamelCase : int = 32 __lowerCamelCase : int = 32 __lowerCamelCase : int = 32 __lowerCamelCase : float = 0 __lowerCamelCase : float = 0 __lowerCamelCase : bool = False __lowerCamelCase : int = 4 __lowerCamelCase : Optional[int] = 128 __lowerCamelCase : "StructureModuleConfig" = None def UpperCAmelCase__ ( self ) -> List[str]: '''simple docstring''' if self.structure_module is None: UpperCAmelCase : Any =StructureModuleConfig() elif isinstance(self.structure_module , snake_case__ ): UpperCAmelCase : str =StructureModuleConfig(**self.structure_module ) if self.max_recycles <= 0: raise ValueError(f'''`max_recycles` should be positive, got {self.max_recycles}.''' ) if self.sequence_state_dim % self.sequence_state_dim != 0: raise ValueError( '''`sequence_state_dim` should be a round multiple of `sequence_state_dim`, got''' f''' {self.sequence_state_dim} and {self.sequence_state_dim}.''' ) if self.pairwise_state_dim % self.pairwise_state_dim != 0: raise ValueError( '''`pairwise_state_dim` should be a round multiple of `pairwise_state_dim`, got''' f''' {self.pairwise_state_dim} and {self.pairwise_state_dim}.''' ) UpperCAmelCase : Optional[int] =self.sequence_state_dim // self.sequence_head_width UpperCAmelCase : Any =self.pairwise_state_dim // self.pairwise_head_width if self.sequence_state_dim != sequence_num_heads * self.sequence_head_width: raise ValueError( '''`sequence_state_dim` should be equal to `sequence_num_heads * sequence_head_width, got''' f''' {self.sequence_state_dim} != {sequence_num_heads} * {self.sequence_head_width}.''' ) if self.pairwise_state_dim != pairwise_num_heads * self.pairwise_head_width: raise ValueError( '''`pairwise_state_dim` should be equal to `pairwise_num_heads * pairwise_head_width, got''' f''' {self.pairwise_state_dim} != {pairwise_num_heads} * {self.pairwise_head_width}.''' ) if self.pairwise_state_dim % 2 != 0: raise ValueError(f'''`pairwise_state_dim` should be even, got {self.pairwise_state_dim}.''' ) if self.dropout >= 0.4: raise ValueError(f'''`dropout` should not be greater than 0.4, got {self.dropout}.''' ) def UpperCAmelCase__ ( self ) -> List[Any]: '''simple docstring''' UpperCAmelCase : Union[str, Any] =asdict(self ) UpperCAmelCase : Tuple =self.structure_module.to_dict() return output @dataclass class __snake_case : __lowerCamelCase : int = 384 __lowerCamelCase : int = 128 __lowerCamelCase : int = 16 __lowerCamelCase : int = 128 __lowerCamelCase : int = 12 __lowerCamelCase : int = 4 __lowerCamelCase : int = 8 __lowerCamelCase : float = 0.1 __lowerCamelCase : int = 8 __lowerCamelCase : int = 1 __lowerCamelCase : int = 2 __lowerCamelCase : int = 7 __lowerCamelCase : int = 10 __lowerCamelCase : float = 1E-8 __lowerCamelCase : float = 1E5 def UpperCAmelCase__ ( self ) -> Union[str, Any]: '''simple docstring''' return asdict(self ) def lowerCAmelCase_ ( )-> Tuple: '''simple docstring''' return ( "<cls>", "<pad>", "<eos>", "<unk>", "L", "A", "G", "V", "S", "E", "R", "T", "I", "D", "P", "K", "Q", "N", "F", "Y", "M", "H", "W", "C", "X", "B", "U", "Z", "O", ".", "-", "<null_1>", "<mask>", )
348
1
def logical_left_shift(number: int, shift_amount: int) -> str:
    """Shift 'number' left by 'shift_amount' bits; zeros fill in from the right."""
    if number < 0 or shift_amount < 0:
        raise ValueError("both inputs must be positive integers")

    binary_number = str(bin(number))
    binary_number += "0" * shift_amount
    return binary_number


def logical_right_shift(number: int, shift_amount: int) -> str:
    """Shift 'number' right by 'shift_amount' bits, dropping the low bits."""
    if number < 0 or shift_amount < 0:
        raise ValueError("both inputs must be positive integers")

    binary_number = str(bin(number))[2:]
    if shift_amount >= len(binary_number):
        return "0b0"
    shifted_binary_number = binary_number[: len(binary_number) - shift_amount]
    return "0b" + shifted_binary_number


def arithmetic_right_shift(number: int, shift_amount: int) -> str:
    """Shift 'number' right by 'shift_amount' bits, replicating the sign bit."""
    if number >= 0:  # Get binary representation of positive number
        binary_number = "0" + str(bin(number)).strip("-")[2:]
    else:  # Get binary (2's complement) representation of negative number
        binary_number_length = len(bin(number)[3:])  # Find 2's complement of number
        binary_number = bin(abs(number) - (1 << binary_number_length))[3:]
        binary_number = (
            "1" + "0" * (binary_number_length - len(binary_number)) + binary_number
        )

    if shift_amount >= len(binary_number):
        return "0b" + binary_number[0] * len(binary_number)
    return (
        "0b"
        + binary_number[0] * shift_amount
        + binary_number[: len(binary_number) - shift_amount]
    )


if __name__ == "__main__":
    import doctest

    doctest.testmod()
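A few worked calls against the helpers above; note that every return value keeps a "0b" prefix:

print(logical_left_shift(0b1010, 2))    # '0b101000'
print(logical_right_shift(0b1010, 2))   # '0b10'
print(arithmetic_right_shift(-8, 2))    # '0b11110' (sign bit replicated)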
348
import torch from diffusers import KDPMaDiscreteScheduler from diffusers.utils import torch_device from .test_schedulers import SchedulerCommonTest class __snake_case ( lowerCamelCase__ ): __lowerCamelCase : Optional[int] = (KDPMaDiscreteScheduler,) __lowerCamelCase : List[str] = 10 def UpperCAmelCase__ ( self , **snake_case__ ) -> str: '''simple docstring''' UpperCAmelCase : int ={ '''num_train_timesteps''': 1100, '''beta_start''': 0.0001, '''beta_end''': 0.02, '''beta_schedule''': '''linear''', } config.update(**snake_case__ ) return config def UpperCAmelCase__ ( self ) -> Tuple: '''simple docstring''' for timesteps in [10, 50, 100, 1000]: self.check_over_configs(num_train_timesteps=snake_case__ ) def UpperCAmelCase__ ( self ) -> Optional[int]: '''simple docstring''' for beta_start, beta_end in zip([0.0_0001, 0.0001, 0.001] , [0.0002, 0.002, 0.02] ): self.check_over_configs(beta_start=snake_case__ , beta_end=snake_case__ ) def UpperCAmelCase__ ( self ) -> str: '''simple docstring''' for schedule in ["linear", "scaled_linear"]: self.check_over_configs(beta_schedule=snake_case__ ) def UpperCAmelCase__ ( self ) -> Dict: '''simple docstring''' for prediction_type in ["epsilon", "v_prediction"]: self.check_over_configs(prediction_type=snake_case__ ) def UpperCAmelCase__ ( self ) -> str: '''simple docstring''' UpperCAmelCase : Optional[Any] =self.scheduler_classes[0] UpperCAmelCase : Optional[int] =self.get_scheduler_config(prediction_type='''v_prediction''' ) UpperCAmelCase : Optional[Any] =scheduler_class(**snake_case__ ) scheduler.set_timesteps(self.num_inference_steps ) UpperCAmelCase : str =self.dummy_model() UpperCAmelCase : Optional[Any] =self.dummy_sample_deter * scheduler.init_noise_sigma UpperCAmelCase : Union[str, Any] =sample.to(snake_case__ ) for i, t in enumerate(scheduler.timesteps ): UpperCAmelCase : str =scheduler.scale_model_input(snake_case__ , snake_case__ ) UpperCAmelCase : Any =model(snake_case__ , snake_case__ ) UpperCAmelCase : Union[str, Any] =scheduler.step(snake_case__ , snake_case__ , snake_case__ ) UpperCAmelCase : int =output.prev_sample UpperCAmelCase : Dict =torch.sum(torch.abs(snake_case__ ) ) UpperCAmelCase : Optional[Any] =torch.mean(torch.abs(snake_case__ ) ) if torch_device in ["cpu", "mps"]: assert abs(result_sum.item() - 4.69_34e-07 ) < 1e-2 assert abs(result_mean.item() - 6.11_12e-10 ) < 1e-3 else: # CUDA assert abs(result_sum.item() - 4.6_93_42_86_50_17_09_72e-07 ) < 1e-2 assert abs(result_mean.item() - 0.0002 ) < 1e-3 def UpperCAmelCase__ ( self ) -> int: '''simple docstring''' if torch_device == "mps": return UpperCAmelCase : Any =self.scheduler_classes[0] UpperCAmelCase : Optional[int] =self.get_scheduler_config() UpperCAmelCase : Optional[Any] =scheduler_class(**snake_case__ ) scheduler.set_timesteps(self.num_inference_steps ) UpperCAmelCase : Optional[int] =self.dummy_model() UpperCAmelCase : Union[str, Any] =self.dummy_sample_deter * scheduler.init_noise_sigma UpperCAmelCase : str =sample.to(snake_case__ ) for i, t in enumerate(scheduler.timesteps ): UpperCAmelCase : Dict =scheduler.scale_model_input(snake_case__ , snake_case__ ) UpperCAmelCase : Union[str, Any] =model(snake_case__ , snake_case__ ) UpperCAmelCase : List[str] =scheduler.step(snake_case__ , snake_case__ , snake_case__ ) UpperCAmelCase : Optional[int] =output.prev_sample UpperCAmelCase : Any =torch.sum(torch.abs(snake_case__ ) ) UpperCAmelCase : Union[str, Any] =torch.mean(torch.abs(snake_case__ ) ) if torch_device in ["cpu", "mps"]: assert abs(result_sum.item() - 20.4125 ) < 1e-2 
assert abs(result_mean.item() - 0.0266 ) < 1e-3 else: # CUDA assert abs(result_sum.item() - 20.4125 ) < 1e-2 assert abs(result_mean.item() - 0.0266 ) < 1e-3 def UpperCAmelCase__ ( self ) -> str: '''simple docstring''' if torch_device == "mps": return UpperCAmelCase : List[Any] =self.scheduler_classes[0] UpperCAmelCase : Dict =self.get_scheduler_config() UpperCAmelCase : List[str] =scheduler_class(**snake_case__ ) scheduler.set_timesteps(self.num_inference_steps , device=snake_case__ ) UpperCAmelCase : int =self.dummy_model() UpperCAmelCase : Tuple =self.dummy_sample_deter.to(snake_case__ ) * scheduler.init_noise_sigma for t in scheduler.timesteps: UpperCAmelCase : Optional[Any] =scheduler.scale_model_input(snake_case__ , snake_case__ ) UpperCAmelCase : int =model(snake_case__ , snake_case__ ) UpperCAmelCase : str =scheduler.step(snake_case__ , snake_case__ , snake_case__ ) UpperCAmelCase : List[str] =output.prev_sample UpperCAmelCase : List[str] =torch.sum(torch.abs(snake_case__ ) ) UpperCAmelCase : Dict =torch.mean(torch.abs(snake_case__ ) ) if str(snake_case__ ).startswith('''cpu''' ): # The following sum varies between 148 and 156 on mps. Why? assert abs(result_sum.item() - 20.4125 ) < 1e-2 assert abs(result_mean.item() - 0.0266 ) < 1e-3 else: # CUDA assert abs(result_sum.item() - 20.4125 ) < 1e-2 assert abs(result_mean.item() - 0.0266 ) < 1e-3
348
1
from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_tf_available, is_torch_available, is_vision_available, ) __snake_case = {'''configuration_vit''': ['''VIT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''ViTConfig''', '''ViTOnnxConfig''']} try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __snake_case = ['''ViTFeatureExtractor'''] __snake_case = ['''ViTImageProcessor'''] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __snake_case = [ '''VIT_PRETRAINED_MODEL_ARCHIVE_LIST''', '''ViTForImageClassification''', '''ViTForMaskedImageModeling''', '''ViTModel''', '''ViTPreTrainedModel''', ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __snake_case = [ '''TFViTForImageClassification''', '''TFViTModel''', '''TFViTPreTrainedModel''', ] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __snake_case = [ '''FlaxViTForImageClassification''', '''FlaxViTModel''', '''FlaxViTPreTrainedModel''', ] if TYPE_CHECKING: from .configuration_vit import VIT_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTConfig, ViTOnnxConfig try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .feature_extraction_vit import ViTFeatureExtractor from .image_processing_vit import ViTImageProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_vit import ( VIT_PRETRAINED_MODEL_ARCHIVE_LIST, ViTForImageClassification, ViTForMaskedImageModeling, ViTModel, ViTPreTrainedModel, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_vit import TFViTForImageClassification, TFViTModel, TFViTPreTrainedModel try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_vit import FlaxViTForImageClassification, FlaxViTModel, FlaxViTPreTrainedModel else: import sys __snake_case = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
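For context, a short sketch of what the lazy structure above buys: nothing under the vit package is actually imported until an attribute is first touched. This assumes the standard transformers package layout:

from transformers import ViTConfig  # resolved through the _LazyModule machinery

config = ViTConfig()       # only now is configuration_vit imported
print(config.hidden_size)  # 768 by default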
348
import unittest

from transformers import is_flax_available
from transformers.testing_utils import require_flax, require_sentencepiece, require_tokenizers, require_torch, slow


if is_flax_available():
    import optax
    from flax.training.common_utils import onehot

    from transformers import AutoTokenizer, FlaxMT5ForConditionalGeneration
    from transformers.models.t5.modeling_flax_t5 import shift_tokens_right


@require_torch
@require_sentencepiece
@require_tokenizers
@require_flax
class MT5IntegrationTest(unittest.TestCase):
    @slow
    def test_small_integration_test(self):
        model = FlaxMT5ForConditionalGeneration.from_pretrained("google/mt5-small")
        tokenizer = AutoTokenizer.from_pretrained("google/mt5-small")

        input_ids = tokenizer("Hello there", return_tensors="np").input_ids
        labels = tokenizer("Hi I am", return_tensors="np").input_ids

        decoder_input_ids = shift_tokens_right(labels, model.config.pad_token_id, model.config.decoder_start_token_id)

        logits = model(input_ids, decoder_input_ids=decoder_input_ids).logits
        loss = optax.softmax_cross_entropy(logits, onehot(labels, logits.shape[-1])).mean()

        mtf_score = -(labels.shape[-1] * loss.item())

        EXPECTED_SCORE = -84.9127
        self.assertTrue(abs(mtf_score - EXPECTED_SCORE) < 1e-4)
348
1
ENERGY_CONVERSION = {
    "joule": 1.0,
    "kilojoule": 1_000,
    "megajoule": 1_000_000,
    "gigajoule": 1_000_000_000,
    "wattsecond": 1.0,
    "watthour": 3_600,
    "kilowatthour": 3_600_000,
    "newtonmeter": 1.0,
    "calorie_nutr": 4_186.8,
    "kilocalorie_nutr": 4_186_800.00,
    "electronvolt": 1.602_176_634e-19,
    "britishthermalunit_it": 1_055.055_85,
    "footpound": 1.355_818,
}


def energy_conversion(from_type: str, to_type: str, value: float) -> float:
    """Convert 'value' between any two units listed in ENERGY_CONVERSION."""
    if to_type not in ENERGY_CONVERSION or from_type not in ENERGY_CONVERSION:
        msg = (
            f"Incorrect 'from_type' or 'to_type' value: {from_type!r}, {to_type!r}\n"
            f"Valid values are: {', '.join(ENERGY_CONVERSION)}"
        )
        raise ValueError(msg)
    return value * ENERGY_CONVERSION[from_type] / ENERGY_CONVERSION[to_type]


if __name__ == "__main__":
    import doctest

    doctest.testmod()
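Two conversions to sanity-check the table (the rule is value times from-factor divided by to-factor):

print(energy_conversion("joule", "kilojoule", 1000))  # 1.0
print(energy_conversion("kilowatthour", "joule", 1))  # 3600000.0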
348
import unittest import numpy as np from diffusers import LMSDiscreteScheduler, OnnxStableDiffusionInpaintPipeline from diffusers.utils.testing_utils import ( is_onnx_available, load_image, nightly, require_onnxruntime, require_torch_gpu, ) from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin if is_onnx_available(): import onnxruntime as ort class __snake_case ( lowerCamelCase__ , unittest.TestCase ): # FIXME: add fast tests pass @nightly @require_onnxruntime @require_torch_gpu class __snake_case ( unittest.TestCase ): @property def UpperCAmelCase__ ( self ) -> List[str]: '''simple docstring''' return ( "CUDAExecutionProvider", { "gpu_mem_limit": "15000000000", # 15GB "arena_extend_strategy": "kSameAsRequested", }, ) @property def UpperCAmelCase__ ( self ) -> int: '''simple docstring''' UpperCAmelCase : List[Any] =ort.SessionOptions() UpperCAmelCase : Optional[int] =False return options def UpperCAmelCase__ ( self ) -> Dict: '''simple docstring''' UpperCAmelCase : int =load_image( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main''' '''/in_paint/overture-creations-5sI6fQgYIuo.png''' ) UpperCAmelCase : Optional[Any] =load_image( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main''' '''/in_paint/overture-creations-5sI6fQgYIuo_mask.png''' ) UpperCAmelCase : List[str] =OnnxStableDiffusionInpaintPipeline.from_pretrained( '''runwayml/stable-diffusion-inpainting''' , revision='''onnx''' , safety_checker=snake_case__ , feature_extractor=snake_case__ , provider=self.gpu_provider , sess_options=self.gpu_options , ) pipe.set_progress_bar_config(disable=snake_case__ ) UpperCAmelCase : Dict ='''A red cat sitting on a park bench''' UpperCAmelCase : int =np.random.RandomState(0 ) UpperCAmelCase : Any =pipe( prompt=snake_case__ , image=snake_case__ , mask_image=snake_case__ , guidance_scale=7.5 , num_inference_steps=10 , generator=snake_case__ , output_type='''np''' , ) UpperCAmelCase : Dict =output.images UpperCAmelCase : Optional[int] =images[0, 255:258, 255:258, -1] assert images.shape == (1, 512, 512, 3) UpperCAmelCase : Tuple =np.array([0.2514, 0.3007, 0.3517, 0.1790, 0.2382, 0.3167, 0.1944, 0.2273, 0.2464] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3 def UpperCAmelCase__ ( self ) -> Optional[Any]: '''simple docstring''' UpperCAmelCase : List[str] =load_image( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main''' '''/in_paint/overture-creations-5sI6fQgYIuo.png''' ) UpperCAmelCase : Tuple =load_image( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main''' '''/in_paint/overture-creations-5sI6fQgYIuo_mask.png''' ) UpperCAmelCase : List[str] =LMSDiscreteScheduler.from_pretrained( '''runwayml/stable-diffusion-inpainting''' , subfolder='''scheduler''' , revision='''onnx''' ) UpperCAmelCase : int =OnnxStableDiffusionInpaintPipeline.from_pretrained( '''runwayml/stable-diffusion-inpainting''' , revision='''onnx''' , scheduler=snake_case__ , safety_checker=snake_case__ , feature_extractor=snake_case__ , provider=self.gpu_provider , sess_options=self.gpu_options , ) pipe.set_progress_bar_config(disable=snake_case__ ) UpperCAmelCase : Union[str, Any] ='''A red cat sitting on a park bench''' UpperCAmelCase : int =np.random.RandomState(0 ) UpperCAmelCase : str =pipe( prompt=snake_case__ , image=snake_case__ , mask_image=snake_case__ , guidance_scale=7.5 , num_inference_steps=20 , generator=snake_case__ , output_type='''np''' , ) UpperCAmelCase : 
Dict =output.images UpperCAmelCase : int =images[0, 255:258, 255:258, -1] assert images.shape == (1, 512, 512, 3) UpperCAmelCase : Union[str, Any] =np.array([0.0086, 0.0077, 0.0083, 0.0093, 0.0107, 0.0139, 0.0094, 0.0097, 0.0125] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
348
1
import argparse import os import re __snake_case = '''src/transformers''' # Pattern that looks at the indentation in a line. __snake_case = re.compile(r'''^(\s*)\S''') # Pattern that matches `"key":" and puts `key` in group 0. __snake_case = re.compile(r'''^\s*"([^"]+)":''') # Pattern that matches `_import_structure["key"]` and puts `key` in group 0. __snake_case = re.compile(r'''^\s*_import_structure\["([^"]+)"\]''') # Pattern that matches `"key",` and puts `key` in group 0. __snake_case = re.compile(r'''^\s*"([^"]+)",\s*$''') # Pattern that matches any `[stuff]` and puts `stuff` in group 0. __snake_case = re.compile(r'''\[([^\]]+)\]''') def lowerCAmelCase_ ( __lowerCAmelCase )-> str: '''simple docstring''' UpperCAmelCase : Any =_re_indent.search(__lowerCAmelCase ) return "" if search is None else search.groups()[0] def lowerCAmelCase_ ( __lowerCAmelCase , __lowerCAmelCase="" , __lowerCAmelCase=None , __lowerCAmelCase=None )-> str: '''simple docstring''' UpperCAmelCase : Optional[int] =0 UpperCAmelCase : Dict =code.split('''\n''' ) if start_prompt is not None: while not lines[index].startswith(__lowerCAmelCase ): index += 1 UpperCAmelCase : Optional[int] =['''\n'''.join(lines[:index] )] else: UpperCAmelCase : Union[str, Any] =[] # We split into blocks until we get to the `end_prompt` (or the end of the block). UpperCAmelCase : int =[lines[index]] index += 1 while index < len(__lowerCAmelCase ) and (end_prompt is None or not lines[index].startswith(__lowerCAmelCase )): if len(lines[index] ) > 0 and get_indent(lines[index] ) == indent_level: if len(__lowerCAmelCase ) > 0 and get_indent(current_block[-1] ).startswith(indent_level + ''' ''' ): current_block.append(lines[index] ) blocks.append('''\n'''.join(__lowerCAmelCase ) ) if index < len(__lowerCAmelCase ) - 1: UpperCAmelCase : Union[str, Any] =[lines[index + 1]] index += 1 else: UpperCAmelCase : Dict =[] else: blocks.append('''\n'''.join(__lowerCAmelCase ) ) UpperCAmelCase : Union[str, Any] =[lines[index]] else: current_block.append(lines[index] ) index += 1 # Adds current block if it's nonempty. if len(__lowerCAmelCase ) > 0: blocks.append('''\n'''.join(__lowerCAmelCase ) ) # Add final block after end_prompt if provided. if end_prompt is not None and index < len(__lowerCAmelCase ): blocks.append('''\n'''.join(lines[index:] ) ) return blocks def lowerCAmelCase_ ( __lowerCAmelCase )-> Optional[Any]: '''simple docstring''' def _inner(__lowerCAmelCase ): return key(__lowerCAmelCase ).lower().replace('''_''' , '''''' ) return _inner def lowerCAmelCase_ ( __lowerCAmelCase , __lowerCAmelCase=None )-> int: '''simple docstring''' def noop(__lowerCAmelCase ): return x if key is None: UpperCAmelCase : Optional[int] =noop # Constants are all uppercase, they go first. UpperCAmelCase : Any =[obj for obj in objects if key(__lowerCAmelCase ).isupper()] # Classes are not all uppercase but start with a capital, they go second. UpperCAmelCase : Tuple =[obj for obj in objects if key(__lowerCAmelCase )[0].isupper() and not key(__lowerCAmelCase ).isupper()] # Functions begin with a lowercase, they go last. 
UpperCAmelCase : int =[obj for obj in objects if not key(__lowerCAmelCase )[0].isupper()] UpperCAmelCase : Union[str, Any] =ignore_underscore(__lowerCAmelCase ) return sorted(__lowerCAmelCase , key=__lowerCAmelCase ) + sorted(__lowerCAmelCase , key=__lowerCAmelCase ) + sorted(__lowerCAmelCase , key=__lowerCAmelCase ) def lowerCAmelCase_ ( __lowerCAmelCase )-> Union[str, Any]: '''simple docstring''' def _replace(__lowerCAmelCase ): UpperCAmelCase : List[Any] =match.groups()[0] if "," not in imports: return f'''[{imports}]''' UpperCAmelCase : List[str] =[part.strip().replace('''"''' , '''''' ) for part in imports.split(''',''' )] # We will have a final empty element if the line finished with a comma. if len(keys[-1] ) == 0: UpperCAmelCase : Optional[Any] =keys[:-1] return "[" + ", ".join([f'''"{k}"''' for k in sort_objects(__lowerCAmelCase )] ) + "]" UpperCAmelCase : Dict =import_statement.split('''\n''' ) if len(__lowerCAmelCase ) > 3: # Here we have to sort internal imports that are on several lines (one per name): # key: [ # "object1", # "object2", # ... # ] # We may have to ignore one or two lines on each side. UpperCAmelCase : int =2 if lines[1].strip() == '''[''' else 1 UpperCAmelCase : List[Any] =[(i, _re_strip_line.search(__lowerCAmelCase ).groups()[0]) for i, line in enumerate(lines[idx:-idx] )] UpperCAmelCase : List[Any] =sort_objects(__lowerCAmelCase , key=lambda __lowerCAmelCase : x[1] ) UpperCAmelCase : Dict =[lines[x[0] + idx] for x in sorted_indices] return "\n".join(lines[:idx] + sorted_lines + lines[-idx:] ) elif len(__lowerCAmelCase ) == 3: # Here we have to sort internal imports that are on one separate line: # key: [ # "object1", "object2", ... # ] if _re_bracket_content.search(lines[1] ) is not None: UpperCAmelCase : Any =_re_bracket_content.sub(_replace , lines[1] ) else: UpperCAmelCase : List[str] =[part.strip().replace('''"''' , '''''' ) for part in lines[1].split(''',''' )] # We will have a final empty element if the line finished with a comma. if len(keys[-1] ) == 0: UpperCAmelCase : Optional[Any] =keys[:-1] UpperCAmelCase : Optional[int] =get_indent(lines[1] ) + ''', '''.join([f'''"{k}"''' for k in sort_objects(__lowerCAmelCase )] ) return "\n".join(__lowerCAmelCase ) else: # Finally we have to deal with imports fitting on one line UpperCAmelCase : Union[str, Any] =_re_bracket_content.sub(_replace , __lowerCAmelCase ) return import_statement def lowerCAmelCase_ ( __lowerCAmelCase , __lowerCAmelCase=True )-> Any: '''simple docstring''' with open(__lowerCAmelCase , encoding='''utf-8''' ) as f: UpperCAmelCase : Dict =f.read() if "_import_structure" not in code: return # Blocks of indent level 0 UpperCAmelCase : List[str] =split_code_in_indented_blocks( __lowerCAmelCase , start_prompt='''_import_structure = {''' , end_prompt='''if TYPE_CHECKING:''' ) # We ignore block 0 (everything untils start_prompt) and the last block (everything after end_prompt). for block_idx in range(1 , len(__lowerCAmelCase ) - 1 ): # Check if the block contains some `_import_structure`s thingy to sort. UpperCAmelCase : Dict =main_blocks[block_idx] UpperCAmelCase : Optional[Any] =block.split('''\n''' ) # Get to the start of the imports. 
UpperCAmelCase : List[Any] =0 while line_idx < len(__lowerCAmelCase ) and "_import_structure" not in block_lines[line_idx]: # Skip dummy import blocks if "import dummy" in block_lines[line_idx]: UpperCAmelCase : str =len(__lowerCAmelCase ) else: line_idx += 1 if line_idx >= len(__lowerCAmelCase ): continue # Ignore beginning and last line: they don't contain anything. UpperCAmelCase : Optional[Any] ='''\n'''.join(block_lines[line_idx:-1] ) UpperCAmelCase : Any =get_indent(block_lines[1] ) # Slit the internal block into blocks of indent level 1. UpperCAmelCase : List[str] =split_code_in_indented_blocks(__lowerCAmelCase , indent_level=__lowerCAmelCase ) # We have two categories of import key: list or _import_structure[key].append/extend UpperCAmelCase : Dict =_re_direct_key if '''_import_structure = {''' in block_lines[0] else _re_indirect_key # Grab the keys, but there is a trap: some lines are empty or just comments. UpperCAmelCase : Optional[Any] =[(pattern.search(__lowerCAmelCase ).groups()[0] if pattern.search(__lowerCAmelCase ) is not None else None) for b in internal_blocks] # We only sort the lines with a key. UpperCAmelCase : Tuple =[(i, key) for i, key in enumerate(__lowerCAmelCase ) if key is not None] UpperCAmelCase : str =[x[0] for x in sorted(__lowerCAmelCase , key=lambda __lowerCAmelCase : x[1] )] # We reorder the blocks by leaving empty lines/comments as they were and reorder the rest. UpperCAmelCase : str =0 UpperCAmelCase : Any =[] for i in range(len(__lowerCAmelCase ) ): if keys[i] is None: reorderded_blocks.append(internal_blocks[i] ) else: UpperCAmelCase : Optional[Any] =sort_objects_in_import(internal_blocks[sorted_indices[count]] ) reorderded_blocks.append(__lowerCAmelCase ) count += 1 # And we put our main block back together with its first and last line. UpperCAmelCase : Optional[Any] ='''\n'''.join(block_lines[:line_idx] + reorderded_blocks + [block_lines[-1]] ) if code != "\n".join(__lowerCAmelCase ): if check_only: return True else: print(f'''Overwriting {file}.''' ) with open(__lowerCAmelCase , '''w''' , encoding='''utf-8''' ) as f: f.write('''\n'''.join(__lowerCAmelCase ) ) def lowerCAmelCase_ ( __lowerCAmelCase=True )-> List[str]: '''simple docstring''' UpperCAmelCase : Union[str, Any] =[] for root, _, files in os.walk(__lowerCAmelCase ): if "__init__.py" in files: UpperCAmelCase : int =sort_imports(os.path.join(__lowerCAmelCase , '''__init__.py''' ) , check_only=__lowerCAmelCase ) if result: UpperCAmelCase : str =[os.path.join(__lowerCAmelCase , '''__init__.py''' )] if len(__lowerCAmelCase ) > 0: raise ValueError(f'''Would overwrite {len(__lowerCAmelCase )} files, run `make style`.''' ) if __name__ == "__main__": __snake_case = argparse.ArgumentParser() parser.add_argument('''--check_only''', action='''store_true''', help='''Whether to only check or fix style.''') __snake_case = parser.parse_args() sort_imports_in_all_inits(check_only=args.check_only)
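As a rough illustration of the ordering that sort_objects (the name its call sites above use) enforces: constants first, then classes, then functions, each group sorted case-insensitively with underscores ignored.

names = ["load_tf_weights", "ViTModel", "VIT_PRETRAINED_CONFIG_ARCHIVE_MAP"]
print(sort_objects(names))
# ['VIT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'ViTModel', 'load_tf_weights']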
348
from unittest import TestCase

from datasets import Dataset
from minhash_deduplication import deduplicate_dataset, make_duplicate_clusters


def get_dataset():
    data_dict = {
        "repo_name": ["test_repo1", "test_repo2", "test_repo3"],
        "path": ["test_1.py", "test_2.py", "unit_test.py"],
        "content": ["a " * 20, "a " * 30, "b " * 7],
    }
    dataset = Dataset.from_dict(data_dict)
    return dataset


class MakeDuplicateClustersTest(TestCase):
    def test_make_duplicate_clusters(self):
        ds = get_dataset()
        duplicate_clusters = make_duplicate_clusters(ds, 0.85)
        self.assertEqual(len(duplicate_clusters[0]), 2)

    def test_deduplicate_dataset(self):
        ds = get_dataset()
        ds_filter, duplicate_clusters = deduplicate_dataset(ds)
        self.assertEqual(len(ds_filter), 2)
        print(duplicate_clusters)
        self.assertEqual(duplicate_clusters[0][0]["copies"], 2)
        self.assertEqual(duplicate_clusters[0][0]["is_extreme"], True)
348
1
# Copyright (c) 2021-, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. #################################################################################################### # # Note: If when running this conversion script you're getting an exception: # ModuleNotFoundError: No module named 'megatron.model.enums' # you need to tell python where to find the clone of Megatron-LM, e.g.: # # cd /tmp # git clone https://github.com/NVIDIA/Megatron-LM # PYTHONPATH=/tmp/Megatron-LM python src/transformers/models/megatron_gpt2/convert_megatron_gpt2_checkpoint.py ... # # if you already have it cloned elsewhere, simply adjust the path to the existing path # # If the training was done using a Megatron-LM fork, e.g., # https://github.com/microsoft/Megatron-DeepSpeed/ then chances are that you need to have that one # in your path, i.e., /path/to/Megatron-DeepSpeed/ # import argparse import os import re import zipfile import torch from transformers import AutoTokenizer, GPTaConfig def lowerCAmelCase_ ( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase=0 )-> Union[str, Any]: '''simple docstring''' if name is None: UpperCAmelCase : Tuple =None else: UpperCAmelCase : Dict ='''.''' * max(0 , spaces - 2 ) + '''# {:''' + str(50 - spaces ) + '''s}''' UpperCAmelCase : Optional[Any] =fmt.format(__lowerCAmelCase ) # Print and recurse (if needed). 
if isinstance(__lowerCAmelCase , __lowerCAmelCase ): if msg is not None: print(__lowerCAmelCase ) for k in val.keys(): recursive_print(__lowerCAmelCase , val[k] , spaces + 2 ) elif isinstance(__lowerCAmelCase , torch.Tensor ): print(__lowerCAmelCase , ''':''' , val.size() ) else: print(__lowerCAmelCase , ''':''' , __lowerCAmelCase ) def lowerCAmelCase_ ( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )-> List[str]: '''simple docstring''' UpperCAmelCase : Tuple =param.size() if checkpoint_version == 1.0: # version 1.0 stores [num_heads * hidden_size * num_splits, :] UpperCAmelCase : Optional[Any] =(num_heads, hidden_size, num_splits) + input_shape[1:] UpperCAmelCase : Dict =param.view(*__lowerCAmelCase ) UpperCAmelCase : Optional[int] =param.transpose(0 , 2 ) UpperCAmelCase : Dict =param.transpose(1 , 2 ).contiguous() elif checkpoint_version >= 2.0: # other versions store [num_heads * num_splits * hidden_size, :] UpperCAmelCase : List[str] =(num_heads, num_splits, hidden_size) + input_shape[1:] UpperCAmelCase : List[Any] =param.view(*__lowerCAmelCase ) UpperCAmelCase : str =param.transpose(0 , 1 ).contiguous() UpperCAmelCase : Optional[int] =param.view(*__lowerCAmelCase ) return param def lowerCAmelCase_ ( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )-> Optional[Any]: '''simple docstring''' UpperCAmelCase : Optional[Any] ={} # old versions did not store training args UpperCAmelCase : List[str] =input_state_dict.get('''args''' , __lowerCAmelCase ) if ds_args is not None: # do not make the user write a config file when the exact dimensions/sizes are already in the checkpoint # from pprint import pprint # pprint(vars(ds_args)) UpperCAmelCase : List[str] =ds_args.padded_vocab_size UpperCAmelCase : Tuple =ds_args.max_position_embeddings UpperCAmelCase : List[str] =ds_args.hidden_size UpperCAmelCase : str =ds_args.num_layers UpperCAmelCase : Tuple =ds_args.num_attention_heads UpperCAmelCase : List[Any] =ds_args.ffn_hidden_size # pprint(config) # The number of heads. UpperCAmelCase : Optional[int] =config.n_head # The hidden_size per head. UpperCAmelCase : Any =config.n_embd // config.n_head # Megatron-LM checkpoint version if "checkpoint_version" in input_state_dict.keys(): UpperCAmelCase : Tuple =input_state_dict['''checkpoint_version'''] else: UpperCAmelCase : str =0.0 # The model. UpperCAmelCase : List[Any] =input_state_dict['''model'''] # The language model. UpperCAmelCase : int =model['''language_model'''] # The embeddings. UpperCAmelCase : List[Any] =lm['''embedding'''] # The word embeddings. UpperCAmelCase : List[str] =embeddings['''word_embeddings''']['''weight'''] # Truncate the embedding table to vocab_size rows. UpperCAmelCase : int =word_embeddings[: config.vocab_size, :] UpperCAmelCase : Any =word_embeddings # The position embeddings. UpperCAmelCase : Union[str, Any] =embeddings['''position_embeddings''']['''weight'''] # Read the causal mask dimension (seqlen). [max_sequence_length, hidden_size] UpperCAmelCase : List[Any] =pos_embeddings.size(0 ) if n_positions != config.n_positions: raise ValueError( f'''pos_embeddings.max_sequence_length={n_positions} and config.n_positions={config.n_positions} don\'t match''' ) # Store the position embeddings. UpperCAmelCase : Optional[Any] =pos_embeddings # The transformer. UpperCAmelCase : int =lm['''transformer'''] if '''transformer''' in lm.keys() else lm['''encoder'''] # The regex to extract layer names. 
UpperCAmelCase : Any =re.compile(R'''layers\.(\d+)\.([a-z0-9_.]+)\.([a-z]+)''' ) # The simple map of names for "automated" rules. UpperCAmelCase : Optional[Any] ={ '''attention.dense''': '''.attn.c_proj.''', '''self_attention.dense''': '''.attn.c_proj.''', '''mlp.dense_h_to_4h''': '''.mlp.c_fc.''', '''mlp.dense_4h_to_h''': '''.mlp.c_proj.''', } # Extract the layers. for key, val in transformer.items(): # Match the name. UpperCAmelCase : Tuple =layer_re.match(__lowerCAmelCase ) # Stop if that's not a layer if m is None: break # The index of the layer. UpperCAmelCase : str =int(m.group(1 ) ) # The name of the operation. UpperCAmelCase : Optional[Any] =m.group(2 ) # Is it a weight or a bias? UpperCAmelCase : int =m.group(3 ) # The name of the layer. UpperCAmelCase : int =f'''transformer.h.{layer_idx}''' # For layernorm(s), simply store the layer norm. if op_name.endswith('''layernorm''' ): UpperCAmelCase : List[Any] ='''ln_1''' if op_name.startswith('''input''' ) else '''ln_2''' UpperCAmelCase : int =val # Transpose the QKV matrix. elif ( op_name == "attention.query_key_value" or op_name == "self_attention.query_key_value" ) and weight_or_bias == "weight": # Insert a tensor of 1x1xDxD bias. UpperCAmelCase : List[Any] =torch.tril(torch.ones((n_positions, n_positions) , dtype=torch.floataa ) ).view( 1 , 1 , __lowerCAmelCase , __lowerCAmelCase ) UpperCAmelCase : Optional[Any] =causal_mask # Insert a "dummy" tensor for masked_bias. UpperCAmelCase : Optional[int] =torch.tensor(-1e4 , dtype=torch.floataa ) UpperCAmelCase : int =masked_bias UpperCAmelCase : Optional[Any] =fix_query_key_value_ordering(__lowerCAmelCase , __lowerCAmelCase , 3 , __lowerCAmelCase , __lowerCAmelCase ) # Megatron stores (3*D) x D but transformers-GPT2 expects D x 3*D. UpperCAmelCase : Any =out_val.transpose(0 , 1 ).contiguous() # Store. UpperCAmelCase : Optional[int] =out_val # Transpose the bias. elif ( op_name == "attention.query_key_value" or op_name == "self_attention.query_key_value" ) and weight_or_bias == "bias": UpperCAmelCase : int =fix_query_key_value_ordering(__lowerCAmelCase , __lowerCAmelCase , 3 , __lowerCAmelCase , __lowerCAmelCase ) # Store. No change of shape. UpperCAmelCase : Dict =out_val # Transpose the weights. elif weight_or_bias == "weight": UpperCAmelCase : Dict =megatron_to_transformers[op_name] UpperCAmelCase : Optional[Any] =val.transpose(0 , 1 ) # Copy the bias. elif weight_or_bias == "bias": UpperCAmelCase : Dict =megatron_to_transformers[op_name] UpperCAmelCase : List[str] =val # DEBUG. assert config.n_layer == layer_idx + 1 # The final layernorm. UpperCAmelCase : List[Any] =transformer['''final_layernorm.weight'''] UpperCAmelCase : List[str] =transformer['''final_layernorm.bias'''] # For LM head, transformers' wants the matrix to weight embeddings. UpperCAmelCase : List[str] =word_embeddings # It should be done! return output_state_dict def lowerCAmelCase_ ( )-> int: '''simple docstring''' UpperCAmelCase : Dict =argparse.ArgumentParser() parser.add_argument('''--print-checkpoint-structure''' , action='''store_true''' ) parser.add_argument( '''path_to_checkpoint''' , type=__lowerCAmelCase , help='''Path to the checkpoint file (.zip archive or direct .pt file)''' , ) parser.add_argument( '''--config_file''' , default='''''' , type=__lowerCAmelCase , help='''An optional config json file describing the pre-trained model.''' , ) UpperCAmelCase : Optional[int] =parser.parse_args() # Extract the basename. UpperCAmelCase : List[Any] =os.path.dirname(args.path_to_checkpoint ) # Load the model. 
# the .zip is very optional, let's keep it for backward compatibility print(f'''Extracting PyTorch state dictionary from {args.path_to_checkpoint}''' ) if args.path_to_checkpoint.endswith('''.zip''' ): with zipfile.ZipFile(args.path_to_checkpoint , '''r''' ) as checkpoint: with checkpoint.open('''release/mp_rank_00/model_optim_rng.pt''' ) as pytorch_dict: UpperCAmelCase : Tuple =torch.load(__lowerCAmelCase , map_location='''cpu''' ) else: UpperCAmelCase : Union[str, Any] =torch.load(args.path_to_checkpoint , map_location='''cpu''' ) UpperCAmelCase : int =input_state_dict.get('''args''' , __lowerCAmelCase ) # Read the config, or default to the model released by NVIDIA. if args.config_file == "": if ds_args is not None: if ds_args.bias_gelu_fusion: UpperCAmelCase : Tuple ='''gelu_fast''' elif ds_args.openai_gelu: UpperCAmelCase : Any ='''gelu_new''' else: UpperCAmelCase : List[str] ='''gelu''' else: # in the very early days this used to be "gelu_new" UpperCAmelCase : List[Any] ='''gelu_new''' # Spell out all parameters in case the defaults change. UpperCAmelCase : int =GPTaConfig( vocab_size=5_02_57 , n_positions=10_24 , n_embd=10_24 , n_layer=24 , n_head=16 , n_inner=40_96 , activation_function=__lowerCAmelCase , resid_pdrop=0.1 , embd_pdrop=0.1 , attn_pdrop=0.1 , layer_norm_epsilon=1e-5 , initializer_range=0.02 , summary_type='''cls_index''' , summary_use_proj=__lowerCAmelCase , summary_activation=__lowerCAmelCase , summary_proj_to_labels=__lowerCAmelCase , summary_first_dropout=0.1 , scale_attn_weights=__lowerCAmelCase , use_cache=__lowerCAmelCase , bos_token_id=5_02_56 , eos_token_id=5_02_56 , ) else: UpperCAmelCase : Union[str, Any] =GPTaConfig.from_json_file(args.config_file ) UpperCAmelCase : int =['''GPT2LMHeadModel'''] # Convert. print('''Converting''' ) UpperCAmelCase : Union[str, Any] =convert_megatron_checkpoint(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) # Print the structure of converted state dict. if args.print_checkpoint_structure: recursive_print(__lowerCAmelCase , __lowerCAmelCase ) # Add tokenizer class info to config # see https://github.com/huggingface/transformers/issues/13906) if ds_args is not None: UpperCAmelCase : str =ds_args.tokenizer_type if tokenizer_type == "GPT2BPETokenizer": UpperCAmelCase : Dict ='''gpt2''' elif tokenizer_type == "PretrainedFromHF": UpperCAmelCase : Union[str, Any] =ds_args.tokenizer_name_or_path else: raise ValueError(f'''Unrecognized tokenizer_type {tokenizer_type}''' ) else: UpperCAmelCase : Optional[Any] ='''gpt2''' UpperCAmelCase : List[Any] =AutoTokenizer.from_pretrained(__lowerCAmelCase ) UpperCAmelCase : int =type(__lowerCAmelCase ).__name__ UpperCAmelCase : Dict =tokenizer_class # Store the config to file. print('''Saving config''' ) config.save_pretrained(__lowerCAmelCase ) # Save tokenizer based on args print(f'''Adding {tokenizer_class} tokenizer files''' ) tokenizer.save_pretrained(__lowerCAmelCase ) # Store the state_dict to file. UpperCAmelCase : Optional[int] =os.path.join(__lowerCAmelCase , '''pytorch_model.bin''' ) print(f'''Saving checkpoint to "{output_checkpoint_file}"''' ) torch.save(__lowerCAmelCase , __lowerCAmelCase ) #################################################################################################### if __name__ == "__main__": main() ####################################################################################################
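A toy check of the checkpoint-version-2.0 branch of fix_query_key_value_ordering (the name used at the call sites above); the tensor sizes here are made up purely for illustration:

import torch

num_heads, head_dim, num_splits, cols = 2, 3, 3, 4
param = torch.arange(num_heads * num_splits * head_dim * cols).reshape(-1, cols)
out = fix_query_key_value_ordering(param, 2.0, num_splits, num_heads, head_dim)
assert out.shape == param.shape  # same storage shape, rows regrouped per Q/K/V split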
348
from typing import Callable, List, Optional, Tuple, Union import torch from transformers import CLIPTextModel, CLIPTokenizer from ...configuration_utils import ConfigMixin, register_to_config from ...models import ModelMixin, TransformeraDModel, VQModel from ...schedulers import VQDiffusionScheduler from ...utils import logging from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput __snake_case = logging.get_logger(__name__) # pylint: disable=invalid-name class __snake_case ( lowerCamelCase__ , lowerCamelCase__ ): @register_to_config def __init__( self , snake_case__ , snake_case__ = None , snake_case__ = None ) -> str: '''simple docstring''' super().__init__() UpperCAmelCase : Optional[Any] =learnable if self.learnable: assert hidden_size is not None, "learnable=True requires `hidden_size` to be set" assert length is not None, "learnable=True requires `length` to be set" UpperCAmelCase : Any =torch.zeros(snake_case__ , snake_case__ ) else: UpperCAmelCase : Union[str, Any] =None UpperCAmelCase : Optional[int] =torch.nn.Parameter(snake_case__ ) class __snake_case ( lowerCamelCase__ ): __lowerCamelCase : VQModel __lowerCamelCase : CLIPTextModel __lowerCamelCase : CLIPTokenizer __lowerCamelCase : TransformeraDModel __lowerCamelCase : LearnedClassifierFreeSamplingEmbeddings __lowerCamelCase : VQDiffusionScheduler def __init__( self , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , ) -> int: '''simple docstring''' super().__init__() self.register_modules( vqvae=snake_case__ , transformer=snake_case__ , text_encoder=snake_case__ , tokenizer=snake_case__ , scheduler=snake_case__ , learned_classifier_free_sampling_embeddings=snake_case__ , ) def UpperCAmelCase__ ( self , snake_case__ , snake_case__ , snake_case__ ) -> Optional[int]: '''simple docstring''' UpperCAmelCase : int =len(snake_case__ ) if isinstance(snake_case__ , snake_case__ ) else 1 # get prompt text embeddings UpperCAmelCase : Optional[int] =self.tokenizer( snake_case__ , padding='''max_length''' , max_length=self.tokenizer.model_max_length , return_tensors='''pt''' , ) UpperCAmelCase : int =text_inputs.input_ids if text_input_ids.shape[-1] > self.tokenizer.model_max_length: UpperCAmelCase : List[str] =self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :] ) logger.warning( '''The following part of your input was truncated because CLIP can only handle sequences up to''' f''' {self.tokenizer.model_max_length} tokens: {removed_text}''' ) UpperCAmelCase : Optional[Any] =text_input_ids[:, : self.tokenizer.model_max_length] UpperCAmelCase : List[Any] =self.text_encoder(text_input_ids.to(self.device ) )[0] # NOTE: This additional step of normalizing the text embeddings is from VQ-Diffusion. # While CLIP does normalize the pooled output of the text transformer when combining # the image and text embeddings, CLIP does not directly normalize the last hidden state. # # CLIP normalizing the pooled output. 
# https://github.com/huggingface/transformers/blob/d92e22d1f28324f513f3080e5c47c071a3916721/src/transformers/models/clip/modeling_clip.py#L1052-L1053 UpperCAmelCase : int =prompt_embeds / prompt_embeds.norm(dim=-1 , keepdim=snake_case__ ) # duplicate text embeddings for each generation per prompt UpperCAmelCase : int =prompt_embeds.repeat_interleave(snake_case__ , dim=0 ) if do_classifier_free_guidance: if self.learned_classifier_free_sampling_embeddings.learnable: UpperCAmelCase : Optional[int] =self.learned_classifier_free_sampling_embeddings.embeddings UpperCAmelCase : str =negative_prompt_embeds.unsqueeze(0 ).repeat(snake_case__ , 1 , 1 ) else: UpperCAmelCase : str =[''''''] * batch_size UpperCAmelCase : Tuple =text_input_ids.shape[-1] UpperCAmelCase : Optional[Any] =self.tokenizer( snake_case__ , padding='''max_length''' , max_length=snake_case__ , truncation=snake_case__ , return_tensors='''pt''' , ) UpperCAmelCase : Optional[Any] =self.text_encoder(uncond_input.input_ids.to(self.device ) )[0] # See comment for normalizing text embeddings UpperCAmelCase : Optional[int] =negative_prompt_embeds / negative_prompt_embeds.norm(dim=-1 , keepdim=snake_case__ ) # duplicate unconditional embeddings for each generation per prompt, using mps friendly method UpperCAmelCase : Optional[Any] =negative_prompt_embeds.shape[1] UpperCAmelCase : Union[str, Any] =negative_prompt_embeds.repeat(1 , snake_case__ , 1 ) UpperCAmelCase : Optional[Any] =negative_prompt_embeds.view(batch_size * num_images_per_prompt , snake_case__ , -1 ) # For classifier free guidance, we need to do two forward passes. # Here we concatenate the unconditional and text embeddings into a single batch # to avoid doing two forward passes UpperCAmelCase : int =torch.cat([negative_prompt_embeds, prompt_embeds] ) return prompt_embeds @torch.no_grad() def __call__( self , snake_case__ , snake_case__ = 100 , snake_case__ = 5.0 , snake_case__ = 1.0 , snake_case__ = 1 , snake_case__ = None , snake_case__ = None , snake_case__ = "pil" , snake_case__ = True , snake_case__ = None , snake_case__ = 1 , ) -> Union[ImagePipelineOutput, Tuple]: '''simple docstring''' if isinstance(snake_case__ , snake_case__ ): UpperCAmelCase : Optional[int] =1 elif isinstance(snake_case__ , snake_case__ ): UpperCAmelCase : Tuple =len(snake_case__ ) else: raise ValueError(f'''`prompt` has to be of type `str` or `list` but is {type(snake_case__ )}''' ) UpperCAmelCase : Tuple =batch_size * num_images_per_prompt UpperCAmelCase : List[str] =guidance_scale > 1.0 UpperCAmelCase : List[Any] =self._encode_prompt(snake_case__ , snake_case__ , snake_case__ ) if (callback_steps is None) or ( callback_steps is not None and (not isinstance(snake_case__ , snake_case__ ) or callback_steps <= 0) ): raise ValueError( f'''`callback_steps` has to be a positive integer but is {callback_steps} of type''' f''' {type(snake_case__ )}.''' ) # get the initial completely masked latents unless the user supplied it UpperCAmelCase : int =(batch_size, self.transformer.num_latent_pixels) if latents is None: UpperCAmelCase : Union[str, Any] =self.transformer.num_vector_embeds - 1 UpperCAmelCase : str =torch.full(snake_case__ , snake_case__ ).to(self.device ) else: if latents.shape != latents_shape: raise ValueError(f'''Unexpected latents shape, got {latents.shape}, expected {latents_shape}''' ) if (latents < 0).any() or (latents >= self.transformer.num_vector_embeds).any(): raise ValueError( '''Unexpected latents value(s). All latents be valid embedding indices i.e. 
in the range 0,''' f''' {self.transformer.num_vector_embeds - 1} (inclusive).''' ) UpperCAmelCase : Any =latents.to(self.device ) # set timesteps self.scheduler.set_timesteps(snake_case__ , device=self.device ) UpperCAmelCase : Any =self.scheduler.timesteps.to(self.device ) UpperCAmelCase : Optional[int] =latents for i, t in enumerate(self.progress_bar(snake_case__ ) ): # expand the sample if we are doing classifier free guidance UpperCAmelCase : Optional[Any] =torch.cat([sample] * 2 ) if do_classifier_free_guidance else sample # predict the un-noised image # model_output == `log_p_x_0` UpperCAmelCase : Optional[int] =self.transformer(snake_case__ , encoder_hidden_states=snake_case__ , timestep=snake_case__ ).sample if do_classifier_free_guidance: UpperCAmelCase , UpperCAmelCase : str =model_output.chunk(2 ) UpperCAmelCase : Optional[int] =model_output_uncond + guidance_scale * (model_output_text - model_output_uncond) model_output -= torch.logsumexp(snake_case__ , dim=1 , keepdim=snake_case__ ) UpperCAmelCase : Tuple =self.truncate(snake_case__ , snake_case__ ) # remove `log(0)`'s (`-inf`s) UpperCAmelCase : Optional[Any] =model_output.clamp(-70 ) # compute the previous noisy sample x_t -> x_t-1 UpperCAmelCase : int =self.scheduler.step(snake_case__ , timestep=snake_case__ , sample=snake_case__ , generator=snake_case__ ).prev_sample # call the callback, if provided if callback is not None and i % callback_steps == 0: callback(snake_case__ , snake_case__ , snake_case__ ) UpperCAmelCase : Optional[int] =self.vqvae.config.vq_embed_dim UpperCAmelCase : Optional[Any] =(batch_size, self.transformer.height, self.transformer.width, embedding_channels) UpperCAmelCase : Dict =self.vqvae.quantize.get_codebook_entry(snake_case__ , shape=snake_case__ ) UpperCAmelCase : Tuple =self.vqvae.decode(snake_case__ , force_not_quantize=snake_case__ ).sample UpperCAmelCase : Union[str, Any] =(image / 2 + 0.5).clamp(0 , 1 ) UpperCAmelCase : Any =image.cpu().permute(0 , 2 , 3 , 1 ).numpy() if output_type == "pil": UpperCAmelCase : List[str] =self.numpy_to_pil(snake_case__ ) if not return_dict: return (image,) return ImagePipelineOutput(images=snake_case__ ) def UpperCAmelCase__ ( self , snake_case__ , snake_case__ ) -> torch.FloatTensor: '''simple docstring''' UpperCAmelCase , UpperCAmelCase : int =torch.sort(snake_case__ , 1 , descending=snake_case__ ) UpperCAmelCase : Union[str, Any] =torch.exp(snake_case__ ) UpperCAmelCase : Union[str, Any] =sorted_p_x_0.cumsum(dim=1 ) < truncation_rate # Ensure that at least the largest probability is not zeroed out UpperCAmelCase : Optional[Any] =torch.full_like(keep_mask[:, 0:1, :] , snake_case__ ) UpperCAmelCase : Tuple =torch.cat((all_true, keep_mask) , dim=1 ) UpperCAmelCase : int =keep_mask[:, :-1, :] UpperCAmelCase : int =keep_mask.gather(1 , indices.argsort(1 ) ) UpperCAmelCase : Dict =log_p_x_0.clone() UpperCAmelCase : List[Any] =-torch.inf # -inf = log(0) return rv
348
1
from random import shuffle import tensorflow as tf from numpy import array def lowerCAmelCase_ ( __lowerCAmelCase , __lowerCAmelCase )-> Tuple: '''simple docstring''' UpperCAmelCase : List[str] =int(__lowerCAmelCase ) assert noofclusters < len(__lowerCAmelCase ) # Find out the dimensionality UpperCAmelCase : Optional[int] =len(vectors[0] ) # Will help select random centroids from among the available vectors UpperCAmelCase : Tuple =list(range(len(__lowerCAmelCase ) ) ) shuffle(__lowerCAmelCase ) # GRAPH OF COMPUTATION # We initialize a new graph and set it as the default during each run # of this algorithm. This ensures that as this function is called # multiple times, the default graph doesn't keep getting crowded with # unused ops and Variables from previous function calls. UpperCAmelCase : List[Any] =tf.Graph() with graph.as_default(): # SESSION OF COMPUTATION UpperCAmelCase : Union[str, Any] =tf.Session() ##CONSTRUCTING THE ELEMENTS OF COMPUTATION ##First lets ensure we have a Variable vector for each centroid, ##initialized to one of the vectors from the available data points UpperCAmelCase : Tuple =[ tf.Variable(vectors[vector_indices[i]] ) for i in range(__lowerCAmelCase ) ] ##These nodes will assign the centroid Variables the appropriate ##values UpperCAmelCase : int =tf.placeholder('''float64''' , [dim] ) UpperCAmelCase : List[str] =[] for centroid in centroids: cent_assigns.append(tf.assign(__lowerCAmelCase , __lowerCAmelCase ) ) ##Variables for cluster assignments of individual vectors(initialized ##to 0 at first) UpperCAmelCase : Union[str, Any] =[tf.Variable(0 ) for i in range(len(__lowerCAmelCase ) )] ##These nodes will assign an assignment Variable the appropriate ##value UpperCAmelCase : int =tf.placeholder('''int32''' ) UpperCAmelCase : int =[] for assignment in assignments: cluster_assigns.append(tf.assign(__lowerCAmelCase , __lowerCAmelCase ) ) ##Now lets construct the node that will compute the mean # The placeholder for the input UpperCAmelCase : Any =tf.placeholder('''float''' , [None, dim] ) # The Node/op takes the input and computes a mean along the 0th # dimension, i.e. the list of input vectors UpperCAmelCase : str =tf.reduce_mean(__lowerCAmelCase , 0 ) ##Node for computing Euclidean distances # Placeholders for input UpperCAmelCase : Any =tf.placeholder('''float''' , [dim] ) UpperCAmelCase : str =tf.placeholder('''float''' , [dim] ) UpperCAmelCase : str =tf.sqrt(tf.reduce_sum(tf.pow(tf.sub(__lowerCAmelCase , __lowerCAmelCase ) , 2 ) ) ) ##This node will figure out which cluster to assign a vector to, ##based on Euclidean distances of the vector from the centroids. # Placeholder for input UpperCAmelCase : str =tf.placeholder('''float''' , [noofclusters] ) UpperCAmelCase : Tuple =tf.argmin(__lowerCAmelCase , 0 ) ##INITIALIZING STATE VARIABLES ##This will help initialization of all Variables defined with respect ##to the graph. The Variable-initializer should be defined after ##all the Variables have been constructed, so that each of them ##will be included in the initialization. UpperCAmelCase : List[str] =tf.initialize_all_variables() # Initialize all variables sess.run(__lowerCAmelCase ) ##CLUSTERING ITERATIONS # Now perform the Expectation-Maximization steps of K-Means clustering # iterations. To keep things simple, we will only do a set number of # iterations, instead of using a Stopping Criterion. 
UpperCAmelCase : Any =1_00 for _ in range(__lowerCAmelCase ): ##EXPECTATION STEP ##Based on the centroid locations till last iteration, compute ##the _expected_ centroid assignments. # Iterate over each vector for vector_n in range(len(__lowerCAmelCase ) ): UpperCAmelCase : str =vectors[vector_n] # Compute Euclidean distance between this vector and each # centroid. Remember that this list cannot be named #'centroid_distances', since that is the input to the # cluster assignment node. UpperCAmelCase : List[Any] =[ sess.run(__lowerCAmelCase , feed_dict={va: vect, va: sess.run(__lowerCAmelCase )} ) for centroid in centroids ] # Now use the cluster assignment node, with the distances # as the input UpperCAmelCase : Tuple =sess.run( __lowerCAmelCase , feed_dict={centroid_distances: distances} ) # Now assign the value to the appropriate state variable sess.run( cluster_assigns[vector_n] , feed_dict={assignment_value: assignment} ) ##MAXIMIZATION STEP # Based on the expected state computed from the Expectation Step, # compute the locations of the centroids so as to maximize the # overall objective of minimizing within-cluster Sum-of-Squares for cluster_n in range(__lowerCAmelCase ): # Collect all the vectors assigned to this cluster UpperCAmelCase : List[Any] =[ vectors[i] for i in range(len(__lowerCAmelCase ) ) if sess.run(assignments[i] ) == cluster_n ] # Compute new centroid location UpperCAmelCase : List[str] =sess.run( __lowerCAmelCase , feed_dict={mean_input: array(__lowerCAmelCase )} ) # Assign value to appropriate variable sess.run( cent_assigns[cluster_n] , feed_dict={centroid_value: new_location} ) # Return centroids and assignments UpperCAmelCase : List[Any] =sess.run(__lowerCAmelCase ) UpperCAmelCase : List[str] =sess.run(__lowerCAmelCase ) return centroids, assignments
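A hypothetical driver for the clustering routine above. TFKMeansCluster is an assumed name for the function defined at the top of this file (the def itself is anonymized in this dump), and a TF1-style runtime is required for tf.Session:

vectors = [[1.0, 1.0], [1.2, 0.8], [8.0, 8.0], [7.8, 8.2]]
centroids, assignments = TFKMeansCluster(vectors, 2)  # assumed entry-point name
print(centroids)     # two centroids, one near each blob
print(assignments)   # e.g. [0, 0, 1, 1]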
348
import unittest import numpy as np import torch from diffusers import KarrasVePipeline, KarrasVeScheduler, UNetaDModel from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device enable_full_determinism() class __snake_case ( unittest.TestCase ): @property def UpperCAmelCase__ ( self ) -> int: '''simple docstring''' torch.manual_seed(0 ) UpperCAmelCase : Any =UNetaDModel( block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=3 , out_channels=3 , down_block_types=('''DownBlock2D''', '''AttnDownBlock2D''') , up_block_types=('''AttnUpBlock2D''', '''UpBlock2D''') , ) return model def UpperCAmelCase__ ( self ) -> Union[str, Any]: '''simple docstring''' UpperCAmelCase : Tuple =self.dummy_uncond_unet UpperCAmelCase : Optional[int] =KarrasVeScheduler() UpperCAmelCase : List[Any] =KarrasVePipeline(unet=snake_case__ , scheduler=snake_case__ ) pipe.to(snake_case__ ) pipe.set_progress_bar_config(disable=snake_case__ ) UpperCAmelCase : List[str] =torch.manual_seed(0 ) UpperCAmelCase : List[str] =pipe(num_inference_steps=2 , generator=snake_case__ , output_type='''numpy''' ).images UpperCAmelCase : str =torch.manual_seed(0 ) UpperCAmelCase : str =pipe(num_inference_steps=2 , generator=snake_case__ , output_type='''numpy''' , return_dict=snake_case__ )[0] UpperCAmelCase : Any =image[0, -3:, -3:, -1] UpperCAmelCase : List[str] =image_from_tuple[0, -3:, -3:, -1] assert image.shape == (1, 32, 32, 3) UpperCAmelCase : int =np.array([0.0, 1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2 assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2 @slow @require_torch class __snake_case ( unittest.TestCase ): def UpperCAmelCase__ ( self ) -> List[Any]: '''simple docstring''' UpperCAmelCase : Tuple ='''google/ncsnpp-celebahq-256''' UpperCAmelCase : int =UNetaDModel.from_pretrained(snake_case__ ) UpperCAmelCase : Dict =KarrasVeScheduler() UpperCAmelCase : Union[str, Any] =KarrasVePipeline(unet=snake_case__ , scheduler=snake_case__ ) pipe.to(snake_case__ ) pipe.set_progress_bar_config(disable=snake_case__ ) UpperCAmelCase : Any =torch.manual_seed(0 ) UpperCAmelCase : Tuple =pipe(num_inference_steps=20 , generator=snake_case__ , output_type='''numpy''' ).images UpperCAmelCase : Optional[int] =image[0, -3:, -3:, -1] assert image.shape == (1, 256, 256, 3) UpperCAmelCase : Tuple =np.array([0.578, 0.5811, 0.5924, 0.5809, 0.587, 0.5886, 0.5861, 0.5802, 0.586] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
348
1
from __future__ import annotations


def resistor_parallel(resistors: list[float]) -> float:
    """Equivalent resistance of resistors connected in parallel."""
    first_sum = 0.00
    index = 0
    for resistor in resistors:
        if resistor <= 0:
            msg = f"Resistor at index {index} has a negative or zero value!"
            raise ValueError(msg)
        first_sum += 1 / float(resistor)
        index += 1
    return 1 / first_sum


def resistor_series(resistors: list[float]) -> float:
    """Equivalent resistance of resistors connected in series."""
    sum_r = 0.00
    index = 0
    for resistor in resistors:
        if resistor < 0:
            msg = f"Resistor at index {index} has a negative value!"
            raise ValueError(msg)
        sum_r += resistor
        index += 1
    return sum_r


if __name__ == "__main__":
    import doctest

    doctest.testmod()
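Two quick values to sanity-check the formulas above:

print(resistor_parallel([10, 20, 20]))  # 5.0  (1 / (1/10 + 1/20 + 1/20))
print(resistor_series([10, 20, 20]))    # 50.0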
348
import qiskit


def half_adder(bit0: int, bit1: int) -> qiskit.result.counts.Counts:
    """Simulate a quantum half adder: classical bit 0 holds XOR, bit 1 holds AND."""
    simulator = qiskit.Aer.get_backend("aer_simulator")

    qc_ha = qiskit.QuantumCircuit(4, 2)
    # encode inputs in qubits 0 and 1
    if bit0 == 1:
        qc_ha.x(0)
    if bit1 == 1:
        qc_ha.x(1)
    qc_ha.barrier()
    # use cnots to write XOR of the inputs on qubit2
    qc_ha.cx(0, 2)
    qc_ha.cx(1, 2)
    # use ccx / toffoli gate to write AND of the inputs on qubit3
    qc_ha.ccx(0, 1, 3)
    qc_ha.barrier()
    # extract outputs
    qc_ha.measure(2, 0)  # extract XOR value
    qc_ha.measure(3, 1)  # extract AND value
    # Execute the circuit on the qasm simulator
    job = qiskit.execute(qc_ha, simulator, shots=1000)
    # Return the histogram data of the results of the experiment
    return job.result().get_counts(qc_ha)


if __name__ == "__main__":
    counts = half_adder(1, 1)
    print(f"Half Adder Output Qubit Counts: {counts}")
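For reference, qiskit prints the classical register most-significant bit first, so the count keys read AND then XOR. On an ideal simulator the circuit is deterministic, so every shot should land in one bin:

counts = half_adder(1, 0)
print(counts)  # expected: {'01': 1000} -- XOR=1 (bit 0), AND=0 (bit 1)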
def is_pentagonal(n: int) -> bool:
    """A number n is pentagonal if (1 + sqrt(1 + 24 n)) / 6 is an integer."""
    root = (1 + 24 * n) ** 0.5
    return ((1 + root) / 6) % 1 == 0


def solution(limit: int = 5000) -> int:
    """Project Euler 44: find the smallest difference of two pentagonal numbers
    whose sum and difference are both pentagonal."""
    pentagonal_nums = [(i * (3 * i - 1)) // 2 for i in range(1, limit)]
    for i, pentagonal_i in enumerate(pentagonal_nums):
        for j in range(i, len(pentagonal_nums)):
            pentagonal_j = pentagonal_nums[j]
            a = pentagonal_i + pentagonal_j
            b = pentagonal_j - pentagonal_i
            if is_pentagonal(a) and is_pentagonal(b):
                return b
    return -1


if __name__ == "__main__":
    print(f"{solution() = }")
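The pentagonality test inverts P(n) = n(3n - 1)/2: solving the quadratic for n gives n = (1 + sqrt(1 + 24 P)) / 6, so P is pentagonal exactly when that expression is an integer. A worked check:

# P(4) = 4 * (3*4 - 1) / 2 = 22, and (1 + sqrt(1 + 24*22)) / 6 = (1 + 23) / 6 = 4
assert is_pentagonal(22)
assert not is_pentagonal(23)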
from __future__ import annotations

import unittest

from transformers import BlenderbotConfig, BlenderbotTokenizer, is_tf_available
from transformers.testing_utils import require_tf, require_tokenizers, slow
from transformers.utils import cached_property

from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin


if is_tf_available():
    import tensorflow as tf

    from transformers import TFAutoModelForSeq2SeqLM, TFBlenderbotForConditionalGeneration, TFBlenderbotModel


@require_tf
class TFBlenderbotModelTester:
    config_cls = BlenderbotConfig
    config_updates = {}
    hidden_act = "gelu"

    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_labels=False,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=20,
        eos_token_id=2,
        pad_token_id=1,
        bos_token_id=0,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id

    def prepare_config_and_inputs_for_common(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size)
        eos_tensor = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size), 1)
        input_ids = tf.concat([input_ids, eos_tensor], axis=1)

        decoder_input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        config = self.config_cls(
            vocab_size=self.vocab_size,
            d_model=self.hidden_size,
            encoder_layers=self.num_hidden_layers,
            decoder_layers=self.num_hidden_layers,
            encoder_attention_heads=self.num_attention_heads,
            decoder_attention_heads=self.num_attention_heads,
            encoder_ffn_dim=self.intermediate_size,
            decoder_ffn_dim=self.intermediate_size,
            dropout=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            eos_token_ids=[2],
            bos_token_id=self.bos_token_id,
            pad_token_id=self.pad_token_id,
            decoder_start_token_id=self.pad_token_id,
            **self.config_updates,
        )
        inputs_dict = prepare_blenderbot_inputs_dict(config, input_ids, decoder_input_ids)
        return config, inputs_dict

    def check_decoder_model_past_large_inputs(self, config, inputs_dict):
        model = TFBlenderbotModel(config=config).get_decoder()
        input_ids = inputs_dict["input_ids"]

        input_ids = input_ids[:1, :]
        attention_mask = inputs_dict["attention_mask"][:1, :]
        head_mask = inputs_dict["head_mask"]
        self.batch_size = 1

        # first forward pass
        outputs = model(input_ids, attention_mask=attention_mask, head_mask=head_mask, use_cache=True)

        output, past_key_values = outputs.to_tuple()

        # create hypothetical next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_attn_mask = tf.cast(ids_tensor((self.batch_size, 3), 2), tf.int8)

        # append to next input_ids and
        next_input_ids = tf.concat([input_ids, next_tokens], axis=-1)
        next_attention_mask = tf.concat([attention_mask, next_attn_mask], axis=-1)

        output_from_no_past = model(next_input_ids, attention_mask=next_attention_mask)[0]
        output_from_past = model(next_tokens, attention_mask=next_attention_mask, past_key_values=past_key_values)[0]

        self.parent.assertEqual(next_tokens.shape[1], output_from_past.shape[1])

        # select random slice
        random_slice_idx = int(ids_tensor((1,), output_from_past.shape[-1]))
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx]
        output_from_past_slice = output_from_past[:, :, random_slice_idx]

        # test that outputs are equal for slice
        tf.debugging.assert_near(output_from_past_slice, output_from_no_past_slice, rtol=1e-3)


def prepare_blenderbot_inputs_dict(
    config,
    input_ids,
    decoder_input_ids,
    attention_mask=None,
    decoder_attention_mask=None,
    head_mask=None,
    decoder_head_mask=None,
    cross_attn_head_mask=None,
):
    if attention_mask is None:
        attention_mask = tf.cast(tf.math.not_equal(input_ids, config.pad_token_id), tf.int8)
    if decoder_attention_mask is None:
        decoder_attention_mask = tf.concat(
            [
                tf.ones(decoder_input_ids[:, :1].shape, dtype=tf.int8),
                tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:], config.pad_token_id), tf.int8),
            ],
            axis=-1,
        )
    if head_mask is None:
        head_mask = tf.ones((config.encoder_layers, config.encoder_attention_heads))
    if decoder_head_mask is None:
        decoder_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
    if cross_attn_head_mask is None:
        cross_attn_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
    return {
        "input_ids": input_ids,
        "decoder_input_ids": decoder_input_ids,
        "attention_mask": attention_mask,
        "decoder_attention_mask": decoder_attention_mask,
        "head_mask": head_mask,
        "decoder_head_mask": decoder_head_mask,
        "cross_attn_head_mask": cross_attn_head_mask,
    }


@require_tf
class TFBlenderbotModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TFBlenderbotForConditionalGeneration, TFBlenderbotModel) if is_tf_available() else ()
    all_generative_model_classes = (TFBlenderbotForConditionalGeneration,) if is_tf_available() else ()
    pipeline_model_mapping = (
        {
            "conversational": TFBlenderbotForConditionalGeneration,
            "feature-extraction": TFBlenderbotModel,
            "summarization": TFBlenderbotForConditionalGeneration,
            "text2text-generation": TFBlenderbotForConditionalGeneration,
            "translation": TFBlenderbotForConditionalGeneration,
        }
        if is_tf_available()
        else {}
    )
    is_encoder_decoder = True
    test_pruning = False
    test_onnx = False

    def setUp(self):
        self.model_tester = TFBlenderbotModelTester(self)
        self.config_tester = ConfigTester(self, config_class=BlenderbotConfig)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_decoder_model_past_large_inputs(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_decoder_model_past_large_inputs(*config_and_inputs)


@require_tokenizers
@require_tf
class TFBlenderbot400MIntegrationTests(unittest.TestCase):
    src_text = ["My friends are cool but they eat too many carbs."]
    model_name = "facebook/blenderbot-400M-distill"

    @cached_property
    def tokenizer(self):
        return BlenderbotTokenizer.from_pretrained(self.model_name)

    @cached_property
    def model(self):
        model = TFAutoModelForSeq2SeqLM.from_pretrained(self.model_name)
        return model

    @slow
    def test_generation_from_long_input(self):
        model_inputs = self.tokenizer(self.src_text, return_tensors="tf")
        generated_ids = self.model.generate(
            model_inputs.input_ids,
        )
        generated_words = self.tokenizer.batch_decode(generated_ids.numpy(), skip_special_tokens=True)[0]
        assert (
            generated_words
            == " That's unfortunate. Are they trying to lose weight or are they just trying to be healthier?"
        )
import unittest

from accelerate import debug_launcher
from accelerate.test_utils import require_cpu, test_ops, test_script


@require_cpu
class MultiCPUTester(unittest.TestCase):
    def test_cpu(self):
        debug_launcher(test_script.main)

    def test_ops(self):
        debug_launcher(test_ops.main)
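debug_launcher runs the given function on a couple of CPU-only processes, so distributed code paths can be smoke-tested without GPUs. A hedged sketch of pointing it at your own function (the num_processes keyword and PartialState import are assumptions about accelerate's public API):

from accelerate import PartialState, debug_launcher

def my_check():
    state = PartialState()
    print(f"process {state.process_index} of {state.num_processes}")

debug_launcher(my_check, num_processes=2)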
import functools
import operator

from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

SEW_D_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "asapp/sew-d-tiny-100k": "https://huggingface.co/asapp/sew-d-tiny-100k/resolve/main/config.json",
    # See all SEW-D models at https://huggingface.co/models?filter=sew-d
}


class SEWDConfig(PretrainedConfig):
    model_type = "sew-d"

    def __init__(
        self,
        vocab_size=32,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        squeeze_factor=2,
        max_position_embeddings=512,
        position_buckets=256,
        share_att_key=True,
        relative_attention=True,
        pos_att_type=("p2c", "c2p"),
        norm_rel_ebd="layer_norm",
        hidden_act="gelu_python",
        hidden_dropout=0.1,
        activation_dropout=0.1,
        attention_dropout=0.1,
        feat_proj_dropout=0.0,
        final_dropout=0.1,
        initializer_range=0.02,
        layer_norm_eps=1e-7,
        feature_layer_norm_eps=1e-5,
        feat_extract_norm="group",
        feat_extract_activation="gelu",
        conv_dim=(64, 128, 128, 128, 128, 256, 256, 256, 256, 512, 512, 512, 512),
        conv_stride=(5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1),
        conv_kernel=(10, 3, 1, 3, 1, 3, 1, 3, 1, 2, 1, 2, 1),
        conv_bias=False,
        num_conv_pos_embeddings=128,
        num_conv_pos_embedding_groups=16,
        apply_spec_augment=True,
        mask_time_prob=0.05,
        mask_time_length=10,
        mask_time_min_masks=2,
        mask_feature_prob=0.0,
        mask_feature_length=10,
        mask_feature_min_masks=0,
        ctc_loss_reduction="mean",
        ctc_zero_infinity=False,
        use_weighted_layer_sum=False,
        classifier_proj_size=256,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        **kwargs,
    ):
        super().__init__(**kwargs, pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id)
        self.hidden_size = hidden_size
        self.feat_extract_norm = feat_extract_norm
        self.feat_extract_activation = feat_extract_activation
        self.conv_dim = list(conv_dim)
        self.conv_stride = list(conv_stride)
        self.conv_kernel = list(conv_kernel)
        self.conv_bias = conv_bias
        self.num_conv_pos_embeddings = num_conv_pos_embeddings
        self.num_conv_pos_embedding_groups = num_conv_pos_embedding_groups
        self.num_feat_extract_layers = len(self.conv_dim)
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.squeeze_factor = squeeze_factor
        self.max_position_embeddings = max_position_embeddings
        self.position_buckets = position_buckets
        self.share_att_key = share_att_key
        self.relative_attention = relative_attention
        self.norm_rel_ebd = norm_rel_ebd
        self.pos_att_type = list(pos_att_type)
        self.hidden_act = hidden_act
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.feat_proj_dropout = feat_proj_dropout
        self.final_dropout = final_dropout
        self.layer_norm_eps = layer_norm_eps
        self.feature_layer_norm_eps = feature_layer_norm_eps
        self.initializer_range = initializer_range
        self.vocab_size = vocab_size

        if (
            (len(self.conv_stride) != self.num_feat_extract_layers)
            or (len(self.conv_kernel) != self.num_feat_extract_layers)
            or (len(self.conv_dim) != self.num_feat_extract_layers)
        ):
            raise ValueError(
                "Configuration for convolutional layers is incorrect. "
                "It is required that `len(config.conv_dim)` == `len(config.conv_stride)` == `len(config.conv_kernel)`, "
                f"but is `len(config.conv_dim) = {len(self.conv_dim)}`, `len(config.conv_stride) "
                f"= {len(self.conv_stride)}`, `len(config.conv_kernel) = {len(self.conv_kernel)}`."
            )

        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.apply_spec_augment = apply_spec_augment
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length
        self.mask_feature_min_masks = mask_feature_min_masks

        # ctc loss
        self.ctc_loss_reduction = ctc_loss_reduction
        self.ctc_zero_infinity = ctc_zero_infinity

        # sequence classification
        self.use_weighted_layer_sum = use_weighted_layer_sum
        self.classifier_proj_size = classifier_proj_size

    @property
    def inputs_to_logits_ratio(self):
        return functools.reduce(operator.mul, self.conv_stride, 1)
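As with any PretrainedConfig, the class can be instantiated directly to inspect derived attributes; a small hedged example:

# The product of the conv strides gives the input-to-logits downsampling ratio
# (5*2*1*2*1*2*1*2*1*2*1*2*1 = 320 with the defaults above).
from transformers import SEWDConfig

config = SEWDConfig()
print(config.inputs_to_logits_ratio)  # 320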
import fire
from transformers import AutoConfig, AutoModelForSeq2SeqLM, AutoTokenizer


def save_randomly_initialized_version(config_name: str, save_dir: str, **config_kwargs):
    """Save a randomly initialized model (and the matching tokenizer) that uses
    the architecture and config of a pretrained checkpoint."""
    config = AutoConfig.from_pretrained(config_name, **config_kwargs)
    model = AutoModelForSeq2SeqLM.from_config(config)
    model.save_pretrained(save_dir)
    AutoTokenizer.from_pretrained(config_name).save_pretrained(save_dir)
    return model


if __name__ == "__main__":
    fire.Fire(save_randomly_initialized_version)
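Because the function is exposed through fire.Fire, its positional and keyword arguments map directly to command-line flags. A hedged invocation (script name, model name, and path are placeholders):

# python save_randomly_initialized_model.py t5-small /tmp/t5-small-random --d_model=128
# Extra flags such as --d_model are forwarded to AutoConfig.from_pretrained and
# override config fields before the random initialization. Programmatically:
model = save_randomly_initialized_version("t5-small", "/tmp/t5-small-random", d_model=128)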
import os
from argparse import ArgumentParser
from typing import List

import torch.utils.data

from datasets import Dataset, IterableDataset
from datasets.distributed import split_dataset_by_node


NUM_SHARDS = 4
NUM_ITEMS_PER_SHARD = 3


class FailedTestError(RuntimeError):
    pass


def gen(shards: List[str]):
    for shard in shards:
        for i in range(NUM_ITEMS_PER_SHARD):
            yield {"i": i, "shard": shard}


def main():
    rank = int(os.environ["RANK"])
    world_size = int(os.environ["WORLD_SIZE"])

    parser = ArgumentParser()
    parser.add_argument("--streaming", type=bool)
    parser.add_argument("--local_rank", type=int)
    parser.add_argument("--num_workers", type=int, default=0)
    args = parser.parse_args()

    streaming = args.streaming
    num_workers = args.num_workers

    gen_kwargs = {"shards": [f"shard_{shard_idx}" for shard_idx in range(NUM_SHARDS)]}
    ds = IterableDataset.from_generator(gen, gen_kwargs=gen_kwargs)
    if not streaming:
        ds = Dataset.from_list(list(ds))

    ds = split_dataset_by_node(ds, rank=rank, world_size=world_size)
    dataloader = torch.utils.data.DataLoader(ds, num_workers=num_workers)

    full_size = NUM_SHARDS * NUM_ITEMS_PER_SHARD
    expected_local_size = full_size // world_size
    expected_local_size += int(rank < (full_size % world_size))

    local_size = sum(1 for _ in dataloader)
    if local_size != expected_local_size:
        raise FailedTestError(f"local_size {local_size} != expected_local_size {expected_local_size}")


if __name__ == "__main__":
    main()
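The expected-size arithmetic above mirrors what split_dataset_by_node does for a map-style dataset: contiguous, near-equal slices, with the first full_size % world_size ranks receiving one extra example. A minimal single-process sketch:

from datasets import Dataset
from datasets.distributed import split_dataset_by_node

ds = Dataset.from_dict({"i": list(range(10))})
part = split_dataset_by_node(ds, rank=0, world_size=3)
print(len(part))  # 4 -- rank 0 absorbs one of the 10 % 3 leftover examples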
def binary_xor(a: int, b: int) -> str:
    """Return the XOR of two non-negative integers as a binary string prefixed with "0b"."""
    if a < 0 or b < 0:
        raise ValueError("the value of both inputs must be positive")

    a_binary = str(bin(a))[2:]  # remove the leading "0b"
    b_binary = str(bin(b))[2:]  # remove the leading "0b"

    max_len = max(len(a_binary), len(b_binary))

    return "0b" + "".join(
        str(int(char_a != char_b))
        for char_a, char_b in zip(a_binary.zfill(max_len), b_binary.zfill(max_len))
    )


if __name__ == "__main__":
    import doctest

    doctest.testmod()
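Example: 25 is 0b11001 and 32 is 0b100000, so their XOR is 0b111001 (decimal 57):

print(binary_xor(25, 32))  # 0b111001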
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_flax_available,
    is_tf_available,
    is_tokenizers_available,
    is_torch_available,
)


_import_structure = {"configuration_opt": ["OPT_PRETRAINED_CONFIG_ARCHIVE_MAP", "OPTConfig"]}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_opt"] = [
        "OPT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "OPTForCausalLM",
        "OPTModel",
        "OPTPreTrainedModel",
        "OPTForSequenceClassification",
        "OPTForQuestionAnswering",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_opt"] = ["TFOPTForCausalLM", "TFOPTModel", "TFOPTPreTrainedModel"]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_opt"] = [
        "FlaxOPTForCausalLM",
        "FlaxOPTModel",
        "FlaxOPTPreTrainedModel",
    ]


if TYPE_CHECKING:
    from .configuration_opt import OPT_PRETRAINED_CONFIG_ARCHIVE_MAP, OPTConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_opt import (
            OPT_PRETRAINED_MODEL_ARCHIVE_LIST,
            OPTForCausalLM,
            OPTForQuestionAnswering,
            OPTForSequenceClassification,
            OPTModel,
            OPTPreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_opt import TFOPTForCausalLM, TFOPTModel, TFOPTPreTrainedModel

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_opt import FlaxOPTForCausalLM, FlaxOPTModel, FlaxOPTPreTrainedModel

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
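The _LazyModule shim defers the heavy framework imports until an attribute is actually touched. A hedged illustration of the effect from user code:

# Importing the config is cheap: configuration_opt pulls in no framework.
from transformers import OPTConfig

config = OPTConfig()

# Only this attribute access triggers the torch-backed submodule import.
from transformers import OPTModel

model = OPTModel(config)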
import argparse

import intel_extension_for_pytorch as ipex
import torch

from diffusers import DPMSolverMultistepScheduler, StableDiffusionPipeline


parser = argparse.ArgumentParser("Stable Diffusion script with intel optimization", add_help=False)
parser.add_argument("--dpm", action="store_true", help="Enable DPMSolver or not")
parser.add_argument("--steps", default=None, type=int, help="Num inference steps")
args = parser.parse_args()

device = "cpu"
prompt = "a lovely <dicoo> in red dress and hat, in the snowly and brightly night, with many brighly buildings"

model_id = "path-to-your-trained-model"
pipe = StableDiffusionPipeline.from_pretrained(model_id)
if args.dpm:
    pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
pipe = pipe.to(device)

# to channels last
pipe.unet = pipe.unet.to(memory_format=torch.channels_last)
pipe.vae = pipe.vae.to(memory_format=torch.channels_last)
pipe.text_encoder = pipe.text_encoder.to(memory_format=torch.channels_last)
if pipe.requires_safety_checker:
    pipe.safety_checker = pipe.safety_checker.to(memory_format=torch.channels_last)

# optimize with ipex
sample = torch.randn(2, 4, 64, 64)
timestep = torch.rand(1) * 999
encoder_hidden_status = torch.randn(2, 77, 768)
input_example = (sample, timestep, encoder_hidden_status)
try:
    pipe.unet = ipex.optimize(pipe.unet.eval(), dtype=torch.bfloat16, inplace=True, sample_input=input_example)
except Exception:
    pipe.unet = ipex.optimize(pipe.unet.eval(), dtype=torch.bfloat16, inplace=True)
pipe.vae = ipex.optimize(pipe.vae.eval(), dtype=torch.bfloat16, inplace=True)
pipe.text_encoder = ipex.optimize(pipe.text_encoder.eval(), dtype=torch.bfloat16, inplace=True)
if pipe.requires_safety_checker:
    pipe.safety_checker = ipex.optimize(pipe.safety_checker.eval(), dtype=torch.bfloat16, inplace=True)

# compute
seed = 666
generator = torch.Generator(device).manual_seed(seed)
generate_kwargs = {"generator": generator}
if args.steps is not None:
    generate_kwargs["num_inference_steps"] = args.steps

with torch.cpu.amp.autocast(enabled=True, dtype=torch.bfloat16):
    image = pipe(prompt, **generate_kwargs).images[0]

# save image
image.save("generated.png")
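A hedged note on portability: the bfloat16 path above benefits from a CPU with native bf16 support (AVX512-BF16 / AMX); on older hardware a float32 fallback keeps the same structure:

# float32 fallback sketch for CPUs without bf16 support (an assumption, not
# part of the original script):
# pipe.unet = ipex.optimize(pipe.unet.eval(), dtype=torch.float32, inplace=True)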
import tempfile
import unittest

import numpy as np

import transformers
from transformers import GPT2Tokenizer, GPTJConfig, is_flax_available, is_torch_available
from transformers.testing_utils import is_pt_flax_cross_test, require_flax, tooslow

from ...generation.test_flax_utils import FlaxGenerationTesterMixin
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask


if is_flax_available():
    import jax
    import jax.numpy as jnp

    from transformers.modeling_flax_pytorch_utils import (
        convert_pytorch_state_dict_to_flax,
        load_flax_weights_in_pytorch_model,
    )
    from transformers.models.gptj.modeling_flax_gptj import FlaxGPTJForCausalLM, FlaxGPTJModel

if is_torch_available():
    import torch


class FlaxGPTJModelTester:
    def __init__(
        self,
        parent,
        batch_size=14,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=False,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        rotary_dim=4,
        num_hidden_layers=4,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        initializer_range=0.02,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.rotary_dim = rotary_dim
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.scope = None
        self.bos_token_id = vocab_size - 1
        self.eos_token_id = vocab_size - 1
        self.pad_token_id = vocab_size - 1

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        config = GPTJConfig(
            vocab_size=self.vocab_size,
            n_embd=self.hidden_size,
            n_layer=self.num_hidden_layers,
            n_head=self.num_attention_heads,
            n_positions=self.max_position_embeddings,
            use_cache=False,
            bos_token_id=self.bos_token_id,
            eos_token_id=self.eos_token_id,
            pad_token_id=self.pad_token_id,
            rotary_dim=self.rotary_dim,
        )

        return (config, input_ids, input_mask)

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, attention_mask = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": attention_mask}
        return config, inputs_dict

    def check_use_cache_forward(self, model_class_name, config, input_ids, attention_mask):
        max_decoder_length = 20
        model = model_class_name(config)

        past_key_values = model.init_cache(input_ids.shape[0], max_decoder_length)
        attention_mask = jnp.ones((input_ids.shape[0], max_decoder_length), dtype="i4")

        position_ids = jnp.broadcast_to(
            jnp.arange(input_ids.shape[-1] - 1)[None, :], (input_ids.shape[0], input_ids.shape[-1] - 1)
        )
        outputs_cache = model(
            input_ids[:, :-1],
            attention_mask=attention_mask,
            past_key_values=past_key_values,
            position_ids=position_ids,
        )

        position_ids = jnp.array(input_ids.shape[0] * [[input_ids.shape[-1] - 1]], dtype="i4")
        outputs_cache_next = model(
            input_ids[:, -1:],
            attention_mask=attention_mask,
            past_key_values=outputs_cache.past_key_values,
            position_ids=position_ids,
        )

        outputs = model(input_ids)

        diff = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5])))
        self.parent.assertTrue(diff < 1e-3, msg=f"Max diff is {diff}")

    def check_use_cache_forward_with_attn_mask(self, model_class_name, config, input_ids, attention_mask):
        max_decoder_length = 20
        model = model_class_name(config)

        attention_mask_cache = jnp.concatenate(
            [attention_mask, jnp.zeros((attention_mask.shape[0], max_decoder_length - attention_mask.shape[1]))],
            axis=-1,
        )

        past_key_values = model.init_cache(input_ids.shape[0], max_decoder_length)
        position_ids = jnp.broadcast_to(
            jnp.arange(input_ids.shape[-1] - 1)[None, :], (input_ids.shape[0], input_ids.shape[-1] - 1)
        )

        outputs_cache = model(
            input_ids[:, :-1],
            attention_mask=attention_mask_cache,
            past_key_values=past_key_values,
            position_ids=position_ids,
        )
        position_ids = jnp.array(input_ids.shape[0] * [[input_ids.shape[-1] - 1]], dtype="i4")
        outputs_cache_next = model(
            input_ids[:, -1:],
            past_key_values=outputs_cache.past_key_values,
            attention_mask=attention_mask_cache,
            position_ids=position_ids,
        )

        outputs = model(input_ids, attention_mask=attention_mask)

        diff = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5])))
        self.parent.assertTrue(diff < 1e-3, msg=f"Max diff is {diff}")


@require_flax
class FlaxGPTJModelTest(FlaxModelTesterMixin, FlaxGenerationTesterMixin, unittest.TestCase):
    all_model_classes = (FlaxGPTJModel, FlaxGPTJForCausalLM) if is_flax_available() else ()
    all_generative_model_classes = (FlaxGPTJForCausalLM,) if is_flax_available() else ()

    def setUp(self):
        self.model_tester = FlaxGPTJModelTester(self)

    def test_use_cache_forward(self):
        for model_class_name in self.all_model_classes:
            config, input_ids, attention_mask = self.model_tester.prepare_config_and_inputs()
            self.model_tester.check_use_cache_forward(model_class_name, config, input_ids, attention_mask)

    def test_use_cache_forward_with_attn_mask(self):
        for model_class_name in self.all_model_classes:
            config, input_ids, attention_mask = self.model_tester.prepare_config_and_inputs()
            self.model_tester.check_use_cache_forward_with_attn_mask(
                model_class_name, config, input_ids, attention_mask
            )

    @tooslow
    def test_batch_generation(self):
        tokenizer = GPT2Tokenizer.from_pretrained("gpt2", pad_token="<|endoftext|>", padding_side="left")
        inputs = tokenizer(["Hello this is a long string", "Hey"], return_tensors="np", padding=True, truncation=True)

        model = FlaxGPTJForCausalLM.from_pretrained("EleutherAI/gpt-j-6B")
        model.do_sample = False
        model.config.pad_token_id = model.config.eos_token_id

        jit_generate = jax.jit(model.generate)

        output_sequences = jit_generate(
            inputs["input_ids"], attention_mask=inputs["attention_mask"], pad_token_id=tokenizer.pad_token_id
        ).sequences

        output_string = tokenizer.batch_decode(output_sequences, skip_special_tokens=True)

        expected_string = [
            "Hello this is a long string of text.\n\nI'm trying to get the text of the",
            "Hey, I'm a little late to the party. I'm going to",
        ]

        self.assertListEqual(output_string, expected_string)

    @is_pt_flax_cross_test
    def test_equivalence_pt_to_flax(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                # prepare inputs
                prepared_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
                pt_inputs = {k: torch.tensor(v.tolist()) for k, v in prepared_inputs_dict.items()}

                # load corresponding PyTorch class
                pt_model_class_name = model_class.__name__[4:]  # Skip the "Flax" at the beginning
                pt_model_class = getattr(transformers, pt_model_class_name)

                batch_size, seq_length = pt_inputs["input_ids"].shape
                rnd_start_indices = np.random.randint(0, seq_length - 1, size=(batch_size,))
                for batch_idx, start_index in enumerate(rnd_start_indices):
                    pt_inputs["attention_mask"][batch_idx, :start_index] = 0
                    pt_inputs["attention_mask"][batch_idx, start_index:] = 1
                    prepared_inputs_dict["attention_mask"][batch_idx, :start_index] = 0
                    prepared_inputs_dict["attention_mask"][batch_idx, start_index:] = 1
                pt_model = pt_model_class(config).eval()
                fx_model = model_class(config, dtype=jnp.float32)

                fx_state = convert_pytorch_state_dict_to_flax(pt_model.state_dict(), fx_model)
                fx_model.params = fx_state

                with torch.no_grad():
                    pt_outputs = pt_model(**pt_inputs).to_tuple()

                fx_outputs = fx_model(**prepared_inputs_dict).to_tuple()
                self.assertEqual(len(fx_outputs), len(pt_outputs), "Output lengths differ between Flax and PyTorch")
                for fx_output, pt_output in zip(fx_outputs, pt_outputs):
                    self.assert_almost_equals(fx_output[:, -1], pt_output[:, -1].numpy(), 4e-2)

                with tempfile.TemporaryDirectory() as tmpdirname:
                    pt_model.save_pretrained(tmpdirname)
                    fx_model_loaded = model_class.from_pretrained(tmpdirname, from_pt=True)

                fx_outputs_loaded = fx_model_loaded(**prepared_inputs_dict).to_tuple()
                self.assertEqual(
                    len(fx_outputs_loaded), len(pt_outputs), "Output lengths differ between Flax and PyTorch"
                )
                for fx_output_loaded, pt_output in zip(fx_outputs_loaded, pt_outputs):
                    self.assert_almost_equals(fx_output_loaded[:, -1], pt_output[:, -1].numpy(), 4e-2)

    @is_pt_flax_cross_test
    def test_equivalence_flax_to_pt(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                # prepare inputs
                prepared_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
                pt_inputs = {k: torch.tensor(v.tolist()) for k, v in prepared_inputs_dict.items()}

                # load corresponding PyTorch class
                pt_model_class_name = model_class.__name__[4:]  # Skip the "Flax" at the beginning
                pt_model_class = getattr(transformers, pt_model_class_name)

                pt_model = pt_model_class(config).eval()
                fx_model = model_class(config, dtype=jnp.float32)

                pt_model = load_flax_weights_in_pytorch_model(pt_model, fx_model.params)
                batch_size, seq_length = pt_inputs["input_ids"].shape
                rnd_start_indices = np.random.randint(0, seq_length - 1, size=(batch_size,))
                for batch_idx, start_index in enumerate(rnd_start_indices):
                    pt_inputs["attention_mask"][batch_idx, :start_index] = 0
                    pt_inputs["attention_mask"][batch_idx, start_index:] = 1
                    prepared_inputs_dict["attention_mask"][batch_idx, :start_index] = 0
                    prepared_inputs_dict["attention_mask"][batch_idx, start_index:] = 1

                # make sure weights are tied in PyTorch
                pt_model.tie_weights()

                with torch.no_grad():
                    pt_outputs = pt_model(**pt_inputs).to_tuple()

                fx_outputs = fx_model(**prepared_inputs_dict).to_tuple()
                self.assertEqual(len(fx_outputs), len(pt_outputs), "Output lengths differ between Flax and PyTorch")
                for fx_output, pt_output in zip(fx_outputs, pt_outputs):
                    self.assert_almost_equals(fx_output[:, -1], pt_output[:, -1].numpy(), 4e-2)

                with tempfile.TemporaryDirectory() as tmpdirname:
                    fx_model.save_pretrained(tmpdirname)
                    pt_model_loaded = pt_model_class.from_pretrained(tmpdirname, from_flax=True)

                with torch.no_grad():
                    pt_outputs_loaded = pt_model_loaded(**pt_inputs).to_tuple()

                self.assertEqual(
                    len(fx_outputs), len(pt_outputs_loaded), "Output lengths differ between Flax and PyTorch"
                )
                for fx_output, pt_output in zip(fx_outputs, pt_outputs_loaded):
                    self.assert_almost_equals(fx_output[:, -1], pt_output[:, -1].numpy(), 4e-2)

    @tooslow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("EleutherAI/gpt-j-6B")
            outputs = model(np.ones((1, 1)))
            self.assertIsNotNone(outputs)
import importlib

import torch
import yaml
from omegaconf import OmegaConf
from taming.models.vqgan import VQModel


def load_config(config_path, display=False):
    config = OmegaConf.load(config_path)
    if display:
        print(yaml.dump(OmegaConf.to_container(config)))
    return config


def load_vqgan(device, conf_path=None, ckpt_path=None):
    if conf_path is None:
        conf_path = "./model_checkpoints/vqgan_only.yaml"
    config = load_config(conf_path, display=False)
    model = VQModel(**config.model.params)
    if ckpt_path is None:
        ckpt_path = "./model_checkpoints/vqgan_only.pt"
    sd = torch.load(ckpt_path, map_location=device)
    if ".ckpt" in ckpt_path:
        sd = sd["state_dict"]
    model.load_state_dict(sd, strict=False)
    model.to(device)
    del sd
    return model


def reconstruct_with_vqgan(x, model):
    z, _, [_, _, indices] = model.encode(x)
    print(f"VQGAN --- {model.__class__.__name__}: latent shape: {z.shape[2:]}")
    xrec = model.decode(z)
    return xrec


def get_obj_from_str(string, reload=False):
    module, cls = string.rsplit(".", 1)
    if reload:
        module_imp = importlib.import_module(module)
        importlib.reload(module_imp)
    return getattr(importlib.import_module(module, package=None), cls)


def instantiate_from_config(config):
    if "target" not in config:
        raise KeyError("Expected key `target` to instantiate.")
    return get_obj_from_str(config["target"])(**config.get("params", {}))


def load_model_from_config(config, sd, gpu=True, eval_mode=True):
    model = instantiate_from_config(config)
    if sd is not None:
        model.load_state_dict(sd)
    if gpu:
        model.cuda()
    if eval_mode:
        model.eval()
    return {"model": model}


def load_model(config, ckpt, gpu, eval_mode):
    if ckpt:
        pl_sd = torch.load(ckpt, map_location="cpu")
        global_step = pl_sd["global_step"]
        print(f"loaded model from global step {global_step}.")
    else:
        pl_sd = {"state_dict": None}
        global_step = None
    model = load_model_from_config(config.model, pl_sd["state_dict"], gpu=gpu, eval_mode=eval_mode)["model"]
    return model, global_step
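A hedged end-to-end sketch of these helpers, relying on the default checkpoint paths baked into load_vqgan:

device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
vqgan = load_vqgan(device)                      # uses the ./model_checkpoints defaults
x = torch.randn(1, 3, 256, 256, device=device)  # placeholder image batch
x_rec = reconstruct_with_vqgan(x, vqgan)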
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available


_import_structure = {
    "configuration_bloom": ["BLOOM_PRETRAINED_CONFIG_ARCHIVE_MAP", "BloomConfig", "BloomOnnxConfig"],
}

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_bloom_fast"] = ["BloomTokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_bloom"] = [
        "BLOOM_PRETRAINED_MODEL_ARCHIVE_LIST",
        "BloomForCausalLM",
        "BloomModel",
        "BloomPreTrainedModel",
        "BloomForSequenceClassification",
        "BloomForTokenClassification",
        "BloomForQuestionAnswering",
    ]

if TYPE_CHECKING:
    from .configuration_bloom import BLOOM_PRETRAINED_CONFIG_ARCHIVE_MAP, BloomConfig, BloomOnnxConfig

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_bloom_fast import BloomTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_bloom import (
            BLOOM_PRETRAINED_MODEL_ARCHIVE_LIST,
            BloomForCausalLM,
            BloomForQuestionAnswering,
            BloomForSequenceClassification,
            BloomForTokenClassification,
            BloomModel,
            BloomPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


_import_structure = {
    "configuration_table_transformer": [
        "TABLE_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "TableTransformerConfig",
        "TableTransformerOnnxConfig",
    ]
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_table_transformer"] = [
        "TABLE_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TableTransformerForObjectDetection",
        "TableTransformerModel",
        "TableTransformerPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_table_transformer import (
        TABLE_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
        TableTransformerConfig,
        TableTransformerOnnxConfig,
    )

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_table_transformer import (
            TABLE_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            TableTransformerForObjectDetection,
            TableTransformerModel,
            TableTransformerPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
import os
from typing import Dict, List, Tuple, TypeVar, Union


T = TypeVar("T")

ListLike = Union[List[T], Tuple[T, ...]]
NestedDataStructureLike = Union[T, List[T], Dict[str, T]]
PathLike = Union[str, bytes, os.PathLike]
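These aliases type I/O-facing signatures throughout the library; for instance, PathLike accepts str, bytes, or any os.PathLike object:

# Small illustration of using the alias in a signature.
def read_bytes(path: PathLike) -> bytes:
    with open(path, "rb") as f:
        return f.read()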
import torch

from diffusers import DDPMParallelScheduler

from .test_schedulers import SchedulerCommonTest


class DDPMParallelSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (DDPMParallelScheduler,)

    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 1000,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
            "variance_type": "fixed_small",
            "clip_sample": True,
        }
        config.update(**kwargs)
        return config

    def test_timesteps(self):
        for timesteps in [1, 5, 100, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_betas(self):
        for beta_start, beta_end in zip([0.0001, 0.001, 0.01, 0.1], [0.002, 0.02, 0.2, 2]):
            self.check_over_configs(beta_start=beta_start, beta_end=beta_end)

    def test_schedules(self):
        for schedule in ["linear", "squaredcos_cap_v2"]:
            self.check_over_configs(beta_schedule=schedule)

    def test_variance_type(self):
        for variance in ["fixed_small", "fixed_large", "other"]:
            self.check_over_configs(variance_type=variance)

    def test_clip_sample(self):
        for clip_sample in [True, False]:
            self.check_over_configs(clip_sample=clip_sample)

    def test_thresholding(self):
        self.check_over_configs(thresholding=False)
        for threshold in [0.5, 1.0, 2.0]:
            for prediction_type in ["epsilon", "sample", "v_prediction"]:
                self.check_over_configs(
                    thresholding=True,
                    prediction_type=prediction_type,
                    sample_max_value=threshold,
                )

    def test_prediction_type(self):
        for prediction_type in ["epsilon", "sample", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)

    def test_time_indices(self):
        for t in [0, 500, 999]:
            self.check_over_forward(time_step=t)

    def test_variance(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        assert torch.sum(torch.abs(scheduler._get_variance(0) - 0.0)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(487) - 0.00979)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(999) - 0.02)) < 1e-5

    def test_batch_step_no_noise(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)
        num_trained_timesteps = len(scheduler)

        model = self.dummy_model()
        sample1 = self.dummy_sample_deter
        sample2 = self.dummy_sample_deter + 0.1
        sample3 = self.dummy_sample_deter - 0.1

        per_sample_batch = sample1.shape[0]
        samples = torch.stack([sample1, sample2, sample3], dim=0)
        timesteps = torch.arange(num_trained_timesteps)[0:3, None].repeat(1, per_sample_batch)

        residual = model(samples.flatten(0, 1), timesteps.flatten(0, 1))
        pred_prev_sample = scheduler.batch_step_no_noise(residual, timesteps.flatten(0, 1), samples.flatten(0, 1))

        result_sum = torch.sum(torch.abs(pred_prev_sample))
        result_mean = torch.mean(torch.abs(pred_prev_sample))

        assert abs(result_sum.item() - 1153.1833) < 1e-2
        assert abs(result_mean.item() - 0.5005) < 1e-3

    def test_full_loop_no_noise(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)
        num_trained_timesteps = len(scheduler)

        model = self.dummy_model()
        sample = self.dummy_sample_deter
        generator = torch.manual_seed(0)

        for t in reversed(range(num_trained_timesteps)):
            # 1. predict noise residual
            residual = model(sample, t)

            # 2. predict previous mean of sample x_t-1
            pred_prev_sample = scheduler.step(residual, t, sample, generator=generator).prev_sample

            sample = pred_prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 258.9606) < 1e-2
        assert abs(result_mean.item() - 0.3372) < 1e-3

    def test_full_loop_with_v_prediction(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(prediction_type="v_prediction")
        scheduler = scheduler_class(**scheduler_config)
        num_trained_timesteps = len(scheduler)

        model = self.dummy_model()
        sample = self.dummy_sample_deter
        generator = torch.manual_seed(0)

        for t in reversed(range(num_trained_timesteps)):
            # 1. predict noise residual
            residual = model(sample, t)

            # 2. predict previous mean of sample x_t-1
            pred_prev_sample = scheduler.step(residual, t, sample, generator=generator).prev_sample

            sample = pred_prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 202.0296) < 1e-2
        assert abs(result_mean.item() - 0.2631) < 1e-3

    def test_custom_timesteps(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = [100, 87, 50, 1, 0]
        scheduler.set_timesteps(timesteps=timesteps)

        scheduler_timesteps = scheduler.timesteps

        for i, timestep in enumerate(scheduler_timesteps):
            if i == len(scheduler_timesteps) - 1:
                expected_prev_t = -1
            else:
                expected_prev_t = scheduler_timesteps[i + 1]

            prev_t = scheduler.previous_timestep(timestep)
            prev_t = prev_t.item()

            self.assertEqual(prev_t, expected_prev_t)

    def test_custom_timesteps_increasing_order(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = [100, 87, 50, 51, 0]

        with self.assertRaises(ValueError, msg="`custom_timesteps` must be in descending order."):
            scheduler.set_timesteps(timesteps=timesteps)

    def test_custom_timesteps_passing_both_num_inference_steps_and_timesteps(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = [100, 87, 50, 1, 0]
        num_inference_steps = len(timesteps)

        with self.assertRaises(ValueError, msg="Can only pass one of `num_inference_steps` or `custom_timesteps`."):
            scheduler.set_timesteps(num_inference_steps=num_inference_steps, timesteps=timesteps)

    def test_custom_timesteps_too_large(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = [scheduler.config.num_train_timesteps]

        with self.assertRaises(
            ValueError,
            msg="`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}}",
        ):
            scheduler.set_timesteps(timesteps=timesteps)
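The custom-timestep tests pin down set_timesteps behaviour; driving the scheduler with an explicit descending list looks like the sketch below (mirroring test_custom_timesteps above):

from diffusers import DDPMParallelScheduler

scheduler = DDPMParallelScheduler(num_train_timesteps=1000)
scheduler.set_timesteps(timesteps=[999, 750, 500, 250, 0])
print(scheduler.timesteps)  # tensor([999, 750, 500, 250,   0])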
import os
from shutil import copyfile
from typing import List, Optional, Tuple

from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging


if is_sentencepiece_available():
    from .tokenization_big_bird import BigBirdTokenizer
else:
    BigBirdTokenizer = None

logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "spiece.model", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "google/bigbird-roberta-base": "https://huggingface.co/google/bigbird-roberta-base/resolve/main/spiece.model",
        "google/bigbird-roberta-large": (
            "https://huggingface.co/google/bigbird-roberta-large/resolve/main/spiece.model"
        ),
        "google/bigbird-base-trivia-itc": (
            "https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/spiece.model"
        ),
    },
    "tokenizer_file": {
        "google/bigbird-roberta-base": (
            "https://huggingface.co/google/bigbird-roberta-base/resolve/main/tokenizer.json"
        ),
        "google/bigbird-roberta-large": (
            "https://huggingface.co/google/bigbird-roberta-large/resolve/main/tokenizer.json"
        ),
        "google/bigbird-base-trivia-itc": (
            "https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/tokenizer.json"
        ),
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "google/bigbird-roberta-base": 4096,
    "google/bigbird-roberta-large": 4096,
    "google/bigbird-base-trivia-itc": 4096,
}

SPIECE_UNDERLINE = "▁"


class BigBirdTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = BigBirdTokenizer
    model_input_names = ["input_ids", "attention_mask"]
    prefix_tokens: List[int] = []

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        unk_token="<unk>",
        bos_token="<s>",
        eos_token="</s>",
        pad_token="<pad>",
        sep_token="[SEP]",
        mask_token="[MASK]",
        cls_token="[CLS]",
        **kwargs,
    ):
        bos_token = AddedToken(bos_token, lstrip=False, rstrip=False) if isinstance(bos_token, str) else bos_token
        eos_token = AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token
        unk_token = AddedToken(unk_token, lstrip=False, rstrip=False) if isinstance(unk_token, str) else unk_token
        pad_token = AddedToken(pad_token, lstrip=False, rstrip=False) if isinstance(pad_token, str) else pad_token
        cls_token = AddedToken(cls_token, lstrip=False, rstrip=False) if isinstance(cls_token, str) else cls_token
        sep_token = AddedToken(sep_token, lstrip=False, rstrip=False) if isinstance(sep_token, str) else sep_token

        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            **kwargs,
        )

        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return cls + token_ids_0 + sep
        return cls + token_ids_0 + sep + token_ids_1 + sep

    def get_special_tokens_mask(
        self,
        token_ids_0: List[int],
        token_ids_1: Optional[List[int]] = None,
        already_has_special_tokens: bool = False,
    ) -> List[int]:
        if already_has_special_tokens:
            if token_ids_1 is not None:
                raise ValueError(
                    "You should not supply a second sequence if the provided sequence of "
                    "ids is already formatted with special tokens for the model."
                )
            return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_0]

        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
                "tokenizer."
            )

        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)

        return (out_vocab_file,)
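A hedged usage sketch: sentence pairs come out as [CLS] A [SEP] B [SEP], matching build_inputs_with_special_tokens above (the decoded spacing shown in the comment is indicative only):

tok = BigBirdTokenizerFast.from_pretrained("google/bigbird-roberta-base")
enc = tok("Hello world", "How are you?")
print(tok.decode(enc["input_ids"]))  # roughly: [CLS] Hello world[SEP] How are you?[SEP]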
import gc
import random
import unittest

import numpy as np
import torch
from PIL import Image
from transformers import XLMRobertaTokenizerFast

from diffusers import DDIMScheduler, KandinskyInpaintPipeline, KandinskyPriorPipeline, UNet2DConditionModel, VQModel
from diffusers.pipelines.kandinsky.text_encoder import MCLIPConfig, MultilingualCLIP
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu

from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference


enable_full_determinism()


class KandinskyInpaintPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = KandinskyInpaintPipeline
    params = ["prompt", "image_embeds", "negative_image_embeds", "image", "mask_image"]
    batch_params = [
        "prompt",
        "negative_prompt",
        "image_embeds",
        "negative_image_embeds",
        "image",
        "mask_image",
    ]
    required_optional_params = [
        "generator",
        "height",
        "width",
        "latents",
        "guidance_scale",
        "negative_prompt",
        "num_inference_steps",
        "return_dict",
        "guidance_scale",
        "num_images_per_prompt",
        "output_type",
        "return_dict",
    ]
    test_xformers_attention = False

    @property
    def text_embedder_hidden_size(self):
        return 32

    @property
    def time_input_dim(self):
        return 32

    @property
    def block_out_channels_0(self):
        return self.time_input_dim

    @property
    def time_embed_dim(self):
        return self.time_input_dim * 4

    @property
    def cross_attention_dim(self):
        return 100

    @property
    def dummy_tokenizer(self):
        tokenizer = XLMRobertaTokenizerFast.from_pretrained("YiYiXu/tiny-random-mclip-base")
        return tokenizer

    @property
    def dummy_text_encoder(self):
        torch.manual_seed(0)
        config = MCLIPConfig(
            numDims=self.cross_attention_dim,
            transformerDimensions=self.text_embedder_hidden_size,
            hidden_size=self.text_embedder_hidden_size,
            intermediate_size=37,
            num_attention_heads=4,
            num_hidden_layers=5,
            vocab_size=1005,
        )

        text_encoder = MultilingualCLIP(config)
        text_encoder = text_encoder.eval()

        return text_encoder

    @property
    def dummy_unet(self):
        torch.manual_seed(0)

        model_kwargs = {
            "in_channels": 9,
            # Out channels is double in channels because predicts mean and variance
            "out_channels": 8,
            "addition_embed_type": "text_image",
            "down_block_types": ("ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D"),
            "up_block_types": ("SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"),
            "mid_block_type": "UNetMidBlock2DSimpleCrossAttn",
            "block_out_channels": (self.block_out_channels_0, self.block_out_channels_0 * 2),
            "layers_per_block": 1,
            "encoder_hid_dim": self.text_embedder_hidden_size,
            "encoder_hid_dim_type": "text_image_proj",
            "cross_attention_dim": self.cross_attention_dim,
            "attention_head_dim": 4,
            "resnet_time_scale_shift": "scale_shift",
            "class_embed_type": None,
        }

        model = UNet2DConditionModel(**model_kwargs)
        return model

    @property
    def dummy_movq_kwargs(self):
        return {
            "block_out_channels": [32, 64],
            "down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
            "in_channels": 3,
            "latent_channels": 4,
            "layers_per_block": 1,
            "norm_num_groups": 8,
            "norm_type": "spatial",
            "num_vq_embeddings": 12,
            "out_channels": 3,
            "up_block_types": [
                "AttnUpDecoderBlock2D",
                "UpDecoderBlock2D",
            ],
            "vq_embed_dim": 4,
        }

    @property
    def dummy_movq(self):
        torch.manual_seed(0)
        model = VQModel(**self.dummy_movq_kwargs)
        return model

    def get_dummy_components(self):
        text_encoder = self.dummy_text_encoder
        tokenizer = self.dummy_tokenizer
        unet = self.dummy_unet
        movq = self.dummy_movq

        scheduler = DDIMScheduler(
            num_train_timesteps=1000,
            beta_schedule="linear",
            beta_start=0.00085,
            beta_end=0.012,
            clip_sample=False,
            set_alpha_to_one=False,
            steps_offset=1,
            prediction_type="epsilon",
            thresholding=False,
        )

        components = {
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "unet": unet,
            "scheduler": scheduler,
            "movq": movq,
        }

        return components

    def get_dummy_inputs(self, device, seed=0):
        image_embeds = floats_tensor((1, self.cross_attention_dim), rng=random.Random(seed)).to(device)
        negative_image_embeds = floats_tensor((1, self.cross_attention_dim), rng=random.Random(seed + 1)).to(device)
        # create init_image
        image = floats_tensor((1, 3, 64, 64), rng=random.Random(seed)).to(device)
        image = image.cpu().permute(0, 2, 3, 1)[0]
        init_image = Image.fromarray(np.uint8(image)).convert("RGB").resize((256, 256))
        # create mask (reconstructed slice bounds; the source only preserved the array shape and fill values)
        mask = np.ones((64, 64), dtype=np.float32)
        mask[:32, :32] = 0

        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "horse",
            "image": init_image,
            "mask_image": mask,
            "image_embeds": image_embeds,
            "negative_image_embeds": negative_image_embeds,
            "generator": generator,
            "height": 64,
            "width": 64,
            "num_inference_steps": 2,
            "guidance_scale": 4.0,
            "output_type": "np",
        }
        return inputs

    def test_kandinsky_inpaint(self):
        device = "cpu"

        components = self.get_dummy_components()

        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)

        pipe.set_progress_bar_config(disable=None)

        output = pipe(**self.get_dummy_inputs(device))
        image = output.images

        image_from_tuple = pipe(
            **self.get_dummy_inputs(device),
            return_dict=False,
        )[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        print(f"image.shape {image.shape}")

        assert image.shape == (1, 64, 64, 3)

        expected_slice = np.array(
            [0.8326919, 0.73790467, 0.20918581, 0.9309612, 0.5511791, 0.43713328, 0.5513321, 0.49922934, 0.59497786]
        )

        assert (
            np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        ), f" expected_slice {expected_slice}, but got {image_slice.flatten()}"
        assert (
            np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
        ), f" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"

    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(expected_max_diff=3e-3)


@slow
@require_torch_gpu
class KandinskyInpaintPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_kandinsky_inpaint(self):
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/kandinsky/kandinsky_inpaint_cat_with_hat_fp16.npy"
        )

        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/kandinsky/cat.png"
        )
        mask = np.ones((768, 768), dtype=np.float32)
        mask[:250, 250:-250] = 0  # reconstructed slice bounds; the source only preserved shape and fill values

        prompt = "a hat"

        pipe_prior = KandinskyPriorPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-1-prior", torch_dtype=torch.float16
        )
        pipe_prior.to(torch_device)

        pipeline = KandinskyInpaintPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-1-inpaint", torch_dtype=torch.float16
        )
        pipeline = pipeline.to(torch_device)
        pipeline.set_progress_bar_config(disable=None)

        generator = torch.Generator(device="cpu").manual_seed(0)
        image_emb, zero_image_emb = pipe_prior(
            prompt,
            generator=generator,
            num_inference_steps=5,
            negative_prompt="",
        ).to_tuple()

        output = pipeline(
            prompt,
            image=init_image,
            mask_image=mask,
            image_embeds=image_emb,
            negative_image_embeds=zero_image_emb,
            generator=generator,
            num_inference_steps=100,
            height=768,
            width=768,
            output_type="np",
        )

        image = output.images[0]

        assert image.shape == (768, 768, 3)

        assert_mean_pixel_difference(image, expected_image)
348
from collections.abc import Callable from math import pi, sqrt from random import uniform from statistics import mean def lowerCAmelCase_ ( __lowerCAmelCase )-> Optional[Any]: '''simple docstring''' def is_in_circle(__lowerCAmelCase , __lowerCAmelCase ) -> bool: UpperCAmelCase : List[Any] =sqrt((x**2) + (y**2) ) # Our circle has a radius of 1, so a distance # greater than 1 would land outside the circle. return distance_from_centre <= 1 # The proportion of guesses that landed in the circle UpperCAmelCase : List[Any] =mean( int(is_in_circle(uniform(-1.0 , 1.0 ) , uniform(-1.0 , 1.0 ) ) ) for _ in range(__lowerCAmelCase ) ) # The ratio of the circle's area to the square's area is pi/4. UpperCAmelCase : Dict =proportion * 4 print(f'''The estimated value of pi is {pi_estimate}''' ) print(f'''The math module's value of pi is {pi}''' ) print(f'''The total error is {abs(pi - pi_estimate )}''' ) def lowerCAmelCase_ ( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = 0.0 , __lowerCAmelCase = 1.0 , )-> float: '''simple docstring''' return mean( function_to_integrate(uniform(__lowerCAmelCase , __lowerCAmelCase ) ) for _ in range(__lowerCAmelCase ) ) * (max_value - min_value) def lowerCAmelCase_ ( __lowerCAmelCase , __lowerCAmelCase = 0.0 , __lowerCAmelCase = 1.0 )-> None: '''simple docstring''' def identity_function(__lowerCAmelCase ) -> float: return x UpperCAmelCase : List[Any] =area_under_curve_estimator( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) UpperCAmelCase : Dict =(max_value * max_value - min_value * min_value) / 2 print('''******************''' ) print(f'''Estimating area under y=x where x varies from {min_value} to {max_value}''' ) print(f'''Estimated value is {estimated_value}''' ) print(f'''Expected value is {expected_value}''' ) print(f'''Total error is {abs(estimated_value - expected_value )}''' ) print('''******************''' ) def lowerCAmelCase_ ( __lowerCAmelCase )-> None: '''simple docstring''' def function_to_integrate(__lowerCAmelCase ) -> float: return sqrt(4.0 - x * x ) UpperCAmelCase : Dict =area_under_curve_estimator( __lowerCAmelCase , __lowerCAmelCase , 0.0 , 2.0 ) print('''******************''' ) print('''Estimating pi using area_under_curve_estimator''' ) print(f'''Estimated value is {estimated_value}''' ) print(f'''Expected value is {pi}''' ) print(f'''Total error is {abs(estimated_value - pi )}''' ) print('''******************''' ) if __name__ == "__main__": import doctest doctest.testmod()
348
1
def lowerCAmelCase_ ( __lowerCAmelCase = 10_00 )-> int: '''simple docstring''' UpperCAmelCase : Optional[Any] =-1 UpperCAmelCase : Tuple =0 for a in range(1 , n // 3 ): # Solving the two equations a**2+b**2=c**2 and a+b+c=N eliminating c UpperCAmelCase : int =(n * n - 2 * a * n) // (2 * n - 2 * a) UpperCAmelCase : List[str] =n - a - b if c * c == (a * a + b * b): UpperCAmelCase : str =a * b * c if candidate >= product: UpperCAmelCase : List[Any] =candidate return product if __name__ == "__main__": print(f'{solution() = }')
348
from __future__ import annotations import unittest import numpy as np from transformers import BlipTextConfig from transformers.testing_utils import require_tf, slow from transformers.utils import is_tf_available from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask if is_tf_available(): import tensorflow as tf from transformers import TFBlipTextModel from transformers.models.blip.modeling_tf_blip import TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST class __snake_case : def __init__( self , snake_case__ , snake_case__=12 , snake_case__=7 , snake_case__=True , snake_case__=True , snake_case__=True , snake_case__=99 , snake_case__=32 , snake_case__=32 , snake_case__=2 , snake_case__=4 , snake_case__=37 , snake_case__=0.1 , snake_case__=0.1 , snake_case__=512 , snake_case__=0.02 , snake_case__=0 , snake_case__=None , ) -> Tuple: '''simple docstring''' UpperCAmelCase : List[Any] =parent UpperCAmelCase : Optional[int] =batch_size UpperCAmelCase : List[Any] =seq_length UpperCAmelCase : Optional[int] =is_training UpperCAmelCase : Union[str, Any] =use_input_mask UpperCAmelCase : Tuple =use_labels UpperCAmelCase : Union[str, Any] =vocab_size UpperCAmelCase : Tuple =hidden_size UpperCAmelCase : Dict =projection_dim UpperCAmelCase : Optional[int] =num_hidden_layers UpperCAmelCase : Dict =num_attention_heads UpperCAmelCase : int =intermediate_size UpperCAmelCase : Any =dropout UpperCAmelCase : Union[str, Any] =attention_dropout UpperCAmelCase : Union[str, Any] =max_position_embeddings UpperCAmelCase : List[str] =initializer_range UpperCAmelCase : str =scope UpperCAmelCase : str =bos_token_id def UpperCAmelCase__ ( self ) -> int: '''simple docstring''' UpperCAmelCase : int =ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) UpperCAmelCase : int =None if self.use_input_mask: UpperCAmelCase : Union[str, Any] =random_attention_mask([self.batch_size, self.seq_length] ) if input_mask is not None: UpperCAmelCase : Optional[int] =input_mask.numpy() UpperCAmelCase , UpperCAmelCase : List[Any] =input_mask.shape UpperCAmelCase : Optional[Any] =np.random.randint(1 , seq_length - 1 , size=(batch_size,) ) for batch_idx, start_index in enumerate(snake_case__ ): UpperCAmelCase : List[Any] =1 UpperCAmelCase : Tuple =0 UpperCAmelCase : List[Any] =self.get_config() return config, input_ids, tf.convert_to_tensor(snake_case__ ) def UpperCAmelCase__ ( self ) -> Union[str, Any]: '''simple docstring''' return BlipTextConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , projection_dim=self.projection_dim , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , dropout=self.dropout , attention_dropout=self.attention_dropout , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , bos_token_id=self.bos_token_id , ) def UpperCAmelCase__ ( self , snake_case__ , snake_case__ , snake_case__ ) -> Dict: '''simple docstring''' UpperCAmelCase : Tuple =TFBlipTextModel(config=snake_case__ ) UpperCAmelCase : List[Any] =model(snake_case__ , attention_mask=snake_case__ , training=snake_case__ ) UpperCAmelCase : str =model(snake_case__ , training=snake_case__ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) ) def UpperCAmelCase__ ( self ) -> Optional[Any]: 
'''simple docstring''' UpperCAmelCase : List[str] =self.prepare_config_and_inputs() UpperCAmelCase , UpperCAmelCase , UpperCAmelCase : List[Any] =config_and_inputs UpperCAmelCase : Optional[int] ={'''input_ids''': input_ids, '''attention_mask''': input_mask} return config, inputs_dict @require_tf class __snake_case ( lowerCamelCase__ , unittest.TestCase ): __lowerCamelCase : Optional[int] = (TFBlipTextModel,) if is_tf_available() else () __lowerCamelCase : Dict = False __lowerCamelCase : Optional[Any] = False __lowerCamelCase : Dict = False def UpperCAmelCase__ ( self ) -> Optional[Any]: '''simple docstring''' UpperCAmelCase : str =BlipTextModelTester(self ) UpperCAmelCase : Optional[int] =ConfigTester(self , config_class=snake_case__ , hidden_size=37 ) def UpperCAmelCase__ ( self ) -> Optional[int]: '''simple docstring''' self.config_tester.run_common_tests() def UpperCAmelCase__ ( self ) -> List[Any]: '''simple docstring''' UpperCAmelCase : Any =self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*snake_case__ ) def UpperCAmelCase__ ( self ) -> Optional[int]: '''simple docstring''' pass def UpperCAmelCase__ ( self ) -> Union[str, Any]: '''simple docstring''' pass @unittest.skip(reason='''Blip does not use inputs_embeds''' ) def UpperCAmelCase__ ( self ) -> str: '''simple docstring''' pass @unittest.skip(reason='''BlipTextModel has no base class and is not available in MODEL_MAPPING''' ) def UpperCAmelCase__ ( self ) -> Optional[int]: '''simple docstring''' pass @unittest.skip(reason='''BlipTextModel has no base class and is not available in MODEL_MAPPING''' ) def UpperCAmelCase__ ( self ) -> str: '''simple docstring''' pass @slow def UpperCAmelCase__ ( self ) -> Tuple: '''simple docstring''' for model_name in TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: UpperCAmelCase : Optional[Any] =TFBlipTextModel.from_pretrained(snake_case__ ) self.assertIsNotNone(snake_case__ ) def UpperCAmelCase__ ( self , snake_case__=True ) -> Any: '''simple docstring''' super().test_pt_tf_model_equivalence(allow_missing_keys=snake_case__ )
348
1
from collections import OrderedDict from typing import List, Mapping from packaging import version from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging __snake_case = logging.get_logger(__name__) __snake_case = { '''google/efficientnet-b7''': '''https://huggingface.co/google/efficientnet-b7/resolve/main/config.json''', } class __snake_case ( lowerCamelCase__ ): __lowerCamelCase : int = """efficientnet""" def __init__( self , snake_case__ = 3 , snake_case__ = 600 , snake_case__ = 2.0 , snake_case__ = 3.1 , snake_case__ = 8 , snake_case__ = [3, 3, 5, 3, 5, 5, 3] , snake_case__ = [32, 16, 24, 40, 80, 112, 192] , snake_case__ = [16, 24, 40, 80, 112, 192, 320] , snake_case__ = [] , snake_case__ = [1, 2, 2, 2, 1, 2, 1] , snake_case__ = [1, 2, 2, 3, 3, 4, 1] , snake_case__ = [1, 6, 6, 6, 6, 6, 6] , snake_case__ = 0.25 , snake_case__ = "swish" , snake_case__ = 2560 , snake_case__ = "mean" , snake_case__ = 0.02 , snake_case__ = 0.001 , snake_case__ = 0.99 , snake_case__ = 0.5 , snake_case__ = 0.2 , **snake_case__ , ) -> List[Any]: '''simple docstring''' super().__init__(**snake_case__ ) UpperCAmelCase : Union[str, Any] =num_channels UpperCAmelCase : Optional[Any] =image_size UpperCAmelCase : Tuple =width_coefficient UpperCAmelCase : Dict =depth_coefficient UpperCAmelCase : List[Any] =depth_divisor UpperCAmelCase : int =kernel_sizes UpperCAmelCase : Tuple =in_channels UpperCAmelCase : Tuple =out_channels UpperCAmelCase : List[Any] =depthwise_padding UpperCAmelCase : List[Any] =strides UpperCAmelCase : int =num_block_repeats UpperCAmelCase : Optional[Any] =expand_ratios UpperCAmelCase : int =squeeze_expansion_ratio UpperCAmelCase : str =hidden_act UpperCAmelCase : Union[str, Any] =hidden_dim UpperCAmelCase : Optional[Any] =pooling_type UpperCAmelCase : List[Any] =initializer_range UpperCAmelCase : str =batch_norm_eps UpperCAmelCase : Dict =batch_norm_momentum UpperCAmelCase : Any =dropout_rate UpperCAmelCase : Optional[int] =drop_connect_rate UpperCAmelCase : str =sum(snake_case__ ) * 4 class __snake_case ( lowerCamelCase__ ): __lowerCamelCase : Optional[int] = version.parse("""1.11""" ) @property def UpperCAmelCase__ ( self ) -> Mapping[str, Mapping[int, str]]: '''simple docstring''' return OrderedDict( [ ('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}), ] ) @property def UpperCAmelCase__ ( self ) -> float: '''simple docstring''' return 1e-5
348
import torch import torch.nn as nn from transformers import CLIPConfig, CLIPVisionModel, PreTrainedModel from ...utils import logging __snake_case = logging.get_logger(__name__) def lowerCAmelCase_ ( __lowerCAmelCase , __lowerCAmelCase )-> str: '''simple docstring''' UpperCAmelCase : Dict =nn.functional.normalize(__lowerCAmelCase ) UpperCAmelCase : Tuple =nn.functional.normalize(__lowerCAmelCase ) return torch.mm(__lowerCAmelCase , normalized_text_embeds.t() ) class __snake_case ( lowerCamelCase__ ): __lowerCamelCase : List[str] = CLIPConfig __lowerCamelCase : List[Any] = ["""CLIPEncoderLayer"""] def __init__( self , snake_case__ ) -> Dict: '''simple docstring''' super().__init__(snake_case__ ) UpperCAmelCase : Dict =CLIPVisionModel(config.vision_config ) UpperCAmelCase : Optional[Any] =nn.Linear(config.vision_config.hidden_size , config.projection_dim , bias=snake_case__ ) UpperCAmelCase : int =nn.Parameter(torch.ones(17 , config.projection_dim ) , requires_grad=snake_case__ ) UpperCAmelCase : List[str] =nn.Parameter(torch.ones(3 , config.projection_dim ) , requires_grad=snake_case__ ) UpperCAmelCase : str =nn.Parameter(torch.ones(17 ) , requires_grad=snake_case__ ) UpperCAmelCase : Optional[int] =nn.Parameter(torch.ones(3 ) , requires_grad=snake_case__ ) @torch.no_grad() def UpperCAmelCase__ ( self , snake_case__ , snake_case__ ) -> Tuple: '''simple docstring''' UpperCAmelCase : Union[str, Any] =self.vision_model(snake_case__ )[1] # pooled_output UpperCAmelCase : Optional[Any] =self.visual_projection(snake_case__ ) # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16 UpperCAmelCase : List[str] =cosine_distance(snake_case__ , self.special_care_embeds ).cpu().float().numpy() UpperCAmelCase : Optional[Any] =cosine_distance(snake_case__ , self.concept_embeds ).cpu().float().numpy() UpperCAmelCase : Tuple =[] UpperCAmelCase : Dict =image_embeds.shape[0] for i in range(snake_case__ ): UpperCAmelCase : str ={'''special_scores''': {}, '''special_care''': [], '''concept_scores''': {}, '''bad_concepts''': []} # increase this value to create a stronger `nsfw` filter # at the cost of increasing the possibility of filtering benign images UpperCAmelCase : str =0.0 for concept_idx in range(len(special_cos_dist[0] ) ): UpperCAmelCase : Optional[Any] =special_cos_dist[i][concept_idx] UpperCAmelCase : Union[str, Any] =self.special_care_embeds_weights[concept_idx].item() UpperCAmelCase : str =round(concept_cos - concept_threshold + adjustment , 3 ) if result_img["special_scores"][concept_idx] > 0: result_img["special_care"].append({concept_idx, result_img['''special_scores'''][concept_idx]} ) UpperCAmelCase : int =0.01 for concept_idx in range(len(cos_dist[0] ) ): UpperCAmelCase : Any =cos_dist[i][concept_idx] UpperCAmelCase : Optional[int] =self.concept_embeds_weights[concept_idx].item() UpperCAmelCase : int =round(concept_cos - concept_threshold + adjustment , 3 ) if result_img["concept_scores"][concept_idx] > 0: result_img["bad_concepts"].append(snake_case__ ) result.append(snake_case__ ) UpperCAmelCase : Optional[int] =[len(res['''bad_concepts'''] ) > 0 for res in result] return images, has_nsfw_concepts @torch.no_grad() def UpperCAmelCase__ ( self , snake_case__ , snake_case__ ) -> Tuple: '''simple docstring''' UpperCAmelCase : Any =self.vision_model(snake_case__ )[1] # pooled_output UpperCAmelCase : List[str] =self.visual_projection(snake_case__ ) UpperCAmelCase : Any =cosine_distance(snake_case__ , self.special_care_embeds ) UpperCAmelCase :
Optional[Any] =cosine_distance(snake_case__ , self.concept_embeds ) # increase this value to create a stronger `nsfw` filter # at the cost of increasing the possibility of filtering benign images UpperCAmelCase : Optional[Any] =0.0 UpperCAmelCase : Any =special_cos_dist - self.special_care_embeds_weights + adjustment # special_scores = special_scores.round(decimals=3) UpperCAmelCase : str =torch.any(special_scores > 0 , dim=1 ) UpperCAmelCase : List[Any] =special_care * 0.01 UpperCAmelCase : Union[str, Any] =special_adjustment.unsqueeze(1 ).expand(-1 , cos_dist.shape[1] ) UpperCAmelCase : List[Any] =(cos_dist - self.concept_embeds_weights) + special_adjustment # concept_scores = concept_scores.round(decimals=3) UpperCAmelCase : str =torch.any(concept_scores > 0 , dim=1 ) return images, has_nsfw_concepts
348
1
import unittest from transformers.models.xlm_prophetnet.tokenization_xlm_prophetnet import SPIECE_UNDERLINE, XLMProphetNetTokenizer from transformers.testing_utils import get_tests_dir, require_sentencepiece, slow from transformers.utils import cached_property from ...test_tokenization_common import TokenizerTesterMixin __snake_case = get_tests_dir('''fixtures/test_sentencepiece.model''') @require_sentencepiece class __snake_case ( lowerCamelCase__ , unittest.TestCase ): __lowerCamelCase : Optional[int] = XLMProphetNetTokenizer __lowerCamelCase : Any = False __lowerCamelCase : List[str] = True def UpperCAmelCase__ ( self ) -> str: '''simple docstring''' super().setUp() # We have a SentencePiece fixture for testing UpperCAmelCase : Optional[Any] =XLMProphetNetTokenizer(snake_case__ , keep_accents=snake_case__ ) tokenizer.save_pretrained(self.tmpdirname ) def UpperCAmelCase__ ( self ) -> int: '''simple docstring''' UpperCAmelCase : int ='''[PAD]''' UpperCAmelCase : Optional[Any] =0 self.assertEqual(self.get_tokenizer()._convert_token_to_id(snake_case__ ) , snake_case__ ) self.assertEqual(self.get_tokenizer()._convert_id_to_token(snake_case__ ) , snake_case__ ) def UpperCAmelCase__ ( self ) -> Any: '''simple docstring''' UpperCAmelCase : Optional[Any] =list(self.get_tokenizer().get_vocab().keys() ) self.assertEqual(vocab_keys[0] , '''[PAD]''' ) self.assertEqual(vocab_keys[1] , '''[CLS]''' ) self.assertEqual(vocab_keys[-1] , '''j''' ) self.assertEqual(len(snake_case__ ) , 1012 ) def UpperCAmelCase__ ( self ) -> Optional[int]: '''simple docstring''' self.assertEqual(self.get_tokenizer().vocab_size , 1012 ) def UpperCAmelCase__ ( self ) -> Any: '''simple docstring''' UpperCAmelCase : List[str] =XLMProphetNetTokenizer(snake_case__ , keep_accents=snake_case__ ) UpperCAmelCase : Any =tokenizer.tokenize('''This is a test''' ) self.assertListEqual(snake_case__ , ['''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est'''] ) self.assertListEqual( tokenizer.convert_tokens_to_ids(snake_case__ ) , [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]] , ) UpperCAmelCase : List[str] =tokenizer.tokenize('''I was born in 92000, and this is falsé.''' ) self.assertListEqual( snake_case__ , [ SPIECE_UNDERLINE + '''I''', SPIECE_UNDERLINE + '''was''', SPIECE_UNDERLINE + '''b''', '''or''', '''n''', SPIECE_UNDERLINE + '''in''', SPIECE_UNDERLINE + '''''', '''9''', '''2''', '''0''', '''0''', '''0''', ''',''', SPIECE_UNDERLINE + '''and''', SPIECE_UNDERLINE + '''this''', SPIECE_UNDERLINE + '''is''', SPIECE_UNDERLINE + '''f''', '''al''', '''s''', '''é''', '''.''', ] , ) UpperCAmelCase : str =tokenizer.convert_tokens_to_ids(snake_case__ ) self.assertListEqual( snake_case__ , [ value + tokenizer.fairseq_offset for value in [8, 21, 84, 55, 24, 19, 7, -9, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, -9, 4] ] , ) UpperCAmelCase : int =tokenizer.convert_ids_to_tokens(snake_case__ ) self.assertListEqual( snake_case__ , [ SPIECE_UNDERLINE + '''I''', SPIECE_UNDERLINE + '''was''', SPIECE_UNDERLINE + '''b''', '''or''', '''n''', SPIECE_UNDERLINE + '''in''', SPIECE_UNDERLINE + '''''', '''[UNK]''', '''2''', '''0''', '''0''', '''0''', ''',''', SPIECE_UNDERLINE + '''and''', SPIECE_UNDERLINE + '''this''', SPIECE_UNDERLINE + '''is''', SPIECE_UNDERLINE + '''f''', '''al''', '''s''', '''[UNK]''', '''.''', ] , ) @cached_property def UpperCAmelCase__ ( self ) -> Dict: '''simple docstring''' return XLMProphetNetTokenizer.from_pretrained('''microsoft/xprophetnet-large-wiki100-cased''' ) @slow def UpperCAmelCase__ ( self ) -> 
List[Any]: '''simple docstring''' UpperCAmelCase : Any ='''Hello World!''' UpperCAmelCase : Optional[int] =[3_5389, 6672, 49, 2] self.assertListEqual(snake_case__ , self.big_tokenizer.encode(snake_case__ ) ) @slow def UpperCAmelCase__ ( self ) -> Dict: '''simple docstring''' UpperCAmelCase : List[Any] ={'''input_ids''': [[1_1073, 8_2783, 18, 26, 8_2783, 549, 5_1540, 248, 1_7209, 1301, 217, 20, 21_5186, 1325, 147, 1_7209, 1301, 217, 20, 5_6370, 53, 12_2020, 20, 1_6477, 27, 8_7355, 4548, 20, 4728, 7_8392, 17, 15_9969, 18, 26, 2_4491, 629, 15, 538, 2_2704, 5439, 15, 2788, 2_4491, 9885, 15, 4_3534, 605, 15, 814, 1_8403, 3_3200, 29, 15, 4_3534, 2_4458, 1_2410, 111, 2_4966, 8_3669, 9637, 14_4068, 26, 850, 2_2346, 27, 147, 2_4966, 8_3669, 8_3490, 26, 3_9113, 735, 27, 689, 656, 2800, 1339, 4600, 53, 12_2020, 11_5785, 34, 816, 1339, 4_6887, 18, 147, 5_3905, 1951, 4_2238, 4_1170, 1_7732, 834, 436, 15, 2_7523, 9_8733, 217, 147, 5542, 4981, 930, 1_7347, 16, 2], [2_0091, 629, 94, 8_2786, 58, 490, 20, 1528, 84, 5_3905, 344, 8_0592, 11_0128, 1_8822, 5267, 1306, 62, 15_2537, 308, 7997, 401, 12_4427, 549, 3_5442, 225, 109, 1_5055, 2_5748, 147, 7119, 4_3712, 34, 767, 13_5366, 18, 16, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [592, 6_3784, 11_9466, 17, 14_7808, 8_8214, 18, 656, 81, 32, 3296, 1_0280, 16, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501 # fmt: on self.tokenizer_integration_test_util( expected_encoding=snake_case__ , model_name='''microsoft/xprophetnet-large-wiki100-cased''' , revision='''1acad1643ddd54a44df6a1b797ada8373685d90e''' , )
348
import argparse import intel_extension_for_pytorch as ipex import torch from diffusers import DPMSolverMultistepScheduler, StableDiffusionPipeline __snake_case = argparse.ArgumentParser('''Stable Diffusion script with intel optimization''', add_help=False) parser.add_argument('''--dpm''', action='''store_true''', help='''Enable DPMSolver or not''') parser.add_argument('''--steps''', default=None, type=int, help='''Num inference steps''') __snake_case = parser.parse_args() __snake_case = '''cpu''' __snake_case = '''a lovely <dicoo> in red dress and hat, in the snowy and bright night, with many brightly lit buildings''' __snake_case = '''path-to-your-trained-model''' __snake_case = StableDiffusionPipeline.from_pretrained(model_id) if args.dpm: __snake_case = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config) __snake_case = pipe.to(device) # to channels last __snake_case = pipe.unet.to(memory_format=torch.channels_last) __snake_case = pipe.vae.to(memory_format=torch.channels_last) __snake_case = pipe.text_encoder.to(memory_format=torch.channels_last) if pipe.requires_safety_checker: __snake_case = pipe.safety_checker.to(memory_format=torch.channels_last) # optimize with ipex __snake_case = torch.randn(2, 4, 64, 64) __snake_case = torch.rand(1) * 9_99 __snake_case = torch.randn(2, 77, 7_68) __snake_case = (sample, timestep, encoder_hidden_status) try: __snake_case = ipex.optimize(pipe.unet.eval(), dtype=torch.bfloataa, inplace=True, sample_input=input_example) except Exception: __snake_case = ipex.optimize(pipe.unet.eval(), dtype=torch.bfloataa, inplace=True) __snake_case = ipex.optimize(pipe.vae.eval(), dtype=torch.bfloataa, inplace=True) __snake_case = ipex.optimize(pipe.text_encoder.eval(), dtype=torch.bfloataa, inplace=True) if pipe.requires_safety_checker: __snake_case = ipex.optimize(pipe.safety_checker.eval(), dtype=torch.bfloataa, inplace=True) # compute __snake_case = 6_66 __snake_case = torch.Generator(device).manual_seed(seed) __snake_case = {'''generator''': generator} if args.steps is not None: __snake_case = args.steps with torch.cpu.amp.autocast(enabled=True, dtype=torch.bfloataa): __snake_case = pipe(prompt, **generate_kwargs).images[0] # save image image.save('''generated.png''')
348
1
import numpy as np def lowerCAmelCase_ ( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = 1e-1_2 , __lowerCAmelCase = 1_00 , )-> tuple[float, np.ndarray]: '''simple docstring''' assert np.shape(__lowerCAmelCase )[0] == np.shape(__lowerCAmelCase )[1] # Ensure proper dimensionality. assert np.shape(__lowerCAmelCase )[0] == np.shape(__lowerCAmelCase )[0] # Ensure inputs are either both complex or both real assert np.iscomplexobj(__lowerCAmelCase ) == np.iscomplexobj(__lowerCAmelCase ) UpperCAmelCase : List[str] =np.iscomplexobj(__lowerCAmelCase ) if is_complex: # Ensure complex input_matrix is Hermitian assert np.array_equal(__lowerCAmelCase , input_matrix.conj().T ) # Set convergence to False. Will define convergence when we exceed max_iterations # or when we have small changes from one iteration to next. UpperCAmelCase : Dict =False UpperCAmelCase : Tuple =0 UpperCAmelCase : Any =0 UpperCAmelCase : List[str] =1e1_2 while not convergence: # Multiply the matrix by the vector. UpperCAmelCase : Optional[Any] =np.dot(__lowerCAmelCase , __lowerCAmelCase ) # Normalize the resulting output vector. UpperCAmelCase : List[Any] =w / np.linalg.norm(__lowerCAmelCase ) # Find the Rayleigh quotient # (faster than usual b/c we know vector is normalized already) UpperCAmelCase : Any =vector.conj().T if is_complex else vector.T UpperCAmelCase : Tuple =np.dot(__lowerCAmelCase , np.dot(__lowerCAmelCase , __lowerCAmelCase ) ) # Check convergence. UpperCAmelCase : Union[str, Any] =np.abs(lambda_ - lambda_previous ) / lambda_ iterations += 1 if error <= error_tol or iterations >= max_iterations: UpperCAmelCase : Optional[Any] =True UpperCAmelCase : int =lambda_ if is_complex: UpperCAmelCase : Dict =np.real(lambda_ ) return lambda_, vector def lowerCAmelCase_ ( )-> None: '''simple docstring''' UpperCAmelCase : Optional[int] =np.array([[41, 4, 20], [4, 26, 30], [20, 30, 50]] ) UpperCAmelCase : Optional[int] =np.array([41, 4, 20] ) UpperCAmelCase : Tuple =real_input_matrix.astype(np.complexaaa ) UpperCAmelCase : List[Any] =np.triu(1j * complex_input_matrix , 1 ) complex_input_matrix += imag_matrix complex_input_matrix += -1 * imag_matrix.T UpperCAmelCase : Any =np.array([41, 4, 20] ).astype(np.complexaaa ) for problem_type in ["real", "complex"]: if problem_type == "real": UpperCAmelCase : Any =real_input_matrix UpperCAmelCase : Union[str, Any] =real_vector elif problem_type == "complex": UpperCAmelCase : Union[str, Any] =complex_input_matrix UpperCAmelCase : List[str] =complex_vector # Our implementation. UpperCAmelCase , UpperCAmelCase : List[Any] =power_iteration(__lowerCAmelCase , __lowerCAmelCase ) # Numpy implementation. # Get eigenvalues and eigenvectors using built-in numpy # eigh (eigh used for symmetric or Hermitian matrices). UpperCAmelCase , UpperCAmelCase : Optional[int] =np.linalg.eigh(__lowerCAmelCase ) # Last eigenvalue is the maximum one. UpperCAmelCase : Union[str, Any] =eigen_values[-1] # Last column in this matrix is eigenvector corresponding to largest eigenvalue. UpperCAmelCase : Optional[int] =eigen_vectors[:, -1] # Check that our implementation and numpy give close answers. assert np.abs(eigen_value - eigen_value_max ) <= 1e-6 # Take absolute values element-wise of each eigenvector, # as they are only unique up to a minus sign. assert np.linalg.norm(np.abs(__lowerCAmelCase ) - np.abs(__lowerCAmelCase ) ) <= 1e-6 if __name__ == "__main__": import doctest doctest.testmod() test_power_iteration()
348
__snake_case = '''Input must be a string of 8 numbers plus letter''' __snake_case = '''TRWAGMYFPDXBNJZSQVHLCKE''' def lowerCAmelCase_ ( __lowerCAmelCase )-> bool: '''simple docstring''' if not isinstance(__lowerCAmelCase , __lowerCAmelCase ): UpperCAmelCase : Optional[Any] =f'''Expected string as input, found {type(__lowerCAmelCase ).__name__}''' raise TypeError(__lowerCAmelCase ) UpperCAmelCase : List[Any] =spanish_id.replace('''-''' , '''''' ).upper() if len(__lowerCAmelCase ) != 9: raise ValueError(__lowerCAmelCase ) try: UpperCAmelCase : int =int(spanish_id_clean[0:8] ) UpperCAmelCase : Optional[int] =spanish_id_clean[8] except ValueError as ex: raise ValueError(__lowerCAmelCase ) from ex if letter.isdigit(): raise ValueError(__lowerCAmelCase ) return letter == LOOKUP_LETTERS[number % 23] if __name__ == "__main__": import doctest doctest.testmod()
348
1
import argparse import json import os from collections import OrderedDict import numpy as np import tensorflow as tf import torch def lowerCAmelCase_ ( __lowerCAmelCase )-> List[Any]: '''simple docstring''' UpperCAmelCase : str =os.path.join(args.tf_model_dir , '''parameters.json''' ) UpperCAmelCase : List[Any] =json.loads(open(__lowerCAmelCase ).read() ) if not params: raise ValueError( f'''It seems that the json file at {parameter_file} is empty. Make sure you have a correct json file.''' ) if not args.output.endswith('''.pt''' ): UpperCAmelCase : Any =args.output + '''.pt''' UpperCAmelCase : str =OrderedDict() with tf.device('''/CPU:0''' ): UpperCAmelCase : Union[str, Any] =tf.train.load_checkpoint(args.tf_model_dir ) UpperCAmelCase : Tuple =reader.get_variable_to_shape_map() for key_name in shapes.keys(): UpperCAmelCase : Optional[int] =reader.get_tensor(__lowerCAmelCase ).astype(np.floataa ) if key_name.endswith('''/adam_m''' ) or key_name.endswith('''/adam_v''' ): continue if key_name.startswith('''pasts/''' ): if key_name.startswith('''pasts/mlp''' ): UpperCAmelCase : Optional[Any] =int(key_name[9] ) elif key_name.startswith('''pasts/out''' ): UpperCAmelCase : Optional[Any] =8 UpperCAmelCase : List[Any] ='''model.sqout.%d.weight''' % (player * 2) # enter to nn.Sequencial with Tanh, so 2 at a time UpperCAmelCase : Optional[Any] =vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix UpperCAmelCase : Any =torch.tensor(__lowerCAmelCase ) elif key_name.startswith('''model/moe''' ): UpperCAmelCase : str =int(key_name[9:].split('''/''' )[0] ) if key_name.endswith('''/switch_gating/kernel''' ): UpperCAmelCase : int ='''model.blocks.%d.feed_forward.mlp.router.classifier.weight''' % player UpperCAmelCase : List[str] =vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix UpperCAmelCase : Any =torch.tensor(__lowerCAmelCase ) elif key_name.endswith('''/softmlp/kernel''' ): UpperCAmelCase : Union[str, Any] ='''model.blocks.%d.feed_forward.soft_bypass_mlp.weight''' % player UpperCAmelCase : Union[str, Any] =vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix UpperCAmelCase : Optional[Any] =torch.tensor(__lowerCAmelCase ) elif key_name.endswith('''/wo/kernel''' ) or key_name.endswith('''/wi/kernel''' ): UpperCAmelCase : Union[str, Any] =key_name[-9:-7] for i in range(16 ): UpperCAmelCase : Optional[int] ='''model.blocks.%d.feed_forward.mlp.experts.expert_%d.%s.weight''' % (player, i, nlayer) UpperCAmelCase : Any =( vnp[i].transpose([1, 0] ).copy() ) # In Mesh-Tensorflow, it is one array, so it is divided UpperCAmelCase : List[str] =torch.tensor(__lowerCAmelCase ) elif key_name.startswith('''model/mlp''' ): UpperCAmelCase : int =int(key_name[9:].split('''/''' )[0] ) if key_name.endswith('''/p1/kernel''' ): UpperCAmelCase : Any ='''model.blocks.%d.feed_forward.mlp.wi.weight''' % player UpperCAmelCase : Tuple =vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix UpperCAmelCase : List[Any] =torch.tensor(__lowerCAmelCase ) elif key_name.endswith('''/p1/bias''' ): UpperCAmelCase : Union[str, Any] ='''model.blocks.%d.feed_forward.mlp.wi.bias''' % player UpperCAmelCase : Optional[int] =vnp.copy() # same because it is one dimensional UpperCAmelCase : List[Any] =torch.tensor(__lowerCAmelCase ) elif key_name.endswith('''/p2/kernel''' ): UpperCAmelCase : str ='''model.blocks.%d.feed_forward.mlp.wo.weight''' % player UpperCAmelCase : Optional[Any] =vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix UpperCAmelCase : List[str] 
=torch.tensor(__lowerCAmelCase ) elif key_name.endswith('''/p2/bias''' ): UpperCAmelCase : Dict ='''model.blocks.%d.feed_forward.mlp.wo.bias''' % player UpperCAmelCase : Tuple =vnp.copy() # same because it is one dimensional UpperCAmelCase : List[str] =torch.tensor(__lowerCAmelCase ) elif key_name.startswith('''model/ln''' ): UpperCAmelCase : Optional[int] =int(key_name[8:].split('''/''' )[0] ) if key_name.endswith('''/b''' ): UpperCAmelCase : Any ='''model.blocks.%d.feed_forward.norm.bias''' % player UpperCAmelCase : Optional[Any] =vnp.copy() # same because it is one dimensional UpperCAmelCase : Tuple =torch.tensor(__lowerCAmelCase ) elif key_name.endswith('''/g''' ): UpperCAmelCase : str ='''model.blocks.%d.feed_forward.norm.weight''' % player UpperCAmelCase : int =vnp.copy() # same because it is one dimensional UpperCAmelCase : Tuple =torch.tensor(__lowerCAmelCase ) elif key_name.startswith('''model/att''' ): UpperCAmelCase : int =int(key_name[9:].split('''/''' )[0] ) if key_name.endswith('''/qkv/kernel''' ): UpperCAmelCase : Any =vnp.copy() # Compute same dimension as Mesh-tensorflow using einsum UpperCAmelCase : List[str] =state[:, 0, :, :] UpperCAmelCase : Any =state[:, 1, :, :] UpperCAmelCase : Union[str, Any] =state[:, 2, :, :] UpperCAmelCase : int =( state_q.reshape([state_q.shape[0], state_q.shape[1] * state_q.shape[2]] ) .transpose([1, 0] ) .copy() ) # Mesh-Tensorflow is a diagonal matrix UpperCAmelCase : List[Any] =( state_k.reshape([state_k.shape[0], state_k.shape[1] * state_k.shape[2]] ) .transpose([1, 0] ) .copy() ) # Mesh-Tensorflow is a diagonal matrix UpperCAmelCase : Optional[Any] =( state_v.reshape([state_v.shape[0], state_v.shape[1] * state_v.shape[2]] ) .transpose([1, 0] ) .copy() ) # Mesh-Tensorflow is a diagonal matrix UpperCAmelCase : Any ='''model.blocks.%d.self_attn.self_attn.q_proj.weight''' % player UpperCAmelCase : Optional[Any] =torch.tensor(__lowerCAmelCase ) UpperCAmelCase : Union[str, Any] ='''model.blocks.%d.self_attn.self_attn.k_proj.weight''' % player UpperCAmelCase : Any =torch.tensor(__lowerCAmelCase ) UpperCAmelCase : List[Any] ='''model.blocks.%d.self_attn.self_attn.v_proj.weight''' % player UpperCAmelCase : List[Any] =torch.tensor(__lowerCAmelCase ) elif key_name.endswith('''/o/kernel''' ): UpperCAmelCase : List[str] ='''model.blocks.%d.self_attn.self_attn.out_proj.weight''' % player UpperCAmelCase : Union[str, Any] =( vnp.reshape([vnp.shape[0] * vnp.shape[1], vnp.shape[2]] ).transpose([1, 0] ).copy() ) # Mesh-Tensorflow is a diagonal matrix UpperCAmelCase : Dict =torch.tensor(__lowerCAmelCase ) elif key_name.startswith('''model/an''' ): UpperCAmelCase : Tuple =int(key_name[8:].split('''/''' )[0] ) if key_name.endswith('''/b''' ): UpperCAmelCase : Optional[Any] ='''model.blocks.%d.self_attn.norm.bias''' % player UpperCAmelCase : int =vnp.copy() # same because it is one dimensional UpperCAmelCase : Dict =torch.tensor(__lowerCAmelCase ) elif key_name.endswith('''/g''' ): UpperCAmelCase : Optional[Any] ='''model.blocks.%d.self_attn.norm.weight''' % player UpperCAmelCase : Any =vnp.copy() # same because it is one dimensional UpperCAmelCase : Union[str, Any] =torch.tensor(__lowerCAmelCase ) elif ( key_name.startswith('''model/wte''' ) or key_name.startswith('''model/wpe''' ) or key_name.startswith('''model/ete''' ) ): UpperCAmelCase : List[Any] ={'''wte''': '''embed_tokens''', '''wpe''': '''position_embeddings''', '''ete''': '''extra_position_embeddings'''}[ key_name[-3:] ] UpperCAmelCase : int ='''model.%s.weight''' % nlayer UpperCAmelCase : Tuple 
=vnp.copy() # same in embedded UpperCAmelCase : str =torch.tensor(__lowerCAmelCase ) if key_name.startswith('''model/wte''' ): UpperCAmelCase : Optional[int] ='''lm_head.weight''' UpperCAmelCase : Optional[int] =vnp.copy() # same in embedded UpperCAmelCase : int =torch.tensor(__lowerCAmelCase ) elif key_name.startswith('''model/wob''' ): UpperCAmelCase : int ='''final_logits_bias''' UpperCAmelCase : Any =vnp.copy() # same in embedded UpperCAmelCase : Tuple =state.reshape((1, -1) ) UpperCAmelCase : Optional[int] =torch.tensor(__lowerCAmelCase ) elif key_name == "model/dense/kernel": UpperCAmelCase : List[Any] ='''model.last_project.weight''' UpperCAmelCase : Union[str, Any] =vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix UpperCAmelCase : Tuple =torch.tensor(__lowerCAmelCase ) elif key_name == "model/dense_1/bias": UpperCAmelCase : str ='''model.last_project.bias''' UpperCAmelCase : Any =vnp.copy() # same because it is one dimensional UpperCAmelCase : int =torch.tensor(__lowerCAmelCase ) torch.save(__lowerCAmelCase , args.output ) if __name__ == "__main__": __snake_case = argparse.ArgumentParser( description='''model converter.''', formatter_class=argparse.ArgumentDefaultsHelpFormatter ) parser.add_argument('''--tf_model_dir''', metavar='''PATH''', type=str, required=True, help='''import model''') parser.add_argument('''--output''', metavar='''PATH''', type=str, required=True, help='''output model''') __snake_case = parser.parse_args() convert_tf_gptsan_to_pt(args)
348
def lowerCAmelCase_ ( __lowerCAmelCase , __lowerCAmelCase )-> str: '''simple docstring''' if number < 0 or shift_amount < 0: raise ValueError('''both inputs must be positive integers''' ) UpperCAmelCase : Dict =str(bin(__lowerCAmelCase ) ) binary_number += "0" * shift_amount return binary_number def lowerCAmelCase_ ( __lowerCAmelCase , __lowerCAmelCase )-> str: '''simple docstring''' if number < 0 or shift_amount < 0: raise ValueError('''both inputs must be positive integers''' ) UpperCAmelCase : Any =str(bin(__lowerCAmelCase ) )[2:] if shift_amount >= len(__lowerCAmelCase ): return "0b0" UpperCAmelCase : Optional[Any] =binary_number[: len(__lowerCAmelCase ) - shift_amount] return "0b" + shifted_binary_number def lowerCAmelCase_ ( __lowerCAmelCase , __lowerCAmelCase )-> str: '''simple docstring''' if number >= 0: # Get binary representation of positive number UpperCAmelCase : Optional[Any] ='''0''' + str(bin(__lowerCAmelCase ) ).strip('''-''' )[2:] else: # Get binary (2's complement) representation of negative number UpperCAmelCase : int =len(bin(__lowerCAmelCase )[3:] ) # Find 2's complement of number UpperCAmelCase : Any =bin(abs(__lowerCAmelCase ) - (1 << binary_number_length) )[3:] UpperCAmelCase : Optional[Any] =( '''1''' + '''0''' * (binary_number_length - len(__lowerCAmelCase )) + binary_number ) if shift_amount >= len(__lowerCAmelCase ): return "0b" + binary_number[0] * len(__lowerCAmelCase ) return ( "0b" + binary_number[0] * shift_amount + binary_number[: len(__lowerCAmelCase ) - shift_amount] ) if __name__ == "__main__": import doctest doctest.testmod()
348
1
import argparse import os import torch from transformers.utils import WEIGHTS_NAME __snake_case = ['''small''', '''medium''', '''large'''] __snake_case = '''lm_head.decoder.weight''' __snake_case = '''lm_head.weight''' def lowerCAmelCase_ ( __lowerCAmelCase , __lowerCAmelCase )-> List[str]: '''simple docstring''' UpperCAmelCase : Dict =torch.load(__lowerCAmelCase ) UpperCAmelCase : Optional[int] =d.pop(__lowerCAmelCase ) os.makedirs(__lowerCAmelCase , exist_ok=__lowerCAmelCase ) torch.save(__lowerCAmelCase , os.path.join(__lowerCAmelCase , __lowerCAmelCase ) ) if __name__ == "__main__": __snake_case = argparse.ArgumentParser() parser.add_argument('''--dialogpt_path''', default='''.''', type=str) __snake_case = parser.parse_args() for MODEL in DIALOGPT_MODELS: __snake_case = os.path.join(args.dialogpt_path, f'{MODEL}_ft.pkl') __snake_case = f'./DialoGPT-{MODEL}' convert_dialogpt_checkpoint( checkpoint_path, pytorch_dump_folder_path, )
348
from dataclasses import asdict, dataclass from typing import Optional from ...configuration_utils import PretrainedConfig from ...utils import logging __snake_case = logging.get_logger(__name__) # TODO Update this __snake_case = { '''facebook/esm-1b''': '''https://huggingface.co/facebook/esm-1b/resolve/main/config.json''', # See all ESM models at https://huggingface.co/models?filter=esm } class __snake_case ( lowerCamelCase__ ): __lowerCamelCase : Tuple = """esm""" def __init__( self , snake_case__=None , snake_case__=None , snake_case__=None , snake_case__=768 , snake_case__=12 , snake_case__=12 , snake_case__=3072 , snake_case__=0.1 , snake_case__=0.1 , snake_case__=1026 , snake_case__=0.02 , snake_case__=1e-12 , snake_case__="absolute" , snake_case__=True , snake_case__=None , snake_case__=False , snake_case__=False , snake_case__=None , snake_case__=None , **snake_case__ , ) -> Union[str, Any]: '''simple docstring''' super().__init__(pad_token_id=snake_case__ , mask_token_id=snake_case__ , **snake_case__ ) UpperCAmelCase : List[str] =vocab_size UpperCAmelCase : str =hidden_size UpperCAmelCase : List[Any] =num_hidden_layers UpperCAmelCase : Optional[Any] =num_attention_heads UpperCAmelCase : str =intermediate_size UpperCAmelCase : Any =hidden_dropout_prob UpperCAmelCase : int =attention_probs_dropout_prob UpperCAmelCase : Dict =max_position_embeddings UpperCAmelCase : List[str] =initializer_range UpperCAmelCase : Union[str, Any] =layer_norm_eps UpperCAmelCase : Dict =position_embedding_type UpperCAmelCase : Optional[Any] =use_cache UpperCAmelCase : int =emb_layer_norm_before UpperCAmelCase : List[str] =token_dropout UpperCAmelCase : Optional[Any] =is_folding_model if is_folding_model: if esmfold_config is None: logger.info('''No esmfold_config supplied for folding model, using default values.''' ) UpperCAmelCase : Optional[Any] =EsmFoldConfig() elif isinstance(snake_case__ , snake_case__ ): UpperCAmelCase : Optional[int] =EsmFoldConfig(**snake_case__ ) UpperCAmelCase : Tuple =esmfold_config if vocab_list is None: logger.warning('''No vocab_list supplied for folding model, assuming the ESM-2 vocabulary!''' ) UpperCAmelCase : Any =get_default_vocab_list() else: UpperCAmelCase : Tuple =vocab_list else: UpperCAmelCase : Optional[int] =None UpperCAmelCase : Union[str, Any] =None if self.esmfold_config is not None and getattr(self.esmfold_config , '''use_esm_attn_map''' , snake_case__ ): raise ValueError('''The HuggingFace port of ESMFold does not support use_esm_attn_map at this time!''' ) def UpperCAmelCase__ ( self ) -> List[Any]: '''simple docstring''' UpperCAmelCase : Union[str, Any] =super().to_dict() if isinstance(self.esmfold_config , snake_case__ ): UpperCAmelCase : str =self.esmfold_config.to_dict() return output @dataclass class __snake_case : __lowerCamelCase : str = None __lowerCamelCase : bool = True __lowerCamelCase : bool = False __lowerCamelCase : bool = False __lowerCamelCase : bool = False __lowerCamelCase : float = 0 __lowerCamelCase : bool = True __lowerCamelCase : bool = False __lowerCamelCase : int = 128 __lowerCamelCase : "TrunkConfig" = None def UpperCAmelCase__ ( self ) -> Union[str, Any]: '''simple docstring''' if self.trunk is None: UpperCAmelCase : str =TrunkConfig() elif isinstance(self.trunk , snake_case__ ): UpperCAmelCase : Optional[int] =TrunkConfig(**self.trunk ) def UpperCAmelCase__ ( self ) -> Any: '''simple docstring''' UpperCAmelCase : Optional[Any] =asdict(self ) UpperCAmelCase : Any =self.trunk.to_dict() return output @dataclass class __snake_case : 
__lowerCamelCase : int = 48 __lowerCamelCase : int = 1024 __lowerCamelCase : int = 128 __lowerCamelCase : int = 32 __lowerCamelCase : int = 32 __lowerCamelCase : int = 32 __lowerCamelCase : float = 0 __lowerCamelCase : float = 0 __lowerCamelCase : bool = False __lowerCamelCase : int = 4 __lowerCamelCase : Optional[int] = 128 __lowerCamelCase : "StructureModuleConfig" = None def UpperCAmelCase__ ( self ) -> List[str]: '''simple docstring''' if self.structure_module is None: UpperCAmelCase : Any =StructureModuleConfig() elif isinstance(self.structure_module , snake_case__ ): UpperCAmelCase : str =StructureModuleConfig(**self.structure_module ) if self.max_recycles <= 0: raise ValueError(f'''`max_recycles` should be positive, got {self.max_recycles}.''' ) if self.sequence_state_dim % self.sequence_head_width != 0: raise ValueError( '''`sequence_state_dim` should be a round multiple of `sequence_head_width`, got''' f''' {self.sequence_state_dim} and {self.sequence_head_width}.''' ) if self.pairwise_state_dim % self.pairwise_head_width != 0: raise ValueError( '''`pairwise_state_dim` should be a round multiple of `pairwise_head_width`, got''' f''' {self.pairwise_state_dim} and {self.pairwise_head_width}.''' ) UpperCAmelCase : Optional[int] =self.sequence_state_dim // self.sequence_head_width UpperCAmelCase : Any =self.pairwise_state_dim // self.pairwise_head_width if self.sequence_state_dim != sequence_num_heads * self.sequence_head_width: raise ValueError( '''`sequence_state_dim` should be equal to `sequence_num_heads * sequence_head_width`, got''' f''' {self.sequence_state_dim} != {sequence_num_heads} * {self.sequence_head_width}.''' ) if self.pairwise_state_dim != pairwise_num_heads * self.pairwise_head_width: raise ValueError( '''`pairwise_state_dim` should be equal to `pairwise_num_heads * pairwise_head_width`, got''' f''' {self.pairwise_state_dim} != {pairwise_num_heads} * {self.pairwise_head_width}.''' ) if self.pairwise_state_dim % 2 != 0: raise ValueError(f'''`pairwise_state_dim` should be even, got {self.pairwise_state_dim}.''' ) if self.dropout >= 0.4: raise ValueError(f'''`dropout` should not be greater than 0.4, got {self.dropout}.''' ) def UpperCAmelCase__ ( self ) -> List[Any]: '''simple docstring''' UpperCAmelCase : Union[str, Any] =asdict(self ) UpperCAmelCase : Tuple =self.structure_module.to_dict() return output @dataclass class __snake_case : __lowerCamelCase : int = 384 __lowerCamelCase : int = 128 __lowerCamelCase : int = 16 __lowerCamelCase : int = 128 __lowerCamelCase : int = 12 __lowerCamelCase : int = 4 __lowerCamelCase : int = 8 __lowerCamelCase : float = 0.1 __lowerCamelCase : int = 8 __lowerCamelCase : int = 1 __lowerCamelCase : int = 2 __lowerCamelCase : int = 7 __lowerCamelCase : int = 10 __lowerCamelCase : float = 1E-8 __lowerCamelCase : float = 1E5 def UpperCAmelCase__ ( self ) -> Union[str, Any]: '''simple docstring''' return asdict(self ) def lowerCAmelCase_ ( )-> Tuple: '''simple docstring''' return ( "<cls>", "<pad>", "<eos>", "<unk>", "L", "A", "G", "V", "S", "E", "R", "T", "I", "D", "P", "K", "Q", "N", "F", "Y", "M", "H", "W", "C", "X", "B", "U", "Z", "O", ".", "-", "<null_1>", "<mask>", )
348
1
import warnings from contextlib import contextmanager from ....processing_utils import ProcessorMixin class __snake_case ( lowerCamelCase__ ): __lowerCamelCase : Union[str, Any] = """MCTCTFeatureExtractor""" __lowerCamelCase : Optional[int] = """AutoTokenizer""" def __init__( self , snake_case__ , snake_case__ ) -> Tuple: '''simple docstring''' super().__init__(snake_case__ , snake_case__ ) UpperCAmelCase : Any =self.feature_extractor UpperCAmelCase : Dict =False def __call__( self , *snake_case__ , **snake_case__ ) -> Dict: '''simple docstring''' if self._in_target_context_manager: return self.current_processor(*snake_case__ , **snake_case__ ) if "raw_speech" in kwargs: warnings.warn('''Using `raw_speech` as a keyword argument is deprecated. Use `audio` instead.''' ) UpperCAmelCase : Optional[Any] =kwargs.pop('''raw_speech''' ) else: UpperCAmelCase : Union[str, Any] =kwargs.pop('''audio''' , snake_case__ ) UpperCAmelCase : List[Any] =kwargs.pop('''sampling_rate''' , snake_case__ ) UpperCAmelCase : Union[str, Any] =kwargs.pop('''text''' , snake_case__ ) if len(snake_case__ ) > 0: UpperCAmelCase : Tuple =args[0] UpperCAmelCase : Optional[Any] =args[1:] if audio is None and text is None: raise ValueError('''You need to specify either an `audio` or `text` input to process.''' ) if audio is not None: UpperCAmelCase : str =self.feature_extractor(snake_case__ , *snake_case__ , sampling_rate=snake_case__ , **snake_case__ ) if text is not None: UpperCAmelCase : List[str] =self.tokenizer(snake_case__ , **snake_case__ ) if text is None: return inputs elif audio is None: return encodings else: UpperCAmelCase : Any =encodings['''input_ids'''] return inputs def UpperCAmelCase__ ( self , *snake_case__ , **snake_case__ ) -> Optional[Any]: '''simple docstring''' return self.tokenizer.batch_decode(*snake_case__ , **snake_case__ ) def UpperCAmelCase__ ( self , *snake_case__ , **snake_case__ ) -> List[Any]: '''simple docstring''' if self._in_target_context_manager: return self.current_processor.pad(*snake_case__ , **snake_case__ ) UpperCAmelCase : Optional[Any] =kwargs.pop('''input_features''' , snake_case__ ) UpperCAmelCase : Dict =kwargs.pop('''labels''' , snake_case__ ) if len(snake_case__ ) > 0: UpperCAmelCase : Any =args[0] UpperCAmelCase : Dict =args[1:] if input_features is not None: UpperCAmelCase : Any =self.feature_extractor.pad(snake_case__ , *snake_case__ , **snake_case__ ) if labels is not None: UpperCAmelCase : Optional[int] =self.tokenizer.pad(snake_case__ , **snake_case__ ) if labels is None: return input_features elif input_features is None: return labels else: UpperCAmelCase : Any =labels['''input_ids'''] return input_features def UpperCAmelCase__ ( self , *snake_case__ , **snake_case__ ) -> Optional[Any]: '''simple docstring''' return self.tokenizer.decode(*snake_case__ , **snake_case__ ) @contextmanager def UpperCAmelCase__ ( self ) -> List[str]: '''simple docstring''' warnings.warn( '''`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your ''' '''labels by using the argument `text` of the regular `__call__` method (either in the same call as ''' '''your audio inputs, or in a separate call.''' ) UpperCAmelCase : List[str] =True UpperCAmelCase : Union[str, Any] =self.tokenizer yield UpperCAmelCase : List[str] =self.feature_extractor UpperCAmelCase : Dict =False
348
import torch from diffusers import KDPMaDiscreteScheduler from diffusers.utils import torch_device from .test_schedulers import SchedulerCommonTest class __snake_case ( lowerCamelCase__ ): __lowerCamelCase : Optional[int] = (KDPMaDiscreteScheduler,) __lowerCamelCase : List[str] = 10 def UpperCAmelCase__ ( self , **snake_case__ ) -> str: '''simple docstring''' UpperCAmelCase : int ={ '''num_train_timesteps''': 1100, '''beta_start''': 0.0001, '''beta_end''': 0.02, '''beta_schedule''': '''linear''', } config.update(**snake_case__ ) return config def UpperCAmelCase__ ( self ) -> Tuple: '''simple docstring''' for timesteps in [10, 50, 100, 1000]: self.check_over_configs(num_train_timesteps=snake_case__ ) def UpperCAmelCase__ ( self ) -> Optional[int]: '''simple docstring''' for beta_start, beta_end in zip([0.0_0001, 0.0001, 0.001] , [0.0002, 0.002, 0.02] ): self.check_over_configs(beta_start=snake_case__ , beta_end=snake_case__ ) def UpperCAmelCase__ ( self ) -> str: '''simple docstring''' for schedule in ["linear", "scaled_linear"]: self.check_over_configs(beta_schedule=snake_case__ ) def UpperCAmelCase__ ( self ) -> Dict: '''simple docstring''' for prediction_type in ["epsilon", "v_prediction"]: self.check_over_configs(prediction_type=snake_case__ ) def UpperCAmelCase__ ( self ) -> str: '''simple docstring''' UpperCAmelCase : Optional[Any] =self.scheduler_classes[0] UpperCAmelCase : Optional[int] =self.get_scheduler_config(prediction_type='''v_prediction''' ) UpperCAmelCase : Optional[Any] =scheduler_class(**snake_case__ ) scheduler.set_timesteps(self.num_inference_steps ) UpperCAmelCase : str =self.dummy_model() UpperCAmelCase : Optional[Any] =self.dummy_sample_deter * scheduler.init_noise_sigma UpperCAmelCase : Union[str, Any] =sample.to(snake_case__ ) for i, t in enumerate(scheduler.timesteps ): UpperCAmelCase : str =scheduler.scale_model_input(snake_case__ , snake_case__ ) UpperCAmelCase : Any =model(snake_case__ , snake_case__ ) UpperCAmelCase : Union[str, Any] =scheduler.step(snake_case__ , snake_case__ , snake_case__ ) UpperCAmelCase : int =output.prev_sample UpperCAmelCase : Dict =torch.sum(torch.abs(snake_case__ ) ) UpperCAmelCase : Optional[Any] =torch.mean(torch.abs(snake_case__ ) ) if torch_device in ["cpu", "mps"]: assert abs(result_sum.item() - 4.69_34e-07 ) < 1e-2 assert abs(result_mean.item() - 6.11_12e-10 ) < 1e-3 else: # CUDA assert abs(result_sum.item() - 4.6_93_42_86_50_17_09_72e-07 ) < 1e-2 assert abs(result_mean.item() - 0.0002 ) < 1e-3 def UpperCAmelCase__ ( self ) -> int: '''simple docstring''' if torch_device == "mps": return UpperCAmelCase : Any =self.scheduler_classes[0] UpperCAmelCase : Optional[int] =self.get_scheduler_config() UpperCAmelCase : Optional[Any] =scheduler_class(**snake_case__ ) scheduler.set_timesteps(self.num_inference_steps ) UpperCAmelCase : Optional[int] =self.dummy_model() UpperCAmelCase : Union[str, Any] =self.dummy_sample_deter * scheduler.init_noise_sigma UpperCAmelCase : str =sample.to(snake_case__ ) for i, t in enumerate(scheduler.timesteps ): UpperCAmelCase : Dict =scheduler.scale_model_input(snake_case__ , snake_case__ ) UpperCAmelCase : Union[str, Any] =model(snake_case__ , snake_case__ ) UpperCAmelCase : List[str] =scheduler.step(snake_case__ , snake_case__ , snake_case__ ) UpperCAmelCase : Optional[int] =output.prev_sample UpperCAmelCase : Any =torch.sum(torch.abs(snake_case__ ) ) UpperCAmelCase : Union[str, Any] =torch.mean(torch.abs(snake_case__ ) ) if torch_device in ["cpu", "mps"]: assert abs(result_sum.item() - 20.4125 ) < 1e-2 
assert abs(result_mean.item() - 0.0266 ) < 1e-3 else: # CUDA assert abs(result_sum.item() - 20.4125 ) < 1e-2 assert abs(result_mean.item() - 0.0266 ) < 1e-3 def UpperCAmelCase__ ( self ) -> str: '''simple docstring''' if torch_device == "mps": return UpperCAmelCase : List[Any] =self.scheduler_classes[0] UpperCAmelCase : Dict =self.get_scheduler_config() UpperCAmelCase : List[str] =scheduler_class(**snake_case__ ) scheduler.set_timesteps(self.num_inference_steps , device=snake_case__ ) UpperCAmelCase : int =self.dummy_model() UpperCAmelCase : Tuple =self.dummy_sample_deter.to(snake_case__ ) * scheduler.init_noise_sigma for t in scheduler.timesteps: UpperCAmelCase : Optional[Any] =scheduler.scale_model_input(snake_case__ , snake_case__ ) UpperCAmelCase : int =model(snake_case__ , snake_case__ ) UpperCAmelCase : str =scheduler.step(snake_case__ , snake_case__ , snake_case__ ) UpperCAmelCase : List[str] =output.prev_sample UpperCAmelCase : List[str] =torch.sum(torch.abs(snake_case__ ) ) UpperCAmelCase : Dict =torch.mean(torch.abs(snake_case__ ) ) if str(snake_case__ ).startswith('''cpu''' ): # The following sum varies between 148 and 156 on mps. Why? assert abs(result_sum.item() - 20.4125 ) < 1e-2 assert abs(result_mean.item() - 0.0266 ) < 1e-3 else: # CUDA assert abs(result_sum.item() - 20.4125 ) < 1e-2 assert abs(result_mean.item() - 0.0266 ) < 1e-3
348
1
def lowerCAmelCase_ ( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )-> int: '''simple docstring''' UpperCAmelCase , UpperCAmelCase : Union[str, Any] =len(__lowerCAmelCase ), len(grid[0] ) if ( min(__lowerCAmelCase , __lowerCAmelCase ) < 0 or row == row_length or col == col_length or (row, col) in visit or grid[row][col] == 1 ): return 0 if row == row_length - 1 and col == col_length - 1: return 1 visit.add((row, col) ) UpperCAmelCase : List[Any] =0 count += depth_first_search(__lowerCAmelCase , row + 1 , __lowerCAmelCase , __lowerCAmelCase ) count += depth_first_search(__lowerCAmelCase , row - 1 , __lowerCAmelCase , __lowerCAmelCase ) count += depth_first_search(__lowerCAmelCase , __lowerCAmelCase , col + 1 , __lowerCAmelCase ) count += depth_first_search(__lowerCAmelCase , __lowerCAmelCase , col - 1 , __lowerCAmelCase ) visit.remove((row, col) ) return count if __name__ == "__main__": import doctest doctest.testmod()
import unittest

from transformers import is_flax_available
from transformers.testing_utils import require_flax, require_sentencepiece, require_tokenizers, require_torch, slow


if is_flax_available():
    import optax
    from flax.training.common_utils import onehot

    from transformers import AutoTokenizer, FlaxMT5ForConditionalGeneration
    from transformers.models.t5.modeling_flax_t5 import shift_tokens_right


@require_torch
@require_sentencepiece
@require_tokenizers
@require_flax
class MT5IntegrationTest(unittest.TestCase):
    @slow
    def test_small_integration_test(self):
        model = FlaxMT5ForConditionalGeneration.from_pretrained("google/mt5-small")
        tokenizer = AutoTokenizer.from_pretrained("google/mt5-small")

        input_ids = tokenizer("Hello there", return_tensors="np").input_ids
        labels = tokenizer("Hi I am", return_tensors="np").input_ids

        decoder_input_ids = shift_tokens_right(labels, model.config.pad_token_id, model.config.decoder_start_token_id)

        logits = model(input_ids, decoder_input_ids=decoder_input_ids).logits
        loss = optax.softmax_cross_entropy(logits, onehot(labels, logits.shape[-1])).mean()
        mtf_score = -(labels.shape[-1] * loss.item())

        EXPECTED_SCORE = -84.9127
        self.assertTrue(abs(mtf_score - EXPECTED_SCORE) < 1e-4)
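# A minimal sketch of what the `shift_tokens_right` helper used above does:
# decoder inputs are the labels shifted one position to the right, with the
# decoder start token prepended and pad tokens replacing any -100 markers.
# This standalone version is illustrative, not the transformers implementation.
import numpy as np


def shift_right_sketch(labels: np.ndarray, pad_token_id: int, decoder_start_token_id: int) -> np.ndarray:
    shifted = np.zeros_like(labels)
    shifted[:, 1:] = labels[:, :-1]
    shifted[:, 0] = decoder_start_token_id
    return np.where(shifted == -100, pad_token_id, shifted)


print(shift_right_sketch(np.array([[5, 6, 7]]), pad_token_id=0, decoder_start_token_id=0))  # [[0 5 6]]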
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

MEGATRON_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    # See all MEGATRON_BERT models at https://huggingface.co/models?filter=bert
}


class MegatronBertConfig(PretrainedConfig):
    model_type = "megatron-bert"

    def __init__(
        self,
        vocab_size=29056,
        hidden_size=1024,
        num_hidden_layers=24,
        num_attention_heads=16,
        intermediate_size=4096,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        position_embedding_type="absolute",
        use_cache=True,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
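# A brief usage sketch for the configuration class above; in a normal install
# it is exposed at the top level of `transformers`. The small sizes are
# illustrative values for a quick randomly-initialised smoke test.
from transformers import MegatronBertConfig

config = MegatronBertConfig(num_hidden_layers=2, hidden_size=128, num_attention_heads=4)
print(config.model_type)  # "megatron-bert"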
import unittest import numpy as np from diffusers import LMSDiscreteScheduler, OnnxStableDiffusionInpaintPipeline from diffusers.utils.testing_utils import ( is_onnx_available, load_image, nightly, require_onnxruntime, require_torch_gpu, ) from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin if is_onnx_available(): import onnxruntime as ort class __snake_case ( lowerCamelCase__ , unittest.TestCase ): # FIXME: add fast tests pass @nightly @require_onnxruntime @require_torch_gpu class __snake_case ( unittest.TestCase ): @property def UpperCAmelCase__ ( self ) -> List[str]: '''simple docstring''' return ( "CUDAExecutionProvider", { "gpu_mem_limit": "15000000000", # 15GB "arena_extend_strategy": "kSameAsRequested", }, ) @property def UpperCAmelCase__ ( self ) -> int: '''simple docstring''' UpperCAmelCase : List[Any] =ort.SessionOptions() UpperCAmelCase : Optional[int] =False return options def UpperCAmelCase__ ( self ) -> Dict: '''simple docstring''' UpperCAmelCase : int =load_image( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main''' '''/in_paint/overture-creations-5sI6fQgYIuo.png''' ) UpperCAmelCase : Optional[Any] =load_image( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main''' '''/in_paint/overture-creations-5sI6fQgYIuo_mask.png''' ) UpperCAmelCase : List[str] =OnnxStableDiffusionInpaintPipeline.from_pretrained( '''runwayml/stable-diffusion-inpainting''' , revision='''onnx''' , safety_checker=snake_case__ , feature_extractor=snake_case__ , provider=self.gpu_provider , sess_options=self.gpu_options , ) pipe.set_progress_bar_config(disable=snake_case__ ) UpperCAmelCase : Dict ='''A red cat sitting on a park bench''' UpperCAmelCase : int =np.random.RandomState(0 ) UpperCAmelCase : Any =pipe( prompt=snake_case__ , image=snake_case__ , mask_image=snake_case__ , guidance_scale=7.5 , num_inference_steps=10 , generator=snake_case__ , output_type='''np''' , ) UpperCAmelCase : Dict =output.images UpperCAmelCase : Optional[int] =images[0, 255:258, 255:258, -1] assert images.shape == (1, 512, 512, 3) UpperCAmelCase : Tuple =np.array([0.2514, 0.3007, 0.3517, 0.1790, 0.2382, 0.3167, 0.1944, 0.2273, 0.2464] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3 def UpperCAmelCase__ ( self ) -> Optional[Any]: '''simple docstring''' UpperCAmelCase : List[str] =load_image( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main''' '''/in_paint/overture-creations-5sI6fQgYIuo.png''' ) UpperCAmelCase : Tuple =load_image( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main''' '''/in_paint/overture-creations-5sI6fQgYIuo_mask.png''' ) UpperCAmelCase : List[str] =LMSDiscreteScheduler.from_pretrained( '''runwayml/stable-diffusion-inpainting''' , subfolder='''scheduler''' , revision='''onnx''' ) UpperCAmelCase : int =OnnxStableDiffusionInpaintPipeline.from_pretrained( '''runwayml/stable-diffusion-inpainting''' , revision='''onnx''' , scheduler=snake_case__ , safety_checker=snake_case__ , feature_extractor=snake_case__ , provider=self.gpu_provider , sess_options=self.gpu_options , ) pipe.set_progress_bar_config(disable=snake_case__ ) UpperCAmelCase : Union[str, Any] ='''A red cat sitting on a park bench''' UpperCAmelCase : int =np.random.RandomState(0 ) UpperCAmelCase : str =pipe( prompt=snake_case__ , image=snake_case__ , mask_image=snake_case__ , guidance_scale=7.5 , num_inference_steps=20 , generator=snake_case__ , output_type='''np''' , ) UpperCAmelCase : 
Dict =output.images UpperCAmelCase : int =images[0, 255:258, 255:258, -1] assert images.shape == (1, 512, 512, 3) UpperCAmelCase : Union[str, Any] =np.array([0.0086, 0.0077, 0.0083, 0.0093, 0.0107, 0.0139, 0.0094, 0.0097, 0.0125] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
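# A hedged, CPU-only sketch of the pipeline call exercised in the tests above;
# checkpoint, revision, prompt and images match the test, only the execution
# provider differs (no GPU or ONNX Runtime CUDA build required).
import numpy as np
from diffusers import OnnxStableDiffusionInpaintPipeline
from diffusers.utils import load_image

init_image = load_image(
    "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
    "/in_paint/overture-creations-5sI6fQgYIuo.png"
)
mask_image = load_image(
    "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
    "/in_paint/overture-creations-5sI6fQgYIuo_mask.png"
)
pipe = OnnxStableDiffusionInpaintPipeline.from_pretrained(
    "runwayml/stable-diffusion-inpainting", revision="onnx", provider="CPUExecutionProvider"
)
image = pipe(
    prompt="A red cat sitting on a park bench",
    image=init_image,
    mask_image=mask_image,
    num_inference_steps=10,
    generator=np.random.RandomState(0),
    output_type="np",
).images[0]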
import pytest

import datasets.config
from datasets.utils.info_utils import is_small_dataset


@pytest.mark.parametrize("dataset_size", [None, 400 * 2**20, 600 * 2**20])
@pytest.mark.parametrize("input_in_memory_max_size", ["default", 0, 100 * 2**20, 900 * 2**20])
def test_is_small_dataset(dataset_size, input_in_memory_max_size, monkeypatch):
    if input_in_memory_max_size != "default":
        monkeypatch.setattr(datasets.config, "IN_MEMORY_MAX_SIZE", input_in_memory_max_size)
    in_memory_max_size = datasets.config.IN_MEMORY_MAX_SIZE
    if input_in_memory_max_size == "default":
        assert in_memory_max_size == 0
    else:
        assert in_memory_max_size == input_in_memory_max_size
    if dataset_size and in_memory_max_size:
        expected = dataset_size < in_memory_max_size
    else:
        expected = False
    result = is_small_dataset(dataset_size)
    assert result == expected
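# A quick illustrative check of the behaviour exercised above: with the
# default IN_MEMORY_MAX_SIZE of 0 nothing counts as "small", and with a
# non-zero cap only strictly smaller datasets do. The 500 MiB cap is a
# hypothetical value for demonstration.
import datasets.config
from datasets.utils.info_utils import is_small_dataset

datasets.config.IN_MEMORY_MAX_SIZE = 500 * 2**20
print(is_small_dataset(400 * 2**20))  # True
print(is_small_dataset(600 * 2**20))  # False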
from unittest import TestCase

from datasets import Dataset

from minhash_deduplication import deduplicate_dataset, make_duplicate_clusters


def get_dataset() -> Dataset:
    data_dict = {
        "repo_name": ["test_repo1", "test_repo2", "test_repo3"],
        "path": ["test_1.py", "test_2.py", "unit_test.py"],
        "content": ["a " * 20, "a " * 30, "b " * 7],
    }
    return Dataset.from_dict(data_dict)


class MakeDuplicateClustersTest(TestCase):
    def test_make_duplicate_clusters(self):
        ds = get_dataset()
        duplicate_clusters = make_duplicate_clusters(ds, 0.85)
        self.assertEqual(len(duplicate_clusters[0]), 2)

    def test_deduplicate_dataset(self):
        ds = get_dataset()
        ds_filter, duplicate_clusters = deduplicate_dataset(ds)
        self.assertEqual(len(ds_filter), 2)
        print(duplicate_clusters)
        self.assertEqual(duplicate_clusters[0][0]["copies"], 2)
        self.assertEqual(duplicate_clusters[0][0]["is_extreme"], True)
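# The MinHash deduplication under test approximates Jaccard similarity
# between token sets. A minimal, exact-Jaccard sketch of the idea (the 0.85
# threshold mirrors the test above; this is not the minhash_deduplication
# implementation):
def jaccard(a: str, b: str) -> float:
    sa, sb = set(a.split()), set(b.split())
    return len(sa & sb) / len(sa | sb)


print(jaccard("a " * 20, "a " * 30) >= 0.85)  # True  -> same cluster
print(jaccard("a " * 20, "b " * 7) >= 0.85)   # False -> different clusters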
import qiskit


def half_adder(bit0: int, bit1: int) -> qiskit.result.counts.Counts:
    simulator = qiskit.Aer.get_backend("aer_simulator")
    qc_ha = qiskit.QuantumCircuit(4, 2)
    # encode inputs in qubits 0 and 1
    if bit0 == 1:
        qc_ha.x(0)
    if bit1 == 1:
        qc_ha.x(1)
    qc_ha.barrier()
    # use cnots to write XOR of the inputs on qubit 2
    qc_ha.cx(0, 2)
    qc_ha.cx(1, 2)
    # use ccx / toffoli gate to write AND of the inputs on qubit 3
    qc_ha.ccx(0, 1, 3)
    qc_ha.barrier()
    # extract outputs
    qc_ha.measure(2, 0)  # extract XOR value
    qc_ha.measure(3, 1)  # extract AND value
    # execute the circuit on the qasm simulator
    job = qiskit.execute(qc_ha, simulator, shots=1000)
    # return the histogram data of the results of the experiment
    return job.result().get_counts(qc_ha)


if __name__ == "__main__":
    counts = half_adder(1, 1)
    print(f"Half Adder Output Qubit Counts: {counts}")
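# A brief usage sketch: sweep the half adder over its full truth table. On an
# ideal simulator every shot collapses to the single bitstring "<carry><sum>",
# e.g. half_adder(1, 1) -> {"10": 1000}.
if __name__ == "__main__":
    for a, b in [(0, 0), (0, 1), (1, 0), (1, 1)]:
        print(f"{a} + {b} -> {half_adder(a, b)}")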
from typing import Callable, List, Optional, Tuple, Union import torch from transformers import CLIPTextModel, CLIPTokenizer from ...configuration_utils import ConfigMixin, register_to_config from ...models import ModelMixin, TransformeraDModel, VQModel from ...schedulers import VQDiffusionScheduler from ...utils import logging from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput __snake_case = logging.get_logger(__name__) # pylint: disable=invalid-name class __snake_case ( lowerCamelCase__ , lowerCamelCase__ ): @register_to_config def __init__( self , snake_case__ , snake_case__ = None , snake_case__ = None ) -> str: '''simple docstring''' super().__init__() UpperCAmelCase : Optional[Any] =learnable if self.learnable: assert hidden_size is not None, "learnable=True requires `hidden_size` to be set" assert length is not None, "learnable=True requires `length` to be set" UpperCAmelCase : Any =torch.zeros(snake_case__ , snake_case__ ) else: UpperCAmelCase : Union[str, Any] =None UpperCAmelCase : Optional[int] =torch.nn.Parameter(snake_case__ ) class __snake_case ( lowerCamelCase__ ): __lowerCamelCase : VQModel __lowerCamelCase : CLIPTextModel __lowerCamelCase : CLIPTokenizer __lowerCamelCase : TransformeraDModel __lowerCamelCase : LearnedClassifierFreeSamplingEmbeddings __lowerCamelCase : VQDiffusionScheduler def __init__( self , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , ) -> int: '''simple docstring''' super().__init__() self.register_modules( vqvae=snake_case__ , transformer=snake_case__ , text_encoder=snake_case__ , tokenizer=snake_case__ , scheduler=snake_case__ , learned_classifier_free_sampling_embeddings=snake_case__ , ) def UpperCAmelCase__ ( self , snake_case__ , snake_case__ , snake_case__ ) -> Optional[int]: '''simple docstring''' UpperCAmelCase : int =len(snake_case__ ) if isinstance(snake_case__ , snake_case__ ) else 1 # get prompt text embeddings UpperCAmelCase : Optional[int] =self.tokenizer( snake_case__ , padding='''max_length''' , max_length=self.tokenizer.model_max_length , return_tensors='''pt''' , ) UpperCAmelCase : int =text_inputs.input_ids if text_input_ids.shape[-1] > self.tokenizer.model_max_length: UpperCAmelCase : List[str] =self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :] ) logger.warning( '''The following part of your input was truncated because CLIP can only handle sequences up to''' f''' {self.tokenizer.model_max_length} tokens: {removed_text}''' ) UpperCAmelCase : Optional[Any] =text_input_ids[:, : self.tokenizer.model_max_length] UpperCAmelCase : List[Any] =self.text_encoder(text_input_ids.to(self.device ) )[0] # NOTE: This additional step of normalizing the text embeddings is from VQ-Diffusion. # While CLIP does normalize the pooled output of the text transformer when combining # the image and text embeddings, CLIP does not directly normalize the last hidden state. # # CLIP normalizing the pooled output. 
# https://github.com/huggingface/transformers/blob/d92e22d1f28324f513f3080e5c47c071a3916721/src/transformers/models/clip/modeling_clip.py#L1052-L1053 UpperCAmelCase : int =prompt_embeds / prompt_embeds.norm(dim=-1 , keepdim=snake_case__ ) # duplicate text embeddings for each generation per prompt UpperCAmelCase : int =prompt_embeds.repeat_interleave(snake_case__ , dim=0 ) if do_classifier_free_guidance: if self.learned_classifier_free_sampling_embeddings.learnable: UpperCAmelCase : Optional[int] =self.learned_classifier_free_sampling_embeddings.embeddings UpperCAmelCase : str =negative_prompt_embeds.unsqueeze(0 ).repeat(snake_case__ , 1 , 1 ) else: UpperCAmelCase : str =[''''''] * batch_size UpperCAmelCase : Tuple =text_input_ids.shape[-1] UpperCAmelCase : Optional[Any] =self.tokenizer( snake_case__ , padding='''max_length''' , max_length=snake_case__ , truncation=snake_case__ , return_tensors='''pt''' , ) UpperCAmelCase : Optional[Any] =self.text_encoder(uncond_input.input_ids.to(self.device ) )[0] # See comment for normalizing text embeddings UpperCAmelCase : Optional[int] =negative_prompt_embeds / negative_prompt_embeds.norm(dim=-1 , keepdim=snake_case__ ) # duplicate unconditional embeddings for each generation per prompt, using mps friendly method UpperCAmelCase : Optional[Any] =negative_prompt_embeds.shape[1] UpperCAmelCase : Union[str, Any] =negative_prompt_embeds.repeat(1 , snake_case__ , 1 ) UpperCAmelCase : Optional[Any] =negative_prompt_embeds.view(batch_size * num_images_per_prompt , snake_case__ , -1 ) # For classifier free guidance, we need to do two forward passes. # Here we concatenate the unconditional and text embeddings into a single batch # to avoid doing two forward passes UpperCAmelCase : int =torch.cat([negative_prompt_embeds, prompt_embeds] ) return prompt_embeds @torch.no_grad() def __call__( self , snake_case__ , snake_case__ = 100 , snake_case__ = 5.0 , snake_case__ = 1.0 , snake_case__ = 1 , snake_case__ = None , snake_case__ = None , snake_case__ = "pil" , snake_case__ = True , snake_case__ = None , snake_case__ = 1 , ) -> Union[ImagePipelineOutput, Tuple]: '''simple docstring''' if isinstance(snake_case__ , snake_case__ ): UpperCAmelCase : Optional[int] =1 elif isinstance(snake_case__ , snake_case__ ): UpperCAmelCase : Tuple =len(snake_case__ ) else: raise ValueError(f'''`prompt` has to be of type `str` or `list` but is {type(snake_case__ )}''' ) UpperCAmelCase : Tuple =batch_size * num_images_per_prompt UpperCAmelCase : List[str] =guidance_scale > 1.0 UpperCAmelCase : List[Any] =self._encode_prompt(snake_case__ , snake_case__ , snake_case__ ) if (callback_steps is None) or ( callback_steps is not None and (not isinstance(snake_case__ , snake_case__ ) or callback_steps <= 0) ): raise ValueError( f'''`callback_steps` has to be a positive integer but is {callback_steps} of type''' f''' {type(snake_case__ )}.''' ) # get the initial completely masked latents unless the user supplied it UpperCAmelCase : int =(batch_size, self.transformer.num_latent_pixels) if latents is None: UpperCAmelCase : Union[str, Any] =self.transformer.num_vector_embeds - 1 UpperCAmelCase : str =torch.full(snake_case__ , snake_case__ ).to(self.device ) else: if latents.shape != latents_shape: raise ValueError(f'''Unexpected latents shape, got {latents.shape}, expected {latents_shape}''' ) if (latents < 0).any() or (latents >= self.transformer.num_vector_embeds).any(): raise ValueError( '''Unexpected latents value(s). All latents be valid embedding indices i.e. 
in the range 0,''' f''' {self.transformer.num_vector_embeds - 1} (inclusive).''' ) UpperCAmelCase : Any =latents.to(self.device ) # set timesteps self.scheduler.set_timesteps(snake_case__ , device=self.device ) UpperCAmelCase : Any =self.scheduler.timesteps.to(self.device ) UpperCAmelCase : Optional[int] =latents for i, t in enumerate(self.progress_bar(snake_case__ ) ): # expand the sample if we are doing classifier free guidance UpperCAmelCase : Optional[Any] =torch.cat([sample] * 2 ) if do_classifier_free_guidance else sample # predict the un-noised image # model_output == `log_p_x_0` UpperCAmelCase : Optional[int] =self.transformer(snake_case__ , encoder_hidden_states=snake_case__ , timestep=snake_case__ ).sample if do_classifier_free_guidance: UpperCAmelCase , UpperCAmelCase : str =model_output.chunk(2 ) UpperCAmelCase : Optional[int] =model_output_uncond + guidance_scale * (model_output_text - model_output_uncond) model_output -= torch.logsumexp(snake_case__ , dim=1 , keepdim=snake_case__ ) UpperCAmelCase : Tuple =self.truncate(snake_case__ , snake_case__ ) # remove `log(0)`'s (`-inf`s) UpperCAmelCase : Optional[Any] =model_output.clamp(-70 ) # compute the previous noisy sample x_t -> x_t-1 UpperCAmelCase : int =self.scheduler.step(snake_case__ , timestep=snake_case__ , sample=snake_case__ , generator=snake_case__ ).prev_sample # call the callback, if provided if callback is not None and i % callback_steps == 0: callback(snake_case__ , snake_case__ , snake_case__ ) UpperCAmelCase : Optional[int] =self.vqvae.config.vq_embed_dim UpperCAmelCase : Optional[Any] =(batch_size, self.transformer.height, self.transformer.width, embedding_channels) UpperCAmelCase : Dict =self.vqvae.quantize.get_codebook_entry(snake_case__ , shape=snake_case__ ) UpperCAmelCase : Tuple =self.vqvae.decode(snake_case__ , force_not_quantize=snake_case__ ).sample UpperCAmelCase : Union[str, Any] =(image / 2 + 0.5).clamp(0 , 1 ) UpperCAmelCase : Any =image.cpu().permute(0 , 2 , 3 , 1 ).numpy() if output_type == "pil": UpperCAmelCase : List[str] =self.numpy_to_pil(snake_case__ ) if not return_dict: return (image,) return ImagePipelineOutput(images=snake_case__ ) def UpperCAmelCase__ ( self , snake_case__ , snake_case__ ) -> torch.FloatTensor: '''simple docstring''' UpperCAmelCase , UpperCAmelCase : int =torch.sort(snake_case__ , 1 , descending=snake_case__ ) UpperCAmelCase : Union[str, Any] =torch.exp(snake_case__ ) UpperCAmelCase : Union[str, Any] =sorted_p_x_0.cumsum(dim=1 ) < truncation_rate # Ensure that at least the largest probability is not zeroed out UpperCAmelCase : Optional[Any] =torch.full_like(keep_mask[:, 0:1, :] , snake_case__ ) UpperCAmelCase : Tuple =torch.cat((all_true, keep_mask) , dim=1 ) UpperCAmelCase : int =keep_mask[:, :-1, :] UpperCAmelCase : int =keep_mask.gather(1 , indices.argsort(1 ) ) UpperCAmelCase : Dict =log_p_x_0.clone() UpperCAmelCase : List[Any] =-torch.inf # -inf = log(0) return rv
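# The classifier-free guidance arithmetic inside __call__ above, isolated as
# a tiny torch sketch: conditional and unconditional log-probabilities are
# combined, then renormalised with logsumexp over the codebook dimension.
# Tensor shapes are illustrative stand-ins for real model outputs.
import torch

guidance_scale = 5.0
log_p_uncond = torch.log_softmax(torch.randn(1, 16, 4), dim=1)
log_p_text = torch.log_softmax(torch.randn(1, 16, 4), dim=1)

guided = log_p_uncond + guidance_scale * (log_p_text - log_p_uncond)
guided = guided - torch.logsumexp(guided, dim=1, keepdim=True)  # renormalise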
import sys from collections import defaultdict class __snake_case : def __init__( self ) -> str: '''simple docstring''' UpperCAmelCase : List[str] =[] def UpperCAmelCase__ ( self , snake_case__ ) -> Dict: '''simple docstring''' return self.node_position[vertex] def UpperCAmelCase__ ( self , snake_case__ , snake_case__ ) -> Any: '''simple docstring''' UpperCAmelCase : int =pos def UpperCAmelCase__ ( self , snake_case__ , snake_case__ , snake_case__ , snake_case__ ) -> Union[str, Any]: '''simple docstring''' if start > size // 2 - 1: return else: if 2 * start + 2 >= size: UpperCAmelCase : Optional[Any] =2 * start + 1 else: if heap[2 * start + 1] < heap[2 * start + 2]: UpperCAmelCase : Union[str, Any] =2 * start + 1 else: UpperCAmelCase : Optional[Any] =2 * start + 2 if heap[smallest_child] < heap[start]: UpperCAmelCase , UpperCAmelCase : Optional[Any] =heap[smallest_child], positions[smallest_child] UpperCAmelCase , UpperCAmelCase : Tuple =( heap[start], positions[start], ) UpperCAmelCase , UpperCAmelCase : List[Any] =temp, tempa UpperCAmelCase : List[str] =self.get_position(positions[smallest_child] ) self.set_position( positions[smallest_child] , self.get_position(positions[start] ) ) self.set_position(positions[start] , snake_case__ ) self.top_to_bottom(snake_case__ , snake_case__ , snake_case__ , snake_case__ ) def UpperCAmelCase__ ( self , snake_case__ , snake_case__ , snake_case__ , snake_case__ ) -> Tuple: '''simple docstring''' UpperCAmelCase : str =position[index] while index != 0: UpperCAmelCase : List[Any] =int((index - 2) / 2 ) if index % 2 == 0 else int((index - 1) / 2 ) if val < heap[parent]: UpperCAmelCase : Any =heap[parent] UpperCAmelCase : Any =position[parent] self.set_position(position[parent] , snake_case__ ) else: UpperCAmelCase : Any =val UpperCAmelCase : Tuple =temp self.set_position(snake_case__ , snake_case__ ) break UpperCAmelCase : Tuple =parent else: UpperCAmelCase : Tuple =val UpperCAmelCase : Optional[Any] =temp self.set_position(snake_case__ , 0 ) def UpperCAmelCase__ ( self , snake_case__ , snake_case__ ) -> Tuple: '''simple docstring''' UpperCAmelCase : Any =len(snake_case__ ) // 2 - 1 for i in range(snake_case__ , -1 , -1 ): self.top_to_bottom(snake_case__ , snake_case__ , len(snake_case__ ) , snake_case__ ) def UpperCAmelCase__ ( self , snake_case__ , snake_case__ ) -> List[Any]: '''simple docstring''' UpperCAmelCase : Union[str, Any] =positions[0] UpperCAmelCase : List[Any] =sys.maxsize self.top_to_bottom(snake_case__ , 0 , len(snake_case__ ) , snake_case__ ) return temp def lowerCAmelCase_ ( __lowerCAmelCase )-> Tuple: '''simple docstring''' UpperCAmelCase : int =Heap() UpperCAmelCase : Any =[0] * len(__lowerCAmelCase ) UpperCAmelCase : Tuple =[-1] * len(__lowerCAmelCase ) # Neighboring Tree Vertex of selected vertex # Minimum Distance of explored vertex with neighboring vertex of partial tree # formed in graph UpperCAmelCase : Tuple =[] # Heap of Distance of vertices from their neighboring vertex UpperCAmelCase : str =[] for vertex in range(len(__lowerCAmelCase ) ): distance_tv.append(sys.maxsize ) positions.append(__lowerCAmelCase ) heap.node_position.append(__lowerCAmelCase ) UpperCAmelCase : str =[] UpperCAmelCase : int =1 UpperCAmelCase : Tuple =sys.maxsize for neighbor, distance in adjacency_list[0]: UpperCAmelCase : Any =0 UpperCAmelCase : str =distance heap.heapify(__lowerCAmelCase , __lowerCAmelCase ) for _ in range(1 , len(__lowerCAmelCase ) ): UpperCAmelCase : Optional[Any] =heap.delete_minimum(__lowerCAmelCase , __lowerCAmelCase ) if 
visited[vertex] == 0: tree_edges.append((nbr_tv[vertex], vertex) ) UpperCAmelCase : Optional[Any] =1 for neighbor, distance in adjacency_list[vertex]: if ( visited[neighbor] == 0 and distance < distance_tv[heap.get_position(__lowerCAmelCase )] ): UpperCAmelCase : List[Any] =distance heap.bottom_to_top( __lowerCAmelCase , heap.get_position(__lowerCAmelCase ) , __lowerCAmelCase , __lowerCAmelCase ) UpperCAmelCase : Tuple =vertex return tree_edges if __name__ == "__main__": # pragma: no cover # < --------- Prims Algorithm --------- > __snake_case = int(input('''Enter number of edges: ''').strip()) __snake_case = defaultdict(list) for _ in range(edges_number): __snake_case = [int(x) for x in input().strip().split()] adjacency_list[edge[0]].append([edge[1], edge[2]]) adjacency_list[edge[1]].append([edge[0], edge[2]]) print(prisms_algorithm(adjacency_list))
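# A standalone usage sketch for the Prim implementation above, building a
# small 4-vertex weighted graph directly instead of reading edges from stdin.
# The edge list and the expected spanning tree are illustrative.
if __name__ == "__main__":
    example_graph = defaultdict(list)
    for u, v, w in [(0, 1, 1), (0, 2, 4), (1, 2, 2), (1, 3, 6), (2, 3, 3)]:
        example_graph[u].append([v, w])
        example_graph[v].append([u, w])
    print(prisms_algorithm(example_graph))  # e.g. [(0, 1), (1, 2), (2, 3)]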
import unittest import numpy as np import torch from diffusers import KarrasVePipeline, KarrasVeScheduler, UNetaDModel from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device enable_full_determinism() class __snake_case ( unittest.TestCase ): @property def UpperCAmelCase__ ( self ) -> int: '''simple docstring''' torch.manual_seed(0 ) UpperCAmelCase : Any =UNetaDModel( block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=3 , out_channels=3 , down_block_types=('''DownBlock2D''', '''AttnDownBlock2D''') , up_block_types=('''AttnUpBlock2D''', '''UpBlock2D''') , ) return model def UpperCAmelCase__ ( self ) -> Union[str, Any]: '''simple docstring''' UpperCAmelCase : Tuple =self.dummy_uncond_unet UpperCAmelCase : Optional[int] =KarrasVeScheduler() UpperCAmelCase : List[Any] =KarrasVePipeline(unet=snake_case__ , scheduler=snake_case__ ) pipe.to(snake_case__ ) pipe.set_progress_bar_config(disable=snake_case__ ) UpperCAmelCase : List[str] =torch.manual_seed(0 ) UpperCAmelCase : List[str] =pipe(num_inference_steps=2 , generator=snake_case__ , output_type='''numpy''' ).images UpperCAmelCase : str =torch.manual_seed(0 ) UpperCAmelCase : str =pipe(num_inference_steps=2 , generator=snake_case__ , output_type='''numpy''' , return_dict=snake_case__ )[0] UpperCAmelCase : Any =image[0, -3:, -3:, -1] UpperCAmelCase : List[str] =image_from_tuple[0, -3:, -3:, -1] assert image.shape == (1, 32, 32, 3) UpperCAmelCase : int =np.array([0.0, 1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2 assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2 @slow @require_torch class __snake_case ( unittest.TestCase ): def UpperCAmelCase__ ( self ) -> List[Any]: '''simple docstring''' UpperCAmelCase : Tuple ='''google/ncsnpp-celebahq-256''' UpperCAmelCase : int =UNetaDModel.from_pretrained(snake_case__ ) UpperCAmelCase : Dict =KarrasVeScheduler() UpperCAmelCase : Union[str, Any] =KarrasVePipeline(unet=snake_case__ , scheduler=snake_case__ ) pipe.to(snake_case__ ) pipe.set_progress_bar_config(disable=snake_case__ ) UpperCAmelCase : Any =torch.manual_seed(0 ) UpperCAmelCase : Tuple =pipe(num_inference_steps=20 , generator=snake_case__ , output_type='''numpy''' ).images UpperCAmelCase : Optional[int] =image[0, -3:, -3:, -1] assert image.shape == (1, 256, 256, 3) UpperCAmelCase : Tuple =np.array([0.578, 0.5811, 0.5924, 0.5809, 0.587, 0.5886, 0.5861, 0.5802, 0.586] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
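# A compact end-to-end sketch matching the slow test above (exposed in
# diffusers as UNet2DModel / KarrasVePipeline / KarrasVeScheduler): load the
# pretrained UNet, pair it with the Karras et al. scheduler, and sample.
import torch
from diffusers import KarrasVePipeline, KarrasVeScheduler, UNet2DModel

unet = UNet2DModel.from_pretrained("google/ncsnpp-celebahq-256")
pipe = KarrasVePipeline(unet=unet, scheduler=KarrasVeScheduler())
image = pipe(num_inference_steps=20, generator=torch.manual_seed(0), output_type="numpy").images[0]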
import copy import os from typing import Union from ...configuration_utils import PretrainedConfig from ...utils import logging __snake_case = logging.get_logger(__name__) __snake_case = { '''BAAI/AltCLIP''': '''https://huggingface.co/BAAI/AltCLIP/resolve/main/config.json''', # See all AltCLIP models at https://huggingface.co/models?filter=altclip } class __snake_case ( lowerCamelCase__ ): __lowerCamelCase : Dict = """altclip_text_model""" def __init__( self , snake_case__=25_0002 , snake_case__=1024 , snake_case__=24 , snake_case__=16 , snake_case__=4096 , snake_case__="gelu" , snake_case__=0.1 , snake_case__=0.1 , snake_case__=514 , snake_case__=1 , snake_case__=0.02 , snake_case__=0.02 , snake_case__=1e-05 , snake_case__=1 , snake_case__=0 , snake_case__=2 , snake_case__="absolute" , snake_case__=True , snake_case__=768 , **snake_case__ , ) -> str: '''simple docstring''' super().__init__(pad_token_id=snake_case__ , bos_token_id=snake_case__ , eos_token_id=snake_case__ , **snake_case__ ) UpperCAmelCase : List[str] =vocab_size UpperCAmelCase : Dict =hidden_size UpperCAmelCase : Optional[Any] =num_hidden_layers UpperCAmelCase : str =num_attention_heads UpperCAmelCase : int =hidden_act UpperCAmelCase : List[str] =intermediate_size UpperCAmelCase : int =hidden_dropout_prob UpperCAmelCase : Union[str, Any] =attention_probs_dropout_prob UpperCAmelCase : Optional[int] =max_position_embeddings UpperCAmelCase : int =type_vocab_size UpperCAmelCase : Tuple =initializer_range UpperCAmelCase : Any =initializer_factor UpperCAmelCase : Union[str, Any] =layer_norm_eps UpperCAmelCase : int =position_embedding_type UpperCAmelCase : Tuple =use_cache UpperCAmelCase : str =project_dim class __snake_case ( lowerCamelCase__ ): __lowerCamelCase : Any = """altclip_vision_model""" def __init__( self , snake_case__=768 , snake_case__=3072 , snake_case__=512 , snake_case__=12 , snake_case__=12 , snake_case__=3 , snake_case__=224 , snake_case__=32 , snake_case__="quick_gelu" , snake_case__=1e-5 , snake_case__=0.0 , snake_case__=0.02 , snake_case__=1.0 , **snake_case__ , ) -> Optional[int]: '''simple docstring''' super().__init__(**snake_case__ ) UpperCAmelCase : Optional[Any] =hidden_size UpperCAmelCase : Tuple =intermediate_size UpperCAmelCase : int =projection_dim UpperCAmelCase : Union[str, Any] =num_hidden_layers UpperCAmelCase : List[str] =num_attention_heads UpperCAmelCase : Optional[int] =num_channels UpperCAmelCase : Tuple =patch_size UpperCAmelCase : int =image_size UpperCAmelCase : int =initializer_range UpperCAmelCase : Tuple =initializer_factor UpperCAmelCase : List[Any] =attention_dropout UpperCAmelCase : str =layer_norm_eps UpperCAmelCase : Tuple =hidden_act @classmethod def UpperCAmelCase__ ( cls , snake_case__ , **snake_case__ ) -> "PretrainedConfig": '''simple docstring''' cls._set_token_in_kwargs(snake_case__ ) UpperCAmelCase , UpperCAmelCase : str =cls.get_config_dict(snake_case__ , **snake_case__ ) # get the vision config dict if we are loading from AltCLIPConfig if config_dict.get('''model_type''' ) == "altclip": UpperCAmelCase : Any =config_dict['''vision_config'''] if "model_type" in config_dict and hasattr(cls , '''model_type''' ) and config_dict["model_type"] != cls.model_type: logger.warning( f'''You are using a model of type {config_dict['model_type']} to instantiate a model of type ''' f'''{cls.model_type}. 
This is not supported for all configurations of models and can yield errors.''' ) return cls.from_dict(snake_case__ , **snake_case__ ) class __snake_case ( lowerCamelCase__ ): __lowerCamelCase : Tuple = """altclip""" __lowerCamelCase : int = True def __init__( self , snake_case__=None , snake_case__=None , snake_case__=768 , snake_case__=2.6592 , **snake_case__ ) -> Optional[Any]: '''simple docstring''' UpperCAmelCase : List[str] =kwargs.pop('''text_config_dict''' , snake_case__ ) UpperCAmelCase : int =kwargs.pop('''vision_config_dict''' , snake_case__ ) super().__init__(**snake_case__ ) # Instead of simply assigning `[text|vision]_config_dict` to `[text|vision]_config`, we use the values in # `[text|vision]_config_dict` to update the values in `[text|vision]_config`. The values should be same in most # cases, but we don't want to break anything regarding `_config_dict` that existed before commit `8827e1b2`. if text_config_dict is not None: if text_config is None: UpperCAmelCase : int ={} # This is the complete result when using `text_config_dict`. UpperCAmelCase : Tuple =AltCLIPTextConfig(**snake_case__ ).to_dict() # Give a warning if the values exist in both `_text_config_dict` and `text_config` but being different. for key, value in _text_config_dict.items(): if key in text_config and value != text_config[key] and key not in ["transformers_version"]: # If specified in `text_config_dict` if key in text_config_dict: UpperCAmelCase : Tuple =( f'''`{key}` is found in both `text_config_dict` and `text_config` but with different values. ''' f'''The value `text_config_dict["{key}"]` will be used instead.''' ) # If inferred from default argument values (just to be super careful) else: UpperCAmelCase : List[str] =( f'''`text_config_dict` is provided which will be used to initialize `AltCLIPTextConfig`. The ''' f'''value `text_config["{key}"]` will be overriden.''' ) logger.warning(snake_case__ ) # Update all values in `text_config` with the ones in `_text_config_dict`. text_config.update(_text_config_dict ) if vision_config_dict is not None: if vision_config is None: UpperCAmelCase : Optional[int] ={} # This is the complete result when using `vision_config_dict`. UpperCAmelCase : List[Any] =AltCLIPVisionConfig(**snake_case__ ).to_dict() # convert keys to string instead of integer if "id2label" in _vision_config_dict: UpperCAmelCase : Dict ={ str(snake_case__ ): value for key, value in _vision_config_dict['''id2label'''].items() } # Give a warning if the values exist in both `_vision_config_dict` and `vision_config` but being different. for key, value in _vision_config_dict.items(): if key in vision_config and value != vision_config[key] and key not in ["transformers_version"]: # If specified in `vision_config_dict` if key in vision_config_dict: UpperCAmelCase : Dict =( f'''`{key}` is found in both `vision_config_dict` and `vision_config` but with different ''' f'''values. The value `vision_config_dict["{key}"]` will be used instead.''' ) # If inferred from default argument values (just to be super careful) else: UpperCAmelCase : Any =( f'''`vision_config_dict` is provided which will be used to initialize `AltCLIPVisionConfig`. ''' f'''The value `vision_config["{key}"]` will be overriden.''' ) logger.warning(snake_case__ ) # Update all values in `vision_config` with the ones in `_vision_config_dict`. vision_config.update(_vision_config_dict ) if text_config is None: UpperCAmelCase : List[Any] ={} logger.info('''`text_config` is `None`. 
Initializing the `AltCLIPTextConfig` with default values.''' ) if vision_config is None: UpperCAmelCase : List[Any] ={} logger.info('''`vision_config` is `None`. initializing the `AltCLIPVisionConfig` with default values.''' ) UpperCAmelCase : Any =AltCLIPTextConfig(**snake_case__ ) UpperCAmelCase : Tuple =AltCLIPVisionConfig(**snake_case__ ) UpperCAmelCase : Optional[Any] =projection_dim UpperCAmelCase : List[str] =logit_scale_init_value UpperCAmelCase : int =1.0 @classmethod def UpperCAmelCase__ ( cls , snake_case__ , snake_case__ , **snake_case__ ) -> Optional[Any]: '''simple docstring''' return cls(text_config=text_config.to_dict() , vision_config=vision_config.to_dict() , **snake_case__ ) def UpperCAmelCase__ ( self ) -> Tuple: '''simple docstring''' UpperCAmelCase : Any =copy.deepcopy(self.__dict__ ) UpperCAmelCase : Dict =self.text_config.to_dict() UpperCAmelCase : str =self.vision_config.to_dict() UpperCAmelCase : Optional[Any] =self.__class__.model_type return output
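# A hedged sketch of composing the combined config from its two halves via
# the classmethod defined above (exposed in transformers as
# AltCLIPConfig.from_text_vision_configs); default values throughout.
from transformers import AltCLIPConfig, AltCLIPTextConfig, AltCLIPVisionConfig

config = AltCLIPConfig.from_text_vision_configs(AltCLIPTextConfig(), AltCLIPVisionConfig())
print(config.projection_dim)  # 768 by default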
import argparse import json import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from torchvision import transforms from transformers import BitImageProcessor, FocalNetConfig, FocalNetForImageClassification from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, PILImageResampling def lowerCAmelCase_ ( __lowerCAmelCase )-> Optional[Any]: '''simple docstring''' UpperCAmelCase : Optional[int] =[2, 2, 6, 2] if '''tiny''' in model_name else [2, 2, 18, 2] UpperCAmelCase : List[str] =True if '''large''' in model_name or '''huge''' in model_name else False UpperCAmelCase : Union[str, Any] =True if '''large''' in model_name or '''huge''' in model_name else False UpperCAmelCase : Tuple =True if '''large''' in model_name or '''huge''' in model_name else False if "large" in model_name or "xlarge" in model_name or "huge" in model_name: if "fl3" in model_name: UpperCAmelCase : List[Any] =[3, 3, 3, 3] UpperCAmelCase : Any =[5, 5, 5, 5] elif "fl4" in model_name: UpperCAmelCase : Any =[4, 4, 4, 4] UpperCAmelCase : Tuple =[3, 3, 3, 3] if "tiny" in model_name or "small" in model_name or "base" in model_name: UpperCAmelCase : Union[str, Any] =[3, 3, 3, 3] if "lrf" in model_name: UpperCAmelCase : Any =[3, 3, 3, 3] else: UpperCAmelCase : Optional[int] =[2, 2, 2, 2] if "tiny" in model_name: UpperCAmelCase : Union[str, Any] =96 elif "small" in model_name: UpperCAmelCase : Optional[int] =96 elif "base" in model_name: UpperCAmelCase : List[str] =1_28 elif "large" in model_name: UpperCAmelCase : Optional[int] =1_92 elif "xlarge" in model_name: UpperCAmelCase : Dict =2_56 elif "huge" in model_name: UpperCAmelCase : int =3_52 # set label information UpperCAmelCase : int ='''huggingface/label-files''' if "large" in model_name or "huge" in model_name: UpperCAmelCase : str ='''imagenet-22k-id2label.json''' else: UpperCAmelCase : Dict ='''imagenet-1k-id2label.json''' UpperCAmelCase : int =json.load(open(hf_hub_download(__lowerCAmelCase , __lowerCAmelCase , repo_type='''dataset''' ) , '''r''' ) ) UpperCAmelCase : List[Any] ={int(__lowerCAmelCase ): v for k, v in idalabel.items()} UpperCAmelCase : str ={v: k for k, v in idalabel.items()} UpperCAmelCase : Union[str, Any] =FocalNetConfig( embed_dim=__lowerCAmelCase , depths=__lowerCAmelCase , focal_levels=__lowerCAmelCase , focal_windows=__lowerCAmelCase , use_conv_embed=__lowerCAmelCase , idalabel=__lowerCAmelCase , labelaid=__lowerCAmelCase , use_post_layernorm=__lowerCAmelCase , use_layerscale=__lowerCAmelCase , ) return config def lowerCAmelCase_ ( __lowerCAmelCase )-> Any: '''simple docstring''' if "patch_embed.proj" in name: UpperCAmelCase : Optional[int] =name.replace('''patch_embed.proj''' , '''embeddings.patch_embeddings.projection''' ) if "patch_embed.norm" in name: UpperCAmelCase : Tuple =name.replace('''patch_embed.norm''' , '''embeddings.norm''' ) if "layers" in name: UpperCAmelCase : Optional[Any] ='''encoder.''' + name if "encoder.layers" in name: UpperCAmelCase : Dict =name.replace('''encoder.layers''' , '''encoder.stages''' ) if "downsample.proj" in name: UpperCAmelCase : Any =name.replace('''downsample.proj''' , '''downsample.projection''' ) if "blocks" in name: UpperCAmelCase : Union[str, Any] =name.replace('''blocks''' , '''layers''' ) if "modulation.f.weight" in name or "modulation.f.bias" in name: UpperCAmelCase : Optional[int] =name.replace('''modulation.f''' , '''modulation.projection_in''' ) if "modulation.h.weight" in name or "modulation.h.bias" in name: UpperCAmelCase : Any 
=name.replace('''modulation.h''' , '''modulation.projection_context''' ) if "modulation.proj.weight" in name or "modulation.proj.bias" in name: UpperCAmelCase : Union[str, Any] =name.replace('''modulation.proj''' , '''modulation.projection_out''' ) if name == "norm.weight": UpperCAmelCase : Dict ='''layernorm.weight''' if name == "norm.bias": UpperCAmelCase : List[Any] ='''layernorm.bias''' if "head" in name: UpperCAmelCase : List[Any] =name.replace('''head''' , '''classifier''' ) else: UpperCAmelCase : Any ='''focalnet.''' + name return name def lowerCAmelCase_ ( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase=False )-> int: '''simple docstring''' UpperCAmelCase : Optional[int] ={ '''focalnet-tiny''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_tiny_srf.pth''', '''focalnet-tiny-lrf''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_tiny_lrf.pth''', '''focalnet-small''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_small_srf.pth''', '''focalnet-small-lrf''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_small_lrf.pth''', '''focalnet-base''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_base_srf.pth''', '''focalnet-base-lrf''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_base_lrf.pth''', '''focalnet-large-lrf-fl3''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_large_lrf_384.pth''', '''focalnet-large-lrf-fl4''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_large_lrf_384_fl4.pth''', '''focalnet-xlarge-lrf-fl3''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_xlarge_lrf_384.pth''', '''focalnet-xlarge-lrf-fl4''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_xlarge_lrf_384_fl4.pth''', } # fmt: on UpperCAmelCase : int =model_name_to_url[model_name] print('''Checkpoint URL: ''' , __lowerCAmelCase ) UpperCAmelCase : int =torch.hub.load_state_dict_from_url(__lowerCAmelCase , map_location='''cpu''' )['''model'''] # rename keys for key in state_dict.copy().keys(): UpperCAmelCase : List[Any] =state_dict.pop(__lowerCAmelCase ) UpperCAmelCase : List[str] =val UpperCAmelCase : Any =get_focalnet_config(__lowerCAmelCase ) UpperCAmelCase : Optional[int] =FocalNetForImageClassification(__lowerCAmelCase ) model.eval() # load state dict model.load_state_dict(__lowerCAmelCase ) # verify conversion UpperCAmelCase : Optional[int] ='''http://images.cocodataset.org/val2017/000000039769.jpg''' UpperCAmelCase : Union[str, Any] =BitImageProcessor( do_resize=__lowerCAmelCase , size={'''shortest_edge''': 2_56} , resample=PILImageResampling.BILINEAR , do_center_crop=__lowerCAmelCase , crop_size=2_24 , do_normalize=__lowerCAmelCase , image_mean=__lowerCAmelCase , image_std=__lowerCAmelCase , ) UpperCAmelCase : Dict =Image.open(requests.get(__lowerCAmelCase , stream=__lowerCAmelCase ).raw ) UpperCAmelCase : Any =processor(images=__lowerCAmelCase , return_tensors='''pt''' ) UpperCAmelCase : List[str] =transforms.Compose( [ transforms.Resize(2_56 ), transforms.CenterCrop(2_24 ), transforms.ToTensor(), transforms.Normalize(mean=[0.485, 0.456, 0.406] , std=[0.229, 0.224, 0.225] ), ] ) UpperCAmelCase : Optional[Any] =image_transforms(__lowerCAmelCase ).unsqueeze(0 ) # verify pixel_values assert 
torch.allclose(inputs.pixel_values , __lowerCAmelCase , atol=1e-4 ) UpperCAmelCase : List[str] =model(**__lowerCAmelCase ) UpperCAmelCase : Any =outputs.logits.argmax(-1 ).item() print('''Predicted class:''' , model.config.idalabel[predicted_class_idx] ) print('''First values of logits:''' , outputs.logits[0, :3] ) if model_name == "focalnet-tiny": UpperCAmelCase : Dict =torch.tensor([0.2166, -0.4368, 0.2191] ) elif model_name == "focalnet-tiny-lrf": UpperCAmelCase : str =torch.tensor([1.1669, 0.0125, -0.1695] ) elif model_name == "focalnet-small": UpperCAmelCase : Dict =torch.tensor([0.4917, -0.0430, 0.1341] ) elif model_name == "focalnet-small-lrf": UpperCAmelCase : Dict =torch.tensor([-0.2588, -0.5342, -0.2331] ) elif model_name == "focalnet-base": UpperCAmelCase : Tuple =torch.tensor([-0.1655, -0.4090, -0.1730] ) elif model_name == "focalnet-base-lrf": UpperCAmelCase : Union[str, Any] =torch.tensor([0.5306, -0.0483, -0.3928] ) assert torch.allclose(outputs.logits[0, :3] , __lowerCAmelCase , atol=1e-4 ) print('''Looks ok!''' ) if pytorch_dump_folder_path is not None: print(f'''Saving model and processor of {model_name} to {pytorch_dump_folder_path}''' ) model.save_pretrained(__lowerCAmelCase ) processor.save_pretrained(__lowerCAmelCase ) if push_to_hub: print(f'''Pushing model and processor of {model_name} to the hub...''' ) model.push_to_hub(f'''{model_name}''' ) processor.push_to_hub(f'''{model_name}''' ) if __name__ == "__main__": __snake_case = argparse.ArgumentParser() # Required parameters parser.add_argument( '''--model_name''', default='''focalnet-tiny''', type=str, help='''Name of the FocalNet model you\'d like to convert.''', ) parser.add_argument( '''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.''' ) parser.add_argument( '''--push_to_hub''', action='''store_true''', help='''Whether to push the model and processor to the hub.''', ) __snake_case = parser.parse_args() convert_focalnet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
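# Example invocation of the conversion script above. The file name is an
# assumption (whatever this script is saved as; `convert_focalnet_to_hf_format.py`
# is the name used upstream):
#
#   python convert_focalnet_to_hf_format.py \
#       --model_name focalnet-tiny \
#       --pytorch_dump_folder_path ./focalnet-tiny \
#       --push_to_hub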
from __future__ import annotations import unittest from transformers import BlenderbotConfig, BlenderbotTokenizer, is_tf_available from transformers.testing_utils import require_tf, require_tokenizers, slow from transformers.utils import cached_property from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import TFAutoModelForSeqaSeqLM, TFBlenderbotForConditionalGeneration, TFBlenderbotModel @require_tf class __snake_case : __lowerCamelCase : str = BlenderbotConfig __lowerCamelCase : Optional[Any] = {} __lowerCamelCase : Optional[int] = """gelu""" def __init__( self , snake_case__ , snake_case__=13 , snake_case__=7 , snake_case__=True , snake_case__=False , snake_case__=99 , snake_case__=32 , snake_case__=2 , snake_case__=4 , snake_case__=37 , snake_case__=0.1 , snake_case__=0.1 , snake_case__=20 , snake_case__=2 , snake_case__=1 , snake_case__=0 , ) -> Optional[Any]: '''simple docstring''' UpperCAmelCase : Union[str, Any] =parent UpperCAmelCase : Optional[int] =batch_size UpperCAmelCase : Dict =seq_length UpperCAmelCase : Optional[Any] =is_training UpperCAmelCase : List[str] =use_labels UpperCAmelCase : List[Any] =vocab_size UpperCAmelCase : Optional[int] =hidden_size UpperCAmelCase : Tuple =num_hidden_layers UpperCAmelCase : Any =num_attention_heads UpperCAmelCase : Optional[int] =intermediate_size UpperCAmelCase : str =hidden_dropout_prob UpperCAmelCase : Optional[int] =attention_probs_dropout_prob UpperCAmelCase : str =max_position_embeddings UpperCAmelCase : List[Any] =eos_token_id UpperCAmelCase : Optional[int] =pad_token_id UpperCAmelCase : Tuple =bos_token_id def UpperCAmelCase__ ( self ) -> Optional[Any]: '''simple docstring''' UpperCAmelCase : List[Any] =ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size ) UpperCAmelCase : List[Any] =tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ) , 1 ) UpperCAmelCase : Tuple =tf.concat([input_ids, eos_tensor] , axis=1 ) UpperCAmelCase : str =ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) UpperCAmelCase : Optional[Any] =self.config_cls( vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , **self.config_updates , ) UpperCAmelCase : List[str] =prepare_blenderbot_inputs_dict(snake_case__ , snake_case__ , snake_case__ ) return config, inputs_dict def UpperCAmelCase__ ( self , snake_case__ , snake_case__ ) -> int: '''simple docstring''' UpperCAmelCase : Union[str, Any] =TFBlenderbotModel(config=snake_case__ ).get_decoder() UpperCAmelCase : Any =inputs_dict['''input_ids'''] UpperCAmelCase : str =input_ids[:1, :] UpperCAmelCase : Tuple =inputs_dict['''attention_mask'''][:1, :] UpperCAmelCase : Tuple =inputs_dict['''head_mask'''] UpperCAmelCase : List[Any] =1 # first forward pass UpperCAmelCase : List[str] =model(snake_case__ , attention_mask=snake_case__ , head_mask=snake_case__ , 
use_cache=snake_case__ ) UpperCAmelCase , UpperCAmelCase : str =outputs.to_tuple() # create hypothetical next token and extent to next_input_ids UpperCAmelCase : Union[str, Any] =ids_tensor((self.batch_size, 3) , config.vocab_size ) UpperCAmelCase : List[Any] =tf.cast(ids_tensor((self.batch_size, 3) , 2 ) , tf.inta ) # append to next input_ids and UpperCAmelCase : Tuple =tf.concat([input_ids, next_tokens] , axis=-1 ) UpperCAmelCase : int =tf.concat([attention_mask, next_attn_mask] , axis=-1 ) UpperCAmelCase : Optional[int] =model(snake_case__ , attention_mask=snake_case__ )[0] UpperCAmelCase : str =model(snake_case__ , attention_mask=snake_case__ , past_key_values=snake_case__ )[0] self.parent.assertEqual(next_tokens.shape[1] , output_from_past.shape[1] ) # select random slice UpperCAmelCase : List[Any] =int(ids_tensor((1,) , output_from_past.shape[-1] ) ) UpperCAmelCase : List[Any] =output_from_no_past[:, -3:, random_slice_idx] UpperCAmelCase : Dict =output_from_past[:, :, random_slice_idx] # test that outputs are equal for slice tf.debugging.assert_near(snake_case__ , snake_case__ , rtol=1e-3 ) def lowerCAmelCase_ ( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase=None , __lowerCAmelCase=None , __lowerCAmelCase=None , __lowerCAmelCase=None , __lowerCAmelCase=None , )-> str: '''simple docstring''' if attention_mask is None: UpperCAmelCase : int =tf.cast(tf.math.not_equal(__lowerCAmelCase , config.pad_token_id ) , tf.inta ) if decoder_attention_mask is None: UpperCAmelCase : Tuple =tf.concat( [ tf.ones(decoder_input_ids[:, :1].shape , dtype=tf.inta ), tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:] , config.pad_token_id ) , tf.inta ), ] , axis=-1 , ) if head_mask is None: UpperCAmelCase : str =tf.ones((config.encoder_layers, config.encoder_attention_heads) ) if decoder_head_mask is None: UpperCAmelCase : Union[str, Any] =tf.ones((config.decoder_layers, config.decoder_attention_heads) ) if cross_attn_head_mask is None: UpperCAmelCase : int =tf.ones((config.decoder_layers, config.decoder_attention_heads) ) return { "input_ids": input_ids, "decoder_input_ids": decoder_input_ids, "attention_mask": attention_mask, "decoder_attention_mask": decoder_attention_mask, "head_mask": head_mask, "decoder_head_mask": decoder_head_mask, "cross_attn_head_mask": cross_attn_head_mask, } @require_tf class __snake_case ( lowerCamelCase__ , lowerCamelCase__ , unittest.TestCase ): __lowerCamelCase : List[str] = (TFBlenderbotForConditionalGeneration, TFBlenderbotModel) if is_tf_available() else () __lowerCamelCase : Dict = (TFBlenderbotForConditionalGeneration,) if is_tf_available() else () __lowerCamelCase : Dict = ( { """conversational""": TFBlenderbotForConditionalGeneration, """feature-extraction""": TFBlenderbotModel, """summarization""": TFBlenderbotForConditionalGeneration, """text2text-generation""": TFBlenderbotForConditionalGeneration, """translation""": TFBlenderbotForConditionalGeneration, } if is_tf_available() else {} ) __lowerCamelCase : Union[str, Any] = True __lowerCamelCase : Union[str, Any] = False __lowerCamelCase : Union[str, Any] = False def UpperCAmelCase__ ( self ) -> Dict: '''simple docstring''' UpperCAmelCase : List[str] =TFBlenderbotModelTester(self ) UpperCAmelCase : List[Any] =ConfigTester(self , config_class=snake_case__ ) def UpperCAmelCase__ ( self ) -> Optional[Any]: '''simple docstring''' self.config_tester.run_common_tests() def UpperCAmelCase__ ( self ) -> List[Any]: '''simple docstring''' UpperCAmelCase : int 
=self.model_tester.prepare_config_and_inputs_for_common() self.model_tester.check_decoder_model_past_large_inputs(*snake_case__ ) @require_tokenizers @require_tf class __snake_case ( unittest.TestCase ): __lowerCamelCase : List[str] = ["""My friends are cool but they eat too many carbs."""] __lowerCamelCase : Dict = """facebook/blenderbot-400M-distill""" @cached_property def UpperCAmelCase__ ( self ) -> int: '''simple docstring''' return BlenderbotTokenizer.from_pretrained(self.model_name ) @cached_property def UpperCAmelCase__ ( self ) -> int: '''simple docstring''' UpperCAmelCase : int =TFAutoModelForSeqaSeqLM.from_pretrained(self.model_name ) return model @slow def UpperCAmelCase__ ( self ) -> Any: '''simple docstring''' UpperCAmelCase : Optional[int] =self.tokenizer(self.src_text , return_tensors='''tf''' ) UpperCAmelCase : Optional[int] =self.model.generate( model_inputs.input_ids , ) UpperCAmelCase : str =self.tokenizer.batch_decode(generated_ids.numpy() , skip_special_tokens=snake_case__ )[0] assert ( generated_words == " That's unfortunate. Are they trying to lose weight or are they just trying to be healthier?" )
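# A compact generation sketch mirroring the integration test above (the auto
# class is exposed in transformers as TFAutoModelForSeq2SeqLM):
from transformers import BlenderbotTokenizer, TFAutoModelForSeq2SeqLM

tok = BlenderbotTokenizer.from_pretrained("facebook/blenderbot-400M-distill")
model = TFAutoModelForSeq2SeqLM.from_pretrained("facebook/blenderbot-400M-distill")

inputs = tok(["My friends are cool but they eat too many carbs."], return_tensors="tf")
reply_ids = model.generate(inputs.input_ids)
print(tok.batch_decode(reply_ids.numpy(), skip_special_tokens=True)[0])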