Dataset schema, one record per row:

    code                     string  (82 to 53.2k chars)
    code_codestyle           int64   (0 to 721)
    style_context            string  (91 to 41.9k chars)
    style_context_codestyle  int64   (0 to 699)
    label                    int64   (0 to 1)
row 1 · code:

from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_flax_available,
    is_tf_available,
    is_torch_available,
)


_import_structure = {
    "configuration_vision_encoder_decoder": ["VisionEncoderDecoderConfig", "VisionEncoderDecoderOnnxConfig"]
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_vision_encoder_decoder"] = ["VisionEncoderDecoderModel"]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_vision_encoder_decoder"] = ["TFVisionEncoderDecoderModel"]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_vision_encoder_decoder"] = ["FlaxVisionEncoderDecoderModel"]

if TYPE_CHECKING:
    from .configuration_vision_encoder_decoder import VisionEncoderDecoderConfig, VisionEncoderDecoderOnnxConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_vision_encoder_decoder import VisionEncoderDecoderModel

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_vision_encoder_decoder import TFVisionEncoderDecoderModel

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_vision_encoder_decoder import FlaxVisionEncoderDecoderModel

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
row 1 · code_codestyle: 323
row 1 · style_context:

import argparse
import json
import os
from pathlib import Path

import requests
import torch

from transformers import JukeboxConfig, JukeboxModel
from transformers.utils import logging


logging.set_verbosity_info()
logger = logging.get_logger(__name__)


PREFIX = "https://openaipublic.azureedge.net/jukebox/models/"
MODEL_MAPPING = {
    "jukebox-1b-lyrics": [
        "5b/vqvae.pth.tar",
        "5b/prior_level_0.pth.tar",
        "5b/prior_level_1.pth.tar",
        "1b_lyrics/prior_level_2.pth.tar",
    ],
    "jukebox-5b-lyrics": [
        "5b/vqvae.pth.tar",
        "5b/prior_level_0.pth.tar",
        "5b/prior_level_1.pth.tar",
        "5b_lyrics/prior_level_2.pth.tar",
    ],
}


def replace_key(key):
    if key.endswith(".model.1.bias") and len(key.split(".")) > 10:
        key = key.replace(".model.1.bias", ".conv1d_1.bias")
    elif key.endswith(".model.1.weight") and len(key.split(".")) > 10:
        key = key.replace(".model.1.weight", ".conv1d_1.weight")
    elif key.endswith(".model.3.bias") and len(key.split(".")) > 10:
        key = key.replace(".model.3.bias", ".conv1d_2.bias")
    elif key.endswith(".model.3.weight") and len(key.split(".")) > 10:
        key = key.replace(".model.3.weight", ".conv1d_2.weight")

    if "conditioner_blocks.0." in key:
        key = key.replace("conditioner_blocks.0", "conditioner_blocks")

    if "prime_prior" in key:
        key = key.replace("prime_prior", "encoder")

    if ".emb." in key and "total" not in key and "absolute" not in key and "relative" not in key:
        key = key.replace(".emb.", ".")

    if key.endswith("k"):  # replace vqvae.X.k with vqvae.X.codebook
        return key.replace(".k", ".codebook")
    if "y_emb." in key:
        return key.replace("y_emb.", "metadata_embedding.")
    if "x_emb.emb." in key:
        key = key.replace("0.x_emb.emb", "embed_tokens")
    if "prime_state_ln" in key:
        return key.replace("prime_state_ln", "encoder.final_layer_norm")
    if ".ln" in key:
        return key.replace(".ln", ".layer_norm")
    if "_ln" in key:
        return key.replace("_ln", "_layer_norm")
    if "prime_state_proj" in key:
        return key.replace("prime_state_proj", "encoder.proj_in")
    if "prime_x_out" in key:
        return key.replace("prime_x_out", "encoder.lm_head")
    if "prior.x_out" in key:
        return key.replace("x_out", "fc_proj_out")
    if "x_emb" in key:
        return key.replace("x_emb", "embed_tokens")

    return key


def fix_jukebox_keys(state_dict, model_state_dict, key_prefix, mapping):
    new_dict = {}
    import re

    re_encoder_block_conv_in = re.compile(r"encoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).(bias|weight)")
    re_encoder_block_resnet = re.compile(
        r"encoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)"
    )
    re_encoder_block_proj_out = re.compile(r"encoders.(\d*).level_blocks.(\d*).model.(\d*).(bias|weight)")

    re_decoder_block_conv_out = re.compile(r"decoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).(bias|weight)")
    re_decoder_block_resnet = re.compile(
        r"decoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)"
    )
    re_decoder_block_proj_in = re.compile(r"decoders.(\d*).level_blocks.(\d*).model.(\d*).(bias|weight)")

    re_prior_cond_conv_out = re.compile(r"conditioner_blocks.(\d*).cond.model.(\d*).(\d).(bias|weight)")
    re_prior_cond_resnet = re.compile(
        r"conditioner_blocks.(\d*).cond.model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)"
    )
    re_prior_cond_proj_in = re.compile(r"conditioner_blocks.(\d*).cond.model.(\d*).(bias|weight)")

    for original_key, value in state_dict.items():
        # rename vqvae.encoder keys
        if re_encoder_block_conv_in.fullmatch(original_key):
            regex_match = re_encoder_block_conv_in.match(original_key)
            groups = regex_match.groups()
            block_index = int(groups[2]) * 2 + int(groups[3])
            re_new_key = f"encoders.{groups[0]}.level_blocks.{groups[1]}.downsample_block.{block_index}.{groups[-1]}"
            key = re_encoder_block_conv_in.sub(re_new_key, original_key)

        elif re_encoder_block_resnet.fullmatch(original_key):
            regex_match = re_encoder_block_resnet.match(original_key)
            groups = regex_match.groups()
            block_index = int(groups[2]) * 2 + int(groups[3])
            conv_index = {"1": 1, "3": 2}[groups[-2]]
            prefix = f"encoders.{groups[0]}.level_blocks.{groups[1]}.downsample_block.{block_index}."
            resnet_block = f"resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}"
            re_new_key = prefix + resnet_block
            key = re_encoder_block_resnet.sub(re_new_key, original_key)

        elif re_encoder_block_proj_out.fullmatch(original_key):
            regex_match = re_encoder_block_proj_out.match(original_key)
            groups = regex_match.groups()
            re_new_key = f"encoders.{groups[0]}.level_blocks.{groups[1]}.proj_out.{groups[-1]}"
            key = re_encoder_block_proj_out.sub(re_new_key, original_key)

        # rename vqvae.decoder keys
        elif re_decoder_block_conv_out.fullmatch(original_key):
            regex_match = re_decoder_block_conv_out.match(original_key)
            groups = regex_match.groups()
            block_index = int(groups[2]) * 2 + int(groups[3]) - 2
            re_new_key = f"decoders.{groups[0]}.level_blocks.{groups[1]}.upsample_block.{block_index}.{groups[-1]}"
            key = re_decoder_block_conv_out.sub(re_new_key, original_key)

        elif re_decoder_block_resnet.fullmatch(original_key):
            regex_match = re_decoder_block_resnet.match(original_key)
            groups = regex_match.groups()
            block_index = int(groups[2]) * 2 + int(groups[3]) - 2
            conv_index = {"1": 1, "3": 2}[groups[-2]]
            prefix = f"decoders.{groups[0]}.level_blocks.{groups[1]}.upsample_block.{block_index}."
            resnet_block = f"resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}"
            re_new_key = prefix + resnet_block
            key = re_decoder_block_resnet.sub(re_new_key, original_key)

        elif re_decoder_block_proj_in.fullmatch(original_key):
            regex_match = re_decoder_block_proj_in.match(original_key)
            groups = regex_match.groups()
            re_new_key = f"decoders.{groups[0]}.level_blocks.{groups[1]}.proj_in.{groups[-1]}"
            key = re_decoder_block_proj_in.sub(re_new_key, original_key)

        # rename prior cond.model to upsampler.upsample_block and resnet
        elif re_prior_cond_conv_out.fullmatch(original_key):
            regex_match = re_prior_cond_conv_out.match(original_key)
            groups = regex_match.groups()
            block_index = int(groups[1]) * 2 + int(groups[2]) - 2
            re_new_key = f"conditioner_blocks.upsampler.upsample_block.{block_index}.{groups[-1]}"
            key = re_prior_cond_conv_out.sub(re_new_key, original_key)

        elif re_prior_cond_resnet.fullmatch(original_key):
            regex_match = re_prior_cond_resnet.match(original_key)
            groups = regex_match.groups()
            block_index = int(groups[1]) * 2 + int(groups[2]) - 2
            conv_index = {"1": 1, "3": 2}[groups[-2]]
            prefix = f"conditioner_blocks.upsampler.upsample_block.{block_index}."
            resnet_block = f"resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}"
            re_new_key = prefix + resnet_block
            key = re_prior_cond_resnet.sub(re_new_key, original_key)

        elif re_prior_cond_proj_in.fullmatch(original_key):
            regex_match = re_prior_cond_proj_in.match(original_key)
            groups = regex_match.groups()
            re_new_key = f"conditioner_blocks.upsampler.proj_in.{groups[-1]}"
            key = re_prior_cond_proj_in.sub(re_new_key, original_key)

        # keep original key
        else:
            key = original_key

        key = replace_key(key)

        if f"{key_prefix}.{key}" not in model_state_dict or key is None:
            print(f"failed converting {original_key} to {key}, does not match")
        # handle mismatched shape
        elif value.shape != model_state_dict[f"{key_prefix}.{key}"].shape:
            val = model_state_dict[f"{key_prefix}.{key}"]
            print(f"{original_key}-> {key} : \nshape {val.shape} and {value.shape}, do not match")
            key = original_key

        mapping[key] = original_key
        new_dict[key] = value

    return new_dict


@torch.no_grad()
def convert_openai_checkpoint(model_name=None, pytorch_dump_folder_path=None):
    for file in MODEL_MAPPING[model_name]:
        if not os.path.isfile(f"{pytorch_dump_folder_path}/{file.split('/')[-1]}"):
            r = requests.get(f"{PREFIX}{file}", allow_redirects=True)
            os.makedirs(f"{pytorch_dump_folder_path}/", exist_ok=True)
            open(f"{pytorch_dump_folder_path}/{file.split('/')[-1]}", "wb").write(r.content)

    model_to_convert = MODEL_MAPPING[model_name.split("/")[-1]]

    config = JukeboxConfig.from_pretrained(model_name)
    model = JukeboxModel(config)

    weight_dict = []
    mapping = {}
    for i, dict_name in enumerate(model_to_convert):
        old_dic = torch.load(f"{pytorch_dump_folder_path}/{dict_name.split('/')[-1]}")["model"]

        new_dic = {}
        for k in old_dic.keys():
            if k.endswith(".b"):
                new_dic[k.replace("b", "bias")] = old_dic[k]
            elif k.endswith(".w"):
                new_dic[k.replace("w", "weight")] = old_dic[k]
            elif "level_2" not in dict_name and "cond.model." in k:
                new_dic[k.replace(".blocks.", ".model.")] = old_dic[k]
            else:
                new_dic[k] = old_dic[k]

        key_prefix = "vqvae" if i == 0 else f"priors.{3 - i}"
        new_dic = fix_jukebox_keys(new_dic, model.state_dict(), key_prefix, mapping)
        weight_dict.append(new_dic)

    vqvae_state_dict = weight_dict.pop(0)
    model.vqvae.load_state_dict(vqvae_state_dict)
    for i in range(len(weight_dict)):
        model.priors[i].load_state_dict(weight_dict[2 - i])

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    with open(f"{pytorch_dump_folder_path}/mapping.json", "w") as txtfile:
        json.dump(mapping, txtfile)

    print(f"Saving model {model_name} to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)

    return weight_dict


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--model_name",
        default="jukebox-5b-lyrics",
        type=str,
        help="Name of the model you'd like to convert.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path",
        default="jukebox-5b-lyrics-converted",
        type=str,
        help="Path to the output PyTorch model directory.",
    )
    args = parser.parse_args()
    convert_openai_checkpoint(args.model_name, args.pytorch_dump_folder_path)
row 1 · style_context_codestyle: 323
row 1 · label: 1
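The init file in this row's code field registers everything in _import_structure and hands the dict to _LazyModule so heavy frameworks are only imported on first use. A minimal sketch of that deferral idea (not the actual transformers _LazyModule; the submodule and attribute names below are just illustrative) can be written with PEP 562's module-level __getattr__:

import importlib

# Maps submodule name -> public names it provides, mirroring the _import_structure dict above.
_import_structure = {"configuration_vision_encoder_decoder": ["VisionEncoderDecoderConfig"]}


def __getattr__(name):
    # Called only when `name` is not already defined on the module:
    # import the owning submodule on demand and forward the lookup.
    for module_name, attrs in _import_structure.items():
        if name in attrs:
            module = importlib.import_module("." + module_name, __package__)
            return getattr(module, name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")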
row 2 · code:

import json
import os
import unittest

from transformers.models.roc_bert.tokenization_roc_bert import (
    VOCAB_FILES_NAMES,
    RoCBertBasicTokenizer,
    RoCBertTokenizer,
    RoCBertWordpieceTokenizer,
    _is_control,
    _is_punctuation,
    _is_whitespace,
)
from transformers.testing_utils import require_tokenizers, slow

from ...test_tokenization_common import TokenizerTesterMixin, filter_non_english


@require_tokenizers
class RoCBertTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = RoCBertTokenizer
    rust_tokenizer_class = None
    test_rust_tokenizer = False
    space_between_special_tokens = True
    from_pretrained_filter = filter_non_english

    def setUp(self):
        super().setUp()

        vocab_tokens = ["[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]", "你", "好", "是", "谁", "a", "b", "c", "d"]
        word_shape = {}
        word_pronunciation = {}
        for i, value in enumerate(vocab_tokens):
            word_shape[value] = i
            word_pronunciation[value] = i
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.word_shape_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["word_shape_file"])
        self.word_pronunciation_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["word_pronunciation_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))
        with open(self.word_shape_file, "w", encoding="utf-8") as word_shape_writer:
            json.dump(word_shape, word_shape_writer, ensure_ascii=False)
        with open(self.word_pronunciation_file, "w", encoding="utf-8") as word_pronunciation_writer:
            json.dump(word_pronunciation, word_pronunciation_writer, ensure_ascii=False)

    def test_full_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file, self.word_shape_file, self.word_pronunciation_file)

        tokens = tokenizer.tokenize("你好[SEP]你是谁")
        self.assertListEqual(tokens, ["你", "好", "[SEP]", "你", "是", "谁"])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [5, 6, 2, 5, 7, 8])
        self.assertListEqual(tokenizer.convert_tokens_to_shape_ids(tokens), [5, 6, 2, 5, 7, 8])
        self.assertListEqual(tokenizer.convert_tokens_to_pronunciation_ids(tokens), [5, 6, 2, 5, 7, 8])

    def test_chinese(self):
        tokenizer = RoCBertBasicTokenizer()
        self.assertListEqual(tokenizer.tokenize("ah\u535A\u63A8zz"), ["ah", "\u535A", "\u63A8", "zz"])

    def test_basic_tokenizer_lower(self):
        tokenizer = RoCBertBasicTokenizer(do_lower_case=True)
        self.assertListEqual(tokenizer.tokenize(" \tHeLLo!how \n Are yoU? "), ["hello", "!", "how", "are", "you", "?"])
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["hello"])

    def test_basic_tokenizer_lower_strip_accents_false(self):
        tokenizer = RoCBertBasicTokenizer(do_lower_case=True, strip_accents=False)
        self.assertListEqual(tokenizer.tokenize(" \tHäLLo!how \n Are yoU? "), ["hällo", "!", "how", "are", "you", "?"])
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["h\u00E9llo"])

    def test_basic_tokenizer_lower_strip_accents_true(self):
        tokenizer = RoCBertBasicTokenizer(do_lower_case=True, strip_accents=True)
        self.assertListEqual(tokenizer.tokenize(" \tHäLLo!how \n Are yoU? "), ["hallo", "!", "how", "are", "you", "?"])
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["hello"])

    def test_basic_tokenizer_lower_strip_accents_default(self):
        tokenizer = RoCBertBasicTokenizer(do_lower_case=True)
        self.assertListEqual(tokenizer.tokenize(" \tHäLLo!how \n Are yoU? "), ["hallo", "!", "how", "are", "you", "?"])
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["hello"])

    def test_basic_tokenizer_no_lower(self):
        tokenizer = RoCBertBasicTokenizer(do_lower_case=False)
        self.assertListEqual(tokenizer.tokenize(" \tHeLLo!how \n Are yoU? "), ["HeLLo", "!", "how", "Are", "yoU", "?"])

    def test_basic_tokenizer_no_lower_strip_accents_false(self):
        tokenizer = RoCBertBasicTokenizer(do_lower_case=False, strip_accents=False)
        self.assertListEqual(tokenizer.tokenize(" \tHäLLo!how \n Are yoU? "), ["HäLLo", "!", "how", "Are", "yoU", "?"])

    def test_basic_tokenizer_no_lower_strip_accents_true(self):
        tokenizer = RoCBertBasicTokenizer(do_lower_case=False, strip_accents=True)
        self.assertListEqual(tokenizer.tokenize(" \tHäLLo!how \n Are yoU? "), ["HaLLo", "!", "how", "Are", "yoU", "?"])

    def test_basic_tokenizer_respects_never_split_tokens(self):
        tokenizer = RoCBertBasicTokenizer(do_lower_case=False, never_split=["[UNK]"])
        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo!how \n Are yoU? [UNK]"), ["HeLLo", "!", "how", "Are", "yoU", "?", "[UNK]"]
        )

    def test_wordpiece_tokenizer(self):
        vocab_tokens = ["[UNK]", "[CLS]", "[SEP]", "want", "##want", "##ed", "wa", "un", "runn", "##ing"]

        vocab = {}
        for i, token in enumerate(vocab_tokens):
            vocab[token] = i
        tokenizer = RoCBertWordpieceTokenizer(vocab=vocab, unk_token="[UNK]")

        self.assertListEqual(tokenizer.tokenize(""), [])
        self.assertListEqual(tokenizer.tokenize("unwanted running"), ["un", "##want", "##ed", "runn", "##ing"])
        self.assertListEqual(tokenizer.tokenize("unwantedX running"), ["[UNK]", "runn", "##ing"])

    def test_is_whitespace(self):
        self.assertTrue(_is_whitespace(" "))
        self.assertTrue(_is_whitespace("\t"))
        self.assertTrue(_is_whitespace("\r"))
        self.assertTrue(_is_whitespace("\n"))
        self.assertTrue(_is_whitespace("\u00A0"))

        self.assertFalse(_is_whitespace("A"))
        self.assertFalse(_is_whitespace("-"))

    def test_is_control(self):
        self.assertTrue(_is_control("\u0005"))

        self.assertFalse(_is_control("A"))
        self.assertFalse(_is_control(" "))
        self.assertFalse(_is_control("\t"))
        self.assertFalse(_is_control("\r"))

    def test_is_punctuation(self):
        self.assertTrue(_is_punctuation("-"))
        self.assertTrue(_is_punctuation("$"))
        self.assertTrue(_is_punctuation("`"))
        self.assertTrue(_is_punctuation("."))

        self.assertFalse(_is_punctuation("A"))
        self.assertFalse(_is_punctuation(" "))

    def test_clean_text(self):
        tokenizer = self.get_tokenizer()

        # Example taken from the issue https://github.com/huggingface/tokenizers/issues/340
        self.assertListEqual([tokenizer.tokenize(t) for t in ["Test", "\xad", "test"]], [["[UNK]"], [], ["[UNK]"]])

        if self.test_rust_tokenizer:
            rust_tokenizer = self.get_rust_tokenizer()
            self.assertListEqual(
                [rust_tokenizer.tokenize(t) for t in ["Test", "\xad", "test"]], [["[UNK]"], [], ["[UNK]"]]
            )

    def test_offsets_with_special_characters(self):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                sentence = f"A, naïve {tokenizer_r.mask_token} AllenNLP sentence."
                tokens = tokenizer_r.encode_plus(
                    sentence,
                    return_attention_mask=False,
                    return_token_type_ids=False,
                    return_offsets_mapping=True,
                    add_special_tokens=True,
                )

                do_lower_case = tokenizer_r.do_lower_case if hasattr(tokenizer_r, "do_lower_case") else False
                expected_results = (
                    [
                        ((0, 0), tokenizer_r.cls_token),
                        ((0, 1), "A"),
                        ((1, 2), ","),
                        ((3, 5), "na"),
                        ((5, 6), "##ï"),
                        ((6, 8), "##ve"),
                        ((9, 15), tokenizer_r.mask_token),
                        ((16, 21), "Allen"),
                        ((21, 23), "##NL"),
                        ((23, 24), "##P"),
                        ((25, 33), "sentence"),
                        ((33, 34), "."),
                        ((0, 0), tokenizer_r.sep_token),
                    ]
                    if not do_lower_case
                    else [
                        ((0, 0), tokenizer_r.cls_token),
                        ((0, 1), "a"),
                        ((1, 2), ","),
                        ((3, 8), "naive"),
                        ((9, 15), tokenizer_r.mask_token),
                        ((16, 21), "allen"),
                        ((21, 23), "##nl"),
                        ((23, 24), "##p"),
                        ((25, 33), "sentence"),
                        ((33, 34), "."),
                        ((0, 0), tokenizer_r.sep_token),
                    ]
                )

                self.assertEqual(
                    [e[1] for e in expected_results], tokenizer_r.convert_ids_to_tokens(tokens["input_ids"])
                )
                self.assertEqual([e[0] for e in expected_results], tokens["offset_mapping"])

    def test_change_tokenize_chinese_chars(self):
        list_of_commun_chinese_char = ["的", "人", "有"]
        text_with_chinese_char = "".join(list_of_commun_chinese_char)
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                kwargs["tokenize_chinese_chars"] = True
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                ids_without_spe_char_p = tokenizer_p.encode(text_with_chinese_char, add_special_tokens=False)
                ids_without_spe_char_r = tokenizer_r.encode(text_with_chinese_char, add_special_tokens=False)

                tokens_without_spe_char_r = tokenizer_r.convert_ids_to_tokens(ids_without_spe_char_r)
                tokens_without_spe_char_p = tokenizer_p.convert_ids_to_tokens(ids_without_spe_char_p)

                # it is expected that each Chinese character is not preceded by "##"
                self.assertListEqual(tokens_without_spe_char_p, list_of_commun_chinese_char)
                self.assertListEqual(tokens_without_spe_char_r, list_of_commun_chinese_char)

                kwargs["tokenize_chinese_chars"] = False
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                ids_without_spe_char_r = tokenizer_r.encode(text_with_chinese_char, add_special_tokens=False)
                ids_without_spe_char_p = tokenizer_p.encode(text_with_chinese_char, add_special_tokens=False)

                tokens_without_spe_char_r = tokenizer_r.convert_ids_to_tokens(ids_without_spe_char_r)
                tokens_without_spe_char_p = tokenizer_p.convert_ids_to_tokens(ids_without_spe_char_p)

                # it is expected that only the first Chinese character is not preceded by "##".
                expected_tokens = [
                    f"##{token}" if idx != 0 else token for idx, token in enumerate(list_of_commun_chinese_char)
                ]
                self.assertListEqual(tokens_without_spe_char_p, expected_tokens)
                self.assertListEqual(tokens_without_spe_char_r, expected_tokens)

    @slow
    def test_sequence_builders(self):
        tokenizer = self.tokenizer_class(self.vocab_file, self.word_shape_file, self.word_pronunciation_file)

        text = tokenizer.encode("你好", add_special_tokens=False)
        text_2 = tokenizer.encode("你是谁", add_special_tokens=False)

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)

        assert encoded_sentence == [1] + text + [2]
        assert encoded_pair == [1] + text + [2] + text_2 + [2]

    def test_prepare_for_model(self):
        tokenizers = self.get_tokenizers(do_lower_case=False)
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                string_sequence = "你好,你是谁"
                tokens = tokenizer.tokenize(string_sequence)
                tokens_ids = tokenizer.convert_tokens_to_ids(tokens)
                tokens_shape_ids = tokenizer.convert_tokens_to_shape_ids(tokens)
                tokens_proun_ids = tokenizer.convert_tokens_to_pronunciation_ids(tokens)
                prepared_input_dict = tokenizer.prepare_for_model(
                    tokens_ids, tokens_shape_ids, tokens_proun_ids, add_special_tokens=True
                )
                input_dict = tokenizer.encode_plus(string_sequence, add_special_tokens=True)
                self.assertEqual(input_dict, prepared_input_dict)
row 2 · code_codestyle: 384
row 2 · style_context:

from __future__ import annotations

from typing import Generic, TypeVar

T = TypeVar("T")


class DisjointSetTreeNode(Generic[T]):
    # Disjoint-set node storing its data, parent pointer, and rank
    def __init__(self, data: T) -> None:
        self.data = data
        self.parent = self
        self.rank = 0


class DisjointSetTree(Generic[T]):
    # Disjoint-set (union-find) data structure
    def __init__(self) -> None:
        # map from node data to its tree node
        self.map: dict[T, DisjointSetTreeNode[T]] = {}

    def make_set(self, data: T) -> None:
        # create a new singleton set containing `data`
        self.map[data] = DisjointSetTreeNode(data)

    def find_set(self, data: T) -> DisjointSetTreeNode[T]:
        # find the representative of the set containing `data`, with path compression
        elem_ref = self.map[data]
        if elem_ref != elem_ref.parent:
            elem_ref.parent = self.find_set(elem_ref.parent.data)
        return elem_ref.parent

    def link(self, node1: DisjointSetTreeNode[T], node2: DisjointSetTreeNode[T]) -> None:
        # union by rank: attach the lower-rank root under the higher-rank root
        if node1.rank > node2.rank:
            node2.parent = node1
        else:
            node1.parent = node2
            if node1.rank == node2.rank:
                node2.rank += 1

    def union(self, data1: T, data2: T) -> None:
        # merge the two sets containing data1 and data2
        self.link(self.find_set(data1), self.find_set(data2))


class GraphUndirectedWeighted(Generic[T]):
    def __init__(self) -> None:
        # connections: map from a node to its neighbours with edge weights
        self.connections: dict[T, dict[T, int]] = {}

    def add_node(self, node: T) -> None:
        # add a node only if it is not already present in the graph
        if node not in self.connections:
            self.connections[node] = {}

    def add_edge(self, node1: T, node2: T, weight: int) -> None:
        # add an undirected edge with the given weight
        self.add_node(node1)
        self.add_node(node2)
        self.connections[node1][node2] = weight
        self.connections[node2][node1] = weight

    def kruskal(self) -> GraphUndirectedWeighted[T]:
        # collect the edges (each once) in ascending order of weight
        edges = []
        seen = set()
        for start in self.connections:
            for end in self.connections[start]:
                if (start, end) not in seen:
                    seen.add((end, start))
                    edges.append((start, end, self.connections[start][end]))
        edges.sort(key=lambda x: x[2])

        # creating the disjoint set
        disjoint_set = DisjointSetTree[T]()
        for node in self.connections:
            disjoint_set.make_set(node)

        # MST generation
        num_edges = 0
        index = 0
        graph = GraphUndirectedWeighted[T]()
        while num_edges < len(self.connections) - 1:
            u, v, w = edges[index]
            index += 1
            parent_u = disjoint_set.find_set(u)
            parent_v = disjoint_set.find_set(v)
            if parent_u != parent_v:
                num_edges += 1
                graph.add_edge(u, v, w)
                disjoint_set.union(u, v)
        return graph
row 2 · style_context_codestyle: 384
row 2 · label: 1
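A quick usage sketch for the GraphUndirectedWeighted/kruskal implementation in the row above; the graph itself is made up for illustration:

graph = GraphUndirectedWeighted[str]()
graph.add_edge("a", "b", 1)
graph.add_edge("b", "c", 2)
graph.add_edge("a", "c", 3)  # heaviest edge closes a cycle, so Kruskal discards it
mst = graph.kruskal()
print(mst.connections)  # {'a': {'b': 1}, 'b': {'a': 1, 'c': 2}, 'c': {'b': 2}}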
row 3 · code:

import argparse
import json
import os
import re

import torch

from transformers import BloomConfig, BloomModel
from transformers.file_utils import CONFIG_NAME, WEIGHTS_NAME
from transformers.utils import logging


logging.set_verbosity_info()

WEIGHTS_TO_AVERAGE_ENDSWITH = [
    "word_embeddings_layernorm.weight",
    "word_embeddings_layernorm.bias",
    "input_layernorm.weight",
    "input_layernorm.bias",
    "post_attention_layernorm.weight",
    "post_attention_layernorm.bias",
    "self_attention.dense.bias",
    "mlp.dense_4h_to_h.bias",
    "ln_f.weight",
    "ln_f.bias",
]

WEIGHTS_WITH_ROW_PARALLELISM_CONTAIN = [
    "mlp.dense_4h_to_h.weight",
    "self_attention.dense.weight",
]


def layer_name_mapping(key, file):
    # Handle first and last layers
    layer_rename_map = {
        "word_embeddings.weight": "word_embeddings.weight",
        "word_embeddings.norm.weight": "word_embeddings_layernorm.weight",
        "word_embeddings.norm.bias": "word_embeddings_layernorm.bias",
        "weight": "ln_f.weight",
        "bias": "ln_f.bias",
    }

    if key in layer_rename_map:
        return layer_rename_map[key]

    # Handle transformer blocks
    layer_number = int(re.match(r".*layer_(\d*).*", file)[1])
    layer_number -= 3
    return f"h.{layer_number}." + key


def get_dtype_size(dtype):
    if dtype == torch.bool:
        return 1 / 8
    bit_search = re.search(r"[^\d](\d+)$", str(dtype))
    if bit_search is None:
        raise ValueError(f"`dtype` is not a valid dtype: {dtype}.")
    bit_size = int(bit_search.groups()[0])
    return bit_size // 8


def convert_bloom_checkpoint_to_pytorch(
    bloom_checkpoint_path, bloom_config_file, pytorch_dump_folder_path, shard_model, pretraining_tp
):
    # Construct model
    if bloom_config_file == "":
        config = BloomConfig()
    else:
        config = BloomConfig.from_json_file(bloom_config_file)

    if shard_model:
        file_names = os.listdir(bloom_checkpoint_path)
        file_names = sorted(filter(lambda s: s.startswith("layer") and "model_00" in s, file_names))

        index_dict = {"weight_map": {}, "metadata": {}}
        total_size = 0

        missing_keys = None
        config = BloomConfig()

        for j, file in enumerate(file_names):
            print("Processing file: {}".format(file))
            tensors = None

            for i in range(pretraining_tp):
                # load all TP files
                f_name = file.replace("model_00", f"model_0{i}")
                temp = torch.load(os.path.join(bloom_checkpoint_path, f_name), map_location="cpu")

                # Rename keys in the transformers names
                keys = list(temp.keys())
                for key in keys:
                    temp[layer_name_mapping(key, file)] = temp.pop(key)

                if tensors is None:
                    tensors = temp
                else:
                    for key in tensors.keys():
                        if any(key.endswith(end) for end in WEIGHTS_TO_AVERAGE_ENDSWITH):
                            # We average (sum and then divide) some weights accross TP ranks (see https://github.com/bigscience-workshop/Megatron-DeepSpeed/blob/olruwase/sync_layer_norms/megatron/training.py#L425)
                            tensors[key] += temp[key]
                        else:
                            # Some weights are RowParallelLinear in Megatron-Deepspeed, others are ColumnParallel
                            cat_dim = 1 if any(text in key for text in WEIGHTS_WITH_ROW_PARALLELISM_CONTAIN) else 0
                            # We concatenate these weights accross TP ranks
                            tensors[key] = torch.cat([tensors[key], temp[key]], dim=cat_dim)

            # Divide by the number of TP the weights we want to average
            for key in tensors.keys():
                if any(key.endswith(end) for end in WEIGHTS_TO_AVERAGE_ENDSWITH):
                    tensors[key] = tensors[key] / pretraining_tp
            torch.save(
                tensors,
                os.path.join(
                    pytorch_dump_folder_path,
                    "pytorch_model_{}-of-{}.bin".format(str(j + 1).zfill(5), str(len(file_names)).zfill(5)),
                ),
            )

            for key in tensors.keys():
                value = tensors[key]
                total_size += value.numel() * get_dtype_size(value.dtype)
                if key not in index_dict["weight_map"]:
                    index_dict["weight_map"][key] = "pytorch_model_{}-of-{}.bin".format(
                        str(j + 1).zfill(5), str(len(file_names)).zfill(5)
                    )

        config = BloomConfig()
        pytorch_config_dump_path = pytorch_dump_folder_path + "/" + CONFIG_NAME
        index_dict["metadata"]["total_size"] = total_size
        with open(pytorch_config_dump_path, "w", encoding="utf-8") as f:
            f.write(config.to_json_string())
        with open(os.path.join(pytorch_dump_folder_path, WEIGHTS_NAME + ".index.json"), "w", encoding="utf-8") as f:
            json_config = json.dumps(index_dict, indent=2, sort_keys=True) + "\n"
            f.write(json_config)
    else:
        model = BloomModel(config)

        file_names = os.listdir(bloom_checkpoint_path)
        file_names = sorted(filter(lambda s: s.startswith("layer") and "model_00" in s, file_names))

        missing_keys = None
        for i, file in enumerate(file_names):
            tensors = None
            for i in range(pretraining_tp):
                # load all TP files
                f_name = file.replace("model_00", f"model_0{i}")
                temp = torch.load(os.path.join(bloom_checkpoint_path, f_name), map_location="cpu")

                # Rename keys in the transformers names
                keys = list(temp.keys())
                for key in keys:
                    temp[layer_name_mapping(key, file)] = temp.pop(key)

                if tensors is None:
                    tensors = temp
                else:
                    for key in tensors.keys():
                        # We average (sum and then divide) some weights accross TP ranks (see https://github.com/bigscience-workshop/Megatron-DeepSpeed/blob/olruwase/sync_layer_norms/megatron/training.py#L425)
                        if any(key.endswith(end) for end in WEIGHTS_TO_AVERAGE_ENDSWITH):
                            tensors[key] += temp[key]
                        else:
                            # Some weights are RowParallelLinear in Megatron-Deepspeed, others are ColumnParallel
                            cat_dim = 1 if any(text in key for text in WEIGHTS_WITH_ROW_PARALLELISM_CONTAIN) else 0
                            # We concatenate these weights accross TP ranks
                            tensors[key] = torch.cat([tensors[key], temp[key]], dim=cat_dim)

            # Divide by the number of TP the weights we want to average
            for key in tensors.keys():
                if any(key.endswith(end) for end in WEIGHTS_TO_AVERAGE_ENDSWITH):
                    tensors[key] = tensors[key] / pretraining_tp

            other_keys = model.load_state_dict(tensors, strict=False)
            assert not other_keys.unexpected_keys, f"The keys {other_keys.unexpected_keys} are unexpected"
            if missing_keys is None:
                missing_keys = set(other_keys.missing_keys)
            else:
                missing_keys = missing_keys.intersection(set(other_keys.missing_keys))

        assert not missing_keys, f"The keys {missing_keys} are missing"

        # Save pytorch-model
        os.makedirs(pytorch_dump_folder_path, exist_ok=True)
        pytorch_weights_dump_path = pytorch_dump_folder_path + "/" + WEIGHTS_NAME
        pytorch_config_dump_path = pytorch_dump_folder_path + "/" + CONFIG_NAME
        print(f"Save PyTorch model to {pytorch_weights_dump_path} with dtype {config.torch_dtype}")
        if config.torch_dtype is not None:
            model = model.to(config.torch_dtype)
        torch.save(model.state_dict(), pytorch_weights_dump_path)
        print(f"Save configuration file to {pytorch_config_dump_path}")
        with open(pytorch_config_dump_path, "w", encoding="utf-8") as f:
            f.write(config.to_json_string())


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--bloom_checkpoint_path",
        default=None,
        type=str,
        required=True,
        help="Path to the Megatron-LM checkpoint path.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    parser.add_argument(
        "--bloom_config_file",
        default="",
        type=str,
        help=(
            "An optional config json file corresponding to the pre-trained model. \n"
            "This specifies the model architecture."
        ),
    )
    parser.add_argument(
        "--shard_model",
        action="store_true",
        help="An optional setting to shard the output model \nThis enables sharding the converted checkpoint",
    )
    parser.add_argument(
        "--pretraining_tp",
        default=4,
        type=int,
        help="Pretraining TP rank that has been used when training the model in Megatron-LM \n",
    )
    args = parser.parse_args()
    convert_bloom_checkpoint_to_pytorch(
        args.bloom_checkpoint_path,
        args.bloom_config_file,
        args.pytorch_dump_folder_path,
        args.shard_model,
        args.pretraining_tp,
    )
row 3 · code_codestyle: 685
row 3 · style_context:

# Copyright 2022 The HuggingFace Team and The OpenBMB Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING

# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available


_import_structure = {
    "configuration_cpmant": ["CPMANT_PRETRAINED_CONFIG_ARCHIVE_MAP", "CpmAntConfig"],
    "tokenization_cpmant": ["CpmAntTokenizer"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_cpmant"] = [
        "CPMANT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "CpmAntForCausalLM",
        "CpmAntModel",
        "CpmAntPreTrainedModel",
    ]


if TYPE_CHECKING:
    from .configuration_cpmant import CPMANT_PRETRAINED_CONFIG_ARCHIVE_MAP, CpmAntConfig
    from .tokenization_cpmant import CpmAntTokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_cpmant import (
            CPMANT_PRETRAINED_MODEL_ARCHIVE_LIST,
            CpmAntForCausalLM,
            CpmAntModel,
            CpmAntPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
row 3 · style_context_codestyle: 685
row 3 · label: 1
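The BLOOM conversion script in this row's code field merges tensor-parallel shards with one rule: weights whose names appear in WEIGHTS_TO_AVERAGE_ENDSWITH are summed and divided by the TP degree, row-parallel weights are concatenated on dim 1, and the rest on dim 0. A toy illustration of that rule with made-up shapes (two fake TP ranks, not real BLOOM tensors):

import torch

rank0 = {"input_layernorm.weight": torch.ones(4), "self_attention.dense.weight": torch.ones(4, 2)}
rank1 = {"input_layernorm.weight": 3 * torch.ones(4), "self_attention.dense.weight": torch.ones(4, 2)}

merged = {}
# layernorm-style weights are averaged across ranks -> a tensor of 2.0s
merged["input_layernorm.weight"] = (rank0["input_layernorm.weight"] + rank1["input_layernorm.weight"]) / 2
# "self_attention.dense.weight" matches WEIGHTS_WITH_ROW_PARALLELISM_CONTAIN -> concatenate on dim 1
merged["self_attention.dense.weight"] = torch.cat(
    [rank0["self_attention.dense.weight"], rank1["self_attention.dense.weight"]], dim=1
)
print(merged["self_attention.dense.weight"].shape)  # torch.Size([4, 4])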
"""simple docstring""" import unittest from transformers import DebertaVaTokenizer, DebertaVaTokenizerFast from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow from ...test_tokenization_common import TokenizerTesterMixin _a = get_tests_dir("""fixtures/spiece.model""") @require_sentencepiece @require_tokenizers class _UpperCAmelCase( lowerCamelCase , unittest.TestCase ): lowercase__ = DebertaVaTokenizer lowercase__ = DebertaVaTokenizerFast lowercase__ = True lowercase__ = True def UpperCAmelCase ( self) -> List[str]: '''simple docstring''' super().setUp() # We have a SentencePiece fixture for testing _UpperCamelCase = DebertaVaTokenizer(__a , unk_token='''<unk>''') tokenizer.save_pretrained(self.tmpdirname) def UpperCAmelCase ( self , __a) -> Optional[Any]: '''simple docstring''' _UpperCamelCase = '''this is a test''' _UpperCamelCase = '''this is a test''' return input_text, output_text def UpperCAmelCase ( self) -> str: '''simple docstring''' _UpperCamelCase = '''<pad>''' _UpperCamelCase = 0 self.assertEqual(self.get_tokenizer()._convert_token_to_id(__a) , __a) self.assertEqual(self.get_tokenizer()._convert_id_to_token(__a) , __a) def UpperCAmelCase ( self) -> List[Any]: '''simple docstring''' _UpperCamelCase = list(self.get_tokenizer().get_vocab().keys()) self.assertEqual(vocab_keys[0] , '''<pad>''') self.assertEqual(vocab_keys[1] , '''<unk>''') self.assertEqual(vocab_keys[-1] , '''[PAD]''') self.assertEqual(len(__a) , 3_00_01) def UpperCAmelCase ( self) -> Any: '''simple docstring''' self.assertEqual(self.get_tokenizer().vocab_size , 3_00_00) def UpperCAmelCase ( self) -> Dict: '''simple docstring''' # fmt: off _UpperCamelCase = ''' \tHeLLo!how \n Are yoU? ''' _UpperCamelCase = ['''▁hello''', '''!''', '''how''', '''▁are''', '''▁you''', '''?'''] # fmt: on _UpperCamelCase = DebertaVaTokenizer(__a , do_lower_case=__a) _UpperCamelCase = tokenizer.convert_ids_to_tokens(tokenizer.encode(__a , add_special_tokens=__a)) self.assertListEqual(__a , __a) _UpperCamelCase = DebertaVaTokenizerFast(__a , do_lower_case=__a) _UpperCamelCase = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(__a , add_special_tokens=__a)) self.assertListEqual(__a , __a) @unittest.skip('''There is an inconsistency between slow and fast tokenizer due to a bug in the fast one.''') def UpperCAmelCase ( self) -> Any: '''simple docstring''' pass @unittest.skip('''There is an inconsistency between slow and fast tokenizer due to a bug in the fast one.''') def UpperCAmelCase ( self) -> Dict: '''simple docstring''' pass def UpperCAmelCase ( self) -> Union[str, Any]: '''simple docstring''' # fmt: off _UpperCamelCase = '''I was born in 92000, and this is falsé.''' _UpperCamelCase = ['''▁''', '''<unk>''', '''▁was''', '''▁born''', '''▁in''', '''▁9''', '''2000''', '''▁''', ''',''', '''▁and''', '''▁this''', '''▁is''', '''▁fal''', '''s''', '''<unk>''', '''▁''', '''.''', ] # fmt: on _UpperCamelCase = DebertaVaTokenizer(__a , split_by_punct=__a) _UpperCamelCase = tokenizer.convert_ids_to_tokens(tokenizer.encode(__a , add_special_tokens=__a)) self.assertListEqual(__a , __a) _UpperCamelCase = DebertaVaTokenizerFast(__a , split_by_punct=__a) _UpperCamelCase = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(__a , add_special_tokens=__a)) self.assertListEqual(__a , __a) def UpperCAmelCase ( self) -> Union[str, Any]: '''simple docstring''' # fmt: off _UpperCamelCase = '''I was born in 92000, and this is falsé.''' _UpperCamelCase = ['''▁i''', '''▁was''', '''▁born''', '''▁in''', 
'''▁9''', '''2000''', '''▁''', ''',''', '''▁and''', '''▁this''', '''▁is''', '''▁fal''', '''s''', '''<unk>''', '''▁''', '''.''', ] # fmt: on _UpperCamelCase = DebertaVaTokenizer(__a , do_lower_case=__a , split_by_punct=__a) _UpperCamelCase = tokenizer.convert_ids_to_tokens(tokenizer.encode(__a , add_special_tokens=__a)) self.assertListEqual(__a , __a) _UpperCamelCase = DebertaVaTokenizerFast(__a , do_lower_case=__a , split_by_punct=__a) _UpperCamelCase = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(__a , add_special_tokens=__a)) self.assertListEqual(__a , __a) def UpperCAmelCase ( self) -> List[Any]: '''simple docstring''' # fmt: off _UpperCamelCase = '''I was born in 92000, and this is falsé.''' _UpperCamelCase = ['''▁i''', '''▁was''', '''▁born''', '''▁in''', '''▁9''', '''2000''', ''',''', '''▁and''', '''▁this''', '''▁is''', '''▁fal''', '''s''', '''<unk>''', '''.''', ] # fmt: on _UpperCamelCase = DebertaVaTokenizer(__a , do_lower_case=__a , split_by_punct=__a) _UpperCamelCase = tokenizer.convert_ids_to_tokens(tokenizer.encode(__a , add_special_tokens=__a)) self.assertListEqual(__a , __a) _UpperCamelCase = DebertaVaTokenizerFast(__a , do_lower_case=__a , split_by_punct=__a) _UpperCamelCase = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(__a , add_special_tokens=__a)) self.assertListEqual(__a , __a) def UpperCAmelCase ( self) -> Union[str, Any]: '''simple docstring''' # fmt: off _UpperCamelCase = '''I was born in 92000, and this is falsé.''' _UpperCamelCase = ['''▁''', '''<unk>''', '''▁was''', '''▁born''', '''▁in''', '''▁9''', '''2000''', '''▁''', ''',''', '''▁and''', '''▁this''', '''▁is''', '''▁fal''', '''s''', '''<unk>''', '''▁''', '''.''', ] # fmt: on _UpperCamelCase = DebertaVaTokenizer(__a , do_lower_case=__a , split_by_punct=__a) _UpperCamelCase = tokenizer.convert_ids_to_tokens(tokenizer.encode(__a , add_special_tokens=__a)) self.assertListEqual(__a , __a) _UpperCamelCase = DebertaVaTokenizerFast(__a , do_lower_case=__a , split_by_punct=__a) _UpperCamelCase = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(__a , add_special_tokens=__a)) self.assertListEqual(__a , __a) def UpperCAmelCase ( self) -> List[str]: '''simple docstring''' # fmt: off _UpperCamelCase = ''' \tHeLLo!how \n Are yoU? 
''' _UpperCamelCase = ['''▁''', '''<unk>''', '''e''', '''<unk>''', '''o''', '''!''', '''how''', '''▁''', '''<unk>''', '''re''', '''▁yo''', '''<unk>''', '''?'''] # fmt: on _UpperCamelCase = DebertaVaTokenizer(__a , do_lower_case=__a , split_by_punct=__a) _UpperCamelCase = tokenizer.convert_ids_to_tokens(tokenizer.encode(__a , add_special_tokens=__a)) self.assertListEqual(__a , __a) _UpperCamelCase = DebertaVaTokenizerFast(__a , do_lower_case=__a , split_by_punct=__a) _UpperCamelCase = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(__a , add_special_tokens=__a)) self.assertListEqual(__a , __a) def UpperCAmelCase ( self) -> int: '''simple docstring''' _UpperCamelCase = self.get_tokenizer() _UpperCamelCase = self.get_rust_tokenizer() _UpperCamelCase = '''I was born in 92000, and this is falsé.''' _UpperCamelCase = tokenizer.convert_ids_to_tokens(tokenizer.encode(__a , add_special_tokens=__a)) _UpperCamelCase = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(__a , add_special_tokens=__a)) self.assertListEqual(__a , __a) _UpperCamelCase = tokenizer.encode(__a , add_special_tokens=__a) _UpperCamelCase = rust_tokenizer.encode(__a , add_special_tokens=__a) self.assertListEqual(__a , __a) _UpperCamelCase = self.get_rust_tokenizer() _UpperCamelCase = tokenizer.encode(__a) _UpperCamelCase = rust_tokenizer.encode(__a) self.assertListEqual(__a , __a) def UpperCAmelCase ( self) -> List[Any]: '''simple docstring''' _UpperCamelCase = '''This is a test''' _UpperCamelCase = [13, 1, 43_98, 25, 21, 12_89] _UpperCamelCase = ['''▁''', '''T''', '''his''', '''▁is''', '''▁a''', '''▁test'''] _UpperCamelCase = ['''▁''', '''<unk>''', '''his''', '''▁is''', '''▁a''', '''▁test'''] _UpperCamelCase = DebertaVaTokenizer(__a , keep_accents=__a) _UpperCamelCase = DebertaVaTokenizerFast(__a , keep_accents=__a) _UpperCamelCase = tokenizer.encode(__a , add_special_tokens=__a) self.assertListEqual(__a , __a) _UpperCamelCase = tokenizer.tokenize(__a) self.assertListEqual(__a , __a) _UpperCamelCase = tokenizer.convert_ids_to_tokens(__a) self.assertListEqual(__a , __a) _UpperCamelCase = rust_tokenizer.encode(__a , add_special_tokens=__a) self.assertListEqual(__a , __a) _UpperCamelCase = rust_tokenizer.tokenize(__a) self.assertListEqual(__a , __a) _UpperCamelCase = rust_tokenizer.convert_ids_to_tokens(__a) self.assertListEqual(__a , __a) # fmt: off _UpperCamelCase = '''I was born in 92000, and this is falsé.''' _UpperCamelCase = [13, 1, 23, 3_86, 19, 5_61, 30_50, 15, 17, 48, 25, 82_56, 18, 1, 9] _UpperCamelCase = ['''▁''', '''I''', '''▁was''', '''▁born''', '''▁in''', '''▁9''', '''2000''', ''',''', '''▁and''', '''▁this''', '''▁is''', '''▁fal''', '''s''', '''é''', '''.''', ] _UpperCamelCase = ['''▁''', '''<unk>''', '''▁was''', '''▁born''', '''▁in''', '''▁9''', '''2000''', ''',''', '''▁and''', '''▁this''', '''▁is''', '''▁fal''', '''s''', '''<unk>''', '''.''', ] # fmt: on _UpperCamelCase = tokenizer.encode(__a , add_special_tokens=__a) self.assertListEqual(__a , __a) _UpperCamelCase = tokenizer.tokenize(__a) self.assertListEqual(__a , __a) _UpperCamelCase = tokenizer.convert_ids_to_tokens(__a) self.assertListEqual(__a , __a) _UpperCamelCase = rust_tokenizer.encode(__a , add_special_tokens=__a) self.assertListEqual(__a , __a) _UpperCamelCase = rust_tokenizer.tokenize(__a) self.assertListEqual(__a , __a) _UpperCamelCase = rust_tokenizer.convert_ids_to_tokens(__a) self.assertListEqual(__a , __a) def UpperCAmelCase ( self) -> List[str]: '''simple docstring''' _UpperCamelCase = DebertaVaTokenizer(__a) _UpperCamelCase = 
tokenizer.encode('''sequence builders''') _UpperCamelCase = tokenizer.encode('''multi-sequence build''') _UpperCamelCase = tokenizer.build_inputs_with_special_tokens(__a) _UpperCamelCase = tokenizer.build_inputs_with_special_tokens(__a , __a) self.assertEqual([tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] , __a) self.assertEqual( [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_a + [tokenizer.sep_token_id] , __a , ) @slow def UpperCAmelCase ( self) -> Optional[Any]: '''simple docstring''' # fmt: off _UpperCamelCase = {'''input_ids''': [[1, 3_98_67, 36, 1_93_90, 4_86, 27, 3_50_52, 8_14_36, 18, 6_06_85, 12_25, 7, 3_50_52, 8_14_36, 18, 93_67, 1_68_99, 18, 1_59_37, 53, 5_94, 7_73, 18, 1_62_87, 3_04_65, 36, 1_59_37, 6, 4_11_39, 38, 3_69_79, 6_07_63, 1_91, 6, 3_41_32, 99, 6, 5_05_38, 3_90, 4_32_30, 6, 3_41_32, 27_79, 2_08_50, 14, 6_99, 10_72, 11_94, 36, 3_82, 1_09_01, 53, 7, 6_99, 10_72, 20_84, 36, 2_04_22, 6_30, 53, 19, 1_05, 30_49, 18_96, 10_53, 1_68_99, 15_06, 11, 3_79_78, 42_43, 7, 12_37, 3_18_69, 2_00, 1_65_66, 6_54, 6, 3_50_52, 8_14_36, 7, 5_56_30, 1_35_93, 4, 2], [1, 26, 1_50_11, 13, 6_67, 8, 10_53, 18, 2_36_11, 12_37, 7_23_56, 1_28_20, 34, 10_41_34, 12_09, 35, 1_33_13, 66_27, 21, 2_02, 3_47, 7, 1_64, 23_99, 11, 46, 44_85, 4, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 5, 12_32, 28_64, 1_57_85, 1_49_51, 1_05, 5, 85_81, 12_50, 4, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], '''token_type_ids''': [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501 # fmt: on self.tokenizer_integration_test_util( expected_encoding=__a , model_name='''microsoft/deberta-v2-xlarge''' , revision='''ad6e42c1532ddf3a15c39246b63f5559d558b670''' , )
row 4 · code_codestyle: 78
"""simple docstring""" import inspect import unittest from huggingface_hub import hf_hub_download from transformers import ASTConfig from transformers.testing_utils import require_torch, require_torchaudio, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_torchaudio_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import ASTForAudioClassification, ASTModel from transformers.models.audio_spectrogram_transformer.modeling_audio_spectrogram_transformer import ( AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, ) if is_torchaudio_available(): import torchaudio from transformers import ASTFeatureExtractor class _UpperCAmelCase: def __init__( self , __a , __a=13 , __a=2 , __a=24 , __a=16 , __a=True , __a=True , __a=32 , __a=5 , __a=4 , __a=37 , __a="gelu" , __a=0.1 , __a=0.1 , __a=10 , __a=0.02 , __a=None , __a=2 , __a=2 , ) -> List[str]: '''simple docstring''' _UpperCamelCase = parent _UpperCamelCase = batch_size _UpperCamelCase = patch_size _UpperCamelCase = max_length _UpperCamelCase = num_mel_bins _UpperCamelCase = is_training _UpperCamelCase = use_labels _UpperCamelCase = hidden_size _UpperCamelCase = num_hidden_layers _UpperCamelCase = num_attention_heads _UpperCamelCase = intermediate_size _UpperCamelCase = hidden_act _UpperCamelCase = hidden_dropout_prob _UpperCamelCase = attention_probs_dropout_prob _UpperCamelCase = type_sequence_label_size _UpperCamelCase = initializer_range _UpperCamelCase = scope _UpperCamelCase = frequency_stride _UpperCamelCase = time_stride # in AST, the seq length equals the number of patches + 2 (we add 2 for the [CLS] and distillation tokens) _UpperCamelCase = (self.num_mel_bins - self.patch_size) // self.frequency_stride + 1 _UpperCamelCase = (self.max_length - self.patch_size) // self.time_stride + 1 _UpperCamelCase = frequency_out_dimension * time_out_dimension _UpperCamelCase = num_patches + 2 def UpperCAmelCase ( self) -> Union[str, Any]: '''simple docstring''' _UpperCamelCase = floats_tensor([self.batch_size, self.max_length, self.num_mel_bins]) _UpperCamelCase = None if self.use_labels: _UpperCamelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size) _UpperCamelCase = self.get_config() return config, input_values, labels def UpperCAmelCase ( self) -> Optional[int]: '''simple docstring''' return ASTConfig( patch_size=self.patch_size , max_length=self.max_length , num_mel_bins=self.num_mel_bins , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=__a , initializer_range=self.initializer_range , frequency_stride=self.frequency_stride , time_stride=self.time_stride , ) def UpperCAmelCase ( self , __a , __a , __a) -> Union[str, Any]: '''simple docstring''' _UpperCamelCase = ASTModel(config=__a) model.to(__a) model.eval() _UpperCamelCase = model(__a) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size)) def UpperCAmelCase ( self) -> List[str]: '''simple docstring''' _UpperCamelCase = self.prepare_config_and_inputs() ( ( _UpperCamelCase ) , ( 
_UpperCamelCase ) , ( _UpperCamelCase ) , ) = config_and_inputs _UpperCamelCase = {'''input_values''': input_values} return config, inputs_dict @require_torch class _UpperCAmelCase( lowerCamelCase , lowerCamelCase , unittest.TestCase ): lowercase__ = ( ( ASTModel, ASTForAudioClassification, ) if is_torch_available() else () ) lowercase__ = ( {'audio-classification': ASTForAudioClassification, 'feature-extraction': ASTModel} if is_torch_available() else {} ) lowercase__ = False lowercase__ = False lowercase__ = False lowercase__ = False def UpperCAmelCase ( self , __a , __a , __a , __a , __a) -> Optional[Any]: '''simple docstring''' if pipeline_test_casse_name == "AudioClassificationPipelineTests": return True return False def UpperCAmelCase ( self) -> Any: '''simple docstring''' _UpperCamelCase = ASTModelTester(self) _UpperCamelCase = ConfigTester(self , config_class=__a , has_text_modality=__a , hidden_size=37) def UpperCAmelCase ( self) -> int: '''simple docstring''' self.config_tester.run_common_tests() @unittest.skip(reason='''AST does not use inputs_embeds''') def UpperCAmelCase ( self) -> List[str]: '''simple docstring''' pass def UpperCAmelCase ( self) -> List[str]: '''simple docstring''' _UpperCamelCase , _UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: _UpperCamelCase = model_class(__a) self.assertIsInstance(model.get_input_embeddings() , (nn.Module)) _UpperCamelCase = model.get_output_embeddings() self.assertTrue(x is None or isinstance(__a , nn.Linear)) def UpperCAmelCase ( self) -> List[str]: '''simple docstring''' _UpperCamelCase , _UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: _UpperCamelCase = model_class(__a) _UpperCamelCase = inspect.signature(model.forward) # signature.parameters is an OrderedDict => so arg_names order is deterministic _UpperCamelCase = [*signature.parameters.keys()] _UpperCamelCase = ['''input_values'''] self.assertListEqual(arg_names[:1] , __a) def UpperCAmelCase ( self) -> Optional[int]: '''simple docstring''' _UpperCamelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*__a) @slow def UpperCAmelCase ( self) -> Optional[Any]: '''simple docstring''' for model_name in AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: _UpperCamelCase = ASTModel.from_pretrained(__a) self.assertIsNotNone(__a) def lowerCamelCase__ ( ) -> List[str]: """simple docstring""" _UpperCamelCase = hf_hub_download( repo_id='''nielsr/audio-spectogram-transformer-checkpoint''', filename='''sample_audio.flac''', repo_type='''dataset''' ) _UpperCamelCase , _UpperCamelCase = torchaudio.load(__snake_case ) return audio, sampling_rate @require_torch @require_torchaudio class _UpperCAmelCase( unittest.TestCase ): @cached_property def UpperCAmelCase ( self) -> Dict: '''simple docstring''' return ( ASTFeatureExtractor.from_pretrained('''MIT/ast-finetuned-audioset-10-10-0.4593''') if is_torchaudio_available() else None ) @slow def UpperCAmelCase ( self) -> Optional[int]: '''simple docstring''' _UpperCamelCase = self.default_feature_extractor _UpperCamelCase = ASTForAudioClassification.from_pretrained('''MIT/ast-finetuned-audioset-10-10-0.4593''').to(__a) _UpperCamelCase = self.default_feature_extractor _UpperCamelCase , _UpperCamelCase = prepare_audio() _UpperCamelCase = audio.squeeze().numpy() _UpperCamelCase = feature_extractor(__a , sampling_rate=__a , return_tensors='''pt''').to(__a) # forward 
pass with torch.no_grad(): _UpperCamelCase = model(**__a) # verify the logits _UpperCamelCase = torch.Size((1, 5_27)) self.assertEqual(outputs.logits.shape , __a) _UpperCamelCase = torch.tensor([-0.8760, -7.0042, -8.6602]).to(__a) self.assertTrue(torch.allclose(outputs.logits[0, :3] , __a , atol=1e-4))
row 4 · style_context_codestyle: 78
row 4 · label: 1
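The AST tester in this row's style_context derives its expected sequence length from the patch grid. Redoing that arithmetic with the tester's default values (plain Python, same formulas as ASTModelTester.__init__):

patch_size, num_mel_bins, max_length = 2, 16, 24
frequency_stride, time_stride = 2, 2
frequency_out_dimension = (num_mel_bins - patch_size) // frequency_stride + 1  # (16 - 2) // 2 + 1 = 8
time_out_dimension = (max_length - patch_size) // time_stride + 1  # (24 - 2) // 2 + 1 = 12
seq_length = frequency_out_dimension * time_out_dimension + 2  # 8 * 12 patches + [CLS] + distillation = 98
print(seq_length)  # 98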
row 5 · code:

import math

import numpy as np
import qiskit
from qiskit import Aer, ClassicalRegister, QuantumCircuit, QuantumRegister, execute


def quantum_fourier_transform(number_of_qubits: int = 3) -> qiskit.result.counts.Counts:
    if isinstance(number_of_qubits, str):
        raise TypeError("number of qubits must be a integer.")
    if number_of_qubits <= 0:
        raise ValueError("number of qubits must be > 0.")
    if math.floor(number_of_qubits) != number_of_qubits:
        raise ValueError("number of qubits must be exact integer.")
    if number_of_qubits > 10:
        raise ValueError("number of qubits too large to simulate(>10).")

    qr = QuantumRegister(number_of_qubits, "qr")
    cr = ClassicalRegister(number_of_qubits, "cr")

    quantum_circuit = QuantumCircuit(qr, cr)

    counter = number_of_qubits

    for i in range(counter):
        quantum_circuit.h(number_of_qubits - i - 1)
        counter -= 1
        for j in range(counter):
            quantum_circuit.cp(np.pi / 2 ** (counter - j), j, counter)

    for k in range(number_of_qubits // 2):
        quantum_circuit.swap(k, number_of_qubits - k - 1)

    # measure all the qubits
    quantum_circuit.measure(qr, cr)
    # simulate with 10000 shots
    backend = Aer.get_backend("qasm_simulator")
    job = execute(quantum_circuit, backend, shots=10_000)

    return job.result().get_counts(quantum_circuit)


if __name__ == "__main__":
    print(f"Total count for quantum fourier transform state is: \n {quantum_fourier_transform(3)}")
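A rough sanity check for the routine above, assuming qiskit is installed. It relies on the standard fact that the QFT of the all-zeros state is a uniform superposition, so with 10000 shots each of the 2**n bitstrings should show up around 10000 / 2**n times (this check is an illustration, not part of the original script):

counts = quantum_fourier_transform(3)
print(sorted(counts.items()))  # 8 bitstrings, each with a count near 10000 / 8 = 1250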
130
import os from shutil import copyfile from typing import Any, Dict, List, Optional, Tuple import sentencepiece as spm from ...tokenization_utils import AddedToken, PreTrainedTokenizer from ...utils import logging _lowercase: int = logging.get_logger(__name__) _lowercase: Union[str, Any] = {'''vocab_file''': '''sentencepiece.bpe.model'''} _lowercase: Dict = { '''vocab_file''': { '''camembert-base''': '''https://huggingface.co/camembert-base/resolve/main/sentencepiece.bpe.model''', } } _lowercase: List[Any] = { '''camembert-base''': 5_1_2, } _lowercase: Dict = '''▁''' class lowerCamelCase__ ( UpperCAmelCase ): UpperCamelCase__ =VOCAB_FILES_NAMES UpperCamelCase__ =PRETRAINED_VOCAB_FILES_MAP UpperCamelCase__ =PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES UpperCamelCase__ =["input_ids", "attention_mask"] def __init__( self : str , lowercase__ : int , lowercase__ : Tuple="<s>" , lowercase__ : Optional[int]="</s>" , lowercase__ : Optional[Any]="</s>" , lowercase__ : Any="<s>" , lowercase__ : Union[str, Any]="<unk>" , lowercase__ : Union[str, Any]="<pad>" , lowercase__ : Optional[int]="<mask>" , lowercase__ : str=["<s>NOTUSED", "</s>NOTUSED"] , lowercase__ : Optional[Dict[str, Any]] = None , **lowercase__ : Union[str, Any] , ): # Mask token behave like a normal word, i.e. include the space before it _lowerCAmelCase = AddedToken(lowercase__ , lstrip=lowercase__ , rstrip=lowercase__ ) if isinstance(lowercase__ , lowercase__ ) else mask_token _lowerCAmelCase = {} if sp_model_kwargs is None else sp_model_kwargs super().__init__( bos_token=lowercase__ , eos_token=lowercase__ , unk_token=lowercase__ , sep_token=lowercase__ , cls_token=lowercase__ , pad_token=lowercase__ , mask_token=lowercase__ , additional_special_tokens=lowercase__ , sp_model_kwargs=self.sp_model_kwargs , **lowercase__ , ) _lowerCAmelCase = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(str(lowercase__ ) ) _lowerCAmelCase = vocab_file # HACK: These tokens were added by fairseq but don't seem to be actually used when duplicated in the actual # sentencepiece vocabulary (this is the case for <s> and </s> _lowerCAmelCase = {'<s>NOTUSED': 0, '<pad>': 1, '</s>NOTUSED': 2, '<unk>': 3} _lowerCAmelCase = len(self.fairseq_tokens_to_ids ) _lowerCAmelCase = len(self.sp_model ) + len(self.fairseq_tokens_to_ids ) _lowerCAmelCase = {v: k for k, v in self.fairseq_tokens_to_ids.items()} def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] , lowercase__ : List[int] , lowercase__ : Optional[List[int]] = None ): if token_ids_a is None: return [self.cls_token_id] + token_ids_a + [self.sep_token_id] _lowerCAmelCase = [self.cls_token_id] _lowerCAmelCase = [self.sep_token_id] return cls + token_ids_a + sep + sep + token_ids_a + sep def SCREAMING_SNAKE_CASE__ ( self : Any , lowercase__ : List[int] , lowercase__ : Optional[List[int]] = None , lowercase__ : bool = False ): if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=lowercase__ , token_ids_a=lowercase__ , already_has_special_tokens=lowercase__ ) if token_ids_a is None: return [1] + ([0] * len(lowercase__ )) + [1] return [1] + ([0] * len(lowercase__ )) + [1, 1] + ([0] * len(lowercase__ )) + [1] def SCREAMING_SNAKE_CASE__ ( self : int , lowercase__ : List[int] , lowercase__ : Optional[List[int]] = None ): _lowerCAmelCase = [self.sep_token_id] _lowerCAmelCase = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0] @property def SCREAMING_SNAKE_CASE__ ( self : 
Optional[int] ): return len(self.fairseq_tokens_to_ids ) + len(self.sp_model ) def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] ): _lowerCAmelCase = {self.convert_ids_to_tokens(lowercase__ ): i for i in range(self.vocab_size )} vocab.update(self.added_tokens_encoder ) return vocab def SCREAMING_SNAKE_CASE__ ( self : Optional[int] , lowercase__ : str ): return self.sp_model.encode(lowercase__ , out_type=lowercase__ ) def SCREAMING_SNAKE_CASE__ ( self : List[str] , lowercase__ : Optional[Any] ): if token in self.fairseq_tokens_to_ids: return self.fairseq_tokens_to_ids[token] elif self.sp_model.PieceToId(lowercase__ ) == 0: # Convert sentence piece unk token to fairseq unk token index return self.unk_token_id return self.fairseq_offset + self.sp_model.PieceToId(lowercase__ ) def SCREAMING_SNAKE_CASE__ ( self : List[str] , lowercase__ : Tuple ): if index in self.fairseq_ids_to_tokens: return self.fairseq_ids_to_tokens[index] return self.sp_model.IdToPiece(index - self.fairseq_offset ) def SCREAMING_SNAKE_CASE__ ( self : int , lowercase__ : Optional[int] ): _lowerCAmelCase = [] _lowerCAmelCase = '' _lowerCAmelCase = False for token in tokens: # make sure that special tokens are not decoded using sentencepiece model if token in self.all_special_tokens: if not prev_is_special: out_string += " " out_string += self.sp_model.decode(lowercase__ ) + token _lowerCAmelCase = True _lowerCAmelCase = [] else: current_sub_tokens.append(lowercase__ ) _lowerCAmelCase = False out_string += self.sp_model.decode(lowercase__ ) return out_string.strip() def __getstate__( self : Any ): _lowerCAmelCase = self.__dict__.copy() _lowerCAmelCase = None return state def __setstate__( self : Optional[Any] , lowercase__ : Union[str, Any] ): _lowerCAmelCase = d # for backward compatibility if not hasattr(self , 'sp_model_kwargs' ): _lowerCAmelCase = {} _lowerCAmelCase = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(self.vocab_file ) def SCREAMING_SNAKE_CASE__ ( self : List[str] , lowercase__ : str , lowercase__ : Optional[str] = None ): if not os.path.isdir(lowercase__ ): logger.error(f'Vocabulary path ({save_directory}) should be a directory' ) return _lowerCAmelCase = os.path.join( lowercase__ , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(lowercase__ ) and os.path.isfile(self.vocab_file ): copyfile(self.vocab_file , lowercase__ ) elif not os.path.isfile(self.vocab_file ): with open(lowercase__ , 'wb' ) as fi: _lowerCAmelCase = self.sp_model.serialized_model_proto() fi.write(lowercase__ ) return (out_vocab_file,)
192
0
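A minimal sanity-check sketch for the `quantum_fourier_transform` helper above. This is an added illustration, not part of the original file; it assumes qiskit is installed and that the function is importable as defined.

# On the all-zeros input register the QFT produces a uniform superposition,
# so every 2-qubit bitstring should be measured with probability near 0.25.
counts = quantum_fourier_transform(2)
total = sum(counts.values())
for bitstring, count in sorted(counts.items()):
    print(f"{bitstring}: {count / total:.3f}")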
"""simple docstring""" from ...configuration_utils import PretrainedConfig from ...utils import logging __lowerCamelCase = logging.get_logger(__name__) __lowerCamelCase = { "tiiuae/falcon-40b": "https://huggingface.co/tiiuae/falcon-40b/resolve/main/config.json", "tiiuae/falcon-7b": "https://huggingface.co/tiiuae/falcon-7b/resolve/main/config.json", } class UpperCamelCase__( __A ): lowerCAmelCase__ : str = 'falcon' lowerCAmelCase__ : Union[str, Any] = ['past_key_values'] def __init__( self ,__UpperCAmelCase=6_50_24 ,__UpperCAmelCase=45_44 ,__UpperCAmelCase=32 ,__UpperCAmelCase=71 ,__UpperCAmelCase=1e-5 ,__UpperCAmelCase=0.0_2 ,__UpperCAmelCase=True ,__UpperCAmelCase=0.0 ,__UpperCAmelCase=0.0 ,__UpperCAmelCase=None ,__UpperCAmelCase=False ,__UpperCAmelCase=False ,__UpperCAmelCase=True ,__UpperCAmelCase=True ,__UpperCAmelCase=False ,__UpperCAmelCase=11 ,__UpperCAmelCase=11 ,**__UpperCAmelCase ,) -> int: A__ = vocab_size # Backward compatibility with n_embed kwarg A__ = kwargs.pop('n_embed' ,__UpperCAmelCase ) A__ = hidden_size if n_embed is None else n_embed A__ = num_hidden_layers A__ = num_attention_heads A__ = layer_norm_epsilon A__ = initializer_range A__ = use_cache A__ = hidden_dropout A__ = attention_dropout A__ = bos_token_id A__ = eos_token_id A__ = num_attention_heads if num_kv_heads is None else num_kv_heads A__ = alibi A__ = new_decoder_architecture A__ = multi_query # Ignored when new_decoder_architecture is True A__ = parallel_attn A__ = bias super().__init__(bos_token_id=__UpperCAmelCase ,eos_token_id=__UpperCAmelCase ,**__UpperCAmelCase ) @property def snake_case__ ( self ) -> Tuple: return self.hidden_size // self.num_attention_heads @property def snake_case__ ( self ) -> int: return not self.alibi
705
"""simple docstring""" import enum import warnings from .. import MODEL_FOR_CAUSAL_LM_MAPPING, TF_MODEL_FOR_CAUSAL_LM_MAPPING from ..utils import add_end_docstrings, is_tf_available from .base import PIPELINE_INIT_ARGS, Pipeline if is_tf_available(): import tensorflow as tf class UpperCamelCase__( enum.Enum ): lowerCAmelCase__ : Optional[Any] = 0 lowerCAmelCase__ : Optional[int] = 1 lowerCAmelCase__ : List[Any] = 2 @add_end_docstrings(__A ) class UpperCamelCase__( __A ): lowerCAmelCase__ : Optional[Any] = '\n In 1991, the remains of Russian Tsar Nicholas II and his family (except for Alexei and Maria) are discovered. The\n voice of Nicholas\'s young son, Tsarevich Alexei Nikolaevich, narrates the remainder of the story. 1883 Western\n Siberia, a young Grigori Rasputin is asked by his father and a group of men to perform magic. Rasputin has a vision\n and denounces one of the men as a horse thief. Although his father initially slaps him for making such an\n accusation, Rasputin watches as the man is chased outside and beaten. Twenty years later, Rasputin sees a vision of\n the Virgin Mary, prompting him to become a priest. Rasputin quickly becomes famous, with people, even a bishop,\n begging for his blessing. <eod> </s> <eos>\n ' def __init__( self ,*__UpperCAmelCase ,**__UpperCAmelCase ) -> Any: super().__init__(*__UpperCAmelCase ,**__UpperCAmelCase ) self.check_model_type( TF_MODEL_FOR_CAUSAL_LM_MAPPING if self.framework == 'tf' else MODEL_FOR_CAUSAL_LM_MAPPING ) if "prefix" not in self._preprocess_params: # This is very specific. The logic is quite complex and needs to be done # as a "default". # It also defines both some preprocess_kwargs and generate_kwargs # which is why we cannot put them in their respective methods. A__ = None if self.model.config.prefix is not None: A__ = self.model.config.prefix if prefix is None and self.model.__class__.__name__ in [ "XLNetLMHeadModel", "TransfoXLLMHeadModel", "TFXLNetLMHeadModel", "TFTransfoXLLMHeadModel", ]: # For XLNet and TransformerXL we add an article to the prompt to give more state to the model. A__ = self.XL_PREFIX if prefix is not None: # Recalculate some generate_kwargs linked to prefix. 
A__ , A__ , A__ = self._sanitize_parameters(prefix=__UpperCAmelCase ,**self._forward_params ) A__ = {**self._preprocess_params, **preprocess_params} A__ = {**self._forward_params, **forward_params} def snake_case__ ( self ,__UpperCAmelCase=None ,__UpperCAmelCase=None ,__UpperCAmelCase=None ,__UpperCAmelCase=None ,__UpperCAmelCase=None ,__UpperCAmelCase=None ,__UpperCAmelCase=None ,__UpperCAmelCase=None ,**__UpperCAmelCase ,) -> Dict: A__ = {} if prefix is not None: A__ = prefix if prefix: A__ = self.tokenizer( __UpperCAmelCase ,padding=__UpperCAmelCase ,add_special_tokens=__UpperCAmelCase ,return_tensors=self.framework ) A__ = prefix_inputs['input_ids'].shape[-1] if handle_long_generation is not None: if handle_long_generation not in {"hole"}: raise ValueError( f'''{handle_long_generation} is not a valid value for `handle_long_generation` parameter expected''' ' [None, \'hole\']' ) A__ = handle_long_generation preprocess_params.update(__UpperCAmelCase ) A__ = generate_kwargs A__ = {} if return_full_text is not None and return_type is None: if return_text is not None: raise ValueError('`return_text` is mutually exclusive with `return_full_text`' ) if return_tensors is not None: raise ValueError('`return_full_text` is mutually exclusive with `return_tensors`' ) A__ = ReturnType.FULL_TEXT if return_full_text else ReturnType.NEW_TEXT if return_tensors is not None and return_type is None: if return_text is not None: raise ValueError('`return_text` is mutually exclusive with `return_tensors`' ) A__ = ReturnType.TENSORS if return_type is not None: A__ = return_type if clean_up_tokenization_spaces is not None: A__ = clean_up_tokenization_spaces if stop_sequence is not None: A__ = self.tokenizer.encode(__UpperCAmelCase ,add_special_tokens=__UpperCAmelCase ) if len(__UpperCAmelCase ) > 1: warnings.warn( 'Stopping on a multiple token sequence is not yet supported on transformers. The first token of' ' the stop sequence will be used as the stop sequence string in the interim.' 
) A__ = stop_sequence_ids[0] return preprocess_params, forward_params, postprocess_params def snake_case__ ( self ,*__UpperCAmelCase ,**__UpperCAmelCase ) -> Dict: # Parse arguments if self.model.__class__.__name__ in ["TransfoXLLMHeadModel"]: kwargs.update({'add_space_before_punct_symbol': True} ) return super()._parse_and_tokenize(*__UpperCAmelCase ,**__UpperCAmelCase ) def __call__( self ,__UpperCAmelCase ,**__UpperCAmelCase ) -> Dict: return super().__call__(__UpperCAmelCase ,**__UpperCAmelCase ) def snake_case__ ( self ,__UpperCAmelCase ,__UpperCAmelCase="" ,__UpperCAmelCase=None ,**__UpperCAmelCase ) -> Dict: A__ = self.tokenizer( prefix + prompt_text ,padding=__UpperCAmelCase ,add_special_tokens=__UpperCAmelCase ,return_tensors=self.framework ) A__ = prompt_text if handle_long_generation == "hole": A__ = inputs['input_ids'].shape[-1] if "max_new_tokens" in generate_kwargs: A__ = generate_kwargs['max_new_tokens'] else: A__ = generate_kwargs.get('max_length' ,self.model.config.max_length ) - cur_len if new_tokens < 0: raise ValueError('We cannot infer how many new tokens are expected' ) if cur_len + new_tokens > self.tokenizer.model_max_length: A__ = self.tokenizer.model_max_length - new_tokens if keep_length <= 0: raise ValueError( 'We cannot use `hole` to handle this generation the number of desired tokens exceeds the' ' models max length' ) A__ = inputs['input_ids'][:, -keep_length:] if "attention_mask" in inputs: A__ = inputs['attention_mask'][:, -keep_length:] return inputs def snake_case__ ( self ,__UpperCAmelCase ,**__UpperCAmelCase ) -> Optional[Any]: A__ = model_inputs['input_ids'] A__ = model_inputs.get('attention_mask' ,__UpperCAmelCase ) # Allow empty prompts if input_ids.shape[1] == 0: A__ = None A__ = None A__ = 1 else: A__ = input_ids.shape[0] A__ = model_inputs.pop('prompt_text' ) # If there is a prefix, we may need to adjust the generation length. Do so without permanently modifying # generate_kwargs, as some of the parameterization may come from the initialization of the pipeline. 
A__ = generate_kwargs.pop('prefix_length' ,0 ) if prefix_length > 0: A__ = 'max_new_tokens' in generate_kwargs or ( 'generation_config' in generate_kwargs and generate_kwargs['generation_config'].max_new_tokens is not None ) if not has_max_new_tokens: A__ = generate_kwargs.get('max_length' ) or self.model.config.max_length generate_kwargs["max_length"] += prefix_length A__ = 'min_new_tokens' in generate_kwargs or ( 'generation_config' in generate_kwargs and generate_kwargs['generation_config'].min_new_tokens is not None ) if not has_min_new_tokens and "min_length" in generate_kwargs: generate_kwargs["min_length"] += prefix_length # BS x SL A__ = self.model.generate(input_ids=__UpperCAmelCase ,attention_mask=__UpperCAmelCase ,**__UpperCAmelCase ) A__ = generated_sequence.shape[0] if self.framework == "pt": A__ = generated_sequence.reshape(__UpperCAmelCase ,out_b // in_b ,*generated_sequence.shape[1:] ) elif self.framework == "tf": A__ = tf.reshape(__UpperCAmelCase ,(in_b, out_b // in_b, *generated_sequence.shape[1:]) ) return {"generated_sequence": generated_sequence, "input_ids": input_ids, "prompt_text": prompt_text} def snake_case__ ( self ,__UpperCAmelCase ,__UpperCAmelCase=ReturnType.FULL_TEXT ,__UpperCAmelCase=True ) -> str: A__ = model_outputs['generated_sequence'][0] A__ = model_outputs['input_ids'] A__ = model_outputs['prompt_text'] A__ = generated_sequence.numpy().tolist() A__ = [] for sequence in generated_sequence: if return_type == ReturnType.TENSORS: A__ = {'generated_token_ids': sequence} elif return_type in {ReturnType.NEW_TEXT, ReturnType.FULL_TEXT}: # Decode text A__ = self.tokenizer.decode( __UpperCAmelCase ,skip_special_tokens=__UpperCAmelCase ,clean_up_tokenization_spaces=__UpperCAmelCase ,) # Remove PADDING prompt of the sequence if XLNet or Transfo-XL model is used if input_ids is None: A__ = 0 else: A__ = len( self.tokenizer.decode( input_ids[0] ,skip_special_tokens=__UpperCAmelCase ,clean_up_tokenization_spaces=__UpperCAmelCase ,) ) if return_type == ReturnType.FULL_TEXT: A__ = prompt_text + text[prompt_length:] else: A__ = text[prompt_length:] A__ = {'generated_text': all_text} records.append(__UpperCAmelCase ) return records
536
0
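A short usage sketch for the configuration class above, added for illustration; it assumes the class is importable under the name restored in the cleaned file, FalconConfig.

config = FalconConfig(hidden_size=4544, num_attention_heads=71, alibi=False)
print(config.head_dim)  # 4544 // 71 == 64
print(config.rotary)    # True: rotary embeddings are used whenever ALiBi is off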
'''simple docstring''' import inspect import math import tempfile import unittest import numpy as np from transformers import ViTMAEConfig from transformers.testing_utils import require_torch, require_vision, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import ViTMAEForPreTraining, ViTMAEModel from transformers.models.vit.modeling_vit import VIT_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import ViTImageProcessor class UpperCAmelCase__ : def __init__( self : Optional[Any],__A : List[str],__A : Optional[int]=1_3,__A : Optional[int]=3_0,__A : Union[str, Any]=2,__A : Optional[int]=3,__A : int=True,__A : Union[str, Any]=True,__A : List[Any]=3_2,__A : Optional[Any]=5,__A : str=4,__A : List[str]=3_7,__A : List[str]="gelu",__A : Optional[Any]=0.1,__A : str=0.1,__A : Any=1_0,__A : Union[str, Any]=0.02,__A : List[str]=3,__A : int=0.6,__A : str=None,): _lowerCamelCase : Any = parent _lowerCamelCase : int = batch_size _lowerCamelCase : Optional[Any] = image_size _lowerCamelCase : Any = patch_size _lowerCamelCase : List[Any] = num_channels _lowerCamelCase : Optional[int] = is_training _lowerCamelCase : List[str] = use_labels _lowerCamelCase : Any = hidden_size _lowerCamelCase : Union[str, Any] = num_hidden_layers _lowerCamelCase : Union[str, Any] = num_attention_heads _lowerCamelCase : List[str] = intermediate_size _lowerCamelCase : Union[str, Any] = hidden_act _lowerCamelCase : Any = hidden_dropout_prob _lowerCamelCase : List[str] = attention_probs_dropout_prob _lowerCamelCase : Any = type_sequence_label_size _lowerCamelCase : int = initializer_range _lowerCamelCase : Optional[int] = mask_ratio _lowerCamelCase : Tuple = scope # in ViTMAE, the expected sequence length = (num_patches + 1) * (1 - config.mask_ratio), rounded above # (we add 1 for the [CLS] token) _lowerCamelCase : Optional[Any] = (image_size // patch_size) ** 2 _lowerCamelCase : Tuple = int(math.ceil((1 - mask_ratio) * (num_patches + 1) ) ) def lowerCamelCase_ ( self : Optional[int] ): _lowerCamelCase : List[Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) _lowerCamelCase : Optional[Any] = None if self.use_labels: _lowerCamelCase : List[Any] = ids_tensor([self.batch_size],self.type_sequence_label_size ) _lowerCamelCase : Optional[int] = self.get_config() return config, pixel_values, labels def lowerCamelCase_ ( self : List[Any] ): return ViTMAEConfig( image_size=self.image_size,patch_size=self.patch_size,num_channels=self.num_channels,hidden_size=self.hidden_size,num_hidden_layers=self.num_hidden_layers,num_attention_heads=self.num_attention_heads,intermediate_size=self.intermediate_size,hidden_act=self.hidden_act,hidden_dropout_prob=self.hidden_dropout_prob,attention_probs_dropout_prob=self.attention_probs_dropout_prob,is_decoder=__lowerCamelCase,initializer_range=self.initializer_range,mask_ratio=self.mask_ratio,) def lowerCamelCase_ ( self : Dict,__A : Dict,__A : Union[str, Any],__A : str ): _lowerCamelCase : str = ViTMAEModel(config=__lowerCamelCase ) model.to(__lowerCamelCase ) model.eval() _lowerCamelCase : str = model(__lowerCamelCase ) self.parent.assertEqual(result.last_hidden_state.shape,(self.batch_size, 
self.seq_length, self.hidden_size) ) def lowerCamelCase_ ( self : Optional[int],__A : Dict,__A : Dict,__A : Optional[int] ): _lowerCamelCase : Tuple = ViTMAEForPreTraining(__lowerCamelCase ) model.to(__lowerCamelCase ) model.eval() _lowerCamelCase : Optional[int] = model(__lowerCamelCase ) _lowerCamelCase : Optional[Any] = (self.image_size // self.patch_size) ** 2 _lowerCamelCase : str = self.patch_size**2 * self.num_channels self.parent.assertEqual(result.logits.shape,(self.batch_size, num_patches, expected_num_channels) ) # test greyscale images _lowerCamelCase : Optional[int] = 1 _lowerCamelCase : Optional[Any] = ViTMAEForPreTraining(__lowerCamelCase ) model.to(__lowerCamelCase ) model.eval() _lowerCamelCase : Union[str, Any] = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] ) _lowerCamelCase : List[str] = model(__lowerCamelCase ) _lowerCamelCase : Optional[Any] = self.patch_size**2 self.parent.assertEqual(result.logits.shape,(self.batch_size, num_patches, expected_num_channels) ) def lowerCamelCase_ ( self : List[Any] ): _lowerCamelCase : int = self.prepare_config_and_inputs() _lowerCamelCase , _lowerCamelCase , _lowerCamelCase : Optional[int] = config_and_inputs _lowerCamelCase : int = {"pixel_values": pixel_values} return config, inputs_dict @require_torch class UpperCAmelCase__ ( A__ , A__ , unittest.TestCase ): lowerCAmelCase_ = (ViTMAEModel, ViTMAEForPreTraining) if is_torch_available() else () lowerCAmelCase_ = {'feature-extraction': ViTMAEModel} if is_torch_available() else {} lowerCAmelCase_ = False lowerCAmelCase_ = False lowerCAmelCase_ = False lowerCAmelCase_ = False def lowerCamelCase_ ( self : Optional[int] ): _lowerCamelCase : str = ViTMAEModelTester(self ) _lowerCamelCase : Union[str, Any] = ConfigTester(self,config_class=__lowerCamelCase,has_text_modality=__lowerCamelCase,hidden_size=3_7 ) def lowerCamelCase_ ( self : List[Any] ): self.config_tester.run_common_tests() @unittest.skip(reason="ViTMAE does not use inputs_embeds" ) def lowerCamelCase_ ( self : Optional[int] ): pass def lowerCamelCase_ ( self : List[str] ): _lowerCamelCase , _lowerCamelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: _lowerCamelCase : List[Any] = model_class(__lowerCamelCase ) self.assertIsInstance(model.get_input_embeddings(),(nn.Module) ) _lowerCamelCase : List[Any] = model.get_output_embeddings() self.assertTrue(x is None or isinstance(__lowerCamelCase,nn.Linear ) ) def lowerCamelCase_ ( self : List[str] ): _lowerCamelCase , _lowerCamelCase : Any = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: _lowerCamelCase : List[str] = model_class(__lowerCamelCase ) _lowerCamelCase : Dict = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic _lowerCamelCase : str = [*signature.parameters.keys()] _lowerCamelCase : Union[str, Any] = ["pixel_values"] self.assertListEqual(arg_names[:1],__lowerCamelCase ) def lowerCamelCase_ ( self : int ): _lowerCamelCase : int = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*__lowerCamelCase ) def lowerCamelCase_ ( self : Any ): _lowerCamelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_pretraining(*__lowerCamelCase ) def lowerCamelCase_ ( self : Optional[int],__A : int,__A : str,__A : Tuple ): # make masks reproducible np.random.seed(2 ) _lowerCamelCase : Any = 
int((pt_model.config.image_size // pt_model.config.patch_size) ** 2 ) _lowerCamelCase : Optional[Any] = np.random.uniform(size=(self.model_tester.batch_size, num_patches) ) _lowerCamelCase : Tuple = torch.from_numpy(__lowerCamelCase ) # Add `noise` argument. # PT inputs will be prepared in `super().check_pt_tf_models()` with this added `noise` argument _lowerCamelCase : Any = pt_noise super().check_pt_tf_models(__lowerCamelCase,__lowerCamelCase,__lowerCamelCase ) def lowerCamelCase_ ( self : Tuple ): _lowerCamelCase , _lowerCamelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: _lowerCamelCase : str = model_class(__lowerCamelCase ) model.to(__lowerCamelCase ) model.eval() # make random mask reproducible torch.manual_seed(2 ) with torch.no_grad(): _lowerCamelCase : List[str] = model(**self._prepare_for_class(__lowerCamelCase,__lowerCamelCase ) ) _lowerCamelCase : int = outputs[0].cpu().numpy() _lowerCamelCase : Any = 0 with tempfile.TemporaryDirectory() as tmpdirname: model.save_pretrained(__lowerCamelCase ) _lowerCamelCase : List[str] = model_class.from_pretrained(__lowerCamelCase ) model.to(__lowerCamelCase ) # make random mask reproducible torch.manual_seed(2 ) with torch.no_grad(): _lowerCamelCase : Optional[Any] = model(**self._prepare_for_class(__lowerCamelCase,__lowerCamelCase ) ) # Make sure we don't have nans _lowerCamelCase : Tuple = after_outputs[0].cpu().numpy() _lowerCamelCase : Union[str, Any] = 0 _lowerCamelCase : Optional[Any] = np.amax(np.abs(out_a - out_a ) ) self.assertLessEqual(__lowerCamelCase,1e-5 ) @unittest.skip( reason="ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load\n to get deterministic results." ) def lowerCamelCase_ ( self : Optional[int] ): pass @unittest.skip( reason="ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load\n to get deterministic results." ) def lowerCamelCase_ ( self : List[str] ): pass @unittest.skip( reason="ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load\n to get deterministic results." ) def lowerCamelCase_ ( self : Optional[Any] ): pass @unittest.skip(reason="ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load" ) def lowerCamelCase_ ( self : Tuple ): pass @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests." 
) def lowerCamelCase_ ( self : Optional[Any] ): pass @slow def lowerCamelCase_ ( self : Any ): for model_name in VIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: _lowerCamelCase : Union[str, Any] = ViTMAEModel.from_pretrained(__lowerCamelCase ) self.assertIsNotNone(__lowerCamelCase ) def A_ ( ): """simple docstring""" _lowerCamelCase : Optional[Any] = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" ) return image @require_torch @require_vision class UpperCAmelCase__ ( unittest.TestCase ): @cached_property def lowerCamelCase_ ( self : str ): return ViTImageProcessor.from_pretrained("facebook/vit-mae-base" ) if is_vision_available() else None @slow def lowerCamelCase_ ( self : List[str] ): # make random mask reproducible across the PT and TF model np.random.seed(2 ) _lowerCamelCase : Tuple = ViTMAEForPreTraining.from_pretrained("facebook/vit-mae-base" ).to(__lowerCamelCase ) _lowerCamelCase : str = self.default_image_processor _lowerCamelCase : Union[str, Any] = prepare_img() _lowerCamelCase : int = image_processor(images=__lowerCamelCase,return_tensors="pt" ).to(__lowerCamelCase ) # prepare a noise vector that will be also used for testing the TF model # (this way we can ensure that the PT and TF models operate on the same inputs) _lowerCamelCase : Any = ViTMAEConfig() _lowerCamelCase : List[str] = int((vit_mae_config.image_size // vit_mae_config.patch_size) ** 2 ) _lowerCamelCase : List[Any] = np.random.uniform(size=(1, num_patches) ) # forward pass with torch.no_grad(): _lowerCamelCase : Tuple = model(**__lowerCamelCase,noise=torch.from_numpy(__lowerCamelCase ).to(device=__lowerCamelCase ) ) # verify the logits _lowerCamelCase : int = torch.Size((1, 1_9_6, 7_6_8) ) self.assertEqual(outputs.logits.shape,__lowerCamelCase ) _lowerCamelCase : List[str] = torch.tensor( [[-0.0548, -1.7023, -0.9325], [0.3721, -0.5670, -0.2233], [0.8235, -1.3878, -0.3524]] ) self.assertTrue(torch.allclose(outputs.logits[0, :3, :3],expected_slice.to(__lowerCamelCase ),atol=1e-4 ) )
44
# Lint as: python3
# pylint: enable=line-too-long
# pylint: disable=g-import-not-at-top,g-bad-import-order,wrong-import-position

__version__ = "2.13.1"

import platform

import pyarrow
from packaging import version


if version.parse(platform.python_version()) < version.parse("3.7"):
    raise ImportWarning(
        "To use `datasets`, Python>=3.7 is required, and the current version of Python doesn't match this condition."
    )

if version.parse(pyarrow.__version__).major < 8:
    raise ImportWarning(
        "To use `datasets`, the module `pyarrow>=8.0.0` is required, and the current version of `pyarrow` doesn't match this condition.\n"
        "If you are running this in a Google Colab, you should probably just restart the runtime to use the right version of `pyarrow`."
    )

del platform
del pyarrow
del version

from .arrow_dataset import Dataset
from .arrow_reader import ReadInstruction
from .builder import ArrowBasedBuilder, BeamBasedBuilder, BuilderConfig, DatasetBuilder, GeneratorBasedBuilder
from .combine import concatenate_datasets, interleave_datasets
from .dataset_dict import DatasetDict, IterableDatasetDict
from .download import *
from .features import *
from .fingerprint import disable_caching, enable_caching, is_caching_enabled, set_caching_enabled
from .info import DatasetInfo, MetricInfo
from .inspect import (
    get_dataset_config_info,
    get_dataset_config_names,
    get_dataset_infos,
    get_dataset_split_names,
    inspect_dataset,
    inspect_metric,
    list_datasets,
    list_metrics,
)
from .iterable_dataset import IterableDataset
from .load import load_dataset, load_dataset_builder, load_from_disk, load_metric
from .metric import Metric
from .splits import (
    NamedSplit,
    NamedSplitAll,
    Split,
    SplitBase,
    SplitDict,
    SplitGenerator,
    SplitInfo,
    SubSplitInfo,
    percent,
)
from .tasks import *
from .utils import *
from .utils import logging

# deprecated modules
from datasets import arrow_dataset as _arrow_dataset  # isort:skip
from datasets import utils as _utils  # isort:skip
from datasets.utils import download_manager as _deprecated_download_manager  # isort:skip

_arrow_dataset.concatenate_datasets = concatenate_datasets
_utils.DownloadConfig = DownloadConfig
_utils.DownloadManager = DownloadManager
_utils.DownloadMode = DownloadMode
_deprecated_download_manager.DownloadConfig = DownloadConfig
_deprecated_download_manager.DownloadMode = DownloadMode
_deprecated_download_manager.DownloadManager = DownloadManager

del _arrow_dataset, _utils, _deprecated_download_manager
493
0
from math import pow


def backtrack(
    needed_sum: int,
    power: int,
    current_number: int,
    current_sum: int,
    solutions_count: int,
):
    """Recursively count ways to write needed_sum as a sum of unique natural numbers raised to power."""
    if current_sum == needed_sum:
        # If the sum of the powers is equal to needed_sum, then we have a solution.
        solutions_count += 1
        return current_sum, solutions_count

    i_to_n = int(pow(current_number, power))
    if current_sum + i_to_n <= needed_sum:
        # If the sum of the powers is less than needed_sum, then continue adding powers.
        current_sum += i_to_n
        current_sum, solutions_count = backtrack(
            needed_sum, power, current_number + 1, current_sum, solutions_count
        )
        current_sum -= i_to_n
    if i_to_n < needed_sum:
        # If the power of i is less than needed_sum, then try with the next power.
        current_sum, solutions_count = backtrack(
            needed_sum, power, current_number + 1, current_sum, solutions_count
        )
    return current_sum, solutions_count


def solve(needed_sum: int, power: int) -> int:
    """Validate the inputs and return the number of solutions."""
    if not (1 <= needed_sum <= 1000 and 2 <= power <= 10):
        raise ValueError(
            "Invalid input\n"
            "needed_sum must be between 1 and 1000, power between 2 and 10."
        )
    return backtrack(needed_sum, power, 1, 0, 0)[1]  # Return the solutions_count


if __name__ == "__main__":
    import doctest

    doctest.testmod()
636
import warnings from typing import List import numpy as np from ...processing_utils import ProcessorMixin from ...tokenization_utils_base import BatchEncoding from ...utils import is_flax_available, is_tf_available, is_torch_available class snake_case ( UpperCamelCase_ ): lowercase_ = ['image_processor', 'tokenizer'] lowercase_ = 'OwlViTImageProcessor' lowercase_ = ('CLIPTokenizer', 'CLIPTokenizerFast') def __init__( self : List[str] , a_ : List[Any]=None , a_ : str=None , **a_ : Any )-> Optional[Any]: """simple docstring""" SCREAMING_SNAKE_CASE__ : Dict = None if "feature_extractor" in kwargs: warnings.warn( 'The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`' ' instead.' , a_ , ) SCREAMING_SNAKE_CASE__ : Tuple = kwargs.pop('feature_extractor' ) SCREAMING_SNAKE_CASE__ : List[str] = image_processor if image_processor is not None else feature_extractor if image_processor is None: raise ValueError('You need to specify an `image_processor`.' ) if tokenizer is None: raise ValueError('You need to specify a `tokenizer`.' ) super().__init__(a_ , a_ ) def __call__( self : Any , a_ : Optional[int]=None , a_ : Tuple=None , a_ : List[Any]=None , a_ : Tuple="max_length" , a_ : str="np" , **a_ : Any )-> int: """simple docstring""" if text is None and query_images is None and images is None: raise ValueError( 'You have to specify at least one text or query image or image. All three cannot be none.' ) if text is not None: if isinstance(a_ , a_ ) or (isinstance(a_ , a_ ) and not isinstance(text[0] , a_ )): SCREAMING_SNAKE_CASE__ : Tuple = [self.tokenizer(a_ , padding=a_ , return_tensors=a_ , **a_ )] elif isinstance(a_ , a_ ) and isinstance(text[0] , a_ ): SCREAMING_SNAKE_CASE__ : Any = [] # Maximum number of queries across batch SCREAMING_SNAKE_CASE__ : str = max([len(a_ ) for t in text] ) # Pad all batch samples to max number of text queries for t in text: if len(a_ ) != max_num_queries: SCREAMING_SNAKE_CASE__ : Tuple = t + [' '] * (max_num_queries - len(a_ )) SCREAMING_SNAKE_CASE__ : Optional[Any] = self.tokenizer(a_ , padding=a_ , return_tensors=a_ , **a_ ) encodings.append(a_ ) else: raise TypeError('Input text should be a string, a list of strings or a nested list of strings' ) if return_tensors == "np": SCREAMING_SNAKE_CASE__ : Dict = np.concatenate([encoding['input_ids'] for encoding in encodings] , axis=0 ) SCREAMING_SNAKE_CASE__ : List[Any] = np.concatenate([encoding['attention_mask'] for encoding in encodings] , axis=0 ) elif return_tensors == "jax" and is_flax_available(): import jax.numpy as jnp SCREAMING_SNAKE_CASE__ : Union[str, Any] = jnp.concatenate([encoding['input_ids'] for encoding in encodings] , axis=0 ) SCREAMING_SNAKE_CASE__ : Optional[Any] = jnp.concatenate([encoding['attention_mask'] for encoding in encodings] , axis=0 ) elif return_tensors == "pt" and is_torch_available(): import torch SCREAMING_SNAKE_CASE__ : int = torch.cat([encoding['input_ids'] for encoding in encodings] , dim=0 ) SCREAMING_SNAKE_CASE__ : Optional[int] = torch.cat([encoding['attention_mask'] for encoding in encodings] , dim=0 ) elif return_tensors == "tf" and is_tf_available(): import tensorflow as tf SCREAMING_SNAKE_CASE__ : str = tf.stack([encoding['input_ids'] for encoding in encodings] , axis=0 ) SCREAMING_SNAKE_CASE__ : Dict = tf.stack([encoding['attention_mask'] for encoding in encodings] , axis=0 ) else: raise ValueError('Target return tensor type could not be returned' ) SCREAMING_SNAKE_CASE__ : Optional[int] = BatchEncoding() SCREAMING_SNAKE_CASE__ : 
List[str] = input_ids SCREAMING_SNAKE_CASE__ : Tuple = attention_mask if query_images is not None: SCREAMING_SNAKE_CASE__ : Any = BatchEncoding() SCREAMING_SNAKE_CASE__ : Dict = self.image_processor( a_ , return_tensors=a_ , **a_ ).pixel_values SCREAMING_SNAKE_CASE__ : Dict = query_pixel_values if images is not None: SCREAMING_SNAKE_CASE__ : Union[str, Any] = self.image_processor(a_ , return_tensors=a_ , **a_ ) if text is not None and images is not None: SCREAMING_SNAKE_CASE__ : Dict = image_features.pixel_values return encoding elif query_images is not None and images is not None: SCREAMING_SNAKE_CASE__ : Optional[int] = image_features.pixel_values return encoding elif text is not None or query_images is not None: return encoding else: return BatchEncoding(data=dict(**a_ ) , tensor_type=a_ ) def __lowercase( self : str , *a_ : List[str] , **a_ : int )-> List[Any]: """simple docstring""" return self.image_processor.post_process(*a_ , **a_ ) def __lowercase( self : Tuple , *a_ : List[str] , **a_ : str )-> Union[str, Any]: """simple docstring""" return self.image_processor.post_process_object_detection(*a_ , **a_ ) def __lowercase( self : Optional[Any] , *a_ : str , **a_ : Dict )-> Optional[int]: """simple docstring""" return self.image_processor.post_process_image_guided_detection(*a_ , **a_ ) def __lowercase( self : Optional[int] , *a_ : Tuple , **a_ : Tuple )-> Optional[Any]: """simple docstring""" return self.tokenizer.batch_decode(*a_ , **a_ ) def __lowercase( self : Tuple , *a_ : Tuple , **a_ : Tuple )-> List[str]: """simple docstring""" return self.tokenizer.decode(*a_ , **a_ ) @property def __lowercase( self : Tuple )-> Any: """simple docstring""" warnings.warn( '`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.' , a_ , ) return self.image_processor_class @property def __lowercase( self : List[Any] )-> List[str]: """simple docstring""" warnings.warn( '`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.' , a_ , ) return self.image_processor
636
1
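A hedged usage sketch for the solver above. The inner function must be named `backtrack` (its own body calls it by that name); the wrapper name `solve` is the one restored in the cleaned file, so adjust if your copy differs.

# Count representations of a target as a sum of unique natural numbers,
# each raised to the given power; 13 == 2**2 + 3**2 is the only way to
# write 13 as a sum of distinct squares.
print(solve(13, 2))  # expected: 1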
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available snake_case = { '''configuration_pegasus_x''': ['''PEGASUS_X_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''PegasusXConfig'''], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: snake_case = [ '''PEGASUS_X_PRETRAINED_MODEL_ARCHIVE_LIST''', '''PegasusXForConditionalGeneration''', '''PegasusXModel''', '''PegasusXPreTrainedModel''', ] if TYPE_CHECKING: from .configuration_pegasus_x import PEGASUS_X_PRETRAINED_CONFIG_ARCHIVE_MAP, PegasusXConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_pegasus_x import ( PEGASUS_X_PRETRAINED_MODEL_ARCHIVE_LIST, PegasusXForConditionalGeneration, PegasusXModel, PegasusXPreTrainedModel, ) else: import sys snake_case = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
103
"""simple docstring""" import argparse import json import os import numpy as np import PIL import requests import tensorflow.keras.applications.efficientnet as efficientnet import torch from huggingface_hub import hf_hub_download from PIL import Image from tensorflow.keras.preprocessing import image from transformers import ( EfficientNetConfig, EfficientNetForImageClassification, EfficientNetImageProcessor, ) from transformers.utils import logging logging.set_verbosity_info() lowercase_ = logging.get_logger(__name__) lowercase_ = { 'b0': efficientnet.EfficientNetBa, 'b1': efficientnet.EfficientNetBa, 'b2': efficientnet.EfficientNetBa, 'b3': efficientnet.EfficientNetBa, 'b4': efficientnet.EfficientNetBa, 'b5': efficientnet.EfficientNetBa, 'b6': efficientnet.EfficientNetBa, 'b7': efficientnet.EfficientNetBa, } lowercase_ = { 'b0': { 'hidden_dim': 12_80, 'width_coef': 1.0, 'depth_coef': 1.0, 'image_size': 2_24, 'dropout_rate': 0.2, 'dw_padding': [], }, 'b1': { 'hidden_dim': 12_80, 'width_coef': 1.0, 'depth_coef': 1.1, 'image_size': 2_40, 'dropout_rate': 0.2, 'dw_padding': [16], }, 'b2': { 'hidden_dim': 14_08, 'width_coef': 1.1, 'depth_coef': 1.2, 'image_size': 2_60, 'dropout_rate': 0.3, 'dw_padding': [5, 8, 16], }, 'b3': { 'hidden_dim': 15_36, 'width_coef': 1.2, 'depth_coef': 1.4, 'image_size': 3_00, 'dropout_rate': 0.3, 'dw_padding': [5, 18], }, 'b4': { 'hidden_dim': 17_92, 'width_coef': 1.4, 'depth_coef': 1.8, 'image_size': 3_80, 'dropout_rate': 0.4, 'dw_padding': [6], }, 'b5': { 'hidden_dim': 20_48, 'width_coef': 1.6, 'depth_coef': 2.2, 'image_size': 4_56, 'dropout_rate': 0.4, 'dw_padding': [13, 27], }, 'b6': { 'hidden_dim': 23_04, 'width_coef': 1.8, 'depth_coef': 2.6, 'image_size': 5_28, 'dropout_rate': 0.5, 'dw_padding': [31], }, 'b7': { 'hidden_dim': 25_60, 'width_coef': 2.0, 'depth_coef': 3.1, 'image_size': 6_00, 'dropout_rate': 0.5, 'dw_padding': [18], }, } def UpperCAmelCase ( _lowercase : Union[str, Any] ) -> List[Any]: """simple docstring""" lowerCAmelCase_ = EfficientNetConfig() lowerCAmelCase_ = CONFIG_MAP[model_name]['''hidden_dim'''] lowerCAmelCase_ = CONFIG_MAP[model_name]['''width_coef'''] lowerCAmelCase_ = CONFIG_MAP[model_name]['''depth_coef'''] lowerCAmelCase_ = CONFIG_MAP[model_name]['''image_size'''] lowerCAmelCase_ = CONFIG_MAP[model_name]['''dropout_rate'''] lowerCAmelCase_ = CONFIG_MAP[model_name]['''dw_padding'''] lowerCAmelCase_ = '''huggingface/label-files''' lowerCAmelCase_ = '''imagenet-1k-id2label.json''' lowerCAmelCase_ = 1_0_0_0 lowerCAmelCase_ = json.load(open(hf_hub_download(_lowercase , _lowercase , repo_type='''dataset''' ) , '''r''' ) ) lowerCAmelCase_ = {int(_lowercase ): v for k, v in idalabel.items()} lowerCAmelCase_ = idalabel lowerCAmelCase_ = {v: k for k, v in idalabel.items()} return config def UpperCAmelCase ( ) -> List[str]: """simple docstring""" lowerCAmelCase_ = '''http://images.cocodataset.org/val2017/000000039769.jpg''' lowerCAmelCase_ = Image.open(requests.get(_lowercase , stream=_lowercase ).raw ) return im def UpperCAmelCase ( _lowercase : Dict ) -> int: """simple docstring""" lowerCAmelCase_ = CONFIG_MAP[model_name]['''image_size'''] lowerCAmelCase_ = EfficientNetImageProcessor( size={'''height''': size, '''width''': size} , image_mean=[0.4_8_5, 0.4_5_6, 0.4_0_6] , image_std=[0.4_7_8_5_3_9_4_4, 0.4_7_3_2_8_6_4, 0.4_7_4_3_4_1_6_3] , do_center_crop=_lowercase , ) return preprocessor def UpperCAmelCase ( _lowercase : List[Any] ) -> int: """simple docstring""" lowerCAmelCase_ = [v.split('''_''' )[0].split('''block''' )[1] for v in 
original_param_names if v.startswith('''block''' )] lowerCAmelCase_ = sorted(set(_lowercase ) ) lowerCAmelCase_ = len(_lowercase ) lowerCAmelCase_ = {b: str(_lowercase ) for b, i in zip(_lowercase , range(_lowercase ) )} lowerCAmelCase_ = [] rename_keys.append(('''stem_conv/kernel:0''', '''embeddings.convolution.weight''') ) rename_keys.append(('''stem_bn/gamma:0''', '''embeddings.batchnorm.weight''') ) rename_keys.append(('''stem_bn/beta:0''', '''embeddings.batchnorm.bias''') ) rename_keys.append(('''stem_bn/moving_mean:0''', '''embeddings.batchnorm.running_mean''') ) rename_keys.append(('''stem_bn/moving_variance:0''', '''embeddings.batchnorm.running_var''') ) for b in block_names: lowerCAmelCase_ = block_name_mapping[b] rename_keys.append((F"""block{b}_expand_conv/kernel:0""", F"""encoder.blocks.{hf_b}.expansion.expand_conv.weight""") ) rename_keys.append((F"""block{b}_expand_bn/gamma:0""", F"""encoder.blocks.{hf_b}.expansion.expand_bn.weight""") ) rename_keys.append((F"""block{b}_expand_bn/beta:0""", F"""encoder.blocks.{hf_b}.expansion.expand_bn.bias""") ) rename_keys.append( (F"""block{b}_expand_bn/moving_mean:0""", F"""encoder.blocks.{hf_b}.expansion.expand_bn.running_mean""") ) rename_keys.append( (F"""block{b}_expand_bn/moving_variance:0""", F"""encoder.blocks.{hf_b}.expansion.expand_bn.running_var""") ) rename_keys.append( (F"""block{b}_dwconv/depthwise_kernel:0""", F"""encoder.blocks.{hf_b}.depthwise_conv.depthwise_conv.weight""") ) rename_keys.append((F"""block{b}_bn/gamma:0""", F"""encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.weight""") ) rename_keys.append((F"""block{b}_bn/beta:0""", F"""encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.bias""") ) rename_keys.append( (F"""block{b}_bn/moving_mean:0""", F"""encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.running_mean""") ) rename_keys.append( (F"""block{b}_bn/moving_variance:0""", F"""encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.running_var""") ) rename_keys.append((F"""block{b}_se_reduce/kernel:0""", F"""encoder.blocks.{hf_b}.squeeze_excite.reduce.weight""") ) rename_keys.append((F"""block{b}_se_reduce/bias:0""", F"""encoder.blocks.{hf_b}.squeeze_excite.reduce.bias""") ) rename_keys.append((F"""block{b}_se_expand/kernel:0""", F"""encoder.blocks.{hf_b}.squeeze_excite.expand.weight""") ) rename_keys.append((F"""block{b}_se_expand/bias:0""", F"""encoder.blocks.{hf_b}.squeeze_excite.expand.bias""") ) rename_keys.append( (F"""block{b}_project_conv/kernel:0""", F"""encoder.blocks.{hf_b}.projection.project_conv.weight""") ) rename_keys.append((F"""block{b}_project_bn/gamma:0""", F"""encoder.blocks.{hf_b}.projection.project_bn.weight""") ) rename_keys.append((F"""block{b}_project_bn/beta:0""", F"""encoder.blocks.{hf_b}.projection.project_bn.bias""") ) rename_keys.append( (F"""block{b}_project_bn/moving_mean:0""", F"""encoder.blocks.{hf_b}.projection.project_bn.running_mean""") ) rename_keys.append( (F"""block{b}_project_bn/moving_variance:0""", F"""encoder.blocks.{hf_b}.projection.project_bn.running_var""") ) rename_keys.append(('''top_conv/kernel:0''', '''encoder.top_conv.weight''') ) rename_keys.append(('''top_bn/gamma:0''', '''encoder.top_bn.weight''') ) rename_keys.append(('''top_bn/beta:0''', '''encoder.top_bn.bias''') ) rename_keys.append(('''top_bn/moving_mean:0''', '''encoder.top_bn.running_mean''') ) rename_keys.append(('''top_bn/moving_variance:0''', '''encoder.top_bn.running_var''') ) lowerCAmelCase_ = {} for item in rename_keys: if item[0] in original_param_names: lowerCAmelCase_ = '''efficientnet.''' + 
item[1] lowerCAmelCase_ = '''classifier.weight''' lowerCAmelCase_ = '''classifier.bias''' return key_mapping def UpperCAmelCase ( _lowercase : Dict , _lowercase : List[str] , _lowercase : str ) -> Any: """simple docstring""" for key, value in tf_params.items(): if "normalization" in key: continue lowerCAmelCase_ = key_mapping[key] if "_conv" in key and "kernel" in key: lowerCAmelCase_ = torch.from_numpy(_lowercase ).permute(3 , 2 , 0 , 1 ) elif "depthwise_kernel" in key: lowerCAmelCase_ = torch.from_numpy(_lowercase ).permute(2 , 3 , 0 , 1 ) elif "kernel" in key: lowerCAmelCase_ = torch.from_numpy(np.transpose(_lowercase ) ) else: lowerCAmelCase_ = torch.from_numpy(_lowercase ) # Replace HF parameters with original TF model parameters assert hf_params[hf_key].shape == new_hf_value.shape hf_params[hf_key].copy_(_lowercase ) @torch.no_grad() def UpperCAmelCase ( _lowercase : str , _lowercase : Any , _lowercase : List[Any] , _lowercase : Union[str, Any] ) -> Optional[int]: """simple docstring""" lowerCAmelCase_ = model_classes[model_name]( include_top=_lowercase , weights='''imagenet''' , input_tensor=_lowercase , input_shape=_lowercase , pooling=_lowercase , classes=1_0_0_0 , classifier_activation='''softmax''' , ) lowerCAmelCase_ = original_model.trainable_variables lowerCAmelCase_ = original_model.non_trainable_variables lowerCAmelCase_ = {param.name: param.numpy() for param in tf_params} for param in tf_non_train_params: lowerCAmelCase_ = param.numpy() lowerCAmelCase_ = list(tf_params.keys() ) # Load HuggingFace model lowerCAmelCase_ = get_efficientnet_config(_lowercase ) lowerCAmelCase_ = EfficientNetForImageClassification(_lowercase ).eval() lowerCAmelCase_ = hf_model.state_dict() # Create src-to-dst parameter name mapping dictionary print('''Converting parameters...''' ) lowerCAmelCase_ = rename_keys(_lowercase ) replace_params(_lowercase , _lowercase , _lowercase ) # Initialize preprocessor and preprocess input image lowerCAmelCase_ = convert_image_processor(_lowercase ) lowerCAmelCase_ = preprocessor(images=prepare_img() , return_tensors='''pt''' ) # HF model inference hf_model.eval() with torch.no_grad(): lowerCAmelCase_ = hf_model(**_lowercase ) lowerCAmelCase_ = outputs.logits.detach().numpy() # Original model inference lowerCAmelCase_ = False lowerCAmelCase_ = CONFIG_MAP[model_name]['''image_size'''] lowerCAmelCase_ = prepare_img().resize((image_size, image_size) , resample=PIL.Image.NEAREST ) lowerCAmelCase_ = image.img_to_array(_lowercase ) lowerCAmelCase_ = np.expand_dims(_lowercase , axis=0 ) lowerCAmelCase_ = original_model.predict(_lowercase ) # Check whether original and HF model outputs match -> np.allclose assert np.allclose(_lowercase , _lowercase , atol=1E-3 ), "The predicted logits are not the same." 
print('''Model outputs match!''' ) if save_model: # Create folder to save model if not os.path.isdir(_lowercase ): os.mkdir(_lowercase ) # Save converted model and image processor hf_model.save_pretrained(_lowercase ) preprocessor.save_pretrained(_lowercase ) if push_to_hub: # Push model and image processor to hub print(F"""Pushing converted {model_name} to the hub...""" ) lowerCAmelCase_ = F"""efficientnet-{model_name}""" preprocessor.push_to_hub(_lowercase ) hf_model.push_to_hub(_lowercase ) if __name__ == "__main__": lowercase_ = argparse.ArgumentParser() # Required parameters parser.add_argument( '--model_name', default='b0', type=str, help='Version name of the EfficientNet model you want to convert, select from [b0, b1, b2, b3, b4, b5, b6, b7].', ) parser.add_argument( '--pytorch_dump_folder_path', default='hf_model', type=str, help='Path to the output PyTorch model directory.', ) parser.add_argument('--save_model', action='store_true', help='Save model to local') parser.add_argument('--push_to_hub', action='store_true', help='Push model and image processor to the hub') lowercase_ = parser.parse_args() convert_efficientnet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.save_model, args.push_to_hub)
552
0
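An added illustration of what the `_LazyModule` wiring above buys: importing the package is cheap, and the torch-backed symbols are materialized only on first attribute access. This assumes a transformers installation that ships the pegasus_x module.

import transformers.models.pegasus_x as pegasus_x

config_cls = pegasus_x.PegasusXConfig  # first access triggers the real import
print(config_cls().model_type)  # "pegasus_x"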
'''simple docstring''' from typing import List, Optional, Tuple, Union import PIL import torch from torchvision import transforms from diffusers.pipeline_utils import DiffusionPipeline, ImagePipelineOutput from diffusers.schedulers import DDIMScheduler from diffusers.utils import randn_tensor _SCREAMING_SNAKE_CASE = transforms.Compose( [ transforms.Resize((2_5_6, 2_5_6)), transforms.ToTensor(), transforms.Normalize([0.5], [0.5]), ] ) def _lowerCAmelCase ( lowerCamelCase_ : Union[str, Any] ): if isinstance(lowerCamelCase_ , torch.Tensor ): return image elif isinstance(lowerCamelCase_ , PIL.Image.Image ): __lowercase = [image] __lowercase = [trans(img.convert('''RGB''' ) ) for img in image] __lowercase = torch.stack(lowerCamelCase_ ) return image class __lowercase ( lowerCAmelCase__ ): '''simple docstring''' def __init__(self ,_lowerCamelCase ,_lowerCamelCase ) -> Any: '''simple docstring''' super().__init__() # make sure scheduler can always be converted to DDIM __lowercase = DDIMScheduler.from_config(scheduler.config ) self.register_modules(unet=_lowerCamelCase ,scheduler=_lowerCamelCase ) def _UpperCAmelCase (self ,_lowerCamelCase ) -> Any: '''simple docstring''' if strength < 0 or strength > 1: raise ValueError(f"The value of strength should in [0.0, 1.0] but is {strength}" ) def _UpperCAmelCase (self ,_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase ) -> Dict: '''simple docstring''' __lowercase = min(int(num_inference_steps * strength ) ,_lowerCamelCase ) __lowercase = max(num_inference_steps - init_timestep ,0 ) __lowercase = self.scheduler.timesteps[t_start:] return timesteps, num_inference_steps - t_start def _UpperCAmelCase (self ,_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase=None ) -> str: '''simple docstring''' if not isinstance(_lowerCamelCase ,(torch.Tensor, PIL.Image.Image, list) ): raise ValueError( f"`image` has to be of type `torch.Tensor`, `PIL.Image.Image` or list but is {type(_lowerCamelCase )}" ) __lowercase = image.to(device=_lowerCamelCase ,dtype=_lowerCamelCase ) if isinstance(_lowerCamelCase ,_lowerCamelCase ) and len(_lowerCamelCase ) != batch_size: raise ValueError( f"You have passed a list of generators of length {len(_lowerCamelCase )}, but requested an effective batch" f" size of {batch_size}. Make sure the batch size matches the length of the generators." ) __lowercase = init_latents.shape __lowercase = randn_tensor(_lowerCamelCase ,generator=_lowerCamelCase ,device=_lowerCamelCase ,dtype=_lowerCamelCase ) # get latents print('''add noise to latents at timestep''' ,_lowerCamelCase ) __lowercase = self.scheduler.add_noise(_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase ) __lowercase = init_latents return latents @torch.no_grad() def __call__(self ,_lowerCamelCase = None ,_lowerCamelCase = 0.8 ,_lowerCamelCase = 1 ,_lowerCamelCase = None ,_lowerCamelCase = 0.0 ,_lowerCamelCase = 50 ,_lowerCamelCase = None ,_lowerCamelCase = "pil" ,_lowerCamelCase = True ,) -> Union[ImagePipelineOutput, Tuple]: '''simple docstring''' self.check_inputs(_lowerCamelCase ) # 2. Preprocess image __lowercase = preprocess(_lowerCamelCase ) # 3. set timesteps self.scheduler.set_timesteps(_lowerCamelCase ,device=self.device ) __lowercase , __lowercase = self.get_timesteps(_lowerCamelCase ,_lowerCamelCase ,self.device ) __lowercase = timesteps[:1].repeat(_lowerCamelCase ) # 4. 
Prepare latent variables __lowercase = self.prepare_latents(_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase ,self.unet.dtype ,self.device ,_lowerCamelCase ) __lowercase = latents # 5. Denoising loop for t in self.progress_bar(_lowerCamelCase ): # 1. predict noise model_output __lowercase = self.unet(_lowerCamelCase ,_lowerCamelCase ).sample # 2. predict previous mean of image x_t-1 and add variance depending on eta # eta corresponds to η in paper and should be between [0, 1] # do x_t -> x_t-1 __lowercase = self.scheduler.step( _lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase ,eta=_lowerCamelCase ,use_clipped_model_output=_lowerCamelCase ,generator=_lowerCamelCase ,).prev_sample __lowercase = (image / 2 + 0.5).clamp(0 ,1 ) __lowercase = image.cpu().permute(0 ,2 ,3 ,1 ).numpy() if output_type == "pil": __lowercase = self.numpy_to_pil(_lowerCamelCase ) if not return_dict: return (image, latent_timestep.item()) return ImagePipelineOutput(images=_lowerCamelCase )
56
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_sentencepiece_available, is_tf_available, is_tokenizers_available, is_torch_available, ) _SCREAMING_SNAKE_CASE = {'''configuration_xglm''': ['''XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''XGLMConfig''']} try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _SCREAMING_SNAKE_CASE = ['''XGLMTokenizer'''] try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _SCREAMING_SNAKE_CASE = ['''XGLMTokenizerFast'''] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _SCREAMING_SNAKE_CASE = [ '''XGLM_PRETRAINED_MODEL_ARCHIVE_LIST''', '''XGLMForCausalLM''', '''XGLMModel''', '''XGLMPreTrainedModel''', ] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _SCREAMING_SNAKE_CASE = [ '''FlaxXGLMForCausalLM''', '''FlaxXGLMModel''', '''FlaxXGLMPreTrainedModel''', ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _SCREAMING_SNAKE_CASE = [ '''TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST''', '''TFXGLMForCausalLM''', '''TFXGLMModel''', '''TFXGLMPreTrainedModel''', ] if TYPE_CHECKING: from .configuration_xglm import XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP, XGLMConfig try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_xglm import XGLMTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_xglm_fast import XGLMTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_xglm import XGLM_PRETRAINED_MODEL_ARCHIVE_LIST, XGLMForCausalLM, XGLMModel, XGLMPreTrainedModel try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_xglm import FlaxXGLMForCausalLM, FlaxXGLMModel, FlaxXGLMPreTrainedModel try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_xglm import ( TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST, TFXGLMForCausalLM, TFXGLMModel, TFXGLMPreTrainedModel, ) else: import sys _SCREAMING_SNAKE_CASE = _LazyModule(__name__, globals()['''__file__'''], _import_structure)
56
1
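A standalone arithmetic sketch of the img2img timestep truncation in `get_timesteps` above: with `strength` in [0, 1], only the tail of the scheduler's timestep list is denoised, so the input image is re-noised part of the way rather than generated from scratch.

num_inference_steps = 50
strength = 0.8
init_timestep = min(int(num_inference_steps * strength), num_inference_steps)
t_start = max(num_inference_steps - init_timestep, 0)
# strength=0.8 keeps the last 40 of 50 steps: timesteps[10:]
print(f"denoising timesteps[{t_start}:] -> {num_inference_steps - t_start} of {num_inference_steps} steps")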
'''simple docstring'''

# tests directory-specific settings - this file is run automatically
# by pytest before any tests are run

import sys
import warnings
from os.path import abspath, dirname, join


# allow having multiple repository checkouts and not needing to remember to rerun
# 'pip install -e .[dev]' when switching between checkouts and running tests.
git_repo_path = abspath(join(dirname(dirname(__file__)), 'src'))
sys.path.insert(1, git_repo_path)

# silence FutureWarning warnings in tests since often we can't act on them until
# they become normal warnings - i.e. the tests still need to test the current functionality
warnings.simplefilter(action='ignore', category=FutureWarning)


def pytest_addoption(parser):
    '''simple docstring'''
    from diffusers.utils.testing_utils import pytest_addoption_shared

    pytest_addoption_shared(parser)


def pytest_terminal_summary(terminalreporter):
    '''simple docstring'''
    from diffusers.utils.testing_utils import pytest_terminal_summary_main

    make_reports = terminalreporter.config.getoption('--make-reports')
    if make_reports:
        pytest_terminal_summary_main(terminalreporter, id=make_reports)
24
import argparse import intel_extension_for_pytorch as ipex import torch from diffusers import DPMSolverMultistepScheduler, StableDiffusionPipeline parser = argparse.ArgumentParser("Stable Diffusion script with intel optimization", add_help=False) parser.add_argument("--dpm", action="store_true", help="Enable DPMSolver or not") parser.add_argument("--steps", default=None, type=int, help="Num inference steps") args = parser.parse_args() device = "cpu" prompt = "a lovely <dicoo> in red dress and hat, in the snowly and brightly night, with many brighly buildings" model_id = "path-to-your-trained-model" pipe = StableDiffusionPipeline.from_pretrained(model_id) if args.dpm: pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config) pipe = pipe.to(device) # to channels last pipe.unet = pipe.unet.to(memory_format=torch.channels_last) pipe.vae = pipe.vae.to(memory_format=torch.channels_last) pipe.text_encoder = pipe.text_encoder.to(memory_format=torch.channels_last) if pipe.requires_safety_checker: pipe.safety_checker = pipe.safety_checker.to(memory_format=torch.channels_last) # optimize with ipex sample = torch.randn(2, 4, 64, 64) timestep = torch.rand(1) * 999 encoder_hidden_status = torch.randn(2, 77, 768) input_example = (sample, timestep, encoder_hidden_status) try: pipe.unet = ipex.optimize(pipe.unet.eval(), dtype=torch.bfloat16, inplace=True, sample_input=input_example) except Exception: pipe.unet = ipex.optimize(pipe.unet.eval(), dtype=torch.bfloat16, inplace=True) pipe.vae = ipex.optimize(pipe.vae.eval(), dtype=torch.bfloat16, inplace=True) pipe.text_encoder = ipex.optimize(pipe.text_encoder.eval(), dtype=torch.bfloat16, inplace=True) if pipe.requires_safety_checker: pipe.safety_checker = ipex.optimize(pipe.safety_checker.eval(), dtype=torch.bfloat16, inplace=True) # compute seed = 666 generator = torch.Generator(device).manual_seed(seed) generate_kwargs = {"generator": generator} if args.steps is not None: generate_kwargs["num_inference_steps"] = args.steps with torch.cpu.amp.autocast(enabled=True, dtype=torch.bfloat16): image = pipe(prompt, **generate_kwargs).images[0] # save image image.save("generated.png")
483
0
import unittest from transformers import AutoConfig, AutoTokenizer, BertConfig, TensorType, is_flax_available from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, require_flax, slow if is_flax_available(): import jax from transformers.models.auto.modeling_flax_auto import FlaxAutoModel from transformers.models.bert.modeling_flax_bert import FlaxBertModel from transformers.models.roberta.modeling_flax_roberta import FlaxRobertaModel @require_flax class __UpperCAmelCase ( unittest.TestCase ): """simple docstring""" @slow def __lowerCAmelCase ( self ) -> Any: """simple docstring""" for model_name in ["bert-base-cased", "bert-large-uncased"]: with self.subTest(SCREAMING_SNAKE_CASE ): UpperCamelCase = AutoConfig.from_pretrained(SCREAMING_SNAKE_CASE ) self.assertIsNotNone(SCREAMING_SNAKE_CASE ) self.assertIsInstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) UpperCamelCase = FlaxAutoModel.from_pretrained(SCREAMING_SNAKE_CASE ) self.assertIsNotNone(SCREAMING_SNAKE_CASE ) self.assertIsInstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) @slow def __lowerCAmelCase ( self ) -> List[str]: """simple docstring""" for model_name in ["roberta-base", "roberta-large"]: with self.subTest(SCREAMING_SNAKE_CASE ): UpperCamelCase = AutoConfig.from_pretrained(SCREAMING_SNAKE_CASE ) self.assertIsNotNone(SCREAMING_SNAKE_CASE ) self.assertIsInstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) UpperCamelCase = FlaxAutoModel.from_pretrained(SCREAMING_SNAKE_CASE ) self.assertIsNotNone(SCREAMING_SNAKE_CASE ) self.assertIsInstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) @slow def __lowerCAmelCase ( self ) -> List[Any]: """simple docstring""" for model_name in ["bert-base-cased", "bert-large-uncased"]: UpperCamelCase = AutoTokenizer.from_pretrained(SCREAMING_SNAKE_CASE ) UpperCamelCase = FlaxBertModel.from_pretrained(SCREAMING_SNAKE_CASE ) UpperCamelCase = tokenizer("Do you support jax jitted function?" , return_tensors=TensorType.JAX ) @jax.jit def eval(**SCREAMING_SNAKE_CASE ): return model(**SCREAMING_SNAKE_CASE ) eval(**SCREAMING_SNAKE_CASE ).block_until_ready() @slow def __lowerCAmelCase ( self ) -> Tuple: """simple docstring""" for model_name in ["roberta-base", "roberta-large"]: UpperCamelCase = AutoTokenizer.from_pretrained(SCREAMING_SNAKE_CASE ) UpperCamelCase = FlaxRobertaModel.from_pretrained(SCREAMING_SNAKE_CASE ) UpperCamelCase = tokenizer("Do you support jax jitted function?" 
, return_tensors=TensorType.JAX ) @jax.jit def eval(**SCREAMING_SNAKE_CASE ): return model(**SCREAMING_SNAKE_CASE ) eval(**SCREAMING_SNAKE_CASE ).block_until_ready() def __lowerCAmelCase ( self ) -> str: """simple docstring""" with self.assertRaisesRegex( SCREAMING_SNAKE_CASE , "bert-base is not a local folder and is not a valid model identifier" ): UpperCamelCase = FlaxAutoModel.from_pretrained("bert-base" ) def __lowerCAmelCase ( self ) -> Tuple: """simple docstring""" with self.assertRaisesRegex( SCREAMING_SNAKE_CASE , R"aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)" ): UpperCamelCase = FlaxAutoModel.from_pretrained(SCREAMING_SNAKE_CASE , revision="aaaaaa" ) def __lowerCAmelCase ( self ) -> Any: """simple docstring""" with self.assertRaisesRegex( SCREAMING_SNAKE_CASE , "hf-internal-testing/config-no-model does not appear to have a file named flax_model.msgpack" , ): UpperCamelCase = FlaxAutoModel.from_pretrained("hf-internal-testing/config-no-model" ) def __lowerCAmelCase ( self ) -> int: """simple docstring""" with self.assertRaisesRegex(SCREAMING_SNAKE_CASE , "Use `from_pt=True` to load this model" ): UpperCamelCase = FlaxAutoModel.from_pretrained("hf-internal-testing/tiny-bert-pt-only" )
414
import argparse from pathlib import Path import torch from transformers import OPTConfig, OPTModel from transformers.utils import logging logging.set_verbosity_info() logger = logging.get_logger(__name__) def load_checkpoint(checkpoint_path): sd = torch.load(checkpoint_path, map_location="cpu") if "model" in sd.keys(): sd = torch.load(checkpoint_path, map_location="cpu")["model"] # pop unnecessary weights keys_to_delete = [ "decoder.version", "decoder.output_projection.weight", ] for key in keys_to_delete: if key in sd: sd.pop(key) keys_to_rename = { "decoder.project_in_dim.weight": "decoder.project_in.weight", "decoder.project_out_dim.weight": "decoder.project_out.weight", "decoder.layer_norm.weight": "decoder.final_layer_norm.weight", "decoder.layer_norm.bias": "decoder.final_layer_norm.bias", } for old_key, new_key in keys_to_rename.items(): if old_key in sd: sd[new_key] = sd.pop(old_key) keys = list(sd.keys()) for key in keys: if ".qkv_proj." in key: value = sd[key] # We split QKV in separate Q,K,V q_name = key.replace(".qkv_proj.", ".q_proj.") k_name = key.replace(".qkv_proj.", ".k_proj.") v_name = key.replace(".qkv_proj.", ".v_proj.") depth = value.shape[0] assert depth % 3 == 0 # `SequeuceParallelTransformerBlock` has its QKV weight separated in K,V,Q despite the naming: # https://cs.github.com/facebookresearch/metaseq/blob/51871bd73cd04c038f239ea2a26db1d7f6b37927/metaseq/modules/sequence_parallel_transformer_layer.py#L97 k, v, q = torch.split(value, depth // 3, dim=0) sd[q_name] = q sd[k_name] = k sd[v_name] = v del sd[key] return sd @torch.no_grad() def convert_opt_checkpoint(checkpoint_path, pytorch_dump_folder_path, config=None): state_dict = load_checkpoint(checkpoint_path) if config is not None: config = OPTConfig.from_pretrained(config) else: config = OPTConfig() model = OPTModel(config).half().eval() model.load_state_dict(state_dict) # Check results Path(pytorch_dump_folder_path).mkdir(exist_ok=True) model.save_pretrained(pytorch_dump_folder_path) if __name__ == "__main__": parser = argparse.ArgumentParser() # Required parameters parser.add_argument( "--fairseq_path", type=str, help=( "path to fairseq checkpoint in correct format. You can find all checkpoints in the correct format here:" " https://huggingface.co/models?other=opt_metasq" ), ) parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.") parser.add_argument("--hf_config", default=None, type=str, help="Define HF config.") args = parser.parse_args() convert_opt_checkpoint(args.fairseq_path, args.pytorch_dump_folder_path, config=args.hf_config)
414
1
import numpy as np # Importing the Keras libraries and packages import tensorflow as tf from tensorflow.keras import layers, models if __name__ == "__main__": # Initialising the CNN # (Sequential- Building the model layer by layer) classifier = models.Sequential() # Step 1 - Convolution # Here 64,64 is the length & breadth of dataset images and 3 is for the RGB channel # (3,3) is the kernel size (filter matrix) classifier.add( layers.Conv2D(32, (3, 3), input_shape=(64, 64, 3), activation='relu') ) # Step 2 - Pooling classifier.add(layers.MaxPooling2D(pool_size=(2, 2))) # Adding a second convolutional layer classifier.add(layers.Conv2D(32, (3, 3), activation='relu')) classifier.add(layers.MaxPooling2D(pool_size=(2, 2))) # Step 3 - Flattening classifier.add(layers.Flatten()) # Step 4 - Full connection classifier.add(layers.Dense(units=128, activation='relu')) classifier.add(layers.Dense(units=1, activation='sigmoid')) # Compiling the CNN classifier.compile( optimizer='adam', loss='binary_crossentropy', metrics=['accuracy'] ) # Part 2 - Fitting the CNN to the images # Load Trained model weights # from keras.models import load_model # regressor=load_model('cnn.h5') train_datagen = tf.keras.preprocessing.image.ImageDataGenerator( rescale=1.0 / 255, shear_range=0.2, zoom_range=0.2, horizontal_flip=True ) test_datagen = tf.keras.preprocessing.image.ImageDataGenerator(rescale=1.0 / 255) training_set = train_datagen.flow_from_directory( 'dataset/training_set', target_size=(64, 64), batch_size=32, class_mode='binary' ) test_set = test_datagen.flow_from_directory( 'dataset/test_set', target_size=(64, 64), batch_size=32, class_mode='binary' ) classifier.fit_generator( training_set, steps_per_epoch=5, epochs=30, validation_data=test_set ) classifier.save('cnn.h5') # Part 3 - Making new predictions test_image = tf.keras.preprocessing.image.load_img( 'dataset/single_prediction/image.png', target_size=(64, 64) ) test_image = tf.keras.preprocessing.image.img_to_array(test_image) test_image = np.expand_dims(test_image, axis=0) result = classifier.predict(test_image) # training_set.class_indices if result[0][0] == 0: prediction = 'Normal' if result[0][0] == 1: prediction = 'Abnormality detected'
688
'''simple docstring''' import argparse from transformers import ( TapasConfig, TapasForMaskedLM, TapasForQuestionAnswering, TapasForSequenceClassification, TapasModel, TapasTokenizer, load_tf_weights_in_tapas, ) from transformers.utils import logging logging.set_verbosity_info() def __a ( lowerCAmelCase__ : Optional[Any] , lowerCAmelCase__ : Dict , lowerCAmelCase__ : Optional[Any] , lowerCAmelCase__ : Union[str, Any] , lowerCAmelCase__ : str ): # Initialise PyTorch model. # If you want to convert a checkpoint that uses absolute position embeddings, make sure to set reset_position_index_per_cell of # TapasConfig to False. # initialize configuration from json file a__ : Dict = TapasConfig.from_json_file(lowerCAmelCase__ ) # set absolute/relative position embeddings parameter a__ : List[Any] = reset_position_index_per_cell # set remaining parameters of TapasConfig as well as the model based on the task if task == "SQA": a__ : Optional[Any] = TapasForQuestionAnswering(config=lowerCAmelCase__ ) elif task == "WTQ": # run_task_main.py hparams a__ : List[str] = 4 a__ : Optional[int] = True # hparam_utils.py hparams a__ : List[Any] = 0.664694 a__ : List[Any] = 0.207951 a__ : Union[str, Any] = 0.121194 a__ : Optional[Any] = True a__ : Optional[int] = True a__ : List[str] = False a__ : Union[str, Any] = 0.0352513 a__ : Any = TapasForQuestionAnswering(config=lowerCAmelCase__ ) elif task == "WIKISQL_SUPERVISED": # run_task_main.py hparams a__ : Tuple = 4 a__ : Dict = False # hparam_utils.py hparams a__ : str = 36.4519 a__ : str = 0.903421 a__ : Optional[Any] = 222.088 a__ : Dict = True a__ : Dict = True a__ : Dict = True a__ : str = 0.763141 a__ : List[Any] = TapasForQuestionAnswering(config=lowerCAmelCase__ ) elif task == "TABFACT": a__ : List[str] = TapasForSequenceClassification(config=lowerCAmelCase__ ) elif task == "MLM": a__ : Tuple = TapasForMaskedLM(config=lowerCAmelCase__ ) elif task == "INTERMEDIATE_PRETRAINING": a__ : List[str] = TapasModel(config=lowerCAmelCase__ ) else: raise ValueError(F'Task {task} not supported.' ) print(F'Building PyTorch model from configuration: {config}' ) # Load weights from tf checkpoint load_tf_weights_in_tapas(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) # Save pytorch-model (weights and configuration) print(F'Save PyTorch model to {pytorch_dump_path}' ) model.save_pretrained(lowerCAmelCase__ ) # Save tokenizer files print(F'Save tokenizer files to {pytorch_dump_path}' ) a__ : Optional[Any] = TapasTokenizer(vocab_file=tf_checkpoint_path[:-10] + '''vocab.txt''' , model_max_length=512 ) tokenizer.save_pretrained(lowerCAmelCase__ ) print('''Used relative position embeddings:''' , model.config.reset_position_index_per_cell ) if __name__ == "__main__": __SCREAMING_SNAKE_CASE = argparse.ArgumentParser() # Required parameters parser.add_argument( '--task', default='SQA', type=str, help='Model task for which to convert a checkpoint. Defaults to SQA.' ) parser.add_argument( '--reset_position_index_per_cell', default=False, action='store_true', help='Whether to use relative position embeddings or not. Defaults to True.', ) parser.add_argument( '--tf_checkpoint_path', default=None, type=str, required=True, help='Path to the TensorFlow checkpoint path.' ) parser.add_argument( '--tapas_config_file', default=None, type=str, required=True, help=( 'The config json file corresponding to the pre-trained TAPAS model. \n' 'This specifies the model architecture.' 
), ) parser.add_argument( '--pytorch_dump_path', default=None, type=str, required=True, help='Path to the output PyTorch model.' ) __SCREAMING_SNAKE_CASE = parser.parse_args() convert_tf_checkpoint_to_pytorch( args.task, args.reset_position_index_per_cell, args.tf_checkpoint_path, args.tapas_config_file, args.pytorch_dump_path, )
688
1
import argparse import json import numpy import torch from transformers.models.xlm.tokenization_xlm import VOCAB_FILES_NAMES from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging logging.set_verbosity_info() def convert_xlm_checkpoint_to_pytorch(xlm_checkpoint_path, pytorch_dump_folder_path): # Load checkpoint chkpt = torch.load(xlm_checkpoint_path, map_location='cpu') state_dict = chkpt['model'] # We have the base model one level deeper than the original XLM repository two_levels_state_dict = {} for k, v in state_dict.items(): if "pred_layer" in k: two_levels_state_dict[k] = v else: two_levels_state_dict['transformer.' + k] = v config = chkpt['params'] config = {n: v for n, v in config.items() if not isinstance(v, (torch.FloatTensor, numpy.ndarray))} vocab = chkpt['dico_word2id'] vocab = {s + '</w>' if s.find('@@') == -1 and i > 13 else s.replace('@@', ''): i for s, i in vocab.items()} # Save pytorch-model pytorch_weights_dump_path = pytorch_dump_folder_path + '/' + WEIGHTS_NAME pytorch_config_dump_path = pytorch_dump_folder_path + '/' + CONFIG_NAME pytorch_vocab_dump_path = pytorch_dump_folder_path + '/' + VOCAB_FILES_NAMES['vocab_file'] print(f'Save PyTorch model to {pytorch_weights_dump_path}') torch.save(two_levels_state_dict, pytorch_weights_dump_path) print(f'Save configuration file to {pytorch_config_dump_path}') with open(pytorch_config_dump_path, 'w', encoding='utf-8') as f: f.write(json.dumps(config, indent=2) + '\n') print(f'Save vocab file to {pytorch_vocab_dump_path}') with open(pytorch_vocab_dump_path, 'w', encoding='utf-8') as f: f.write(json.dumps(vocab, indent=2) + '\n') if __name__ == "__main__": parser = argparse.ArgumentParser() # Required parameters parser.add_argument("--xlm_checkpoint_path", default=None, type=str, required=True, help="Path the official PyTorch dump.") parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model.") args = parser.parse_args() convert_xlm_checkpoint_to_pytorch(args.xlm_checkpoint_path, args.pytorch_dump_folder_path)
633
from numpy import exp, pi, sqrt def gaussian(x, mu: float = 0.0, sigma: float = 1.0): return 1 / sqrt(2 * pi * sigma**2) * exp(-((x - mu) ** 2) / (2 * sigma**2)) if __name__ == "__main__": import doctest doctest.testmod()
633
1
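A quick numerical sanity check on the density row above (assuming the restored name `gaussian`): at the mean the density equals 1/(sigma*sqrt(2*pi)), about 0.3989422804 for the standard normal.

# assumes gaussian(x, mu, sigma) as defined in the row above
from numpy import pi, sqrt
assert abs(gaussian(0.0) - 1 / sqrt(2 * pi)) < 1e-12  # standard normal peak ≈ 0.3989422804
assert abs(gaussian(1.0, mu=1.0, sigma=2.0) - 1 / sqrt(8 * pi)) < 1e-12  # peak scales as 1/sigma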
"""simple docstring""" import functools import operator from ...configuration_utils import PretrainedConfig from ...utils import logging __A = logging.get_logger(__name__) __A = { """microsoft/wavlm-base""": """https://huggingface.co/microsoft/wavlm-base/resolve/main/config.json""", # See all WavLM models at https://huggingface.co/models?filter=wavlm } class _lowerCAmelCase ( a ): """simple docstring""" __magic_name__ :Dict = """wavlm""" def __init__( self , __UpperCAmelCase=3_2 , __UpperCAmelCase=7_6_8 , __UpperCAmelCase=1_2 , __UpperCAmelCase=1_2 , __UpperCAmelCase=3_0_7_2 , __UpperCAmelCase="gelu" , __UpperCAmelCase=0.1 , __UpperCAmelCase=0.1 , __UpperCAmelCase=0.1 , __UpperCAmelCase=0.0 , __UpperCAmelCase=0.1 , __UpperCAmelCase=0.1 , __UpperCAmelCase=0.02 , __UpperCAmelCase=1E-5 , __UpperCAmelCase="group" , __UpperCAmelCase="gelu" , __UpperCAmelCase=(5_1_2, 5_1_2, 5_1_2, 5_1_2, 5_1_2, 5_1_2, 5_1_2) , __UpperCAmelCase=(5, 2, 2, 2, 2, 2, 2) , __UpperCAmelCase=(1_0, 3, 3, 3, 3, 2, 2) , __UpperCAmelCase=False , __UpperCAmelCase=1_2_8 , __UpperCAmelCase=1_6 , __UpperCAmelCase=3_2_0 , __UpperCAmelCase=8_0_0 , __UpperCAmelCase=False , __UpperCAmelCase=True , __UpperCAmelCase=0.05 , __UpperCAmelCase=1_0 , __UpperCAmelCase=2 , __UpperCAmelCase=0.0 , __UpperCAmelCase=1_0 , __UpperCAmelCase=3_2_0 , __UpperCAmelCase=2 , __UpperCAmelCase=0.1 , __UpperCAmelCase=1_0_0 , __UpperCAmelCase=2_5_6 , __UpperCAmelCase=2_5_6 , __UpperCAmelCase=0.1 , __UpperCAmelCase="mean" , __UpperCAmelCase=False , __UpperCAmelCase=False , __UpperCAmelCase=2_5_6 , __UpperCAmelCase=(5_1_2, 5_1_2, 5_1_2, 5_1_2, 1_5_0_0) , __UpperCAmelCase=(5, 3, 3, 1, 1) , __UpperCAmelCase=(1, 2, 3, 1, 1) , __UpperCAmelCase=5_1_2 , __UpperCAmelCase=8_0 , __UpperCAmelCase=0 , __UpperCAmelCase=1 , __UpperCAmelCase=2 , __UpperCAmelCase=False , __UpperCAmelCase=3 , __UpperCAmelCase=2 , __UpperCAmelCase=3 , __UpperCAmelCase=None , **__UpperCAmelCase , ): '''simple docstring''' super().__init__(**__UpperCAmelCase , pad_token_id=__UpperCAmelCase , bos_token_id=__UpperCAmelCase , eos_token_id=__UpperCAmelCase ) lowerCAmelCase__ :int = hidden_size lowerCAmelCase__ :Optional[Any] = feat_extract_norm lowerCAmelCase__ :List[str] = feat_extract_activation lowerCAmelCase__ :List[Any] = list(__UpperCAmelCase ) lowerCAmelCase__ :int = list(__UpperCAmelCase ) lowerCAmelCase__ :List[str] = list(__UpperCAmelCase ) lowerCAmelCase__ :str = conv_bias lowerCAmelCase__ :Any = num_buckets lowerCAmelCase__ :Optional[Any] = max_bucket_distance lowerCAmelCase__ :str = num_conv_pos_embeddings lowerCAmelCase__ :int = num_conv_pos_embedding_groups lowerCAmelCase__ :Dict = len(self.conv_dim ) lowerCAmelCase__ :int = num_hidden_layers lowerCAmelCase__ :int = intermediate_size lowerCAmelCase__ :List[str] = hidden_act lowerCAmelCase__ :Any = num_attention_heads lowerCAmelCase__ :Optional[Any] = hidden_dropout lowerCAmelCase__ :Union[str, Any] = attention_dropout lowerCAmelCase__ :Tuple = activation_dropout lowerCAmelCase__ :Union[str, Any] = feat_proj_dropout lowerCAmelCase__ :Optional[Any] = final_dropout lowerCAmelCase__ :int = layerdrop lowerCAmelCase__ :Union[str, Any] = layer_norm_eps lowerCAmelCase__ :int = initializer_range lowerCAmelCase__ :Any = num_ctc_classes lowerCAmelCase__ :int = vocab_size lowerCAmelCase__ :Dict = do_stable_layer_norm lowerCAmelCase__ :Tuple = use_weighted_layer_sum lowerCAmelCase__ :Dict = classifier_proj_size if ( (len(self.conv_stride ) != self.num_feat_extract_layers) or (len(self.conv_kernel ) != self.num_feat_extract_layers) or 
(len(self.conv_dim ) != self.num_feat_extract_layers) ): raise ValueError( 'Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` ==' ' `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) =' F" {len(self.conv_dim )}`, `len(config.conv_stride) = {len(self.conv_stride )}`," F" `len(config.conv_kernel) = {len(self.conv_kernel )}`." ) # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779 lowerCAmelCase__ :str = apply_spec_augment lowerCAmelCase__ :Any = mask_time_prob lowerCAmelCase__ :Union[str, Any] = mask_time_length lowerCAmelCase__ :List[Any] = mask_time_min_masks lowerCAmelCase__ :Dict = mask_feature_prob lowerCAmelCase__ :Union[str, Any] = mask_feature_length # parameters for pretraining with codevector quantized representations lowerCAmelCase__ :Optional[Any] = num_codevectors_per_group lowerCAmelCase__ :List[str] = num_codevector_groups lowerCAmelCase__ :Union[str, Any] = contrastive_logits_temperature lowerCAmelCase__ :List[str] = num_negatives lowerCAmelCase__ :Union[str, Any] = codevector_dim lowerCAmelCase__ :Optional[Any] = proj_codevector_dim lowerCAmelCase__ :Optional[int] = diversity_loss_weight # ctc loss lowerCAmelCase__ :Any = ctc_loss_reduction lowerCAmelCase__ :str = ctc_zero_infinity # adapter lowerCAmelCase__ :Optional[int] = add_adapter lowerCAmelCase__ :Tuple = adapter_kernel_size lowerCAmelCase__ :Tuple = adapter_stride lowerCAmelCase__ :List[Any] = num_adapter_layers lowerCAmelCase__ :Dict = output_hidden_size or hidden_size # SequenceClassification-specific parameter. Feel free to ignore for other classes. lowerCAmelCase__ :Optional[Any] = classifier_proj_size # XVector-specific parameters. Feel free to ignore for other classes. lowerCAmelCase__ :int = list(__UpperCAmelCase ) lowerCAmelCase__ :Tuple = list(__UpperCAmelCase ) lowerCAmelCase__ :Any = list(__UpperCAmelCase ) lowerCAmelCase__ :Any = xvector_output_dim @property def snake_case ( self ): '''simple docstring''' return functools.reduce(operator.mul , self.conv_stride , 1 )
93
'''simple docstring''' import inspect import unittest from transformers import ViTMSNConfig from transformers.testing_utils import require_torch, require_vision, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import ViTMSNForImageClassification, ViTMSNModel from transformers.models.vit_msn.modeling_vit_msn import VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import ViTImageProcessor class lowerCamelCase__ : """simple docstring""" def __init__( self : str ,a__ : Union[str, Any] ,a__ : Any=13 ,a__ : Dict=30 ,a__ : Union[str, Any]=2 ,a__ : Optional[Any]=3 ,a__ : List[Any]=True ,a__ : str=True ,a__ : Tuple=32 ,a__ : Any=5 ,a__ : Dict=4 ,a__ : Dict=37 ,a__ : List[Any]="gelu" ,a__ : List[Any]=0.1 ,a__ : Union[str, Any]=0.1 ,a__ : Optional[int]=10 ,a__ : Dict=0.02 ,a__ : List[str]=None ,): a__ = parent a__ = batch_size a__ = image_size a__ = patch_size a__ = num_channels a__ = is_training a__ = use_labels a__ = hidden_size a__ = num_hidden_layers a__ = num_attention_heads a__ = intermediate_size a__ = hidden_act a__ = hidden_dropout_prob a__ = attention_probs_dropout_prob a__ = type_sequence_label_size a__ = initializer_range a__ = scope # in ViT MSN, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token) a__ = (image_size // patch_size) ** 2 a__ = num_patches + 1 def lowerCAmelCase_ ( self : Dict ): a__ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) a__ = None if self.use_labels: a__ = ids_tensor([self.batch_size] ,self.type_sequence_label_size ) a__ = self.get_config() return config, pixel_values, labels def lowerCAmelCase_ ( self : Tuple ): return ViTMSNConfig( image_size=self.image_size ,patch_size=self.patch_size ,num_channels=self.num_channels ,hidden_size=self.hidden_size ,num_hidden_layers=self.num_hidden_layers ,num_attention_heads=self.num_attention_heads ,intermediate_size=self.intermediate_size ,hidden_act=self.hidden_act ,hidden_dropout_prob=self.hidden_dropout_prob ,attention_probs_dropout_prob=self.attention_probs_dropout_prob ,initializer_range=self.initializer_range ,) def lowerCAmelCase_ ( self : str ,a__ : Any ,a__ : Tuple ,a__ : Optional[Any] ): a__ = ViTMSNModel(config=a__ ) model.to(a__ ) model.eval() a__ = model(a__ ) self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) ) def lowerCAmelCase_ ( self : List[Any] ,a__ : Union[str, Any] ,a__ : List[Any] ,a__ : List[str] ): a__ = self.type_sequence_label_size a__ = ViTMSNForImageClassification(a__ ) model.to(a__ ) model.eval() a__ = model(a__ ,labels=a__ ) print("Pixel and labels shape: {pixel_values.shape}, {labels.shape}" ) print("Labels: {labels}" ) self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.type_sequence_label_size) ) # test greyscale images a__ = 1 a__ = ViTMSNForImageClassification(a__ ) model.to(a__ ) model.eval() a__ = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] ) a__ = model(a__ ) self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.type_sequence_label_size) ) def lowerCAmelCase_ ( self : Union[str, Any] ): a__ = self.prepare_config_and_inputs() a__ , a__ , 
a__ = config_and_inputs a__ = {"pixel_values": pixel_values} return config, inputs_dict @require_torch class lowerCamelCase__ ( __lowerCamelCase , __lowerCamelCase , unittest.TestCase ): """simple docstring""" UpperCamelCase__ = (ViTMSNModel, ViTMSNForImageClassification) if is_torch_available() else () UpperCamelCase__ = ( {'''feature-extraction''': ViTMSNModel, '''image-classification''': ViTMSNForImageClassification} if is_torch_available() else {} ) UpperCamelCase__ = False UpperCamelCase__ = False UpperCamelCase__ = False UpperCamelCase__ = False def lowerCAmelCase_ ( self : List[Any] ): a__ = ViTMSNModelTester(self ) a__ = ConfigTester(self ,config_class=a__ ,has_text_modality=a__ ,hidden_size=37 ) def lowerCAmelCase_ ( self : Tuple ): self.config_tester.run_common_tests() @unittest.skip(reason="ViTMSN does not use inputs_embeds" ) def lowerCAmelCase_ ( self : Optional[Any] ): pass def lowerCAmelCase_ ( self : Optional[int] ): a__ , a__ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: a__ = model_class(a__ ) self.assertIsInstance(model.get_input_embeddings() ,(nn.Module) ) a__ = model.get_output_embeddings() self.assertTrue(x is None or isinstance(a__ ,nn.Linear ) ) def lowerCAmelCase_ ( self : Union[str, Any] ): a__ , a__ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: a__ = model_class(a__ ) a__ = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic a__ = [*signature.parameters.keys()] a__ = ["pixel_values"] self.assertListEqual(arg_names[:1] ,a__ ) def lowerCAmelCase_ ( self : Optional[Any] ): a__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*a__ ) def lowerCAmelCase_ ( self : Optional[int] ): a__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*a__ ) @slow def lowerCAmelCase_ ( self : Optional[int] ): for model_name in VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: a__ = ViTMSNModel.from_pretrained(a__ ) self.assertIsNotNone(a__ ) def _lowerCAmelCase (): """simple docstring""" a__ = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" ) return image @require_torch @require_vision class lowerCamelCase__ ( unittest.TestCase ): """simple docstring""" @cached_property def lowerCAmelCase_ ( self : Dict ): return ViTImageProcessor.from_pretrained("facebook/vit-msn-small" ) if is_vision_available() else None @slow def lowerCAmelCase_ ( self : Union[str, Any] ): torch.manual_seed(2 ) a__ = ViTMSNForImageClassification.from_pretrained("facebook/vit-msn-small" ).to(a__ ) a__ = self.default_image_processor a__ = prepare_img() a__ = image_processor(images=a__ ,return_tensors="pt" ).to(a__ ) # forward pass with torch.no_grad(): a__ = model(**a__ ) # verify the logits a__ = torch.Size((1, 10_00) ) self.assertEqual(outputs.logits.shape ,a__ ) a__ = torch.tensor([-0.0803, -0.4454, -0.2375] ).to(a__ ) self.assertTrue(torch.allclose(outputs.logits[0, :3] ,a__ ,atol=1e-4 ) )
331
0
"""simple docstring""" import unittest import numpy as np from transformers.testing_utils import is_flaky, require_torch, require_vision from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import DonutImageProcessor class _UpperCamelCase ( unittest.TestCase ): '''simple docstring''' def __init__( self , __a , __a=7 , __a=3 , __a=18 , __a=30 , __a=4_00 , __a=True , __a=None , __a=True , __a=False , __a=True , __a=True , __a=[0.5, 0.5, 0.5] , __a=[0.5, 0.5, 0.5] , ): __lowerCAmelCase = parent __lowerCAmelCase = batch_size __lowerCAmelCase = num_channels __lowerCAmelCase = image_size __lowerCAmelCase = min_resolution __lowerCAmelCase = max_resolution __lowerCAmelCase = do_resize __lowerCAmelCase = size if size is not None else {"height": 18, "width": 20} __lowerCAmelCase = do_thumbnail __lowerCAmelCase = do_align_axis __lowerCAmelCase = do_pad __lowerCAmelCase = do_normalize __lowerCAmelCase = image_mean __lowerCAmelCase = image_std def snake_case ( self ): return { "do_resize": self.do_resize, "size": self.size, "do_thumbnail": self.do_thumbnail, "do_align_long_axis": self.do_align_axis, "do_pad": self.do_pad, "do_normalize": self.do_normalize, "image_mean": self.image_mean, "image_std": self.image_std, } @require_torch @require_vision class _UpperCamelCase ( lowerCAmelCase__ ,unittest.TestCase ): '''simple docstring''' __UpperCAmelCase : Optional[Any] =DonutImageProcessor if is_vision_available() else None def snake_case ( self ): __lowerCAmelCase = DonutImageProcessingTester(self ) @property def snake_case ( self ): return self.image_processor_tester.prepare_image_processor_dict() def snake_case ( self ): __lowerCAmelCase = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(__a , "do_resize" ) ) self.assertTrue(hasattr(__a , "size" ) ) self.assertTrue(hasattr(__a , "do_thumbnail" ) ) self.assertTrue(hasattr(__a , "do_align_long_axis" ) ) self.assertTrue(hasattr(__a , "do_pad" ) ) self.assertTrue(hasattr(__a , "do_normalize" ) ) self.assertTrue(hasattr(__a , "image_mean" ) ) self.assertTrue(hasattr(__a , "image_std" ) ) def snake_case ( self ): __lowerCAmelCase = self.image_processing_class.from_dict(self.image_processor_dict ) self.assertEqual(image_processor.size , {"height": 18, "width": 20} ) __lowerCAmelCase = self.image_processing_class.from_dict(self.image_processor_dict , size=42 ) self.assertEqual(image_processor.size , {"height": 42, "width": 42} ) # Previous config had dimensions in (width, height) order __lowerCAmelCase = self.image_processing_class.from_dict(self.image_processor_dict , size=(42, 84) ) self.assertEqual(image_processor.size , {"height": 84, "width": 42} ) def snake_case ( self ): pass @is_flaky() def snake_case ( self ): __lowerCAmelCase = self.image_processing_class(**self.image_processor_dict ) # create random PIL images __lowerCAmelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=__a ) for image in image_inputs: self.assertIsInstance(__a , Image.Image ) # Test not batched input __lowerCAmelCase = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.size["height"], self.image_processor_tester.size["width"], ) , ) # Test batched __lowerCAmelCase = 
image_processing(__a , return_tensors="pt" ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.size["height"], self.image_processor_tester.size["width"], ) , ) @is_flaky() def snake_case ( self ): __lowerCAmelCase = self.image_processing_class(**self.image_processor_dict ) # create random numpy tensors __lowerCAmelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=__a , numpify=__a ) for image in image_inputs: self.assertIsInstance(__a , np.ndarray ) # Test not batched input __lowerCAmelCase = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.size["height"], self.image_processor_tester.size["width"], ) , ) # Test batched __lowerCAmelCase = image_processing(__a , return_tensors="pt" ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.size["height"], self.image_processor_tester.size["width"], ) , ) @is_flaky() def snake_case ( self ): __lowerCAmelCase = self.image_processing_class(**self.image_processor_dict ) # create random PyTorch tensors __lowerCAmelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=__a , torchify=__a ) for image in image_inputs: self.assertIsInstance(__a , torch.Tensor ) # Test not batched input __lowerCAmelCase = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.size["height"], self.image_processor_tester.size["width"], ) , ) # Test batched __lowerCAmelCase = image_processing(__a , return_tensors="pt" ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.size["height"], self.image_processor_tester.size["width"], ) , )
701
"""simple docstring""" from itertools import product def _lowerCamelCase ( _UpperCamelCase , _UpperCamelCase ): '''simple docstring''' __lowerCAmelCase = sides_number __lowerCAmelCase = max_face_number * dice_number __lowerCAmelCase = [0] * (max_total + 1) __lowerCAmelCase = 1 __lowerCAmelCase = range(_UpperCamelCase , max_face_number + 1 ) for dice_numbers in product(_UpperCamelCase , repeat=_UpperCamelCase ): __lowerCAmelCase = sum(_UpperCamelCase ) totals_frequencies[total] += 1 return totals_frequencies def _lowerCamelCase ( ): '''simple docstring''' __lowerCAmelCase = total_frequency_distribution( sides_number=4 , dice_number=9 ) __lowerCAmelCase = total_frequency_distribution( sides_number=6 , dice_number=6 ) __lowerCAmelCase = 0 __lowerCAmelCase = 9 __lowerCAmelCase = 4 * 9 __lowerCAmelCase = 6 for peter_total in range(_UpperCamelCase , max_peter_total + 1 ): peter_wins_count += peter_totals_frequencies[peter_total] * sum( colin_totals_frequencies[min_colin_total:peter_total] ) __lowerCAmelCase = (4**9) * (6**6) __lowerCAmelCase = peter_wins_count / total_games_number __lowerCAmelCase = round(_UpperCamelCase , ndigits=7 ) return rounded_peter_win_probability if __name__ == "__main__": print(f'''{solution() = }''')
282
0
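The Project Euler #205 row above counts Peter's wins by convolving the two frequency tables: P(win) = Σ_t fP(t) · Σ_{s<t} fC(s) / (4^9 · 6^6), since Peter wins exactly when his total strictly exceeds Colin's. A small self-check under the restored names (the 0.5731441 figure is the commonly cited answer for this problem and should be treated as an assumption, not a verified value):

# assumes total_frequency_distribution/solution as restored above
freqs = total_frequency_distribution(sides_number=4, dice_number=9)
assert sum(freqs) == 4**9  # every outcome lands in exactly one total bucket
assert solution() == 0.5731441  # assumed Project Euler #205 answer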
"""simple docstring""" import os from shutil import copyfile from typing import List, Optional, Tuple from ...tokenization_utils import AddedToken from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import is_sentencepiece_available, logging if is_sentencepiece_available(): from .tokenization_rembert import RemBertTokenizer else: a =None a =logging.get_logger(__name__) a ={'vocab_file': 'sentencepiece.model', 'tokenizer_file': 'tokenizer.json'} a ={ 'vocab_file': { 'google/rembert': 'https://huggingface.co/google/rembert/resolve/main/sentencepiece.model', }, 'tokenizer_file': { 'google/rembert': 'https://huggingface.co/google/rembert/resolve/main/tokenizer.json', }, } a ={ 'google/rembert': 256, } a ='▁' class __UpperCAmelCase ( __lowerCAmelCase ): A__ : List[Any] = VOCAB_FILES_NAMES A__ : Optional[Any] = PRETRAINED_VOCAB_FILES_MAP A__ : List[Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES A__ : str = RemBertTokenizer def __init__( self , _lowerCamelCase=None , _lowerCamelCase=None , _lowerCamelCase=True , _lowerCamelCase=True , _lowerCamelCase=False , _lowerCamelCase="[CLS]" , _lowerCamelCase="[SEP]" , _lowerCamelCase="<unk>" , _lowerCamelCase="[SEP]" , _lowerCamelCase="<pad>" , _lowerCamelCase="[CLS]" , _lowerCamelCase="[MASK]" , **_lowerCamelCase , ): # Mask token behave like a normal word, i.e. include the space before it lowerCamelCase__ =AddedToken(_lowerCamelCase , lstrip=_lowerCamelCase , rstrip=_lowerCamelCase ) if isinstance(_lowerCamelCase , _lowerCamelCase ) else mask_token super().__init__( _lowerCamelCase , tokenizer_file=_lowerCamelCase , do_lower_case=_lowerCamelCase , remove_space=_lowerCamelCase , keep_accents=_lowerCamelCase , bos_token=_lowerCamelCase , eos_token=_lowerCamelCase , unk_token=_lowerCamelCase , sep_token=_lowerCamelCase , pad_token=_lowerCamelCase , cls_token=_lowerCamelCase , mask_token=_lowerCamelCase , **_lowerCamelCase , ) lowerCamelCase__ =do_lower_case lowerCamelCase__ =remove_space lowerCamelCase__ =keep_accents lowerCamelCase__ =vocab_file lowerCamelCase__ =False if not self.vocab_file else True def _a ( self , _lowerCamelCase , _lowerCamelCase = None ): lowerCamelCase__ =[self.sep_token_id] lowerCamelCase__ =[self.cls_token_id] if token_ids_a is None: return cls + token_ids_a + sep return cls + token_ids_a + sep + token_ids_a + sep def _a ( self , _lowerCamelCase , _lowerCamelCase = None , _lowerCamelCase = False ): if already_has_special_tokens: if token_ids_a is not None: raise ValueError( "You should not supply a second sequence if the provided sequence of " "ids is already formatted with special tokens for the model." 
) return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_a] if token_ids_a is not None: return [1] + ([0] * len(_lowerCamelCase )) + [1] + ([0] * len(_lowerCamelCase )) + [1] return [1] + ([0] * len(_lowerCamelCase )) + [1] def _a ( self , _lowerCamelCase , _lowerCamelCase = None ): lowerCamelCase__ =[self.sep_token_id] lowerCamelCase__ =[self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] def _a ( self , _lowerCamelCase , _lowerCamelCase = None ): if not os.path.isdir(_lowerCamelCase ): logger.error("Vocabulary path ({}) should be a directory".format(_lowerCamelCase ) ) return lowerCamelCase__ =os.path.join( _lowerCamelCase , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(_lowerCamelCase ): copyfile(self.vocab_file , _lowerCamelCase ) return (out_vocab_file,)
530
"""simple docstring""" from __future__ import annotations a ='#' class __UpperCAmelCase : def __init__( self ): lowerCamelCase__ ={} def _a ( self , _lowerCamelCase ): lowerCamelCase__ =self._trie for char in text: if char not in trie: lowerCamelCase__ ={} lowerCamelCase__ =trie[char] lowerCamelCase__ =True def _a ( self , _lowerCamelCase ): lowerCamelCase__ =self._trie for char in prefix: if char in trie: lowerCamelCase__ =trie[char] else: return [] return self._elements(_lowerCamelCase ) def _a ( self , _lowerCamelCase ): lowerCamelCase__ =[] for c, v in d.items(): lowerCamelCase__ =[" "] if c == END else [(c + s) for s in self._elements(_lowerCamelCase )] result.extend(_lowerCamelCase ) return tuple(_lowerCamelCase ) a =Trie() a =('depart', 'detergent', 'daring', 'dog', 'deer', 'deal') for word in words: trie.insert_word(word) def lowerCamelCase_ ( __lowerCAmelCase ) -> tuple: '''simple docstring''' lowerCamelCase__ =trie.find_word(__lowerCAmelCase ) return tuple(string + word for word in suffixes ) def lowerCamelCase_ ( ) -> None: '''simple docstring''' print(autocomplete_using_trie("de" ) ) if __name__ == "__main__": import doctest doctest.testmod() main()
530
1
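A short usage sketch for the trie row above (assuming the restored names `Trie`, `insert_word`, and `autocomplete_using_trie`): each completion is the prefix plus a stored suffix, with a trailing space marking the end of a word.

# assumes the module-level trie built from ('depart', 'detergent', 'daring', 'dog', 'deer', 'deal')
print(autocomplete_using_trie("de"))   # e.g. ('depart ', 'detergent ', 'deer ', 'deal ')
print(autocomplete_using_trie("dog"))  # ('dog ',)
print(autocomplete_using_trie("x"))    # () - no stored word starts with this prefix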
"""simple docstring""" import os import posixpath import uuid from dataclasses import dataclass from typing import TYPE_CHECKING, Iterable, List, Optional, Tuple, Union import numpy as np import pyarrow as pa import datasets from datasets.arrow_writer import ArrowWriter, ParquetWriter from datasets.config import MAX_SHARD_SIZE from datasets.filesystems import ( is_remote_filesystem, rename, ) from datasets.iterable_dataset import _BaseExamplesIterable from datasets.utils.py_utils import convert_file_size_to_int _A : int = datasets.utils.logging.get_logger(__name__) if TYPE_CHECKING: import pyspark @dataclass class _lowercase ( datasets.BuilderConfig ): lowercase_ = None def UpperCAmelCase ( a_, a_, ): '''simple docstring''' import pyspark def generate_fn(): lowerCamelCase : str = df.select('*', pyspark.sql.functions.spark_partition_id().alias('part_id' ) ) for partition_id in partition_order: lowerCamelCase : Tuple = df_with_partition_id.select('*' ).where(F"""part_id = {partition_id}""" ).drop('part_id' ) lowerCamelCase : Any = partition_df.collect() lowerCamelCase : List[str] = 0 for row in rows: yield F"""{partition_id}_{row_id}""", row.asDict() row_id += 1 return generate_fn class _lowercase ( _BaseExamplesIterable ): def __init__( self , UpperCAmelCase_ , UpperCAmelCase_=None , ) -> Any: lowerCamelCase : Any = df lowerCamelCase : Union[str, Any] = partition_order or range(self.df.rdd.getNumPartitions() ) lowerCamelCase : Dict = _generate_iterable_examples(self.df , self.partition_order ) def __iter__( self ) -> str: yield from self.generate_examples_fn() def _UpperCamelCase ( self , UpperCAmelCase_ ) -> List[str]: lowerCamelCase : Dict = list(range(self.df.rdd.getNumPartitions() ) ) generator.shuffle(__A ) return SparkExamplesIterable(self.df , partition_order=__A ) def _UpperCamelCase ( self , UpperCAmelCase_ , UpperCAmelCase_ ) -> Dict: lowerCamelCase : Union[str, Any] = self.split_shard_indices_by_worker(__A , __A ) return SparkExamplesIterable(self.df , partition_order=__A ) @property def _UpperCamelCase ( self ) -> List[Any]: return len(self.partition_order ) class _lowercase ( datasets.DatasetBuilder ): lowercase_ = SparkConfig def __init__( self , UpperCAmelCase_ , UpperCAmelCase_ = None , UpperCAmelCase_ = None , **UpperCAmelCase_ , ) -> Dict: import pyspark lowerCamelCase : List[str] = pyspark.sql.SparkSession.builder.getOrCreate() lowerCamelCase : int = df lowerCamelCase : Dict = working_dir super().__init__( cache_dir=__A , config_name=str(self.df.semanticHash() ) , **__A , ) def _UpperCamelCase ( self ) -> Tuple: # Returns the path of the created file. def create_cache_and_write_probe(UpperCAmelCase_ ): # makedirs with exist_ok will recursively create the directory. It will not throw an error if directories # already exist. os.makedirs(self._cache_dir , exist_ok=__A ) lowerCamelCase : List[str] = os.path.join(self._cache_dir , 'fs_test' + uuid.uuida().hex ) # Opening the file in append mode will create a new file unless it already exists, in which case it will not # change the file contents. open(__A , 'a' ) return [probe_file] if self._spark.conf.get('spark.master' , '' ).startswith('local' ): return # If the cluster is multi-node, make sure that the user provided a cache_dir and that it is on an NFS # accessible to the driver. # TODO: Stream batches to the driver using ArrowCollectSerializer instead of throwing an error. 
if self._cache_dir: lowerCamelCase : int = ( self._spark.sparkContext.parallelize(range(1 ) , 1 ).mapPartitions(__A ).collect() ) if os.path.isfile(probe[0] ): return raise ValueError( 'When using Dataset.from_spark on a multi-node cluster, the driver and all workers should be able to access cache_dir' ) def _UpperCamelCase ( self ) -> Optional[Any]: return datasets.DatasetInfo(features=self.config.features ) def _UpperCamelCase ( self , UpperCAmelCase_ ) -> Any: return [datasets.SplitGenerator(name=datasets.Split.TRAIN )] def _UpperCamelCase ( self , UpperCAmelCase_ ) -> Optional[int]: import pyspark def get_arrow_batch_size(UpperCAmelCase_ ): for batch in it: yield pa.RecordBatch.from_pydict({'batch_bytes': [batch.nbytes]} ) lowerCamelCase : List[str] = self.df.count() lowerCamelCase : Dict = df_num_rows if df_num_rows <= 100 else 100 # Approximate the size of each row (in Arrow format) by averaging over a max-100-row sample. lowerCamelCase : int = ( self.df.limit(__A ) .repartition(1 ) .mapInArrow(__A , 'batch_bytes: long' ) .agg(pyspark.sql.functions.sum('batch_bytes' ).alias('sample_bytes' ) ) .collect()[0] .sample_bytes / sample_num_rows ) lowerCamelCase : List[str] = approx_bytes_per_row * df_num_rows if approx_total_size > max_shard_size: # Make sure there is at least one row per partition. lowerCamelCase : int = min(__A , int(approx_total_size / max_shard_size ) ) lowerCamelCase : Any = self.df.repartition(__A ) def _UpperCamelCase ( self , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , ) -> Optional[Any]: import pyspark lowerCamelCase : str = ParquetWriter if file_format == "parquet" else ArrowWriter lowerCamelCase : List[str] = os.path.join(self._working_dir , os.path.basename(__A ) ) if self._working_dir else fpath lowerCamelCase : Tuple = file_format == "parquet" # Define these so that we don't reference self in write_arrow, which will result in a pickling error due to # pickling the SparkContext. lowerCamelCase : str = self.config.features lowerCamelCase : Tuple = self._writer_batch_size lowerCamelCase : str = self._fs.storage_options def write_arrow(UpperCAmelCase_ ): # Within the same SparkContext, no two task attempts will share the same attempt ID. lowerCamelCase : Tuple = pyspark.TaskContext().taskAttemptId() lowerCamelCase : int = next(__A , __A ) if first_batch is None: # Some partitions might not receive any data. 
return pa.RecordBatch.from_arrays( [[task_id], [0], [0]] , names=['task_id', 'num_examples', 'num_bytes'] , ) lowerCamelCase : int = 0 lowerCamelCase : Dict = writer_class( features=__A , path=working_fpath.replace('SSSSS' , F"""{shard_id:05d}""" ).replace('TTTTT' , F"""{task_id:05d}""" ) , writer_batch_size=__A , storage_options=__A , embed_local_files=__A , ) lowerCamelCase : Union[str, Any] = pa.Table.from_batches([first_batch] ) writer.write_table(__A ) for batch in it: if max_shard_size is not None and writer._num_bytes >= max_shard_size: lowerCamelCase : Any = writer.finalize() writer.close() yield pa.RecordBatch.from_arrays( [[task_id], [num_examples], [num_bytes]] , names=['task_id', 'num_examples', 'num_bytes'] , ) shard_id += 1 lowerCamelCase : Dict = writer_class( features=writer._features , path=working_fpath.replace('SSSSS' , F"""{shard_id:05d}""" ).replace('TTTTT' , F"""{task_id:05d}""" ) , writer_batch_size=__A , storage_options=__A , embed_local_files=__A , ) lowerCamelCase : int = pa.Table.from_batches([batch] ) writer.write_table(__A ) if writer._num_bytes > 0: lowerCamelCase : List[Any] = writer.finalize() writer.close() yield pa.RecordBatch.from_arrays( [[task_id], [num_examples], [num_bytes]] , names=['task_id', 'num_examples', 'num_bytes'] , ) if working_fpath != fpath: for file in os.listdir(os.path.dirname(__A ) ): lowerCamelCase : Optional[int] = os.path.join(os.path.dirname(__A ) , os.path.basename(__A ) ) shutil.move(__A , __A ) lowerCamelCase : Dict = ( self.df.mapInArrow(__A , 'task_id: long, num_examples: long, num_bytes: long' ) .groupBy('task_id' ) .agg( pyspark.sql.functions.sum('num_examples' ).alias('total_num_examples' ) , pyspark.sql.functions.sum('num_bytes' ).alias('total_num_bytes' ) , pyspark.sql.functions.count('num_bytes' ).alias('num_shards' ) , pyspark.sql.functions.collect_list('num_examples' ).alias('shard_lengths' ) , ) .collect() ) for row in stats: yield row.task_id, (row.total_num_examples, row.total_num_bytes, row.num_shards, row.shard_lengths) def _UpperCamelCase ( self , UpperCAmelCase_ , UpperCAmelCase_ = "arrow" , UpperCAmelCase_ = None , UpperCAmelCase_ = None , **UpperCAmelCase_ , ) -> Any: self._validate_cache_dir() lowerCamelCase : str = convert_file_size_to_int(max_shard_size or MAX_SHARD_SIZE ) self._repartition_df_if_needed(__A ) lowerCamelCase : int = not is_remote_filesystem(self._fs ) lowerCamelCase : List[Any] = os.path.join if is_local else posixpath.join lowerCamelCase : List[Any] = "-TTTTT-SSSSS-of-NNNNN" lowerCamelCase : Optional[int] = F"""{self.name}-{split_generator.name}{SUFFIX}.{file_format}""" lowerCamelCase : Union[str, Any] = path_join(self._output_dir , __A ) lowerCamelCase : Tuple = 0 lowerCamelCase : Tuple = 0 lowerCamelCase : Optional[int] = 0 lowerCamelCase : Union[str, Any] = [] lowerCamelCase : Dict = [] for task_id, content in self._prepare_split_single(__A , __A , __A ): ( lowerCamelCase ) : Optional[Any] = content if num_bytes > 0: total_num_examples += num_examples total_num_bytes += num_bytes total_shards += num_shards task_id_and_num_shards.append((task_id, num_shards) ) all_shard_lengths.extend(__A ) lowerCamelCase : Dict = total_num_examples lowerCamelCase : Tuple = total_num_bytes # should rename everything at the end logger.debug(F"""Renaming {total_shards} shards.""" ) if total_shards > 1: lowerCamelCase : Union[str, Any] = all_shard_lengths # Define fs outside of _rename_shard so that we don't reference self in the function, which will result in a # pickling error due to pickling the 
SparkContext. lowerCamelCase : Union[str, Any] = self._fs # use the -SSSSS-of-NNNNN pattern def _rename_shard( UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , ): rename( __A , fpath.replace('SSSSS' , F"""{shard_id:05d}""" ).replace('TTTTT' , F"""{task_id:05d}""" ) , fpath.replace('TTTTT-SSSSS' , F"""{global_shard_id:05d}""" ).replace('NNNNN' , F"""{total_shards:05d}""" ) , ) lowerCamelCase : Optional[Any] = [] lowerCamelCase : List[str] = 0 for i in range(len(__A ) ): lowerCamelCase : Optional[int] = task_id_and_num_shards[i] for shard_id in range(__A ): args.append([task_id, shard_id, global_shard_id] ) global_shard_id += 1 self._spark.sparkContext.parallelize(__A , len(__A ) ).map(lambda UpperCAmelCase_ : _rename_shard(*__A ) ).collect() else: # don't use any pattern lowerCamelCase : Dict = 0 lowerCamelCase : Optional[int] = task_id_and_num_shards[0][0] self._rename( fpath.replace('SSSSS' , F"""{shard_id:05d}""" ).replace('TTTTT' , F"""{task_id:05d}""" ) , fpath.replace(__A , '' ) , ) def _UpperCamelCase ( self , UpperCAmelCase_ , ) -> Dict: return SparkExamplesIterable(self.df )
703
"""simple docstring""" import unittest import torch from torch import nn from accelerate.test_utils import require_cuda from accelerate.utils.memory import find_executable_batch_size, release_memory def UpperCAmelCase ( ): '''simple docstring''' raise RuntimeError('CUDA out of memory.' ) class _lowercase ( nn.Module ): def __init__( self ) -> Optional[Any]: super().__init__() lowerCamelCase : Dict = nn.Linear(3 , 4 ) lowerCamelCase : Optional[int] = nn.BatchNormad(4 ) lowerCamelCase : List[str] = nn.Linear(4 , 5 ) def _UpperCamelCase ( self , UpperCAmelCase_ ) -> Dict: return self.lineara(self.batchnorm(self.lineara(UpperCAmelCase_ ) ) ) class _lowercase ( unittest.TestCase ): def _UpperCamelCase ( self ) -> Dict: lowerCamelCase : Union[str, Any] = [] @find_executable_batch_size(starting_batch_size=128 ) def mock_training_loop_function(UpperCAmelCase_ ): nonlocal batch_sizes batch_sizes.append(UpperCAmelCase_ ) if batch_size != 8: raise_fake_out_of_memory() mock_training_loop_function() self.assertListEqual(UpperCAmelCase_ , [128, 64, 32, 16, 8] ) def _UpperCamelCase ( self ) -> Any: lowerCamelCase : Optional[Any] = [] @find_executable_batch_size(starting_batch_size=128 ) def mock_training_loop_function(UpperCAmelCase_ , UpperCAmelCase_ ): nonlocal batch_sizes batch_sizes.append(UpperCAmelCase_ ) if batch_size != 8: raise_fake_out_of_memory() return batch_size, arga lowerCamelCase , lowerCamelCase : List[str] = mock_training_loop_function('hello' ) self.assertListEqual(UpperCAmelCase_ , [128, 64, 32, 16, 8] ) self.assertListEqual([bs, arga] , [8, 'hello'] ) def _UpperCamelCase ( self ) -> List[str]: @find_executable_batch_size(starting_batch_size=0 ) def mock_training_loop_function(UpperCAmelCase_ ): pass with self.assertRaises(UpperCAmelCase_ ) as cm: mock_training_loop_function() self.assertIn('No executable batch size found, reached zero.' , cm.exception.args[0] ) def _UpperCamelCase ( self ) -> List[str]: @find_executable_batch_size(starting_batch_size=16 ) def mock_training_loop_function(UpperCAmelCase_ ): if batch_size > 0: raise_fake_out_of_memory() pass with self.assertRaises(UpperCAmelCase_ ) as cm: mock_training_loop_function() self.assertIn('No executable batch size found, reached zero.' , cm.exception.args[0] ) def _UpperCamelCase ( self ) -> Any: @find_executable_batch_size(starting_batch_size=128 ) def mock_training_loop_function(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ ): if batch_size != 8: raise raise_fake_out_of_memory() with self.assertRaises(UpperCAmelCase_ ) as cm: mock_training_loop_function(128 , 'hello' , 'world' ) self.assertIn('Batch size was passed into `f`' , cm.exception.args[0] ) self.assertIn('`f(arg1=\'hello\', arg2=\'world\')' , cm.exception.args[0] ) def _UpperCamelCase ( self ) -> List[str]: @find_executable_batch_size(starting_batch_size=16 ) def mock_training_loop_function(UpperCAmelCase_ ): raise ValueError('Oops, we had an error!' ) with self.assertRaises(UpperCAmelCase_ ) as cm: mock_training_loop_function() self.assertIn('Oops, we had an error!' , cm.exception.args[0] ) @require_cuda def _UpperCamelCase ( self ) -> Union[str, Any]: lowerCamelCase : List[str] = torch.cuda.memory_allocated() lowerCamelCase : Optional[int] = ModelForTest() model.cuda() self.assertGreater(torch.cuda.memory_allocated() , UpperCAmelCase_ ) lowerCamelCase : Tuple = release_memory(UpperCAmelCase_ ) self.assertEqual(torch.cuda.memory_allocated() , UpperCAmelCase_ )
133
0
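The tests above exercise accelerate's `find_executable_batch_size`, which calls the decorated function with a starting batch size and, on a CUDA out-of-memory error, retries with the batch size halved until the call succeeds or reaches zero. A minimal usage sketch (the training body is a placeholder, not from the source):

from accelerate.utils.memory import find_executable_batch_size

@find_executable_batch_size(starting_batch_size=128)
def train(batch_size):
    # `batch_size` is injected by the decorator; letting an OOM escape triggers a retry at batch_size // 2
    print(f"trying batch_size={batch_size}")
    ...  # build dataloaders and run the training loop here

train()  # called without the batch_size argument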
from __future__ import annotations def kmp(pattern: str, text: str) -> bool: # 1) Construct the failure array failure = get_failure_array(pattern) # 2) Step through text searching for pattern i, j = 0, 0 # index into text, pattern while i < len(text): if pattern[j] == text[i]: if j == (len(pattern) - 1): return True j += 1 # if this is a prefix in our pattern just go back far enough to continue elif j > 0: j = failure[j - 1] continue i += 1 return False def get_failure_array(pattern: str) -> list[int]: failure = [0] i = 0 j = 1 while j < len(pattern): if pattern[i] == pattern[j]: i += 1 elif i > 0: i = failure[i - 1] continue j += 1 failure.append(i) return failure if __name__ == "__main__": # Test 1) pattern = 'abc1abc12' text1 = 'alskfjaldsabc1abc1abc12k23adsfabcabc' text2 = 'alskfjaldsk23adsfabcabc' assert kmp(pattern, text1) and not kmp(pattern, text2) # Test 2) pattern = 'ABABX' text = 'ABABZABABYABABX' assert kmp(pattern, text) # Test 3) pattern = 'AAAB' text = 'ABAAAAAB' assert kmp(pattern, text) # Test 4) pattern = 'abcdabcy' text = 'abcxabcdabxabcdabcdabcy' assert kmp(pattern, text) # Test 5) pattern = 'aabaabaaa' assert get_failure_array(pattern) == [0, 1, 0, 1, 2, 3, 4, 5, 2]
284
'''simple docstring''' import argparse import os from pathlib import Path import torch from bark.generation import _load_model as _bark_load_model from huggingface_hub import hf_hub_download from transformers import EncodecConfig, EncodecModel, set_seed from transformers.models.bark.configuration_bark import ( BarkCoarseConfig, BarkConfig, BarkFineConfig, BarkSemanticConfig, ) from transformers.models.bark.generation_configuration_bark import ( BarkCoarseGenerationConfig, BarkFineGenerationConfig, BarkGenerationConfig, BarkSemanticGenerationConfig, ) from transformers.models.bark.modeling_bark import BarkCoarseModel, BarkFineModel, BarkModel, BarkSemanticModel from transformers.utils import logging logging.set_verbosity_info() a__ : Optional[int] =logging.get_logger(__name__) set_seed(770) a__ : str ={ '''c_attn''': '''att_proj''', '''c_proj''': '''out_proj''', '''c_fc''': '''in_proj''', '''transformer.''': '''''', '''h.''': '''layers.''', '''ln_1''': '''layernorm_1''', '''ln_2''': '''layernorm_2''', '''ln_f''': '''layernorm_final''', '''wpe''': '''position_embeds_layer''', '''wte''': '''input_embeds_layer''', } a__ : str ={ '''text_small''': { '''repo_id''': '''suno/bark''', '''file_name''': '''text.pt''', }, '''coarse_small''': { '''repo_id''': '''suno/bark''', '''file_name''': '''coarse.pt''', }, '''fine_small''': { '''repo_id''': '''suno/bark''', '''file_name''': '''fine.pt''', }, '''text''': { '''repo_id''': '''suno/bark''', '''file_name''': '''text_2.pt''', }, '''coarse''': { '''repo_id''': '''suno/bark''', '''file_name''': '''coarse_2.pt''', }, '''fine''': { '''repo_id''': '''suno/bark''', '''file_name''': '''fine_2.pt''', }, } a__ : Dict =os.path.dirname(os.path.abspath(__file__)) a__ : str =os.path.join(os.path.expanduser('''~'''), '''.cache''') a__ : str =os.path.join(os.getenv('''XDG_CACHE_HOME''', default_cache_dir), '''suno''', '''bark_v0''') def lowercase__ ( __lowercase : List[str] , __lowercase : Optional[int]=False ) -> List[str]: """simple docstring""" __UpperCamelCase = model_type if use_small: key += "_small" return os.path.join(__lowercase , REMOTE_MODEL_PATHS[key]['file_name'] ) def lowercase__ ( __lowercase : Optional[int] , __lowercase : int ) -> str: """simple docstring""" os.makedirs(__lowercase , exist_ok=__lowercase ) hf_hub_download(repo_id=__lowercase , filename=__lowercase , local_dir=__lowercase ) def lowercase__ ( __lowercase : int , __lowercase : Tuple , __lowercase : Optional[int]=False , __lowercase : Tuple="text" ) -> Optional[Any]: """simple docstring""" if model_type == "text": __UpperCamelCase = BarkSemanticModel __UpperCamelCase = BarkSemanticConfig __UpperCamelCase = BarkSemanticGenerationConfig elif model_type == "coarse": __UpperCamelCase = BarkCoarseModel __UpperCamelCase = BarkCoarseConfig __UpperCamelCase = BarkCoarseGenerationConfig elif model_type == "fine": __UpperCamelCase = BarkFineModel __UpperCamelCase = BarkFineConfig __UpperCamelCase = BarkFineGenerationConfig else: raise NotImplementedError() __UpperCamelCase = F'''{model_type}_small''' if use_small else model_type __UpperCamelCase = REMOTE_MODEL_PATHS[model_key] if not os.path.exists(__lowercase ): logger.info(F'''{model_type} model not found, downloading into `{CACHE_DIR}`.''' ) _download(model_info['repo_id'] , model_info['file_name'] ) __UpperCamelCase = torch.load(__lowercase , map_location=__lowercase ) # this is a hack __UpperCamelCase = checkpoint['model_args'] if "input_vocab_size" not in model_args: __UpperCamelCase = model_args['vocab_size'] __UpperCamelCase = 
model_args['vocab_size'] del model_args["vocab_size"] # convert Bark model arguments to HF Bark model arguments __UpperCamelCase = model_args.pop('n_head' ) __UpperCamelCase = model_args.pop('n_embd' ) __UpperCamelCase = model_args.pop('n_layer' ) __UpperCamelCase = ConfigClass(**checkpoint['model_args'] ) __UpperCamelCase = ModelClass(config=__lowercase ) __UpperCamelCase = GenerationConfigClass() __UpperCamelCase = model_generation_config __UpperCamelCase = checkpoint['model'] # fixup checkpoint __UpperCamelCase = '_orig_mod.' for k, v in list(state_dict.items() ): if k.startswith(__lowercase ): # replace part of the key with corresponding layer name in HF implementation __UpperCamelCase = k[len(__lowercase ) :] for old_layer_name in new_layer_name_dict: __UpperCamelCase = new_k.replace(__lowercase , new_layer_name_dict[old_layer_name] ) __UpperCamelCase = state_dict.pop(__lowercase ) __UpperCamelCase = set(state_dict.keys() ) - set(model.state_dict().keys() ) __UpperCamelCase = {k for k in extra_keys if not k.endswith('.attn.bias' )} __UpperCamelCase = set(model.state_dict().keys() ) - set(state_dict.keys() ) __UpperCamelCase = {k for k in missing_keys if not k.endswith('.attn.bias' )} if len(__lowercase ) != 0: raise ValueError(F'''extra keys found: {extra_keys}''' ) if len(__lowercase ) != 0: raise ValueError(F'''missing keys: {missing_keys}''' ) model.load_state_dict(__lowercase , strict=__lowercase ) __UpperCamelCase = model.num_parameters(exclude_embeddings=__lowercase ) __UpperCamelCase = checkpoint['best_val_loss'].item() logger.info(F'''model loaded: {round(n_params/1e6 , 1 )}M params, {round(__lowercase , 3 )} loss''' ) model.eval() model.to(__lowercase ) del checkpoint, state_dict return model def lowercase__ ( __lowercase : List[Any] , __lowercase : Any=False , __lowercase : List[Any]="text" ) -> int: """simple docstring""" if model_type not in ("text", "coarse", "fine"): raise NotImplementedError() __UpperCamelCase = 'cpu' # do conversion on cpu __UpperCamelCase = _get_ckpt_path(__lowercase , use_small=__lowercase ) __UpperCamelCase = _load_model(__lowercase , __lowercase , model_type=__lowercase , use_small=__lowercase ) # load bark initial model __UpperCamelCase = _bark_load_model(__lowercase , 'cpu' , model_type=__lowercase , use_small=__lowercase ) if model_type == "text": __UpperCamelCase = bark_model['model'] if model.num_parameters(exclude_embeddings=__lowercase ) != bark_model.get_num_params(): raise ValueError('initial and new models don\'t have the same number of parameters' ) # check if same output as the bark model __UpperCamelCase = 5 __UpperCamelCase = 10 if model_type in ["text", "coarse"]: __UpperCamelCase = torch.randint(256 , (batch_size, sequence_length) , dtype=torch.int ) __UpperCamelCase = bark_model(__lowercase )[0] __UpperCamelCase = model(__lowercase ) # take last logits __UpperCamelCase = output_new_model_total.logits[:, [-1], :] else: __UpperCamelCase = 3 __UpperCamelCase = 8 __UpperCamelCase = torch.randint(256 , (batch_size, sequence_length, n_codes_total) , dtype=torch.int ) __UpperCamelCase = model(__lowercase , __lowercase ) __UpperCamelCase = bark_model(__lowercase , __lowercase ) __UpperCamelCase = output_new_model_total.logits # output difference should come from the difference of self-attention implementation design if output_new_model.shape != output_old_model.shape: raise ValueError('initial and new outputs don\'t have the same shape' ) if (output_new_model - output_old_model).abs().max().item() > 1e-3: raise ValueError('initial and new 
outputs are not equal' ) Path(__lowercase ).mkdir(exist_ok=__lowercase ) model.save_pretrained(__lowercase ) def lowercase__ ( __lowercase : int , __lowercase : int , __lowercase : Union[str, Any] , __lowercase : Optional[Any] , __lowercase : Any , __lowercase : List[Any] , ) -> List[Any]: """simple docstring""" __UpperCamelCase = os.path.join(__lowercase , __lowercase ) __UpperCamelCase = BarkSemanticConfig.from_pretrained(os.path.join(__lowercase , 'config.json' ) ) __UpperCamelCase = BarkCoarseConfig.from_pretrained(os.path.join(__lowercase , 'config.json' ) ) __UpperCamelCase = BarkFineConfig.from_pretrained(os.path.join(__lowercase , 'config.json' ) ) __UpperCamelCase = EncodecConfig.from_pretrained('facebook/encodec_24khz' ) __UpperCamelCase = BarkSemanticModel.from_pretrained(__lowercase ) __UpperCamelCase = BarkCoarseModel.from_pretrained(__lowercase ) __UpperCamelCase = BarkFineModel.from_pretrained(__lowercase ) __UpperCamelCase = EncodecModel.from_pretrained('facebook/encodec_24khz' ) __UpperCamelCase = BarkConfig.from_sub_model_configs( __lowercase , __lowercase , __lowercase , __lowercase ) __UpperCamelCase = BarkGenerationConfig.from_sub_model_configs( semantic.generation_config , coarseAcoustic.generation_config , fineAcoustic.generation_config ) __UpperCamelCase = BarkModel(__lowercase ) __UpperCamelCase = semantic __UpperCamelCase = coarseAcoustic __UpperCamelCase = fineAcoustic __UpperCamelCase = codec __UpperCamelCase = bark_generation_config Path(__lowercase ).mkdir(exist_ok=__lowercase ) bark.save_pretrained(__lowercase , repo_id=__lowercase , push_to_hub=__lowercase ) if __name__ == "__main__": a__ : Any =argparse.ArgumentParser() # Required parameters parser.add_argument('''model_type''', type=str, help='''text, coarse or fine.''') parser.add_argument('''pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''') parser.add_argument('''--is_small''', action='''store_true''', help='''convert the small version instead of the large.''') a__ : Optional[int] =parser.parse_args() load_model(args.pytorch_dump_folder_path, model_type=args.model_type, use_small=args.is_small)
399
0
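As a quick illustration of what the failure array in the KMP record above encodes, here is a self-contained check (the helper is re-implemented inline so the snippet runs on its own):

def failure_array(pattern: str) -> list[int]:
    # failure[j] is the length of the longest proper prefix of
    # pattern[: j + 1] that is also a suffix of it.
    failure = [0]
    i, j = 0, 1
    while j < len(pattern):
        if pattern[i] == pattern[j]:
            i += 1
        elif i > 0:
            i = failure[i - 1]
            continue
        j += 1
        failure.append(i)
    return failure


assert failure_array("aabaabaaa") == [0, 1, 0, 1, 2, 3, 4, 5, 2]
# e.g. failure[5] == 3 because "aab" is both a proper prefix and a suffix of "aabaab",
# so a mismatch after matching 6 characters lets the search resume at pattern index 3.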
import json import multiprocessing import os import re from collections import defaultdict import torch from accelerate import Accelerator from accelerate.utils import set_seed from arguments import HumanEvalArguments from datasets import load_dataset, load_metric from torch.utils.data import IterableDataset from torch.utils.data.dataloader import DataLoader from tqdm import tqdm import transformers from transformers import AutoModelForCausalLM, AutoTokenizer, HfArgumentParser, StoppingCriteria, StoppingCriteriaList lowercase : Tuple = ['''\nclass''', '''\ndef''', '''\n#''', '''\n@''', '''\nprint''', '''\nif'''] class UpperCAmelCase_ ( SCREAMING_SNAKE_CASE__ ): '''simple docstring''' def __init__( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=1 ) -> Dict: snake_case_ : int = tokenizer snake_case_ : Tuple = dataset snake_case_ : List[str] = len(__a ) if n_tasks is None else n_tasks snake_case_ : Optional[Any] = n_copies def __iter__( self ) -> Union[str, Any]: snake_case_ : Optional[Any] = [] for task in range(self.n_tasks ): # without strip, the model generate commented codes ... prompts.append(self.tokenizer.eos_token + self.dataset[task]["prompt"].strip() ) snake_case_ : Optional[Any] = self.tokenizer(__a , padding=__a , return_tensors="pt" ) for task in range(self.n_tasks ): for _ in range(self.n_copies ): yield { "ids": outputs.input_ids[task], "task_id": task, "input_len": outputs.attention_mask[task].sum(), } class UpperCAmelCase_ ( SCREAMING_SNAKE_CASE__ ): '''simple docstring''' def __init__( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Optional[Any]: snake_case_ : int = start_length snake_case_ : Union[str, Any] = eof_strings snake_case_ : List[str] = tokenizer def __call__( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ) -> List[Any]: snake_case_ : Optional[Any] = self.tokenizer.batch_decode(input_ids[:, self.start_length :] ) snake_case_ : Optional[int] = [] for decoded_generation in decoded_generations: done.append(any(stop_string in decoded_generation for stop_string in self.eof_strings ) ) return all(__a ) def lowerCAmelCase__ ( _a : Dict ): snake_case_ : int = re.split("(%s)" % "|".join(__snake_case ) , __snake_case ) # last string should be "" return "".join(string_list[:-2] ) def lowerCAmelCase__ ( _a : Optional[int] , _a : Dict , _a : Any , _a : List[str] , _a : int , _a : Tuple=20 , **_a : List[str] ): snake_case_ : Any = defaultdict(__snake_case ) # dict of list of generated tokens for step, batch in tqdm(enumerate(__snake_case ) ): with torch.no_grad(): snake_case_ : int = batch["ids"].shape[-1] snake_case_ : Tuple = accelerator.unwrap_model(__snake_case ).generate( input_ids=batch["ids"][:, : batch["input_len"]] , num_return_sequences=__snake_case , **__snake_case ) # each task is generated batch_size times snake_case_ : Tuple = batch["task_id"].repeat(__snake_case ) snake_case_ : Optional[int] = accelerator.pad_across_processes( __snake_case , dim=1 , pad_index=tokenizer.pad_token_id ) snake_case_ , snake_case_ : Optional[int] = accelerator.gather((generated_tokens, generated_tasks) ) snake_case_ : Tuple = generated_tokens.cpu().numpy() snake_case_ : List[Any] = generated_tasks.cpu().numpy() for task, generated_tokens in zip(__snake_case , __snake_case ): gen_token_dict[task].append(__snake_case ) snake_case_ : Optional[int] = [[] for _ in range(__snake_case )] for task, generated_tokens in gen_token_dict.items(): for s in 
generated_tokens: snake_case_ : str = tokenizer.decode(__snake_case , skip_special_tokens=__snake_case , clean_up_tokenization_spaces=__snake_case ) code_gens[task].append(remove_last_block(__snake_case ) ) return code_gens def lowerCAmelCase__ ( ): snake_case_ : Optional[Any] = HfArgumentParser(__snake_case ) snake_case_ : Tuple = parser.parse_args() transformers.logging.set_verbosity_error() # enables code execution in code_eval metric snake_case_ : Tuple = args.HF_ALLOW_CODE_EVAL # make sure tokenizer plays nice with multiprocessing snake_case_ : List[str] = "false" if args.num_workers is None: snake_case_ : Union[str, Any] = multiprocessing.cpu_count() # Use dataset load to feed to accelerate snake_case_ : Dict = Accelerator() set_seed(args.seed , device_specific=__snake_case ) # Load model and tokenizer snake_case_ : str = AutoTokenizer.from_pretrained(args.model_ckpt ) snake_case_ : Any = tokenizer.eos_token snake_case_ : List[str] = AutoModelForCausalLM.from_pretrained(args.model_ckpt ) # Generation settings snake_case_ : List[str] = { "do_sample": args.do_sample, "temperature": args.temperature, "max_new_tokens": args.max_new_tokens, "top_p": args.top_p, "top_k": args.top_k, "stopping_criteria": StoppingCriteriaList([EndOfFunctionCriteria(0 , __snake_case , __snake_case )] ), } # Load evaluation dataset and metric snake_case_ : Optional[Any] = load_dataset("openai_humaneval" ) snake_case_ : Optional[int] = load_metric("code_eval" ) snake_case_ : List[Any] = args.num_tasks if args.num_tasks is not None else len(human_eval["test"] ) snake_case_ : Any = args.n_samples // args.batch_size snake_case_ : str = TokenizedDataset(__snake_case , human_eval["test"] , n_copies=__snake_case , n_tasks=__snake_case ) # do not confuse args.batch_size, which is actually the num_return_sequences snake_case_ : Optional[Any] = DataLoader(__snake_case , batch_size=1 ) # Run a quick test to see if code evaluation is enabled try: snake_case_ : int = code_eval_metric.compute(references=[""] , predictions=[[""]] ) except ValueError as exception: print( "Code evaluation not enabled. Read the warning below carefully and then use `--HF_ALLOW_CODE_EVAL=\"1\"`" " flag to enable code evaluation." ) raise exception snake_case_ , snake_case_ : List[Any] = accelerator.prepare(__snake_case , __snake_case ) snake_case_ : Optional[int] = complete_code( __snake_case , __snake_case , __snake_case , __snake_case , n_tasks=__snake_case , batch_size=args.batch_size , **__snake_case , ) if accelerator.is_main_process: snake_case_ : Optional[Any] = [] for task in tqdm(range(__snake_case ) ): snake_case_ : Optional[int] = human_eval["test"][task]["test"] snake_case_ : List[str] = F'''check({human_eval['test'][task]['entry_point']})''' references.append("\n" + test_func + "\n" + entry_point ) # Evaluate completions with "code_eval" metric snake_case_ , snake_case_ : List[Any] = code_eval_metric.compute( references=__snake_case , predictions=__snake_case , num_workers=args.num_workers ) print(F'''Results: {pass_at_k}''' ) # Save results to json file with open(args.output_file , "w" ) as fp: json.dump(__snake_case , __snake_case ) # For some reason the folliwng seems to be necessary sometimes for code_eval to work nice with multiprocessing # https://stackoverflow.com/questions/60804599/python-multiprocessing-keeps-spawning-the-whole-script if __name__ == "__main__": main()
712
def factorial(num: int) -> int:
    """Return num! computed iteratively."""
    fact = 1
    for i in range(1, num + 1):
        fact *= i
    return fact


def split_and_add(number: int) -> int:
    """Split the decimal digits of number and add them."""
    sum_of_digits = 0
    while number > 0:
        last_digit = number % 10
        sum_of_digits += last_digit
        number = number // 10  # Remove the last digit from the number
    return sum_of_digits


def solution(num: int = 100) -> int:
    """Return the sum of the digits of num!."""
    nfact = factorial(num)
    result = split_and_add(nfact)
    return result


if __name__ == "__main__":
    print(solution(int(input("Enter the Number: ").strip())))
114
0
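A quick sanity check for the digit-sum-of-a-factorial snippet above: 100! has digit sum 648 (the classic Project Euler 20 answer), which can be verified with the standard library alone:

import math

# Sum the decimal digits of 100! without any custom helpers.
digit_sum = sum(int(d) for d in str(math.factorial(100)))
assert digit_sum == 648
print(digit_sum)  # 648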
"""simple docstring""" import tempfile import torch from diffusers import ( DEISMultistepScheduler, DPMSolverMultistepScheduler, DPMSolverSinglestepScheduler, UniPCMultistepScheduler, ) from .test_schedulers import SchedulerCommonTest class snake_case__ ( _SCREAMING_SNAKE_CASE ): SCREAMING_SNAKE_CASE__ = (DPMSolverSinglestepScheduler,) SCREAMING_SNAKE_CASE__ = (("num_inference_steps", 25),) def __lowerCAmelCase ( self : Optional[int] , **lowercase : List[Any] ): '''simple docstring''' UpperCAmelCase : Tuple = { "num_train_timesteps": 10_00, "beta_start": 0.0_0_0_1, "beta_end": 0.0_2, "beta_schedule": "linear", "solver_order": 2, "prediction_type": "epsilon", "thresholding": False, "sample_max_value": 1.0, "algorithm_type": "dpmsolver++", "solver_type": "midpoint", "lambda_min_clipped": -float("inf" ), "variance_type": None, } config.update(**snake_case_ ) return config def __lowerCAmelCase ( self : Any , lowercase : Optional[Any]=0 , **lowercase : Optional[Any] ): '''simple docstring''' UpperCAmelCase : List[str] = dict(self.forward_default_kwargs ) UpperCAmelCase : Dict = kwargs.pop("num_inference_steps" , snake_case_ ) UpperCAmelCase : Union[str, Any] = self.dummy_sample UpperCAmelCase : str = 0.1 * sample UpperCAmelCase : Dict = [residual + 0.2, residual + 0.1_5, residual + 0.1_0] for scheduler_class in self.scheduler_classes: UpperCAmelCase : int = self.get_scheduler_config(**snake_case_ ) UpperCAmelCase : Tuple = scheduler_class(**snake_case_ ) scheduler.set_timesteps(snake_case_ ) # copy over dummy past residuals UpperCAmelCase : Optional[int] = dummy_past_residuals[: scheduler.config.solver_order] with tempfile.TemporaryDirectory() as tmpdirname: scheduler.save_config(snake_case_ ) UpperCAmelCase : Any = scheduler_class.from_pretrained(snake_case_ ) new_scheduler.set_timesteps(snake_case_ ) # copy over dummy past residuals UpperCAmelCase : str = dummy_past_residuals[: new_scheduler.config.solver_order] UpperCAmelCase , UpperCAmelCase : Tuple = sample, sample for t in range(snake_case_ , time_step + scheduler.config.solver_order + 1 ): UpperCAmelCase : Dict = scheduler.step(snake_case_ , snake_case_ , snake_case_ , **snake_case_ ).prev_sample UpperCAmelCase : Optional[Any] = new_scheduler.step(snake_case_ , snake_case_ , snake_case_ , **snake_case_ ).prev_sample assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical" def __lowerCAmelCase ( self : List[str] ): '''simple docstring''' pass def __lowerCAmelCase ( self : str , lowercase : List[Any]=0 , **lowercase : Dict ): '''simple docstring''' UpperCAmelCase : Tuple = dict(self.forward_default_kwargs ) UpperCAmelCase : Union[str, Any] = kwargs.pop("num_inference_steps" , snake_case_ ) UpperCAmelCase : str = self.dummy_sample UpperCAmelCase : Dict = 0.1 * sample UpperCAmelCase : str = [residual + 0.2, residual + 0.1_5, residual + 0.1_0] for scheduler_class in self.scheduler_classes: UpperCAmelCase : Any = self.get_scheduler_config() UpperCAmelCase : Union[str, Any] = scheduler_class(**snake_case_ ) scheduler.set_timesteps(snake_case_ ) # copy over dummy past residuals (must be after setting timesteps) UpperCAmelCase : List[str] = dummy_past_residuals[: scheduler.config.solver_order] with tempfile.TemporaryDirectory() as tmpdirname: scheduler.save_config(snake_case_ ) UpperCAmelCase : Tuple = scheduler_class.from_pretrained(snake_case_ ) # copy over dummy past residuals new_scheduler.set_timesteps(snake_case_ ) # copy over dummy past residual (must be after setting timesteps) UpperCAmelCase : 
Tuple = dummy_past_residuals[: new_scheduler.config.solver_order] UpperCAmelCase : str = scheduler.step(snake_case_ , snake_case_ , snake_case_ , **snake_case_ ).prev_sample UpperCAmelCase : Tuple = new_scheduler.step(snake_case_ , snake_case_ , snake_case_ , **snake_case_ ).prev_sample assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical" def __lowerCAmelCase ( self : List[str] , lowercase : Any=None , **lowercase : str ): '''simple docstring''' if scheduler is None: UpperCAmelCase : int = self.scheduler_classes[0] UpperCAmelCase : int = self.get_scheduler_config(**snake_case_ ) UpperCAmelCase : Optional[Any] = scheduler_class(**snake_case_ ) UpperCAmelCase : Union[str, Any] = self.scheduler_classes[0] UpperCAmelCase : Optional[int] = self.get_scheduler_config(**snake_case_ ) UpperCAmelCase : Any = scheduler_class(**snake_case_ ) UpperCAmelCase : Optional[Any] = 10 UpperCAmelCase : Any = self.dummy_model() UpperCAmelCase : List[str] = self.dummy_sample_deter scheduler.set_timesteps(snake_case_ ) for i, t in enumerate(scheduler.timesteps ): UpperCAmelCase : Any = model(snake_case_ , snake_case_ ) UpperCAmelCase : str = scheduler.step(snake_case_ , snake_case_ , snake_case_ ).prev_sample return sample def __lowerCAmelCase ( self : Dict ): '''simple docstring''' UpperCAmelCase : List[Any] = DPMSolverSinglestepScheduler(**self.get_scheduler_config() ) UpperCAmelCase : Any = 50 UpperCAmelCase : int = self.dummy_model() UpperCAmelCase : List[str] = self.dummy_sample_deter scheduler.set_timesteps(snake_case_ ) # make sure that the first t is uneven for i, t in enumerate(scheduler.timesteps[3:] ): UpperCAmelCase : Tuple = model(snake_case_ , snake_case_ ) UpperCAmelCase : int = scheduler.step(snake_case_ , snake_case_ , snake_case_ ).prev_sample UpperCAmelCase : Union[str, Any] = torch.mean(torch.abs(snake_case_ ) ) assert abs(result_mean.item() - 0.2_5_7_4 ) < 1E-3 def __lowerCAmelCase ( self : Tuple ): '''simple docstring''' for timesteps in [25, 50, 1_00, 9_99, 10_00]: self.check_over_configs(num_train_timesteps=snake_case_ ) def __lowerCAmelCase ( self : int ): '''simple docstring''' UpperCAmelCase : Optional[int] = DPMSolverSinglestepScheduler(**self.get_scheduler_config() ) UpperCAmelCase : Dict = self.full_loop(scheduler=snake_case_ ) UpperCAmelCase : Optional[Any] = torch.mean(torch.abs(snake_case_ ) ) assert abs(result_mean.item() - 0.2_7_9_1 ) < 1E-3 UpperCAmelCase : int = DEISMultistepScheduler.from_config(scheduler.config ) UpperCAmelCase : List[str] = DPMSolverMultistepScheduler.from_config(scheduler.config ) UpperCAmelCase : str = UniPCMultistepScheduler.from_config(scheduler.config ) UpperCAmelCase : str = DPMSolverSinglestepScheduler.from_config(scheduler.config ) UpperCAmelCase : str = self.full_loop(scheduler=snake_case_ ) UpperCAmelCase : str = torch.mean(torch.abs(snake_case_ ) ) assert abs(result_mean.item() - 0.2_7_9_1 ) < 1E-3 def __lowerCAmelCase ( self : str ): '''simple docstring''' self.check_over_configs(thresholding=snake_case_ ) for order in [1, 2, 3]: for solver_type in ["midpoint", "heun"]: for threshold in [0.5, 1.0, 2.0]: for prediction_type in ["epsilon", "sample"]: self.check_over_configs( thresholding=snake_case_ , prediction_type=snake_case_ , sample_max_value=snake_case_ , algorithm_type="dpmsolver++" , solver_order=snake_case_ , solver_type=snake_case_ , ) def __lowerCAmelCase ( self : Dict ): '''simple docstring''' for prediction_type in ["epsilon", "v_prediction"]: 
self.check_over_configs(prediction_type=snake_case_ ) def __lowerCAmelCase ( self : List[Any] ): '''simple docstring''' for algorithm_type in ["dpmsolver", "dpmsolver++"]: for solver_type in ["midpoint", "heun"]: for order in [1, 2, 3]: for prediction_type in ["epsilon", "sample"]: self.check_over_configs( solver_order=snake_case_ , solver_type=snake_case_ , prediction_type=snake_case_ , algorithm_type=snake_case_ , ) UpperCAmelCase : Tuple = self.full_loop( solver_order=snake_case_ , solver_type=snake_case_ , prediction_type=snake_case_ , algorithm_type=snake_case_ , ) assert not torch.isnan(snake_case_ ).any(), "Samples have nan numbers" def __lowerCAmelCase ( self : Dict ): '''simple docstring''' self.check_over_configs(lower_order_final=snake_case_ ) self.check_over_configs(lower_order_final=snake_case_ ) def __lowerCAmelCase ( self : List[Any] ): '''simple docstring''' self.check_over_configs(lambda_min_clipped=-float("inf" ) ) self.check_over_configs(lambda_min_clipped=-5.1 ) def __lowerCAmelCase ( self : List[Any] ): '''simple docstring''' self.check_over_configs(variance_type=snake_case_ ) self.check_over_configs(variance_type="learned_range" ) def __lowerCAmelCase ( self : Optional[Any] ): '''simple docstring''' for num_inference_steps in [1, 2, 3, 5, 10, 50, 1_00, 9_99, 10_00]: self.check_over_forward(num_inference_steps=snake_case_ , time_step=0 ) def __lowerCAmelCase ( self : Dict ): '''simple docstring''' UpperCAmelCase : Tuple = self.full_loop() UpperCAmelCase : Any = torch.mean(torch.abs(snake_case_ ) ) assert abs(result_mean.item() - 0.2_7_9_1 ) < 1E-3 def __lowerCAmelCase ( self : str ): '''simple docstring''' UpperCAmelCase : Optional[int] = self.full_loop(use_karras_sigmas=snake_case_ ) UpperCAmelCase : str = torch.mean(torch.abs(snake_case_ ) ) assert abs(result_mean.item() - 0.2_2_4_8 ) < 1E-3 def __lowerCAmelCase ( self : List[str] ): '''simple docstring''' UpperCAmelCase : Union[str, Any] = self.full_loop(prediction_type="v_prediction" ) UpperCAmelCase : List[Any] = torch.mean(torch.abs(snake_case_ ) ) assert abs(result_mean.item() - 0.1_4_5_3 ) < 1E-3 def __lowerCAmelCase ( self : Union[str, Any] ): '''simple docstring''' UpperCAmelCase : List[Any] = self.full_loop(prediction_type="v_prediction" , use_karras_sigmas=snake_case_ ) UpperCAmelCase : int = torch.mean(torch.abs(snake_case_ ) ) assert abs(result_mean.item() - 0.0_6_4_9 ) < 1E-3 def __lowerCAmelCase ( self : Dict ): '''simple docstring''' UpperCAmelCase : Any = self.scheduler_classes[0] UpperCAmelCase : Optional[Any] = self.get_scheduler_config(thresholding=snake_case_ , dynamic_thresholding_ratio=0 ) UpperCAmelCase : Tuple = scheduler_class(**snake_case_ ) UpperCAmelCase : Tuple = 10 UpperCAmelCase : Optional[Any] = self.dummy_model() UpperCAmelCase : Union[str, Any] = self.dummy_sample_deter.half() scheduler.set_timesteps(snake_case_ ) for i, t in enumerate(scheduler.timesteps ): UpperCAmelCase : Optional[Any] = model(snake_case_ , snake_case_ ) UpperCAmelCase : Union[str, Any] = scheduler.step(snake_case_ , snake_case_ , snake_case_ ).prev_sample assert sample.dtype == torch.floataa
595
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_torch_available, ) SCREAMING_SNAKE_CASE_ = { '''configuration_mega''': ['''MEGA_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''MegaConfig''', '''MegaOnnxConfig'''], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: SCREAMING_SNAKE_CASE_ = [ '''MEGA_PRETRAINED_MODEL_ARCHIVE_LIST''', '''MegaForCausalLM''', '''MegaForMaskedLM''', '''MegaForMultipleChoice''', '''MegaForQuestionAnswering''', '''MegaForSequenceClassification''', '''MegaForTokenClassification''', '''MegaModel''', '''MegaPreTrainedModel''', ] if TYPE_CHECKING: from .configuration_mega import MEGA_PRETRAINED_CONFIG_ARCHIVE_MAP, MegaConfig, MegaOnnxConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_mega import ( MEGA_PRETRAINED_MODEL_ARCHIVE_LIST, MegaForCausalLM, MegaForMaskedLM, MegaForMultipleChoice, MegaForQuestionAnswering, MegaForSequenceClassification, MegaForTokenClassification, MegaModel, MegaPreTrainedModel, ) else: import sys SCREAMING_SNAKE_CASE_ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
426
0
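The `_import_structure`/`_LazyModule` file in the record above defers heavy imports until a symbol is first accessed. A minimal stand-alone sketch of the same idea using a PEP 562 module-level __getattr__ (not transformers' actual _LazyModule, just the pattern; the json mapping is a placeholder so the snippet is self-contained):

import importlib

# Map of submodule name -> public symbols it provides.
_import_structure = {
    "json": ["dumps", "loads"],
}


def __getattr__(name):
    # Invoked only when `name` is not found normally; import lazily on first use.
    for module_name, symbols in _import_structure.items():
        if name in symbols:
            return getattr(importlib.import_module(module_name), name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")

Placed at the top level of a module, this makes `from mypkg import dumps` trigger the `json` import only at that moment, which is the effect the transformers init file above achieves via _LazyModule.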
'''simple docstring''' import json import os import shutil import tempfile import unittest import numpy as np import pytest from transformers import MgpstrTokenizer from transformers.models.mgp_str.tokenization_mgp_str import VOCAB_FILES_NAMES from transformers.testing_utils import require_torch, require_vision from transformers.utils import IMAGE_PROCESSOR_NAME, is_torch_available, is_vision_available if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import MgpstrProcessor, ViTImageProcessor @require_torch @require_vision class UpperCAmelCase_ ( unittest.TestCase ): lowerCamelCase : int = ViTImageProcessor if is_vision_available() else None @property def __UpperCAmelCase ( self : List[Any] ) -> str: return self.image_processor_tester.prepare_image_processor_dict() def __UpperCAmelCase ( self : List[str] ) -> Dict: lowerCAmelCase = (3, 3_2, 1_2_8) lowerCAmelCase = tempfile.mkdtemp() # fmt: off lowerCAmelCase = ['[GO]', '[s]', '0', '1', '2', '3', '4', '5', '6', '7', '8', '9', 'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z'] # fmt: on lowerCAmelCase = dict(zip(UpperCAmelCase__ , range(len(UpperCAmelCase__ ) ) ) ) lowerCAmelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] ) with open(self.vocab_file , 'w' , encoding='utf-8' ) as fp: fp.write(json.dumps(UpperCAmelCase__ ) + '\n' ) lowerCAmelCase = { 'do_normalize': False, 'do_resize': True, 'image_processor_type': 'ViTImageProcessor', 'resample': 3, 'size': {'height': 3_2, 'width': 1_2_8}, } lowerCAmelCase = os.path.join(self.tmpdirname , UpperCAmelCase__ ) with open(self.image_processor_file , 'w' , encoding='utf-8' ) as fp: json.dump(UpperCAmelCase__ , UpperCAmelCase__ ) def __UpperCAmelCase ( self : Union[str, Any] , **UpperCAmelCase__ : List[Any] ) -> int: return MgpstrTokenizer.from_pretrained(self.tmpdirname , **UpperCAmelCase__ ) def __UpperCAmelCase ( self : Any , **UpperCAmelCase__ : int ) -> int: return ViTImageProcessor.from_pretrained(self.tmpdirname , **UpperCAmelCase__ ) def __UpperCAmelCase ( self : List[Any] ) -> int: shutil.rmtree(self.tmpdirname ) def __UpperCAmelCase ( self : List[Any] ) -> Any: lowerCAmelCase = np.random.randint(2_5_5 , size=(3, 3_0, 4_0_0) , dtype=np.uinta ) lowerCAmelCase = Image.fromarray(np.moveaxis(UpperCAmelCase__ , 0 , -1 ) ) return image_input def __UpperCAmelCase ( self : List[Any] ) -> Dict: lowerCAmelCase = self.get_tokenizer() lowerCAmelCase = self.get_image_processor() lowerCAmelCase = MgpstrProcessor(tokenizer=UpperCAmelCase__ , image_processor=UpperCAmelCase__ ) processor.save_pretrained(self.tmpdirname ) lowerCAmelCase = MgpstrProcessor.from_pretrained(self.tmpdirname , use_fast=UpperCAmelCase__ ) self.assertEqual(processor.char_tokenizer.get_vocab() , tokenizer.get_vocab() ) self.assertIsInstance(processor.char_tokenizer , UpperCAmelCase__ ) self.assertEqual(processor.image_processor.to_json_string() , image_processor.to_json_string() ) self.assertIsInstance(processor.image_processor , UpperCAmelCase__ ) def __UpperCAmelCase ( self : str ) -> str: lowerCAmelCase = self.get_tokenizer() lowerCAmelCase = self.get_image_processor() lowerCAmelCase = MgpstrProcessor(tokenizer=UpperCAmelCase__ , image_processor=UpperCAmelCase__ ) processor.save_pretrained(self.tmpdirname ) lowerCAmelCase = self.get_tokenizer(bos_token='(BOS)' , eos_token='(EOS)' ) lowerCAmelCase = self.get_image_processor(do_normalize=UpperCAmelCase__ , padding_value=1.0 ) 
lowerCAmelCase = MgpstrProcessor.from_pretrained( self.tmpdirname , bos_token='(BOS)' , eos_token='(EOS)' , do_normalize=UpperCAmelCase__ , padding_value=1.0 ) self.assertEqual(processor.char_tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() ) self.assertIsInstance(processor.char_tokenizer , UpperCAmelCase__ ) self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() ) self.assertIsInstance(processor.image_processor , UpperCAmelCase__ ) def __UpperCAmelCase ( self : Tuple ) -> int: lowerCAmelCase = self.get_image_processor() lowerCAmelCase = self.get_tokenizer() lowerCAmelCase = MgpstrProcessor(tokenizer=UpperCAmelCase__ , image_processor=UpperCAmelCase__ ) lowerCAmelCase = self.prepare_image_inputs() lowerCAmelCase = image_processor(UpperCAmelCase__ , return_tensors='np' ) lowerCAmelCase = processor(images=UpperCAmelCase__ , return_tensors='np' ) for key in input_image_proc.keys(): self.assertAlmostEqual(input_image_proc[key].sum() , input_processor[key].sum() , delta=1E-2 ) def __UpperCAmelCase ( self : int ) -> List[str]: lowerCAmelCase = self.get_image_processor() lowerCAmelCase = self.get_tokenizer() lowerCAmelCase = MgpstrProcessor(tokenizer=UpperCAmelCase__ , image_processor=UpperCAmelCase__ ) lowerCAmelCase = 'test' lowerCAmelCase = processor(text=UpperCAmelCase__ ) lowerCAmelCase = tokenizer(UpperCAmelCase__ ) for key in encoded_tok.keys(): self.assertListEqual(encoded_tok[key] , encoded_processor[key] ) def __UpperCAmelCase ( self : Any ) -> int: lowerCAmelCase = self.get_image_processor() lowerCAmelCase = self.get_tokenizer() lowerCAmelCase = MgpstrProcessor(tokenizer=UpperCAmelCase__ , image_processor=UpperCAmelCase__ ) lowerCAmelCase = 'test' lowerCAmelCase = self.prepare_image_inputs() lowerCAmelCase = processor(text=UpperCAmelCase__ , images=UpperCAmelCase__ ) self.assertListEqual(list(inputs.keys() ) , ['pixel_values', 'labels'] ) # test if it raises when no input is passed with pytest.raises(UpperCAmelCase__ ): processor() def __UpperCAmelCase ( self : int ) -> int: lowerCAmelCase = self.get_image_processor() lowerCAmelCase = self.get_tokenizer() lowerCAmelCase = MgpstrProcessor(tokenizer=UpperCAmelCase__ , image_processor=UpperCAmelCase__ ) lowerCAmelCase = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9], [3, 4, 3, 1, 1, 8, 9]] lowerCAmelCase = processor.char_decode(UpperCAmelCase__ ) lowerCAmelCase = tokenizer.batch_decode(UpperCAmelCase__ ) lowerCAmelCase = [seq.replace(' ' , '' ) for seq in decoded_tok] self.assertListEqual(UpperCAmelCase__ , UpperCAmelCase__ ) def __UpperCAmelCase ( self : Optional[int] ) -> Any: lowerCAmelCase = self.get_image_processor() lowerCAmelCase = self.get_tokenizer() lowerCAmelCase = MgpstrProcessor(tokenizer=UpperCAmelCase__ , image_processor=UpperCAmelCase__ ) lowerCAmelCase = None lowerCAmelCase = self.prepare_image_inputs() lowerCAmelCase = processor(text=UpperCAmelCase__ , images=UpperCAmelCase__ ) self.assertListEqual(list(inputs.keys() ) , processor.model_input_names ) def __UpperCAmelCase ( self : int ) -> List[str]: lowerCAmelCase = self.get_image_processor() lowerCAmelCase = self.get_tokenizer() lowerCAmelCase = MgpstrProcessor(tokenizer=UpperCAmelCase__ , image_processor=UpperCAmelCase__ ) lowerCAmelCase = torch.randn(1 , 2_7 , 3_8 ) lowerCAmelCase = torch.randn(1 , 2_7 , 5_0_2_5_7 ) lowerCAmelCase = torch.randn(1 , 2_7 , 3_0_5_2_2 ) lowerCAmelCase = processor.batch_decode([char_input, bpe_input, wp_input] ) self.assertListEqual(list(results.keys() ) , ['generated_text', 
'scores', 'char_preds', 'bpe_preds', 'wp_preds'] )
713
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_tf_available,
    is_torch_available,
    is_vision_available,
)


_import_structure = {
    "configuration_efficientformer": [
        "EFFICIENTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "EfficientFormerConfig",
    ]
}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["image_processing_efficientformer"] = ["EfficientFormerImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_efficientformer"] = [
        "EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "EfficientFormerForImageClassification",
        "EfficientFormerForImageClassificationWithTeacher",
        "EfficientFormerModel",
        "EfficientFormerPreTrainedModel",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_efficientformer"] = [
        "TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFEfficientFormerForImageClassification",
        "TFEfficientFormerForImageClassificationWithTeacher",
        "TFEfficientFormerModel",
        "TFEfficientFormerPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_efficientformer import EFFICIENTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, EfficientFormerConfig

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .image_processing_efficientformer import EfficientFormerImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_efficientformer import (
            EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            EfficientFormerForImageClassification,
            EfficientFormerForImageClassificationWithTeacher,
            EfficientFormerModel,
            EfficientFormerPreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_efficientformer import (
            TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFEfficientFormerForImageClassification,
            TFEfficientFormerForImageClassificationWithTeacher,
            TFEfficientFormerModel,
            TFEfficientFormerPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
513
0
def sum_of_digits(n: int) -> int:
    """Return the sum of the decimal digits of n, iteratively."""
    n = abs(n)
    res = 0
    while n > 0:
        res += n % 10
        n //= 10
    return res


def sum_of_digits_recursion(n: int) -> int:
    """Return the sum of the decimal digits of n, recursively."""
    n = abs(n)
    return n if n < 10 else n % 10 + sum_of_digits_recursion(n // 10)


def sum_of_digits_compact(n: int) -> int:
    """Return the sum of the decimal digits of n via string conversion."""
    return sum(int(c) for c in str(abs(n)))


def benchmark() -> None:
    """Benchmark all three implementations on three int sizes."""
    from collections.abc import Callable
    from timeit import timeit

    def benchmark_a_function(func: Callable, value: int) -> None:
        call = f"{func.__name__}({value})"
        timing = timeit(f"__main__.{call}", setup="import __main__")
        print(f"{call:56} = {func(value)} -- {timing:.4f} seconds")

    for value in (262_144, 1_125_899_906_842_624, 1_267_650_600_228_229_401_496_703_205_376):
        for func in (sum_of_digits, sum_of_digits_recursion, sum_of_digits_compact):
            benchmark_a_function(func, value)
        print()


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    benchmark()
164
from typing import Any, Dict, List, Union from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging, requires_backends from .base import PIPELINE_INIT_ARGS, Pipeline if is_vision_available(): from ..image_utils import load_image if is_torch_available(): import torch from ..models.auto.modeling_auto import MODEL_FOR_OBJECT_DETECTION_MAPPING, MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING snake_case_ = logging.get_logger(__name__) snake_case_ = Dict[str, Any] snake_case_ = List[Prediction] @add_end_docstrings(__snake_case ) class SCREAMING_SNAKE_CASE__ (__snake_case ): def __init__( self , *a , **a): super().__init__(*a , **a) if self.framework == "tf": raise ValueError(f"""The {self.__class__} is only available in PyTorch.""") requires_backends(self , 'vision') self.check_model_type( dict(MODEL_FOR_OBJECT_DETECTION_MAPPING.items() + MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING.items())) def snake_case_ ( self , **a): lowercase__ : Optional[int] = {} if "threshold" in kwargs: lowercase__ : List[Any] = kwargs['threshold'] return {}, {}, postprocess_kwargs def __call__( self , *a , **a): return super().__call__(*a , **a) def snake_case_ ( self , a): lowercase__ : Optional[int] = load_image(a) lowercase__ : Any = torch.IntTensor([[image.height, image.width]]) lowercase__ : int = self.image_processor(images=[image] , return_tensors='pt') if self.tokenizer is not None: lowercase__ : str = self.tokenizer(text=inputs['words'] , boxes=inputs['boxes'] , return_tensors='pt') lowercase__ : Union[str, Any] = target_size return inputs def snake_case_ ( self , a): lowercase__ : Any = model_inputs.pop('target_size') lowercase__ : Tuple = self.model(**a) lowercase__ : Dict = outputs.__class__({'target_size': target_size, **outputs}) if self.tokenizer is not None: lowercase__ : Tuple = model_inputs['bbox'] return model_outputs def snake_case_ ( self , a , a=0.9): lowercase__ : Union[str, Any] = model_outputs['target_size'] if self.tokenizer is not None: # This is a LayoutLMForTokenClassification variant. # The OCR got the boxes and the model classified the words. lowercase__ , lowercase__ : Tuple = target_size[0].tolist() def unnormalize(a): return self._get_bounding_box( torch.Tensor( [ (width * bbox[0] / 1000), (height * bbox[1] / 1000), (width * bbox[2] / 1000), (height * bbox[3] / 1000), ])) lowercase__ , lowercase__ : List[str] = model_outputs['logits'].squeeze(0).softmax(dim=-1).max(dim=-1) lowercase__ : Optional[int] = [self.model.config.idalabel[prediction] for prediction in classes.tolist()] lowercase__ : List[Any] = [unnormalize(a) for bbox in model_outputs['bbox'].squeeze(0)] lowercase__ : str = ['score', 'label', 'box'] lowercase__ : List[str] = [dict(zip(a , a)) for vals in zip(scores.tolist() , a , a) if vals[0] > threshold] else: # This is a regular ForObjectDetectionModel lowercase__ : Tuple = self.image_processor.post_process_object_detection(a , a , a) lowercase__ : Union[str, Any] = raw_annotations[0] lowercase__ : List[Any] = raw_annotation['scores'] lowercase__ : Optional[int] = raw_annotation['labels'] lowercase__ : List[Any] = raw_annotation['boxes'] lowercase__ : List[str] = scores.tolist() lowercase__ : Any = [self.model.config.idalabel[label.item()] for label in labels] lowercase__ : str = [self._get_bounding_box(a) for box in boxes] # {"scores": [...], ...} --> [{"score":x, ...}, ...] 
lowercase__ : Union[str, Any] = ['score', 'label', 'box'] lowercase__ : Optional[Any] = [ dict(zip(a , a)) for vals in zip(raw_annotation['scores'] , raw_annotation['labels'] , raw_annotation['boxes']) ] return annotation def snake_case_ ( self , a): if self.framework != "pt": raise ValueError('The ObjectDetectionPipeline is only available in PyTorch.') lowercase__ , lowercase__ , lowercase__ , lowercase__ : List[Any] = box.int().tolist() lowercase__ : Optional[Any] = { 'xmin': xmin, 'ymin': ymin, 'xmax': xmax, 'ymax': ymax, } return bbox
164
1
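Since the record above ships three digit-sum implementations, here is a tiny spot check of the iterative variant on the smallest benchmark input (re-implemented inline so the snippet stands alone):

def sum_of_digits(n: int) -> int:
    # Iterative digit sum; sign is discarded via abs().
    n = abs(n)
    res = 0
    while n > 0:
        res += n % 10
        n //= 10
    return res


assert sum_of_digits(262_144) == 2 + 6 + 2 + 1 + 4 + 4 == 19
assert sum_of_digits(-262_144) == 19  # negative input gives the same digit sum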
import argparse import os import evaluate import torch from datasets import load_dataset from torch.optim import AdamW from torch.utils.data import DataLoader from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed from accelerate import Accelerator, DistributedType ######################################################################## # This is a fully working simple example to use Accelerate # and perform gradient accumulation # # This example trains a Bert base model on GLUE MRPC # in any of the following settings (with the same script): # - single CPU or single GPU # - multi GPUS (using PyTorch distributed mode) # - (multi) TPUs # - fp16 (mixed-precision) or fp32 (normal precision) # # To run it in each of these various modes, follow the instructions # in the readme for examples: # https://github.com/huggingface/accelerate/tree/main/examples # ######################################################################## lowerCAmelCase : Optional[Any] =16 lowerCAmelCase : Dict =32 def A__ ( __A , __A = 16 ): '''simple docstring''' _lowerCamelCase : Tuple = AutoTokenizer.from_pretrained("""bert-base-cased""" ) _lowerCamelCase : Optional[int] = load_dataset("""glue""" , """mrpc""" ) def tokenize_function(__A ): # max_length=None => use the model max length (it's actually the default) _lowerCamelCase : Optional[Any] = tokenizer(examples["""sentence1"""] , examples["""sentence2"""] , truncation=__A , max_length=__A ) return outputs # Apply the method we just defined to all the examples in all the splits of the dataset # starting with the main process first: with accelerator.main_process_first(): _lowerCamelCase : Tuple = datasets.map( __A , batched=__A , remove_columns=["""idx""", """sentence1""", """sentence2"""] , ) # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the # transformers library _lowerCamelCase : Union[str, Any] = tokenized_datasets.rename_column("""label""" , """labels""" ) def collate_fn(__A ): # On TPU it's best to pad everything to the same length or training will be very slow. _lowerCamelCase : int = 128 if accelerator.distributed_type == DistributedType.TPU else None # When using mixed precision we want round multiples of 8/16 if accelerator.mixed_precision == "fp8": _lowerCamelCase : Optional[Any] = 16 elif accelerator.mixed_precision != "no": _lowerCamelCase : int = 8 else: _lowerCamelCase : Optional[int] = None return tokenizer.pad( __A , padding="""longest""" , max_length=__A , pad_to_multiple_of=__A , return_tensors="""pt""" , ) # Instantiate dataloaders. 
_lowerCamelCase : int = DataLoader( tokenized_datasets["""train"""] , shuffle=__A , collate_fn=__A , batch_size=__A ) _lowerCamelCase : Any = DataLoader( tokenized_datasets["""validation"""] , shuffle=__A , collate_fn=__A , batch_size=__A ) return train_dataloader, eval_dataloader # For testing only if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1": from accelerate.test_utils.training import mocked_dataloaders lowerCAmelCase : List[Any] =mocked_dataloaders # noqa: F811 def A__ ( __A , __A ): '''simple docstring''' # For testing only if os.environ.get("""TESTING_MOCKED_DATALOADERS""" , __A ) == "1": _lowerCamelCase : Any = 2 # New Code # _lowerCamelCase : Any = int(args.gradient_accumulation_steps ) # Initialize accelerator _lowerCamelCase : Union[str, Any] = Accelerator( cpu=args.cpu , mixed_precision=args.mixed_precision , gradient_accumulation_steps=__A ) if accelerator.distributed_type == DistributedType.TPU and gradient_accumulation_steps > 1: raise NotImplementedError( """Gradient accumulation on TPUs is currently not supported. Pass `gradient_accumulation_steps=1`""" ) # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs _lowerCamelCase : List[str] = config["""lr"""] _lowerCamelCase : Tuple = int(config["""num_epochs"""] ) _lowerCamelCase : Tuple = int(config["""seed"""] ) _lowerCamelCase : Union[str, Any] = int(config["""batch_size"""] ) _lowerCamelCase : List[str] = evaluate.load("""glue""" , """mrpc""" ) set_seed(__A ) _lowerCamelCase , _lowerCamelCase : str = get_dataloaders(__A , __A ) # Instantiate the model (we build the model here so that the seed also control new weights initialization) _lowerCamelCase : str = AutoModelForSequenceClassification.from_pretrained("""bert-base-cased""" , return_dict=__A ) # We could avoid this line since the accelerator is set with `device_placement=True` (default value). # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that). _lowerCamelCase : str = model.to(accelerator.device ) # Instantiate optimizer _lowerCamelCase : List[str] = AdamW(params=model.parameters() , lr=__A ) # Instantiate scheduler _lowerCamelCase : Any = get_linear_schedule_with_warmup( optimizer=__A , num_warmup_steps=100 , num_training_steps=(len(__A ) * num_epochs) , ) # Prepare everything # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the # prepare method. _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase : str = accelerator.prepare( __A , __A , __A , __A , __A ) # Now we train the model for epoch in range(__A ): model.train() for step, batch in enumerate(__A ): # We could avoid this line since we set the accelerator with `device_placement=True`. batch.to(accelerator.device ) # New code # # We use the new `accumulate` context manager to perform gradient accumulation # We also currently do not support TPUs nor advise it as bugs were found on the XLA side when running our tests. with accelerator.accumulate(__A ): _lowerCamelCase : Dict = model(**__A ) _lowerCamelCase : str = output.loss accelerator.backward(__A ) optimizer.step() lr_scheduler.step() optimizer.zero_grad() model.eval() for step, batch in enumerate(__A ): # We could avoid this line since we set the accelerator with `device_placement=True`. 
batch.to(accelerator.device ) with torch.no_grad(): _lowerCamelCase : Optional[Any] = model(**__A ) _lowerCamelCase : Optional[int] = outputs.logits.argmax(dim=-1 ) _lowerCamelCase , _lowerCamelCase : Union[str, Any] = accelerator.gather_for_metrics((predictions, batch["""labels"""]) ) metric.add_batch( predictions=__A , references=__A , ) _lowerCamelCase : Optional[int] = metric.compute() # Use accelerator.print to print only on the main process. accelerator.print(F"""epoch {epoch}:""" , __A ) def A__ ( ): '''simple docstring''' _lowerCamelCase : Optional[int] = argparse.ArgumentParser(description="""Simple example of training script.""" ) parser.add_argument( """--mixed_precision""" , type=__A , default=__A , choices=["""no""", """fp16""", """bf16""", """fp8"""] , help="""Whether to use mixed precision. Choose""" """between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10.""" """and an Nvidia Ampere GPU.""" , ) # New Code # parser.add_argument( """--gradient_accumulation_steps""" , type=__A , default=1 , help="""The number of minibatches to be ran before gradients are accumulated.""" , ) parser.add_argument("""--cpu""" , action="""store_true""" , help="""If passed, will train on the CPU.""" ) _lowerCamelCase : Any = parser.parse_args() _lowerCamelCase : Dict = {"""lr""": 2E-5, """num_epochs""": 3, """seed""": 42, """batch_size""": 16} training_function(__A , __A ) if __name__ == "__main__": main()
15
red = 0  # The first color of the flag.
white = 1  # The second color of the flag.
blue = 2  # The third color of the flag.
colors = (red, white, blue)


def dutch_national_flag_sort(sequence: list) -> list:
    """Sort a sequence of 0s, 1s and 2s in one pass (three-way partition)."""
    if not sequence:
        return []
    if len(sequence) == 1:
        return list(sequence)
    low = 0
    high = len(sequence) - 1
    mid = 0
    while mid <= high:
        if sequence[mid] == colors[0]:
            sequence[low], sequence[mid] = sequence[mid], sequence[low]
            low += 1
            mid += 1
        elif sequence[mid] == colors[1]:
            mid += 1
        elif sequence[mid] == colors[2]:
            sequence[high], sequence[mid] = sequence[mid], sequence[high]
            high -= 1
        else:
            msg = f"The elements inside the sequence must contains only {colors} values"
            raise ValueError(msg)
    return sequence


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    user_input = input("Enter numbers separated by commas:\n").strip()
    unsorted = [int(item.strip()) for item in user_input.split(",")]
    print(f"{dutch_national_flag_sort(unsorted)}")
15
1
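The accumulate() context manager in the training script above hides the bookkeeping that a manual PyTorch loop would do. A minimal sketch of the equivalent manual pattern, in plain PyTorch with random toy data (no accelerate, model and hyperparameters are placeholders):

import torch
from torch import nn

model = nn.Linear(4, 2)
optimizer = torch.optim.AdamW(model.parameters(), lr=2e-5)
accumulation_steps = 4

for step in range(16):
    batch = torch.randn(8, 4)
    target = torch.randint(0, 2, (8,))
    loss = nn.functional.cross_entropy(model(batch), target)
    # Scale so the accumulated gradient matches a single large-batch step.
    (loss / accumulation_steps).backward()
    if (step + 1) % accumulation_steps == 0:
        optimizer.step()
        optimizer.zero_grad()

Gradients simply add up across backward() calls until zero_grad(), which is why dividing each loss by the accumulation count reproduces the large-batch average.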
"""simple docstring""" from __future__ import annotations def UpperCAmelCase__ (snake_case__ : list ): """simple docstring""" if not nums: raise ValueError("""List is empty""" ) return sum(snake_case__ ) / len(snake_case__ ) if __name__ == "__main__": import doctest doctest.testmod()
609
"""simple docstring""" import argparse from collections import defaultdict def UpperCAmelCase__ (snake_case__ : Tuple , snake_case__ : Any , snake_case__ : List[str] , snake_case__ : Union[str, Any] , snake_case__ : str ): """simple docstring""" _snake_case : Dict = F"{file}_{class_name}_{test_name}" done_test[_id] += 1 with open(snake_case__ , """r""" ) as f: _snake_case : Optional[Any] = f.readlines() _snake_case : Tuple = F"class {class_name}(" _snake_case : Tuple = F"{4 * ' '}def {test_name}(" _snake_case : Optional[int] = F"{8 * ' '}{correct_line.split()[0]}" _snake_case : Union[str, Any] = F"{16 * ' '}{correct_line.split()[0]}" _snake_case : Optional[int] = False _snake_case : Optional[int] = False _snake_case : Optional[Any] = False _snake_case : Optional[Any] = False _snake_case : List[Any] = 0 _snake_case : List[Any] = 0 _snake_case : List[str] = [] for line in lines: if line.startswith(snake_case__ ): _snake_case : Union[str, Any] = True elif in_class and line.startswith(snake_case__ ): _snake_case : str = True elif in_class and in_func and (line.startswith(snake_case__ ) or line.startswith(snake_case__ )): _snake_case : Optional[Any] = len(line.split(correct_line.split()[0] )[0] ) count += 1 if count == done_test[_id]: _snake_case : Optional[Any] = True if in_class and in_func and in_line: if ")" not in line: continue else: _snake_case : str = True if in_class and in_func and in_line and insert_line: new_lines.append(F"{spaces * ' '}{correct_line}" ) _snake_case : int = False else: new_lines.append(snake_case__ ) with open(snake_case__ , """w""" ) as f: for line in new_lines: f.write(snake_case__ ) def UpperCAmelCase__ (snake_case__ : int , snake_case__ : Any=None ): """simple docstring""" if fail is not None: with open(snake_case__ , """r""" ) as f: _snake_case : List[Any] = {l.strip() for l in f.readlines()} else: _snake_case : Union[str, Any] = None with open(snake_case__ , """r""" ) as f: _snake_case : Union[str, Any] = f.readlines() _snake_case : List[Any] = defaultdict(snake_case__ ) for line in correct_lines: _snake_case , _snake_case , _snake_case , _snake_case : List[Any] = line.split(""";""" ) if test_failures is None or "::".join([file, class_name, test_name] ) in test_failures: overwrite_file(snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ ) if __name__ == "__main__": A_ = argparse.ArgumentParser() parser.add_argument('''--correct_filename''', help='''filename of tests with expected result''') parser.add_argument('''--fail_filename''', help='''filename of test failures''', type=str, default=None) A_ = parser.parse_args() main(args.correct_filename, args.fail_filename)
609
1
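The expected-results file consumed by the script above is parsed with line.split(";") into exactly four fields, so each input line must follow that shape. A hypothetical example line and how the script unpacks it (the file name and assertion are invented for illustration):

# Field order per line: file;class_name;test_name;correct_line
line = "tests/test_foo.py;FooModelTest;test_forward;self.assertEqual(output.shape, (1, 8))"
file, class_name, test_name, correct_line = line.split(";")
assert (file, class_name, test_name) == ("tests/test_foo.py", "FooModelTest", "test_forward")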
import random
from typing import Any


def fisher_yates_shuffle(data: list) -> list[Any]:
    """Shuffle a list in place by repeatedly swapping two random positions."""
    for _ in range(len(data)):
        a = random.randint(0, len(data) - 1)
        b = random.randint(0, len(data) - 1)
        data[a], data[b] = data[b], data[a]
    return data


if __name__ == "__main__":
    integers = [0, 1, 2, 3, 4, 5, 6, 7]
    strings = ["python", "says", "hello", "!"]
    print("Fisher-Yates Shuffle:")
    print("List", integers, strings)
    print("FY Shuffle", fisher_yates_shuffle(integers), fisher_yates_shuffle(strings))
259
import unittest

from transformers import (
    MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
    TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
    TextClassificationPipeline,
    pipeline,
)
from transformers.testing_utils import is_pipeline_test, nested_simplify, require_tf, require_torch, slow

from .test_pipelines_common import ANY


# These 2 model types require different inputs than those of the usual text models.
_TO_SKIP = {"LayoutLMv2Config", "LayoutLMv3Config"}


@is_pipeline_test
class TextClassificationPipelineTests(unittest.TestCase):
    model_mapping = MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
    tf_model_mapping = TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING

    if model_mapping is not None:
        model_mapping = {config: model for config, model in model_mapping.items() if config.__name__ not in _TO_SKIP}
    if tf_model_mapping is not None:
        tf_model_mapping = {
            config: model for config, model in tf_model_mapping.items() if config.__name__ not in _TO_SKIP
        }

    @require_torch
    def test_small_model_pt(self):
        text_classifier = pipeline(
            task="text-classification", model="hf-internal-testing/tiny-random-distilbert", framework="pt"
        )

        outputs = text_classifier("This is great !")
        self.assertEqual(nested_simplify(outputs), [{"label": "LABEL_0", "score": 0.504}])

        outputs = text_classifier("This is great !", top_k=2)
        self.assertEqual(
            nested_simplify(outputs), [{"label": "LABEL_0", "score": 0.504}, {"label": "LABEL_1", "score": 0.496}]
        )

        outputs = text_classifier(["This is great !", "This is bad"], top_k=2)
        self.assertEqual(
            nested_simplify(outputs),
            [
                [{"label": "LABEL_0", "score": 0.504}, {"label": "LABEL_1", "score": 0.496}],
                [{"label": "LABEL_0", "score": 0.504}, {"label": "LABEL_1", "score": 0.496}],
            ],
        )

        outputs = text_classifier("This is great !", top_k=1)
        self.assertEqual(nested_simplify(outputs), [{"label": "LABEL_0", "score": 0.504}])

        # Legacy behavior
        outputs = text_classifier("This is great !", return_all_scores=False)
        self.assertEqual(nested_simplify(outputs), [{"label": "LABEL_0", "score": 0.504}])

        outputs = text_classifier("This is great !", return_all_scores=True)
        self.assertEqual(
            nested_simplify(outputs), [[{"label": "LABEL_0", "score": 0.504}, {"label": "LABEL_1", "score": 0.496}]]
        )

        outputs = text_classifier(["This is great !", "Something else"], return_all_scores=True)
        self.assertEqual(
            nested_simplify(outputs),
            [
                [{"label": "LABEL_0", "score": 0.504}, {"label": "LABEL_1", "score": 0.496}],
                [{"label": "LABEL_0", "score": 0.504}, {"label": "LABEL_1", "score": 0.496}],
            ],
        )

        outputs = text_classifier(["This is great !", "Something else"], return_all_scores=False)
        self.assertEqual(
            nested_simplify(outputs),
            [
                {"label": "LABEL_0", "score": 0.504},
                {"label": "LABEL_0", "score": 0.504},
            ],
        )

    @require_torch
    def test_accepts_torch_device(self):
        import torch

        text_classifier = pipeline(
            task="text-classification",
            model="hf-internal-testing/tiny-random-distilbert",
            framework="pt",
            device=torch.device("cpu"),
        )

        outputs = text_classifier("This is great !")
        self.assertEqual(nested_simplify(outputs), [{"label": "LABEL_0", "score": 0.504}])

    @require_tf
    def test_small_model_tf(self):
        text_classifier = pipeline(
            task="text-classification", model="hf-internal-testing/tiny-random-distilbert", framework="tf"
        )

        outputs = text_classifier("This is great !")
        self.assertEqual(nested_simplify(outputs), [{"label": "LABEL_0", "score": 0.504}])

    @slow
    @require_torch
    def test_pt_bert(self):
        text_classifier = pipeline("text-classification")

        outputs = text_classifier("This is great !")
        self.assertEqual(nested_simplify(outputs), [{"label": "POSITIVE", "score": 1.0}])
        outputs = text_classifier("This is bad !")
        self.assertEqual(nested_simplify(outputs), [{"label": "NEGATIVE", "score": 1.0}])
        outputs = text_classifier("Birds are a type of animal")
        self.assertEqual(nested_simplify(outputs), [{"label": "POSITIVE", "score": 0.988}])

    @slow
    @require_tf
    def test_tf_bert(self):
        text_classifier = pipeline("text-classification", framework="tf")

        outputs = text_classifier("This is great !")
        self.assertEqual(nested_simplify(outputs), [{"label": "POSITIVE", "score": 1.0}])
        outputs = text_classifier("This is bad !")
        self.assertEqual(nested_simplify(outputs), [{"label": "NEGATIVE", "score": 1.0}])
        outputs = text_classifier("Birds are a type of animal")
        self.assertEqual(nested_simplify(outputs), [{"label": "POSITIVE", "score": 0.988}])

    def get_test_pipeline(self, model, tokenizer, processor):
        text_classifier = TextClassificationPipeline(model=model, tokenizer=tokenizer)
        return text_classifier, ["HuggingFace is in", "This is another test"]

    def run_pipeline_test(self, text_classifier, _):
        model = text_classifier.model
        # Small inputs because BartTokenizer tiny has maximum position embeddings = 22
        valid_inputs = "HuggingFace is in"
        outputs = text_classifier(valid_inputs)

        self.assertEqual(nested_simplify(outputs), [{"label": ANY(str), "score": ANY(float)}])
        self.assertTrue(outputs[0]["label"] in model.config.id2label.values())

        valid_inputs = ["HuggingFace is in ", "Paris is in France"]
        outputs = text_classifier(valid_inputs)
        self.assertEqual(
            nested_simplify(outputs),
            [{"label": ANY(str), "score": ANY(float)}, {"label": ANY(str), "score": ANY(float)}],
        )
        self.assertTrue(outputs[0]["label"] in model.config.id2label.values())
        self.assertTrue(outputs[1]["label"] in model.config.id2label.values())

        # Forcing to get all results with `top_k=None`
        # This is NOT the legacy format
        outputs = text_classifier(valid_inputs, top_k=None)
        N = len(model.config.id2label.values())
        self.assertEqual(
            nested_simplify(outputs),
            [[{"label": ANY(str), "score": ANY(float)}] * N, [{"label": ANY(str), "score": ANY(float)}] * N],
        )

        valid_inputs = {"text": "HuggingFace is in ", "text_pair": "Paris is in France"}
        outputs = text_classifier(valid_inputs)
        self.assertEqual(
            nested_simplify(outputs),
            {"label": ANY(str), "score": ANY(float)},
        )
        self.assertTrue(outputs["label"] in model.config.id2label.values())

        # This might be used a text pair, but tokenizer + pipe interaction
        # makes it hard to understand that it's not using the pair properly
        # https://github.com/huggingface/transformers/issues/17305
        # We disabled this usage instead as it was outputting wrong outputs.
        invalid_input = [["HuggingFace is in ", "Paris is in France"]]
        with self.assertRaises(ValueError):
            text_classifier(invalid_input)

        # This used to be valid for doing text pairs
        # We're keeping it working because of backward compatibility
        outputs = text_classifier([[["HuggingFace is in ", "Paris is in France"]]])
        self.assertEqual(
            nested_simplify(outputs),
            [{"label": ANY(str), "score": ANY(float)}],
        )
        self.assertTrue(outputs[0]["label"] in model.config.id2label.values())
"""simple docstring""" import numpy as np import torch from torch.utils.data import DataLoader from accelerate.utils.dataclasses import DistributedType class a : def __init__( self , _snake_case=2 , _snake_case=3 , _snake_case=64 , _snake_case=None ): """simple docstring""" lowerCAmelCase = np.random.default_rng(SCREAMING_SNAKE_CASE_ ) lowerCAmelCase = length lowerCAmelCase = rng.normal(size=(length,) ).astype(np.floataa ) lowerCAmelCase = a * self.x + b + rng.normal(scale=0.1 , size=(length,) ).astype(np.floataa ) def __len__( self ): """simple docstring""" return self.length def __getitem__( self , _snake_case ): """simple docstring""" return {"x": self.x[i], "y": self.y[i]} class a ( torch.nn.Module ): def __init__( self , _snake_case=0 , _snake_case=0 , _snake_case=False ): """simple docstring""" super().__init__() lowerCAmelCase = torch.nn.Parameter(torch.tensor([2, 3] ).float() ) lowerCAmelCase = torch.nn.Parameter(torch.tensor([2, 3] ).float() ) lowerCAmelCase = True def UpperCamelCase__ ( self , _snake_case=None ): """simple docstring""" if self.first_batch: print(F'Model dtype: {self.a.dtype}, {self.b.dtype}. Input dtype: {x.dtype}' ) lowerCAmelCase = False return x * self.a[0] + self.b[0] class a ( torch.nn.Module ): def __init__( self , _snake_case=0 , _snake_case=0 , _snake_case=False ): """simple docstring""" super().__init__() lowerCAmelCase = torch.nn.Parameter(torch.tensor(SCREAMING_SNAKE_CASE_ ).float() ) lowerCAmelCase = torch.nn.Parameter(torch.tensor(SCREAMING_SNAKE_CASE_ ).float() ) lowerCAmelCase = True def UpperCamelCase__ ( self , _snake_case=None ): """simple docstring""" if self.first_batch: print(F'Model dtype: {self.a.dtype}, {self.b.dtype}. Input dtype: {x.dtype}' ) lowerCAmelCase = False return x * self.a + self.b def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : Dict , _UpperCAmelCase : int = 16 ): from datasets import load_dataset from transformers import AutoTokenizer lowerCAmelCase = AutoTokenizer.from_pretrained('bert-base-cased' ) lowerCAmelCase = {'train': 'tests/test_samples/MRPC/train.csv', 'validation': 'tests/test_samples/MRPC/dev.csv'} lowerCAmelCase = load_dataset('csv' , data_files=_lowerCamelCase ) lowerCAmelCase = datasets['train'].unique('label' ) lowerCAmelCase = {v: i for i, v in enumerate(_lowerCamelCase )} def tokenize_function(_UpperCAmelCase : List[str] ): # max_length=None => use the model max length (it's actually the default) lowerCAmelCase = tokenizer( examples['sentence1'] , examples['sentence2'] , truncation=_lowerCamelCase , max_length=_lowerCamelCase , padding='max_length' ) if "label" in examples: lowerCAmelCase = [label_to_id[l] for l in examples['label']] return outputs # Apply the method we just defined to all the examples in all the splits of the dataset lowerCAmelCase = datasets.map( _lowerCamelCase , batched=_lowerCamelCase , remove_columns=['sentence1', 'sentence2', 'label'] , ) def collate_fn(_UpperCAmelCase : Tuple ): # On TPU it's best to pad everything to the same length or training will be very slow. if accelerator.distributed_type == DistributedType.TPU: return tokenizer.pad(_lowerCamelCase , padding='max_length' , max_length=128 , return_tensors='pt' ) return tokenizer.pad(_lowerCamelCase , padding='longest' , return_tensors='pt' ) # Instantiate dataloaders. 
lowerCAmelCase = DataLoader(tokenized_datasets['train'] , shuffle=_lowerCamelCase , collate_fn=_lowerCamelCase , batch_size=2 ) lowerCAmelCase = DataLoader(tokenized_datasets['validation'] , shuffle=_lowerCamelCase , collate_fn=_lowerCamelCase , batch_size=1 ) return train_dataloader, eval_dataloader
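# A minimal usage sketch (not part of the original file): fit RegressionModel on
# RegressionDataset with Accelerate. The hyperparameters here are arbitrary.
import torch
from torch.utils.data import DataLoader

from accelerate import Accelerator


def _demo_training_loop():
    accelerator = Accelerator()
    dataset = RegressionDataset(length=64, seed=42)
    dataloader = DataLoader(dataset, batch_size=8)
    model = RegressionModel()
    optimizer = torch.optim.SGD(model.parameters(), lr=0.1)
    model, optimizer, dataloader = accelerator.prepare(model, optimizer, dataloader)
    for batch in dataloader:
        optimizer.zero_grad()
        loss = torch.nn.functional.mse_loss(model(batch["x"]), batch["y"])
        accelerator.backward(loss)
        optimizer.step()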
"""simple docstring""" import timeit import numpy as np import datasets from datasets.arrow_writer import ArrowWriter from datasets.features.features import _ArrayXD def __magic_name__ ( _lowerCamelCase: Optional[Any] ) -> Dict: '''simple docstring''' def wrapper(*_lowerCamelCase: Any, **_lowerCamelCase: Union[str, Any] ): lowerCAmelCase = timeit.default_timer() lowerCAmelCase = func(*_lowerCamelCase, **_lowerCamelCase ) lowerCAmelCase = timeit.default_timer() - starttime return delta lowerCAmelCase = func.__name__ return wrapper def __magic_name__ ( _lowerCamelCase: dict, _lowerCamelCase: List[Any]=100, _lowerCamelCase: int=None ) -> List[Any]: '''simple docstring''' lowerCAmelCase = [] lowerCAmelCase = seq_shapes or {} for i in range(_lowerCamelCase ): lowerCAmelCase = {} for col_id, (k, v) in enumerate(features.items() ): if isinstance(_lowerCamelCase, _ArrayXD ): lowerCAmelCase = np.random.rand(*v.shape ).astype(v.dtype ) elif isinstance(_lowerCamelCase, datasets.Value ): if v.dtype == "string": lowerCAmelCase = '''The small grey turtle was surprisingly fast when challenged.''' else: lowerCAmelCase = np.random.randint(10, size=1 ).astype(v.dtype ).item() elif isinstance(_lowerCamelCase, datasets.Sequence ): while isinstance(_lowerCamelCase, datasets.Sequence ): lowerCAmelCase = v.feature lowerCAmelCase = seq_shapes[k] lowerCAmelCase = np.random.rand(*_lowerCamelCase ).astype(v.dtype ) lowerCAmelCase = data dummy_data.append((i, example) ) return dummy_data def __magic_name__ ( _lowerCamelCase: Tuple, _lowerCamelCase: Tuple, _lowerCamelCase: Union[str, Any]=100, _lowerCamelCase: List[Any]=None ) -> List[Any]: '''simple docstring''' lowerCAmelCase = generate_examples(_lowerCamelCase, num_examples=_lowerCamelCase, seq_shapes=_lowerCamelCase ) with ArrowWriter(features=_lowerCamelCase, path=_lowerCamelCase ) as writer: for key, record in dummy_data: lowerCAmelCase = features.encode_example(_lowerCamelCase ) writer.write(_lowerCamelCase ) lowerCAmelCase , lowerCAmelCase = writer.finalize() if not num_final_examples == num_examples: raise ValueError( F"""Error writing the dataset, wrote {num_final_examples} examples but should have written {num_examples}.""" ) lowerCAmelCase = datasets.Dataset.from_file(filename=_lowerCamelCase, info=datasets.DatasetInfo(features=_lowerCamelCase ) ) return dataset
"""simple docstring""" import argparse import json from pathlib import Path import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import ( BertTokenizer, ViltConfig, ViltForImageAndTextRetrieval, ViltForImagesAndTextClassification, ViltForMaskedLM, ViltForQuestionAnswering, ViltImageProcessor, ViltProcessor, ) from transformers.utils import logging logging.set_verbosity_info() lowercase__ = logging.get_logger(__name__) def _snake_case ( lowercase__ , lowercase__=False , lowercase__=False , lowercase__=False ): _lowerCamelCase : Tuple = [] for i in range(config.num_hidden_layers ): # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms rename_keys.append((f'''transformer.blocks.{i}.norm1.weight''', f'''vilt.encoder.layer.{i}.layernorm_before.weight''') ) rename_keys.append((f'''transformer.blocks.{i}.norm1.bias''', f'''vilt.encoder.layer.{i}.layernorm_before.bias''') ) rename_keys.append( (f'''transformer.blocks.{i}.attn.proj.weight''', f'''vilt.encoder.layer.{i}.attention.output.dense.weight''') ) rename_keys.append( (f'''transformer.blocks.{i}.attn.proj.bias''', f'''vilt.encoder.layer.{i}.attention.output.dense.bias''') ) rename_keys.append((f'''transformer.blocks.{i}.norm2.weight''', f'''vilt.encoder.layer.{i}.layernorm_after.weight''') ) rename_keys.append((f'''transformer.blocks.{i}.norm2.bias''', f'''vilt.encoder.layer.{i}.layernorm_after.bias''') ) rename_keys.append( (f'''transformer.blocks.{i}.mlp.fc1.weight''', f'''vilt.encoder.layer.{i}.intermediate.dense.weight''') ) rename_keys.append((f'''transformer.blocks.{i}.mlp.fc1.bias''', f'''vilt.encoder.layer.{i}.intermediate.dense.bias''') ) rename_keys.append((f'''transformer.blocks.{i}.mlp.fc2.weight''', f'''vilt.encoder.layer.{i}.output.dense.weight''') ) rename_keys.append((f'''transformer.blocks.{i}.mlp.fc2.bias''', f'''vilt.encoder.layer.{i}.output.dense.bias''') ) # embeddings rename_keys.extend( [ # text embeddings ('text_embeddings.word_embeddings.weight', 'vilt.embeddings.text_embeddings.word_embeddings.weight'), ( 'text_embeddings.position_embeddings.weight', 'vilt.embeddings.text_embeddings.position_embeddings.weight', ), ('text_embeddings.position_ids', 'vilt.embeddings.text_embeddings.position_ids'), ( 'text_embeddings.token_type_embeddings.weight', 'vilt.embeddings.text_embeddings.token_type_embeddings.weight', ), ('text_embeddings.LayerNorm.weight', 'vilt.embeddings.text_embeddings.LayerNorm.weight'), ('text_embeddings.LayerNorm.bias', 'vilt.embeddings.text_embeddings.LayerNorm.bias'), # patch embeddings ('transformer.cls_token', 'vilt.embeddings.cls_token'), ('transformer.patch_embed.proj.weight', 'vilt.embeddings.patch_embeddings.projection.weight'), ('transformer.patch_embed.proj.bias', 'vilt.embeddings.patch_embeddings.projection.bias'), ('transformer.pos_embed', 'vilt.embeddings.position_embeddings'), # token type embeddings ('token_type_embeddings.weight', 'vilt.embeddings.token_type_embeddings.weight'), ] ) # final layernorm + pooler rename_keys.extend( [ ('transformer.norm.weight', 'vilt.layernorm.weight'), ('transformer.norm.bias', 'vilt.layernorm.bias'), ('pooler.dense.weight', 'vilt.pooler.dense.weight'), ('pooler.dense.bias', 'vilt.pooler.dense.bias'), ] ) # classifier head(s) if vqa_model: # classification head rename_keys.extend( [ ('vqa_classifier.0.weight', 'classifier.0.weight'), ('vqa_classifier.0.bias', 'classifier.0.bias'), ('vqa_classifier.1.weight', 'classifier.1.weight'), ('vqa_classifier.1.bias', 
'classifier.1.bias'), ('vqa_classifier.3.weight', 'classifier.3.weight'), ('vqa_classifier.3.bias', 'classifier.3.bias'), ] ) elif nlvr_model: # classification head rename_keys.extend( [ ('nlvr2_classifier.0.weight', 'classifier.0.weight'), ('nlvr2_classifier.0.bias', 'classifier.0.bias'), ('nlvr2_classifier.1.weight', 'classifier.1.weight'), ('nlvr2_classifier.1.bias', 'classifier.1.bias'), ('nlvr2_classifier.3.weight', 'classifier.3.weight'), ('nlvr2_classifier.3.bias', 'classifier.3.bias'), ] ) else: pass return rename_keys def _snake_case ( lowercase__ , lowercase__ ): for i in range(config.num_hidden_layers ): _lowerCamelCase : Optional[int] = 'vilt.' # read in weights + bias of input projection layer (in timm, this is a single matrix + bias) _lowerCamelCase : Any = state_dict.pop(f'''transformer.blocks.{i}.attn.qkv.weight''' ) _lowerCamelCase : int = state_dict.pop(f'''transformer.blocks.{i}.attn.qkv.bias''' ) # next, add query, keys and values (in that order) to the state dict _lowerCamelCase : int = in_proj_weight[ : config.hidden_size, : ] _lowerCamelCase : Dict = in_proj_bias[: config.hidden_size] _lowerCamelCase : int = in_proj_weight[ config.hidden_size : config.hidden_size * 2, : ] _lowerCamelCase : Any = in_proj_bias[ config.hidden_size : config.hidden_size * 2 ] _lowerCamelCase : int = in_proj_weight[ -config.hidden_size :, : ] _lowerCamelCase : List[Any] = in_proj_bias[-config.hidden_size :] def _snake_case ( lowercase__ ): _lowerCamelCase : List[Any] = ['head.weight', 'head.bias'] for k in ignore_keys: state_dict.pop(lowercase__ , lowercase__ ) def _snake_case ( lowercase__ , lowercase__ , lowercase__ ): _lowerCamelCase : Any = dct.pop(lowercase__ ) _lowerCamelCase : Tuple = val @torch.no_grad() def _snake_case ( lowercase__ , lowercase__ ): _lowerCamelCase : Union[str, Any] = ViltConfig(image_size=384 , patch_size=32 , tie_word_embeddings=lowercase__ ) _lowerCamelCase : int = False _lowerCamelCase : Optional[int] = False _lowerCamelCase : Optional[Any] = False _lowerCamelCase : Any = False if "vqa" in checkpoint_url: _lowerCamelCase : Tuple = True _lowerCamelCase : Optional[int] = 3129 _lowerCamelCase : List[Any] = 'huggingface/label-files' _lowerCamelCase : List[Any] = 'vqa2-id2label.json' _lowerCamelCase : Union[str, Any] = json.load(open(hf_hub_download(lowercase__ , lowercase__ , repo_type='dataset' ) , 'r' ) ) _lowerCamelCase : Any = {int(lowercase__ ): v for k, v in idalabel.items()} _lowerCamelCase : int = idalabel _lowerCamelCase : List[Any] = {v: k for k, v in idalabel.items()} _lowerCamelCase : Tuple = ViltForQuestionAnswering(lowercase__ ) elif "nlvr" in checkpoint_url: _lowerCamelCase : Tuple = True _lowerCamelCase : int = 2 _lowerCamelCase : List[str] = {0: 'False', 1: 'True'} _lowerCamelCase : Optional[int] = {v: k for k, v in config.idalabel.items()} _lowerCamelCase : Any = 3 _lowerCamelCase : Union[str, Any] = ViltForImagesAndTextClassification(lowercase__ ) elif "irtr" in checkpoint_url: _lowerCamelCase : List[str] = True _lowerCamelCase : List[str] = ViltForImageAndTextRetrieval(lowercase__ ) elif "mlm_itm" in checkpoint_url: _lowerCamelCase : str = True _lowerCamelCase : Dict = ViltForMaskedLM(lowercase__ ) else: raise ValueError('Unknown model type' ) # load state_dict of original model, remove and rename some keys _lowerCamelCase : Optional[Any] = torch.hub.load_state_dict_from_url(lowercase__ , map_location='cpu' )['state_dict'] _lowerCamelCase : Dict = create_rename_keys(lowercase__ , lowercase__ , lowercase__ , lowercase__ ) for src, dest in 
rename_keys: rename_key(lowercase__ , lowercase__ , lowercase__ ) read_in_q_k_v(lowercase__ , lowercase__ ) if mlm_model or irtr_model: _lowerCamelCase : Any = ['itm_score.fc.weight', 'itm_score.fc.bias'] for k in ignore_keys: state_dict.pop(lowercase__ , lowercase__ ) # load state dict into HuggingFace model model.eval() if mlm_model: _lowerCamelCase : int = model.load_state_dict(lowercase__ , strict=lowercase__ ) assert missing_keys == ["mlm_score.decoder.bias"] else: model.load_state_dict(lowercase__ ) # Define processor _lowerCamelCase : Optional[int] = ViltImageProcessor(size=384 ) _lowerCamelCase : int = BertTokenizer.from_pretrained('bert-base-uncased' ) _lowerCamelCase : Optional[Any] = ViltProcessor(lowercase__ , lowercase__ ) # Forward pass on example inputs (image + text) if nlvr_model: _lowerCamelCase : Optional[Any] = Image.open(requests.get('https://lil.nlp.cornell.edu/nlvr/exs/ex0_0.jpg' , stream=lowercase__ ).raw ) _lowerCamelCase : Any = Image.open(requests.get('https://lil.nlp.cornell.edu/nlvr/exs/ex0_0.jpg' , stream=lowercase__ ).raw ) _lowerCamelCase : int = ( 'The left image contains twice the number of dogs as the right image, and at least two dogs in total are' ' standing.' ) _lowerCamelCase : Tuple = processor(lowercase__ , lowercase__ , return_tensors='pt' ) _lowerCamelCase : Tuple = processor(lowercase__ , lowercase__ , return_tensors='pt' ) _lowerCamelCase : Dict = model( input_ids=encoding_a.input_ids , pixel_values=encoding_a.pixel_values , pixel_values_a=encoding_a.pixel_values , ) else: _lowerCamelCase : Any = Image.open(requests.get('http://images.cocodataset.org/val2017/000000039769.jpg' , stream=lowercase__ ).raw ) if mlm_model: _lowerCamelCase : Tuple = 'a bunch of [MASK] laying on a [MASK].' else: _lowerCamelCase : Any = 'How many cats are there?' 
_lowerCamelCase : Union[str, Any] = processor(lowercase__ , lowercase__ , return_tensors='pt' ) _lowerCamelCase : int = model(**lowercase__ ) # Verify outputs if mlm_model: _lowerCamelCase : Union[str, Any] = torch.Size([1, 11, 30522] ) _lowerCamelCase : Optional[Any] = torch.tensor([-12.5061, -12.5123, -12.5174] ) assert outputs.logits.shape == expected_shape assert torch.allclose(outputs.logits[0, 0, :3] , lowercase__ , atol=1E-4 ) # verify masked token prediction equals "cats" _lowerCamelCase : List[str] = outputs.logits[0, 4, :].argmax(-1 ).item() assert tokenizer.decode([predicted_id] ) == "cats" elif vqa_model: _lowerCamelCase : List[Any] = torch.Size([1, 3129] ) _lowerCamelCase : List[Any] = torch.tensor([-15.9495, -18.1472, -10.3041] ) assert torch.allclose(outputs.logits[0, :3] , lowercase__ , atol=1E-4 ) assert outputs.logits.shape == expected_shape assert torch.allclose(outputs.logits[0, 0, :3] , lowercase__ , atol=1E-4 ) # verify vqa prediction equals "2" _lowerCamelCase : Tuple = outputs.logits.argmax(-1 ).item() assert model.config.idalabel[predicted_idx] == "2" elif nlvr_model: _lowerCamelCase : Union[str, Any] = torch.Size([1, 2] ) _lowerCamelCase : int = torch.tensor([-2.8_7_2_1, 2.1_2_9_1] ) assert torch.allclose(outputs.logits[0, :3] , lowercase__ , atol=1E-4 ) assert outputs.logits.shape == expected_shape Path(lowercase__ ).mkdir(exist_ok=lowercase__ ) print(f'''Saving model and processor to {pytorch_dump_folder_path}''' ) model.save_pretrained(lowercase__ ) processor.save_pretrained(lowercase__ ) if __name__ == "__main__": lowercase__ = argparse.ArgumentParser() # Required parameters parser.add_argument( """--checkpoint_url""", default="""https://github.com/dandelin/ViLT/releases/download/200k/vilt_200k_mlm_itm.ckpt""", type=str, help="""URL of the checkpoint you'd like to convert.""", ) parser.add_argument( """--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory.""" ) lowercase__ = parser.parse_args() convert_vilt_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
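# Example invocation of the converter above (the output directory name is arbitrary,
# and the script filename is assumed):
#
#   python convert_vilt_original_to_pytorch.py \
#       --checkpoint_url https://github.com/dandelin/ViLT/releases/download/200k/vilt_200k_mlm_itm.ckpt \
#       --pytorch_dump_folder_path ./vilt-b32-mlm-itm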
"""simple docstring""" def _snake_case ( lowercase__ ): _lowerCamelCase : Optional[int] = int(lowercase__ ) if decimal in (0, 1): # Exit cases for the recursion return str(lowercase__ ) _lowerCamelCase, _lowerCamelCase : Dict = divmod(lowercase__ , 2 ) return binary_recursive(lowercase__ ) + str(lowercase__ ) def _snake_case ( lowercase__ ): _lowerCamelCase : List[Any] = str(lowercase__ ).strip() if not number: raise ValueError('No input value was provided' ) _lowerCamelCase : str = '-' if number.startswith('-' ) else '' _lowerCamelCase : Union[str, Any] = number.lstrip('-' ) if not number.isnumeric(): raise ValueError('Input value is not an integer' ) return f'''{negative}0b{binary_recursive(int(lowercase__ ) )}''' if __name__ == "__main__": from doctest import testmod testmod()
"""simple docstring""" def lowerCAmelCase_ (_SCREAMING_SNAKE_CASE :str , _SCREAMING_SNAKE_CASE :str ) -> Optional[Any]: assert x is not None assert y is not None a_ : Optional[int] = len(_SCREAMING_SNAKE_CASE ) a_ : Dict = len(_SCREAMING_SNAKE_CASE ) # declaring the array for storing the dp values a_ : Any = [[0] * (n + 1) for _ in range(m + 1 )] # noqa: E741 for i in range(1 , m + 1 ): for j in range(1 , n + 1 ): a_ : Tuple = 1 if x[i - 1] == y[j - 1] else 0 a_ : Dict = max(l[i - 1][j] , l[i][j - 1] , l[i - 1][j - 1] + match ) a_ : str = "" a_ , a_ : Tuple = m, n while i > 0 and j > 0: a_ : Optional[Any] = 1 if x[i - 1] == y[j - 1] else 0 if l[i][j] == l[i - 1][j - 1] + match: if match == 1: a_ : str = x[i - 1] + seq i -= 1 j -= 1 elif l[i][j] == l[i - 1][j]: i -= 1 else: j -= 1 return l[m][n], seq if __name__ == "__main__": UpperCamelCase = 'AGGTAB' UpperCamelCase = 'GXTXAYB' UpperCamelCase = 4 UpperCamelCase = 'GTAB' UpperCamelCase , UpperCamelCase = longest_common_subsequence(a, b) print('len =', ln, ', sub-sequence =', subseq) import doctest doctest.testmod()
"""simple docstring""" import argparse import json from typing import List from ltp import LTP from transformers.models.bert.tokenization_bert import BertTokenizer def lowerCAmelCase_ (_SCREAMING_SNAKE_CASE :Optional[Any] ) -> List[Any]: # This defines a "chinese character" as anything in the CJK Unicode block: # https://en.wikipedia.org/wiki/CJK_Unified_Ideographs_(Unicode_block) # # Note that the CJK Unicode block is NOT all Japanese and Korean characters, # despite its name. The modern Korean Hangul alphabet is a different block, # as is Japanese Hiragana and Katakana. Those alphabets are used to write # space-separated words, so they are not treated specially and handled # like the all of the other languages. if ( (cp >= 0x4e00 and cp <= 0x9fff) or (cp >= 0x3400 and cp <= 0x4dbf) # or (cp >= 0x2_0000 and cp <= 0x2_a6df) # or (cp >= 0x2_a700 and cp <= 0x2_b73f) # or (cp >= 0x2_b740 and cp <= 0x2_b81f) # or (cp >= 0x2_b820 and cp <= 0x2_ceaf) # or (cp >= 0xf900 and cp <= 0xfaff) or (cp >= 0x2_f800 and cp <= 0x2_fa1f) # ): # return True return False def lowerCAmelCase_ (_SCREAMING_SNAKE_CASE :str ) -> Union[str, Any]: # word like '180' or '身高' or '神' for char in word: a_ : Union[str, Any] = ord(_SCREAMING_SNAKE_CASE ) if not _is_chinese_char(_SCREAMING_SNAKE_CASE ): return 0 return 1 def lowerCAmelCase_ (_SCREAMING_SNAKE_CASE :List[str] ) -> Dict: a_ : int = set() for token in tokens: a_ : Any = len(_SCREAMING_SNAKE_CASE ) > 1 and is_chinese(_SCREAMING_SNAKE_CASE ) if chinese_word: word_set.add(_SCREAMING_SNAKE_CASE ) a_ : int = list(_SCREAMING_SNAKE_CASE ) return word_list def lowerCAmelCase_ (_SCREAMING_SNAKE_CASE :List[str] , _SCREAMING_SNAKE_CASE :set() ) -> Dict: if not chinese_word_set: return bert_tokens a_ : Dict = max([len(_SCREAMING_SNAKE_CASE ) for w in chinese_word_set] ) a_ : int = bert_tokens a_ , a_ : int = 0, len(_SCREAMING_SNAKE_CASE ) while start < end: a_ : List[Any] = True if is_chinese(bert_word[start] ): a_ : Dict = min(end - start , _SCREAMING_SNAKE_CASE ) for i in range(_SCREAMING_SNAKE_CASE , 1 , -1 ): a_ : Optional[int] = "".join(bert_word[start : start + i] ) if whole_word in chinese_word_set: for j in range(start + 1 , start + i ): a_ : Any = "##" + bert_word[j] a_ : int = start + i a_ : Union[str, Any] = False break if single_word: start += 1 return bert_word def lowerCAmelCase_ (_SCREAMING_SNAKE_CASE :List[str] , _SCREAMING_SNAKE_CASE :LTP , _SCREAMING_SNAKE_CASE :BertTokenizer ) -> str: a_ : Union[str, Any] = [] for i in range(0 , len(_SCREAMING_SNAKE_CASE ) , 100 ): a_ : Union[str, Any] = ltp_tokenizer.pipeline(lines[i : i + 100] , tasks=["cws"] ).cws a_ : Union[str, Any] = [get_chinese_word(_SCREAMING_SNAKE_CASE ) for r in res] ltp_res.extend(_SCREAMING_SNAKE_CASE ) assert len(_SCREAMING_SNAKE_CASE ) == len(_SCREAMING_SNAKE_CASE ) a_ : Tuple = [] for i in range(0 , len(_SCREAMING_SNAKE_CASE ) , 100 ): a_ : Union[str, Any] = bert_tokenizer(lines[i : i + 100] , add_special_tokens=_SCREAMING_SNAKE_CASE , truncation=_SCREAMING_SNAKE_CASE , max_length=512 ) bert_res.extend(res["input_ids"] ) assert len(_SCREAMING_SNAKE_CASE ) == len(_SCREAMING_SNAKE_CASE ) a_ : int = [] for input_ids, chinese_word in zip(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): a_ : Any = [] for id in input_ids: a_ : Any = bert_tokenizer._convert_id_to_token(_SCREAMING_SNAKE_CASE ) input_tokens.append(_SCREAMING_SNAKE_CASE ) a_ : int = add_sub_symbol(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) a_ : List[str] = [] # We only save pos of chinese subwords start with ##, which mean is 
part of a whole word. for i, token in enumerate(_SCREAMING_SNAKE_CASE ): if token[:2] == "##": a_ : List[Any] = token[2:] # save chinese tokens' pos if len(_SCREAMING_SNAKE_CASE ) == 1 and _is_chinese_char(ord(_SCREAMING_SNAKE_CASE ) ): ref_id.append(_SCREAMING_SNAKE_CASE ) ref_ids.append(_SCREAMING_SNAKE_CASE ) assert len(_SCREAMING_SNAKE_CASE ) == len(_SCREAMING_SNAKE_CASE ) return ref_ids def lowerCAmelCase_ (_SCREAMING_SNAKE_CASE :Any ) -> str: # For Chinese (Ro)Bert, the best result is from : RoBERTa-wwm-ext (https://github.com/ymcui/Chinese-BERT-wwm) # If we want to fine-tune these model, we have to use same tokenizer : LTP (https://github.com/HIT-SCIR/ltp) with open(args.file_name , "r" , encoding="utf-8" ) as f: a_ : Optional[Any] = f.readlines() a_ : Optional[int] = [line.strip() for line in data if len(_SCREAMING_SNAKE_CASE ) > 0 and not line.isspace()] # avoid delimiter like '\u2029' a_ : Tuple = LTP(args.ltp ) # faster in GPU device a_ : int = BertTokenizer.from_pretrained(args.bert ) a_ : List[str] = prepare_ref(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) with open(args.save_path , "w" , encoding="utf-8" ) as f: a_ : List[str] = [json.dumps(_SCREAMING_SNAKE_CASE ) + "\n" for ref in ref_ids] f.writelines(_SCREAMING_SNAKE_CASE ) if __name__ == "__main__": UpperCamelCase = argparse.ArgumentParser(description='prepare_chinese_ref') parser.add_argument( '--file_name', required=False, type=str, default='./resources/chinese-demo.txt', help='file need process, same as training data in lm', ) parser.add_argument( '--ltp', required=False, type=str, default='./resources/ltp', help='resources for LTP tokenizer, usually a path', ) parser.add_argument( '--bert', required=False, type=str, default='./resources/robert', help='resources for Bert tokenizer', ) parser.add_argument( '--save_path', required=False, type=str, default='./resources/ref.txt', help='path to save res', ) UpperCamelCase = parser.parse_args() main(args)
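# Example invocation (paths follow the argparse defaults above; the script filename
# and the resource layout are assumptions, up to the user):
#
#   python run_chinese_ref.py \
#       --file_name ./resources/chinese-demo.txt \
#       --ltp ./resources/ltp \
#       --bert ./resources/robert \
#       --save_path ./resources/ref.txt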
from typing import List, Optional, Union

import numpy as np
import PIL
import torch
from PIL import Image

from ...models import UNet2DConditionModel, VQModel
from ...pipelines import DiffusionPipeline
from ...pipelines.pipeline_utils import ImagePipelineOutput
from ...schedulers import DDPMScheduler
from ...utils import (
    is_accelerate_available,
    is_accelerate_version,
    logging,
    randn_tensor,
    replace_example_docstring,
)


logger = logging.get_logger(__name__)  # pylint: disable=invalid-name

EXAMPLE_DOC_STRING = """
    Examples:
        ```py
        >>> from diffusers import KandinskyV22Img2ImgPipeline, KandinskyV22PriorPipeline
        >>> from diffusers.utils import load_image
        >>> import torch

        >>> pipe_prior = KandinskyV22PriorPipeline.from_pretrained(
        ...     "kandinsky-community/kandinsky-2-2-prior", torch_dtype=torch.float16
        ... )
        >>> pipe_prior.to("cuda")

        >>> prompt = "A red cartoon frog, 4k"
        >>> image_emb, zero_image_emb = pipe_prior(prompt, return_dict=False)

        >>> pipe = KandinskyV22Img2ImgPipeline.from_pretrained(
        ...     "kandinsky-community/kandinsky-2-2-decoder", torch_dtype=torch.float16
        ... )
        >>> pipe.to("cuda")

        >>> init_image = load_image(
        ...     "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
        ...     "/kandinsky/frog.png"
        ... )

        >>> image = pipe(
        ...     image=init_image,
        ...     image_embeds=image_emb,
        ...     negative_image_embeds=zero_image_emb,
        ...     height=768,
        ...     width=768,
        ...     num_inference_steps=100,
        ...     strength=0.2,
        ... ).images

        >>> image[0].save("red_frog.png")
        ```
"""


def downscale_height_and_width(height, width, scale_factor=8):
    new_height = height // scale_factor**2
    if height % scale_factor**2 != 0:
        new_height += 1
    new_width = width // scale_factor**2
    if width % scale_factor**2 != 0:
        new_width += 1
    return new_height * scale_factor, new_width * scale_factor


def prepare_image(pil_image, w=512, h=512):
    pil_image = pil_image.resize((w, h), resample=Image.BICUBIC, reducing_gap=1)
    arr = np.array(pil_image.convert("RGB"))
    arr = arr.astype(np.float32) / 127.5 - 1
    arr = np.transpose(arr, [2, 0, 1])
    image = torch.from_numpy(arr).unsqueeze(0)
    return image


class KandinskyV22Img2ImgPipeline(DiffusionPipeline):
    def __init__(
        self,
        unet: UNet2DConditionModel,
        scheduler: DDPMScheduler,
        movq: VQModel,
    ):
        super().__init__()

        self.register_modules(
            unet=unet,
            scheduler=scheduler,
            movq=movq,
        )
        self.movq_scale_factor = 2 ** (len(self.movq.config.block_out_channels) - 1)

    def get_timesteps(self, num_inference_steps, strength, device):
        # get the original timestep using init_timestep
        init_timestep = min(int(num_inference_steps * strength), num_inference_steps)

        t_start = max(num_inference_steps - init_timestep, 0)
        timesteps = self.scheduler.timesteps[t_start:]

        return timesteps, num_inference_steps - t_start

    def prepare_latents(self, image, timestep, batch_size, num_images_per_prompt, dtype, device, generator=None):
        if not isinstance(image, (torch.Tensor, PIL.Image.Image, list)):
            raise ValueError(
                f"`image` has to be of type `torch.Tensor`, `PIL.Image.Image` or list but is {type(image)}"
            )

        image = image.to(device=device, dtype=dtype)

        batch_size = batch_size * num_images_per_prompt

        if image.shape[1] == 4:
            init_latents = image
        else:
            if isinstance(generator, list) and len(generator) != batch_size:
                raise ValueError(
                    f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
                    f" size of {batch_size}. Make sure the batch size matches the length of the generators."
                )
            elif isinstance(generator, list):
                init_latents = [
                    self.movq.encode(image[i : i + 1]).latent_dist.sample(generator[i]) for i in range(batch_size)
                ]
                init_latents = torch.cat(init_latents, dim=0)
            else:
                init_latents = self.movq.encode(image).latent_dist.sample(generator)

            init_latents = self.movq.config.scaling_factor * init_latents

        init_latents = torch.cat([init_latents], dim=0)

        shape = init_latents.shape
        noise = randn_tensor(shape, generator=generator, device=device, dtype=dtype)

        # get latents
        init_latents = self.scheduler.add_noise(init_latents, noise, timestep)
        latents = init_latents

        return latents

    def enable_sequential_cpu_offload(self, gpu_id=0):
        if is_accelerate_available():
            from accelerate import cpu_offload
        else:
            raise ImportError("Please install accelerate via `pip install accelerate`")

        device = torch.device(f"cuda:{gpu_id}")

        models = [
            self.unet,
            self.movq,
        ]
        for cpu_offloaded_model in models:
            if cpu_offloaded_model is not None:
                cpu_offload(cpu_offloaded_model, device)

    def enable_model_cpu_offload(self, gpu_id=0):
        if is_accelerate_available() and is_accelerate_version(">=", "0.17.0.dev0"):
            from accelerate import cpu_offload_with_hook
        else:
            raise ImportError("`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.")

        device = torch.device(f"cuda:{gpu_id}")

        if self.device.type != "cpu":
            self.to("cpu", silence_dtype_warnings=True)
            torch.cuda.empty_cache()  # otherwise we don't see the memory savings (but they probably exist)

        hook = None
        for cpu_offloaded_model in [self.unet, self.movq]:
            _, hook = cpu_offload_with_hook(cpu_offloaded_model, device, prev_module_hook=hook)

        # We'll offload the last model manually.
        self.final_offload_hook = hook

    @property
    # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
    def _execution_device(self):
        if not hasattr(self.unet, "_hf_hook"):
            return self.device
        for module in self.unet.modules():
            if (
                hasattr(module, "_hf_hook")
                and hasattr(module._hf_hook, "execution_device")
                and module._hf_hook.execution_device is not None
            ):
                return torch.device(module._hf_hook.execution_device)
        return self.device

    @torch.no_grad()
    @replace_example_docstring(EXAMPLE_DOC_STRING)
    def __call__(
        self,
        image_embeds: Union[torch.FloatTensor, List[torch.FloatTensor]],
        image: Union[torch.FloatTensor, PIL.Image.Image, List[torch.FloatTensor], List[PIL.Image.Image]],
        negative_image_embeds: Union[torch.FloatTensor, List[torch.FloatTensor]],
        height: int = 512,
        width: int = 512,
        num_inference_steps: int = 100,
        guidance_scale: float = 4.0,
        strength: float = 0.3,
        num_images_per_prompt: int = 1,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
    ):
        device = self._execution_device

        do_classifier_free_guidance = guidance_scale > 1.0

        if isinstance(image_embeds, list):
            image_embeds = torch.cat(image_embeds, dim=0)
        batch_size = image_embeds.shape[0]
        if isinstance(negative_image_embeds, list):
            negative_image_embeds = torch.cat(negative_image_embeds, dim=0)

        if do_classifier_free_guidance:
            image_embeds = image_embeds.repeat_interleave(num_images_per_prompt, dim=0)
            negative_image_embeds = negative_image_embeds.repeat_interleave(num_images_per_prompt, dim=0)

            image_embeds = torch.cat([negative_image_embeds, image_embeds], dim=0).to(
                dtype=self.unet.dtype, device=device
            )

        if not isinstance(image, list):
            image = [image]
        if not all(isinstance(i, (PIL.Image.Image, torch.Tensor)) for i in image):
            raise ValueError(
                f"Input is in incorrect format: {[type(i) for i in image]}. Currently, we only support PIL image and pytorch tensor"
            )

        image = torch.cat([prepare_image(i, width, height) for i in image], dim=0)
        image = image.to(dtype=image_embeds.dtype, device=device)

        latents = self.movq.encode(image)["latents"]
        latents = latents.repeat_interleave(num_images_per_prompt, dim=0)
        self.scheduler.set_timesteps(num_inference_steps, device=device)
        timesteps, num_inference_steps = self.get_timesteps(num_inference_steps, strength, device)
        latent_timestep = timesteps[:1].repeat(batch_size * num_images_per_prompt)
        height, width = downscale_height_and_width(height, width, self.movq_scale_factor)
        latents = self.prepare_latents(
            latents, latent_timestep, batch_size, num_images_per_prompt, image_embeds.dtype, device, generator
        )
        for i, t in enumerate(self.progress_bar(timesteps)):
            # expand the latents if we are doing classifier free guidance
            latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents

            added_cond_kwargs = {"image_embeds": image_embeds}
            noise_pred = self.unet(
                sample=latent_model_input,
                timestep=t,
                encoder_hidden_states=None,
                added_cond_kwargs=added_cond_kwargs,
                return_dict=False,
            )[0]

            if do_classifier_free_guidance:
                noise_pred, variance_pred = noise_pred.split(latents.shape[1], dim=1)
                noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
                _, variance_pred_text = variance_pred.chunk(2)
                noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
                noise_pred = torch.cat([noise_pred, variance_pred_text], dim=1)

            if not (
                hasattr(self.scheduler.config, "variance_type")
                and self.scheduler.config.variance_type in ["learned", "learned_range"]
            ):
                noise_pred, _ = noise_pred.split(latents.shape[1], dim=1)

            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(
                noise_pred,
                t,
                latents,
                generator=generator,
            )[0]

        # post-processing
        image = self.movq.decode(latents, force_not_quantize=True)["sample"]

        if output_type not in ["pt", "np", "pil"]:
            raise ValueError(f"Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}")

        if output_type in ["np", "pil"]:
            image = image * 0.5 + 0.5
            image = image.clamp(0, 1)
            image = image.cpu().permute(0, 2, 3, 1).float().numpy()

        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image,)

        return ImagePipelineOutput(images=image)
from typing import Optional

import torch
import torch.utils.checkpoint
from torch import Tensor, nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss

from ...activations import ACT2FN
from ...file_utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward
from ...modeling_outputs import (
    BaseModelOutputWithNoAttention,
    BaseModelOutputWithPoolingAndNoAttention,
    ImageClassifierOutputWithNoAttention,
)
from ...modeling_utils import PreTrainedModel
from ...utils import logging
from .configuration_regnet import RegNetConfig


logger = logging.get_logger(__name__)

# General docstring
_CONFIG_FOR_DOC = "RegNetConfig"

# Base docstring
_CHECKPOINT_FOR_DOC = "facebook/regnet-y-040"
_EXPECTED_OUTPUT_SHAPE = [1, 1088, 7, 7]

# Image classification docstring
_IMAGE_CLASS_CHECKPOINT = "facebook/regnet-y-040"
_IMAGE_CLASS_EXPECTED_OUTPUT = "tabby, tabby cat"

REGNET_PRETRAINED_MODEL_ARCHIVE_LIST = [
    "facebook/regnet-y-040",
    # See all regnet models at https://huggingface.co/models?filter=regnet
]


class RegNetConvLayer(nn.Module):
    def __init__(
        self,
        in_channels: int,
        out_channels: int,
        kernel_size: int = 3,
        stride: int = 1,
        groups: int = 1,
        activation: Optional[str] = "relu",
    ):
        super().__init__()
        self.convolution = nn.Conv2d(
            in_channels,
            out_channels,
            kernel_size=kernel_size,
            stride=stride,
            padding=kernel_size // 2,
            groups=groups,
            bias=False,
        )
        self.normalization = nn.BatchNorm2d(out_channels)
        self.activation = ACT2FN[activation] if activation is not None else nn.Identity()

    def forward(self, hidden_state):
        hidden_state = self.convolution(hidden_state)
        hidden_state = self.normalization(hidden_state)
        hidden_state = self.activation(hidden_state)
        return hidden_state


class RegNetEmbeddings(nn.Module):
    def __init__(self, config: RegNetConfig):
        super().__init__()
        self.embedder = RegNetConvLayer(
            config.num_channels, config.embedding_size, kernel_size=3, stride=2, activation=config.hidden_act
        )
        self.num_channels = config.num_channels

    def forward(self, pixel_values):
        num_channels = pixel_values.shape[1]
        if num_channels != self.num_channels:
            raise ValueError(
                "Make sure that the channel dimension of the pixel values match with the one set in the configuration."
            )
        hidden_state = self.embedder(pixel_values)
        return hidden_state


class RegNetShortCut(nn.Module):
    def __init__(self, in_channels: int, out_channels: int, stride: int = 2):
        super().__init__()
        self.convolution = nn.Conv2d(in_channels, out_channels, kernel_size=1, stride=stride, bias=False)
        self.normalization = nn.BatchNorm2d(out_channels)

    def forward(self, input: Tensor) -> Tensor:
        hidden_state = self.convolution(input)
        hidden_state = self.normalization(hidden_state)
        return hidden_state


class RegNetSELayer(nn.Module):
    def __init__(self, in_channels: int, reduced_channels: int):
        super().__init__()
        self.pooler = nn.AdaptiveAvgPool2d((1, 1))
        self.attention = nn.Sequential(
            nn.Conv2d(in_channels, reduced_channels, kernel_size=1),
            nn.ReLU(),
            nn.Conv2d(reduced_channels, in_channels, kernel_size=1),
            nn.Sigmoid(),
        )

    def forward(self, hidden_state):
        # b c h w -> b c 1 1
        pooled = self.pooler(hidden_state)
        attention = self.attention(pooled)
        hidden_state = hidden_state * attention
        return hidden_state


class RegNetXLayer(nn.Module):
    def __init__(self, config: RegNetConfig, in_channels: int, out_channels: int, stride: int = 1):
        super().__init__()
        should_apply_shortcut = in_channels != out_channels or stride != 1
        groups = max(1, out_channels // config.groups_width)
        self.shortcut = (
            RegNetShortCut(in_channels, out_channels, stride=stride) if should_apply_shortcut else nn.Identity()
        )
        self.layer = nn.Sequential(
            RegNetConvLayer(in_channels, out_channels, kernel_size=1, activation=config.hidden_act),
            RegNetConvLayer(out_channels, out_channels, stride=stride, groups=groups, activation=config.hidden_act),
            RegNetConvLayer(out_channels, out_channels, kernel_size=1, activation=None),
        )
        self.activation = ACT2FN[config.hidden_act]

    def forward(self, hidden_state):
        residual = hidden_state
        hidden_state = self.layer(hidden_state)
        residual = self.shortcut(residual)
        hidden_state += residual
        hidden_state = self.activation(hidden_state)
        return hidden_state


class RegNetYLayer(nn.Module):
    def __init__(self, config: RegNetConfig, in_channels: int, out_channels: int, stride: int = 1):
        super().__init__()
        should_apply_shortcut = in_channels != out_channels or stride != 1
        groups = max(1, out_channels // config.groups_width)
        self.shortcut = (
            RegNetShortCut(in_channels, out_channels, stride=stride) if should_apply_shortcut else nn.Identity()
        )
        self.layer = nn.Sequential(
            RegNetConvLayer(in_channels, out_channels, kernel_size=1, activation=config.hidden_act),
            RegNetConvLayer(out_channels, out_channels, stride=stride, groups=groups, activation=config.hidden_act),
            RegNetSELayer(out_channels, reduced_channels=int(round(in_channels / 4))),
            RegNetConvLayer(out_channels, out_channels, kernel_size=1, activation=None),
        )
        self.activation = ACT2FN[config.hidden_act]

    def forward(self, hidden_state):
        residual = hidden_state
        hidden_state = self.layer(hidden_state)
        residual = self.shortcut(residual)
        hidden_state += residual
        hidden_state = self.activation(hidden_state)
        return hidden_state


class RegNetStage(nn.Module):
    def __init__(
        self,
        config: RegNetConfig,
        in_channels: int,
        out_channels: int,
        stride: int = 2,
        depth: int = 2,
    ):
        super().__init__()
        layer = RegNetXLayer if config.layer_type == "x" else RegNetYLayer
        self.layers = nn.Sequential(
            # downsampling is done in the first layer with stride of 2
            layer(
                config,
                in_channels,
                out_channels,
                stride=stride,
            ),
            *[layer(config, out_channels, out_channels) for _ in range(depth - 1)],
        )

    def forward(self, hidden_state):
        hidden_state = self.layers(hidden_state)
        return hidden_state


class RegNetEncoder(nn.Module):
    def __init__(self, config: RegNetConfig):
        super().__init__()
        self.stages = nn.ModuleList([])
        # based on `downsample_in_first_stage`, the first layer of the first stage may or may not downsample the input
        self.stages.append(
            RegNetStage(
                config,
                config.embedding_size,
                config.hidden_sizes[0],
                stride=2 if config.downsample_in_first_stage else 1,
                depth=config.depths[0],
            )
        )
        in_out_channels = zip(config.hidden_sizes, config.hidden_sizes[1:])
        for (in_channels, out_channels), depth in zip(in_out_channels, config.depths[1:]):
            self.stages.append(RegNetStage(config, in_channels, out_channels, depth=depth))

    def forward(
        self, hidden_state: Tensor, output_hidden_states: bool = False, return_dict: bool = True
    ) -> BaseModelOutputWithNoAttention:
        hidden_states = () if output_hidden_states else None

        for stage_module in self.stages:
            if output_hidden_states:
                hidden_states = hidden_states + (hidden_state,)

            hidden_state = stage_module(hidden_state)

        if output_hidden_states:
            hidden_states = hidden_states + (hidden_state,)

        if not return_dict:
            return tuple(v for v in [hidden_state, hidden_states] if v is not None)

        return BaseModelOutputWithNoAttention(last_hidden_state=hidden_state, hidden_states=hidden_states)


class RegNetPreTrainedModel(PreTrainedModel):
    config_class = RegNetConfig
    base_model_prefix = "regnet"
    main_input_name = "pixel_values"
    supports_gradient_checkpointing = True

    def _init_weights(self, module):
        if isinstance(module, nn.Conv2d):
            nn.init.kaiming_normal_(module.weight, mode="fan_out", nonlinearity="relu")
        elif isinstance(module, (nn.BatchNorm2d, nn.GroupNorm)):
            nn.init.constant_(module.weight, 1)
            nn.init.constant_(module.bias, 0)

    def _set_gradient_checkpointing(self, module, value=False):
        if isinstance(module, RegNetModel):
            module.gradient_checkpointing = value


REGNET_START_DOCSTRING = r"""
    This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it
    as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and
    behavior.

    Parameters:
        config ([`RegNetConfig`]): Model configuration class with all the parameters of the model.
            Initializing with a config file does not load the weights associated with the model, only the
            configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
"""

REGNET_INPUTS_DOCSTRING = r"""
    Args:
        pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
            Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See
            [`ConvNextImageProcessor.__call__`] for details.

        output_hidden_states (`bool`, *optional*):
            Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
            more detail.
        return_dict (`bool`, *optional*):
            Whether or not to return a [`~file_utils.ModelOutput`] instead of a plain tuple.
"""


@add_start_docstrings(
    "The bare RegNet model outputting raw features without any specific head on top.",
    REGNET_START_DOCSTRING,
)
# Copied from transformers.models.resnet.modeling_resnet.ResNetModel with RESNET->REGNET,ResNet->RegNet
class RegNetModel(RegNetPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.config = config
        self.embedder = RegNetEmbeddings(config)
        self.encoder = RegNetEncoder(config)
        self.pooler = nn.AdaptiveAvgPool2d((1, 1))
        # Initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(REGNET_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=BaseModelOutputWithPoolingAndNoAttention,
        config_class=_CONFIG_FOR_DOC,
        modality="vision",
        expected_output=_EXPECTED_OUTPUT_SHAPE,
    )
    def forward(
        self, pixel_values: Tensor, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None
    ) -> BaseModelOutputWithPoolingAndNoAttention:
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        embedding_output = self.embedder(pixel_values)

        encoder_outputs = self.encoder(
            embedding_output, output_hidden_states=output_hidden_states, return_dict=return_dict
        )

        last_hidden_state = encoder_outputs[0]

        pooled_output = self.pooler(last_hidden_state)

        if not return_dict:
            return (last_hidden_state, pooled_output) + encoder_outputs[1:]

        return BaseModelOutputWithPoolingAndNoAttention(
            last_hidden_state=last_hidden_state,
            pooler_output=pooled_output,
            hidden_states=encoder_outputs.hidden_states,
        )


@add_start_docstrings(
    """
    RegNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for
    ImageNet.
    """,
    REGNET_START_DOCSTRING,
)
# Copied from transformers.models.resnet.modeling_resnet.ResNetForImageClassification with RESNET->REGNET,ResNet->RegNet,resnet->regnet
class RegNetForImageClassification(RegNetPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels
        self.regnet = RegNetModel(config)
        # classification head
        self.classifier = nn.Sequential(
            nn.Flatten(),
            nn.Linear(config.hidden_sizes[-1], config.num_labels) if config.num_labels > 0 else nn.Identity(),
        )
        # initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(REGNET_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_IMAGE_CLASS_CHECKPOINT,
        output_type=ImageClassifierOutputWithNoAttention,
        config_class=_CONFIG_FOR_DOC,
        expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT,
    )
    def forward(
        self,
        pixel_values: Optional[torch.FloatTensor] = None,
        labels: Optional[torch.LongTensor] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> ImageClassifierOutputWithNoAttention:
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        outputs = self.regnet(pixel_values, output_hidden_states=output_hidden_states, return_dict=return_dict)

        pooled_output = outputs.pooler_output if return_dict else outputs[1]

        logits = self.classifier(pooled_output)

        loss = None
        if labels is not None:
            if self.config.problem_type is None:
                if self.num_labels == 1:
                    self.config.problem_type = "regression"
                elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
                    self.config.problem_type = "single_label_classification"
                else:
                    self.config.problem_type = "multi_label_classification"

            if self.config.problem_type == "regression":
                loss_fct = MSELoss()
                if self.num_labels == 1:
                    loss = loss_fct(logits.squeeze(), labels.squeeze())
                else:
                    loss = loss_fct(logits, labels)
            elif self.config.problem_type == "single_label_classification":
                loss_fct = CrossEntropyLoss()
                loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
            elif self.config.problem_type == "multi_label_classification":
                loss_fct = BCEWithLogitsLoss()
                loss = loss_fct(logits, labels)

        if not return_dict:
            output = (logits,) + outputs[2:]
            return (loss,) + output if loss is not None else output

        return ImageClassifierOutputWithNoAttention(loss=loss, logits=logits, hidden_states=outputs.hidden_states)
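# A short usage sketch (assumed; it uses the checkpoint already named in the
# docstring constants above, and the image path is hypothetical):
import torch
from PIL import Image

from transformers import AutoImageProcessor, RegNetForImageClassification


def _demo_regnet(image_path: str) -> str:
    processor = AutoImageProcessor.from_pretrained("facebook/regnet-y-040")
    model = RegNetForImageClassification.from_pretrained("facebook/regnet-y-040")
    inputs = processor(images=Image.open(image_path), return_tensors="pt")
    with torch.no_grad():
        logits = model(**inputs).logits
    # Return the human-readable label of the top class.
    return model.config.id2label[logits.argmax(-1).item()]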
import hashlib
import unittest
from typing import Dict

import numpy as np

from transformers import (
    MODEL_FOR_MASK_GENERATION_MAPPING,
    TF_MODEL_FOR_MASK_GENERATION_MAPPING,
    is_vision_available,
    pipeline,
)
from transformers.pipelines import MaskGenerationPipeline
from transformers.testing_utils import (
    is_pipeline_test,
    nested_simplify,
    require_tf,
    require_torch,
    require_vision,
    slow,
)


if is_vision_available():
    from PIL import Image
else:

    class Image:
        @staticmethod
        def open(*args, **kwargs):
            pass


def hashimage(image: Image) -> str:
    m = hashlib.md5(image.tobytes())
    return m.hexdigest()[:10]


def mask_to_test_readable(mask: Image) -> Dict:
    npimg = np.array(mask)
    shape = npimg.shape
    return {"hash": hashimage(mask), "shape": shape}


@is_pipeline_test
@require_vision
@require_torch
class MaskGenerationPipelineTests(unittest.TestCase):
    model_mapping = dict(
        (list(MODEL_FOR_MASK_GENERATION_MAPPING.items()) if MODEL_FOR_MASK_GENERATION_MAPPING else [])
    )
    tf_model_mapping = dict(
        (list(TF_MODEL_FOR_MASK_GENERATION_MAPPING.items()) if TF_MODEL_FOR_MASK_GENERATION_MAPPING else [])
    )

    def get_test_pipeline(self, model, tokenizer, processor):
        image_segmenter = MaskGenerationPipeline(model=model, image_processor=processor)
        return image_segmenter, [
            "./tests/fixtures/tests_samples/COCO/000000039769.png",
            "./tests/fixtures/tests_samples/COCO/000000039769.png",
        ]

    def run_pipeline_test(self, mask_generator, examples):
        pass

    @require_tf
    @unittest.skip("Image segmentation not implemented in TF")
    def test_small_model_tf(self):
        pass

    @slow
    @require_torch
    def test_small_model_pt(self):
        image_segmenter = pipeline("mask-generation", model="facebook/sam-vit-huge")

        outputs = image_segmenter("http://images.cocodataset.org/val2017/000000039769.jpg", points_per_batch=256)

        # Shortening by hashing
        new_output = []
        for i, o in enumerate(outputs["masks"]):
            new_output += [{"mask": mask_to_test_readable(o), "scores": outputs["scores"][i]}]

        # fmt: off
        self.assertEqual(
            nested_simplify(new_output, decimals=4),
            [
                {'mask': {'hash': '115ad19f5f', 'shape': (480, 640)}, 'scores': 1.0444},
                {'mask': {'hash': '6affa964c6', 'shape': (480, 640)}, 'scores': 1.021},
                {'mask': {'hash': 'dfe28a0388', 'shape': (480, 640)}, 'scores': 1.0167},
                {'mask': {'hash': 'c0a5f4a318', 'shape': (480, 640)}, 'scores': 1.0132},
                {'mask': {'hash': 'fe8065c197', 'shape': (480, 640)}, 'scores': 1.0053},
                {'mask': {'hash': 'e2d0b7a0b7', 'shape': (480, 640)}, 'scores': 0.9967},
                {'mask': {'hash': '453c7844bd', 'shape': (480, 640)}, 'scores': 0.993},
                {'mask': {'hash': '3d44f2926d', 'shape': (480, 640)}, 'scores': 0.9909},
                {'mask': {'hash': '64033ddc3f', 'shape': (480, 640)}, 'scores': 0.9879},
                {'mask': {'hash': '801064ff79', 'shape': (480, 640)}, 'scores': 0.9834},
                {'mask': {'hash': '6172f276ef', 'shape': (480, 640)}, 'scores': 0.9716},
                {'mask': {'hash': 'b49e60e084', 'shape': (480, 640)}, 'scores': 0.9612},
                {'mask': {'hash': 'a811e775fd', 'shape': (480, 640)}, 'scores': 0.9599},
                {'mask': {'hash': 'a6a8ebcf4b', 'shape': (480, 640)}, 'scores': 0.9552},
                {'mask': {'hash': '9d8257e080', 'shape': (480, 640)}, 'scores': 0.9532},
                {'mask': {'hash': '32de6454a8', 'shape': (480, 640)}, 'scores': 0.9516},
                {'mask': {'hash': 'af3d4af2c8', 'shape': (480, 640)}, 'scores': 0.9499},
                {'mask': {'hash': '3c6db475fb', 'shape': (480, 640)}, 'scores': 0.9483},
                {'mask': {'hash': 'c290813fb9', 'shape': (480, 640)}, 'scores': 0.9464},
                {'mask': {'hash': 'b6f0b8f606', 'shape': (480, 640)}, 'scores': 0.943},
                {'mask': {'hash': '92ce16bfdf', 'shape': (480, 640)}, 'scores': 0.943},
                {'mask': {'hash': 'c749b25868', 'shape': (480, 640)}, 'scores': 0.9408},
                {'mask': {'hash': 'efb6cab859', 'shape': (480, 640)}, 'scores': 0.9335},
                {'mask': {'hash': '1ff2eafb30', 'shape': (480, 640)}, 'scores': 0.9326},
                {'mask': {'hash': '788b798e24', 'shape': (480, 640)}, 'scores': 0.9262},
                {'mask': {'hash': 'abea804f0e', 'shape': (480, 640)}, 'scores': 0.8999},
                {'mask': {'hash': '7b9e8ddb73', 'shape': (480, 640)}, 'scores': 0.8986},
                {'mask': {'hash': 'cd24047c8a', 'shape': (480, 640)}, 'scores': 0.8984},
                {'mask': {'hash': '6943e6bcbd', 'shape': (480, 640)}, 'scores': 0.8873},
                {'mask': {'hash': 'b5f47c9191', 'shape': (480, 640)}, 'scores': 0.8871}
            ],
        )
        # fmt: on

    @require_torch
    @slow
    def test_threshold(self):
        model_id = "facebook/sam-vit-huge"
        image_segmenter = pipeline("mask-generation", model=model_id)

        outputs = image_segmenter(
            "http://images.cocodataset.org/val2017/000000039769.jpg", pred_iou_thresh=1, points_per_batch=256
        )

        # Shortening by hashing
        new_output = []
        for i, o in enumerate(outputs["masks"]):
            new_output += [{"mask": mask_to_test_readable(o), "scores": outputs["scores"][i]}]

        self.assertEqual(
            nested_simplify(new_output, decimals=4),
            [
                {"mask": {"hash": "115ad19f5f", "shape": (480, 640)}, "scores": 1.0444},
                {"mask": {"hash": "6affa964c6", "shape": (480, 640)}, "scores": 1.0210},
                {"mask": {"hash": "dfe28a0388", "shape": (480, 640)}, "scores": 1.0167},
                {"mask": {"hash": "c0a5f4a318", "shape": (480, 640)}, "scores": 1.0132},
                {"mask": {"hash": "fe8065c197", "shape": (480, 640)}, "scores": 1.0053},
            ],
        )
396
'''simple docstring'''


def is_isogram(string: str) -> bool:
    if not all(x.isalpha() for x in string):
        raise ValueError("String must only contain alphabetic characters.")

    # Compare against the lower-cased letters so the check is case-insensitive.
    letters = sorted(string.lower())
    return len(letters) == len(set(letters))


if __name__ == "__main__":
    input_str = input("Enter a string ").strip()

    isogram = is_isogram(input_str)
    print(f"{input_str} is {'an' if isogram else 'not an'} isogram.")
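A quick usage sketch for the isogram check above; the sample words are illustrative, not taken from the source:

# is_isogram() raises on non-alphabetic input and ignores letter case.
assert is_isogram("Uncopyrightable") is True   # no letter repeats
assert is_isogram("allowance") is False        # 'a' and 'l' repeat
assert is_isogram("Alpha") is False            # 'A' and 'a' collide once lower-cased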
396
1
from io import BytesIO
from typing import List, Union

import requests

from ..utils import add_end_docstrings, is_decord_available, is_torch_available, logging, requires_backends
from .base import PIPELINE_INIT_ARGS, Pipeline


if is_decord_available():
    import numpy as np
    from decord import VideoReader


if is_torch_available():
    from ..models.auto.modeling_auto import MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING

logger = logging.get_logger(__name__)


@add_end_docstrings(PIPELINE_INIT_ARGS)
class VideoClassificationPipeline(Pipeline):
    """simple docstring"""

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        requires_backends(self, "decord")
        self.check_model_type(MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING)

    def _sanitize_parameters(self, top_k=None, num_frames=None, frame_sampling_rate=None):
        # Route user kwargs to the pipeline stage that consumes them.
        preprocess_params = {}
        if frame_sampling_rate is not None:
            preprocess_params["frame_sampling_rate"] = frame_sampling_rate
        if num_frames is not None:
            preprocess_params["num_frames"] = num_frames

        postprocess_params = {}
        if top_k is not None:
            postprocess_params["top_k"] = top_k
        return preprocess_params, {}, postprocess_params

    def __call__(self, videos: Union[str, List[str]], **kwargs):
        return super().__call__(videos, **kwargs)

    def preprocess(self, video, num_frames=None, frame_sampling_rate=1):
        if num_frames is None:
            num_frames = self.model.config.num_frames

        if video.startswith("http://") or video.startswith("https://"):
            video = BytesIO(requests.get(video).content)

        videoreader = VideoReader(video)
        videoreader.seek(0)

        # Sample `num_frames` evenly spaced frame indices.
        start_idx = 0
        end_idx = num_frames * frame_sampling_rate - 1
        indices = np.linspace(start_idx, end_idx, num=num_frames, dtype=np.int64)

        video = videoreader.get_batch(indices).asnumpy()
        video = list(video)

        model_inputs = self.image_processor(video, return_tensors=self.framework)
        return model_inputs

    def _forward(self, model_inputs):
        model_outputs = self.model(**model_inputs)
        return model_outputs

    def postprocess(self, model_outputs, top_k=5):
        if top_k > self.model.config.num_labels:
            top_k = self.model.config.num_labels

        if self.framework == "pt":
            probs = model_outputs.logits.softmax(-1)[0]
            scores, ids = probs.topk(top_k)
        else:
            raise ValueError(f"Unsupported framework: {self.framework}")

        scores = scores.tolist()
        ids = ids.tolist()
        return [{"score": score, "label": self.model.config.id2label[_id]} for score, _id in zip(scores, ids)]
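A minimal usage sketch for the pipeline above; the checkpoint name and clip URL are assumptions, not taken from the source:

from transformers import pipeline

# Any video-classification checkpoint would do here.
classifier = pipeline("video-classification", model="MCG-NJU/videomae-base-finetuned-kinetics")
predictions = classifier("https://example.com/clip.mp4", top_k=3, frame_sampling_rate=4)
for p in predictions:
    print(f"{p['label']}: {p['score']:.3f}")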
706
import logging

import numpy as np
import pytest
from scipy.linalg import eigh

logging.basicConfig(level=logging.INFO, format="%(message)s")


def column_reshape(input_array: np.ndarray) -> np.ndarray:
    # Reshape a row vector into a column vector.
    return input_array.reshape((input_array.size, 1))


def covariance_within_classes(features: np.ndarray, labels: np.ndarray, classes: int) -> np.ndarray:
    # Covariance accumulated inside each class.
    covariance_sum = np.nan
    for i in range(classes):
        data = features[:, labels == i]
        data_mean = data.mean(1)
        # Centralize the data of class i
        centered_data = data - column_reshape(data_mean)
        if i > 0:  # If covariance_sum is not None
            covariance_sum += np.dot(centered_data, centered_data.T)
        else:  # If covariance_sum is np.nan (i.e. first loop)
            covariance_sum = np.dot(centered_data, centered_data.T)
    return covariance_sum / features.shape[1]


def covariance_between_classes(features: np.ndarray, labels: np.ndarray, classes: int) -> np.ndarray:
    # Covariance of the per-class means around the global mean.
    general_data_mean = features.mean(1)
    covariance_sum = np.nan
    for i in range(classes):
        data = features[:, labels == i]
        device_data = data.shape[1]
        data_mean = data.mean(1)
        if i > 0:  # If covariance_sum is not None
            covariance_sum += device_data * np.dot(
                column_reshape(data_mean) - column_reshape(general_data_mean),
                (column_reshape(data_mean) - column_reshape(general_data_mean)).T,
            )
        else:  # If covariance_sum is np.nan (i.e. first loop)
            covariance_sum = device_data * np.dot(
                column_reshape(data_mean) - column_reshape(general_data_mean),
                (column_reshape(data_mean) - column_reshape(general_data_mean)).T,
            )
    return covariance_sum / features.shape[1]


def principal_component_analysis(features: np.ndarray, dimensions: int) -> np.ndarray:
    # Check if the features have been loaded
    if features.any():
        data_mean = features.mean(1)
        # Center the dataset
        centered_data = features - np.reshape(data_mean, (data_mean.size, 1))
        covariance_matrix = np.dot(centered_data, centered_data.T) / features.shape[1]
        _, eigenvectors = np.linalg.eigh(covariance_matrix)
        # Take all the columns in the reverse order (-1), and then take only the first `dimensions`
        filtered_eigenvectors = eigenvectors[:, ::-1][:, 0:dimensions]
        # Project the dataset onto the new space
        projected_data = np.dot(filtered_eigenvectors.T, features)
        logging.info("Principal Component Analysis computed")
        return projected_data
    else:
        logging.basicConfig(level=logging.ERROR, format="%(message)s", force=True)
        logging.error("Dataset empty")
        raise AssertionError


def linear_discriminant_analysis(
    features: np.ndarray, labels: np.ndarray, classes: int, dimensions: int
) -> np.ndarray:
    assert classes > dimensions
    # Check if features have been already loaded
    if features.any():
        _, eigenvectors = eigh(
            covariance_between_classes(features, labels, classes),
            covariance_within_classes(features, labels, classes),
        )
        filtered_eigenvectors = eigenvectors[:, ::-1][:, :dimensions]
        svd_matrix, _, _ = np.linalg.svd(filtered_eigenvectors)
        filtered_svd_matrix = svd_matrix[:, 0:dimensions]
        projected_data = np.dot(filtered_svd_matrix.T, features)
        logging.info("Linear Discriminant Analysis computed")
        return projected_data
    else:
        logging.basicConfig(level=logging.ERROR, format="%(message)s", force=True)
        logging.error("Dataset empty")
        raise AssertionError


def test_linear_discriminant_analysis() -> None:
    # Create dummy dataset with 2 classes and 3 features
    features = np.array([[1, 2, 3, 4, 5], [2, 3, 4, 5, 6], [3, 4, 5, 6, 7]])
    labels = np.array([0, 0, 0, 1, 1])
    classes = 2
    dimensions = 2

    # Assert that the function raises an AssertionError if dimensions > classes
    with pytest.raises(AssertionError) as error_info:
        projected_data = linear_discriminant_analysis(features, labels, classes, dimensions)
        if isinstance(projected_data, np.ndarray):
            raise AssertionError("Did not raise AssertionError for dimensions > classes")
    assert error_info.type is AssertionError


def test_principal_component_analysis() -> None:
    features = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
    dimensions = 2
    expected_output = np.array([[6.92820323, 8.66025404, 10.39230485], [3.0, 3.0, 3.0]])

    with pytest.raises(AssertionError) as error_info:
        output = principal_component_analysis(features, dimensions)
        if not np.allclose(expected_output, output):
            raise AssertionError
    assert error_info.type is AssertionError


if __name__ == "__main__":
    import doctest

    doctest.testmod()
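A small smoke test for principal_component_analysis above; only the output shape is asserted, which follows directly from the projection, and the data is made up:

import numpy as np

# Three features, five samples; project down to two dimensions.
features = np.array(
    [[1.0, 2.0, 3.0, 4.0, 5.0],
     [2.0, 3.0, 4.0, 5.0, 6.0],
     [3.0, 4.0, 5.0, 6.0, 7.0]]
)
projected = principal_component_analysis(features, dimensions=2)
# One row per kept component, one column per sample.
assert projected.shape == (2, 5)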
218
0
from collections import OrderedDict
from typing import Mapping

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging


logger = logging.get_logger(__name__)

CAMEMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "camembert-base": "https://huggingface.co/camembert-base/resolve/main/config.json",
    "umberto-commoncrawl-cased-v1": (
        "https://huggingface.co/Musixmatch/umberto-commoncrawl-cased-v1/resolve/main/config.json"
    ),
    "umberto-wikipedia-uncased-v1": (
        "https://huggingface.co/Musixmatch/umberto-wikipedia-uncased-v1/resolve/main/config.json"
    ),
}


class CamembertConfig(PretrainedConfig):
    """simple docstring"""

    model_type = "camembert"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout


class CamembertOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
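A short usage sketch for a PretrainedConfig subclass like the one above; the values are illustrative:

# Build a deliberately small CamemBERT-style config and round-trip it.
config = CamembertConfig(hidden_size=256, num_hidden_layers=4, num_attention_heads=4)
assert config.model_type == "camembert"
assert config.hidden_size == 256
serialized = config.to_dict()                     # provided by PretrainedConfig
restored = CamembertConfig.from_dict(serialized)  # also inherited
assert restored.num_hidden_layers == 4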
53
'''simple docstring'''
import math


def res(x, y):
    if 0 not in (x, y):
        # We use the relation log10(x^y) = y * log10(x), where 10 is the base.
        return y * math.log10(x)
    else:
        if x == 0:  # 0 raised to any number is 0
            return 0
        elif y == 0:
            return 1  # any number raised to 0 is 1
    raise AssertionError("This should never happen")


if __name__ == "__main__":
    # Main function
    # Read two numbers from input and typecast them to int using map function.
    # Here x is the base and y is the power.
    prompt = "Enter the base and the power separated by a comma: "
    x1, y1 = map(int, input(prompt).split(","))
    x2, y2 = map(int, input(prompt).split(","))

    # We find the log of each number, using the function res(), which takes two
    # arguments.
    res1 = res(x1, y1)
    res2 = res(x2, y2)

    # We check for the largest number
    if res1 > res2:
        print("Largest number is", x1, "^", y1)
    elif res2 > res1:
        print("Largest number is", x2, "^", y2)
    else:
        print("Both are equal")
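A worked instance of the logarithm trick above, small enough to check by hand; the numbers are mine, not from the source:

# Compare 2^10 = 1024 against 10^2 = 100 without computing either power:
#   log10(2^10) = 10 * log10(2) ~ 3.0103
#   log10(10^2) =  2 * log10(10) = 2.0
# 3.0103 > 2.0, so 2^10 is the larger value.
assert res(2, 10) > res(10, 2)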
508
0
import logging import os from typing import List, Tuple import numpy as np import psutil import torch import torch.distributed as dist from transformers import RagRetriever UpperCamelCase = logging.getLogger(__name__) class _a ( lowerCAmelCase__ ): '''simple docstring''' def __init__( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase=None ): super().__init__( __UpperCAmelCase , question_encoder_tokenizer=__UpperCAmelCase , generator_tokenizer=__UpperCAmelCase , index=__UpperCAmelCase , init_retrieval=__UpperCAmelCase , ) __A : List[Any] = None def __UpperCAmelCase( self , __UpperCAmelCase ): logger.info("initializing retrieval" ) # initializing a separate process group for retrieval as the default # nccl backend doesn't support gather/scatter operations while gloo # is too slow to replace nccl for the core gpu communication if dist.is_initialized(): logger.info("dist initialized" ) # needs to be set manually __A : Optional[Any] = self._infer_socket_ifname() # avoid clash with the NCCL port __A : Union[str, Any] = str(distributed_port + 1 ) __A : str = dist.new_group(ranks=__UpperCAmelCase , backend="gloo" ) # initialize retriever only on the main worker if not dist.is_initialized() or self._is_main(): logger.info("dist not initialized / main" ) self.index.init_index() # all processes wait untill the retriever is initialized by the main process if dist.is_initialized(): torch.distributed.barrier(group=self.process_group ) def __UpperCAmelCase( self ): return dist.get_rank(group=self.process_group ) == 0 def __UpperCAmelCase( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase=torch.floataa ): __A : Dict = torch.empty(__UpperCAmelCase , dtype=__UpperCAmelCase ) dist.scatter(__UpperCAmelCase , src=0 , scatter_list=__UpperCAmelCase , group=self.process_group ) return target_tensor def __UpperCAmelCase( self ): __A : str = psutil.net_if_addrs() # a hacky way to deal with varying network interface names __A : List[str] = next((addr for addr in addrs if addr.startswith("e" )) , __UpperCAmelCase ) return ifname def __UpperCAmelCase( self , __UpperCAmelCase , __UpperCAmelCase ): # single GPU training if not dist.is_initialized(): __A , __A : Optional[Any] = self._main_retrieve(__UpperCAmelCase , __UpperCAmelCase ) return retrieved_doc_embeds, doc_ids, self.index.get_doc_dicts(__UpperCAmelCase ) # distributed training __A : Dict = dist.get_world_size(group=self.process_group ) # gather logic __A : int = None if self._is_main(): __A : Tuple = [torch.empty(question_hidden_states.shape , dtype=torch.floataa ) for _ in range(__UpperCAmelCase )] dist.gather(torch.tensor(__UpperCAmelCase ) , dst=0 , gather_list=__UpperCAmelCase , group=self.process_group ) # scatter logic __A : str = question_hidden_states.shape[0] __A : Union[str, Any] = [] __A : Tuple = [] if self._is_main(): assert len(__UpperCAmelCase ) == world_size __A , __A : Union[str, Any] = self._main_retrieve(torch.cat(__UpperCAmelCase ).numpy() , __UpperCAmelCase ) __A , __A : Any = torch.tensor(__UpperCAmelCase ), torch.tensor(__UpperCAmelCase ) __A : Any = self._chunk_tensor(__UpperCAmelCase , __UpperCAmelCase ) __A : Dict = self._chunk_tensor(__UpperCAmelCase , __UpperCAmelCase ) __A : int = self._scattered(__UpperCAmelCase , [n_queries, n_docs] , target_type=torch.intaa ) __A : List[str] = self._scattered(__UpperCAmelCase , [n_queries, n_docs, question_hidden_states.shape[1]] ) return retrieved_doc_embeds.numpy(), doc_ids.numpy(), self.index.get_doc_dicts(__UpperCAmelCase )
387
import os import string import sys UpperCamelCase = 1 << 8 UpperCamelCase = { 'tab': ord('\t'), 'newline': ord('\r'), 'esc': 27, 'up': 65 + ARROW_KEY_FLAG, 'down': 66 + ARROW_KEY_FLAG, 'right': 67 + ARROW_KEY_FLAG, 'left': 68 + ARROW_KEY_FLAG, 'mod_int': 91, 'undefined': sys.maxsize, 'interrupt': 3, 'insert': 50, 'delete': 51, 'pg_up': 53, 'pg_down': 54, } UpperCamelCase = KEYMAP['up'] UpperCamelCase = KEYMAP['left'] if sys.platform == "win32": UpperCamelCase = [] UpperCamelCase = { b'\xe0H': KEYMAP['up'] - ARROW_KEY_FLAG, b'\x00H': KEYMAP['up'] - ARROW_KEY_FLAG, b'\xe0P': KEYMAP['down'] - ARROW_KEY_FLAG, b'\x00P': KEYMAP['down'] - ARROW_KEY_FLAG, b'\xe0M': KEYMAP['right'] - ARROW_KEY_FLAG, b'\x00M': KEYMAP['right'] - ARROW_KEY_FLAG, b'\xe0K': KEYMAP['left'] - ARROW_KEY_FLAG, b'\x00K': KEYMAP['left'] - ARROW_KEY_FLAG, } for i in range(10): UpperCamelCase = ord(str(i)) def lowerCamelCase_ ( ) -> Tuple: if os.name == "nt": import msvcrt __A : Optional[int] = "mbcs" # Flush the keyboard buffer while msvcrt.kbhit(): msvcrt.getch() if len(_lowercase ) == 0: # Read the keystroke __A : Union[str, Any] = msvcrt.getch() # If it is a prefix char, get second part if ch in (b"\x00", b"\xe0"): __A : Tuple = ch + msvcrt.getch() # Translate actual Win chars to bullet char types try: __A : int = chr(WIN_KEYMAP[cha] ) WIN_CH_BUFFER.append(chr(KEYMAP["mod_int"] ) ) WIN_CH_BUFFER.append(_lowercase ) if ord(_lowercase ) in ( KEYMAP["insert"] - 1 << 9, KEYMAP["delete"] - 1 << 9, KEYMAP["pg_up"] - 1 << 9, KEYMAP["pg_down"] - 1 << 9, ): WIN_CH_BUFFER.append(chr(126 ) ) __A : Tuple = chr(KEYMAP["esc"] ) except KeyError: __A : Union[str, Any] = cha[1] else: __A : Optional[int] = ch.decode(_lowercase ) else: __A : Dict = WIN_CH_BUFFER.pop(0 ) elif os.name == "posix": import termios import tty __A : str = sys.stdin.fileno() __A : Tuple = termios.tcgetattr(_lowercase ) try: tty.setraw(_lowercase ) __A : int = sys.stdin.read(1 ) finally: termios.tcsetattr(_lowercase , termios.TCSADRAIN , _lowercase ) return ch def lowerCamelCase_ ( ) -> Union[str, Any]: __A : Any = get_raw_chars() if ord(_lowercase ) in [KEYMAP["interrupt"], KEYMAP["newline"]]: return char elif ord(_lowercase ) == KEYMAP["esc"]: __A : Tuple = get_raw_chars() if ord(_lowercase ) == KEYMAP["mod_int"]: __A : Optional[int] = get_raw_chars() if ord(_lowercase ) >= KEYMAP["arrow_begin"] - ARROW_KEY_FLAG and ord(_lowercase ) <= KEYMAP["arrow_end"] - ARROW_KEY_FLAG: return chr(ord(_lowercase ) + ARROW_KEY_FLAG ) else: return KEYMAP["undefined"] else: return get_raw_chars() else: if char in string.printable: return char else: return KEYMAP["undefined"]
387
1
"""simple docstring""" from dataclasses import dataclass, field from typing import TYPE_CHECKING, Any, ClassVar, Dict, List, Optional, Union import pyarrow as pa if TYPE_CHECKING: from .features import FeatureType @dataclass class lowercase_ : '''simple docstring''' UpperCAmelCase : List[str] UpperCAmelCase : Optional[str] = None # Automatically constructed UpperCAmelCase : ClassVar[str] = "dict" UpperCAmelCase : ClassVar[Any] = None UpperCAmelCase : str = field(default='''Translation''' , init=__lowerCAmelCase , repr=__lowerCAmelCase ) def __call__( self : List[Any] ): return pa.struct({lang: pa.string() for lang in sorted(self.languages )} ) def lowerCAmelCase_ ( self : int ): from .features import Value return {k: Value('string' ) for k in sorted(self.languages )} @dataclass class lowercase_ : '''simple docstring''' UpperCAmelCase : Optional[List] = None UpperCAmelCase : Optional[int] = None UpperCAmelCase : Optional[str] = None # Automatically constructed UpperCAmelCase : ClassVar[str] = "dict" UpperCAmelCase : ClassVar[Any] = None UpperCAmelCase : str = field(default='''TranslationVariableLanguages''' , init=__lowerCAmelCase , repr=__lowerCAmelCase ) def lowerCAmelCase_ ( self : Tuple ): _A = sorted(set(self.languages ) ) if self.languages else None _A = len(self.languages ) if self.languages else None def __call__( self : Tuple ): return pa.struct({'language': pa.list_(pa.string() ), 'translation': pa.list_(pa.string() )} ) def lowerCAmelCase_ ( self : str , _UpperCAmelCase : Union[str, Any] ): _A = set(self.languages ) if self.languages and set(_UpperCAmelCase ) - lang_set: raise ValueError( F'''Some languages in example ({", ".join(sorted(set(_UpperCAmelCase ) - lang_set ) )}) are not in valid set ({", ".join(_UpperCAmelCase )}).''' ) # Convert dictionary into tuples, splitting out cases where there are # multiple translations for a single language. _A = [] for lang, text in translation_dict.items(): if isinstance(_UpperCAmelCase , _UpperCAmelCase ): translation_tuples.append((lang, text) ) else: translation_tuples.extend([(lang, el) for el in text] ) # Ensure translations are in ascending order by language code. _A , _A = zip(*sorted(_UpperCAmelCase ) ) return {"language": languages, "translation": translations} def lowerCAmelCase_ ( self : Optional[int] ): from .features import Sequence, Value return { "language": Sequence(Value('string' ) ), "translation": Sequence(Value('string' ) ), }
7
"""simple docstring""" from ...configuration_utils import PretrainedConfig from ...utils import logging a = logging.get_logger(__name__) a = { '''facebook/s2t-small-librispeech-asr''': ( '''https://huggingface.co/facebook/s2t-small-librispeech-asr/resolve/main/config.json''' ), # See all Speech2Text models at https://huggingface.co/models?filter=speech_to_text } class lowercase_ ( __lowerCAmelCase ): '''simple docstring''' UpperCAmelCase : str = '''speech_to_text''' UpperCAmelCase : List[Any] = ['''past_key_values'''] UpperCAmelCase : Tuple = {'''num_attention_heads''': '''encoder_attention_heads''', '''hidden_size''': '''d_model'''} def __init__( self : int , _UpperCAmelCase : Union[str, Any]=10_000 , _UpperCAmelCase : Tuple=12 , _UpperCAmelCase : int=2_048 , _UpperCAmelCase : Optional[Any]=4 , _UpperCAmelCase : List[str]=6 , _UpperCAmelCase : Tuple=2_048 , _UpperCAmelCase : str=4 , _UpperCAmelCase : int=0.0 , _UpperCAmelCase : Dict=0.0 , _UpperCAmelCase : Optional[int]=True , _UpperCAmelCase : Optional[Any]=True , _UpperCAmelCase : Union[str, Any]="relu" , _UpperCAmelCase : List[Any]=256 , _UpperCAmelCase : Optional[int]=0.1 , _UpperCAmelCase : Any=0.0 , _UpperCAmelCase : Dict=0.0 , _UpperCAmelCase : str=0.02 , _UpperCAmelCase : Any=2 , _UpperCAmelCase : Dict=True , _UpperCAmelCase : List[str]=1 , _UpperCAmelCase : Tuple=0 , _UpperCAmelCase : Tuple=2 , _UpperCAmelCase : List[str]=6_000 , _UpperCAmelCase : Optional[Any]=1_024 , _UpperCAmelCase : Optional[Any]=2 , _UpperCAmelCase : Any=(5, 5) , _UpperCAmelCase : int=1_024 , _UpperCAmelCase : str=80 , _UpperCAmelCase : Any=1 , **_UpperCAmelCase : Tuple , ): _A = vocab_size _A = d_model _A = encoder_ffn_dim _A = encoder_layers _A = encoder_attention_heads _A = decoder_ffn_dim _A = decoder_layers _A = decoder_attention_heads _A = dropout _A = attention_dropout _A = activation_dropout _A = activation_function _A = init_std _A = encoder_layerdrop _A = decoder_layerdrop _A = use_cache _A = encoder_layers _A = scale_embedding # scale factor will be sqrt(d_model) if True _A = max_source_positions _A = max_target_positions _A = num_conv_layers _A = list(_UpperCAmelCase ) _A = conv_channels _A = input_feat_per_channel _A = input_channels if len(self.conv_kernel_sizes ) != self.num_conv_layers: raise ValueError( 'Configuration for convolutional module is incorrect. ' 'It is required that `len(config.conv_kernel_sizes)` == `config.num_conv_layers` ' F'''but is `len(config.conv_kernel_sizes) = {len(self.conv_kernel_sizes )}`, ''' F'''`config.num_conv_layers = {self.num_conv_layers}`.''' ) super().__init__( pad_token_id=_UpperCAmelCase , bos_token_id=_UpperCAmelCase , eos_token_id=_UpperCAmelCase , is_encoder_decoder=_UpperCAmelCase , decoder_start_token_id=_UpperCAmelCase , **_UpperCAmelCase , )
7
1
"""simple docstring""" import pytest from datasets.parallel import ParallelBackendConfig, parallel_backend from datasets.utils.py_utils import map_nested from .utils import require_dill_gt_0_3_2, require_joblibspark, require_not_windows def lowercase ( __UpperCamelCase ) -> int: # picklable for multiprocessing return i + 1 @require_dill_gt_0_3_2 @require_joblibspark @require_not_windows def lowercase ( ) -> Optional[int]: with parallel_backend('''spark''' ): assert ParallelBackendConfig.backend_name == "spark" __magic_name__ = [1, 2, 3] with pytest.raises(lowerCamelCase_ ): with parallel_backend('''unsupported backend''' ): map_nested(lowerCamelCase_ , lowerCamelCase_ , num_proc=2 ) with pytest.raises(lowerCamelCase_ ): with parallel_backend('''unsupported backend''' ): map_nested(lowerCamelCase_ , lowerCamelCase_ , num_proc=-1 ) @require_dill_gt_0_3_2 @require_joblibspark @require_not_windows @pytest.mark.parametrize('''num_proc''' , [2, -1] ) def lowercase ( __UpperCamelCase ) -> Union[str, Any]: __magic_name__ = [1, 2] __magic_name__ = {'''a''': 1, '''b''': 2} __magic_name__ = {'''a''': [1, 2], '''b''': [3, 4]} __magic_name__ = {'''a''': {'''1''': 1}, '''b''': 2} __magic_name__ = {'''a''': 1, '''b''': 2, '''c''': 3, '''d''': 4} __magic_name__ = [2, 3] __magic_name__ = {'''a''': 2, '''b''': 3} __magic_name__ = {'''a''': [2, 3], '''b''': [4, 5]} __magic_name__ = {'''a''': {'''1''': 2}, '''b''': 3} __magic_name__ = {'''a''': 2, '''b''': 3, '''c''': 4, '''d''': 5} with parallel_backend('''spark''' ): assert map_nested(lowerCamelCase_ , lowerCamelCase_ , num_proc=lowerCamelCase_ ) == expected_map_nested_sa assert map_nested(lowerCamelCase_ , lowerCamelCase_ , num_proc=lowerCamelCase_ ) == expected_map_nested_sa assert map_nested(lowerCamelCase_ , lowerCamelCase_ , num_proc=lowerCamelCase_ ) == expected_map_nested_sa assert map_nested(lowerCamelCase_ , lowerCamelCase_ , num_proc=lowerCamelCase_ ) == expected_map_nested_sa assert map_nested(lowerCamelCase_ , lowerCamelCase_ , num_proc=lowerCamelCase_ ) == expected_map_nested_sa
712
"""simple docstring""" import torch from diffusers import EulerDiscreteScheduler from diffusers.utils import torch_device from .test_schedulers import SchedulerCommonTest class _lowercase ( __UpperCAmelCase ): _lowerCamelCase = (EulerDiscreteScheduler,) _lowerCamelCase = 10 def lowerCAmelCase__ ( self , **UpperCamelCase_ ): __magic_name__ = { '''num_train_timesteps''': 1100, '''beta_start''': 0.0_0_0_1, '''beta_end''': 0.0_2, '''beta_schedule''': '''linear''', } config.update(**UpperCamelCase_ ) return config def lowerCAmelCase__ ( self ): for timesteps in [10, 50, 100, 1000]: self.check_over_configs(num_train_timesteps=UpperCamelCase_ ) def lowerCAmelCase__ ( self ): for beta_start, beta_end in zip([0.0_0_0_0_1, 0.0_0_0_1, 0.0_0_1] , [0.0_0_0_2, 0.0_0_2, 0.0_2] ): self.check_over_configs(beta_start=UpperCamelCase_ , beta_end=UpperCamelCase_ ) def lowerCAmelCase__ ( self ): for schedule in ["linear", "scaled_linear"]: self.check_over_configs(beta_schedule=UpperCamelCase_ ) def lowerCAmelCase__ ( self ): for prediction_type in ["epsilon", "v_prediction"]: self.check_over_configs(prediction_type=UpperCamelCase_ ) def lowerCAmelCase__ ( self ): __magic_name__ = self.scheduler_classes[0] __magic_name__ = self.get_scheduler_config() __magic_name__ = scheduler_class(**UpperCamelCase_ ) scheduler.set_timesteps(self.num_inference_steps ) __magic_name__ = torch.manual_seed(0 ) __magic_name__ = self.dummy_model() __magic_name__ = self.dummy_sample_deter * scheduler.init_noise_sigma __magic_name__ = sample.to(UpperCamelCase_ ) for i, t in enumerate(scheduler.timesteps ): __magic_name__ = scheduler.scale_model_input(UpperCamelCase_ , UpperCamelCase_ ) __magic_name__ = model(UpperCamelCase_ , UpperCamelCase_ ) __magic_name__ = scheduler.step(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , generator=UpperCamelCase_ ) __magic_name__ = output.prev_sample __magic_name__ = torch.sum(torch.abs(UpperCamelCase_ ) ) __magic_name__ = torch.mean(torch.abs(UpperCamelCase_ ) ) assert abs(result_sum.item() - 1_0.0_8_0_7 ) < 1E-2 assert abs(result_mean.item() - 0.0_1_3_1 ) < 1E-3 def lowerCAmelCase__ ( self ): __magic_name__ = self.scheduler_classes[0] __magic_name__ = self.get_scheduler_config(prediction_type='''v_prediction''' ) __magic_name__ = scheduler_class(**UpperCamelCase_ ) scheduler.set_timesteps(self.num_inference_steps ) __magic_name__ = torch.manual_seed(0 ) __magic_name__ = self.dummy_model() __magic_name__ = self.dummy_sample_deter * scheduler.init_noise_sigma __magic_name__ = sample.to(UpperCamelCase_ ) for i, t in enumerate(scheduler.timesteps ): __magic_name__ = scheduler.scale_model_input(UpperCamelCase_ , UpperCamelCase_ ) __magic_name__ = model(UpperCamelCase_ , UpperCamelCase_ ) __magic_name__ = scheduler.step(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , generator=UpperCamelCase_ ) __magic_name__ = output.prev_sample __magic_name__ = torch.sum(torch.abs(UpperCamelCase_ ) ) __magic_name__ = torch.mean(torch.abs(UpperCamelCase_ ) ) assert abs(result_sum.item() - 0.0_0_0_2 ) < 1E-2 assert abs(result_mean.item() - 2.2_6_7_6E-0_6 ) < 1E-3 def lowerCAmelCase__ ( self ): __magic_name__ = self.scheduler_classes[0] __magic_name__ = self.get_scheduler_config() __magic_name__ = scheduler_class(**UpperCamelCase_ ) scheduler.set_timesteps(self.num_inference_steps , device=UpperCamelCase_ ) __magic_name__ = torch.manual_seed(0 ) __magic_name__ = self.dummy_model() __magic_name__ = self.dummy_sample_deter * scheduler.init_noise_sigma.cpu() __magic_name__ = sample.to(UpperCamelCase_ ) for t in 
scheduler.timesteps: __magic_name__ = scheduler.scale_model_input(UpperCamelCase_ , UpperCamelCase_ ) __magic_name__ = model(UpperCamelCase_ , UpperCamelCase_ ) __magic_name__ = scheduler.step(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , generator=UpperCamelCase_ ) __magic_name__ = output.prev_sample __magic_name__ = torch.sum(torch.abs(UpperCamelCase_ ) ) __magic_name__ = torch.mean(torch.abs(UpperCamelCase_ ) ) assert abs(result_sum.item() - 1_0.0_8_0_7 ) < 1E-2 assert abs(result_mean.item() - 0.0_1_3_1 ) < 1E-3 def lowerCAmelCase__ ( self ): __magic_name__ = self.scheduler_classes[0] __magic_name__ = self.get_scheduler_config() __magic_name__ = scheduler_class(**UpperCamelCase_ , use_karras_sigmas=UpperCamelCase_ ) scheduler.set_timesteps(self.num_inference_steps , device=UpperCamelCase_ ) __magic_name__ = torch.manual_seed(0 ) __magic_name__ = self.dummy_model() __magic_name__ = self.dummy_sample_deter * scheduler.init_noise_sigma.cpu() __magic_name__ = sample.to(UpperCamelCase_ ) for t in scheduler.timesteps: __magic_name__ = scheduler.scale_model_input(UpperCamelCase_ , UpperCamelCase_ ) __magic_name__ = model(UpperCamelCase_ , UpperCamelCase_ ) __magic_name__ = scheduler.step(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , generator=UpperCamelCase_ ) __magic_name__ = output.prev_sample __magic_name__ = torch.sum(torch.abs(UpperCamelCase_ ) ) __magic_name__ = torch.mean(torch.abs(UpperCamelCase_ ) ) assert abs(result_sum.item() - 1_2_4.5_2_2_9_9_4_9_9_5_1_1_7_1_9 ) < 1E-2 assert abs(result_mean.item() - 0.1_6_2_1_3_9_3_2_6_3_3_3_9_9_9_6_3 ) < 1E-3
190
0
from __future__ import annotations

from collections import Counter
from random import random


class MarkovChainGraphUndirectedUnweighted:
    """simple docstring"""

    def __init__(self) -> None:
        self.connections: dict[str, dict[str, float]] = {}

    def add_node(self, node: str) -> None:
        self.connections[node] = {}

    def add_transition_probability(self, node1: str, node2: str, probability: float) -> None:
        if node1 not in self.connections:
            self.add_node(node1)
        if node2 not in self.connections:
            self.add_node(node2)
        self.connections[node1][node2] = probability

    def get_nodes(self) -> list[str]:
        return list(self.connections)

    def transition(self, node: str) -> str:
        # Walk the outgoing probabilities until the random draw is exceeded.
        current_probability = 0.0
        random_value = random()
        for dest in self.connections[node]:
            current_probability += self.connections[node][dest]
            if current_probability > random_value:
                return dest
        return ""


def get_transitions(start: str, transitions: list[tuple[str, str, float]], steps: int) -> dict[str, int]:
    """simple docstring"""
    graph = MarkovChainGraphUndirectedUnweighted()

    for node1, node2, probability in transitions:
        graph.add_transition_probability(node1, node2, probability)

    visited = Counter(graph.get_nodes())
    node = start

    for _ in range(steps):
        node = graph.transition(node)
        visited[node] += 1

    return visited


if __name__ == "__main__":
    import doctest

    doctest.testmod()
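A quick usage sketch for the Markov-chain helpers above; the transition table and step count are illustrative:

# A two-state chain: from 'a' we usually stay put, from 'b' we usually return.
transitions = [
    ("a", "a", 0.9),
    ("a", "b", 0.1),
    ("b", "a", 0.8),
    ("b", "b", 0.2),
]
visits = get_transitions("a", transitions, steps=1000)
# The counter is seeded with one visit per node, then incremented per step;
# with these probabilities 'a' should (with overwhelming likelihood) dominate.
assert visits["a"] > visits["b"]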
547
import warnings from ...processing_utils import ProcessorMixin from ...tokenization_utils_base import BatchEncoding class _UpperCAmelCase ( lowerCAmelCase ): '''simple docstring''' __A = ['''image_processor''', '''tokenizer'''] __A = '''ViTImageProcessor''' __A = ('''CLIPTokenizer''', '''CLIPTokenizerFast''') def __init__( self : Union[str, Any] , lowercase_ : Optional[int]=None , lowercase_ : Dict=None , **lowercase_ : List[str]) -> str: """simple docstring""" _UpperCamelCase = None if "feature_extractor" in kwargs: warnings.warn( "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`" " instead." , lowercase_ , ) _UpperCamelCase = kwargs.pop("feature_extractor") _UpperCamelCase = image_processor if image_processor is not None else feature_extractor if image_processor is None: raise ValueError("You need to specify an `image_processor`.") if tokenizer is None: raise ValueError("You need to specify a `tokenizer`.") super().__init__(lowercase_ , lowercase_) def __call__( self : Any , lowercase_ : List[str]=None , lowercase_ : int=None , lowercase_ : Optional[Any]=None , lowercase_ : Dict=None , **lowercase_ : Dict) -> int: """simple docstring""" if text is None and visual_prompt is None and images is None: raise ValueError("You have to specify either text, visual prompt or images.") if text is not None and visual_prompt is not None: raise ValueError("You have to specify exactly one type of prompt. Either text or visual prompt.") if text is not None: _UpperCamelCase = self.tokenizer(lowercase_ , return_tensors=lowercase_ , **lowercase_) if visual_prompt is not None: _UpperCamelCase = self.image_processor(lowercase_ , return_tensors=lowercase_ , **lowercase_) if images is not None: _UpperCamelCase = self.image_processor(lowercase_ , return_tensors=lowercase_ , **lowercase_) if visual_prompt is not None and images is not None: _UpperCamelCase = { "pixel_values": image_features.pixel_values, "conditional_pixel_values": prompt_features.pixel_values, } return encoding elif text is not None and images is not None: _UpperCamelCase = image_features.pixel_values return encoding elif text is not None: return encoding elif visual_prompt is not None: _UpperCamelCase = { "conditional_pixel_values": prompt_features.pixel_values, } return encoding else: return BatchEncoding(data=dict(**lowercase_) , tensor_type=lowercase_) def __UpperCAmelCase ( self : Optional[Any] , *lowercase_ : List[Any] , **lowercase_ : List[Any]) -> int: """simple docstring""" return self.tokenizer.batch_decode(*lowercase_ , **lowercase_) def __UpperCAmelCase ( self : Union[str, Any] , *lowercase_ : Any , **lowercase_ : int) -> Optional[int]: """simple docstring""" return self.tokenizer.decode(*lowercase_ , **lowercase_) @property def __UpperCAmelCase ( self : Optional[int]) -> str: """simple docstring""" warnings.warn( "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead." , lowercase_ , ) return self.image_processor_class @property def __UpperCAmelCase ( self : int) -> int: """simple docstring""" warnings.warn( "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead." , lowercase_ , ) return self.image_processor
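The class above mirrors transformers' CLIPSegProcessor (it accepts text, images, or a visual prompt). A hedged usage sketch; the checkpoint and file names are assumptions:

from PIL import Image
from transformers import CLIPSegProcessor

processor = CLIPSegProcessor.from_pretrained("CIDAS/clipseg-rd64-refined")
image = Image.open("scene.png")

# Text + image -> input_ids, attention_mask and pixel_values.
inputs = processor(text=["a glass"], images=image, return_tensors="pt")

# Visual prompt + image -> pixel_values and conditional_pixel_values.
prompt = Image.open("example_glass.png")
inputs = processor(visual_prompt=prompt, images=image, return_tensors="pt")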
547
1
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available UpperCamelCase_ = { "configuration_bridgetower": [ "BRIDGETOWER_PRETRAINED_CONFIG_ARCHIVE_MAP", "BridgeTowerConfig", "BridgeTowerTextConfig", "BridgeTowerVisionConfig", ], "processing_bridgetower": ["BridgeTowerProcessor"], } try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCamelCase_ = ["BridgeTowerImageProcessor"] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCamelCase_ = [ "BRIDGETOWER_PRETRAINED_MODEL_ARCHIVE_LIST", "BridgeTowerForContrastiveLearning", "BridgeTowerForImageAndTextRetrieval", "BridgeTowerForMaskedLM", "BridgeTowerModel", "BridgeTowerPreTrainedModel", ] if TYPE_CHECKING: from .configuration_bridgetower import ( BRIDGETOWER_PRETRAINED_CONFIG_ARCHIVE_MAP, BridgeTowerConfig, BridgeTowerTextConfig, BridgeTowerVisionConfig, ) from .processing_bridgetower import BridgeTowerProcessor try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .image_processing_bridgetower import BridgeTowerImageProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_bridgetower import ( BRIDGETOWER_PRETRAINED_MODEL_ARCHIVE_LIST, BridgeTowerForContrastiveLearning, BridgeTowerForImageAndTextRetrieval, BridgeTowerForMaskedLM, BridgeTowerModel, BridgeTowerPreTrainedModel, ) else: import sys UpperCamelCase_ = _LazyModule(__name__, globals()["__file__"], _import_structure)
508
'''simple docstring'''
import hashlib
import unittest
from typing import Dict

import numpy as np

from transformers import (
    MODEL_FOR_MASK_GENERATION_MAPPING,
    TF_MODEL_FOR_MASK_GENERATION_MAPPING,
    is_vision_available,
    pipeline,
)
from transformers.pipelines import MaskGenerationPipeline
from transformers.testing_utils import (
    is_pipeline_test,
    nested_simplify,
    require_tf,
    require_torch,
    require_vision,
    slow,
)


if is_vision_available():
    from PIL import Image
else:

    class Image:
        """Stub so the type hints below still resolve when vision extras are missing."""

        @staticmethod
        def open(*args, **kwargs):
            pass


def hashimage(image: Image) -> str:
    # Hash the raw pixel bytes so large masks can be compared compactly.
    m = hashlib.md5(image.tobytes())
    return m.hexdigest()[:10]


def mask_to_test_readable(mask: Image) -> Dict:
    npimg = np.array(mask)
    shape = npimg.shape
    return {"hash": hashimage(mask), "shape": shape}


@is_pipeline_test
@require_vision
@require_torch
class MaskGenerationPipelineTests(unittest.TestCase):
    model_mapping = dict(
        (list(MODEL_FOR_MASK_GENERATION_MAPPING.items()) if MODEL_FOR_MASK_GENERATION_MAPPING else [])
    )
    tf_model_mapping = dict(
        (list(TF_MODEL_FOR_MASK_GENERATION_MAPPING.items()) if TF_MODEL_FOR_MASK_GENERATION_MAPPING else [])
    )

    def get_test_pipeline(self, model, tokenizer, processor):
        image_segmenter = MaskGenerationPipeline(model=model, image_processor=processor)
        return image_segmenter, [
            "./tests/fixtures/tests_samples/COCO/000000039769.png",
            "./tests/fixtures/tests_samples/COCO/000000039769.png",
        ]

    def run_pipeline_test(self, mask_generator, examples):
        pass

    @require_tf
    @unittest.skip("Image segmentation not implemented in TF")
    def test_small_model_tf(self):
        pass

    @slow
    @require_torch
    def test_large_model_pt(self):
        image_segmenter = pipeline("mask-generation", model="facebook/sam-vit-huge")

        outputs = image_segmenter("http://images.cocodataset.org/val2017/000000039769.jpg", points_per_batch=256)

        # Shorten the masks by hashing them.
        new_output = []
        for i, o in enumerate(outputs["masks"]):
            new_output += [{"mask": mask_to_test_readable(o), "scores": outputs["scores"][i]}]

        # fmt: off
        self.assertEqual(
            nested_simplify(new_output, decimals=4),
            [
                {"mask": {"hash": "115ad19f5f", "shape": (480, 640)}, "scores": 1.0444},
                {"mask": {"hash": "6affa964c6", "shape": (480, 640)}, "scores": 1.021},
                {"mask": {"hash": "dfe28a0388", "shape": (480, 640)}, "scores": 1.0167},
                {"mask": {"hash": "c0a5f4a318", "shape": (480, 640)}, "scores": 1.0132},
                {"mask": {"hash": "fe8065c197", "shape": (480, 640)}, "scores": 1.0053},
                {"mask": {"hash": "e2d0b7a0b7", "shape": (480, 640)}, "scores": 0.9967},
                {"mask": {"hash": "453c7844bd", "shape": (480, 640)}, "scores": 0.993},
                {"mask": {"hash": "3d44f2926d", "shape": (480, 640)}, "scores": 0.9909},
                {"mask": {"hash": "64033ddc3f", "shape": (480, 640)}, "scores": 0.9879},
                {"mask": {"hash": "801064ff79", "shape": (480, 640)}, "scores": 0.9834},
                {"mask": {"hash": "6172f276ef", "shape": (480, 640)}, "scores": 0.9716},
                {"mask": {"hash": "b49e60e084", "shape": (480, 640)}, "scores": 0.9612},
                {"mask": {"hash": "a811e775fd", "shape": (480, 640)}, "scores": 0.9599},
                {"mask": {"hash": "a6a8ebcf4b", "shape": (480, 640)}, "scores": 0.9552},
                {"mask": {"hash": "9d8257e080", "shape": (480, 640)}, "scores": 0.9532},
                {"mask": {"hash": "32de6454a8", "shape": (480, 640)}, "scores": 0.9516},
                {"mask": {"hash": "af3d4af2c8", "shape": (480, 640)}, "scores": 0.9499},
                {"mask": {"hash": "3c6db475fb", "shape": (480, 640)}, "scores": 0.9483},
                {"mask": {"hash": "c290813fb9", "shape": (480, 640)}, "scores": 0.9464},
                {"mask": {"hash": "b6f0b8f606", "shape": (480, 640)}, "scores": 0.943},
                {"mask": {"hash": "92ce16bfdf", "shape": (480, 640)}, "scores": 0.943},
                {"mask": {"hash": "c749b25868", "shape": (480, 640)}, "scores": 0.9408},
                {"mask": {"hash": "efb6cab859", "shape": (480, 640)}, "scores": 0.9335},
                {"mask": {"hash": "1ff2eafb30", "shape": (480, 640)}, "scores": 0.9326},
                {"mask": {"hash": "788b798e24", "shape": (480, 640)}, "scores": 0.9262},
                {"mask": {"hash": "abea804f0e", "shape": (480, 640)}, "scores": 0.8999},
                {"mask": {"hash": "7b9e8ddb73", "shape": (480, 640)}, "scores": 0.8986},
                {"mask": {"hash": "cd24047c8a", "shape": (480, 640)}, "scores": 0.8984},
                {"mask": {"hash": "6943e6bcbd", "shape": (480, 640)}, "scores": 0.8873},
                {"mask": {"hash": "b5f47c9191", "shape": (480, 640)}, "scores": 0.8871},
            ],
        )
        # fmt: on

    @require_torch
    @slow
    def test_threshold(self):
        model_id = "facebook/sam-vit-huge"
        image_segmenter = pipeline("mask-generation", model=model_id)

        outputs = image_segmenter(
            "http://images.cocodataset.org/val2017/000000039769.jpg", pred_iou_thresh=1, points_per_batch=256
        )

        # Shorten the masks by hashing them.
        new_output = []
        for i, o in enumerate(outputs["masks"]):
            new_output += [{"mask": mask_to_test_readable(o), "scores": outputs["scores"][i]}]

        self.assertEqual(
            nested_simplify(new_output, decimals=4),
            [
                {"mask": {"hash": "115ad19f5f", "shape": (480, 640)}, "scores": 1.0444},
                {"mask": {"hash": "6affa964c6", "shape": (480, 640)}, "scores": 1.0210},
                {"mask": {"hash": "dfe28a0388", "shape": (480, 640)}, "scores": 1.0167},
                {"mask": {"hash": "c0a5f4a318", "shape": (480, 640)}, "scores": 1.0132},
                {"mask": {"hash": "fe8065c197", "shape": (480, 640)}, "scores": 1.0053},
            ],
        )
508
1
from typing import List, Optional, Union import numpy as np import torch import torchaudio.compliance.kaldi as ta_kaldi from ...feature_extraction_sequence_utils import SequenceFeatureExtractor from ...feature_extraction_utils import BatchFeature from ...utils import PaddingStrategy, TensorType, logging UpperCamelCase : Tuple = logging.get_logger(__name__) class A__ ( A__ ): """simple docstring""" _lowercase = ['input_features', 'attention_mask'] def __init__( self : str , lowerCamelCase__ : List[str]=80 , lowerCamelCase__ : Any=16_000 , lowerCamelCase__ : int=80 , lowerCamelCase__ : Optional[Any]=0.0 , lowerCamelCase__ : Union[str, Any]=True , lowerCamelCase__ : Dict=True , lowerCamelCase__ : Tuple=True , **lowerCamelCase__ : Optional[int] , ): super().__init__(feature_size=lowerCamelCase__ , sampling_rate=lowerCamelCase__ , padding_value=lowerCamelCase__ , **lowerCamelCase__ ) a__ : Dict = num_mel_bins a__ : Optional[int] = do_ceptral_normalize a__ : List[str] = normalize_means a__ : Any = normalize_vars a__ : Optional[Any] = True def _UpperCamelCase( self : Optional[Any] , lowerCamelCase__ : np.ndarray , ): a__ : Tuple = waveform * (2**15) # Kaldi compliance: 16-bit signed integers a__ : Union[str, Any] = torch.from_numpy(lowerCamelCase__ ).unsqueeze(0 ) a__ : str = ta_kaldi.fbank(lowerCamelCase__ , num_mel_bins=self.num_mel_bins , sample_frequency=self.sampling_rate ) return features.numpy() @staticmethod def _UpperCamelCase( lowerCamelCase__ : np.ndarray , lowerCamelCase__ : int , lowerCamelCase__ : Optional[bool] = True , lowerCamelCase__ : Optional[bool] = True , lowerCamelCase__ : float = 0.0 , ): # make sure we normalize float32 arrays if normalize_means: a__ : List[str] = x[:input_length].mean(axis=0 ) a__ : Dict = np.subtract(lowerCamelCase__ , lowerCamelCase__ ) if normalize_vars: a__ : Any = x[:input_length].std(axis=0 ) a__ : List[str] = np.divide(lowerCamelCase__ , lowerCamelCase__ ) if input_length < x.shape[0]: a__ : str = padding_value # make sure array is in float32 a__ : List[Any] = x.astype(np.floataa ) return x def _UpperCamelCase( self : Tuple , lowerCamelCase__ : List[np.ndarray] , lowerCamelCase__ : Optional[np.ndarray] = None ): a__ : List[str] = attention_mask.sum(-1 ) if attention_mask is not None else [x.shape[0] for x in input_features] return [ self.utterance_cmvn(lowerCamelCase__ , lowerCamelCase__ , self.normalize_means , self.normalize_vars , self.padding_value ) for x, n in zip(lowerCamelCase__ , lowerCamelCase__ ) ] def __call__( self : str , lowerCamelCase__ : Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]] , lowerCamelCase__ : Union[bool, str, PaddingStrategy] = False , lowerCamelCase__ : Optional[int] = None , lowerCamelCase__ : bool = False , lowerCamelCase__ : Optional[int] = None , lowerCamelCase__ : Optional[Union[str, TensorType]] = None , lowerCamelCase__ : Optional[int] = None , lowerCamelCase__ : Optional[bool] = None , **lowerCamelCase__ : List[Any] , ): if sampling_rate is not None: if sampling_rate != self.sampling_rate: raise ValueError( f'''The model corresponding to this feature extractor: {self} was trained using a sampling rate of''' f''' {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled with''' f''' {self.sampling_rate} and not {sampling_rate}.''' ) else: logger.warning( "It is strongly recommended to pass the `sampling_rate` argument to this function. " "Failing to do so can result in silent errors that might be hard to debug." 
) a__ : List[str] = isinstance(lowerCamelCase__ , np.ndarray ) and len(raw_speech.shape ) > 1 if is_batched_numpy and len(raw_speech.shape ) > 2: raise ValueError(f'''Only mono-channel audio is supported for input to {self}''' ) a__ : Dict = is_batched_numpy or ( isinstance(lowerCamelCase__ , (list, tuple) ) and (isinstance(raw_speech[0] , (np.ndarray, tuple, list) )) ) if is_batched: a__ : str = [np.asarray(lowerCamelCase__ , dtype=np.floataa ) for speech in raw_speech] elif not is_batched and not isinstance(lowerCamelCase__ , np.ndarray ): a__ : Tuple = np.asarray(lowerCamelCase__ , dtype=np.floataa ) elif isinstance(lowerCamelCase__ , np.ndarray ) and raw_speech.dtype is np.dtype(np.floataa ): a__ : str = raw_speech.astype(np.floataa ) # always return batch if not is_batched: a__ : List[str] = [raw_speech] # extract fbank features a__ : Any = [self._extract_fbank_features(lowerCamelCase__ ) for waveform in raw_speech] # convert into correct format for padding a__ : Any = BatchFeature({"input_features": features} ) a__ : Any = self.pad( lowerCamelCase__ , padding=lowerCamelCase__ , max_length=lowerCamelCase__ , truncation=lowerCamelCase__ , pad_to_multiple_of=lowerCamelCase__ , return_attention_mask=lowerCamelCase__ , **lowerCamelCase__ , ) # make sure list is in array format a__ : List[str] = padded_inputs.get("input_features" ) if isinstance(input_features[0] , lowerCamelCase__ ): a__ : Optional[int] = [np.asarray(lowerCamelCase__ , dtype=np.floataa ) for feature in input_features] a__ : Dict = padded_inputs.get("attention_mask" ) if attention_mask is not None: a__ : Optional[int] = [np.asarray(lowerCamelCase__ , dtype=np.intaa ) for array in attention_mask] # Utterance-level cepstral mean and variance normalization if self.do_ceptral_normalize: a__ : Optional[Any] = ( np.array(lowerCamelCase__ , dtype=np.intaa ) if self._get_padding_strategies(lowerCamelCase__ , max_length=lowerCamelCase__ ) is not PaddingStrategy.DO_NOT_PAD else None ) a__ : Optional[Any] = self.normalize( padded_inputs["input_features"] , attention_mask=lowerCamelCase__ ) if return_tensors is not None: a__ : Optional[Any] = padded_inputs.convert_to_tensors(lowerCamelCase__ ) return padded_inputs
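A hedged usage sketch for the feature extractor above (its structure matches transformers' Speech2TextFeatureExtractor); the waveform is synthetic:

import numpy as np

extractor = Speech2TextFeatureExtractor(feature_size=80, sampling_rate=16000, num_mel_bins=80)
# One second of low-level noise at 16 kHz stands in for real speech.
waveform = (np.random.randn(16000) * 0.1).astype(np.float32)
features = extractor(waveform, sampling_rate=16000, return_tensors="np")
# Shape is (batch, n_frames, num_mel_bins); roughly 98 frames for one second
# with kaldi's default 25 ms window and 10 ms shift.
print(features["input_features"].shape)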
37
import gc import random import unittest import torch from diffusers import ( IFImgaImgPipeline, IFImgaImgSuperResolutionPipeline, IFInpaintingPipeline, IFInpaintingSuperResolutionPipeline, IFPipeline, IFSuperResolutionPipeline, ) from diffusers.models.attention_processor import AttnAddedKVProcessor from diffusers.utils.import_utils import is_xformers_available from diffusers.utils.testing_utils import floats_tensor, load_numpy, require_torch_gpu, skip_mps, slow, torch_device from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference from . import IFPipelineTesterMixin @skip_mps class lowerCAmelCase_ ( a__ , a__ , unittest.TestCase ): UpperCAmelCase__ : int = IFPipeline UpperCAmelCase__ : List[str] = TEXT_TO_IMAGE_PARAMS - {"width", "height", "latents"} UpperCAmelCase__ : List[str] = TEXT_TO_IMAGE_BATCH_PARAMS UpperCAmelCase__ : Optional[int] = PipelineTesterMixin.required_optional_params - {"latents"} def snake_case_ ( self ) -> str: return self._get_dummy_components() def snake_case_ ( self, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_=0 ) -> Union[str, Any]: if str(SCREAMING_SNAKE_CASE_ ).startswith('mps' ): UpperCamelCase : List[Any] = torch.manual_seed(SCREAMING_SNAKE_CASE_ ) else: UpperCamelCase : str = torch.Generator(device=SCREAMING_SNAKE_CASE_ ).manual_seed(SCREAMING_SNAKE_CASE_ ) UpperCamelCase : int = { 'prompt': 'A painting of a squirrel eating a burger', 'generator': generator, 'num_inference_steps': 2, 'output_type': 'numpy', } return inputs def snake_case_ ( self ) -> Optional[int]: self._test_save_load_optional_components() @unittest.skipIf(torch_device != 'cuda', reason='float16 requires CUDA' ) def snake_case_ ( self ) -> str: # Due to non-determinism in save load of the hf-internal-testing/tiny-random-t5 text encoder super().test_save_load_floataa(expected_max_diff=1e-1 ) def snake_case_ ( self ) -> Dict: self._test_attention_slicing_forward_pass(expected_max_diff=1e-2 ) def snake_case_ ( self ) -> Optional[int]: self._test_save_load_local() def snake_case_ ( self ) -> List[str]: self._test_inference_batch_single_identical( expected_max_diff=1e-2, ) @unittest.skipIf( torch_device != 'cuda' or not is_xformers_available(), reason='XFormers attention is only available with CUDA and `xformers` installed', ) def snake_case_ ( self ) -> Optional[int]: self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3 ) @slow @require_torch_gpu class lowerCAmelCase_ ( unittest.TestCase ): def snake_case_ ( self ) -> List[Any]: # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() def snake_case_ ( self ) -> List[Any]: # if UpperCamelCase : Union[str, Any] = IFPipeline.from_pretrained('DeepFloyd/IF-I-XL-v1.0', variant='fp16', torch_dtype=torch.floataa ) UpperCamelCase : str = IFSuperResolutionPipeline.from_pretrained( 'DeepFloyd/IF-II-L-v1.0', variant='fp16', torch_dtype=torch.floataa, text_encoder=SCREAMING_SNAKE_CASE_, tokenizer=SCREAMING_SNAKE_CASE_ ) # pre compute text embeddings and remove T5 to save memory pipe_a.text_encoder.to('cuda' ) UpperCamelCase , UpperCamelCase : List[str] = pipe_a.encode_prompt('anime turtle', device='cuda' ) del pipe_a.tokenizer del pipe_a.text_encoder gc.collect() UpperCamelCase : int = None UpperCamelCase : Union[str, Any] = None pipe_a.enable_model_cpu_offload() pipe_a.enable_model_cpu_offload() pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() ) 
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() ) self._test_if(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ ) pipe_a.remove_all_hooks() pipe_a.remove_all_hooks() # img2img UpperCamelCase : Optional[int] = IFImgaImgPipeline(**pipe_a.components ) UpperCamelCase : List[Any] = IFImgaImgSuperResolutionPipeline(**pipe_a.components ) pipe_a.enable_model_cpu_offload() pipe_a.enable_model_cpu_offload() pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() ) pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() ) self._test_if_imgaimg(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ ) pipe_a.remove_all_hooks() pipe_a.remove_all_hooks() # inpainting UpperCamelCase : Union[str, Any] = IFInpaintingPipeline(**pipe_a.components ) UpperCamelCase : Union[str, Any] = IFInpaintingSuperResolutionPipeline(**pipe_a.components ) pipe_a.enable_model_cpu_offload() pipe_a.enable_model_cpu_offload() pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() ) pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() ) self._test_if_inpainting(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ ) def snake_case_ ( self, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ ) -> Any: # pipeline 1 _start_torch_memory_measurement() UpperCamelCase : str = torch.Generator(device='cpu' ).manual_seed(0 ) UpperCamelCase : str = pipe_a( prompt_embeds=SCREAMING_SNAKE_CASE_, negative_prompt_embeds=SCREAMING_SNAKE_CASE_, num_inference_steps=2, generator=SCREAMING_SNAKE_CASE_, output_type='np', ) UpperCamelCase : Union[str, Any] = output.images[0] assert image.shape == (64, 64, 3) UpperCamelCase : Any = torch.cuda.max_memory_allocated() assert mem_bytes < 13 * 10**9 UpperCamelCase : Any = load_numpy( 'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if.npy' ) assert_mean_pixel_difference(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ ) # pipeline 2 _start_torch_memory_measurement() UpperCamelCase : Union[str, Any] = torch.Generator(device='cpu' ).manual_seed(0 ) UpperCamelCase : Tuple = floats_tensor((1, 3, 64, 64), rng=random.Random(0 ) ).to(SCREAMING_SNAKE_CASE_ ) UpperCamelCase : Optional[int] = pipe_a( prompt_embeds=SCREAMING_SNAKE_CASE_, negative_prompt_embeds=SCREAMING_SNAKE_CASE_, image=SCREAMING_SNAKE_CASE_, generator=SCREAMING_SNAKE_CASE_, num_inference_steps=2, output_type='np', ) UpperCamelCase : Tuple = output.images[0] assert image.shape == (256, 256, 3) UpperCamelCase : Tuple = torch.cuda.max_memory_allocated() assert mem_bytes < 4 * 10**9 UpperCamelCase : int = load_numpy( 'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_superresolution_stage_II.npy' ) assert_mean_pixel_difference(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ ) def snake_case_ ( self, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ ) -> List[Any]: # pipeline 1 _start_torch_memory_measurement() UpperCamelCase : str = floats_tensor((1, 3, 64, 64), rng=random.Random(0 ) ).to(SCREAMING_SNAKE_CASE_ ) UpperCamelCase : str = torch.Generator(device='cpu' ).manual_seed(0 ) UpperCamelCase : Any = pipe_a( prompt_embeds=SCREAMING_SNAKE_CASE_, negative_prompt_embeds=SCREAMING_SNAKE_CASE_, image=SCREAMING_SNAKE_CASE_, num_inference_steps=2, generator=SCREAMING_SNAKE_CASE_, output_type='np', ) UpperCamelCase : Optional[int] = output.images[0] assert image.shape == (64, 64, 3) 
UpperCamelCase : Any = torch.cuda.max_memory_allocated() assert mem_bytes < 10 * 10**9 UpperCamelCase : Tuple = load_numpy( 'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_img2img.npy' ) assert_mean_pixel_difference(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ ) # pipeline 2 _start_torch_memory_measurement() UpperCamelCase : int = torch.Generator(device='cpu' ).manual_seed(0 ) UpperCamelCase : str = floats_tensor((1, 3, 256, 256), rng=random.Random(0 ) ).to(SCREAMING_SNAKE_CASE_ ) UpperCamelCase : Optional[int] = floats_tensor((1, 3, 64, 64), rng=random.Random(0 ) ).to(SCREAMING_SNAKE_CASE_ ) UpperCamelCase : Dict = pipe_a( prompt_embeds=SCREAMING_SNAKE_CASE_, negative_prompt_embeds=SCREAMING_SNAKE_CASE_, image=SCREAMING_SNAKE_CASE_, original_image=SCREAMING_SNAKE_CASE_, generator=SCREAMING_SNAKE_CASE_, num_inference_steps=2, output_type='np', ) UpperCamelCase : Any = output.images[0] assert image.shape == (256, 256, 3) UpperCamelCase : str = torch.cuda.max_memory_allocated() assert mem_bytes < 4 * 10**9 UpperCamelCase : int = load_numpy( 'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_img2img_superresolution_stage_II.npy' ) assert_mean_pixel_difference(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ ) def snake_case_ ( self, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ ) -> Optional[Any]: # pipeline 1 _start_torch_memory_measurement() UpperCamelCase : Dict = floats_tensor((1, 3, 64, 64), rng=random.Random(0 ) ).to(SCREAMING_SNAKE_CASE_ ) UpperCamelCase : Optional[int] = floats_tensor((1, 3, 64, 64), rng=random.Random(1 ) ).to(SCREAMING_SNAKE_CASE_ ) UpperCamelCase : Optional[int] = torch.Generator(device='cpu' ).manual_seed(0 ) UpperCamelCase : Any = pipe_a( prompt_embeds=SCREAMING_SNAKE_CASE_, negative_prompt_embeds=SCREAMING_SNAKE_CASE_, image=SCREAMING_SNAKE_CASE_, mask_image=SCREAMING_SNAKE_CASE_, num_inference_steps=2, generator=SCREAMING_SNAKE_CASE_, output_type='np', ) UpperCamelCase : List[Any] = output.images[0] assert image.shape == (64, 64, 3) UpperCamelCase : Optional[Any] = torch.cuda.max_memory_allocated() assert mem_bytes < 10 * 10**9 UpperCamelCase : Tuple = load_numpy( 'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_inpainting.npy' ) assert_mean_pixel_difference(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ ) # pipeline 2 _start_torch_memory_measurement() UpperCamelCase : str = torch.Generator(device='cpu' ).manual_seed(0 ) UpperCamelCase : str = floats_tensor((1, 3, 64, 64), rng=random.Random(0 ) ).to(SCREAMING_SNAKE_CASE_ ) UpperCamelCase : List[Any] = floats_tensor((1, 3, 256, 256), rng=random.Random(0 ) ).to(SCREAMING_SNAKE_CASE_ ) UpperCamelCase : List[str] = floats_tensor((1, 3, 256, 256), rng=random.Random(1 ) ).to(SCREAMING_SNAKE_CASE_ ) UpperCamelCase : List[Any] = pipe_a( prompt_embeds=SCREAMING_SNAKE_CASE_, negative_prompt_embeds=SCREAMING_SNAKE_CASE_, image=SCREAMING_SNAKE_CASE_, mask_image=SCREAMING_SNAKE_CASE_, original_image=SCREAMING_SNAKE_CASE_, generator=SCREAMING_SNAKE_CASE_, num_inference_steps=2, output_type='np', ) UpperCamelCase : Optional[int] = output.images[0] assert image.shape == (256, 256, 3) UpperCamelCase : Any = torch.cuda.max_memory_allocated() assert mem_bytes < 4 * 10**9 UpperCamelCase : Optional[int] = load_numpy( 'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_inpainting_superresolution_stage_II.npy' ) 
assert_mean_pixel_difference(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ )


def _start_torch_memory_measurement() -> None:
    torch.cuda.empty_cache()
    torch.cuda.reset_max_memory_allocated()
    torch.cuda.reset_peak_memory_stats()
40
0
import cmath
import math


def apparent_power(
    voltage: float, current: float, voltage_angle: float, current_angle: float
) -> complex:
    """Calculate the apparent power in a single-phase AC circuit."""
    # Convert the phase angles from degrees to radians
    voltage_angle_rad = math.radians(voltage_angle)
    current_angle_rad = math.radians(current_angle)

    # Convert voltage and current to rectangular form
    voltage_rect = cmath.rect(voltage, voltage_angle_rad)
    current_rect = cmath.rect(current, current_angle_rad)

    # Calculate apparent power
    return voltage_rect * current_rect


if __name__ == "__main__":
    import doctest

    doctest.testmod()
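For a quick sanity check of the helper above, here is an illustrative call; all values are made up:

# 100 V at 0 degrees with 5 A at -30 degrees (illustrative values only)
s = apparent_power(voltage=100, current=5, voltage_angle=0, current_angle=-30)
print(s)       # (433.01...-250.0...j) in rectangular form
print(abs(s))  # 500.0 -> the magnitude equals voltage * current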
704
import os import unittest from transformers import LxmertTokenizer, LxmertTokenizerFast from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES from transformers.testing_utils import require_tokenizers from ...test_tokenization_common import TokenizerTesterMixin @require_tokenizers class __snake_case ( a , unittest.TestCase ): UpperCAmelCase__ : Dict = LxmertTokenizer UpperCAmelCase__ : Tuple = LxmertTokenizerFast UpperCAmelCase__ : Any = True UpperCAmelCase__ : Dict = True def lowerCamelCase ( self : Tuple): """simple docstring""" super().setUp() UpperCAmelCase_ = [ '''[UNK]''', '''[CLS]''', '''[SEP]''', '''want''', '''##want''', '''##ed''', '''wa''', '''un''', '''runn''', '''##ing''', ''',''', '''low''', '''lowest''', ] UpperCAmelCase_ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file''']) with open(self.vocab_file , '''w''' , encoding='''utf-8''') as vocab_writer: vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens])) def lowerCamelCase ( self : str , _snake_case : Union[str, Any]): """simple docstring""" UpperCAmelCase_ = '''UNwant\u00E9d,running''' UpperCAmelCase_ = '''unwanted, running''' return input_text, output_text def lowerCamelCase ( self : Union[str, Any]): """simple docstring""" UpperCAmelCase_ = self.tokenizer_class(self.vocab_file) UpperCAmelCase_ = tokenizer.tokenize('''UNwant\u00E9d,running''') self.assertListEqual(_snake_case , ['''un''', '''##want''', '''##ed''', ''',''', '''runn''', '''##ing''']) self.assertListEqual(tokenizer.convert_tokens_to_ids(_snake_case) , [7, 4, 5, 10, 8, 9]) def lowerCamelCase ( self : Union[str, Any]): """simple docstring""" if not self.test_rust_tokenizer: return UpperCAmelCase_ = self.get_tokenizer() UpperCAmelCase_ = self.get_rust_tokenizer() UpperCAmelCase_ = '''I was born in 92000, and this is falsé.''' UpperCAmelCase_ = tokenizer.tokenize(_snake_case) UpperCAmelCase_ = rust_tokenizer.tokenize(_snake_case) self.assertListEqual(_snake_case , _snake_case) UpperCAmelCase_ = tokenizer.encode(_snake_case , add_special_tokens=_snake_case) UpperCAmelCase_ = rust_tokenizer.encode(_snake_case , add_special_tokens=_snake_case) self.assertListEqual(_snake_case , _snake_case) UpperCAmelCase_ = self.get_rust_tokenizer() UpperCAmelCase_ = tokenizer.encode(_snake_case) UpperCAmelCase_ = rust_tokenizer.encode(_snake_case) self.assertListEqual(_snake_case , _snake_case)
169
0
import unittest from transformers import load_tool from .test_tools_common import ToolTesterMixin lowerCamelCase__ = "\nHugging Face was founded in 2016 by French entrepreneurs Clément Delangue, Julien Chaumond, and Thomas Wolf originally as a company that developed a chatbot app targeted at teenagers.[2] After open-sourcing the model behind the chatbot, the company pivoted to focus on being a platform for machine learning.\n\nIn March 2021, Hugging Face raised $40 million in a Series B funding round.[3]\n\nOn April 28, 2021, the company launched the BigScience Research Workshop in collaboration with several other research groups to release an open large language model.[4] In 2022, the workshop concluded with the announcement of BLOOM, a multilingual large language model with 176 billion parameters.[5]\n" class lowerCAmelCase__ ( unittest.TestCase , __lowercase ): def A_ ( self ) -> List[Any]: '''simple docstring''' _UpperCamelCase = load_tool("""text-question-answering""" ) self.tool.setup() _UpperCamelCase = load_tool("""text-question-answering""" , remote=a ) def A_ ( self ) -> Optional[int]: '''simple docstring''' _UpperCamelCase = self.tool(a , """What did Hugging Face do in April 2021?""" ) self.assertEqual(a , """launched the BigScience Research Workshop""" ) def A_ ( self ) -> Any: '''simple docstring''' _UpperCamelCase = self.remote_tool(a , """What did Hugging Face do in April 2021?""" ) self.assertEqual(a , """launched the BigScience Research Workshop""" ) def A_ ( self ) -> Optional[Any]: '''simple docstring''' _UpperCamelCase = self.tool(text=a , question="""What did Hugging Face do in April 2021?""" ) self.assertEqual(a , """launched the BigScience Research Workshop""" ) def A_ ( self ) -> int: '''simple docstring''' _UpperCamelCase = self.remote_tool(text=a , question="""What did Hugging Face do in April 2021?""" ) self.assertEqual(a , """launched the BigScience Research Workshop""" )
612
from __future__ import annotations from collections.abc import Callable from typing import Generic, TypeVar lowerCamelCase__ = TypeVar("T") lowerCamelCase__ = TypeVar("U") class lowerCAmelCase__ ( Generic[T, U] ): def __init__( self , a , a ) -> Optional[Any]: '''simple docstring''' _UpperCamelCase = key _UpperCamelCase = val _UpperCamelCase = None _UpperCamelCase = None def __repr__( self ) -> str: '''simple docstring''' return ( F'Node: key: {self.key}, val: {self.val}, ' F'has next: {bool(self.next )}, has prev: {bool(self.prev )}' ) class lowerCAmelCase__ ( Generic[T, U] ): def __init__( self ) -> None: '''simple docstring''' _UpperCamelCase = DoubleLinkedListNode(a , a ) _UpperCamelCase = DoubleLinkedListNode(a , a ) _UpperCamelCase , _UpperCamelCase = self.rear, self.head def __repr__( self ) -> str: '''simple docstring''' _UpperCamelCase = ["""DoubleLinkedList"""] _UpperCamelCase = self.head while node.next is not None: rep.append(str(a ) ) _UpperCamelCase = node.next rep.append(str(self.rear ) ) return ",\n ".join(a ) def A_ ( self , a ) -> None: '''simple docstring''' _UpperCamelCase = self.rear.prev # All nodes other than self.head are guaranteed to have non-None previous assert previous is not None _UpperCamelCase = node _UpperCamelCase = previous _UpperCamelCase = node _UpperCamelCase = self.rear def A_ ( self , a ) -> DoubleLinkedListNode[T, U] | None: '''simple docstring''' if node.prev is None or node.next is None: return None _UpperCamelCase = node.next _UpperCamelCase = node.prev _UpperCamelCase = None _UpperCamelCase = None return node class lowerCAmelCase__ ( Generic[T, U] ): UpperCamelCase_ : dict[Callable[[T], U], LRUCache[T, U]] = {} def __init__( self , a ) -> int: '''simple docstring''' _UpperCamelCase = DoubleLinkedList() _UpperCamelCase = capacity _UpperCamelCase = 0 _UpperCamelCase = 0 _UpperCamelCase = 0 _UpperCamelCase = {} def __repr__( self ) -> str: '''simple docstring''' return ( F'CacheInfo(hits={self.hits}, misses={self.miss}, ' F'capacity={self.capacity}, current size={self.num_keys})' ) def __contains__( self , a ) -> bool: '''simple docstring''' return key in self.cache def A_ ( self , a ) -> U | None: '''simple docstring''' if key in self.cache: self.hits += 1 _UpperCamelCase = self.cache[key] _UpperCamelCase = self.list.remove(self.cache[key] ) assert node == value_node # node is guaranteed not None because it is in self.cache assert node is not None self.list.add(a ) return node.val self.miss += 1 return None def A_ ( self , a , a ) -> None: '''simple docstring''' if key not in self.cache: if self.num_keys >= self.capacity: # delete first node (oldest) when over capacity _UpperCamelCase = self.list.head.next # guaranteed to have a non-None first node when num_keys > 0 # explain to type checker via assertions assert first_node is not None assert first_node.key is not None assert ( self.list.remove(a ) is not None ) # node guaranteed to be in list assert node.key is not None del self.cache[first_node.key] self.num_keys -= 1 _UpperCamelCase = DoubleLinkedListNode(a , a ) self.list.add(self.cache[key] ) self.num_keys += 1 else: # bump node to the end of the list, update value _UpperCamelCase = self.list.remove(self.cache[key] ) assert node is not None # node guaranteed to be in list _UpperCamelCase = value self.list.add(a ) @classmethod def A_ ( cls , a = 1_28 ) -> Callable[[Callable[[T], U]], Callable[..., U]]: '''simple docstring''' def cache_decorator_inner(a ) -> Callable[..., U]: def cache_decorator_wrapper(*a ) -> U: if func not in 
cls.decorator_function_to_instance_map: _UpperCamelCase = LRUCache(a ) _UpperCamelCase = cls.decorator_function_to_instance_map[func].get(args[0] ) if result is None: _UpperCamelCase = func(*a ) cls.decorator_function_to_instance_map[func].put(args[0] , a ) return result def cache_info() -> LRUCache[T, U]: return cls.decorator_function_to_instance_map[func] setattr(a , """cache_info""" , a ) # noqa: B010 return cache_decorator_wrapper return cache_decorator_inner if __name__ == "__main__": import doctest doctest.testmod()
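For comparison, here is a compact standalone sketch of the same least-recently-used eviction policy built on collections.OrderedDict; it illustrates the idea behind the doubly-linked-list implementation above and is not part of that class (all names are made up):

from collections import OrderedDict


class SimpleLRU:
    """Minimal LRU cache sketch: OrderedDict keeps keys in recency order."""

    def __init__(self, capacity: int) -> None:
        self.capacity = capacity
        self.store: OrderedDict = OrderedDict()

    def get(self, key):
        if key not in self.store:
            return None
        self.store.move_to_end(key)  # mark as most recently used
        return self.store[key]

    def put(self, key, value) -> None:
        if key in self.store:
            self.store.move_to_end(key)
        self.store[key] = value
        if len(self.store) > self.capacity:
            self.store.popitem(last=False)  # evict the least recently used entry


if __name__ == "__main__":
    cache = SimpleLRU(2)
    cache.put(1, "a")
    cache.put(2, "b")
    cache.get(1)
    cache.put(3, "c")  # evicts key 2, since key 1 was touched more recently
    print(list(cache.store))  # [1, 3]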
612
1
"""simple docstring""" from collections import OrderedDict from typing import Mapping from packaging import version from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging lowercase__ : str = logging.get_logger(__name__) lowercase__ : Optional[Any] = { """facebook/levit-128S""": """https://huggingface.co/facebook/levit-128S/resolve/main/config.json""", # See all LeViT models at https://huggingface.co/models?filter=levit } class UpperCamelCase__ ( lowercase_ ): """simple docstring""" _SCREAMING_SNAKE_CASE = """levit""" def __init__( self : int , SCREAMING_SNAKE_CASE_ : Tuple=2_2_4 , SCREAMING_SNAKE_CASE_ : Optional[int]=3 , SCREAMING_SNAKE_CASE_ : Optional[Any]=3 , SCREAMING_SNAKE_CASE_ : str=2 , SCREAMING_SNAKE_CASE_ : Any=1 , SCREAMING_SNAKE_CASE_ : Tuple=1_6 , SCREAMING_SNAKE_CASE_ : Union[str, Any]=[1_2_8, 2_5_6, 3_8_4] , SCREAMING_SNAKE_CASE_ : Union[str, Any]=[4, 8, 1_2] , SCREAMING_SNAKE_CASE_ : List[Any]=[4, 4, 4] , SCREAMING_SNAKE_CASE_ : Optional[Any]=[1_6, 1_6, 1_6] , SCREAMING_SNAKE_CASE_ : Dict=0 , SCREAMING_SNAKE_CASE_ : Tuple=[2, 2, 2] , SCREAMING_SNAKE_CASE_ : List[Any]=[2, 2, 2] , SCREAMING_SNAKE_CASE_ : Any=0.02 , **SCREAMING_SNAKE_CASE_ : Optional[int] , ): super().__init__(**SCREAMING_SNAKE_CASE_ ) lowerCAmelCase_ : List[str] = image_size lowerCAmelCase_ : int = num_channels lowerCAmelCase_ : Union[str, Any] = kernel_size lowerCAmelCase_ : Union[str, Any] = stride lowerCAmelCase_ : Optional[Any] = padding lowerCAmelCase_ : List[Any] = hidden_sizes lowerCAmelCase_ : int = num_attention_heads lowerCAmelCase_ : List[str] = depths lowerCAmelCase_ : Optional[Any] = key_dim lowerCAmelCase_ : Tuple = drop_path_rate lowerCAmelCase_ : Tuple = patch_size lowerCAmelCase_ : str = attention_ratio lowerCAmelCase_ : str = mlp_ratio lowerCAmelCase_ : Union[str, Any] = initializer_range lowerCAmelCase_ : List[str] = [ ['Subsample', key_dim[0], hidden_sizes[0] // key_dim[0], 4, 2, 2], ['Subsample', key_dim[0], hidden_sizes[1] // key_dim[0], 4, 2, 2], ] class UpperCamelCase__ ( lowercase_ ): """simple docstring""" _SCREAMING_SNAKE_CASE = version.parse("""1.11""" ) @property def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ): return OrderedDict( [ ('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}), ] ) @property def SCREAMING_SNAKE_CASE__ ( self : Optional[int] ): return 1E-4
317
"""simple docstring""" import argparse import gc import json import os import re import torch from huggingface_hub import hf_hub_download from transformers import AutoModelForCausalLM, AutoTokenizer, PreTrainedTokenizerFast, RwkvConfig from transformers.modeling_utils import WEIGHTS_INDEX_NAME, shard_checkpoint lowercase__ : Optional[int] = { """169M""": 1_2, """430M""": 2_4, """1B5""": 2_4, """3B""": 3_2, """7B""": 3_2, """14B""": 4_0, } lowercase__ : Optional[Any] = { """169M""": 7_6_8, """430M""": 1_0_2_4, """1B5""": 2_0_4_8, """3B""": 2_5_6_0, """7B""": 4_0_9_6, """14B""": 5_1_2_0, } def UpperCamelCase_ ( lowerCAmelCase__ : Optional[int] ) -> Union[str, Any]: """simple docstring""" lowerCAmelCase_ : str = list(state_dict.keys() ) for name in state_dict_keys: lowerCAmelCase_ : List[Any] = state_dict.pop(lowerCAmelCase__ ) # emb -> embedding if name.startswith('emb.' ): lowerCAmelCase_ : Dict = name.replace('emb.' , 'embeddings.' ) # ln_0 -> pre_ln (only present at block 0) if name.startswith('blocks.0.ln0' ): lowerCAmelCase_ : str = name.replace('blocks.0.ln0' , 'blocks.0.pre_ln' ) # att -> attention lowerCAmelCase_ : Optional[Any] = re.sub(R'blocks\.(\d+)\.att' , R'blocks.\1.attention' , lowerCAmelCase__ ) # ffn -> feed_forward lowerCAmelCase_ : Any = re.sub(R'blocks\.(\d+)\.ffn' , R'blocks.\1.feed_forward' , lowerCAmelCase__ ) # time_mix_k -> time_mix_key and reshape if name.endswith('.time_mix_k' ): lowerCAmelCase_ : str = name.replace('.time_mix_k' , '.time_mix_key' ) # time_mix_v -> time_mix_value and reshape if name.endswith('.time_mix_v' ): lowerCAmelCase_ : int = name.replace('.time_mix_v' , '.time_mix_value' ) # time_mix_r -> time_mix_key and reshape if name.endswith('.time_mix_r' ): lowerCAmelCase_ : Any = name.replace('.time_mix_r' , '.time_mix_receptance' ) if name != "head.weight": lowerCAmelCase_ : Optional[int] = 'rwkv.' + name lowerCAmelCase_ : int = weight return state_dict def UpperCamelCase_ ( lowerCAmelCase__ : Union[str, Any] , lowerCAmelCase__ : Any , lowerCAmelCase__ : Optional[Any] , lowerCAmelCase__ : Any=None , lowerCAmelCase__ : Dict=None , lowerCAmelCase__ : Union[str, Any]=False , lowerCAmelCase__ : int=None ) -> int: """simple docstring""" if tokenizer_file is None: print('No `--tokenizer_file` provided, we will use the default tokenizer.' ) lowerCAmelCase_ : int = 5_0277 lowerCAmelCase_ : List[str] = AutoTokenizer.from_pretrained('EleutherAI/gpt-neox-20b' ) else: lowerCAmelCase_ : Dict = PreTrainedTokenizerFast(tokenizer_file=lowerCAmelCase__ ) lowerCAmelCase_ : Dict = len(lowerCAmelCase__ ) tokenizer.save_pretrained(lowerCAmelCase__ ) # 2. Build the config lowerCAmelCase_ : int = list(NUM_HIDDEN_LAYERS_MAPPING.keys() ) if size is None: # Try to infer size from the checkpoint name for candidate in possible_sizes: if candidate in checkpoint_file: lowerCAmelCase_ : Tuple = candidate break if size is None: raise ValueError('Could not infer the size, please provide it with the `--size` argument.' ) if size not in possible_sizes: raise ValueError(f"`size` should be one of {possible_sizes}, got {size}." ) lowerCAmelCase_ : Dict = RwkvConfig( vocab_size=lowerCAmelCase__ , num_hidden_layers=NUM_HIDDEN_LAYERS_MAPPING[size] , hidden_size=HIDEN_SIZE_MAPPING[size] , ) config.save_pretrained(lowerCAmelCase__ ) # 3. 
Download model file then convert state_dict lowerCAmelCase_ : Dict = hf_hub_download(lowerCAmelCase__ , lowerCAmelCase__ ) lowerCAmelCase_ : List[Any] = torch.load(lowerCAmelCase__ , map_location='cpu' ) lowerCAmelCase_ : int = convert_state_dict(lowerCAmelCase__ ) # 4. Split in shards and save lowerCAmelCase_ ,lowerCAmelCase_ : Optional[Any] = shard_checkpoint(lowerCAmelCase__ ) for shard_file, shard in shards.items(): torch.save(lowerCAmelCase__ , os.path.join(lowerCAmelCase__ , lowerCAmelCase__ ) ) if index is not None: lowerCAmelCase_ : List[str] = os.path.join(lowerCAmelCase__ , lowerCAmelCase__ ) # Save the index as well with open(lowerCAmelCase__ , 'w' , encoding='utf-8' ) as f: lowerCAmelCase_ : str = json.dumps(lowerCAmelCase__ , indent=2 , sort_keys=lowerCAmelCase__ ) + '\n' f.write(lowerCAmelCase__ ) # 5. Clean up shards (for some reason the file PyTorch saves take the same space as the whole state_dict print( 'Cleaning up shards. This may error with an OOM error, it this is the case don\'t worry you still have converted the model.' ) lowerCAmelCase_ : List[str] = list(shards.keys() ) del state_dict del shards gc.collect() for shard_file in shard_files: lowerCAmelCase_ : List[Any] = torch.load(os.path.join(lowerCAmelCase__ , lowerCAmelCase__ ) ) torch.save({k: v.cpu().clone() for k, v in state_dict.items()} , os.path.join(lowerCAmelCase__ , lowerCAmelCase__ ) ) del state_dict gc.collect() if push_to_hub: if model_name is None: raise ValueError('Please provide a `model_name` to push the model to the Hub.' ) lowerCAmelCase_ : List[str] = AutoModelForCausalLM.from_pretrained(lowerCAmelCase__ ) model.push_to_hub(lowerCAmelCase__ , max_shard_size='2GB' ) tokenizer.push_to_hub(lowerCAmelCase__ ) if __name__ == "__main__": lowercase__ : Tuple = argparse.ArgumentParser() # Required parameters parser.add_argument( """--repo_id""", default=None, type=str, required=True, help="""Repo ID from which to pull the checkpoint.""" ) parser.add_argument( """--checkpoint_file""", default=None, type=str, required=True, help="""Name of the checkpoint file in the repo.""" ) parser.add_argument( """--output_dir""", default=None, type=str, required=True, help="""Where to save the converted model.""" ) parser.add_argument( """--tokenizer_file""", default=None, type=str, help="""Path to the tokenizer file to use (if not provided, only the model is converted).""", ) parser.add_argument( """--size""", default=None, type=str, help="""Size of the model. Will be inferred from the `checkpoint_file` if not passed.""", ) parser.add_argument( """--push_to_hub""", action="""store_true""", help="""Push to the Hub the converted model.""", ) parser.add_argument( """--model_name""", default=None, type=str, help="""Name of the pushed model on the Hub, including the username / organization.""", ) lowercase__ : List[str] = parser.parse_args() convert_rmkv_checkpoint_to_hf_format( args.repo_id, args.checkpoint_file, args.output_dir, size=args.size, tokenizer_file=args.tokenizer_file, push_to_hub=args.push_to_hub, model_name=args.model_name, )
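As a standalone illustration of the regex-based key renaming performed by the conversion above (the function name and sample keys here are made up):

import re


def rename_rwkv_key(name: str) -> str:
    # blocks.<N>.att -> blocks.<N>.attention, blocks.<N>.ffn -> blocks.<N>.feed_forward
    name = re.sub(r"blocks\.(\d+)\.att", r"blocks.\1.attention", name)
    name = re.sub(r"blocks\.(\d+)\.ffn", r"blocks.\1.feed_forward", name)
    return name


assert rename_rwkv_key("blocks.3.att.key.weight") == "blocks.3.attention.key.weight"
assert rename_rwkv_key("blocks.0.ffn.value.weight") == "blocks.0.feed_forward.value.weight"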
317
1
"""simple docstring""" def __lowercase ( _a ): if not isinstance(lowerCAmelCase__ , lowerCAmelCase__ ) or number < 0: raise ValueError('''Input must be a non-negative integer''' ) snake_case_ : int = 0 while number: # This way we arrive at next set bit (next 1) instead of looping # through each bit and checking for 1s hence the # loop won't run 32 times it will only run the number of `1` times number &= number - 1 count += 1 return count if __name__ == "__main__": import doctest doctest.testmod()
123
from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_tf_available, is_torch_available, ) _lowercase : Dict ={ """configuration_resnet""": ["""RESNET_PRETRAINED_CONFIG_ARCHIVE_MAP""", """ResNetConfig""", """ResNetOnnxConfig"""] } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _lowercase : Union[str, Any] =[ """RESNET_PRETRAINED_MODEL_ARCHIVE_LIST""", """ResNetForImageClassification""", """ResNetModel""", """ResNetPreTrainedModel""", """ResNetBackbone""", ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _lowercase : Union[str, Any] =[ """TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST""", """TFResNetForImageClassification""", """TFResNetModel""", """TFResNetPreTrainedModel""", ] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _lowercase : Tuple =[ """FlaxResNetForImageClassification""", """FlaxResNetModel""", """FlaxResNetPreTrainedModel""", ] if TYPE_CHECKING: from .configuration_resnet import RESNET_PRETRAINED_CONFIG_ARCHIVE_MAP, ResNetConfig, ResNetOnnxConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_resnet import ( RESNET_PRETRAINED_MODEL_ARCHIVE_LIST, ResNetBackbone, ResNetForImageClassification, ResNetModel, ResNetPreTrainedModel, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_resnet import ( TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST, TFResNetForImageClassification, TFResNetModel, TFResNetPreTrainedModel, ) try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_resnet import FlaxResNetForImageClassification, FlaxResNetModel, FlaxResNetPreTrainedModel else: import sys _lowercase : Union[str, Any] =_LazyModule(__name__, globals()["""__file__"""], _import_structure)
364
0
import os from shutil import copyfile from typing import Any, Dict, List, Optional, Tuple import sentencepiece as spm from ...tokenization_utils import PreTrainedTokenizer from ...utils import logging UpperCAmelCase = logging.get_logger(__name__) UpperCAmelCase = '''▁''' UpperCAmelCase = {'''vocab_file''': '''sentencepiece.bpe.model'''} UpperCAmelCase = { '''vocab_file''': { '''facebook/xglm-564M''': '''https://huggingface.co/facebook/xglm-564M/resolve/main/sentencepiece.bpe.model''', } } UpperCAmelCase = { '''facebook/xglm-564M''': 2048, } class A_ ( __lowerCamelCase ): '''simple docstring''' _UpperCamelCase : Optional[Any] = VOCAB_FILES_NAMES _UpperCamelCase : Dict = PRETRAINED_VOCAB_FILES_MAP _UpperCamelCase : Tuple = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES _UpperCamelCase : Union[str, Any] = ["""input_ids""", """attention_mask"""] def __init__( self , snake_case , snake_case="<s>" , snake_case="</s>" , snake_case="</s>" , snake_case="<s>" , snake_case="<unk>" , snake_case="<pad>" , snake_case = None , **snake_case , ): lowercase = {} if sp_model_kwargs is None else sp_model_kwargs # Compatibility with the original tokenizer lowercase = 7 lowercase = [F'''<madeupword{i}>''' for i in range(self.num_madeup_words )] lowercase = kwargs.get('additional_special_tokens' , [] ) kwargs["additional_special_tokens"] += [ word for word in madeup_words if word not in kwargs["additional_special_tokens"] ] super().__init__( bos_token=snake_case , eos_token=snake_case , unk_token=snake_case , sep_token=snake_case , cls_token=snake_case , pad_token=snake_case , sp_model_kwargs=self.sp_model_kwargs , **snake_case , ) lowercase = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(str(snake_case ) ) lowercase = vocab_file # Original fairseq vocab and spm vocab must be "aligned": # Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 # -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ---- # fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's' | '▁de' | '-' # spm | '<unk>' | '<s>' | '</s>' | ',' | '.' 
| '▁' | 's' | '▁de' | '-' | '▁a' # The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab lowercase = 1 # Mimic fairseq token-to-id alignment for the first 4 token lowercase = {'<s>': 0, '<pad>': 1, '</s>': 2, '<unk>': 3} lowercase = len(self.sp_model ) lowercase = {F'''<madeupword{i}>''': sp_size + i + self.fairseq_offset for i in range(self.num_madeup_words )} self.fairseq_tokens_to_ids.update(snake_case ) lowercase = {v: k for k, v in self.fairseq_tokens_to_ids.items()} def __getstate__( self ): lowercase = self.__dict__.copy() lowercase = None lowercase = self.sp_model.serialized_model_proto() return state def __setstate__( self , snake_case ): lowercase = d # for backward compatibility if not hasattr(self , 'sp_model_kwargs' ): lowercase = {} lowercase = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.LoadFromSerializedProto(self.sp_model_proto ) def SCREAMING_SNAKE_CASE__ ( self , snake_case , snake_case = None ): if token_ids_a is None: return [self.sep_token_id] + token_ids_a lowercase = [self.sep_token_id] return sep + token_ids_a + sep + sep + token_ids_a def SCREAMING_SNAKE_CASE__ ( self , snake_case , snake_case = None , snake_case = False ): if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=snake_case , token_ids_a=snake_case , already_has_special_tokens=snake_case ) if token_ids_a is None: return [1] + ([0] * len(snake_case )) return [1] + ([0] * len(snake_case )) + [1, 1] + ([0] * len(snake_case )) def SCREAMING_SNAKE_CASE__ ( self , snake_case , snake_case = None ): lowercase = [self.sep_token_id] if token_ids_a is None: return len(sep + token_ids_a ) * [0] return len(sep + token_ids_a + sep + sep + token_ids_a ) * [0] @property def SCREAMING_SNAKE_CASE__ ( self ): return len(self.sp_model ) + self.fairseq_offset + self.num_madeup_words def SCREAMING_SNAKE_CASE__ ( self ): lowercase = {self.convert_ids_to_tokens(snake_case ): i for i in range(self.vocab_size )} vocab.update(self.added_tokens_encoder ) return vocab def SCREAMING_SNAKE_CASE__ ( self , snake_case ): return self.sp_model.encode(snake_case , out_type=snake_case ) def SCREAMING_SNAKE_CASE__ ( self , snake_case ): if token in self.fairseq_tokens_to_ids: return self.fairseq_tokens_to_ids[token] lowercase = self.sp_model.PieceToId(snake_case ) # Need to return unknown token if the SP model returned 0 return spm_id + self.fairseq_offset if spm_id else self.unk_token_id def SCREAMING_SNAKE_CASE__ ( self , snake_case ): if index in self.fairseq_ids_to_tokens: return self.fairseq_ids_to_tokens[index] return self.sp_model.IdToPiece(index - self.fairseq_offset ) def SCREAMING_SNAKE_CASE__ ( self , snake_case ): lowercase = ''.join(snake_case ).replace(snake_case , ' ' ).strip() return out_string def SCREAMING_SNAKE_CASE__ ( self , snake_case , snake_case = None ): if not os.path.isdir(snake_case ): logger.error(F'''Vocabulary path ({save_directory}) should be a directory''' ) return lowercase = os.path.join( snake_case , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(snake_case ) and os.path.isfile(self.vocab_file ): copyfile(self.vocab_file , snake_case ) elif not os.path.isfile(self.vocab_file ): with open(snake_case , 'wb' ) as fi: lowercase = self.sp_model.serialized_model_proto() fi.write(snake_case ) return (out_vocab_file,)
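The fairseq/sentencepiece id alignment described in the comment above can be summarized in a few standalone lines; the names here are illustrative, not the tokenizer's API:

# spm ids shift by a fixed offset to make room for the reserved control tokens,
# and spm id 0 (<unk>) maps to the reserved fairseq <unk> slot instead.
fairseq_offset = 1
fairseq_tokens_to_ids = {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3}


def spm_id_to_fairseq_id(spm_id: int) -> int:
    return spm_id + fairseq_offset if spm_id else fairseq_tokens_to_ids["<unk>"]


assert spm_id_to_fairseq_id(0) == 3  # spm <unk> -> fairseq <unk>
assert spm_id_to_fairseq_id(5) == 6  # ordinary pieces shift by the offset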
710
from typing import List, Optional from ...configuration_utils import PretrainedConfig from ...utils import logging UpperCAmelCase = logging.get_logger(__name__) UpperCAmelCase = { '''huggingface/autoformer-tourism-monthly''': '''https://huggingface.co/huggingface/autoformer-tourism-monthly/resolve/main/config.json''', } class A_ ( __lowerCamelCase ): '''simple docstring''' _UpperCamelCase : int = """autoformer""" _UpperCamelCase : Any = { """hidden_size""": """d_model""", """num_attention_heads""": """encoder_attention_heads""", """num_hidden_layers""": """encoder_layers""", } def __init__( self , snake_case = None , snake_case = None , snake_case = "student_t" , snake_case = "nll" , snake_case = 1 , snake_case = [1, 2, 3, 4, 5, 6, 7] , snake_case = True , snake_case = 0 , snake_case = 0 , snake_case = 0 , snake_case = 0 , snake_case = None , snake_case = None , snake_case = 64 , snake_case = 2 , snake_case = 2 , snake_case = 2 , snake_case = 2 , snake_case = 32 , snake_case = 32 , snake_case = "gelu" , snake_case = 0.1 , snake_case = 0.1 , snake_case = 0.1 , snake_case = 0.1 , snake_case = 0.1 , snake_case = 100 , snake_case = 0.02 , snake_case = True , snake_case=True , snake_case = 10 , snake_case = 25 , snake_case = 3 , **snake_case , ): # time series specific configuration lowercase = prediction_length lowercase = context_length if context_length is not None else prediction_length lowercase = distribution_output lowercase = loss lowercase = input_size lowercase = num_time_features lowercase = lags_sequence lowercase = scaling lowercase = num_dynamic_real_features lowercase = num_static_real_features lowercase = num_static_categorical_features if cardinality is not None and num_static_categorical_features > 0: if len(snake_case ) != num_static_categorical_features: raise ValueError( 'The cardinality should be a list of the same length as `num_static_categorical_features`' ) lowercase = cardinality else: lowercase = [0] if embedding_dimension is not None and num_static_categorical_features > 0: if len(snake_case ) != num_static_categorical_features: raise ValueError( 'The embedding dimension should be a list of the same length as `num_static_categorical_features`' ) lowercase = embedding_dimension else: lowercase = [min(50 , (cat + 1) // 2 ) for cat in self.cardinality] lowercase = num_parallel_samples # Transformer architecture configuration lowercase = input_size * len(self.lags_sequence ) + self._number_of_features lowercase = d_model lowercase = encoder_attention_heads lowercase = decoder_attention_heads lowercase = encoder_ffn_dim lowercase = decoder_ffn_dim lowercase = encoder_layers lowercase = decoder_layers lowercase = dropout lowercase = attention_dropout lowercase = activation_dropout lowercase = encoder_layerdrop lowercase = decoder_layerdrop lowercase = activation_function lowercase = init_std lowercase = use_cache # Autoformer lowercase = label_length lowercase = moving_average lowercase = autocorrelation_factor super().__init__(is_encoder_decoder=snake_case , **snake_case ) @property def SCREAMING_SNAKE_CASE__ ( self ): return ( sum(self.embedding_dimension ) + self.num_dynamic_real_features + self.num_time_features + self.num_static_real_features + self.input_size * 2 # the log1p(abs(loc)) and log(scale) features )
565
0
"""simple docstring""" import logging import os import sys from dataclasses import dataclass, field from typing import Optional import torch from datasets import load_dataset from torchvision.transforms import Compose, Lambda, Normalize, RandomHorizontalFlip, RandomResizedCrop, ToTensor from torchvision.transforms.functional import InterpolationMode import transformers from transformers import ( HfArgumentParser, Trainer, TrainingArguments, ViTImageProcessor, ViTMAEConfig, ViTMAEForPreTraining, ) from transformers.trainer_utils import get_last_checkpoint from transformers.utils import check_min_version, send_example_telemetry from transformers.utils.versions import require_version __magic_name__ = logging.getLogger(__name__) # Will error if the minimal version of Transformers is not installed. Remove at your own risks. check_min_version("""4.31.0""") require_version("""datasets>=1.8.0""", """To fix: pip install -r examples/pytorch/image-pretraining/requirements.txt""") @dataclass class SCREAMING_SNAKE_CASE__ : snake_case = field( default="cifar10" , metadata={"help": "Name of a dataset from the datasets package"} ) snake_case = field( default=_SCREAMING_SNAKE_CASE , metadata={"help": "The configuration name of the dataset to use (via the datasets library)."} ) snake_case = field( default=_SCREAMING_SNAKE_CASE , metadata={"help": "The column name of the images in the files."} ) snake_case = field(default=_SCREAMING_SNAKE_CASE , metadata={"help": "A folder containing the training data."} ) snake_case = field(default=_SCREAMING_SNAKE_CASE , metadata={"help": "A folder containing the validation data."} ) snake_case = field( default=0.1_5 , metadata={"help": "Percent to split off of train for validation."} ) snake_case = field( default=_SCREAMING_SNAKE_CASE , metadata={ "help": ( "For debugging purposes or quicker training, truncate the number of training examples to this " "value if set." ) } , ) snake_case = field( default=_SCREAMING_SNAKE_CASE , metadata={ "help": ( "For debugging purposes or quicker training, truncate the number of evaluation examples to this " "value if set." ) } , ) def __UpperCAmelCase ( self : Dict ): lowerCamelCase__ = {} if self.train_dir is not None: lowerCamelCase__ = self.train_dir if self.validation_dir is not None: lowerCamelCase__ = self.validation_dir lowerCamelCase__ = data_files if data_files else None @dataclass class SCREAMING_SNAKE_CASE__ : snake_case = field( default=_SCREAMING_SNAKE_CASE , metadata={ "help": ( "The model checkpoint for weights initialization.Don't set if you want to train a model from scratch." ) } , ) snake_case = field( default=_SCREAMING_SNAKE_CASE , metadata={"help": "Pretrained config name or path if not the same as model_name_or_path"} ) snake_case = field( default=_SCREAMING_SNAKE_CASE , metadata={ "help": ( "Override some existing default config settings when a model is trained from scratch. 
Example: " "n_embd=10,resid_pdrop=0.2,scale_attn_weights=false,summary_type=cls_index" ) } , ) snake_case = field( default=_SCREAMING_SNAKE_CASE , metadata={"help": "Where do you want to store the pretrained models downloaded from s3"} ) snake_case = field( default="main" , metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."} , ) snake_case = field(default=_SCREAMING_SNAKE_CASE , metadata={"help": "Name or path of preprocessor config."} ) snake_case = field( default=_SCREAMING_SNAKE_CASE , metadata={ "help": ( "Will use the token generated when running `huggingface-cli login` (necessary to use this script " "with private models)." ) } , ) snake_case = field( default=0.7_5 , metadata={"help": "The ratio of the number of masked tokens in the input sequence."} ) snake_case = field( default=_SCREAMING_SNAKE_CASE , metadata={"help": "Whether or not to train with normalized pixel values as target."} ) @dataclass class SCREAMING_SNAKE_CASE__ ( _SCREAMING_SNAKE_CASE ): snake_case = field( default=1E-3 , metadata={"help": "Base learning rate: absolute_lr = base_lr * total_batch_size / 256."} ) def _A ( __lowercase ): """simple docstring""" lowerCamelCase__ = torch.stack([example["""pixel_values"""] for example in examples] ) return {"pixel_values": pixel_values} def _A ( ): """simple docstring""" lowerCamelCase__ = HfArgumentParser((ModelArguments, DataTrainingArguments, CustomTrainingArguments) ) if len(sys.argv ) == 2 and sys.argv[1].endswith(""".json""" ): # If we pass only one argument to the script and it's the path to a json file, # let's parse it to get our arguments. lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) ) else: lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ = parser.parse_args_into_dataclasses() # Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The # information sent is the one passed as arguments along with your Python/PyTorch versions. send_example_telemetry("""run_mae""" , __lowercase , __lowercase ) # Setup logging logging.basicConfig( format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""" , datefmt="""%m/%d/%Y %H:%M:%S""" , handlers=[logging.StreamHandler(sys.stdout )] , ) if training_args.should_log: # The default of training_args.log_level is passive, so we set log level at info here to have that default. transformers.utils.logging.set_verbosity_info() lowerCamelCase__ = training_args.get_process_log_level() logger.setLevel(__lowercase ) transformers.utils.logging.set_verbosity(__lowercase ) transformers.utils.logging.enable_default_handler() transformers.utils.logging.enable_explicit_format() # Log on each process the small summary: logger.warning( f"""Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}""" + f"""distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}""" ) logger.info(f"""Training/evaluation parameters {training_args}""" ) # Detecting last checkpoint. lowerCamelCase__ = None if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir: lowerCamelCase__ = get_last_checkpoint(training_args.output_dir ) if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0: raise ValueError( f"""Output directory ({training_args.output_dir}) already exists and is not empty. 
""" """Use --overwrite_output_dir to overcome.""" ) elif last_checkpoint is not None and training_args.resume_from_checkpoint is None: logger.info( f"""Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change """ """the `--output_dir` or add `--overwrite_output_dir` to train from scratch.""" ) # Initialize our dataset. lowerCamelCase__ = load_dataset( data_args.dataset_name , data_args.dataset_config_name , data_files=data_args.data_files , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , ) # If we don't have a validation split, split off a percentage of train as validation. lowerCamelCase__ = None if """validation""" in ds.keys() else data_args.train_val_split if isinstance(data_args.train_val_split , __lowercase ) and data_args.train_val_split > 0.0: lowerCamelCase__ = ds["""train"""].train_test_split(data_args.train_val_split ) lowerCamelCase__ = split["""train"""] lowerCamelCase__ = split["""test"""] # Load pretrained model and image processor # # Distributed training: # The .from_pretrained methods guarantee that only one local process can concurrently # download model & vocab. lowerCamelCase__ = { """cache_dir""": model_args.cache_dir, """revision""": model_args.model_revision, """use_auth_token""": True if model_args.use_auth_token else None, } if model_args.config_name: lowerCamelCase__ = ViTMAEConfig.from_pretrained(model_args.config_name , **__lowercase ) elif model_args.model_name_or_path: lowerCamelCase__ = ViTMAEConfig.from_pretrained(model_args.model_name_or_path , **__lowercase ) else: lowerCamelCase__ = ViTMAEConfig() logger.warning("""You are instantiating a new config instance from scratch.""" ) if model_args.config_overrides is not None: logger.info(f"""Overriding config: {model_args.config_overrides}""" ) config.update_from_string(model_args.config_overrides ) logger.info(f"""New config: {config}""" ) # adapt config config.update( { """mask_ratio""": model_args.mask_ratio, """norm_pix_loss""": model_args.norm_pix_loss, } ) # create image processor if model_args.image_processor_name: lowerCamelCase__ = ViTImageProcessor.from_pretrained(model_args.image_processor_name , **__lowercase ) elif model_args.model_name_or_path: lowerCamelCase__ = ViTImageProcessor.from_pretrained(model_args.model_name_or_path , **__lowercase ) else: lowerCamelCase__ = ViTImageProcessor() # create model if model_args.model_name_or_path: lowerCamelCase__ = ViTMAEForPreTraining.from_pretrained( model_args.model_name_or_path , from_tf=bool(""".ckpt""" in model_args.model_name_or_path ) , config=__lowercase , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , ) else: logger.info("""Training new model from scratch""" ) lowerCamelCase__ = ViTMAEForPreTraining(__lowercase ) if training_args.do_train: lowerCamelCase__ = ds["""train"""].column_names else: lowerCamelCase__ = ds["""validation"""].column_names if data_args.image_column_name is not None: lowerCamelCase__ = data_args.image_column_name elif "image" in column_names: lowerCamelCase__ = """image""" elif "img" in column_names: lowerCamelCase__ = """img""" else: lowerCamelCase__ = column_names[0] # transformations as done in original MAE paper # source: https://github.com/facebookresearch/mae/blob/main/main_pretrain.py if "shortest_edge" in image_processor.size: lowerCamelCase__ = image_processor.size["""shortest_edge"""] else: lowerCamelCase__ = (image_processor.size["""height"""], 
image_processor.size["""width"""]) lowerCamelCase__ = Compose( [ Lambda(lambda __lowercase : img.convert("""RGB""" ) if img.mode != "RGB" else img ), RandomResizedCrop(__lowercase , scale=(0.2, 1.0) , interpolation=InterpolationMode.BICUBIC ), RandomHorizontalFlip(), ToTensor(), Normalize(mean=image_processor.image_mean , std=image_processor.image_std ), ] ) def preprocess_images(__lowercase ): lowerCamelCase__ = [transforms(__lowercase ) for image in examples[image_column_name]] return examples if training_args.do_train: if "train" not in ds: raise ValueError("""--do_train requires a train dataset""" ) if data_args.max_train_samples is not None: lowerCamelCase__ = ds["""train"""].shuffle(seed=training_args.seed ).select(range(data_args.max_train_samples ) ) # Set the training transforms ds["train"].set_transform(__lowercase ) if training_args.do_eval: if "validation" not in ds: raise ValueError("""--do_eval requires a validation dataset""" ) if data_args.max_eval_samples is not None: lowerCamelCase__ = ( ds["""validation"""].shuffle(seed=training_args.seed ).select(range(data_args.max_eval_samples ) ) ) # Set the validation transforms ds["validation"].set_transform(__lowercase ) # Compute absolute learning rate lowerCamelCase__ = ( training_args.train_batch_size * training_args.gradient_accumulation_steps * training_args.world_size ) if training_args.base_learning_rate is not None: lowerCamelCase__ = training_args.base_learning_rate * total_train_batch_size / 256 # Initialize our trainer lowerCamelCase__ = Trainer( model=__lowercase , args=__lowercase , train_dataset=ds["""train"""] if training_args.do_train else None , eval_dataset=ds["""validation"""] if training_args.do_eval else None , tokenizer=__lowercase , data_collator=__lowercase , ) # Training if training_args.do_train: lowerCamelCase__ = None if training_args.resume_from_checkpoint is not None: lowerCamelCase__ = training_args.resume_from_checkpoint elif last_checkpoint is not None: lowerCamelCase__ = last_checkpoint lowerCamelCase__ = trainer.train(resume_from_checkpoint=__lowercase ) trainer.save_model() trainer.log_metrics("""train""" , train_result.metrics ) trainer.save_metrics("""train""" , train_result.metrics ) trainer.save_state() # Evaluation if training_args.do_eval: lowerCamelCase__ = trainer.evaluate() trainer.log_metrics("""eval""" , __lowercase ) trainer.save_metrics("""eval""" , __lowercase ) # Write model card and (optionally) push to hub lowerCamelCase__ = { """tasks""": """masked-auto-encoding""", """dataset""": data_args.dataset_name, """tags""": ["""masked-auto-encoding"""], } if training_args.push_to_hub: trainer.push_to_hub(**__lowercase ) else: trainer.create_model_card(**__lowercase ) def _A ( __lowercase ): """simple docstring""" main() if __name__ == "__main__": main()
129
"""simple docstring""" def _A ( __lowercase , __lowercase ): """simple docstring""" while second != 0: lowerCamelCase__ = first & second first ^= second lowerCamelCase__ = c << 1 return first if __name__ == "__main__": import doctest doctest.testmod() __magic_name__ = int(input("""Enter the first number: """).strip()) __magic_name__ = int(input("""Enter the second number: """).strip()) print(F'{add(first, second) = }')
129
1
DOOMSDAY_LEAP = [4, 1, 7, 4, 2, 6, 4, 1, 5, 3, 7, 5]
DOOMSDAY_NOT_LEAP = [3, 7, 7, 4, 2, 6, 4, 1, 5, 3, 7, 5]
WEEK_DAY_NAMES = {
    0: "Sunday",
    1: "Monday",
    2: "Tuesday",
    3: "Wednesday",
    4: "Thursday",
    5: "Friday",
    6: "Saturday",
}


def get_week_day(year: int, month: int, day: int) -> str:
    """Returns the week-day name for a given date using the Doomsday algorithm."""
    # minimal input check:
    assert len(str(year)) > 2, "year should be in YYYY format"
    assert 1 <= month <= 12, "month should be between 1 to 12"
    assert 1 <= day <= 31, "day should be between 1 to 31"
    # Doomsday algorithm:
    century = year // 100
    century_anchor = (5 * (century % 4) + 2) % 7
    centurian = year % 100
    centurian_m = centurian % 12
    dooms_day = (
        (centurian // 12) + centurian_m + (centurian_m // 4) + century_anchor
    ) % 7
    # A year is not leap when it is not divisible by 4, or when it is a
    # century year not divisible by 400.
    day_anchor = (
        DOOMSDAY_NOT_LEAP[month - 1]
        if (year % 4 != 0) or (centurian == 0 and (year % 400) != 0)
        else DOOMSDAY_LEAP[month - 1]
    )
    week_day = (dooms_day + day - day_anchor) % 7
    return WEEK_DAY_NAMES[week_day]


if __name__ == "__main__":
    import doctest

    doctest.testmod()
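Spot checks against known calendar dates (illustrative):

assert get_week_day(2023, 1, 1) == "Sunday"
assert get_week_day(2000, 2, 29) == "Tuesday"  # 2000 is a leap century year
assert get_week_day(1900, 1, 1) == "Monday"    # 1900 is not a leap year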
585
from typing import Dict import numpy as np import torch from . import residue_constants as rc from .tensor_utils import tensor_tree_map, tree_map def _a ( __SCREAMING_SNAKE_CASE : Dict[str, torch.Tensor] ): """simple docstring""" _lowerCAmelCase = [] _lowerCAmelCase = [] _lowerCAmelCase = [] for rt in rc.restypes: _lowerCAmelCase = rc.restype_name_to_atomaa_names[rc.restype_atoa[rt]] restype_atomaa_to_atomaa_list.append([(rc.atom_order[name] if name else 0) for name in atom_names] ) _lowerCAmelCase = {name: i for i, name in enumerate(__SCREAMING_SNAKE_CASE )} restype_atomaa_to_atomaa_list.append( [(atom_name_to_idxaa[name] if name in atom_name_to_idxaa else 0) for name in rc.atom_types] ) restype_atomaa_mask_list.append([(1.0 if name else 0.0) for name in atom_names] ) # Add dummy mapping for restype 'UNK' restype_atomaa_to_atomaa_list.append([0] * 14 ) restype_atomaa_to_atomaa_list.append([0] * 37 ) restype_atomaa_mask_list.append([0.0] * 14 ) _lowerCAmelCase = torch.tensor( __SCREAMING_SNAKE_CASE , dtype=torch.intaa , device=protein['aatype'].device , ) _lowerCAmelCase = torch.tensor( __SCREAMING_SNAKE_CASE , dtype=torch.intaa , device=protein['aatype'].device , ) _lowerCAmelCase = torch.tensor( __SCREAMING_SNAKE_CASE , dtype=torch.floataa , device=protein['aatype'].device , ) _lowerCAmelCase = protein['aatype'].to(torch.long ) # create the mapping for (residx, atom14) --> atom37, i.e. an array # with shape (num_res, 14) containing the atom37 indices for this protein _lowerCAmelCase = restype_atomaa_to_atomaa[protein_aatype] _lowerCAmelCase = restype_atomaa_mask[protein_aatype] _lowerCAmelCase = residx_atomaa_mask _lowerCAmelCase = residx_atomaa_to_atomaa.long() # create the gather indices for mapping back _lowerCAmelCase = restype_atomaa_to_atomaa[protein_aatype] _lowerCAmelCase = residx_atomaa_to_atomaa.long() # create the corresponding mask _lowerCAmelCase = torch.zeros([21, 37] , dtype=torch.floataa , device=protein['aatype'].device ) for restype, restype_letter in enumerate(rc.restypes ): _lowerCAmelCase = rc.restype_atoa[restype_letter] _lowerCAmelCase = rc.residue_atoms[restype_name] for atom_name in atom_names: _lowerCAmelCase = rc.atom_order[atom_name] _lowerCAmelCase = 1 _lowerCAmelCase = restype_atomaa_mask[protein_aatype] _lowerCAmelCase = residx_atomaa_mask return protein def _a ( __SCREAMING_SNAKE_CASE : Dict[str, torch.Tensor] ): """simple docstring""" _lowerCAmelCase = tree_map(lambda __SCREAMING_SNAKE_CASE : torch.tensor(__SCREAMING_SNAKE_CASE , device=batch['aatype'].device ) , __SCREAMING_SNAKE_CASE , np.ndarray ) _lowerCAmelCase = tensor_tree_map(lambda __SCREAMING_SNAKE_CASE : np.array(__SCREAMING_SNAKE_CASE ) , make_atomaa_masks(__SCREAMING_SNAKE_CASE ) ) return out
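The per-residue mapping above relies on a single advanced-indexing gather: indexing a (num_restypes, num_atoms) lookup table with a (num_res,) aatype tensor yields the per-residue index map in one step. A minimal standalone sketch:

import torch

lookup = torch.tensor([[0, 1, 2], [3, 4, 5]])  # 2 residue types, 3 atoms each
aatype = torch.tensor([1, 0, 1])               # 3 residues
print(lookup[aatype])  # tensor([[3, 4, 5], [0, 1, 2], [3, 4, 5]]), shape (3, 3)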
585
1
from typing import Optional, Union

import torch
from torch import nn

from ...configuration_utils import ConfigMixin, register_to_config
from ...models.modeling_utils import ModelMixin


class ImageNormalizer(ModelMixin, ConfigMixin):
    @register_to_config
    def __init__(self, embedding_dim: int = 768) -> None:
        super().__init__()
        self.mean = nn.Parameter(torch.zeros(1, embedding_dim))
        self.std = nn.Parameter(torch.ones(1, embedding_dim))

    def to(
        self,
        torch_device: Optional[Union[str, torch.device]] = None,
        torch_dtype: Optional[torch.dtype] = None,
    ):
        self.mean = nn.Parameter(self.mean.to(torch_device).to(torch_dtype))
        self.std = nn.Parameter(self.std.to(torch_device).to(torch_dtype))
        return self

    def scale(self, embeds: torch.Tensor) -> torch.Tensor:
        embeds = (embeds - self.mean) * 1.0 / self.std
        return embeds

    def unscale(self, embeds: torch.Tensor) -> torch.Tensor:
        embeds = (embeds * self.std) + self.mean
        return embeds
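A standalone round-trip check of the scale/unscale pair implemented above (pure tensors rather than the class itself; shapes are made up):

import torch

mean = torch.zeros(1, 768)
std = torch.ones(1, 768) * 2.0
embeds = torch.randn(4, 768)
scaled = (embeds - mean) * 1.0 / std
restored = scaled * std + mean
assert torch.allclose(restored, embeds, atol=1e-6)  # unscale inverts scale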
531
'''simple docstring''' import os import warnings from typing import List, Optional from ...tokenization_utils_base import BatchEncoding from ...utils import logging from .configuration_rag import RagConfig _lowercase = logging.get_logger(__name__) class _lowercase : def __init__( self , A__ , A__ ) -> Tuple: snake_case = question_encoder snake_case = generator snake_case = self.question_encoder def UpperCamelCase ( self , A__ ) -> int: if os.path.isfile(A__ ): raise ValueError(F"""Provided path ({save_directory}) should be a directory, not a file""" ) os.makedirs(A__ , exist_ok=A__ ) snake_case = os.path.join(A__ , '''question_encoder_tokenizer''' ) snake_case = os.path.join(A__ , '''generator_tokenizer''' ) self.question_encoder.save_pretrained(A__ ) self.generator.save_pretrained(A__ ) @classmethod def UpperCamelCase ( cls , A__ , **A__ ) -> List[Any]: # dynamically import AutoTokenizer from ..auto.tokenization_auto import AutoTokenizer snake_case = kwargs.pop('''config''' , A__ ) if config is None: snake_case = RagConfig.from_pretrained(A__ ) snake_case = AutoTokenizer.from_pretrained( A__ , config=config.question_encoder , subfolder='''question_encoder_tokenizer''' ) snake_case = AutoTokenizer.from_pretrained( A__ , config=config.generator , subfolder='''generator_tokenizer''' ) return cls(question_encoder=A__ , generator=A__ ) def __call__( self , *A__ , **A__ ) -> Any: return self.current_tokenizer(*A__ , **A__ ) def UpperCamelCase ( self , *A__ , **A__ ) -> Tuple: return self.generator.batch_decode(*A__ , **A__ ) def UpperCamelCase ( self , *A__ , **A__ ) -> Tuple: return self.generator.decode(*A__ , **A__ ) def UpperCamelCase ( self ) -> Optional[Any]: snake_case = self.question_encoder def UpperCamelCase ( self ) -> str: snake_case = self.generator def UpperCamelCase ( self , A__ , A__ = None , A__ = None , A__ = None , A__ = "longest" , A__ = None , A__ = True , **A__ , ) -> BatchEncoding: warnings.warn( '''`prepare_seq2seq_batch` is deprecated and will be removed in version 5 of 🤗 Transformers. Use the ''' '''regular `__call__` method to prepare your inputs and the tokenizer under the `with_target_tokenizer` ''' '''context manager to prepare your targets. See the documentation of your specific tokenizer for more ''' '''details''' , A__ , ) if max_length is None: snake_case = self.current_tokenizer.model_max_length snake_case = self( A__ , add_special_tokens=A__ , return_tensors=A__ , max_length=A__ , padding=A__ , truncation=A__ , **A__ , ) if tgt_texts is None: return model_inputs # Process tgt_texts if max_target_length is None: snake_case = self.current_tokenizer.model_max_length snake_case = self( text_target=A__ , add_special_tokens=A__ , return_tensors=A__ , padding=A__ , max_length=A__ , truncation=A__ , **A__ , ) snake_case = labels['''input_ids'''] return model_inputs
342
0
'''simple docstring''' import numpy as np import torch import tqdm from ...models.unet_ad import UNetaDModel from ...pipelines import DiffusionPipeline from ...utils import randn_tensor from ...utils.dummy_pt_objects import DDPMScheduler class lowerCamelCase_ ( __a ): def __init__( self : Optional[Any] , _A : UNetaDModel , _A : UNetaDModel , _A : DDPMScheduler , _A : Any , ): '''simple docstring''' super().__init__() UpperCAmelCase__ : List[str] = value_function UpperCAmelCase__ : Optional[Any] = unet UpperCAmelCase__ : int = scheduler UpperCAmelCase__ : int = env UpperCAmelCase__ : Optional[int] = env.get_dataset() UpperCAmelCase__ : Any = {} for key in self.data.keys(): try: UpperCAmelCase__ : Optional[int] = self.data[key].mean() except: # noqa: E722 pass UpperCAmelCase__ : List[str] = {} for key in self.data.keys(): try: UpperCAmelCase__ : Tuple = self.data[key].std() except: # noqa: E722 pass UpperCAmelCase__ : Tuple = env.observation_space.shape[0] UpperCAmelCase__ : Optional[Any] = env.action_space.shape[0] def lowercase_ ( self : Optional[Any] , _A : Any , _A : int ): '''simple docstring''' return (x_in - self.means[key]) / self.stds[key] def lowercase_ ( self : Optional[int] , _A : str , _A : List[str] ): '''simple docstring''' return x_in * self.stds[key] + self.means[key] def lowercase_ ( self : List[str] , _A : Tuple ): '''simple docstring''' if type(_A ) is dict: return {k: self.to_torch(_A ) for k, v in x_in.items()} elif torch.is_tensor(_A ): return x_in.to(self.unet.device ) return torch.tensor(_A , device=self.unet.device ) def lowercase_ ( self : Optional[Any] , _A : Optional[int] , _A : Dict , _A : Optional[Any] ): '''simple docstring''' for key, val in cond.items(): UpperCAmelCase__ : List[Any] = val.clone() return x_in def lowercase_ ( self : Tuple , _A : Optional[Any] , _A : Any , _A : Optional[Any] , _A : Optional[int] ): '''simple docstring''' UpperCAmelCase__ : List[str] = x.shape[0] UpperCAmelCase__ : List[str] = None for i in tqdm.tqdm(self.scheduler.timesteps ): # create batch of timesteps to pass into model UpperCAmelCase__ : Tuple = torch.full((batch_size,) , _A , device=self.unet.device , dtype=torch.long ) for _ in range(_A ): with torch.enable_grad(): x.requires_grad_() # permute to match dimension for pre-trained models UpperCAmelCase__ : str = self.value_function(x.permute(0 , 2 , 1 ) , _A ).sample UpperCAmelCase__ : Union[str, Any] = torch.autograd.grad([y.sum()] , [x] )[0] UpperCAmelCase__ : Dict = self.scheduler._get_variance(_A ) UpperCAmelCase__ : str = torch.exp(0.5 * posterior_variance ) UpperCAmelCase__ : Dict = model_std * grad UpperCAmelCase__ : Union[str, Any] = 0 UpperCAmelCase__ : List[str] = x.detach() UpperCAmelCase__ : Tuple = x + scale * grad UpperCAmelCase__ : Tuple = self.reset_xa(_A , _A , self.action_dim ) UpperCAmelCase__ : Optional[int] = self.unet(x.permute(0 , 2 , 1 ) , _A ).sample.permute(0 , 2 , 1 ) # TODO: verify deprecation of this kwarg UpperCAmelCase__ : Optional[int] = self.scheduler.step(_A , _A , _A , predict_epsilon=_A )['''prev_sample'''] # apply conditions to the trajectory (set the initial state) UpperCAmelCase__ : Any = self.reset_xa(_A , _A , self.action_dim ) UpperCAmelCase__ : Tuple = self.to_torch(_A ) return x, y def __call__( self : Any , _A : Tuple , _A : int=64 , _A : Tuple=32 , _A : List[Any]=2 , _A : List[str]=0.1 ): '''simple docstring''' UpperCAmelCase__ : Any = self.normalize(_A , '''observations''' ) UpperCAmelCase__ : Any = obs[None].repeat(_A , axis=0 ) UpperCAmelCase__ : Optional[Any] = {0: 
self.to_torch(_A )} UpperCAmelCase__ : Union[str, Any] = (batch_size, planning_horizon, self.state_dim + self.action_dim) # generate initial noise and apply our conditions (to make the trajectories start at current state) UpperCAmelCase__ : List[Any] = randn_tensor(_A , device=self.unet.device ) UpperCAmelCase__ : Optional[Any] = self.reset_xa(_A , _A , self.action_dim ) UpperCAmelCase__ : List[Any] = self.to_torch(_A ) # run the diffusion process UpperCAmelCase__ , UpperCAmelCase__ : List[str] = self.run_diffusion(_A , _A , _A , _A ) # sort output trajectories by value UpperCAmelCase__ : List[str] = y.argsort(0 , descending=_A ).squeeze() UpperCAmelCase__ : int = x[sorted_idx] UpperCAmelCase__ : Optional[int] = sorted_values[:, :, : self.action_dim] UpperCAmelCase__ : int = actions.detach().cpu().numpy() UpperCAmelCase__ : Optional[int] = self.de_normalize(_A , key='''actions''' ) # select the action with the highest value if y is not None: UpperCAmelCase__ : List[Any] = 0 else: # if we didn't run value guiding, select a random action UpperCAmelCase__ : Any = np.random.randint(0 , _A ) UpperCAmelCase__ : List[str] = denorm_actions[selected_index, 0] return denorm_actions
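# A stripped-down sketch of the value-guidance step in the pipeline above (the
# posterior-variance scaling of the gradient is omitted for brevity; names are illustrative):
# ascend the value gradient, then take the usual denoising step on the nudged trajectories.
import torch

def guided_step(x, t, unet, value_function, scheduler, scale=0.1):
    x = x.detach().requires_grad_(True)
    y = value_function(x.permute(0, 2, 1), t).sample            # per-trajectory value estimate
    grad = torch.autograd.grad(y.sum(), [x])[0]                 # d(value)/d(trajectory)
    x = x.detach() + scale * grad                               # move toward higher value
    eps = unet(x.permute(0, 2, 1), t).sample.permute(0, 2, 1)   # denoiser prediction
    return scheduler.step(eps, t, x).prev_sample                # standard reverse step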
701
'''simple docstring'''
from timeit import timeit


def get_set_bits_count_using_brian_kernighans_algorithm(number: int) -> int:
    if number < 0:
        raise ValueError('''the value of input must not be negative''')
    result = 0
    while number:
        number &= number - 1
        result += 1
    return result


def get_set_bits_count_using_modulo_operator(number: int) -> int:
    if number < 0:
        raise ValueError('''the value of input must not be negative''')
    result = 0
    while number:
        if number % 2 == 1:
            result += 1
        number >>= 1
    return result


def benchmark() -> None:
    def do_benchmark(number: int) -> None:
        setup = '''import __main__ as z'''
        print(f"""Benchmark when {number = }:""")
        print(f"""{get_set_bits_count_using_modulo_operator(number) = }""")
        timing = timeit('''z.get_set_bits_count_using_modulo_operator(25)''', setup=setup)
        print(f"""timeit() runs in {timing} seconds""")
        print(f"""{get_set_bits_count_using_brian_kernighans_algorithm(number) = }""")
        timing = timeit(
            '''z.get_set_bits_count_using_brian_kernighans_algorithm(25)''',
            setup=setup,
        )
        print(f"""timeit() runs in {timing} seconds""")

    for number in (25, 37, 58, 0):
        do_benchmark(number)
        print()


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    benchmark()
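# Why `number &= number - 1` counts set bits: subtracting 1 flips the lowest set bit and all
# bits below it, so the AND clears exactly one set bit per loop iteration. A quick trace:
n = 0b10100          # 20, two set bits
n &= n - 1           # 0b10100 & 0b10011 -> 0b10000
n &= n - 1           # 0b10000 & 0b01111 -> 0
assert n == 0        # the loop body ran once per set bit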
312
0
"""simple docstring""" from __future__ import annotations from cmath import sqrt def __a ( A , A , A ) -> tuple[complex, complex]: '''simple docstring''' if a == 0: raise ValueError("Coefficient 'a' must not be zero." ) A__ = b * b - 4 * a * c A__ = (-b + sqrt(A )) / (2 * a) A__ = (-b - sqrt(A )) / (2 * a) return ( root_a.real if not root_a.imag else root_a, root_a.real if not root_a.imag else root_a, ) def __a ( ) -> Optional[Any]: '''simple docstring''' A__ , A__ = quadratic_roots(a=5 , b=6 , c=1 ) print(f"""The solutions are: {solutiona} and {solutiona}""" ) if __name__ == "__main__": main()
337
"""simple docstring""" from ...utils import ( OptionalDependencyNotAvailable, is_torch_available, is_transformers_available, is_transformers_version, ) try: if not (is_transformers_available() and is_torch_available() and is_transformers_version(""">=""", """4.25.0""")): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ...utils.dummy_torch_and_transformers_objects import ( VersatileDiffusionDualGuidedPipeline, VersatileDiffusionImageVariationPipeline, VersatileDiffusionPipeline, VersatileDiffusionTextToImagePipeline, ) else: from .modeling_text_unet import UNetFlatConditionModel from .pipeline_versatile_diffusion import VersatileDiffusionPipeline from .pipeline_versatile_diffusion_dual_guided import VersatileDiffusionDualGuidedPipeline from .pipeline_versatile_diffusion_image_variation import VersatileDiffusionImageVariationPipeline from .pipeline_versatile_diffusion_text_to_image import VersatileDiffusionTextToImagePipeline
337
1
"""simple docstring""" import numpy as np import torch from torch.utils.data import Dataset from utils import logger class __snake_case ( __lowerCAmelCase ): def __init__( self , lowercase , lowercase) -> Optional[Any]: '''simple docstring''' a__: List[Any] = params a__: Optional[Any] = np.array(lowercase) a__: Optional[Any] = np.array([len(lowercase) for t in data]) self.check() self.remove_long_sequences() self.remove_empty_sequences() self.remove_unknown_sequences() self.check() self.print_statistics() def __getitem__( self , lowercase) -> Union[str, Any]: '''simple docstring''' return (self.token_ids[index], self.lengths[index]) def __len__( self) -> Optional[Any]: '''simple docstring''' return len(self.lengths) def lowerCamelCase_ ( self) -> Any: '''simple docstring''' assert len(self.token_ids) == len(self.lengths) assert all(self.lengths[i] == len(self.token_ids[i]) for i in range(len(self.lengths))) def lowerCamelCase_ ( self) -> int: '''simple docstring''' a__: List[Any] = self.params.max_model_input_size a__: Any = self.lengths > max_len logger.info(f'Splitting {sum(lowercase)} too long sequences.') def divide_chunks(lowercase , lowercase): return [l[i : i + n] for i in range(0 , len(lowercase) , lowercase)] a__: Optional[int] = [] a__: int = [] if self.params.mlm: a__ , a__: Union[str, Any] = self.params.special_tok_ids['cls_token'], self.params.special_tok_ids['sep_token'] else: a__ , a__: Optional[int] = self.params.special_tok_ids['bos_token'], self.params.special_tok_ids['eos_token'] for seq_, len_ in zip(self.token_ids , self.lengths): assert (seq_[0] == cls_id) and (seq_[-1] == sep_id), seq_ if len_ <= max_len: new_tok_ids.append(seq_) new_lengths.append(len_) else: a__: List[str] = [] for sub_s in divide_chunks(seq_ , max_len - 2): if sub_s[0] != cls_id: a__: Tuple = np.insert(lowercase , 0 , lowercase) if sub_s[-1] != sep_id: a__: int = np.insert(lowercase , len(lowercase) , lowercase) assert len(lowercase) <= max_len assert (sub_s[0] == cls_id) and (sub_s[-1] == sep_id), sub_s sub_seqs.append(lowercase) new_tok_ids.extend(lowercase) new_lengths.extend([len(lowercase) for l in sub_seqs]) a__: int = np.array(lowercase) a__: Optional[int] = np.array(lowercase) def lowerCamelCase_ ( self) -> Optional[int]: '''simple docstring''' a__: Union[str, Any] = len(self) a__: int = self.lengths > 11 a__: int = self.token_ids[indices] a__: int = self.lengths[indices] a__: Tuple = len(self) logger.info(f'Remove {init_size - new_size} too short (<=11 tokens) sequences.') def lowerCamelCase_ ( self) -> int: '''simple docstring''' if "unk_token" not in self.params.special_tok_ids: return else: a__: Optional[int] = self.params.special_tok_ids['unk_token'] a__: Union[str, Any] = len(self) a__: Any = np.array([np.count_nonzero(a == unk_token_id) for a in self.token_ids]) a__: Optional[Any] = (unk_occs / self.lengths) < 0.5 a__: int = self.token_ids[indices] a__: Union[str, Any] = self.lengths[indices] a__: Optional[int] = len(self) logger.info(f'Remove {init_size - new_size} sequences with a high level of unknown tokens (50%).') def lowerCamelCase_ ( self) -> int: '''simple docstring''' if not self.params.is_master: return logger.info(f'{len(self)} sequences') # data_len = sum(self.lengths) # nb_unique_tokens = len(Counter(list(chain(*self.token_ids)))) # logger.info(f'{data_len} tokens ({nb_unique_tokens} unique)') # unk_idx = self.params.special_tok_ids['unk_token'] # nb_unknown = sum([(t==unk_idx).sum() for t in self.token_ids]) # logger.info(f'{nb_unknown} unknown tokens (covering 
{100*nb_unknown/data_len:.2f}% of the data)') def lowerCamelCase_ ( self , lowercase) -> List[str]: '''simple docstring''' a__: Union[str, Any] = [t[0] for t in batch] a__: List[str] = [t[1] for t in batch] assert len(lowercase) == len(lowercase) # Max for paddings a__: int = max(lowercase) # Pad token ids if self.params.mlm: a__: int = self.params.special_tok_ids['pad_token'] else: a__: List[Any] = self.params.special_tok_ids['unk_token'] a__: str = [list(t.astype(lowercase)) + [pad_idx] * (max_seq_len_ - len(lowercase)) for t in token_ids] assert len(tk_) == len(lowercase) assert all(len(lowercase) == max_seq_len_ for t in tk_) a__: str = torch.tensor(tk_) # (bs, max_seq_len_) a__: Dict = torch.tensor(lowercase) # (bs) return tk_t, lg_t
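# The splitting rule in the dataset class above, in isolation: too-long sequences are cut
# into windows of (max_len - 2) tokens and each window is re-wrapped with the start/end
# special ids so every sub-sequence still looks like a complete example. A sketch, with
# illustrative names:
import numpy as np

def split_long_sequence(seq, max_len, cls_id, sep_id):
    chunks = [seq[i : i + max_len - 2] for i in range(0, len(seq), max_len - 2)]
    out = []
    for sub in chunks:
        if sub[0] != cls_id:
            sub = np.insert(sub, 0, cls_id)          # restore the start token on inner windows
        if sub[-1] != sep_id:
            sub = np.insert(sub, len(sub), sep_id)   # restore the end token
        out.append(sub)
    return out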
217
"""simple docstring""" import re def __a ( _SCREAMING_SNAKE_CASE ) ->list: return [char.split() for char in re.split(r'[^ a-z A-Z 0-9 \s]' , str_ )] def __a ( _SCREAMING_SNAKE_CASE ) ->str: a__: int = split_input(str_ ) return "".join( [''.join([char.capitalize() for char in sub_str] ) for sub_str in string_split] ) def __a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) ->str: try: a__: List[str] = split_input(_SCREAMING_SNAKE_CASE ) if upper: a__: Optional[int] = ''.join( [ separator.join([char.upper() for char in sub_str] ) for sub_str in string_split ] ) else: a__: Optional[Any] = ''.join( [ separator.join([char.lower() for char in sub_str] ) for sub_str in string_split ] ) return res_str except IndexError: return "not valid string" def __a ( _SCREAMING_SNAKE_CASE ) ->str: return to_simple_case(_SCREAMING_SNAKE_CASE ) def __a ( _SCREAMING_SNAKE_CASE ) ->str: try: a__: Union[str, Any] = to_simple_case(_SCREAMING_SNAKE_CASE ) return res_str[0].lower() + res_str[1:] except IndexError: return "not valid string" def __a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) ->str: return to_complex_case(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , '_' ) def __a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) ->str: return to_complex_case(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , '-' ) if __name__ == "__main__": __import__('doctest').testmod()
217
1
"""simple docstring""" import copy from typing import Dict, Optional from ...configuration_utils import PretrainedConfig from ...utils import logging from ..auto import CONFIG_MAPPING from ..detr import DetrConfig from ..swin import SwinConfig _lowerCAmelCase : List[Any] = { """facebook/maskformer-swin-base-ade""": ( """https://huggingface.co/facebook/maskformer-swin-base-ade/blob/main/config.json""" ) # See all MaskFormer models at https://huggingface.co/models?filter=maskformer } _lowerCAmelCase : List[Any] = logging.get_logger(__name__) class lowerCAmelCase__ ( __magic_name__ ): SCREAMING_SNAKE_CASE_ ='''maskformer''' SCREAMING_SNAKE_CASE_ ={'''hidden_size''': '''mask_feature_size'''} SCREAMING_SNAKE_CASE_ =['''resnet''', '''swin'''] SCREAMING_SNAKE_CASE_ =['''detr'''] def __init__( self : List[Any] , snake_case__ : int = 2_5_6 , snake_case__ : int = 2_5_6 , snake_case__ : float = 0.1 , snake_case__ : bool = False , snake_case__ : Optional[Dict] = None , snake_case__ : Optional[Dict] = None , snake_case__ : float = 0.02 , snake_case__ : float = 1.0 , snake_case__ : float = 1.0 , snake_case__ : float = 1.0 , snake_case__ : float = 20.0 , snake_case__ : Optional[bool] = None , **snake_case__ : Dict , ): '''simple docstring''' if backbone_config is None: # fall back to https://huggingface.co/microsoft/swin-base-patch4-window12-384-in22k UpperCAmelCase__ : Any = SwinConfig( image_size=3_8_4 , in_channels=3 , patch_size=4 , embed_dim=1_2_8 , depths=[2, 2, 1_8, 2] , num_heads=[4, 8, 1_6, 3_2] , window_size=1_2 , drop_path_rate=0.3 , out_features=["stage1", "stage2", "stage3", "stage4"] , ) if isinstance(snake_case__ , snake_case__ ): UpperCAmelCase__ : List[Any] = backbone_config.pop("model_type" ) UpperCAmelCase__ : Any = CONFIG_MAPPING[backbone_model_type] UpperCAmelCase__ : int = config_class.from_dict(snake_case__ ) # verify that the backbone is supported if backbone_config.model_type not in self.backbones_supported: logger.warning_once( f'Backbone {backbone_config.model_type} is not a supported model and may not be compatible with MaskFormer. 
' f'Supported model types: {",".join(self.backbones_supported )}' ) if decoder_config is None: # fall back to https://huggingface.co/facebook/detr-resnet-50 UpperCAmelCase__ : str = DetrConfig() else: # verify that the decoder is supported UpperCAmelCase__ : Optional[int] = ( decoder_config.pop("model_type" ) if isinstance(snake_case__ , snake_case__ ) else decoder_config.model_type ) if decoder_type not in self.decoders_supported: raise ValueError( f'Transformer Decoder {decoder_type} not supported, please use one of' f' {",".join(self.decoders_supported )}' ) if isinstance(snake_case__ , snake_case__ ): UpperCAmelCase__ : Any = CONFIG_MAPPING[decoder_type] UpperCAmelCase__ : Union[str, Any] = config_class.from_dict(snake_case__ ) UpperCAmelCase__ : str = backbone_config UpperCAmelCase__ : Optional[Any] = decoder_config # main feature dimension for the model UpperCAmelCase__ : Tuple = fpn_feature_size UpperCAmelCase__ : Optional[Any] = mask_feature_size # initializer UpperCAmelCase__ : Tuple = init_std UpperCAmelCase__ : List[Any] = init_xavier_std # Hungarian matcher && loss UpperCAmelCase__ : str = cross_entropy_weight UpperCAmelCase__ : int = dice_weight UpperCAmelCase__ : Dict = mask_weight UpperCAmelCase__ : Union[str, Any] = use_auxiliary_loss UpperCAmelCase__ : Optional[int] = no_object_weight UpperCAmelCase__ : Optional[int] = output_auxiliary_logits UpperCAmelCase__ : Union[str, Any] = self.decoder_config.encoder_attention_heads UpperCAmelCase__ : Tuple = self.decoder_config.num_hidden_layers super().__init__(**snake_case__ ) @classmethod def __a ( cls : Union[str, Any] , snake_case__ : PretrainedConfig , snake_case__ : PretrainedConfig , **snake_case__ : Optional[Any] ): '''simple docstring''' return cls( backbone_config=snake_case__ , decoder_config=snake_case__ , **snake_case__ , ) def __a ( self : Optional[Any] ): '''simple docstring''' UpperCAmelCase__ : List[Any] = copy.deepcopy(self.__dict__ ) UpperCAmelCase__ : Tuple = self.backbone_config.to_dict() UpperCAmelCase__ : Union[str, Any] = self.decoder_config.to_dict() UpperCAmelCase__ : List[Any] = self.__class__.model_type return output
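# The composition trick used by the config class above, in isolation: a nested config dict
# carries its own "model_type", which is popped and used to select the right config class
# before re-parsing. The dict below is illustrative.
from transformers.models.auto.configuration_auto import CONFIG_MAPPING

backbone_dict = {"model_type": "swin", "image_size": 384, "window_size": 12}
backbone_type = backbone_dict.pop("model_type")
backbone_config = CONFIG_MAPPING[backbone_type].from_dict(backbone_dict)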
438
"""simple docstring""" from ...configuration_utils import PretrainedConfig from ...utils import logging _lowerCAmelCase : Optional[int] = logging.get_logger(__name__) _lowerCAmelCase : Tuple = { """uw-madison/mra-base-512-4""": """https://huggingface.co/uw-madison/mra-base-512-4/resolve/main/config.json""", } class lowerCAmelCase__ ( __magic_name__ ): SCREAMING_SNAKE_CASE_ ='''mra''' def __init__( self : Any , snake_case__ : List[str]=5_0_2_6_5 , snake_case__ : Any=7_6_8 , snake_case__ : Union[str, Any]=1_2 , snake_case__ : Optional[Any]=1_2 , snake_case__ : Tuple=3_0_7_2 , snake_case__ : str="gelu" , snake_case__ : Any=0.1 , snake_case__ : Tuple=0.1 , snake_case__ : Tuple=5_1_2 , snake_case__ : Union[str, Any]=1 , snake_case__ : List[Any]=0.02 , snake_case__ : str=1e-5 , snake_case__ : List[Any]="absolute" , snake_case__ : str=4 , snake_case__ : List[str]="full" , snake_case__ : Tuple=0 , snake_case__ : Any=0 , snake_case__ : Union[str, Any]=1 , snake_case__ : int=0 , snake_case__ : int=2 , **snake_case__ : List[Any] , ): '''simple docstring''' super().__init__(pad_token_id=snake_case__ , bos_token_id=snake_case__ , eos_token_id=snake_case__ , **snake_case__ ) UpperCAmelCase__ : List[Any] = vocab_size UpperCAmelCase__ : str = max_position_embeddings UpperCAmelCase__ : Any = hidden_size UpperCAmelCase__ : Union[str, Any] = num_hidden_layers UpperCAmelCase__ : str = num_attention_heads UpperCAmelCase__ : int = intermediate_size UpperCAmelCase__ : int = hidden_act UpperCAmelCase__ : List[str] = hidden_dropout_prob UpperCAmelCase__ : List[str] = attention_probs_dropout_prob UpperCAmelCase__ : Any = initializer_range UpperCAmelCase__ : Any = type_vocab_size UpperCAmelCase__ : Dict = layer_norm_eps UpperCAmelCase__ : Tuple = position_embedding_type UpperCAmelCase__ : List[str] = block_per_row UpperCAmelCase__ : Optional[Any] = approx_mode UpperCAmelCase__ : Any = initial_prior_first_n_blocks UpperCAmelCase__ : List[Any] = initial_prior_diagonal_n_blocks
438
1
from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available lowerCamelCase__ : Any = { """configuration_xlm""": ["""XLM_PRETRAINED_CONFIG_ARCHIVE_MAP""", """XLMConfig""", """XLMOnnxConfig"""], """tokenization_xlm""": ["""XLMTokenizer"""], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCamelCase__ : Any = [ """XLM_PRETRAINED_MODEL_ARCHIVE_LIST""", """XLMForMultipleChoice""", """XLMForQuestionAnswering""", """XLMForQuestionAnsweringSimple""", """XLMForSequenceClassification""", """XLMForTokenClassification""", """XLMModel""", """XLMPreTrainedModel""", """XLMWithLMHeadModel""", ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCamelCase__ : int = [ """TF_XLM_PRETRAINED_MODEL_ARCHIVE_LIST""", """TFXLMForMultipleChoice""", """TFXLMForQuestionAnsweringSimple""", """TFXLMForSequenceClassification""", """TFXLMForTokenClassification""", """TFXLMMainLayer""", """TFXLMModel""", """TFXLMPreTrainedModel""", """TFXLMWithLMHeadModel""", ] if TYPE_CHECKING: from .configuration_xlm import XLM_PRETRAINED_CONFIG_ARCHIVE_MAP, XLMConfig, XLMOnnxConfig from .tokenization_xlm import XLMTokenizer try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_xlm import ( XLM_PRETRAINED_MODEL_ARCHIVE_LIST, XLMForMultipleChoice, XLMForQuestionAnswering, XLMForQuestionAnsweringSimple, XLMForSequenceClassification, XLMForTokenClassification, XLMModel, XLMPreTrainedModel, XLMWithLMHeadModel, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_xlm import ( TF_XLM_PRETRAINED_MODEL_ARCHIVE_LIST, TFXLMForMultipleChoice, TFXLMForQuestionAnsweringSimple, TFXLMForSequenceClassification, TFXLMForTokenClassification, TFXLMMainLayer, TFXLMModel, TFXLMPreTrainedModel, TFXLMWithLMHeadModel, ) else: import sys lowerCamelCase__ : Tuple = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
495
import sys

N = (
    """73167176531330624919225119674426574742355349194934"""
    """96983520312774506326239578318016984801869478851843"""
    """85861560789112949495459501737958331952853208805511"""
    """12540698747158523863050715693290963295227443043557"""
    """66896648950445244523161731856403098711121722383113"""
    """62229893423380308135336276614282806444486645238749"""
    """30358907296290491560440772390713810515859307960866"""
    """70172427121883998797908792274921901699720888093776"""
    """65727333001053367881220235421809751254540594752243"""
    """52584907711670556013604839586446706324415722155397"""
    """53697817977846174064955149290862569321978468622482"""
    """83972241375657056057490261407972968652414535100474"""
    """82166370484403199890008895243450658541227588666881"""
    """16427171479924442928230863465674813919123162824586"""
    """17866458359124566529476545682848912883142607690042"""
    """24219022671055626321111109370544217506941658960408"""
    """07198403850962455444362981230987879927244284909188"""
    """84580156166097919133875499200524063689912560717606"""
    """05886116467109405077541002256983155200055935729725"""
    """71636269561882670428252483600823257530420752963450"""
)


def solution(n: str = N) -> int:
    '''simple docstring'''
    largest_product = -sys.maxsize - 1
    for i in range(len(n) - 12):
        product = 1
        for j in range(13):
            product *= int(n[i + j])
        if product > largest_product:
            largest_product = product
    return largest_product


if __name__ == "__main__":
    print(f'''{solution() = }''')
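# Sanity check on the window arithmetic above: a 13-digit window starting at i covers
# n[i] .. n[i + 12], so the last valid start index is len(n) - 13, hence range(len(n) - 12).
assert all(len(N[i : i + 13]) == 13 for i in range(len(N) - 12))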
495
1
'''simple docstring''' from ...utils import ( OptionalDependencyNotAvailable, is_torch_available, is_transformers_available, is_transformers_version, ) try: if not (is_transformers_available() and is_torch_available() and is_transformers_version(""">=""", """4.25.0""")): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ...utils.dummy_torch_and_transformers_objects import UnCLIPImageVariationPipeline, UnCLIPPipeline else: from .pipeline_unclip import UnCLIPPipeline from .pipeline_unclip_image_variation import UnCLIPImageVariationPipeline from .text_proj import UnCLIPTextProjModel
251
'''simple docstring''' from dataclasses import dataclass, field from typing import Tuple from ..utils import cached_property, is_tf_available, logging, requires_backends from .benchmark_args_utils import BenchmarkArguments if is_tf_available(): import tensorflow as tf _lowerCAmelCase :str = logging.get_logger(__name__) @dataclass class UpperCAmelCase ( _SCREAMING_SNAKE_CASE ): '''simple docstring''' snake_case__ : List[str] = [ "no_inference", "no_cuda", "no_tpu", "no_speed", "no_memory", "no_env_print", "no_multi_process", ] def __init__( self , **lowercase__ ) -> Dict: for deprecated_arg in self.deprecated_args: if deprecated_arg in kwargs: SCREAMING_SNAKE_CASE : Tuple = deprecated_arg[3:] SCREAMING_SNAKE_CASE : Optional[int] = not kwargs.pop(lowercase__ ) logger.warning( F"""{deprecated_arg} is depreciated. Please use --no-{positive_arg} or""" F""" {positive_arg}={kwargs[positive_arg]}""" ) SCREAMING_SNAKE_CASE : List[str] = kwargs.pop('tpu_name' , self.tpu_name ) SCREAMING_SNAKE_CASE : Union[str, Any] = kwargs.pop('device_idx' , self.device_idx ) SCREAMING_SNAKE_CASE : List[Any] = kwargs.pop('eager_mode' , self.eager_mode ) SCREAMING_SNAKE_CASE : Optional[int] = kwargs.pop('use_xla' , self.use_xla ) super().__init__(**lowercase__ ) snake_case__ : str = field( default=_SCREAMING_SNAKE_CASE , metadata={"help": "Name of TPU"} , ) snake_case__ : int = field( default=0 , metadata={"help": "CPU / GPU device index. Defaults to 0."} , ) snake_case__ : bool = field(default=_SCREAMING_SNAKE_CASE , metadata={"help": "Benchmark models in eager model."} ) snake_case__ : bool = field( default=_SCREAMING_SNAKE_CASE , metadata={ "help": "Benchmark models using XLA JIT compilation. Note that `eager_model` has to be set to `False`." } , ) @cached_property def _UpperCamelCase ( self ) -> Tuple["tf.distribute.cluster_resolver.TPUClusterResolver"]: requires_backends(self , ['tf'] ) SCREAMING_SNAKE_CASE : List[Any] = None if self.tpu: try: if self.tpu_name: SCREAMING_SNAKE_CASE : Union[str, Any] = tf.distribute.cluster_resolver.TPUClusterResolver(self.tpu_name ) else: SCREAMING_SNAKE_CASE : str = tf.distribute.cluster_resolver.TPUClusterResolver() except ValueError: SCREAMING_SNAKE_CASE : str = None return tpu @cached_property def _UpperCamelCase ( self ) -> Tuple["tf.distribute.Strategy", "tf.distribute.cluster_resolver.TPUClusterResolver"]: requires_backends(self , ['tf'] ) if self.is_tpu: tf.config.experimental_connect_to_cluster(self._setup_tpu ) tf.tpu.experimental.initialize_tpu_system(self._setup_tpu ) SCREAMING_SNAKE_CASE : Optional[int] = tf.distribute.TPUStrategy(self._setup_tpu ) else: # currently no multi gpu is allowed if self.is_gpu: # TODO: Currently only single GPU is supported tf.config.set_visible_devices(self.gpu_list[self.device_idx] , 'GPU' ) SCREAMING_SNAKE_CASE : List[str] = tf.distribute.OneDeviceStrategy(device=F"""/gpu:{self.device_idx}""" ) else: tf.config.set_visible_devices([] , 'GPU' ) # disable GPU SCREAMING_SNAKE_CASE : Optional[int] = tf.distribute.OneDeviceStrategy(device=F"""/cpu:{self.device_idx}""" ) return strategy @property def _UpperCamelCase ( self ) -> bool: requires_backends(self , ['tf'] ) return self._setup_tpu is not None @property def _UpperCamelCase ( self ) -> "tf.distribute.Strategy": requires_backends(self , ['tf'] ) return self._setup_strategy @property def _UpperCamelCase ( self ) -> Optional[int]: requires_backends(self , ['tf'] ) return tf.config.list_physical_devices('GPU' ) @property def _UpperCamelCase ( self ) -> int: requires_backends(self , ['tf'] 
) if self.cuda: return len(self.gpu_list ) return 0 @property def _UpperCamelCase ( self ) -> bool: return self.n_gpu > 0
251
1
'''simple docstring'''


def capitalize_each_alpha(txt: str) -> list:
    """simple docstring"""
    return [
        txt[:a] + txt[a].upper() + txt[a + 1 :]
        for a in range(len(txt))
        if txt[a].isalpha()
    ]


if __name__ == "__main__":
    __import__("doctest").testmod()
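# The comprehension above yields one variant per alphabetic position (the function name is a
# reconstruction); for "ab1c" the alphabetic positions are 0, 1 and 3, so three variants
# come back:
assert capitalize_each_alpha("ab1c") == ["Ab1c", "aB1c", "ab1C"]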
514
'''simple docstring''' import json import unittest import numpy as np from huggingface_hub import hf_hub_download from transformers.testing_utils import require_torch, require_vision from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch if is_vision_available(): from transformers import OneFormerImageProcessor from transformers.models.oneformer.image_processing_oneformer import binary_mask_to_rle from transformers.models.oneformer.modeling_oneformer import OneFormerForUniversalSegmentationOutput if is_vision_available(): from PIL import Image def snake_case ( snake_case : List[str] , snake_case : int="shi-labs/oneformer_demo" ) -> Any: """simple docstring""" with open(hf_hub_download(snake_case , snake_case , repo_type='dataset' ) , 'r' ) as f: lowerCAmelCase = json.load(snake_case ) lowerCAmelCase = {} lowerCAmelCase = [] lowerCAmelCase = [] for key, info in class_info.items(): lowerCAmelCase = info['name'] class_names.append(info['name'] ) if info["isthing"]: thing_ids.append(int(snake_case ) ) lowerCAmelCase = thing_ids lowerCAmelCase = class_names return metadata class _snake_case ( unittest.TestCase ): def __init__( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=7 , _SCREAMING_SNAKE_CASE=3 , _SCREAMING_SNAKE_CASE=30 , _SCREAMING_SNAKE_CASE=4_00 , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=[0.5, 0.5, 0.5] , _SCREAMING_SNAKE_CASE=[0.5, 0.5, 0.5] , _SCREAMING_SNAKE_CASE=10 , _SCREAMING_SNAKE_CASE=False , _SCREAMING_SNAKE_CASE=2_55 , _SCREAMING_SNAKE_CASE="shi-labs/oneformer_demo" , _SCREAMING_SNAKE_CASE="ade20k_panoptic.json" , _SCREAMING_SNAKE_CASE=10 , ): '''simple docstring''' lowerCAmelCase = parent lowerCAmelCase = batch_size lowerCAmelCase = num_channels lowerCAmelCase = min_resolution lowerCAmelCase = max_resolution lowerCAmelCase = do_resize lowerCAmelCase = {'shortest_edge': 32, 'longest_edge': 13_33} if size is None else size lowerCAmelCase = do_normalize lowerCAmelCase = image_mean lowerCAmelCase = image_std lowerCAmelCase = class_info_file lowerCAmelCase = prepare_metadata(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) lowerCAmelCase = num_text lowerCAmelCase = repo_path # for the post_process_functions lowerCAmelCase = 2 lowerCAmelCase = 10 lowerCAmelCase = 10 lowerCAmelCase = 3 lowerCAmelCase = 4 lowerCAmelCase = num_labels lowerCAmelCase = do_reduce_labels lowerCAmelCase = ignore_index def _SCREAMING_SNAKE_CASE ( self ): '''simple docstring''' return { "do_resize": self.do_resize, "size": self.size, "do_normalize": self.do_normalize, "image_mean": self.image_mean, "image_std": self.image_std, "num_labels": self.num_labels, "do_reduce_labels": self.do_reduce_labels, "ignore_index": self.ignore_index, "class_info_file": self.class_info_file, "metadata": self.metadata, "num_text": self.num_text, } def _SCREAMING_SNAKE_CASE ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=False ): '''simple docstring''' if not batched: lowerCAmelCase = image_inputs[0] if isinstance(_SCREAMING_SNAKE_CASE , Image.Image ): lowerCAmelCase , lowerCAmelCase = image.size else: lowerCAmelCase , lowerCAmelCase = image.shape[1], image.shape[2] if w < h: lowerCAmelCase = int(self.size['shortest_edge'] * h / w ) lowerCAmelCase = self.size['shortest_edge'] elif w > h: lowerCAmelCase = self.size['shortest_edge'] lowerCAmelCase = int(self.size['shortest_edge'] * w / h ) else: 
lowerCAmelCase = self.size['shortest_edge'] lowerCAmelCase = self.size['shortest_edge'] else: lowerCAmelCase = [] for image in image_inputs: lowerCAmelCase , lowerCAmelCase = self.get_expected_values([image] ) expected_values.append((expected_height, expected_width) ) lowerCAmelCase = max(_SCREAMING_SNAKE_CASE , key=lambda _SCREAMING_SNAKE_CASE : item[0] )[0] lowerCAmelCase = max(_SCREAMING_SNAKE_CASE , key=lambda _SCREAMING_SNAKE_CASE : item[1] )[1] return expected_height, expected_width def _SCREAMING_SNAKE_CASE ( self ): '''simple docstring''' return OneFormerForUniversalSegmentationOutput( # +1 for null class class_queries_logits=torch.randn((self.batch_size, self.num_queries, self.num_classes + 1) ) , masks_queries_logits=torch.randn((self.batch_size, self.num_queries, self.height, self.width) ) , ) @require_torch @require_vision class _snake_case ( a_ , unittest.TestCase ): SCREAMING_SNAKE_CASE : Tuple = OneFormerImageProcessor if (is_vision_available() and is_torch_available()) else None # only for test_image_processing_common.test_image_proc_to_json_string SCREAMING_SNAKE_CASE : str = image_processing_class def _SCREAMING_SNAKE_CASE ( self ): '''simple docstring''' lowerCAmelCase = OneFormerImageProcessorTester(self ) @property def _SCREAMING_SNAKE_CASE ( self ): '''simple docstring''' return self.image_processing_tester.prepare_image_processor_dict() def _SCREAMING_SNAKE_CASE ( self ): '''simple docstring''' lowerCAmelCase = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE , 'image_mean' ) ) self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE , 'image_std' ) ) self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE , 'do_normalize' ) ) self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE , 'do_resize' ) ) self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE , 'size' ) ) self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE , 'ignore_index' ) ) self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE , 'class_info_file' ) ) self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE , 'num_text' ) ) self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE , 'repo_path' ) ) self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE , 'metadata' ) ) self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE , 'do_reduce_labels' ) ) def _SCREAMING_SNAKE_CASE ( self ): '''simple docstring''' pass def _SCREAMING_SNAKE_CASE ( self ): '''simple docstring''' lowerCAmelCase = self.image_processing_class(**self.image_processor_dict ) # create random PIL images lowerCAmelCase = prepare_image_inputs(self.image_processing_tester , equal_resolution=_SCREAMING_SNAKE_CASE ) for image in image_inputs: self.assertIsInstance(_SCREAMING_SNAKE_CASE , Image.Image ) # Test not batched input lowerCAmelCase = image_processor(image_inputs[0] , ['semantic'] , return_tensors='pt' ).pixel_values lowerCAmelCase , lowerCAmelCase = self.image_processing_tester.get_expected_values(_SCREAMING_SNAKE_CASE ) self.assertEqual( encoded_images.shape , (1, self.image_processing_tester.num_channels, expected_height, expected_width) , ) # Test batched lowerCAmelCase , lowerCAmelCase = self.image_processing_tester.get_expected_values(_SCREAMING_SNAKE_CASE , batched=_SCREAMING_SNAKE_CASE ) lowerCAmelCase = image_processor( _SCREAMING_SNAKE_CASE , ['semantic'] * len(_SCREAMING_SNAKE_CASE ) , return_tensors='pt' ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processing_tester.batch_size, self.image_processing_tester.num_channels, expected_height, expected_width, ) , ) def _SCREAMING_SNAKE_CASE ( self ): '''simple docstring''' lowerCAmelCase = 
self.image_processing_class(**self.image_processor_dict ) # create random numpy tensors lowerCAmelCase = prepare_image_inputs(self.image_processing_tester , equal_resolution=_SCREAMING_SNAKE_CASE , numpify=_SCREAMING_SNAKE_CASE ) for image in image_inputs: self.assertIsInstance(_SCREAMING_SNAKE_CASE , np.ndarray ) # Test not batched input lowerCAmelCase = image_processor(image_inputs[0] , ['semantic'] , return_tensors='pt' ).pixel_values lowerCAmelCase , lowerCAmelCase = self.image_processing_tester.get_expected_values(_SCREAMING_SNAKE_CASE ) self.assertEqual( encoded_images.shape , (1, self.image_processing_tester.num_channels, expected_height, expected_width) , ) # Test batched lowerCAmelCase , lowerCAmelCase = self.image_processing_tester.get_expected_values(_SCREAMING_SNAKE_CASE , batched=_SCREAMING_SNAKE_CASE ) lowerCAmelCase = image_processor( _SCREAMING_SNAKE_CASE , ['semantic'] * len(_SCREAMING_SNAKE_CASE ) , return_tensors='pt' ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processing_tester.batch_size, self.image_processing_tester.num_channels, expected_height, expected_width, ) , ) def _SCREAMING_SNAKE_CASE ( self ): '''simple docstring''' lowerCAmelCase = self.image_processing_class(**self.image_processor_dict ) # create random PyTorch tensors lowerCAmelCase = prepare_image_inputs(self.image_processing_tester , equal_resolution=_SCREAMING_SNAKE_CASE , torchify=_SCREAMING_SNAKE_CASE ) for image in image_inputs: self.assertIsInstance(_SCREAMING_SNAKE_CASE , torch.Tensor ) # Test not batched input lowerCAmelCase = image_processor(image_inputs[0] , ['semantic'] , return_tensors='pt' ).pixel_values lowerCAmelCase , lowerCAmelCase = self.image_processing_tester.get_expected_values(_SCREAMING_SNAKE_CASE ) self.assertEqual( encoded_images.shape , (1, self.image_processing_tester.num_channels, expected_height, expected_width) , ) # Test batched lowerCAmelCase , lowerCAmelCase = self.image_processing_tester.get_expected_values(_SCREAMING_SNAKE_CASE , batched=_SCREAMING_SNAKE_CASE ) lowerCAmelCase = image_processor( _SCREAMING_SNAKE_CASE , ['semantic'] * len(_SCREAMING_SNAKE_CASE ) , return_tensors='pt' ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processing_tester.batch_size, self.image_processing_tester.num_channels, expected_height, expected_width, ) , ) def _SCREAMING_SNAKE_CASE ( self , _SCREAMING_SNAKE_CASE=False , _SCREAMING_SNAKE_CASE=False , _SCREAMING_SNAKE_CASE="np" ): '''simple docstring''' lowerCAmelCase = self.image_processing_class(**self.image_processor_dict ) # prepare image and target lowerCAmelCase = self.image_processing_tester.num_labels lowerCAmelCase = None lowerCAmelCase = None lowerCAmelCase = prepare_image_inputs(self.image_processing_tester , equal_resolution=_SCREAMING_SNAKE_CASE ) if with_segmentation_maps: lowerCAmelCase = num_labels if is_instance_map: lowerCAmelCase = list(range(_SCREAMING_SNAKE_CASE ) ) * 2 lowerCAmelCase = dict(enumerate(_SCREAMING_SNAKE_CASE ) ) lowerCAmelCase = [ np.random.randint(0 , high * 2 , (img.size[1], img.size[0]) ).astype(np.uinta ) for img in image_inputs ] if segmentation_type == "pil": lowerCAmelCase = [Image.fromarray(_SCREAMING_SNAKE_CASE ) for annotation in annotations] lowerCAmelCase = image_processor( _SCREAMING_SNAKE_CASE , ['semantic'] * len(_SCREAMING_SNAKE_CASE ) , _SCREAMING_SNAKE_CASE , return_tensors='pt' , instance_id_to_semantic_id=_SCREAMING_SNAKE_CASE , pad_and_return_pixel_mask=_SCREAMING_SNAKE_CASE , ) return inputs def _SCREAMING_SNAKE_CASE ( self ): '''simple 
docstring''' pass def _SCREAMING_SNAKE_CASE ( self ): '''simple docstring''' def common(_SCREAMING_SNAKE_CASE=False , _SCREAMING_SNAKE_CASE=None ): lowerCAmelCase = self.comm_get_image_processor_inputs( with_segmentation_maps=_SCREAMING_SNAKE_CASE , is_instance_map=_SCREAMING_SNAKE_CASE , segmentation_type=_SCREAMING_SNAKE_CASE ) lowerCAmelCase = inputs['mask_labels'] lowerCAmelCase = inputs['class_labels'] lowerCAmelCase = inputs['pixel_values'] lowerCAmelCase = inputs['text_inputs'] # check the batch_size for mask_label, class_label, text_input in zip(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): self.assertEqual(mask_label.shape[0] , class_label.shape[0] ) # this ensure padding has happened self.assertEqual(mask_label.shape[1:] , pixel_values.shape[2:] ) self.assertEqual(len(_SCREAMING_SNAKE_CASE ) , self.image_processing_tester.num_text ) common() common(is_instance_map=_SCREAMING_SNAKE_CASE ) common(is_instance_map=_SCREAMING_SNAKE_CASE , segmentation_type='pil' ) common(is_instance_map=_SCREAMING_SNAKE_CASE , segmentation_type='pil' ) def _SCREAMING_SNAKE_CASE ( self ): '''simple docstring''' lowerCAmelCase = np.zeros((20, 50) ) lowerCAmelCase = 1 lowerCAmelCase = 1 lowerCAmelCase = 1 lowerCAmelCase = binary_mask_to_rle(_SCREAMING_SNAKE_CASE ) self.assertEqual(len(_SCREAMING_SNAKE_CASE ) , 4 ) self.assertEqual(rle[0] , 21 ) self.assertEqual(rle[1] , 45 ) def _SCREAMING_SNAKE_CASE ( self ): '''simple docstring''' lowerCAmelCase = self.image_processing_class( num_labels=self.image_processing_tester.num_classes , max_seq_length=77 , task_seq_length=77 , class_info_file='ade20k_panoptic.json' , num_text=self.image_processing_tester.num_text , repo_path='shi-labs/oneformer_demo' , ) lowerCAmelCase = self.image_processing_tester.get_fake_oneformer_outputs() lowerCAmelCase = fature_extractor.post_process_semantic_segmentation(_SCREAMING_SNAKE_CASE ) self.assertEqual(len(_SCREAMING_SNAKE_CASE ) , self.image_processing_tester.batch_size ) self.assertEqual( segmentation[0].shape , ( self.image_processing_tester.height, self.image_processing_tester.width, ) , ) lowerCAmelCase = [(1, 4) for i in range(self.image_processing_tester.batch_size )] lowerCAmelCase = fature_extractor.post_process_semantic_segmentation(_SCREAMING_SNAKE_CASE , target_sizes=_SCREAMING_SNAKE_CASE ) self.assertEqual(segmentation[0].shape , target_sizes[0] ) def _SCREAMING_SNAKE_CASE ( self ): '''simple docstring''' lowerCAmelCase = self.image_processing_class( num_labels=self.image_processing_tester.num_classes , max_seq_length=77 , task_seq_length=77 , class_info_file='ade20k_panoptic.json' , num_text=self.image_processing_tester.num_text , repo_path='shi-labs/oneformer_demo' , ) lowerCAmelCase = self.image_processing_tester.get_fake_oneformer_outputs() lowerCAmelCase = image_processor.post_process_instance_segmentation(_SCREAMING_SNAKE_CASE , threshold=0 ) self.assertTrue(len(_SCREAMING_SNAKE_CASE ) == self.image_processing_tester.batch_size ) for el in segmentation: self.assertTrue('segmentation' in el ) self.assertTrue('segments_info' in el ) self.assertEqual(type(el['segments_info'] ) , _SCREAMING_SNAKE_CASE ) self.assertEqual( el['segmentation'].shape , (self.image_processing_tester.height, self.image_processing_tester.width) ) def _SCREAMING_SNAKE_CASE ( self ): '''simple docstring''' lowerCAmelCase = self.image_processing_class( num_labels=self.image_processing_tester.num_classes , max_seq_length=77 , task_seq_length=77 , class_info_file='ade20k_panoptic.json' , 
num_text=self.image_processing_tester.num_text , repo_path='shi-labs/oneformer_demo' , ) lowerCAmelCase = self.image_processing_tester.get_fake_oneformer_outputs() lowerCAmelCase = image_processor.post_process_panoptic_segmentation(_SCREAMING_SNAKE_CASE , threshold=0 ) self.assertTrue(len(_SCREAMING_SNAKE_CASE ) == self.image_processing_tester.batch_size ) for el in segmentation: self.assertTrue('segmentation' in el ) self.assertTrue('segments_info' in el ) self.assertEqual(type(el['segments_info'] ) , _SCREAMING_SNAKE_CASE ) self.assertEqual( el['segmentation'].shape , (self.image_processing_tester.height, self.image_processing_tester.width) )
514
1
"""simple docstring""" from __future__ import annotations class _UpperCAmelCase : '''simple docstring''' def __init__( self , A=None ) -> List[str]: _UpperCAmelCase : Tuple = data _UpperCAmelCase : Union[str, Any] = None def __repr__( self ) -> List[str]: _UpperCAmelCase : Optional[int] = [] _UpperCAmelCase : str = self while temp: string_rep.append(f'{temp.data}' ) _UpperCAmelCase : Dict = temp.next return "->".join(A ) def lowerCamelCase_ (UpperCamelCase__ : list ): if not elements_list: raise Exception('''The Elements List is empty''' ) _UpperCAmelCase : Union[str, Any] = Node(elements_list[0] ) for i in range(1 , len(UpperCamelCase__ ) ): _UpperCAmelCase : Tuple = Node(elements_list[i] ) _UpperCAmelCase : Optional[int] = current.next return head def lowerCamelCase_ (UpperCamelCase__ : Node ): if head_node is not None and isinstance(UpperCamelCase__ , UpperCamelCase__ ): print_reverse(head_node.next ) print(head_node.data ) def lowerCamelCase_ (): from doctest import testmod testmod() _UpperCAmelCase : str = make_linked_list([14, 52, 14, 12, 43] ) print('''Linked List:''' ) print(UpperCamelCase__ ) print('''Elements in Reverse:''' ) print_reverse(UpperCamelCase__ ) if __name__ == "__main__": main()
506
"""simple docstring""" def lowerCamelCase_ (UpperCamelCase__ : int = 3 , UpperCamelCase__ : int = 7 , UpperCamelCase__ : int = 100_0000 ): _UpperCAmelCase : List[Any] = 0 _UpperCAmelCase : Optional[int] = 1 for current_denominator in range(1 , limit + 1 ): _UpperCAmelCase : Union[str, Any] = current_denominator * numerator // denominator if current_denominator % denominator == 0: current_numerator -= 1 if current_numerator * max_denominator > current_denominator * max_numerator: _UpperCAmelCase : List[Any] = current_numerator _UpperCAmelCase : Any = current_denominator return max_numerator if __name__ == "__main__": print(solution(numerator=3, denominator=7, limit=1_000_000))
506
1
from typing import List, Optional import numpy as np from ...processing_utils import ProcessorMixin from ...utils import to_numpy class __snake_case ( UpperCamelCase_ ): """simple docstring""" UpperCamelCase_ = 'EncodecFeatureExtractor' UpperCamelCase_ = ('T5Tokenizer', 'T5TokenizerFast') def __init__( self : int ,lowerCAmelCase__ : str ,lowerCAmelCase__ : str ) -> List[Any]: '''simple docstring''' super().__init__(lowerCAmelCase__ ,lowerCAmelCase__ ) lowerCAmelCase_ : Any = self.feature_extractor lowerCAmelCase_ : Any = False def UpperCAmelCase_ ( self : List[Any] ,lowerCAmelCase__ : Any=None ,lowerCAmelCase__ : int=None ,lowerCAmelCase__ : List[str]=True ) -> Tuple: '''simple docstring''' return self.tokenizer.get_decoder_prompt_ids(task=lowerCAmelCase__ ,language=lowerCAmelCase__ ,no_timestamps=lowerCAmelCase__ ) def __call__( self : Optional[Any] ,*lowerCAmelCase__ : int ,**lowerCAmelCase__ : Optional[int] ) -> List[Any]: '''simple docstring''' if self._in_target_context_manager: return self.current_processor(*lowerCAmelCase__ ,**lowerCAmelCase__ ) lowerCAmelCase_ : str = kwargs.pop("audio" ,lowerCAmelCase__ ) lowerCAmelCase_ : Union[str, Any] = kwargs.pop("sampling_rate" ,lowerCAmelCase__ ) lowerCAmelCase_ : Union[str, Any] = kwargs.pop("text" ,lowerCAmelCase__ ) if len(lowerCAmelCase__ ) > 0: lowerCAmelCase_ : Union[str, Any] = args[0] lowerCAmelCase_ : Optional[int] = args[1:] if audio is None and text is None: raise ValueError("You need to specify either an `audio` or `text` input to process." ) if text is not None: lowerCAmelCase_ : List[Any] = self.tokenizer(lowerCAmelCase__ ,**lowerCAmelCase__ ) if audio is not None: lowerCAmelCase_ : List[str] = self.feature_extractor(lowerCAmelCase__ ,*lowerCAmelCase__ ,sampling_rate=lowerCAmelCase__ ,**lowerCAmelCase__ ) if audio is None: return inputs elif text is None: return audio_inputs else: lowerCAmelCase_ : List[Any] = audio_inputs['''input_values'''] if "padding_mask" in audio_inputs: lowerCAmelCase_ : Optional[int] = audio_inputs['''padding_mask'''] return inputs def UpperCAmelCase_ ( self : Any ,*lowerCAmelCase__ : Tuple ,**lowerCAmelCase__ : Any ) -> List[Any]: '''simple docstring''' lowerCAmelCase_ : int = kwargs.pop("audio" ,lowerCAmelCase__ ) lowerCAmelCase_ : int = kwargs.pop("padding_mask" ,lowerCAmelCase__ ) if len(lowerCAmelCase__ ) > 0: lowerCAmelCase_ : Optional[int] = args[0] lowerCAmelCase_ : Dict = args[1:] if audio_values is not None: return self._decode_audio(lowerCAmelCase__ ,padding_mask=lowerCAmelCase__ ) else: return self.tokenizer.batch_decode(*lowerCAmelCase__ ,**lowerCAmelCase__ ) def UpperCAmelCase_ ( self : Optional[Any] ,*lowerCAmelCase__ : str ,**lowerCAmelCase__ : Any ) -> List[Any]: '''simple docstring''' return self.tokenizer.decode(*lowerCAmelCase__ ,**lowerCAmelCase__ ) def UpperCAmelCase_ ( self : int ,lowerCAmelCase__ : Optional[int] ,lowerCAmelCase__ : List[Any] = None ) -> List[np.ndarray]: '''simple docstring''' lowerCAmelCase_ : Dict = to_numpy(lowerCAmelCase__ ) lowerCAmelCase_ : List[Any] = audio_values.shape if padding_mask is None: return list(lowerCAmelCase__ ) lowerCAmelCase_ : Optional[Any] = to_numpy(lowerCAmelCase__ ) # match the sequence length of the padding mask to the generated audio arrays by padding with the **non-padding** # token (so that the generated audio values are **not** treated as padded tokens) lowerCAmelCase_ : Union[str, Any] = seq_len - padding_mask.shape[-1] lowerCAmelCase_ : List[str] = 1 - self.feature_extractor.padding_value lowerCAmelCase_ : str = 
np.pad(lowerCAmelCase__ ,((0, 0), (0, difference)) ,"constant" ,constant_values=lowerCAmelCase__ ) lowerCAmelCase_ : int = audio_values.tolist() for i in range(lowerCAmelCase__ ): lowerCAmelCase_ : Optional[Any] = np.asarray(audio_values[i] )[ padding_mask[i][None, :] != self.feature_extractor.padding_value ] lowerCAmelCase_ : int = sliced_audio.reshape(lowerCAmelCase__ ,-1 ) return audio_values
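# The decode path above trims generated audio back to its true length: the padding mask is
# right-padded with a non-padding value so generated samples are kept, then each clip keeps
# only positions whose mask differs from the padding value. A standalone numpy sketch:
import numpy as np

padding_value = 0.0
audio = np.arange(8, dtype=np.float32).reshape(1, 8)  # 1 clip, 8 generated samples
mask = np.array([[1, 1, 1, 1, 1, 0]])                 # 0 marks padded input positions
diff = audio.shape[-1] - mask.shape[-1]
mask = np.pad(mask, ((0, 0), (0, diff)), "constant", constant_values=1 - padding_value)
trimmed = audio[0][mask[0] != padding_value]          # drops only the truly padded position
assert trimmed.shape == (7,)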
719
import copy from typing import Any, Dict, List, Optional, Union import numpy as np import torch from ...audio_utils import mel_filter_bank, spectrogram, window_function from ...feature_extraction_sequence_utils import SequenceFeatureExtractor from ...feature_extraction_utils import BatchFeature from ...utils import TensorType, logging _lowercase = logging.get_logger(__name__) class __snake_case ( snake_case__ ): """simple docstring""" UpperCamelCase_ = ['input_features', 'is_longer'] def __init__( self : Optional[int] ,lowerCAmelCase__ : List[Any]=64 ,lowerCAmelCase__ : Any=4_80_00 ,lowerCAmelCase__ : Optional[Any]=4_80 ,lowerCAmelCase__ : List[str]=10 ,lowerCAmelCase__ : List[Any]=10_24 ,lowerCAmelCase__ : Union[str, Any]=0.0 ,lowerCAmelCase__ : Tuple=False ,lowerCAmelCase__ : float = 0 ,lowerCAmelCase__ : float = 1_40_00 ,lowerCAmelCase__ : int = None ,lowerCAmelCase__ : str = "fusion" ,lowerCAmelCase__ : str = "repeatpad" ,**lowerCAmelCase__ : Union[str, Any] ,) -> Union[str, Any]: '''simple docstring''' super().__init__( feature_size=lowerCAmelCase__ ,sampling_rate=lowerCAmelCase__ ,padding_value=lowerCAmelCase__ ,return_attention_mask=lowerCAmelCase__ ,**lowerCAmelCase__ ,) lowerCAmelCase_ : Optional[Any] = top_db lowerCAmelCase_ : str = truncation lowerCAmelCase_ : Tuple = padding lowerCAmelCase_ : str = fft_window_size lowerCAmelCase_ : Dict = (fft_window_size >> 1) + 1 lowerCAmelCase_ : Dict = hop_length lowerCAmelCase_ : Any = max_length_s lowerCAmelCase_ : int = max_length_s * sampling_rate lowerCAmelCase_ : Optional[int] = sampling_rate lowerCAmelCase_ : int = frequency_min lowerCAmelCase_ : Optional[Any] = frequency_max lowerCAmelCase_ : List[Any] = mel_filter_bank( num_frequency_bins=self.nb_frequency_bins ,num_mel_filters=lowerCAmelCase__ ,min_frequency=lowerCAmelCase__ ,max_frequency=lowerCAmelCase__ ,sampling_rate=lowerCAmelCase__ ,norm=lowerCAmelCase__ ,mel_scale="htk" ,) lowerCAmelCase_ : List[Any] = mel_filter_bank( num_frequency_bins=self.nb_frequency_bins ,num_mel_filters=lowerCAmelCase__ ,min_frequency=lowerCAmelCase__ ,max_frequency=lowerCAmelCase__ ,sampling_rate=lowerCAmelCase__ ,norm="slaney" ,mel_scale="slaney" ,) def UpperCAmelCase_ ( self : Dict ) -> Dict[str, Any]: '''simple docstring''' lowerCAmelCase_ : int = copy.deepcopy(self.__dict__ ) lowerCAmelCase_ : Optional[int] = self.__class__.__name__ if "mel_filters" in output: del output["mel_filters"] if "mel_filters_slaney" in output: del output["mel_filters_slaney"] return output def UpperCAmelCase_ ( self : Any ,lowerCAmelCase__ : np.array ,lowerCAmelCase__ : Optional[np.array] = None ) -> np.ndarray: '''simple docstring''' lowerCAmelCase_ : Optional[Any] = spectrogram( lowerCAmelCase__ ,window_function(self.fft_window_size ,"hann" ) ,frame_length=self.fft_window_size ,hop_length=self.hop_length ,power=2.0 ,mel_filters=lowerCAmelCase__ ,log_mel="dB" ,) return log_mel_spectrogram.T def UpperCAmelCase_ ( self : Optional[int] ,lowerCAmelCase__ : Union[str, Any] ,lowerCAmelCase__ : Union[str, Any] ,lowerCAmelCase__ : Tuple ) -> Dict: '''simple docstring''' lowerCAmelCase_ : Tuple = np.array_split(list(range(0 ,total_frames - chunk_frames + 1 ) ) ,3 ) if len(ranges[1] ) == 0: # if the audio is too short, we just use the first chunk lowerCAmelCase_ : List[Any] = [0] if len(ranges[2] ) == 0: # if the audio is too short, we just use the first chunk lowerCAmelCase_ : List[Any] = [0] # randomly choose index for each part lowerCAmelCase_ : str = np.random.choice(ranges[0] ) lowerCAmelCase_ : Optional[Any] = 
np.random.choice(ranges[1] ) lowerCAmelCase_ : Any = np.random.choice(ranges[2] ) lowerCAmelCase_ : str = mel[idx_front : idx_front + chunk_frames, :] lowerCAmelCase_ : Dict = mel[idx_middle : idx_middle + chunk_frames, :] lowerCAmelCase_ : Optional[Any] = mel[idx_back : idx_back + chunk_frames, :] lowerCAmelCase_ : List[str] = torch.tensor(mel[None, None, :] ) lowerCAmelCase_ : List[Any] = torch.nn.functional.interpolate( lowerCAmelCase__ ,size=[chunk_frames, 64] ,mode="bilinear" ,align_corners=lowerCAmelCase__ ) lowerCAmelCase_ : Optional[int] = mel_shrink[0][0].numpy() lowerCAmelCase_ : str = np.stack([mel_shrink, mel_chunk_front, mel_chunk_middle, mel_chunk_back] ,axis=0 ) return mel_fusion def UpperCAmelCase_ ( self : Any ,lowerCAmelCase__ : np.array ,lowerCAmelCase__ : Union[str, Any] ,lowerCAmelCase__ : Optional[int] ,lowerCAmelCase__ : int ) -> np.array: '''simple docstring''' if waveform.shape[0] > max_length: if truncation == "rand_trunc": lowerCAmelCase_ : List[Any] = True # random crop to max_length (for compatibility) -> this should be handled by self.pad lowerCAmelCase_ : str = len(lowerCAmelCase__ ) - max_length lowerCAmelCase_ : Any = np.random.randint(0 ,overflow + 1 ) lowerCAmelCase_ : Dict = waveform[idx : idx + max_length] lowerCAmelCase_ : List[str] = self._np_extract_fbank_features(lowerCAmelCase__ ,self.mel_filters_slaney )[None, :] elif truncation == "fusion": lowerCAmelCase_ : Tuple = self._np_extract_fbank_features(lowerCAmelCase__ ,self.mel_filters ) lowerCAmelCase_ : str = max_length // self.hop_length + 1 # the +1 related to how the spectrogram is computed lowerCAmelCase_ : List[str] = mel.shape[0] if chunk_frames == total_frames: # there is a corner case where the audio length is larger than max_length but smaller than max_length+hop_length. # In this case, we just use the whole audio. lowerCAmelCase_ : Dict = np.stack([mel, mel, mel, mel] ,axis=0 ) lowerCAmelCase_ : int = False else: lowerCAmelCase_ : str = self._random_mel_fusion(lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ ) lowerCAmelCase_ : Any = True else: raise NotImplementedError(f'''data_truncating {truncation} not implemented''' ) else: lowerCAmelCase_ : Dict = False # only use repeat as a new possible value for padding. 
you repeat the audio before applying the usual max_length padding if waveform.shape[0] < max_length: if padding == "repeat": lowerCAmelCase_ : List[Any] = int(max_length / len(lowerCAmelCase__ ) ) lowerCAmelCase_ : int = np.stack(np.tile(lowerCAmelCase__ ,n_repeat + 1 ) )[:max_length] if padding == "repeatpad": lowerCAmelCase_ : Optional[Any] = int(max_length / len(lowerCAmelCase__ ) ) lowerCAmelCase_ : Tuple = np.stack(np.tile(lowerCAmelCase__ ,lowerCAmelCase__ ) ) lowerCAmelCase_ : List[Any] = np.pad(lowerCAmelCase__ ,(0, max_length - waveform.shape[0]) ,mode="constant" ,constant_values=0 ) if truncation == "fusion": lowerCAmelCase_ : int = self._np_extract_fbank_features(lowerCAmelCase__ ,self.mel_filters ) lowerCAmelCase_ : Tuple = np.stack([input_mel, input_mel, input_mel, input_mel] ,axis=0 ) else: lowerCAmelCase_ : str = self._np_extract_fbank_features(lowerCAmelCase__ ,self.mel_filters_slaney )[None, :] return input_mel, longer def __call__( self : int ,lowerCAmelCase__ : Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]] ,lowerCAmelCase__ : str = None ,lowerCAmelCase__ : Optional[str] = None ,lowerCAmelCase__ : Optional[int] = None ,lowerCAmelCase__ : Optional[int] = None ,lowerCAmelCase__ : Optional[Union[str, TensorType]] = None ,**lowerCAmelCase__ : List[Any] ,) -> BatchFeature: '''simple docstring''' lowerCAmelCase_ : List[str] = truncation if truncation is not None else self.truncation lowerCAmelCase_ : List[Any] = padding if padding else self.padding if sampling_rate is not None: if sampling_rate != self.sampling_rate: raise ValueError( f'''The model corresponding to this feature extractor: {self.__class__.__name__} was trained using a''' f''' sampling rate of {self.sampling_rate}. Please make sure that the provided `raw_speech` input''' f''' was sampled with {self.sampling_rate} and not {sampling_rate}.''' ) else: logger.warning( "It is strongly recommended to pass the `sampling_rate` argument to this function. " "Failing to do so can result in silent errors that might be hard to debug." ) lowerCAmelCase_ : Dict = isinstance(lowerCAmelCase__ ,np.ndarray ) and len(raw_speech.shape ) > 1 if is_batched_numpy and len(raw_speech.shape ) > 2: raise ValueError(f'''Only mono-channel audio is supported for input to {self}''' ) lowerCAmelCase_ : Dict = is_batched_numpy or ( isinstance(lowerCAmelCase__ ,(list, tuple) ) and (isinstance(raw_speech[0] ,(np.ndarray, tuple, list) )) ) if is_batched: lowerCAmelCase_ : List[str] = [np.asarray(lowerCAmelCase__ ,dtype=np.floataa ) for speech in raw_speech] elif not is_batched and not isinstance(lowerCAmelCase__ ,np.ndarray ): lowerCAmelCase_ : Tuple = np.asarray(lowerCAmelCase__ ,dtype=np.floataa ) elif isinstance(lowerCAmelCase__ ,np.ndarray ) and raw_speech.dtype is np.dtype(np.floataa ): lowerCAmelCase_ : Any = raw_speech.astype(np.floataa ) # always return batch if not is_batched: lowerCAmelCase_ : Any = [np.asarray(lowerCAmelCase__ )] # convert to mel spectrogram, truncate and pad if needed. 
lowerCAmelCase_ : Optional[Any] = [ self._get_input_mel(lowerCAmelCase__ ,max_length if max_length else self.nb_max_samples ,lowerCAmelCase__ ,lowerCAmelCase__ ) for waveform in raw_speech ] lowerCAmelCase_ : str = [] lowerCAmelCase_ : str = [] for mel, longer in padded_inputs: input_mel.append(lowerCAmelCase__ ) is_longer.append(lowerCAmelCase__ ) if truncation == "fusion" and sum(lowerCAmelCase__ ) == 0: # if no audio is longer than 10s, then randomly select one audio to be longer lowerCAmelCase_ : Any = np.random.randint(0 ,len(lowerCAmelCase__ ) ) lowerCAmelCase_ : Dict = True if isinstance(input_mel[0] ,lowerCAmelCase__ ): lowerCAmelCase_ : Optional[int] = [np.asarray(lowerCAmelCase__ ,dtype=np.floataa ) for feature in input_mel] # is_longer is a list of bool lowerCAmelCase_ : List[Any] = [[longer] for longer in is_longer] lowerCAmelCase_ : Optional[Any] = {"input_features": input_mel, "is_longer": is_longer} lowerCAmelCase_ : Dict = BatchFeature(lowerCAmelCase__ ) if return_tensors is not None: lowerCAmelCase_ : List[str] = input_features.convert_to_tensors(lowerCAmelCase__ ) return input_features
683
0
def nand_gate(input_1: int, input_2: int) -> int:
    return int((input_1, input_2).count(0) != 0)


def test_nand_gate() -> None:
    assert nand_gate(0, 0) == 1
    assert nand_gate(0, 1) == 1
    assert nand_gate(1, 0) == 1
    assert nand_gate(1, 1) == 0


if __name__ == "__main__":
    print(nand_gate(0, 0))
    print(nand_gate(0, 1))
    print(nand_gate(1, 0))
    print(nand_gate(1, 1))
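# NAND is NOT(AND): the tuple trick returns 1 whenever at least one input is 0, because
# (a, b).count(0) != 0 exactly when the AND of the two bits is 0.
assert [nand_gate(a, b) for a, b in ((0, 0), (0, 1), (1, 0), (1, 1))] == [1, 1, 1, 0]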
305
from __future__ import annotations import unittest from transformers import EsmConfig, is_tf_available from transformers.testing_utils import require_tf, slow from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import numpy import tensorflow as tf from transformers.models.esm.modeling_tf_esm import ( TF_ESM_PRETRAINED_MODEL_ARCHIVE_LIST, TFEsmForMaskedLM, TFEsmForSequenceClassification, TFEsmForTokenClassification, TFEsmModel, ) class SCREAMING_SNAKE_CASE_ : '''simple docstring''' def __init__( self : Optional[Any] , SCREAMING_SNAKE_CASE__ : Optional[Any] , ) -> Optional[int]: A : Optional[int] =parent A : Dict =13 A : List[str] =7 A : Any =True A : str =True A : Optional[int] =True A : Union[str, Any] =99 A : List[Any] =32 A : Optional[Any] =2 A : int =4 A : List[Any] =37 A : Any ='gelu' A : Optional[Any] =0.1 A : Optional[Any] =0.1 A : List[Any] =5_12 A : Optional[Any] =16 A : Optional[Any] =2 A : Dict =0.0_2 A : Dict =3 A : Union[str, Any] =4 A : int =None def SCREAMING_SNAKE_CASE_ ( self : Any ) -> Any: A : List[Any] =ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) A : List[Any] =None if self.use_input_mask: A : List[str] =random_attention_mask([self.batch_size, self.seq_length] ) A : List[str] =None A : Tuple =None A : List[str] =None if self.use_labels: A : Union[str, Any] =ids_tensor([self.batch_size] , self.type_sequence_label_size ) A : Optional[Any] =ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) A : List[str] =ids_tensor([self.batch_size] , self.num_choices ) A : str =EsmConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , pad_token_id=1 , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , ) return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels def SCREAMING_SNAKE_CASE_ ( self : int ) -> Optional[int]: ( ( A ) , ( A ) , ( A ) , ( A ) , ( A ) , ( A ) , ) : Any =self.prepare_config_and_inputs() A : Dict =True A : Optional[Any] =floats_tensor([self.batch_size, self.seq_length, self.hidden_size] ) A : Any =ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 ) return ( config, input_ids, input_mask, sequence_labels, token_labels, choice_labels, encoder_hidden_states, encoder_attention_mask, ) def SCREAMING_SNAKE_CASE_ ( self : List[str] , SCREAMING_SNAKE_CASE__ : Tuple , SCREAMING_SNAKE_CASE__ : Optional[int] , SCREAMING_SNAKE_CASE__ : List[Any] , SCREAMING_SNAKE_CASE__ : Optional[Any] , SCREAMING_SNAKE_CASE__ : Tuple , SCREAMING_SNAKE_CASE__ : Optional[int] ) -> Tuple: A : List[Any] =TFEsmModel(config=SCREAMING_SNAKE_CASE__ ) A : str ={'input_ids': input_ids, 'attention_mask': input_mask} A : str =model(SCREAMING_SNAKE_CASE__ ) A : Any =[input_ids, input_mask] A : Optional[Any] =model(SCREAMING_SNAKE_CASE__ ) A : Optional[Any] =model(SCREAMING_SNAKE_CASE__ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def SCREAMING_SNAKE_CASE_ ( self : Dict , SCREAMING_SNAKE_CASE__ : Union[str, Any] , 
SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : Dict , SCREAMING_SNAKE_CASE__ : Optional[Any] , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : Dict , SCREAMING_SNAKE_CASE__ : List[Any] , ) -> Optional[int]: A : List[Any] =True A : List[Any] =TFEsmModel(config=SCREAMING_SNAKE_CASE__ ) A : Dict ={ 'input_ids': input_ids, 'attention_mask': input_mask, 'encoder_hidden_states': encoder_hidden_states, 'encoder_attention_mask': encoder_attention_mask, } A : Union[str, Any] =model(SCREAMING_SNAKE_CASE__ ) A : Optional[int] =[input_ids, input_mask] A : Dict =model(SCREAMING_SNAKE_CASE__ , encoder_hidden_states=SCREAMING_SNAKE_CASE__ ) # Also check the case where encoder outputs are not passed A : Optional[Any] =model(SCREAMING_SNAKE_CASE__ , attention_mask=SCREAMING_SNAKE_CASE__ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def SCREAMING_SNAKE_CASE_ ( self : Any , SCREAMING_SNAKE_CASE__ : Union[str, Any] , SCREAMING_SNAKE_CASE__ : Tuple , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : Optional[int] , SCREAMING_SNAKE_CASE__ : Dict ) -> Union[str, Any]: A : Any =TFEsmForMaskedLM(config=SCREAMING_SNAKE_CASE__ ) A : Union[str, Any] =model([input_ids, input_mask] ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] , SCREAMING_SNAKE_CASE__ : Any , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : Union[str, Any] , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : Optional[Any] ) -> Optional[Any]: A : Optional[int] =self.num_labels A : List[str] =TFEsmForTokenClassification(config=SCREAMING_SNAKE_CASE__ ) A : Optional[int] ={'input_ids': input_ids, 'attention_mask': input_mask} A : Optional[int] =model(SCREAMING_SNAKE_CASE__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def SCREAMING_SNAKE_CASE_ ( self : Any ) -> Tuple: A : Optional[Any] =self.prepare_config_and_inputs() ( ( A ) , ( A ) , ( A ) , ( A ) , ( A ) , ( A ) , ) : List[str] =config_and_inputs A : str ={'input_ids': input_ids, 'attention_mask': input_mask} return config, inputs_dict @require_tf class SCREAMING_SNAKE_CASE_ ( lowerCAmelCase_ , lowerCAmelCase_ , unittest.TestCase ): '''simple docstring''' lowercase : Dict = ( ( TFEsmModel, TFEsmForMaskedLM, TFEsmForSequenceClassification, TFEsmForTokenClassification, ) if is_tf_available() else () ) lowercase : Tuple = ( { "feature-extraction": TFEsmModel, "fill-mask": TFEsmForMaskedLM, "text-classification": TFEsmForSequenceClassification, "token-classification": TFEsmForTokenClassification, "zero-shot": TFEsmForSequenceClassification, } if is_tf_available() else {} ) lowercase : str = False lowercase : Optional[Any] = False def SCREAMING_SNAKE_CASE_ ( self : List[Any] ) -> Optional[Any]: A : Any =TFEsmModelTester(self ) A : Any =ConfigTester(self , config_class=SCREAMING_SNAKE_CASE__ , hidden_size=37 ) def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] ) -> int: self.config_tester.run_common_tests() def SCREAMING_SNAKE_CASE_ ( self : List[Any] ) -> Optional[int]: A : str =self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*SCREAMING_SNAKE_CASE__ ) def SCREAMING_SNAKE_CASE_ ( self : str ) -> Optional[Any]: A : Optional[int] =self.model_tester.prepare_config_and_inputs_for_decoder() 
self.model_tester.create_and_check_model_as_decoder(*SCREAMING_SNAKE_CASE__ ) def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] ) -> List[str]: A : Optional[Any] =self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_lm(*SCREAMING_SNAKE_CASE__ ) def SCREAMING_SNAKE_CASE_ ( self : Any ) -> int: A : Dict =self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_token_classification(*SCREAMING_SNAKE_CASE__ ) @slow def SCREAMING_SNAKE_CASE_ ( self : List[str] ) -> Any: for model_name in TF_ESM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: A : Optional[Any] =TFEsmModel.from_pretrained(SCREAMING_SNAKE_CASE__ ) self.assertIsNotNone(SCREAMING_SNAKE_CASE__ ) @unittest.skip('Protein models do not support embedding resizing.' ) def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] ) -> Union[str, Any]: pass @unittest.skip('Protein models do not support embedding resizing.' ) def SCREAMING_SNAKE_CASE_ ( self : List[Any] ) -> Tuple: pass def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] ) -> Optional[int]: A , A : str =self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: A : Union[str, Any] =model_class(SCREAMING_SNAKE_CASE__ ) assert isinstance(model.get_input_embeddings() , tf.keras.layers.Layer ) if model_class is TFEsmForMaskedLM: # Output embedding test differs from the main test because they're a matrix, not a layer A : Any =model.get_bias() assert isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) for k, v in name.items(): assert isinstance(SCREAMING_SNAKE_CASE__ , tf.Variable ) else: A : List[Any] =model.get_output_embeddings() assert x is None A : Optional[Any] =model.get_bias() assert name is None @require_tf class SCREAMING_SNAKE_CASE_ ( unittest.TestCase ): '''simple docstring''' @slow def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] ) -> Optional[int]: A : Optional[Any] =TFEsmForMaskedLM.from_pretrained('facebook/esm2_t6_8M_UR50D' ) A : Any =tf.constant([[0, 1, 2, 3, 4, 5]] ) A : Dict =model(SCREAMING_SNAKE_CASE__ )[0] A : str =[1, 6, 33] self.assertEqual(list(output.numpy().shape ) , SCREAMING_SNAKE_CASE__ ) # compare the actual values for a slice. A : Dict =tf.constant( [ [ [8.9_2_1_5_1_8, -1_0.5_8_9_8_1_4, -6.4_6_7_1_3_0_7], [-6.3_9_6_7_1_5_6, -1_3.9_1_1_3_7_7, -1.1_2_1_1_9_1_5], [-7.7_8_1_2_4_7, -1_3.9_5_1_5_5_7, -3.7_4_0_5_9_2], ] ] ) self.assertTrue(numpy.allclose(output[:, :3, :3].numpy() , expected_slice.numpy() , atol=1e-2 ) ) @slow def SCREAMING_SNAKE_CASE_ ( self : List[str] ) -> Optional[Any]: A : Optional[int] =TFEsmModel.from_pretrained('facebook/esm2_t6_8M_UR50D' ) A : Union[str, Any] =tf.constant([[0, 6, 4, 13, 5, 4, 16, 12, 11, 7, 2]] ) A : str =model(SCREAMING_SNAKE_CASE__ )[0] # compare the actual values for a slice. A : str =tf.constant( [ [ [0.1_4_4_4_3_0_9_2, 0.5_4_1_2_5_3_2_7, 0.3_2_4_7_7_3_9], [0.3_0_3_4_0_4_8_4, 0.0_0_5_2_6_6_7_6, 0.3_1_0_7_7_7_2_2], [0.3_2_2_7_8_0_4_3, -0.2_4_9_8_7_0_9_6, 0.3_4_1_4_6_2_8], ] ] ) self.assertTrue(numpy.allclose(output[:, :3, :3].numpy() , expected_slice.numpy() , atol=1e-4 ) )
305
1
def convert_to_uppercase(word: str) -> str:
    # Shift each ASCII lowercase letter down 32 code points to its uppercase form.
    return "".join(chr(ord(char) - 32) if "a" <= char <= "z" else char for char in word)


if __name__ == "__main__":
    from doctest import testmod

    testmod()
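# Hedged sanity check for the manual upper-caser above: only ASCII "a"-"z"
# are shifted; digits, punctuation, and already-uppercase letters pass through.
assert convert_to_uppercase("hello, World 123!") == "HELLO, WORLD 123!"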
517
import os
import sys
from contextlib import contextmanager


# Windows only
if os.name == "nt":
    import ctypes
    import msvcrt  # noqa

    class CursorInfo(ctypes.Structure):
        # _fields_ is the attribute ctypes expects for a Structure layout.
        _fields_ = [("size", ctypes.c_int), ("visible", ctypes.c_byte)]


def hide_cursor() -> None:
    if os.name == "nt":
        ci = CursorInfo()
        handle = ctypes.windll.kernel32.GetStdHandle(-11)
        ctypes.windll.kernel32.GetConsoleCursorInfo(handle, ctypes.byref(ci))
        ci.visible = False
        ctypes.windll.kernel32.SetConsoleCursorInfo(handle, ctypes.byref(ci))
    elif os.name == "posix":
        sys.stdout.write("\033[?25l")
        sys.stdout.flush()


def show_cursor() -> None:
    if os.name == "nt":
        ci = CursorInfo()
        handle = ctypes.windll.kernel32.GetStdHandle(-11)
        ctypes.windll.kernel32.GetConsoleCursorInfo(handle, ctypes.byref(ci))
        ci.visible = True
        ctypes.windll.kernel32.SetConsoleCursorInfo(handle, ctypes.byref(ci))
    elif os.name == "posix":
        sys.stdout.write("\033[?25h")
        sys.stdout.flush()


@contextmanager
def hide_cursor_context():
    try:
        hide_cursor()
        yield
    finally:
        show_cursor()
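# Hedged usage sketch (names as defined above): hide the terminal cursor for
# the duration of a slow operation and restore it even if an exception escapes.
if __name__ == "__main__":
    import time

    with hide_cursor_context():
        time.sleep(2)  # cursor is hidden while this runs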
517
1
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

REALM_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/realm-cc-news-pretrained-embedder": (
        "https://huggingface.co/google/realm-cc-news-pretrained-embedder/resolve/main/config.json"
    ),
    "google/realm-cc-news-pretrained-encoder": (
        "https://huggingface.co/google/realm-cc-news-pretrained-encoder/resolve/main/config.json"
    ),
    "google/realm-cc-news-pretrained-scorer": (
        "https://huggingface.co/google/realm-cc-news-pretrained-scorer/resolve/main/config.json"
    ),
    "google/realm-cc-news-pretrained-openqa": (
        "https://huggingface.co/google/realm-cc-news-pretrained-openqa/resolve/main/config.json"
    ),
    "google/realm-orqa-nq-openqa": "https://huggingface.co/google/realm-orqa-nq-openqa/resolve/main/config.json",
    "google/realm-orqa-nq-reader": "https://huggingface.co/google/realm-orqa-nq-reader/resolve/main/config.json",
    "google/realm-orqa-wq-openqa": "https://huggingface.co/google/realm-orqa-wq-openqa/resolve/main/config.json",
    "google/realm-orqa-wq-reader": "https://huggingface.co/google/realm-orqa-wq-reader/resolve/main/config.json",
    # See all REALM models at https://huggingface.co/models?filter=realm
}


class RealmConfig(PretrainedConfig):
    model_type = "realm"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        retriever_proj_size=128,
        num_hidden_layers=12,
        num_attention_heads=12,
        num_candidates=8,
        intermediate_size=3072,
        hidden_act="gelu_new",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        span_hidden_size=256,
        max_span_width=10,
        reader_layer_norm_eps=1e-3,
        reader_beam_size=5,
        reader_seq_len=320,
        num_block_records=13353718,
        searcher_beam_size=5000,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        # Common config
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.retriever_proj_size = retriever_proj_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.num_candidates = num_candidates
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps

        # Reader config
        self.span_hidden_size = span_hidden_size
        self.max_span_width = max_span_width
        self.reader_layer_norm_eps = reader_layer_norm_eps
        self.reader_beam_size = reader_beam_size
        self.reader_seq_len = reader_seq_len

        # Retrieval config
        self.num_block_records = num_block_records
        self.searcher_beam_size = searcher_beam_size
351
import inspect import unittest from transformers import YolosConfig from transformers.testing_utils import require_torch, require_vision, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import YolosForObjectDetection, YolosModel from transformers.models.yolos.modeling_yolos import YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import AutoImageProcessor class __lowercase : def __init__( self , A_ , A_=13 , A_=[30, 30] , A_=2 , A_=3 , A_=True , A_=True , A_=32 , A_=5 , A_=4 , A_=37 , A_="gelu" , A_=0.1 , A_=0.1 , A_=10 , A_=0.02 , A_=3 , A_=None , A_=8 , A_=10 , ) ->Optional[Any]: '''simple docstring''' __lowerCAmelCase : Union[str, Any] = parent __lowerCAmelCase : List[Any] = batch_size __lowerCAmelCase : List[Any] = image_size __lowerCAmelCase : Tuple = patch_size __lowerCAmelCase : int = num_channels __lowerCAmelCase : Tuple = is_training __lowerCAmelCase : Optional[Any] = use_labels __lowerCAmelCase : str = hidden_size __lowerCAmelCase : Dict = num_hidden_layers __lowerCAmelCase : Optional[Any] = num_attention_heads __lowerCAmelCase : Optional[int] = intermediate_size __lowerCAmelCase : int = hidden_act __lowerCAmelCase : Dict = hidden_dropout_prob __lowerCAmelCase : Any = attention_probs_dropout_prob __lowerCAmelCase : Optional[Any] = type_sequence_label_size __lowerCAmelCase : Optional[Any] = initializer_range __lowerCAmelCase : List[Any] = num_labels __lowerCAmelCase : str = scope __lowerCAmelCase : Union[str, Any] = n_targets __lowerCAmelCase : Tuple = num_detection_tokens # we set the expected sequence length (which is used in several tests) # expected sequence length = num_patches + 1 (we add 1 for the [CLS] token) + num_detection_tokens __lowerCAmelCase : Union[str, Any] = (image_size[1] // patch_size) * (image_size[0] // patch_size) __lowerCAmelCase : List[Any] = num_patches + 1 + self.num_detection_tokens def UpperCamelCase__ ( self ) ->Any: '''simple docstring''' __lowerCAmelCase : Any = floats_tensor([self.batch_size, self.num_channels, self.image_size[0], self.image_size[1]] ) __lowerCAmelCase : str = None if self.use_labels: # labels is a list of Dict (each Dict being the labels for a given example in the batch) __lowerCAmelCase : Union[str, Any] = [] for i in range(self.batch_size ): __lowerCAmelCase : int = {} __lowerCAmelCase : Union[str, Any] = torch.randint( high=self.num_labels , size=(self.n_targets,) , device=A_ ) __lowerCAmelCase : List[Any] = torch.rand(self.n_targets , 4 , device=A_ ) labels.append(A_ ) __lowerCAmelCase : Union[str, Any] = self.get_config() return config, pixel_values, labels def UpperCamelCase__ ( self ) ->Any: '''simple docstring''' return YolosConfig( image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=A_ , initializer_range=self.initializer_range , num_detection_tokens=self.num_detection_tokens , num_labels=self.num_labels , ) def 
UpperCamelCase__ ( self , A_ , A_ , A_ ) ->Dict: '''simple docstring''' __lowerCAmelCase : Dict = YolosModel(config=A_ ) model.to(A_ ) model.eval() __lowerCAmelCase : Tuple = model(A_ ) self.parent.assertEqual( result.last_hidden_state.shape , (self.batch_size, self.expected_seq_len, self.hidden_size) ) def UpperCamelCase__ ( self , A_ , A_ , A_ ) ->List[str]: '''simple docstring''' __lowerCAmelCase : int = YolosForObjectDetection(A_ ) model.to(A_ ) model.eval() __lowerCAmelCase : List[str] = model(pixel_values=A_ ) __lowerCAmelCase : List[Any] = model(A_ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_detection_tokens, self.num_labels + 1) ) self.parent.assertEqual(result.pred_boxes.shape , (self.batch_size, self.num_detection_tokens, 4) ) __lowerCAmelCase : str = model(pixel_values=A_ , labels=A_ ) self.parent.assertEqual(result.loss.shape , () ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_detection_tokens, self.num_labels + 1) ) self.parent.assertEqual(result.pred_boxes.shape , (self.batch_size, self.num_detection_tokens, 4) ) def UpperCamelCase__ ( self ) ->Optional[int]: '''simple docstring''' __lowerCAmelCase : str = self.prepare_config_and_inputs() __lowerCAmelCase, __lowerCAmelCase, __lowerCAmelCase : Optional[int] = config_and_inputs __lowerCAmelCase : Dict = {'''pixel_values''': pixel_values} return config, inputs_dict @require_torch class __lowercase (_UpperCAmelCase , _UpperCAmelCase , unittest.TestCase ): _UpperCamelCase = (YolosModel, YolosForObjectDetection) if is_torch_available() else () _UpperCamelCase = ( {"""feature-extraction""": YolosModel, """object-detection""": YolosForObjectDetection} if is_torch_available() else {} ) _UpperCamelCase = False _UpperCamelCase = False _UpperCamelCase = False _UpperCamelCase = False def UpperCamelCase__ ( self , A_ , A_ , A_=False ) ->List[Any]: '''simple docstring''' __lowerCAmelCase : Any = super()._prepare_for_class(A_ , A_ , return_labels=A_ ) if return_labels: if model_class.__name__ == "YolosForObjectDetection": __lowerCAmelCase : Union[str, Any] = [] for i in range(self.model_tester.batch_size ): __lowerCAmelCase : List[str] = {} __lowerCAmelCase : Optional[int] = torch.ones( size=(self.model_tester.n_targets,) , device=A_ , dtype=torch.long ) __lowerCAmelCase : str = torch.ones( self.model_tester.n_targets , 4 , device=A_ , dtype=torch.float ) labels.append(A_ ) __lowerCAmelCase : Union[str, Any] = labels return inputs_dict def UpperCamelCase__ ( self ) ->List[Any]: '''simple docstring''' __lowerCAmelCase : Tuple = YolosModelTester(self ) __lowerCAmelCase : Union[str, Any] = ConfigTester(self , config_class=A_ , has_text_modality=A_ , hidden_size=37 ) def UpperCamelCase__ ( self ) ->Dict: '''simple docstring''' self.config_tester.run_common_tests() def UpperCamelCase__ ( self ) ->Optional[int]: '''simple docstring''' pass def UpperCamelCase__ ( self ) ->Union[str, Any]: '''simple docstring''' __lowerCAmelCase, __lowerCAmelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: __lowerCAmelCase : List[str] = model_class(A_ ) self.assertIsInstance(model.get_input_embeddings() , (nn.Module) ) __lowerCAmelCase : int = model.get_output_embeddings() self.assertTrue(x is None or isinstance(A_ , nn.Linear ) ) def UpperCamelCase__ ( self ) ->Optional[Any]: '''simple docstring''' __lowerCAmelCase, __lowerCAmelCase : Dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in 
self.all_model_classes: __lowerCAmelCase : Optional[int] = model_class(A_ ) __lowerCAmelCase : Optional[int] = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic __lowerCAmelCase : Dict = [*signature.parameters.keys()] __lowerCAmelCase : Optional[int] = ['''pixel_values'''] self.assertListEqual(arg_names[:1] , A_ ) def UpperCamelCase__ ( self ) ->Tuple: '''simple docstring''' __lowerCAmelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*A_ ) def UpperCamelCase__ ( self ) ->Any: '''simple docstring''' __lowerCAmelCase, __lowerCAmelCase : Dict = self.model_tester.prepare_config_and_inputs_for_common() __lowerCAmelCase : List[Any] = True # in YOLOS, the seq_len is different __lowerCAmelCase : Dict = self.model_tester.expected_seq_len for model_class in self.all_model_classes: __lowerCAmelCase : str = True __lowerCAmelCase : List[str] = False __lowerCAmelCase : List[str] = True __lowerCAmelCase : Dict = model_class(A_ ) model.to(A_ ) model.eval() with torch.no_grad(): __lowerCAmelCase : str = model(**self._prepare_for_class(A_ , A_ ) ) __lowerCAmelCase : Dict = outputs.attentions self.assertEqual(len(A_ ) , self.model_tester.num_hidden_layers ) # check that output_attentions also work using config del inputs_dict["output_attentions"] __lowerCAmelCase : str = True __lowerCAmelCase : Tuple = model_class(A_ ) model.to(A_ ) model.eval() with torch.no_grad(): __lowerCAmelCase : Union[str, Any] = model(**self._prepare_for_class(A_ , A_ ) ) __lowerCAmelCase : Any = outputs.attentions self.assertEqual(len(A_ ) , self.model_tester.num_hidden_layers ) self.assertListEqual( list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_len, seq_len] , ) __lowerCAmelCase : List[str] = len(A_ ) # Check attention is always last and order is fine __lowerCAmelCase : str = True __lowerCAmelCase : Tuple = True __lowerCAmelCase : Dict = model_class(A_ ) model.to(A_ ) model.eval() with torch.no_grad(): __lowerCAmelCase : str = model(**self._prepare_for_class(A_ , A_ ) ) __lowerCAmelCase : Optional[int] = 1 self.assertEqual(out_len + added_hidden_states , len(A_ ) ) __lowerCAmelCase : Optional[int] = outputs.attentions self.assertEqual(len(A_ ) , self.model_tester.num_hidden_layers ) self.assertListEqual( list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_len, seq_len] , ) def UpperCamelCase__ ( self ) ->Optional[Any]: '''simple docstring''' def check_hidden_states_output(A_ , A_ , A_ ): __lowerCAmelCase : Optional[Any] = model_class(A_ ) model.to(A_ ) model.eval() with torch.no_grad(): __lowerCAmelCase : Optional[int] = model(**self._prepare_for_class(A_ , A_ ) ) __lowerCAmelCase : str = outputs.hidden_states __lowerCAmelCase : List[Any] = getattr( self.model_tester , '''expected_num_hidden_layers''' , self.model_tester.num_hidden_layers + 1 ) self.assertEqual(len(A_ ) , A_ ) # YOLOS has a different seq_length __lowerCAmelCase : List[str] = self.model_tester.expected_seq_len self.assertListEqual( list(hidden_states[0].shape[-2:] ) , [seq_length, self.model_tester.hidden_size] , ) __lowerCAmelCase, __lowerCAmelCase : List[Any] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: __lowerCAmelCase : Tuple = True check_hidden_states_output(A_ , A_ , A_ ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] __lowerCAmelCase : Dict = True 
check_hidden_states_output(A_ , A_ , A_ ) def UpperCamelCase__ ( self ) ->int: '''simple docstring''' __lowerCAmelCase : List[str] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_object_detection(*A_ ) @slow def UpperCamelCase__ ( self ) ->Optional[Any]: '''simple docstring''' for model_name in YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: __lowerCAmelCase : Union[str, Any] = YolosModel.from_pretrained(A_ ) self.assertIsNotNone(A_ ) def _lowercase ( ): __lowerCAmelCase : Union[str, Any] = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' ) return image @require_torch @require_vision class __lowercase (unittest.TestCase ): @cached_property def UpperCamelCase__ ( self ) ->str: '''simple docstring''' return AutoImageProcessor.from_pretrained('''hustvl/yolos-small''' ) if is_vision_available() else None @slow def UpperCamelCase__ ( self ) ->List[str]: '''simple docstring''' __lowerCAmelCase : Optional[Any] = YolosForObjectDetection.from_pretrained('''hustvl/yolos-small''' ).to(A_ ) __lowerCAmelCase : Optional[Any] = self.default_image_processor __lowerCAmelCase : Optional[Any] = prepare_img() __lowerCAmelCase : Optional[Any] = image_processor(images=A_ , return_tensors='''pt''' ).to(A_ ) # forward pass with torch.no_grad(): __lowerCAmelCase : str = model(inputs.pixel_values ) # verify outputs __lowerCAmelCase : Optional[Any] = torch.Size((1, 100, 92) ) self.assertEqual(outputs.logits.shape , A_ ) __lowerCAmelCase : Tuple = torch.tensor( [[-24.0_248, -10.3_024, -14.8_290], [-42.0_392, -16.8_200, -27.4_334], [-27.2_743, -11.8_154, -18.7_148]] , device=A_ , ) __lowerCAmelCase : Optional[int] = torch.tensor( [[0.2_559, 0.5_455, 0.4_706], [0.2_989, 0.7_279, 0.1_875], [0.7_732, 0.4_017, 0.4_462]] , device=A_ ) self.assertTrue(torch.allclose(outputs.logits[0, :3, :3] , A_ , atol=1e-4 ) ) self.assertTrue(torch.allclose(outputs.pred_boxes[0, :3, :3] , A_ , atol=1e-4 ) ) # verify postprocessing __lowerCAmelCase : Optional[Any] = image_processor.post_process_object_detection( A_ , threshold=0.3 , target_sizes=[image.size[::-1]] )[0] __lowerCAmelCase : Tuple = torch.tensor([0.9_994, 0.9_790, 0.9_964, 0.9_972, 0.9_861] ).to(A_ ) __lowerCAmelCase : Any = [75, 75, 17, 63, 17] __lowerCAmelCase : Tuple = torch.tensor([335.0_609, 79.3_848, 375.4_216, 187.2_495] ).to(A_ ) self.assertEqual(len(results['''scores'''] ) , 5 ) self.assertTrue(torch.allclose(results['''scores'''] , A_ , atol=1e-4 ) ) self.assertSequenceEqual(results['''labels'''].tolist() , A_ ) self.assertTrue(torch.allclose(results['''boxes'''][0, :] , A_ ) )
492
0
DOOMSDAY_LEAP = [4, 1, 7, 4, 2, 6, 4, 1, 5, 3, 7, 5]
DOOMSDAY_NOT_LEAP = [3, 7, 7, 4, 2, 6, 4, 1, 5, 3, 7, 5]
WEEK_DAY_NAMES = {
    0: "Sunday",
    1: "Monday",
    2: "Tuesday",
    3: "Wednesday",
    4: "Thursday",
    5: "Friday",
    6: "Saturday",
}


def get_week_day(year: int, month: int, day: int) -> str:
    """Return the day of the week for a Gregorian date via Conway's Doomsday rule."""
    assert len(str(year)) > 2, "year should be in YYYY format"
    assert 1 <= month <= 12, "month should be between 1 to 12"
    assert 1 <= day <= 31, "day should be between 1 to 31"

    # Doomsday algorithm:
    century = year // 100
    century_anchor = (5 * (century % 4) + 2) % 7
    centurian = year % 100
    centurian_m = centurian % 12
    dooms_day = ((centurian // 12) + centurian_m + (centurian_m // 4) + century_anchor) % 7
    # Gregorian leap years: divisible by 4, except century years not divisible by 400.
    day_anchor = (
        DOOMSDAY_NOT_LEAP[month - 1]
        if year % 4 != 0 or (centurian == 0 and year % 400 != 0)
        else DOOMSDAY_LEAP[month - 1]
    )
    week_day = (dooms_day + day - day_anchor) % 7
    return WEEK_DAY_NAMES[week_day]


if __name__ == "__main__":
    import doctest

    doctest.testmod()
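# Hedged spot-checks for get_week_day above: 2023-01-01 fell on a Sunday,
# and 2000-02-29 (2000 is divisible by 400, hence a leap year) on a Tuesday.
assert get_week_day(2023, 1, 1) == "Sunday"
assert get_week_day(2000, 2, 29) == "Tuesday"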
711
import argparse

import torch

from transformers import (
    UniSpeechSatConfig,
    UniSpeechSatForAudioFrameClassification,
    UniSpeechSatForSequenceClassification,
    UniSpeechSatForXVector,
    Wav2Vec2FeatureExtractor,
    logging,
)


logging.set_verbosity_info()
logger = logging.get_logger(__name__)


def convert_classification(base_model_name, hf_config, downstream_dict):
    model = UniSpeechSatForSequenceClassification.from_pretrained(base_model_name, config=hf_config)
    model.projector.weight.data = downstream_dict["projector.weight"]
    model.projector.bias.data = downstream_dict["projector.bias"]
    model.classifier.weight.data = downstream_dict["model.post_net.linear.weight"]
    model.classifier.bias.data = downstream_dict["model.post_net.linear.bias"]
    return model


def convert_diarization(base_model_name, hf_config, downstream_dict):
    model = UniSpeechSatForAudioFrameClassification.from_pretrained(base_model_name, config=hf_config)
    model.classifier.weight.data = downstream_dict["model.linear.weight"]
    model.classifier.bias.data = downstream_dict["model.linear.bias"]
    return model


def convert_xvector(base_model_name, hf_config, downstream_dict):
    model = UniSpeechSatForXVector.from_pretrained(base_model_name, config=hf_config)
    model.projector.weight.data = downstream_dict["connector.weight"]
    model.projector.bias.data = downstream_dict["connector.bias"]
    for i, kernel_size in enumerate(hf_config.tdnn_kernel):
        model.tdnn[i].kernel.weight.data = downstream_dict[
            f"model.framelevel_feature_extractor.module.{i}.kernel.weight"
        ]
        model.tdnn[i].kernel.bias.data = downstream_dict[f"model.framelevel_feature_extractor.module.{i}.kernel.bias"]

    model.feature_extractor.weight.data = downstream_dict["model.utterancelevel_feature_extractor.linear1.weight"]
    model.feature_extractor.bias.data = downstream_dict["model.utterancelevel_feature_extractor.linear1.bias"]
    model.classifier.weight.data = downstream_dict["model.utterancelevel_feature_extractor.linear2.weight"]
    model.classifier.bias.data = downstream_dict["model.utterancelevel_feature_extractor.linear2.bias"]
    model.objective.weight.data = downstream_dict["objective.W"]
    return model


@torch.no_grad()
def convert_s3prl_checkpoint(base_model_name, config_path, checkpoint_path, model_dump_path):
    checkpoint = torch.load(checkpoint_path, map_location="cpu")
    downstream_dict = checkpoint["Downstream"]

    hf_config = UniSpeechSatConfig.from_pretrained(config_path)
    hf_feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained(
        base_model_name, return_attention_mask=True, do_normalize=False
    )

    arch = hf_config.architectures[0]
    if arch.endswith("ForSequenceClassification"):
        hf_model = convert_classification(base_model_name, hf_config, downstream_dict)
    elif arch.endswith("ForAudioFrameClassification"):
        hf_model = convert_diarization(base_model_name, hf_config, downstream_dict)
    elif arch.endswith("ForXVector"):
        hf_model = convert_xvector(base_model_name, hf_config, downstream_dict)
    else:
        raise NotImplementedError(f"S3PRL weights conversion is not supported for {arch}")

    if hf_config.use_weighted_layer_sum:
        hf_model.layer_weights.data = checkpoint["Featurizer"]["weights"]

    hf_feature_extractor.save_pretrained(model_dump_path)
    hf_model.save_pretrained(model_dump_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--base_model_name", default=None, type=str, help="Name of the huggingface pretrained base model."
    )
    parser.add_argument("--config_path", default=None, type=str, help="Path to the huggingface classifier config.")
    parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to the s3prl checkpoint.")
    parser.add_argument("--model_dump_path", default=None, type=str, help="Path to the final converted model.")
    args = parser.parse_args()

    convert_s3prl_checkpoint(args.base_model_name, args.config_path, args.checkpoint_path, args.model_dump_path)
269
0
def valid_coloring(neighbours: list[int], colored_vertices: list[int], color: int) -> bool:
    # A color is usable on a vertex if no already-colored neighbour carries it.
    return not any(
        neighbour == 1 and colored_vertices[i] == color for i, neighbour in enumerate(neighbours)
    )


def util_color(graph: list[list[int]], max_colors: int, colored_vertices: list[int], index: int) -> bool:
    # Base Case: every vertex has received a color.
    if index == len(graph):
        return True

    # Recursive Step: try each color on the current vertex.
    for i in range(max_colors):
        if valid_coloring(graph[index], colored_vertices, i):
            # Color current vertex
            colored_vertices[index] = i

            # Validate coloring
            if util_color(graph, max_colors, colored_vertices, index + 1):
                return True

            # Backtrack
            colored_vertices[index] = -1
    return False


def color(graph: list[list[int]], max_colors: int) -> list[int]:
    colored_vertices = [-1] * len(graph)
    if util_color(graph, max_colors, colored_vertices, 0):
        return colored_vertices
    return []
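# Hedged usage example for the backtracking colorer above: a triangle
# (3-cycle, given as an adjacency matrix) needs three colors, so with only
# two available the solver returns [].
triangle = [
    [0, 1, 1],
    [1, 0, 1],
    [1, 1, 0],
]
assert color(triangle, 3) == [0, 1, 2]
assert color(triangle, 2) == []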
351
from abc import ABC, abstractmethod
from argparse import ArgumentParser


class BaseCLICommand(ABC):
    @staticmethod
    @abstractmethod
    def register_subcommand(parser: ArgumentParser):
        raise NotImplementedError()

    @abstractmethod
    def run(self):
        raise NotImplementedError()
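# Hedged sketch of a concrete subclass (hypothetical, for illustration only):
# `parser` is assumed to be an argparse sub-parsers action, as in CLIs that
# use this base-class pattern.
class EchoCommand(BaseCLICommand):
    def __init__(self, text: str):
        self.text = text

    @staticmethod
    def register_subcommand(parser):
        echo_parser = parser.add_parser("echo")
        echo_parser.add_argument("text")
        echo_parser.set_defaults(func=lambda args: EchoCommand(args.text))

    def run(self):
        print(self.text)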
351
1
import inspect
import re
from hashlib import sha256
from typing import Dict, List

from .arrow import arrow
from .audiofolder import audiofolder
from .csv import csv
from .imagefolder import imagefolder
from .json import json
from .pandas import pandas
from .parquet import parquet
from .sql import sql  # noqa F401
from .text import text


def _hash_python_lines(lines: List[str]) -> str:
    filtered_lines = []
    for line in lines:
        line = re.sub(r"#.*", "", line)  # remove comments
        if line:
            filtered_lines.append(line)
    full_str = "\n".join(filtered_lines)

    # Make a hash from all this code
    full_bytes = full_str.encode("utf-8")
    return sha256(full_bytes).hexdigest()


# get importable module names and hash for caching
_PACKAGED_DATASETS_MODULES = {
    "csv": (csv.__name__, _hash_python_lines(inspect.getsource(csv).splitlines())),
    "json": (json.__name__, _hash_python_lines(inspect.getsource(json).splitlines())),
    "pandas": (pandas.__name__, _hash_python_lines(inspect.getsource(pandas).splitlines())),
    "parquet": (parquet.__name__, _hash_python_lines(inspect.getsource(parquet).splitlines())),
    "arrow": (arrow.__name__, _hash_python_lines(inspect.getsource(arrow).splitlines())),
    "text": (text.__name__, _hash_python_lines(inspect.getsource(text).splitlines())),
    "imagefolder": (imagefolder.__name__, _hash_python_lines(inspect.getsource(imagefolder).splitlines())),
    "audiofolder": (audiofolder.__name__, _hash_python_lines(inspect.getsource(audiofolder).splitlines())),
}

# Used to infer the module to use based on the data files extensions
_EXTENSION_TO_MODULE = {
    ".csv": ("csv", {}),
    ".tsv": ("csv", {"sep": "\t"}),
    ".json": ("json", {}),
    ".jsonl": ("json", {}),
    ".parquet": ("parquet", {}),
    ".arrow": ("arrow", {}),
    ".txt": ("text", {}),
}
_EXTENSION_TO_MODULE.update({ext: ("imagefolder", {}) for ext in imagefolder.ImageFolder.EXTENSIONS})
_EXTENSION_TO_MODULE.update({ext.upper(): ("imagefolder", {}) for ext in imagefolder.ImageFolder.EXTENSIONS})
_EXTENSION_TO_MODULE.update({ext: ("audiofolder", {}) for ext in audiofolder.AudioFolder.EXTENSIONS})
_EXTENSION_TO_MODULE.update({ext.upper(): ("audiofolder", {}) for ext in audiofolder.AudioFolder.EXTENSIONS})
_MODULE_SUPPORTS_METADATA = {"imagefolder", "audiofolder"}

# Used to filter data files based on extensions given a module name
_MODULE_TO_EXTENSIONS: Dict[str, List[str]] = {}
for _ext, (_module, _) in _EXTENSION_TO_MODULE.items():
    _MODULE_TO_EXTENSIONS.setdefault(_module, []).append(_ext)

_MODULE_TO_EXTENSIONS["imagefolder"].append(".zip")
_MODULE_TO_EXTENSIONS["audiofolder"].append(".zip")
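# Hedged illustration of _hash_python_lines above: full-line comments and
# blank lines are stripped before hashing, so these two inputs hash identically.
assert _hash_python_lines(["x = 1", "# a comment", "", "y = 2"]) == _hash_python_lines(["x = 1", "y = 2"])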
708
from pathlib import PurePosixPath
from typing import Optional

import fsspec
from fsspec import AbstractFileSystem
from huggingface_hub.hf_api import DatasetInfo

from ..utils.file_utils import get_authentication_headers_for_url
from ..utils.hub import hf_hub_url


class HfFileSystem(AbstractFileSystem):
    """Read-only filesystem view over the files of a Hugging Face dataset repository."""

    root_marker = ""
    protocol = "hf-legacy"  # "hf://" is reserved for hffs

    def __init__(self, repo_info: Optional[DatasetInfo] = None, token: Optional[str] = None, **kwargs):
        super().__init__(self, **kwargs)
        self.repo_info = repo_info
        self.token = token
        self.dir_cache = None

    def _get_dirs(self):
        if self.dir_cache is None:
            self.dir_cache = {}
            for hf_file in self.repo_info.siblings:
                # TODO(QL): add sizes
                self.dir_cache[hf_file.rfilename] = {
                    "name": hf_file.rfilename,
                    "size": None,
                    "type": "file",
                }
                self.dir_cache.update(
                    {
                        str(d): {"name": str(d), "size": None, "type": "directory"}
                        for d in list(PurePosixPath(hf_file.rfilename).parents)[:-1]
                    }
                )

    def _open(self, path: str, mode: str = "rb", **kwargs):
        if not isinstance(self.repo_info, DatasetInfo):
            raise NotImplementedError(f"Open is only implemented for dataset repositories, but got {self.repo_info}")
        url = hf_hub_url(self.repo_info.id, path, revision=self.repo_info.sha)
        return fsspec.open(
            url,
            mode=mode,
            headers=get_authentication_headers_for_url(url, use_auth_token=self.token),
            client_kwargs={"trust_env": True},
        ).open()

    def info(self, path, **kwargs):
        self._get_dirs()
        path = self._strip_protocol(path)
        if path in self.dir_cache:
            return self.dir_cache[path]
        else:
            raise FileNotFoundError(path)

    def ls(self, path, detail=False, **kwargs):
        self._get_dirs()
        path = PurePosixPath(path.strip("/"))
        paths = {}
        for p, f in self.dir_cache.items():
            p = PurePosixPath(p.strip("/"))
            root = p.parent
            if root == path:
                paths[str(p)] = f
        out = list(paths.values())
        if detail:
            return out
        else:
            return sorted(f["name"] for f in out)
7
0
from __future__ import annotations import random import unittest from transformers import TransfoXLConfig, is_tf_available from transformers.testing_utils import require_tf, slow from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import ( TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST, TFTransfoXLForSequenceClassification, TFTransfoXLLMHeadModel, TFTransfoXLModel, ) class _snake_case : def __init__( self , SCREAMING_SNAKE_CASE_ , ): '''simple docstring''' lowercase__ : Any = parent lowercase__ : Optional[Any] = 13 lowercase__ : Any = 7 lowercase__ : Optional[Any] = 30 lowercase__ : int = self.seq_length + self.mem_len lowercase__ : str = 15 lowercase__ : int = True lowercase__ : Union[str, Any] = True lowercase__ : Optional[Any] = 99 lowercase__ : Any = [10, 50, 80] lowercase__ : str = 32 lowercase__ : Tuple = 32 lowercase__ : int = 4 lowercase__ : Tuple = 8 lowercase__ : Optional[int] = 1_28 lowercase__ : Any = 2 lowercase__ : Optional[int] = 2 lowercase__ : List[Any] = None lowercase__ : Union[str, Any] = 1 lowercase__ : List[Any] = 0 lowercase__ : Union[str, Any] = 3 lowercase__ : Tuple = self.vocab_size - 1 lowercase__ : Union[str, Any] = 0.0_1 def lowercase__ ( self): '''simple docstring''' lowercase__ : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size) lowercase__ : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size) lowercase__ : List[str] = None if self.use_labels: lowercase__ : str = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size) lowercase__ : int = TransfoXLConfig( vocab_size=self.vocab_size , mem_len=self.mem_len , clamp_len=self.clamp_len , cutoffs=self.cutoffs , d_model=self.hidden_size , d_embed=self.d_embed , n_head=self.num_attention_heads , d_head=self.d_head , d_inner=self.d_inner , div_val=self.div_val , n_layer=self.num_hidden_layers , eos_token_id=self.eos_token_id , pad_token_id=self.vocab_size - 1 , init_range=self.init_range , num_labels=self.num_labels , ) return (config, input_ids_a, input_ids_a, lm_labels) def lowercase__ ( self): '''simple docstring''' random.seed(self.seed) tf.random.set_seed(self.seed) def lowercase__ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_): '''simple docstring''' lowercase__ : List[str] = TFTransfoXLModel(SCREAMING_SNAKE_CASE_) lowercase__ , lowercase__ : Optional[Any] = model(SCREAMING_SNAKE_CASE_).to_tuple() lowercase__ : List[Any] = {"""input_ids""": input_ids_a, """mems""": mems_a} lowercase__ , lowercase__ : Tuple = model(SCREAMING_SNAKE_CASE_).to_tuple() self.parent.assertEqual(hidden_states_a.shape , (self.batch_size, self.seq_length, self.hidden_size)) self.parent.assertEqual(hidden_states_a.shape , (self.batch_size, self.seq_length, self.hidden_size)) self.parent.assertListEqual( [mem.shape for mem in mems_a] , [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers , ) self.parent.assertListEqual( [mem.shape for mem in mems_a] , [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers , ) def lowercase__ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_): '''simple docstring''' lowercase__ : Union[str, Any] = TFTransfoXLLMHeadModel(SCREAMING_SNAKE_CASE_) lowercase__ , lowercase__ : Dict = 
model(SCREAMING_SNAKE_CASE_).to_tuple() lowercase__ : int = {"""input_ids""": input_ids_a, """labels""": lm_labels} lowercase__ , lowercase__ : str = model(SCREAMING_SNAKE_CASE_).to_tuple() lowercase__ , lowercase__ : Dict = model([input_ids_a, mems_a]).to_tuple() lowercase__ : Tuple = {"""input_ids""": input_ids_a, """mems""": mems_a, """labels""": lm_labels} lowercase__ , lowercase__ : Dict = model(SCREAMING_SNAKE_CASE_).to_tuple() self.parent.assertEqual(lm_logits_a.shape , (self.batch_size, self.seq_length, self.vocab_size)) self.parent.assertListEqual( [mem.shape for mem in mems_a] , [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers , ) self.parent.assertEqual(lm_logits_a.shape , (self.batch_size, self.seq_length, self.vocab_size)) self.parent.assertListEqual( [mem.shape for mem in mems_a] , [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers , ) def lowercase__ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_): '''simple docstring''' lowercase__ : str = TFTransfoXLForSequenceClassification(SCREAMING_SNAKE_CASE_) lowercase__ : Optional[Any] = model(SCREAMING_SNAKE_CASE_) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels)) def lowercase__ ( self): '''simple docstring''' lowercase__ : Union[str, Any] = self.prepare_config_and_inputs() ((lowercase__) , (lowercase__) , (lowercase__) , (lowercase__)) : Any = config_and_inputs lowercase__ : Any = {"""input_ids""": input_ids_a} return config, inputs_dict @require_tf class _snake_case ( UpperCAmelCase_ , UpperCAmelCase_ , unittest.TestCase ): __lowerCAmelCase : List[Any] = ( (TFTransfoXLModel, TFTransfoXLLMHeadModel, TFTransfoXLForSequenceClassification) if is_tf_available() else () ) __lowerCAmelCase : Union[str, Any] = () if is_tf_available() else () __lowerCAmelCase : Optional[int] = ( { 'feature-extraction': TFTransfoXLModel, 'text-classification': TFTransfoXLForSequenceClassification, 'text-generation': TFTransfoXLLMHeadModel, 'zero-shot': TFTransfoXLForSequenceClassification, } if is_tf_available() else {} ) # TODO: add this test when TFTransfoXLLMHead has a linear output layer implemented __lowerCAmelCase : Any = False __lowerCAmelCase : Dict = False __lowerCAmelCase : Union[str, Any] = False __lowerCAmelCase : List[Any] = False def lowercase__ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_): '''simple docstring''' if pipeline_test_casse_name == "TextGenerationPipelineTests": # Get `ValueError: AttributeError: 'NoneType' object has no attribute 'new_ones'` or `AssertionError`. # `TransfoXLConfig` was never used in pipeline tests: cannot create a simple # tokenizer. 
return True return False def lowercase__ ( self): '''simple docstring''' lowercase__ : Any = TFTransfoXLModelTester(self) lowercase__ : int = ConfigTester(self , config_class=SCREAMING_SNAKE_CASE_ , d_embed=37) def lowercase__ ( self): '''simple docstring''' self.config_tester.run_common_tests() def lowercase__ ( self): '''simple docstring''' self.model_tester.set_seed() lowercase__ : Dict = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_transfo_xl_model(*SCREAMING_SNAKE_CASE_) def lowercase__ ( self): '''simple docstring''' self.model_tester.set_seed() lowercase__ : List[str] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_transfo_xl_lm_head(*SCREAMING_SNAKE_CASE_) def lowercase__ ( self): '''simple docstring''' lowercase__ : Dict = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_transfo_xl_for_sequence_classification(*SCREAMING_SNAKE_CASE_) def lowercase__ ( self): '''simple docstring''' lowercase__ , lowercase__ : Tuple = self.model_tester.prepare_config_and_inputs_for_common() lowercase__ : List[str] = [TFTransfoXLForSequenceClassification] for model_class in self.all_model_classes: lowercase__ : Optional[Any] = model_class(SCREAMING_SNAKE_CASE_) assert isinstance(model.get_input_embeddings() , tf.keras.layers.Layer) if model_class in list_other_models_with_output_ebd: lowercase__ : Tuple = model.get_output_embeddings() assert isinstance(SCREAMING_SNAKE_CASE_ , tf.keras.layers.Layer) lowercase__ : List[Any] = model.get_bias() assert name is None else: lowercase__ : Union[str, Any] = model.get_output_embeddings() assert x is None lowercase__ : Optional[Any] = model.get_bias() assert name is None def lowercase__ ( self): '''simple docstring''' pass @slow def lowercase__ ( self): '''simple docstring''' for model_name in TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: lowercase__ : Union[str, Any] = TFTransfoXLModel.from_pretrained(SCREAMING_SNAKE_CASE_) self.assertIsNotNone(SCREAMING_SNAKE_CASE_) @unittest.skip(reason="""This model doesn't play well with fit() due to not returning a single loss.""") def lowercase__ ( self): '''simple docstring''' pass @require_tf class _snake_case ( unittest.TestCase ): @unittest.skip("""Skip test until #12651 is resolved.""") @slow def lowercase__ ( self): '''simple docstring''' lowercase__ : List[str] = TFTransfoXLLMHeadModel.from_pretrained("""transfo-xl-wt103""") # fmt: off lowercase__ : int = tf.convert_to_tensor([[33,12_97,2,1,10_09,4,11_09,1_17_39,47_62,3_58,5,25,2_45,22,17_06,17,2_00_98,5,32_15,21,37,11_10,3,13,10_41,4,24,6_03,4_90,2,7_14_77,2_00_98,10_44_47,2,2_09_61,1,26_04,4,1,3_29,3,62_24,8_31,1_60_02,2,8,6_03,7_89_67,2_95_46,23,8_03,20,25,4_16,5,8,2_32,4,2_77,6,18_55,46_01,3,2_95_46,54,8,36_09,5,5_72_11,49,4,1,2_77,18,8,17_55,1_56_91,3,3_41,25,4_16,6_93,4_25_73,71,17,4_01,94,31,1_79_19,2,2_95_46,78_73,18,1,4_35,23,1_10_11,7_55,5,51_67,3,79_83,98,84,2,2_95_46,32_67,8,36_09,4,1,48_65,10_75,2,60_87,71,6,3_46,8,58_54,3,2_95_46,8_24,14_00,18_68,2,19,1_60,2,3_11,8,54_96,2,2_09_20,17,25,1_50_97,3,24,24,0]] , dtype=tf.intaa) # noqa: E231 # fmt: on # In 1991 , the remains of Russian Tsar Nicholas II and his family # ( except for Alexei and Maria ) are discovered . # The voice of Nicholas's young son , Tsarevich Alexei Nikolaevich , narrates the # remainder of the story . 1883 Western Siberia , # a young Grigori Rasputin is asked by his father and a group of men to perform magic . # Rasputin has a vision and denounces one of the men as a horse thief . 
Although his # father initially slaps him for making such an accusation , Rasputin watches as the # man is chased outside and beaten . Twenty years later , Rasputin sees a vision of # the Virgin Mary , prompting him to become a priest . Rasputin quickly becomes famous , # with people , even a bishop , begging for his blessing . <eod> </s> <eos> # fmt: off lowercase__ : Optional[int] = [33,12_97,2,1,10_09,4,11_09,1_17_39,47_62,3_58,5,25,2_45,22,17_06,17,2_00_98,5,32_15,21,37,11_10,3,13,10_41,4,24,6_03,4_90,2,7_14_77,2_00_98,10_44_47,2,2_09_61,1,26_04,4,1,3_29,3,62_24,8_31,1_60_02,2,8,6_03,7_89_67,2_95_46,23,8_03,20,25,4_16,5,8,2_32,4,2_77,6,18_55,46_01,3,2_95_46,54,8,36_09,5,5_72_11,49,4,1,2_77,18,8,17_55,1_56_91,3,3_41,25,4_16,6_93,4_25_73,71,17,4_01,94,31,1_79_19,2,2_95_46,78_73,18,1,4_35,23,1_10_11,7_55,5,51_67,3,79_83,98,84,2,2_95_46,32_67,8,36_09,4,1,48_65,10_75,2,60_87,71,6,3_46,8,58_54,3,2_95_46,8_24,14_00,18_68,2,19,1_60,2,3_11,8,54_96,2,2_09_20,17,25,1_50_97,3,24,24,0,33,1,18_57,2,1,10_09,4,11_09,1_17_39,47_62,3_58,5,25,2_45,28,11_10,3,13,10_41,4,24,6_03,4_90,2,7_14_77,2_00_98,10_44_47,2,2_09_61,1,26_04,4,1,3_29,3,0] # noqa: E231 # fmt: on # In 1991, the remains of Russian Tsar Nicholas II and his family ( # except for Alexei and Maria ) are discovered. The voice of young son, # Tsarevich Alexei Nikolaevich, narrates the remainder of the story. # 1883 Western Siberia, a young Grigori Rasputin is asked by his father # and a group of men to perform magic. Rasputin has a vision and # denounces one of the men as a horse thief. Although his father initially # slaps him for making such an accusation, Rasputin watches as the man # is chased outside and beaten. Twenty years later, Rasputin sees a vision # of the Virgin Mary, prompting him to become a priest. # Rasputin quickly becomes famous, with people, even a bishop, begging for # his blessing. <unk> <unk> <eos> In the 1990s, the remains of Russian Tsar # Nicholas II and his family were discovered. The voice of <unk> young son, # Tsarevich Alexei Nikolaevich, narrates the remainder of the story.<eos> lowercase__ : List[Any] = model.generate(SCREAMING_SNAKE_CASE_ , max_length=2_00 , do_sample=SCREAMING_SNAKE_CASE_) self.assertListEqual(output_ids[0].numpy().tolist() , SCREAMING_SNAKE_CASE_)
12
def mean_absolute_deviation(nums: list) -> float:
    """Return the mean absolute deviation of a non-empty list of numbers."""
    if not nums:  # Makes sure that the list is not empty
        raise ValueError("List is empty")
    average = sum(nums) / len(nums)  # Calculate the average
    return sum(abs(x - average) for x in nums) / len(nums)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
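# Hedged worked example: for [1, 2, 3, 4] the mean is 2.5 and the absolute
# deviations are 1.5, 0.5, 0.5, 1.5, so the MAD is exactly 1.0.
assert mean_absolute_deviation([1, 2, 3, 4]) == 1.0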
12
1
import json
import os
import tempfile

import transformers

import datasets
from utils import generate_example_dataset, get_duration


SPEED_TEST_N_EXAMPLES = 500_000

RESULTS_BASEPATH, RESULTS_FILENAME = os.path.split(__file__)
RESULTS_FILE_PATH = os.path.join(RESULTS_BASEPATH, "results", RESULTS_FILENAME.replace(".py", ".json"))


@get_duration
def map(dataset: datasets.Dataset, **kwargs):
    _ = dataset.map(**kwargs)


@get_duration
def filter(dataset: datasets.Dataset, **kwargs):
    _ = dataset.filter(**kwargs)


def benchmark_map_filter():
    # The result-dict keys below are reconstructed descriptively; the original
    # labels were lost in extraction.
    times = {"num examples": SPEED_TEST_N_EXAMPLES}
    with tempfile.TemporaryDirectory() as tmp_dir:
        features = datasets.Features({"text": datasets.Value("string"), "numbers": datasets.Value("float32")})
        dataset = generate_example_dataset(
            os.path.join(tmp_dir, "dataset.arrow"), features, num_examples=SPEED_TEST_N_EXAMPLES
        )

        tokenizer = transformers.AutoTokenizer.from_pretrained("bert-base-cased", use_fast=True)

        def tokenize(examples):
            return tokenizer(examples["text"])

        times["map identity"] = map(dataset)
        times["map identity batched"] = map(dataset, batched=True)
        times["map no-op batched"] = map(dataset, function=lambda x: None, batched=True)

        with dataset.formatted_as(type="numpy"):
            times["map no-op batched numpy"] = map(dataset, function=lambda x: None, batched=True)

        with dataset.formatted_as(type="pandas"):
            times["map no-op batched pandas"] = map(dataset, function=lambda x: None, batched=True)

        with dataset.formatted_as(type="torch", columns="numbers"):
            times["map no-op batched pytorch"] = map(dataset, function=lambda x: None, batched=True)

        with dataset.formatted_as(type="tensorflow", columns="numbers"):
            times["map no-op batched tensorflow"] = map(dataset, function=lambda x: None, batched=True)

        times["map fast-tokenizer batched"] = map(dataset, function=tokenize, batched=True)

        times["filter"] = filter(dataset)

        # Activate later when tokenizer support batched inputs
        # with dataset.formatted_as(type='numpy'):
        #     times[func.__name__ + " fast-tokenizer batched numpy"] = func(dataset, function=tokenize, batched=True)

    with open(RESULTS_FILE_PATH, "wb") as f:
        f.write(json.dumps(times).encode("utf-8"))


if __name__ == "__main__":  # useful to run the profiler
    benchmark_map_filter()
713
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


_import_structure = {
    "configuration_swinv2": ["SWINV2_PRETRAINED_CONFIG_ARCHIVE_MAP", "Swinv2Config"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_swinv2"] = [
        "SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST",
        "Swinv2ForImageClassification",
        "Swinv2ForMaskedImageModeling",
        "Swinv2Model",
        "Swinv2PreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_swinv2 import SWINV2_PRETRAINED_CONFIG_ARCHIVE_MAP, Swinv2Config

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_swinv2 import (
            SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST,
            Swinv2ForImageClassification,
            Swinv2ForMaskedImageModeling,
            Swinv2Model,
            Swinv2PreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
677
0
def is_sum_subset(arr: list[int], required_sum: int) -> bool:
    """Dynamic-programming check: does some subset of `arr` sum to `required_sum`?"""
    arr_len = len(arr)
    subset = [[False] * (required_sum + 1) for _ in range(arr_len + 1)]

    # for each arr value, a sum of zero(0) can be formed by not taking any element
    # hence True/1
    for i in range(arr_len + 1):
        subset[i][0] = True

    # sum is not zero and set is empty then false
    for j in range(1, required_sum + 1):
        subset[0][j] = False

    for i in range(1, arr_len + 1):
        for j in range(1, required_sum + 1):
            if arr[i - 1] > j:
                subset[i][j] = subset[i - 1][j]
            if arr[i - 1] <= j:
                subset[i][j] = subset[i - 1][j] or subset[i - 1][j - arr[i - 1]]

    return subset[arr_len][required_sum]


if __name__ == "__main__":
    import doctest

    doctest.testmod()
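# Hedged usage example for is_sum_subset above: [3, 34, 4, 12, 5, 2] contains
# a subset summing to 9 (4 + 5) but none summing to 30 (without 34 the total
# tops out at 26, and any subset containing 34 already exceeds 30).
assert is_sum_subset([3, 34, 4, 12, 5, 2], 9)
assert not is_sum_subset([3, 34, 4, 12, 5, 2], 30)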
158
'''simple docstring''' import unittest import numpy as np from transformers.testing_utils import require_flax, require_tf, require_torch from transformers.utils import ( expand_dims, flatten_dict, is_flax_available, is_tf_available, is_torch_available, reshape, squeeze, transpose, ) if is_flax_available(): import jax.numpy as jnp if is_tf_available(): import tensorflow as tf if is_torch_available(): import torch class SCREAMING_SNAKE_CASE_ ( unittest.TestCase ): def _snake_case ( self ) -> List[str]: '''simple docstring''' __SCREAMING_SNAKE_CASE : Tuple = { '''task_specific_params''': { '''summarization''': {'''length_penalty''': 1.0, '''max_length''': 1_2_8, '''min_length''': 1_2, '''num_beams''': 4}, '''summarization_cnn''': {'''length_penalty''': 2.0, '''max_length''': 1_4_2, '''min_length''': 5_6, '''num_beams''': 4}, '''summarization_xsum''': {'''length_penalty''': 1.0, '''max_length''': 6_2, '''min_length''': 1_1, '''num_beams''': 6}, } } __SCREAMING_SNAKE_CASE : Optional[int] = { '''task_specific_params.summarization.length_penalty''': 1.0, '''task_specific_params.summarization.max_length''': 1_2_8, '''task_specific_params.summarization.min_length''': 1_2, '''task_specific_params.summarization.num_beams''': 4, '''task_specific_params.summarization_cnn.length_penalty''': 2.0, '''task_specific_params.summarization_cnn.max_length''': 1_4_2, '''task_specific_params.summarization_cnn.min_length''': 5_6, '''task_specific_params.summarization_cnn.num_beams''': 4, '''task_specific_params.summarization_xsum.length_penalty''': 1.0, '''task_specific_params.summarization_xsum.max_length''': 6_2, '''task_specific_params.summarization_xsum.min_length''': 1_1, '''task_specific_params.summarization_xsum.num_beams''': 6, } self.assertEqual(flatten_dict(lowercase ) , lowercase ) def _snake_case ( self ) -> Any: '''simple docstring''' __SCREAMING_SNAKE_CASE : Any = np.random.randn(3 , 4 ) self.assertTrue(np.allclose(transpose(lowercase ) , x.transpose() ) ) __SCREAMING_SNAKE_CASE : Optional[int] = np.random.randn(3 , 4 , 5 ) self.assertTrue(np.allclose(transpose(lowercase , axes=(1, 2, 0) ) , x.transpose((1, 2, 0) ) ) ) @require_torch def _snake_case ( self ) -> List[str]: '''simple docstring''' __SCREAMING_SNAKE_CASE : Optional[int] = np.random.randn(3 , 4 ) __SCREAMING_SNAKE_CASE : Union[str, Any] = torch.tensor(lowercase ) self.assertTrue(np.allclose(transpose(lowercase ) , transpose(lowercase ).numpy() ) ) __SCREAMING_SNAKE_CASE : Optional[int] = np.random.randn(3 , 4 , 5 ) __SCREAMING_SNAKE_CASE : Optional[Any] = torch.tensor(lowercase ) self.assertTrue(np.allclose(transpose(lowercase , axes=(1, 2, 0) ) , transpose(lowercase , axes=(1, 2, 0) ).numpy() ) ) @require_tf def _snake_case ( self ) -> Union[str, Any]: '''simple docstring''' __SCREAMING_SNAKE_CASE : Any = np.random.randn(3 , 4 ) __SCREAMING_SNAKE_CASE : Any = tf.constant(lowercase ) self.assertTrue(np.allclose(transpose(lowercase ) , transpose(lowercase ).numpy() ) ) __SCREAMING_SNAKE_CASE : str = np.random.randn(3 , 4 , 5 ) __SCREAMING_SNAKE_CASE : int = tf.constant(lowercase ) self.assertTrue(np.allclose(transpose(lowercase , axes=(1, 2, 0) ) , transpose(lowercase , axes=(1, 2, 0) ).numpy() ) ) @require_flax def _snake_case ( self ) -> str: '''simple docstring''' __SCREAMING_SNAKE_CASE : Union[str, Any] = np.random.randn(3 , 4 ) __SCREAMING_SNAKE_CASE : List[Any] = jnp.array(lowercase ) self.assertTrue(np.allclose(transpose(lowercase ) , np.asarray(transpose(lowercase ) ) ) ) __SCREAMING_SNAKE_CASE : Optional[Any] = np.random.randn(3 , 
4 , 5 ) __SCREAMING_SNAKE_CASE : Optional[Any] = jnp.array(lowercase ) self.assertTrue(np.allclose(transpose(lowercase , axes=(1, 2, 0) ) , np.asarray(transpose(lowercase , axes=(1, 2, 0) ) ) ) ) def _snake_case ( self ) -> List[Any]: '''simple docstring''' __SCREAMING_SNAKE_CASE : Dict = np.random.randn(3 , 4 ) self.assertTrue(np.allclose(reshape(lowercase , (4, 3) ) , np.reshape(lowercase , (4, 3) ) ) ) __SCREAMING_SNAKE_CASE : Tuple = np.random.randn(3 , 4 , 5 ) self.assertTrue(np.allclose(reshape(lowercase , (1_2, 5) ) , np.reshape(lowercase , (1_2, 5) ) ) ) @require_torch def _snake_case ( self ) -> Tuple: '''simple docstring''' __SCREAMING_SNAKE_CASE : Optional[Any] = np.random.randn(3 , 4 ) __SCREAMING_SNAKE_CASE : Any = torch.tensor(lowercase ) self.assertTrue(np.allclose(reshape(lowercase , (4, 3) ) , reshape(lowercase , (4, 3) ).numpy() ) ) __SCREAMING_SNAKE_CASE : List[Any] = np.random.randn(3 , 4 , 5 ) __SCREAMING_SNAKE_CASE : Dict = torch.tensor(lowercase ) self.assertTrue(np.allclose(reshape(lowercase , (1_2, 5) ) , reshape(lowercase , (1_2, 5) ).numpy() ) ) @require_tf def _snake_case ( self ) -> Optional[Any]: '''simple docstring''' __SCREAMING_SNAKE_CASE : Dict = np.random.randn(3 , 4 ) __SCREAMING_SNAKE_CASE : List[Any] = tf.constant(lowercase ) self.assertTrue(np.allclose(reshape(lowercase , (4, 3) ) , reshape(lowercase , (4, 3) ).numpy() ) ) __SCREAMING_SNAKE_CASE : Optional[int] = np.random.randn(3 , 4 , 5 ) __SCREAMING_SNAKE_CASE : List[str] = tf.constant(lowercase ) self.assertTrue(np.allclose(reshape(lowercase , (1_2, 5) ) , reshape(lowercase , (1_2, 5) ).numpy() ) ) @require_flax def _snake_case ( self ) -> List[Any]: '''simple docstring''' __SCREAMING_SNAKE_CASE : Union[str, Any] = np.random.randn(3 , 4 ) __SCREAMING_SNAKE_CASE : str = jnp.array(lowercase ) self.assertTrue(np.allclose(reshape(lowercase , (4, 3) ) , np.asarray(reshape(lowercase , (4, 3) ) ) ) ) __SCREAMING_SNAKE_CASE : Optional[int] = np.random.randn(3 , 4 , 5 ) __SCREAMING_SNAKE_CASE : List[str] = jnp.array(lowercase ) self.assertTrue(np.allclose(reshape(lowercase , (1_2, 5) ) , np.asarray(reshape(lowercase , (1_2, 5) ) ) ) ) def _snake_case ( self ) -> Dict: '''simple docstring''' __SCREAMING_SNAKE_CASE : str = np.random.randn(1 , 3 , 4 ) self.assertTrue(np.allclose(squeeze(lowercase ) , np.squeeze(lowercase ) ) ) __SCREAMING_SNAKE_CASE : Optional[int] = np.random.randn(1 , 4 , 1 , 5 ) self.assertTrue(np.allclose(squeeze(lowercase , axis=2 ) , np.squeeze(lowercase , axis=2 ) ) ) @require_torch def _snake_case ( self ) -> str: '''simple docstring''' __SCREAMING_SNAKE_CASE : Optional[int] = np.random.randn(1 , 3 , 4 ) __SCREAMING_SNAKE_CASE : Optional[int] = torch.tensor(lowercase ) self.assertTrue(np.allclose(squeeze(lowercase ) , squeeze(lowercase ).numpy() ) ) __SCREAMING_SNAKE_CASE : str = np.random.randn(1 , 4 , 1 , 5 ) __SCREAMING_SNAKE_CASE : Optional[Any] = torch.tensor(lowercase ) self.assertTrue(np.allclose(squeeze(lowercase , axis=2 ) , squeeze(lowercase , axis=2 ).numpy() ) ) @require_tf def _snake_case ( self ) -> Tuple: '''simple docstring''' __SCREAMING_SNAKE_CASE : Union[str, Any] = np.random.randn(1 , 3 , 4 ) __SCREAMING_SNAKE_CASE : Optional[Any] = tf.constant(lowercase ) self.assertTrue(np.allclose(squeeze(lowercase ) , squeeze(lowercase ).numpy() ) ) __SCREAMING_SNAKE_CASE : List[str] = np.random.randn(1 , 4 , 1 , 5 ) __SCREAMING_SNAKE_CASE : Union[str, Any] = tf.constant(lowercase ) self.assertTrue(np.allclose(squeeze(lowercase , axis=2 ) , squeeze(lowercase , axis=2 ).numpy() 
) ) @require_flax def _snake_case ( self ) -> Dict: '''simple docstring''' __SCREAMING_SNAKE_CASE : Union[str, Any] = np.random.randn(1 , 3 , 4 ) __SCREAMING_SNAKE_CASE : int = jnp.array(lowercase ) self.assertTrue(np.allclose(squeeze(lowercase ) , np.asarray(squeeze(lowercase ) ) ) ) __SCREAMING_SNAKE_CASE : Optional[int] = np.random.randn(1 , 4 , 1 , 5 ) __SCREAMING_SNAKE_CASE : Any = jnp.array(lowercase ) self.assertTrue(np.allclose(squeeze(lowercase , axis=2 ) , np.asarray(squeeze(lowercase , axis=2 ) ) ) ) def _snake_case ( self ) -> Union[str, Any]: '''simple docstring''' __SCREAMING_SNAKE_CASE : Optional[int] = np.random.randn(3 , 4 ) self.assertTrue(np.allclose(expand_dims(lowercase , axis=1 ) , np.expand_dims(lowercase , axis=1 ) ) ) @require_torch def _snake_case ( self ) -> Union[str, Any]: '''simple docstring''' __SCREAMING_SNAKE_CASE : List[Any] = np.random.randn(3 , 4 ) __SCREAMING_SNAKE_CASE : List[Any] = torch.tensor(lowercase ) self.assertTrue(np.allclose(expand_dims(lowercase , axis=1 ) , expand_dims(lowercase , axis=1 ).numpy() ) ) @require_tf def _snake_case ( self ) -> Optional[int]: '''simple docstring''' __SCREAMING_SNAKE_CASE : Tuple = np.random.randn(3 , 4 ) __SCREAMING_SNAKE_CASE : Any = tf.constant(lowercase ) self.assertTrue(np.allclose(expand_dims(lowercase , axis=1 ) , expand_dims(lowercase , axis=1 ).numpy() ) ) @require_flax def _snake_case ( self ) -> Tuple: '''simple docstring''' __SCREAMING_SNAKE_CASE : Optional[Any] = np.random.randn(3 , 4 ) __SCREAMING_SNAKE_CASE : Optional[Any] = jnp.array(lowercase ) self.assertTrue(np.allclose(expand_dims(lowercase , axis=1 ) , np.asarray(expand_dims(lowercase , axis=1 ) ) ) )
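The tests above exercise `transpose`, `reshape`, `squeeze`, and `expand_dims` helpers that accept NumPy, PyTorch, TensorFlow, or JAX inputs and dispatch on the concrete tensor type. A minimal sketch of that dispatch pattern, assuming only NumPy and PyTorch are installed (the real helpers also cover TensorFlow and JAX):

import numpy as np
import torch


def reshape(array, newshape):
    # Dispatch on the tensor's own framework instead of converting between
    # frameworks; a sketch only, the real helpers handle more array types.
    if isinstance(array, np.ndarray):
        return np.reshape(array, newshape)
    if isinstance(array, torch.Tensor):
        return array.reshape(*newshape)
    raise ValueError(f"Type not supported for reshape: {type(array)}.")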
158
1
"""simple docstring""" from math import sqrt def _lowerCAmelCase ( _UpperCamelCase = 1_000_000 ): """simple docstring""" _lowercase: int = 0 _lowercase: int = 0 _lowercase: int while num_cuboids <= limit: max_cuboid_size += 1 for sum_shortest_sides in range(2 , 2 * max_cuboid_size + 1 ): if sqrt(sum_shortest_sides**2 + max_cuboid_size**2 ).is_integer(): num_cuboids += ( min(_UpperCamelCase , sum_shortest_sides // 2 ) - max(1 , sum_shortest_sides - max_cuboid_size ) + 1 ) return max_cuboid_size if __name__ == "__main__": print(f"""{solution() = }""")
712
"""simple docstring""" # Copyright (c) 2021-, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. #################################################################################################### # # Note: If when running this conversion script you're getting an exception: # ModuleNotFoundError: No module named 'megatron.model.enums' # you need to tell python where to find the clone of Megatron-LM, e.g.: # # cd /tmp # git clone https://github.com/NVIDIA/Megatron-LM # PYTHONPATH=/tmp/Megatron-LM python src/transformers/models/megatron_gpt2/convert_megatron_gpt2_checkpoint.py ... # # if you already have it cloned elsewhere, simply adjust the path to the existing path # # If the training was done using a Megatron-LM fork, e.g., # https://github.com/microsoft/Megatron-DeepSpeed/ then chances are that you need to have that one # in your path, i.e., /path/to/Megatron-DeepSpeed/ # import argparse import os import re import zipfile import torch from transformers import AutoTokenizer, GPTaConfig def _lowerCAmelCase ( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase=0 ): """simple docstring""" if name is None: _lowercase: str = None else: _lowercase: Optional[Any] = '''.''' * max(0 , spaces - 2 ) + '''# {:''' + str(50 - spaces ) + '''s}''' _lowercase: Union[str, Any] = fmt.format(_UpperCamelCase ) # Print and recurse (if needed). 
if isinstance(_UpperCamelCase , _UpperCamelCase ): if msg is not None: print(_UpperCamelCase ) for k in val.keys(): recursive_print(_UpperCamelCase , val[k] , spaces + 2 ) elif isinstance(_UpperCamelCase , torch.Tensor ): print(_UpperCamelCase , ''':''' , val.size() ) else: print(_UpperCamelCase , ''':''' , _UpperCamelCase ) def _lowerCAmelCase ( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ): """simple docstring""" _lowercase: str = param.size() if checkpoint_version == 1.0: # version 1.0 stores [num_heads * hidden_size * num_splits, :] _lowercase: Union[str, Any] = (num_heads, hidden_size, num_splits) + input_shape[1:] _lowercase: Any = param.view(*_UpperCamelCase ) _lowercase: Optional[Any] = param.transpose(0 , 2 ) _lowercase: List[Any] = param.transpose(1 , 2 ).contiguous() elif checkpoint_version >= 2.0: # other versions store [num_heads * num_splits * hidden_size, :] _lowercase: int = (num_heads, num_splits, hidden_size) + input_shape[1:] _lowercase: Optional[Any] = param.view(*_UpperCamelCase ) _lowercase: Dict = param.transpose(0 , 1 ).contiguous() _lowercase: Optional[Any] = param.view(*_UpperCamelCase ) return param def _lowerCAmelCase ( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ): """simple docstring""" _lowercase: List[Any] = {} # old versions did not store training args _lowercase: int = input_state_dict.get('''args''' , _UpperCamelCase ) if ds_args is not None: # do not make the user write a config file when the exact dimensions/sizes are already in the checkpoint # from pprint import pprint # pprint(vars(ds_args)) _lowercase: str = ds_args.padded_vocab_size _lowercase: Dict = ds_args.max_position_embeddings _lowercase: List[str] = ds_args.hidden_size _lowercase: List[Any] = ds_args.num_layers _lowercase: Optional[int] = ds_args.num_attention_heads _lowercase: Any = ds_args.ffn_hidden_size # pprint(config) # The number of heads. _lowercase: Optional[int] = config.n_head # The hidden_size per head. _lowercase: Dict = config.n_embd // config.n_head # Megatron-LM checkpoint version if "checkpoint_version" in input_state_dict.keys(): _lowercase: List[str] = input_state_dict['''checkpoint_version'''] else: _lowercase: List[Any] = 0.0 # The model. _lowercase: Dict = input_state_dict['''model'''] # The language model. _lowercase: str = model['''language_model'''] # The embeddings. _lowercase: List[Any] = lm['''embedding'''] # The word embeddings. _lowercase: Tuple = embeddings['''word_embeddings''']['''weight'''] # Truncate the embedding table to vocab_size rows. _lowercase: Any = word_embeddings[: config.vocab_size, :] _lowercase: Optional[Any] = word_embeddings # The position embeddings. _lowercase: Optional[Any] = embeddings['''position_embeddings''']['''weight'''] # Read the causal mask dimension (seqlen). [max_sequence_length, hidden_size] _lowercase: List[str] = pos_embeddings.size(0 ) if n_positions != config.n_positions: raise ValueError( f'''pos_embeddings.max_sequence_length={n_positions} and config.n_positions={config.n_positions} don\'t match''' ) # Store the position embeddings. _lowercase: Any = pos_embeddings # The transformer. _lowercase: Optional[Any] = lm['''transformer'''] if '''transformer''' in lm.keys() else lm['''encoder'''] # The regex to extract layer names. _lowercase: str = re.compile(R'''layers\.(\d+)\.([a-z0-9_.]+)\.([a-z]+)''' ) # The simple map of names for "automated" rules. 
_lowercase: Any = { '''attention.dense''': '''.attn.c_proj.''', '''self_attention.dense''': '''.attn.c_proj.''', '''mlp.dense_h_to_4h''': '''.mlp.c_fc.''', '''mlp.dense_4h_to_h''': '''.mlp.c_proj.''', } # Extract the layers. for key, val in transformer.items(): # Match the name. _lowercase: str = layer_re.match(_UpperCamelCase ) # Stop if that's not a layer if m is None: break # The index of the layer. _lowercase: Optional[Any] = int(m.group(1 ) ) # The name of the operation. _lowercase: Tuple = m.group(2 ) # Is it a weight or a bias? _lowercase: Any = m.group(3 ) # The name of the layer. _lowercase: Dict = f'''transformer.h.{layer_idx}''' # For layernorm(s), simply store the layer norm. if op_name.endswith('''layernorm''' ): _lowercase: Any = '''ln_1''' if op_name.startswith('''input''' ) else '''ln_2''' _lowercase: Tuple = val # Transpose the QKV matrix. elif ( op_name == "attention.query_key_value" or op_name == "self_attention.query_key_value" ) and weight_or_bias == "weight": # Insert a tensor of 1x1xDxD bias. _lowercase: Any = torch.tril(torch.ones((n_positions, n_positions) , dtype=torch.floataa ) ).view( 1 , 1 , _UpperCamelCase , _UpperCamelCase ) _lowercase: List[Any] = causal_mask # Insert a "dummy" tensor for masked_bias. _lowercase: Any = torch.tensor(-1e4 , dtype=torch.floataa ) _lowercase: Tuple = masked_bias _lowercase: List[str] = fix_query_key_value_ordering(_UpperCamelCase , _UpperCamelCase , 3 , _UpperCamelCase , _UpperCamelCase ) # Megatron stores (3*D) x D but transformers-GPT2 expects D x 3*D. _lowercase: str = out_val.transpose(0 , 1 ).contiguous() # Store. _lowercase: Tuple = out_val # Transpose the bias. elif ( op_name == "attention.query_key_value" or op_name == "self_attention.query_key_value" ) and weight_or_bias == "bias": _lowercase: List[Any] = fix_query_key_value_ordering(_UpperCamelCase , _UpperCamelCase , 3 , _UpperCamelCase , _UpperCamelCase ) # Store. No change of shape. _lowercase: Union[str, Any] = out_val # Transpose the weights. elif weight_or_bias == "weight": _lowercase: str = megatron_to_transformers[op_name] _lowercase: str = val.transpose(0 , 1 ) # Copy the bias. elif weight_or_bias == "bias": _lowercase: List[Any] = megatron_to_transformers[op_name] _lowercase: Optional[int] = val # DEBUG. assert config.n_layer == layer_idx + 1 # The final layernorm. _lowercase: str = transformer['''final_layernorm.weight'''] _lowercase: Dict = transformer['''final_layernorm.bias'''] # For LM head, transformers' wants the matrix to weight embeddings. _lowercase: Dict = word_embeddings # It should be done! return output_state_dict def _lowerCAmelCase ( ): """simple docstring""" _lowercase: List[Any] = argparse.ArgumentParser() parser.add_argument('''--print-checkpoint-structure''' , action='''store_true''' ) parser.add_argument( '''path_to_checkpoint''' , type=_UpperCamelCase , help='''Path to the checkpoint file (.zip archive or direct .pt file)''' , ) parser.add_argument( '''--config_file''' , default='''''' , type=_UpperCamelCase , help='''An optional config json file describing the pre-trained model.''' , ) _lowercase: int = parser.parse_args() # Extract the basename. _lowercase: Tuple = os.path.dirname(args.path_to_checkpoint ) # Load the model. 
# the .zip is very optional, let's keep it for backward compatibility print(f'''Extracting PyTorch state dictionary from {args.path_to_checkpoint}''' ) if args.path_to_checkpoint.endswith('''.zip''' ): with zipfile.ZipFile(args.path_to_checkpoint , '''r''' ) as checkpoint: with checkpoint.open('''release/mp_rank_00/model_optim_rng.pt''' ) as pytorch_dict: _lowercase: str = torch.load(_UpperCamelCase , map_location='''cpu''' ) else: _lowercase: Optional[int] = torch.load(args.path_to_checkpoint , map_location='''cpu''' ) _lowercase: Dict = input_state_dict.get('''args''' , _UpperCamelCase ) # Read the config, or default to the model released by NVIDIA. if args.config_file == "": if ds_args is not None: if ds_args.bias_gelu_fusion: _lowercase: List[str] = '''gelu_fast''' elif ds_args.openai_gelu: _lowercase: Optional[int] = '''gelu_new''' else: _lowercase: Any = '''gelu''' else: # in the very early days this used to be "gelu_new" _lowercase: Optional[int] = '''gelu_new''' # Spell out all parameters in case the defaults change. _lowercase: List[str] = GPTaConfig( vocab_size=50_257 , n_positions=1_024 , n_embd=1_024 , n_layer=24 , n_head=16 , n_inner=4_096 , activation_function=_UpperCamelCase , resid_pdrop=0.1 , embd_pdrop=0.1 , attn_pdrop=0.1 , layer_norm_epsilon=1e-5 , initializer_range=0.02 , summary_type='''cls_index''' , summary_use_proj=_UpperCamelCase , summary_activation=_UpperCamelCase , summary_proj_to_labels=_UpperCamelCase , summary_first_dropout=0.1 , scale_attn_weights=_UpperCamelCase , use_cache=_UpperCamelCase , bos_token_id=50_256 , eos_token_id=50_256 , ) else: _lowercase: Optional[int] = GPTaConfig.from_json_file(args.config_file ) _lowercase: str = ['''GPT2LMHeadModel'''] # Convert. print('''Converting''' ) _lowercase: Optional[int] = convert_megatron_checkpoint(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) # Print the structure of converted state dict. if args.print_checkpoint_structure: recursive_print(_UpperCamelCase , _UpperCamelCase ) # Add tokenizer class info to config # see https://github.com/huggingface/transformers/issues/13906) if ds_args is not None: _lowercase: Optional[int] = ds_args.tokenizer_type if tokenizer_type == "GPT2BPETokenizer": _lowercase: int = '''gpt2''' elif tokenizer_type == "PretrainedFromHF": _lowercase: str = ds_args.tokenizer_name_or_path else: raise ValueError(f'''Unrecognized tokenizer_type {tokenizer_type}''' ) else: _lowercase: str = '''gpt2''' _lowercase: List[Any] = AutoTokenizer.from_pretrained(_UpperCamelCase ) _lowercase: Any = type(_UpperCamelCase ).__name__ _lowercase: Tuple = tokenizer_class # Store the config to file. print('''Saving config''' ) config.save_pretrained(_UpperCamelCase ) # Save tokenizer based on args print(f'''Adding {tokenizer_class} tokenizer files''' ) tokenizer.save_pretrained(_UpperCamelCase ) # Store the state_dict to file. _lowercase: int = os.path.join(_UpperCamelCase , '''pytorch_model.bin''' ) print(f'''Saving checkpoint to "{output_checkpoint_file}"''' ) torch.save(_UpperCamelCase , _UpperCamelCase ) #################################################################################################### if __name__ == "__main__": main() ####################################################################################################
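Going by the argparse definitions above (a positional `path_to_checkpoint`, plus optional `--config_file` and `--print-checkpoint-structure`), a typical invocation would look roughly like the following; the checkpoint path is illustrative:

# PYTHONPATH=/tmp/Megatron-LM python convert_megatron_gpt2_checkpoint.py \
#     --print-checkpoint-structure \
#     /path/to/megatron_gpt2_345m.zip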
272
0
'''simple docstring'''
import argparse
import re

import requests
import torch

# git clone https://github.com/salesforce/BLIP.git
from models.blip import blip_decoder
from models.blip_itm import blip_itm
from models.blip_vqa import blip_vqa
from PIL import Image
from torchvision import transforms
from torchvision.transforms.functional import InterpolationMode

from transformers import (
    BertTokenizer,
    BlipConfig,
    BlipForConditionalGeneration,
    BlipForImageTextRetrieval,
    BlipForQuestionAnswering,
)


def load_demo_image(image_size, device):
    img_url = 'https://storage.googleapis.com/sfr-vision-language-research/BLIP/demo.jpg'
    raw_image = Image.open(requests.get(img_url, stream=True).raw).convert('RGB')

    transform = transforms.Compose(
        [
            transforms.Resize((image_size, image_size), interpolation=InterpolationMode.BICUBIC),
            transforms.ToTensor(),
            transforms.Normalize((0.48_145_466, 0.4_578_275, 0.40_821_073), (0.26_862_954, 0.26_130_258, 0.27_577_711)),
        ]
    )
    image = transform(raw_image).unsqueeze(0).to(device)
    return image


def rename_key(key):
    if "visual_encoder" in key:
        key = re.sub('visual_encoder*', 'vision_model.encoder', key)
    if "blocks" in key:
        key = re.sub(r'blocks', 'layers', key)
    if "attn" in key:
        key = re.sub(r'attn', 'self_attn', key)
    if "norm1" in key:
        key = re.sub(r'norm1', 'layer_norm1', key)
    if "norm2" in key:
        key = re.sub(r'norm2', 'layer_norm2', key)
    if "encoder.norm" in key:
        key = re.sub(r'encoder.norm', 'post_layernorm', key)
    if "encoder.patch_embed.proj" in key:
        key = re.sub(r'encoder.patch_embed.proj', 'embeddings.patch_embedding', key)
    if "encoder.pos_embed" in key:
        key = re.sub(r'encoder.pos_embed', 'embeddings.position_embedding', key)
    if "encoder.cls_token" in key:
        key = re.sub(r'encoder.cls_token', 'embeddings.class_embedding', key)
    if "self_attn" in key:
        key = re.sub(r'self_attn.proj', 'self_attn.projection', key)
    return key


@torch.no_grad()
def convert_blip_checkpoint(pytorch_dump_folder_path, config_path=None):
    if config_path is not None:
        config = BlipConfig.from_pretrained(config_path)
    else:
        config = BlipConfig(projection_dim=512, text_config={}, vision_config={})

    hf_model = BlipForConditionalGeneration(config).eval()

    model_url = 'https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_base_capfilt_large.pth'
    pt_model = blip_decoder(pretrained=model_url, image_size=384, vit='base')
    pt_model = pt_model.eval()

    modified_state_dict = pt_model.state_dict()
    for key in modified_state_dict.copy():
        value = modified_state_dict.pop(key)
        renamed_key = rename_key(key)
        modified_state_dict[renamed_key] = value

    hf_model.load_state_dict(modified_state_dict)

    image_size = 384
    image = load_demo_image(image_size=image_size, device='cpu')
    tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')
    input_ids = tokenizer(['a picture of']).input_ids

    out = hf_model.generate(image, input_ids)
    assert out[0].tolist() == [30_522, 1037, 3861, 1997, 1037, 2450, 3564, 2006, 1996, 3509, 2007, 2014, 3899, 102]

    out = hf_model.generate(image)
    assert out[0].tolist() == [30_522, 1037, 2450, 3564, 2006, 1996, 3509, 2007, 2014, 3899, 102]

    if pytorch_dump_folder_path is not None:
        hf_model.save_pretrained(pytorch_dump_folder_path)

    # model_url = 'https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_vqa.pth'
    model_url = (
        'https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_base_vqa_capfilt_large.pth'
    )
    vqa_model = blip_vqa(pretrained=model_url, image_size=image_size, vit='base')
    vqa_model.eval()

    modified_state_dict = vqa_model.state_dict()
    for key in modified_state_dict.copy():
        value = modified_state_dict.pop(key)
        renamed_key = rename_key(key)
        modified_state_dict[renamed_key] = value

    hf_vqa_model = BlipForQuestionAnswering(config)
    hf_vqa_model.load_state_dict(modified_state_dict)

    question = ['How many dogs are in this image?']
    question_input_ids = tokenizer(question, return_tensors='pt').input_ids
    answer = hf_vqa_model.generate(question_input_ids, image)
    print(tokenizer.decode(answer[0]))
    assert tokenizer.decode(answer[0]) == "[UNK] 1 [SEP]"
    if pytorch_dump_folder_path is not None:
        hf_vqa_model.save_pretrained(pytorch_dump_folder_path + '_vqa')

    model_url = 'https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_base_retrieval_coco.pth'
    itm_model = blip_itm(pretrained=model_url, image_size=image_size, vit='base')
    itm_model.eval()

    modified_state_dict = itm_model.state_dict()
    for key in modified_state_dict.copy():
        value = modified_state_dict.pop(key)
        renamed_key = rename_key(key)
        modified_state_dict[renamed_key] = value

    hf_itm_model = BlipForImageTextRetrieval(config)

    question = ['A picture of a woman with a dog sitting in a beach']
    question_input_ids = tokenizer(
        question,
        return_tensors='pt',
        padding='max_length',
        truncation=True,
        max_length=35,
    ).input_ids

    hf_itm_model.load_state_dict(modified_state_dict)
    hf_itm_model.eval()

    out_itm = hf_itm_model(question_input_ids, image, use_itm_head=True)
    out = hf_itm_model(question_input_ids, image, use_itm_head=False)

    assert out[0].item() == 0.2_110_687_494_277_954
    assert torch.nn.functional.softmax(out_itm[0], dim=1)[:, 1].item() == 0.45_698_845_386_505_127

    if pytorch_dump_folder_path is not None:
        hf_itm_model.save_pretrained(pytorch_dump_folder_path + '_itm')


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
    parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert')
    args = parser.parse_args()

    convert_blip_checkpoint(args.pytorch_dump_folder_path, args.config_path)
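Going by the two flags registered above, a typical run would look roughly like this (the script filename is illustrative; both flags default to None, in which case nothing is written to disk):

# python convert_blip_original_pytorch_to_hf.py \
#     --pytorch_dump_folder_path ./blip-base-converted \
#     --config_path ./blip_config.json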
3
'''simple docstring'''
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_torch_available,
)

_import_structure = {
    'configuration_falcon': ['FALCON_PRETRAINED_CONFIG_ARCHIVE_MAP', 'FalconConfig'],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['modeling_falcon'] = [
        'FALCON_PRETRAINED_MODEL_ARCHIVE_LIST',
        'FalconForCausalLM',
        'FalconModel',
        'FalconPreTrainedModel',
        'FalconForSequenceClassification',
        'FalconForTokenClassification',
        'FalconForQuestionAnswering',
    ]

if TYPE_CHECKING:
    from .configuration_falcon import FALCON_PRETRAINED_CONFIG_ARCHIVE_MAP, FalconConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_falcon import (
            FALCON_PRETRAINED_MODEL_ARCHIVE_LIST,
            FalconForCausalLM,
            FalconForQuestionAnswering,
            FalconForSequenceClassification,
            FalconForTokenClassification,
            FalconModel,
            FalconPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
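A minimal sketch of the lazy-import pattern this module uses, assuming only the standard library; transformers' `_LazyModule` is more elaborate (it also backs `dir()` and validates the import structure), so this illustrates the idea rather than the real implementation:

import importlib
import types


class LazyModule(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        # Map every exported name to the submodule that defines it.
        self._name_to_module = {attr: mod for mod, attrs in import_structure.items() for attr in attrs}

    def __getattr__(self, attr):
        if attr not in self._name_to_module:
            raise AttributeError(f"module {self.__name__!r} has no attribute {attr!r}")
        # The defining submodule is imported only on first attribute access.
        submodule = importlib.import_module("." + self._name_to_module[attr], self.__name__)
        return getattr(submodule, attr)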
3
1
'''simple docstring'''
from maths.is_square_free import is_square_free
from maths.prime_factors import prime_factors


def mobius(number: int) -> int:
    """Return the value of the Mobius function for ``number``."""
    factors = prime_factors(number)
    if is_square_free(factors):
        # Square-free: -1 for an odd number of prime factors, +1 for an even number.
        return -1 if len(factors) % 2 else 1
    # A squared prime divides the number.
    return 0


if __name__ == "__main__":
    import doctest

    doctest.testmod()
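A few known values, assuming the `maths.is_square_free` and `maths.prime_factors` helpers from the surrounding repository are importable:

# mobius(1) = 1 (no prime factors), mobius(6) = 1 (two distinct primes),
# mobius(7) = -1 (one prime), mobius(12) = 0 (divisible by the square 4).
assert [mobius(n) for n in (1, 6, 7, 12)] == [1, 1, -1, 0]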
718
'''simple docstring''' import math import os import unittest from transformers import MegatronBertConfig, is_torch_available from transformers.models.auto import get_values from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( MODEL_FOR_PRETRAINING_MAPPING, MegatronBertForCausalLM, MegatronBertForMaskedLM, MegatronBertForMultipleChoice, MegatronBertForNextSentencePrediction, MegatronBertForPreTraining, MegatronBertForQuestionAnswering, MegatronBertForSequenceClassification, MegatronBertForTokenClassification, MegatronBertModel, ) class UpperCAmelCase : def __init__( self , __A , __A=13 , __A=7 , __A=True , __A=True , __A=True , __A=True , __A=99 , __A=64 , __A=32 , __A=5 , __A=4 , __A=37 , __A="gelu" , __A=0.1 , __A=0.1 , __A=512 , __A=16 , __A=2 , __A=0.0_2 , __A=3 , __A=4 , __A=None , ): __UpperCAmelCase = parent __UpperCAmelCase = batch_size __UpperCAmelCase = seq_length __UpperCAmelCase = is_training __UpperCAmelCase = use_input_mask __UpperCAmelCase = use_token_type_ids __UpperCAmelCase = use_labels __UpperCAmelCase = vocab_size __UpperCAmelCase = hidden_size __UpperCAmelCase = embedding_size __UpperCAmelCase = num_hidden_layers __UpperCAmelCase = num_attention_heads __UpperCAmelCase = intermediate_size __UpperCAmelCase = hidden_act __UpperCAmelCase = hidden_dropout_prob __UpperCAmelCase = attention_probs_dropout_prob __UpperCAmelCase = max_position_embeddings __UpperCAmelCase = type_vocab_size __UpperCAmelCase = type_sequence_label_size __UpperCAmelCase = initializer_range __UpperCAmelCase = num_labels __UpperCAmelCase = num_choices __UpperCAmelCase = scope def __lowerCamelCase ( self ): __UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) __UpperCAmelCase = None if self.use_input_mask: __UpperCAmelCase = random_attention_mask([self.batch_size, self.seq_length] ) __UpperCAmelCase = None if self.use_token_type_ids: __UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) __UpperCAmelCase = None __UpperCAmelCase = None __UpperCAmelCase = None if self.use_labels: __UpperCAmelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size ) __UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) __UpperCAmelCase = ids_tensor([self.batch_size] , self.num_choices ) __UpperCAmelCase = self.get_config() return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def __lowerCamelCase ( self ): return MegatronBertConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , embedding_size=self.embedding_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=__A , initializer_range=self.initializer_range , ) def __lowerCamelCase ( self , __A , __A , __A , __A , __A , __A , __A ): __UpperCAmelCase = MegatronBertModel(config=__A ) model.to(__A ) model.eval() __UpperCAmelCase = model(__A , attention_mask=__A , 
token_type_ids=__A ) __UpperCAmelCase = model(__A , token_type_ids=__A ) __UpperCAmelCase = model(__A ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) ) def __lowerCamelCase ( self , __A , __A , __A , __A , __A , __A , __A ): __UpperCAmelCase = MegatronBertForMaskedLM(config=__A ) model.to(__A ) model.eval() __UpperCAmelCase = model(__A , attention_mask=__A , token_type_ids=__A , labels=__A ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def __lowerCamelCase ( self , __A , __A , __A , __A , __A , __A , __A ): __UpperCAmelCase = MegatronBertForCausalLM(config=__A ) model.to(__A ) model.eval() __UpperCAmelCase = model(__A , attention_mask=__A , token_type_ids=__A , labels=__A ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def __lowerCamelCase ( self , __A , __A , __A , __A , __A , __A , __A ): __UpperCAmelCase = MegatronBertForNextSentencePrediction(config=__A ) model.to(__A ) model.eval() __UpperCAmelCase = model( __A , attention_mask=__A , token_type_ids=__A , labels=__A , ) self.parent.assertEqual(result.logits.shape , (self.batch_size, 2) ) def __lowerCamelCase ( self , __A , __A , __A , __A , __A , __A , __A ): __UpperCAmelCase = MegatronBertForPreTraining(config=__A ) model.to(__A ) model.eval() __UpperCAmelCase = model( __A , attention_mask=__A , token_type_ids=__A , labels=__A , next_sentence_label=__A , ) self.parent.assertEqual(result.prediction_logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) self.parent.assertEqual(result.seq_relationship_logits.shape , (self.batch_size, 2) ) def __lowerCamelCase ( self , __A , __A , __A , __A , __A , __A , __A ): __UpperCAmelCase = MegatronBertForQuestionAnswering(config=__A ) model.to(__A ) model.eval() __UpperCAmelCase = model( __A , attention_mask=__A , token_type_ids=__A , start_positions=__A , end_positions=__A , ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def __lowerCamelCase ( self , __A , __A , __A , __A , __A , __A , __A ): __UpperCAmelCase = self.num_labels __UpperCAmelCase = MegatronBertForSequenceClassification(__A ) model.to(__A ) model.eval() __UpperCAmelCase = model(__A , attention_mask=__A , token_type_ids=__A , labels=__A ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def __lowerCamelCase ( self , __A , __A , __A , __A , __A , __A , __A ): __UpperCAmelCase = self.num_labels __UpperCAmelCase = MegatronBertForTokenClassification(config=__A ) model.to(__A ) model.eval() __UpperCAmelCase = model(__A , attention_mask=__A , token_type_ids=__A , labels=__A ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def __lowerCamelCase ( self , __A , __A , __A , __A , __A , __A , __A ): __UpperCAmelCase = self.num_choices __UpperCAmelCase = MegatronBertForMultipleChoice(config=__A ) model.to(__A ) model.eval() __UpperCAmelCase = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() __UpperCAmelCase = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() __UpperCAmelCase = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() __UpperCAmelCase = model( __A , attention_mask=__A , 
token_type_ids=__A , labels=__A , ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) ) def __lowerCamelCase ( self ): __UpperCAmelCase = self.prepare_config_and_inputs() ( ( __UpperCAmelCase ) , ( __UpperCAmelCase ) , ( __UpperCAmelCase ) , ( __UpperCAmelCase ) , ( __UpperCAmelCase ) , ( __UpperCAmelCase ) , ( __UpperCAmelCase ) , ) = config_and_inputs __UpperCAmelCase = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': input_mask} return config, inputs_dict @require_torch class UpperCAmelCase ( UpperCAmelCase_ , UpperCAmelCase_ , unittest.TestCase ): _A : str = ( ( MegatronBertModel, MegatronBertForMaskedLM, MegatronBertForCausalLM, MegatronBertForMultipleChoice, MegatronBertForNextSentencePrediction, MegatronBertForPreTraining, MegatronBertForQuestionAnswering, MegatronBertForSequenceClassification, MegatronBertForTokenClassification, ) if is_torch_available() else () ) _A : Dict = ( { """feature-extraction""": MegatronBertModel, """fill-mask""": MegatronBertForMaskedLM, """question-answering""": MegatronBertForQuestionAnswering, """text-classification""": MegatronBertForSequenceClassification, """text-generation""": MegatronBertForCausalLM, """token-classification""": MegatronBertForTokenClassification, """zero-shot""": MegatronBertForSequenceClassification, } if is_torch_available() else {} ) _A : List[str] = True # test_resize_embeddings = False _A : Tuple = False def __lowerCamelCase ( self , __A , __A , __A=False ): __UpperCAmelCase = super()._prepare_for_class(__A , __A , return_labels=__A ) if return_labels: if model_class in get_values(__A ): __UpperCAmelCase = torch.zeros( (self.model_tester.batch_size, self.model_tester.seq_length) , dtype=torch.long , device=__A ) __UpperCAmelCase = torch.zeros( self.model_tester.batch_size , dtype=torch.long , device=__A ) return inputs_dict def __lowerCamelCase ( self ): __UpperCAmelCase = MegatronBertModelTester(self ) __UpperCAmelCase = ConfigTester(self , config_class=__A , hidden_size=37 ) def __lowerCamelCase ( self ): self.config_tester.run_common_tests() def __lowerCamelCase ( self ): __UpperCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_megatron_bert_model(*__A ) def __lowerCamelCase ( self ): __UpperCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_megatron_bert_for_masked_lm(*__A ) def __lowerCamelCase ( self ): __UpperCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_megatron_bert_for_multiple_choice(*__A ) def __lowerCamelCase ( self ): __UpperCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_megatron_bert_for_next_sequence_prediction(*__A ) def __lowerCamelCase ( self ): __UpperCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_megatron_bert_for_pretraining(*__A ) def __lowerCamelCase ( self ): __UpperCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_megatron_bert_for_question_answering(*__A ) def __lowerCamelCase ( self ): __UpperCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_megatron_bert_for_sequence_classification(*__A ) def __lowerCamelCase ( self ): __UpperCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_megatron_bert_for_token_classification(*__A ) def _lowerCAmelCase ( _lowerCAmelCase )-> Dict: return torch.tensor( 
_lowerCAmelCase , dtype=torch.long , device=_lowerCAmelCase , ) _A: Any = 1E-4 @require_torch @require_sentencepiece @require_tokenizers class UpperCAmelCase ( unittest.TestCase ): @slow @unittest.skip('Model is not available.' ) def __lowerCamelCase ( self ): __UpperCAmelCase = 'nvidia/megatron-bert-uncased-345m' if "MYDIR" in os.environ: __UpperCAmelCase = os.path.join(os.environ['MYDIR'] , __A ) __UpperCAmelCase = MegatronBertModel.from_pretrained(__A ) model.to(__A ) model.half() __UpperCAmelCase = _long_tensor([[101, 7_110, 1_005, 1_056, 2_023, 11_333, 17_413, 1_029, 102]] ) with torch.no_grad(): __UpperCAmelCase = model(__A )[0] __UpperCAmelCase = torch.Size((1, 9, 1_024) ) self.assertEqual(output.shape , __A ) __UpperCAmelCase = [-0.6_0_4_0, -0.2_5_1_7, -0.1_0_2_5, 0.3_4_2_0, -0.6_7_5_8, -0.0_0_1_7, -0.1_0_8_9, -0.1_9_9_0, 0.5_7_2_8] for ii in range(3 ): for jj in range(3 ): __UpperCAmelCase = output[0, ii, jj] __UpperCAmelCase = expected[3 * ii + jj] __UpperCAmelCase = 'ii={} jj={} a={} b={}'.format(__A , __A , __A , __A ) self.assertTrue(math.isclose(__A , __A , rel_tol=__A , abs_tol=__A ) , msg=__A )
617
0
import string


def decrypt(message: str) -> None:
    """Brute-force a Caesar cipher by printing the message decrypted with every key."""
    for key in range(len(string.ascii_uppercase)):
        translated = ''
        for symbol in message:
            if symbol in string.ascii_uppercase:
                num = string.ascii_uppercase.find(symbol)
                num = num - key
                if num < 0:
                    num = num + len(string.ascii_uppercase)
                translated = translated + string.ascii_uppercase[num]
            else:
                translated = translated + symbol
        print(f'''Decryption using Key #{key}: {translated}''')


def main() -> None:
    message = input('Encrypted message: ')
    message = message.upper()
    decrypt(message)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    main()
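For example, brute-forcing the ciphertext "KHOOR" ("HELLO" shifted by 3) prints all 26 candidates, and the key #3 line reads "HELLO":

decrypt("KHOOR")
# ...
# Decryption using Key #3: HELLO
# ...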
198
import copy from typing import Any, Dict, List, Optional, Union import numpy as np import torch from ...audio_utils import mel_filter_bank, spectrogram, window_function from ...feature_extraction_sequence_utils import SequenceFeatureExtractor from ...feature_extraction_utils import BatchFeature from ...utils import TensorType, logging lowercase = logging.get_logger(__name__) class UpperCamelCase_ ( snake_case_ ): '''simple docstring''' lowerCAmelCase = ['''input_features''', '''is_longer'''] def __init__( self , a=64 , a=4_80_00 , a=4_80 , a=10 , a=10_24 , a=0.0 , a=False , a = 0 , a = 1_40_00 , a = None , a = "fusion" , a = "repeatpad" , **a , ) -> List[str]: super().__init__( feature_size=a , sampling_rate=a , padding_value=a , return_attention_mask=a , **a , ) snake_case_ = top_db snake_case_ = truncation snake_case_ = padding snake_case_ = fft_window_size snake_case_ = (fft_window_size >> 1) + 1 snake_case_ = hop_length snake_case_ = max_length_s snake_case_ = max_length_s * sampling_rate snake_case_ = sampling_rate snake_case_ = frequency_min snake_case_ = frequency_max snake_case_ = mel_filter_bank( num_frequency_bins=self.nb_frequency_bins , num_mel_filters=a , min_frequency=a , max_frequency=a , sampling_rate=a , norm=a , mel_scale='htk' , ) snake_case_ = mel_filter_bank( num_frequency_bins=self.nb_frequency_bins , num_mel_filters=a , min_frequency=a , max_frequency=a , sampling_rate=a , norm='slaney' , mel_scale='slaney' , ) def _UpperCamelCase ( self ) -> Dict[str, Any]: snake_case_ = copy.deepcopy(self.__dict__ ) snake_case_ = self.__class__.__name__ if "mel_filters" in output: del output["mel_filters"] if "mel_filters_slaney" in output: del output["mel_filters_slaney"] return output def _UpperCamelCase ( self , a , a = None ) -> np.ndarray: snake_case_ = spectrogram( a , window_function(self.fft_window_size , 'hann' ) , frame_length=self.fft_window_size , hop_length=self.hop_length , power=2.0 , mel_filters=a , log_mel='dB' , ) return log_mel_spectrogram.T def _UpperCamelCase ( self , a , a , a ) -> Tuple: snake_case_ = np.array_split(list(range(0 , total_frames - chunk_frames + 1 ) ) , 3 ) if len(ranges[1] ) == 0: # if the audio is too short, we just use the first chunk snake_case_ = [0] if len(ranges[2] ) == 0: # if the audio is too short, we just use the first chunk snake_case_ = [0] # randomly choose index for each part snake_case_ = np.random.choice(ranges[0] ) snake_case_ = np.random.choice(ranges[1] ) snake_case_ = np.random.choice(ranges[2] ) snake_case_ = mel[idx_front : idx_front + chunk_frames, :] snake_case_ = mel[idx_middle : idx_middle + chunk_frames, :] snake_case_ = mel[idx_back : idx_back + chunk_frames, :] snake_case_ = torch.tensor(mel[None, None, :] ) snake_case_ = torch.nn.functional.interpolate( a , size=[chunk_frames, 64] , mode='bilinear' , align_corners=a ) snake_case_ = mel_shrink[0][0].numpy() snake_case_ = np.stack([mel_shrink, mel_chunk_front, mel_chunk_middle, mel_chunk_back] , axis=0 ) return mel_fusion def _UpperCamelCase ( self , a , a , a , a ) -> np.array: if waveform.shape[0] > max_length: if truncation == "rand_trunc": snake_case_ = True # random crop to max_length (for compatibility) -> this should be handled by self.pad snake_case_ = len(a ) - max_length snake_case_ = np.random.randint(0 , overflow + 1 ) snake_case_ = waveform[idx : idx + max_length] snake_case_ = self._np_extract_fbank_features(a , self.mel_filters_slaney )[None, :] elif truncation == "fusion": snake_case_ = self._np_extract_fbank_features(a , self.mel_filters ) 
snake_case_ = max_length // self.hop_length + 1 # the +1 related to how the spectrogram is computed snake_case_ = mel.shape[0] if chunk_frames == total_frames: # there is a corner case where the audio length is larger than max_length but smaller than max_length+hop_length. # In this case, we just use the whole audio. snake_case_ = np.stack([mel, mel, mel, mel] , axis=0 ) snake_case_ = False else: snake_case_ = self._random_mel_fusion(a , a , a ) snake_case_ = True else: raise NotImplementedError(F'''data_truncating {truncation} not implemented''' ) else: snake_case_ = False # only use repeat as a new possible value for padding. you repeat the audio before applying the usual max_length padding if waveform.shape[0] < max_length: if padding == "repeat": snake_case_ = int(max_length / len(a ) ) snake_case_ = np.stack(np.tile(a , n_repeat + 1 ) )[:max_length] if padding == "repeatpad": snake_case_ = int(max_length / len(a ) ) snake_case_ = np.stack(np.tile(a , a ) ) snake_case_ = np.pad(a , (0, max_length - waveform.shape[0]) , mode='constant' , constant_values=0 ) if truncation == "fusion": snake_case_ = self._np_extract_fbank_features(a , self.mel_filters ) snake_case_ = np.stack([input_mel, input_mel, input_mel, input_mel] , axis=0 ) else: snake_case_ = self._np_extract_fbank_features(a , self.mel_filters_slaney )[None, :] return input_mel, longer def __call__( self , a , a = None , a = None , a = None , a = None , a = None , **a , ) -> BatchFeature: snake_case_ = truncation if truncation is not None else self.truncation snake_case_ = padding if padding else self.padding if sampling_rate is not None: if sampling_rate != self.sampling_rate: raise ValueError( F'''The model corresponding to this feature extractor: {self.__class__.__name__} was trained using a''' F''' sampling rate of {self.sampling_rate}. Please make sure that the provided `raw_speech` input''' F''' was sampled with {self.sampling_rate} and not {sampling_rate}.''' ) else: logger.warning( 'It is strongly recommended to pass the `sampling_rate` argument to this function. ' 'Failing to do so can result in silent errors that might be hard to debug.' ) snake_case_ = isinstance(a , np.ndarray ) and len(raw_speech.shape ) > 1 if is_batched_numpy and len(raw_speech.shape ) > 2: raise ValueError(F'''Only mono-channel audio is supported for input to {self}''' ) snake_case_ = is_batched_numpy or ( isinstance(a , (list, tuple) ) and (isinstance(raw_speech[0] , (np.ndarray, tuple, list) )) ) if is_batched: snake_case_ = [np.asarray(a , dtype=np.floataa ) for speech in raw_speech] elif not is_batched and not isinstance(a , np.ndarray ): snake_case_ = np.asarray(a , dtype=np.floataa ) elif isinstance(a , np.ndarray ) and raw_speech.dtype is np.dtype(np.floataa ): snake_case_ = raw_speech.astype(np.floataa ) # always return batch if not is_batched: snake_case_ = [np.asarray(a )] # convert to mel spectrogram, truncate and pad if needed. 
snake_case_ = [ self._get_input_mel(a , max_length if max_length else self.nb_max_samples , a , a ) for waveform in raw_speech ] snake_case_ = [] snake_case_ = [] for mel, longer in padded_inputs: input_mel.append(a ) is_longer.append(a ) if truncation == "fusion" and sum(a ) == 0: # if no audio is longer than 10s, then randomly select one audio to be longer snake_case_ = np.random.randint(0 , len(a ) ) snake_case_ = True if isinstance(input_mel[0] , a ): snake_case_ = [np.asarray(a , dtype=np.floataa ) for feature in input_mel] # is_longer is a list of bool snake_case_ = [[longer] for longer in is_longer] snake_case_ = {'input_features': input_mel, 'is_longer': is_longer} snake_case_ = BatchFeature(a ) if return_tensors is not None: snake_case_ = input_features.convert_to_tensors(a ) return input_features
198
1
import gc import unittest import torch from parameterized import parameterized from diffusers import AutoencoderKL from diffusers.utils import floats_tensor, load_hf_numpy, require_torch_gpu, slow, torch_all_close, torch_device from diffusers.utils.import_utils import is_xformers_available from diffusers.utils.testing_utils import enable_full_determinism from .test_modeling_common import ModelTesterMixin, UNetTesterMixin enable_full_determinism() class __UpperCAmelCase ( __A , __A , unittest.TestCase ): """simple docstring""" _lowerCamelCase = AutoencoderKL _lowerCamelCase = """sample""" _lowerCamelCase = 1e-2 @property def snake_case_ ( self ): __a = 4 __a = 3 __a = (32, 32) __a = floats_tensor((batch_size, num_channels) + sizes ).to(__A ) return {"sample": image} @property def snake_case_ ( self ): return (3, 32, 32) @property def snake_case_ ( self ): return (3, 32, 32) def snake_case_ ( self ): __a = { """block_out_channels""": [32, 64], """in_channels""": 3, """out_channels""": 3, """down_block_types""": ["""DownEncoderBlock2D""", """DownEncoderBlock2D"""], """up_block_types""": ["""UpDecoderBlock2D""", """UpDecoderBlock2D"""], """latent_channels""": 4, } __a = self.dummy_input return init_dict, inputs_dict def snake_case_ ( self ): pass def snake_case_ ( self ): pass @unittest.skipIf(torch_device == """mps""" , """Gradient checkpointing skipped on MPS""" ) def snake_case_ ( self ): # enable deterministic behavior for gradient checkpointing __a , __a = self.prepare_init_args_and_inputs_for_common() __a = self.model_class(**__A ) model.to(__A ) assert not model.is_gradient_checkpointing and model.training __a = model(**__A ).sample # run the backwards pass on the model. For backwards pass, for simplicity purpose, # we won't calculate the loss and rather backprop on out.sum() model.zero_grad() __a = torch.randn_like(__A ) __a = (out - labels).mean() loss.backward() # re-instantiate the model now enabling gradient checkpointing __a = self.model_class(**__A ) # clone model model_a.load_state_dict(model.state_dict() ) model_a.to(__A ) model_a.enable_gradient_checkpointing() assert model_a.is_gradient_checkpointing and model_a.training __a = model_a(**__A ).sample # run the backwards pass on the model. 
For backwards pass, for simplicity purpose, # we won't calculate the loss and rather backprop on out.sum() model_a.zero_grad() __a = (out_a - labels).mean() loss_a.backward() # compare the output and parameters gradients self.assertTrue((loss - loss_a).abs() < 1E-5 ) __a = dict(model.named_parameters() ) __a = dict(model_a.named_parameters() ) for name, param in named_params.items(): self.assertTrue(torch_all_close(param.grad.data , named_params_a[name].grad.data , atol=5E-5 ) ) def snake_case_ ( self ): __a , __a = AutoencoderKL.from_pretrained("""fusing/autoencoder-kl-dummy""" , output_loading_info=__A ) self.assertIsNotNone(__A ) self.assertEqual(len(loading_info["""missing_keys"""] ) , 0 ) model.to(__A ) __a = model(**self.dummy_input ) assert image is not None, "Make sure output is not None" def snake_case_ ( self ): __a = AutoencoderKL.from_pretrained("""fusing/autoencoder-kl-dummy""" ) __a = model.to(__A ) model.eval() if torch_device == "mps": __a = torch.manual_seed(0 ) else: __a = torch.Generator(device=__A ).manual_seed(0 ) __a = torch.randn( 1 , model.config.in_channels , model.config.sample_size , model.config.sample_size , generator=torch.manual_seed(0 ) , ) __a = image.to(__A ) with torch.no_grad(): __a = model(__A , sample_posterior=__A , generator=__A ).sample __a = output[0, -1, -3:, -3:].flatten().cpu() # Since the VAE Gaussian prior's generator is seeded on the appropriate device, # the expected output slices are not the same for CPU and GPU. if torch_device == "mps": __a = torch.tensor( [ -4.0078E-01, -3.8323E-04, -1.2681E-01, -1.1462E-01, 2.0095E-01, 1.0893E-01, -8.8247E-02, -3.0361E-01, -9.8644E-03, ] ) elif torch_device == "cpu": __a = torch.tensor( [-0.1352, 0.0878, 0.0419, -0.0818, -0.1069, 0.0688, -0.1458, -0.4446, -0.0026] ) else: __a = torch.tensor( [-0.2421, 0.4642, 0.2507, -0.0438, 0.0682, 0.3160, -0.2018, -0.0727, 0.2485] ) self.assertTrue(torch_all_close(__A , __A , rtol=1E-2 ) ) @slow class __UpperCAmelCase ( unittest.TestCase ): """simple docstring""" def snake_case_ ( self , __A , __A ): return f'''gaussian_noise_s={seed}_shape={'_'.join([str(__A ) for s in shape] )}.npy''' def snake_case_ ( self ): # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() def snake_case_ ( self , __A=0 , __A=(4, 3, 512, 512) , __A=False ): __a = torch.floataa if fpaa else torch.floataa __a = torch.from_numpy(load_hf_numpy(self.get_file_format(__A , __A ) ) ).to(__A ).to(__A ) return image def snake_case_ ( self , __A="CompVis/stable-diffusion-v1-4" , __A=False ): __a = """fp16""" if fpaa else None __a = torch.floataa if fpaa else torch.floataa __a = AutoencoderKL.from_pretrained( __A , subfolder="""vae""" , torch_dtype=__A , revision=__A , ) model.to(__A ).eval() return model def snake_case_ ( self , __A=0 ): if torch_device == "mps": return torch.manual_seed(__A ) return torch.Generator(device=__A ).manual_seed(__A ) @parameterized.expand( [ # fmt: off [33, [-0.1603, 0.9878, -0.0495, -0.0790, -0.2709, 0.8375, -0.2060, -0.0824], [-0.2395, 0.0098, 0.0102, -0.0709, -0.2840, -0.0274, -0.0718, -0.1824]], [47, [-0.2376, 0.1168, 0.1332, -0.4840, -0.2508, -0.0791, -0.0493, -0.4089], [0.0350, 0.0847, 0.0467, 0.0344, -0.0842, -0.0547, -0.0633, -0.1131]], # fmt: on ] ) def snake_case_ ( self , __A , __A , __A ): __a = self.get_sd_vae_model() __a = self.get_sd_image(__A ) __a = self.get_generator(__A ) with torch.no_grad(): __a = model(__A , generator=__A , sample_posterior=__A ).sample assert sample.shape == image.shape __a = sample[-1, -2:, 
-2:, :2].flatten().float().cpu() __a = torch.tensor(expected_slice_mps if torch_device == """mps""" else expected_slice ) assert torch_all_close(__A , __A , atol=3E-3 ) @parameterized.expand( [ # fmt: off [33, [-0.0513, 0.0289, 1.3799, 0.2166, -0.2573, -0.0871, 0.5103, -0.0999]], [47, [-0.4128, -0.1320, -0.3704, 0.1965, -0.4116, -0.2332, -0.3340, 0.2247]], # fmt: on ] ) @require_torch_gpu def snake_case_ ( self , __A , __A ): __a = self.get_sd_vae_model(fpaa=__A ) __a = self.get_sd_image(__A , fpaa=__A ) __a = self.get_generator(__A ) with torch.no_grad(): __a = model(__A , generator=__A , sample_posterior=__A ).sample assert sample.shape == image.shape __a = sample[-1, -2:, :2, -2:].flatten().float().cpu() __a = torch.tensor(__A ) assert torch_all_close(__A , __A , atol=1E-2 ) @parameterized.expand( [ # fmt: off [33, [-0.1609, 0.9866, -0.0487, -0.0777, -0.2716, 0.8368, -0.2055, -0.0814], [-0.2395, 0.0098, 0.0102, -0.0709, -0.2840, -0.0274, -0.0718, -0.1824]], [47, [-0.2377, 0.1147, 0.1333, -0.4841, -0.2506, -0.0805, -0.0491, -0.4085], [0.0350, 0.0847, 0.0467, 0.0344, -0.0842, -0.0547, -0.0633, -0.1131]], # fmt: on ] ) def snake_case_ ( self , __A , __A , __A ): __a = self.get_sd_vae_model() __a = self.get_sd_image(__A ) with torch.no_grad(): __a = model(__A ).sample assert sample.shape == image.shape __a = sample[-1, -2:, -2:, :2].flatten().float().cpu() __a = torch.tensor(expected_slice_mps if torch_device == """mps""" else expected_slice ) assert torch_all_close(__A , __A , atol=3E-3 ) @parameterized.expand( [ # fmt: off [13, [-0.2051, -0.1803, -0.2311, -0.2114, -0.3292, -0.3574, -0.2953, -0.3323]], [37, [-0.2632, -0.2625, -0.2199, -0.2741, -0.4539, -0.4990, -0.3720, -0.4925]], # fmt: on ] ) @require_torch_gpu def snake_case_ ( self , __A , __A ): __a = self.get_sd_vae_model() __a = self.get_sd_image(__A , shape=(3, 4, 64, 64) ) with torch.no_grad(): __a = model.decode(__A ).sample assert list(sample.shape ) == [3, 3, 512, 512] __a = sample[-1, -2:, :2, -2:].flatten().cpu() __a = torch.tensor(__A ) assert torch_all_close(__A , __A , atol=1E-3 ) @parameterized.expand( [ # fmt: off [27, [-0.0369, 0.0207, -0.0776, -0.0682, -0.1747, -0.1930, -0.1465, -0.2039]], [16, [-0.1628, -0.2134, -0.2747, -0.2642, -0.3774, -0.4404, -0.3687, -0.4277]], # fmt: on ] ) @require_torch_gpu def snake_case_ ( self , __A , __A ): __a = self.get_sd_vae_model(fpaa=__A ) __a = self.get_sd_image(__A , shape=(3, 4, 64, 64) , fpaa=__A ) with torch.no_grad(): __a = model.decode(__A ).sample assert list(sample.shape ) == [3, 3, 512, 512] __a = sample[-1, -2:, :2, -2:].flatten().float().cpu() __a = torch.tensor(__A ) assert torch_all_close(__A , __A , atol=5E-3 ) @parameterized.expand([(13,), (16,), (27,)] ) @require_torch_gpu @unittest.skipIf(not is_xformers_available() , reason="""xformers is not required when using PyTorch 2.0.""" ) def snake_case_ ( self , __A ): __a = self.get_sd_vae_model(fpaa=__A ) __a = self.get_sd_image(__A , shape=(3, 4, 64, 64) , fpaa=__A ) with torch.no_grad(): __a = model.decode(__A ).sample model.enable_xformers_memory_efficient_attention() with torch.no_grad(): __a = model.decode(__A ).sample assert list(sample.shape ) == [3, 3, 512, 512] assert torch_all_close(__A , __A , atol=1E-1 ) @parameterized.expand([(13,), (16,), (37,)] ) @require_torch_gpu @unittest.skipIf(not is_xformers_available() , reason="""xformers is not required when using PyTorch 2.0.""" ) def snake_case_ ( self , __A ): __a = self.get_sd_vae_model() __a = self.get_sd_image(__A , shape=(3, 4, 64, 64) ) with 
torch.no_grad(): __a = model.decode(__A ).sample model.enable_xformers_memory_efficient_attention() with torch.no_grad(): __a = model.decode(__A ).sample assert list(sample.shape ) == [3, 3, 512, 512] assert torch_all_close(__A , __A , atol=1E-2 ) @parameterized.expand( [ # fmt: off [33, [-0.3001, 0.0918, -2.6984, -3.9720, -3.2099, -5.0353, 1.7338, -0.2065, 3.4267]], [47, [-1.5030, -4.3871, -6.0355, -9.1157, -1.6661, -2.7853, 2.1607, -5.0823, 2.5633]], # fmt: on ] ) def snake_case_ ( self , __A , __A ): __a = self.get_sd_vae_model() __a = self.get_sd_image(__A ) __a = self.get_generator(__A ) with torch.no_grad(): __a = model.encode(__A ).latent_dist __a = dist.sample(generator=__A ) assert list(sample.shape ) == [image.shape[0], 4] + [i // 8 for i in image.shape[2:]] __a = sample[0, -1, -3:, -3:].flatten().cpu() __a = torch.tensor(__A ) __a = 3E-3 if torch_device != """mps""" else 1E-2 assert torch_all_close(__A , __A , atol=__A )
703
import warnings from typing import List, Optional, Union from ...processing_utils import ProcessorMixin from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy from ...utils import TensorType class __UpperCAmelCase ( __A ): """simple docstring""" _lowerCamelCase = ["""image_processor""", """tokenizer"""] _lowerCamelCase = """LayoutLMv2ImageProcessor""" _lowerCamelCase = ("""LayoutXLMTokenizer""", """LayoutXLMTokenizerFast""") def __init__( self , __A=None , __A=None , **__A ): if "feature_extractor" in kwargs: warnings.warn( """The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`""" """ instead.""" , __A , ) __a = kwargs.pop("""feature_extractor""" ) __a = image_processor if image_processor is not None else feature_extractor if image_processor is None: raise ValueError("""You need to specify an `image_processor`.""" ) if tokenizer is None: raise ValueError("""You need to specify a `tokenizer`.""" ) super().__init__(__A , __A ) def __call__( self , __A , __A = None , __A = None , __A = None , __A = None , __A = True , __A = False , __A = None , __A = None , __A = 0 , __A = None , __A = None , __A = None , __A = False , __A = False , __A = False , __A = False , __A = True , __A = None , **__A , ): # verify input if self.image_processor.apply_ocr and (boxes is not None): raise ValueError( """You cannot provide bounding boxes """ """if you initialized the image processor with apply_ocr set to True.""" ) if self.image_processor.apply_ocr and (word_labels is not None): raise ValueError( """You cannot provide word labels if you initialized the image processor with apply_ocr set to True.""" ) if return_overflowing_tokens is True and return_offsets_mapping is False: raise ValueError("""You cannot return overflowing tokens without returning the offsets mapping.""" ) # first, apply the image processor __a = self.image_processor(images=__A , return_tensors=__A ) # second, apply the tokenizer if text is not None and self.image_processor.apply_ocr and text_pair is None: if isinstance(__A , __A ): __a = [text] # add batch dimension (as the image processor always adds a batch dimension) __a = features["""words"""] __a = self.tokenizer( text=text if text is not None else features["""words"""] , text_pair=text_pair if text_pair is not None else None , boxes=boxes if boxes is not None else features["""boxes"""] , word_labels=__A , add_special_tokens=__A , padding=__A , truncation=__A , max_length=__A , stride=__A , pad_to_multiple_of=__A , return_token_type_ids=__A , return_attention_mask=__A , return_overflowing_tokens=__A , return_special_tokens_mask=__A , return_offsets_mapping=__A , return_length=__A , verbose=__A , return_tensors=__A , **__A , ) # add pixel values __a = features.pop("""pixel_values""" ) if return_overflowing_tokens is True: __a = self.get_overflowing_images(__A , encoded_inputs["""overflow_to_sample_mapping"""] ) __a = images return encoded_inputs def snake_case_ ( self , __A , __A ): # in case there's an overflow, ensure each `input_ids` sample is mapped to its corresponding image __a = [] for sample_idx in overflow_to_sample_mapping: images_with_overflow.append(images[sample_idx] ) if len(__A ) != len(__A ): raise ValueError( """Expected length of images to be the same as the length of `overflow_to_sample_mapping`, but got""" f''' {len(__A )} and {len(__A )}''' ) return images_with_overflow def snake_case_ ( self , *__A , **__A ): return self.tokenizer.batch_decode(*__A , **__A ) 
def snake_case_ ( self , *__A , **__A ): return self.tokenizer.decode(*__A , **__A ) @property def snake_case_ ( self ): return ["input_ids", "bbox", "attention_mask", "image"] @property def snake_case_ ( self ): warnings.warn( """`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.""" , __A , ) return self.image_processor_class @property def snake_case_ ( self ): warnings.warn( """`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.""" , __A , ) return self.image_processor
209
0
import unittest

import numpy as np

from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available

from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_video_inputs


if is_torch_available():
    import torch

if is_vision_available():
    from PIL import Image

    from transformers import VivitImageProcessor


class VivitImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        num_frames=10,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
        crop_size=None,
    ):
        size = size if size is not None else {"shortest_edge": 18}
        crop_size = crop_size if crop_size is not None else {"height": 18, "width": 18}

        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.num_frames = num_frames
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.crop_size = crop_size

    def prepare_image_processor_dict(self):
        return {
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_normalize": self.do_normalize,
            "do_resize": self.do_resize,
            "size": self.size,
            "crop_size": self.crop_size,
        }


@require_torch
@require_vision
class VivitImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = VivitImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = VivitImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "do_center_crop"))
        self.assertTrue(hasattr(image_processing, "size"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"shortest_edge": 18})
        self.assertEqual(image_processor.crop_size, {"height": 18, "width": 18})

        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42, crop_size=84)
        self.assertEqual(image_processor.size, {"shortest_edge": 42})
        self.assertEqual(image_processor.crop_size, {"height": 84, "width": 84})

    def test_call_pil(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL videos
        video_inputs = prepare_video_inputs(self.image_processor_tester, equal_resolution=False)
        for video in video_inputs:
            self.assertIsInstance(video, list)
            self.assertIsInstance(video[0], Image.Image)

        # Test not batched input
        encoded_videos = image_processing(video_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_videos.shape,
            (
                1,
                self.image_processor_tester.num_frames,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_videos = image_processing(video_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_videos.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_frames,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

    def test_call_numpy(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        video_inputs = prepare_video_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for video in video_inputs:
            self.assertIsInstance(video, list)
            self.assertIsInstance(video[0], np.ndarray)

        # Test not batched input
        encoded_videos = image_processing(video_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_videos.shape,
            (
                1,
                self.image_processor_tester.num_frames,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_videos = image_processing(video_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_videos.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_frames,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

    def test_call_pytorch(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        video_inputs = prepare_video_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for video in video_inputs:
            self.assertIsInstance(video, list)
            self.assertIsInstance(video[0], torch.Tensor)

        # Test not batched input
        encoded_videos = image_processing(video_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_videos.shape,
            (
                1,
                self.image_processor_tester.num_frames,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_videos = image_processing(video_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_videos.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_frames,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
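A minimal sketch of how the processor under test is driven outside the harness; the crop and size values mirror the tester defaults above, and the random frames are purely illustrative:

import numpy as np
from PIL import Image
from transformers import VivitImageProcessor

# ten RGB frames form one "video"
video = [Image.fromarray(np.random.randint(0, 255, (224, 224, 3), dtype=np.uint8)) for _ in range(10)]

processor = VivitImageProcessor(size={"shortest_edge": 18}, crop_size={"height": 18, "width": 18})
pixel_values = processor(video, return_tensors="np").pixel_values
print(pixel_values.shape)  # (1, 10, 3, 18, 18): batch, frames, channels, height, width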
43
import sys
import turtle


def get_mid(p1: tuple[float, float], p2: tuple[float, float]) -> tuple[float, float]:
    return (p1[0] + p2[0]) / 2, (p1[1] + p2[1]) / 2


def triangle(
    vertex1: tuple[float, float],
    vertex2: tuple[float, float],
    vertex3: tuple[float, float],
    depth: int,
) -> None:
    my_pen.up()
    my_pen.goto(vertex1[0], vertex1[1])
    my_pen.down()
    my_pen.goto(vertex2[0], vertex2[1])
    my_pen.goto(vertex3[0], vertex3[1])
    my_pen.goto(vertex1[0], vertex1[1])

    if depth == 0:
        return

    triangle(vertex1, get_mid(vertex1, vertex2), get_mid(vertex1, vertex3), depth - 1)
    triangle(vertex2, get_mid(vertex1, vertex2), get_mid(vertex2, vertex3), depth - 1)
    triangle(vertex3, get_mid(vertex3, vertex2), get_mid(vertex1, vertex3), depth - 1)


if __name__ == "__main__":
    if len(sys.argv) != 2:
        raise ValueError(
            "Correct format for using this script: "
            "python fractals.py <int:depth_for_fractal>"
        )
    my_pen = turtle.Turtle()
    my_pen.ht()
    my_pen.speed(5)
    my_pen.pencolor("red")

    vertices = [(-175, -125), (0, 175), (175, -125)]  # vertices of triangle
    triangle(vertices[0], vertices[1], vertices[2], int(sys.argv[1]))
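Each triangle() call draws one outline and then recurses three times, so the number of outlines drawn at depth d satisfies T(d) = 1 + 3*T(d-1) with T(0) = 1, i.e. T(d) = (3**(d+1) - 1) / 2. A quick sanity check of that growth, plain arithmetic with no turtle needed:

# outlines drawn by triangle(..., depth=d): T(d) = (3**(d + 1) - 1) // 2
for d in range(8):
    print(d, (3 ** (d + 1) - 1) // 2)
# prints 1, 4, 13, 40, ... and 3280 at depth 7, which is why deep runs get slow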
416
0
import os
from argparse import ArgumentParser
from typing import List

import torch.utils.data

from datasets import Dataset, IterableDataset
from datasets.distributed import split_dataset_by_node


NUM_SHARDS = 4
NUM_ITEMS_PER_SHARD = 3


class FailedTestError(RuntimeError):
    pass


def gen(shards: List[str]):
    for shard in shards:
        for i in range(NUM_ITEMS_PER_SHARD):
            yield {"i": i, "shard": shard}


def main():
    rank = int(os.environ["RANK"])
    world_size = int(os.environ["WORLD_SIZE"])

    parser = ArgumentParser()
    parser.add_argument("--streaming", type=bool)
    parser.add_argument("--local_rank", type=int)
    parser.add_argument("--num_workers", type=int, default=0)
    args = parser.parse_args()
    streaming = args.streaming
    num_workers = args.num_workers

    gen_kwargs = {"shards": [f"shard_{shard_idx}" for shard_idx in range(NUM_SHARDS)]}
    ds = IterableDataset.from_generator(gen, gen_kwargs=gen_kwargs)
    if not streaming:
        ds = Dataset.from_list(list(ds))

    ds = split_dataset_by_node(ds, rank=rank, world_size=world_size)
    dataloader = torch.utils.data.DataLoader(ds, num_workers=num_workers)

    full_size = NUM_SHARDS * NUM_ITEMS_PER_SHARD
    expected_local_size = full_size // world_size
    expected_local_size += int(rank < (full_size % world_size))

    local_size = sum(1 for _ in dataloader)
    if local_size != expected_local_size:
        raise FailedTestError(f"local_size {local_size} != expected_local_size {expected_local_size}")


if __name__ == "__main__":
    main()
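The script reads RANK and WORLD_SIZE from the environment, so it is meant to be started by a distributed launcher such as torchrun rather than run directly. A sketch that spawns two local ranks by hand; the filename run_torch_distributed.py is an assumption:

import os
import subprocess

procs = [
    subprocess.Popen(
        ["python", "run_torch_distributed.py", "--streaming", "True"],
        env={**os.environ, "RANK": str(rank), "WORLD_SIZE": "2"},  # torchrun would set these for you
    )
    for rank in range(2)
]
for p in procs:
    assert p.wait() == 0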
458
import shutil import tempfile import unittest import numpy as np from transformers.testing_utils import ( is_pt_tf_cross_test, require_tf, require_torch, require_torchvision, require_vision, ) from transformers.utils import is_tf_available, is_torch_available, is_vision_available if is_vision_available(): from PIL import Image from transformers import AutoProcessor, SamImageProcessor, SamProcessor if is_torch_available(): import torch if is_tf_available(): import tensorflow as tf @require_vision @require_torchvision class _UpperCamelCase ( unittest.TestCase ): '''simple docstring''' def _snake_case ( self : List[Any] ): '''simple docstring''' __lowerCamelCase : List[Any] = tempfile.mkdtemp() __lowerCamelCase : Union[str, Any] = SamImageProcessor() __lowerCamelCase : int = SamProcessor(_lowerCamelCase ) processor.save_pretrained(self.tmpdirname ) def _snake_case ( self : Tuple , **_lowerCamelCase : Any ): '''simple docstring''' return AutoProcessor.from_pretrained(self.tmpdirname , **_lowerCamelCase ).image_processor def _snake_case ( self : List[Any] ): '''simple docstring''' shutil.rmtree(self.tmpdirname ) def _snake_case ( self : str ): '''simple docstring''' __lowerCamelCase : Any = [np.random.randint(2_5_5 , size=(3, 3_0, 4_0_0) , dtype=np.uinta )] __lowerCamelCase : Optional[int] = [Image.fromarray(np.moveaxis(_lowerCamelCase , 0 , -1 ) ) for x in image_inputs] return image_inputs def _snake_case ( self : Optional[Any] ): '''simple docstring''' __lowerCamelCase : Tuple = SamProcessor(image_processor=self.get_image_processor() ) processor.save_pretrained(self.tmpdirname ) __lowerCamelCase : Optional[int] = self.get_image_processor(do_normalize=_lowerCamelCase , padding_value=1.0 ) __lowerCamelCase : Optional[Any] = SamProcessor.from_pretrained(self.tmpdirname , do_normalize=_lowerCamelCase , padding_value=1.0 ) self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() ) self.assertIsInstance(processor.image_processor , _lowerCamelCase ) def _snake_case ( self : Union[str, Any] ): '''simple docstring''' __lowerCamelCase : int = self.get_image_processor() __lowerCamelCase : Tuple = SamProcessor(image_processor=_lowerCamelCase ) __lowerCamelCase : Optional[Any] = self.prepare_image_inputs() __lowerCamelCase : List[str] = image_processor(_lowerCamelCase , return_tensors="""np""" ) __lowerCamelCase : List[Any] = processor(images=_lowerCamelCase , return_tensors="""np""" ) input_feat_extract.pop("""original_sizes""" ) # pop original_sizes as it is popped in the processor input_feat_extract.pop("""reshaped_input_sizes""" ) # pop original_sizes as it is popped in the processor for key in input_feat_extract.keys(): self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1E-2 ) @require_torch def _snake_case ( self : Optional[Any] ): '''simple docstring''' __lowerCamelCase : Dict = self.get_image_processor() __lowerCamelCase : Union[str, Any] = SamProcessor(image_processor=_lowerCamelCase ) __lowerCamelCase : Union[str, Any] = [torch.ones((1, 3, 5, 5) )] __lowerCamelCase : Any = [[1_7_6_4, 2_6_4_6]] __lowerCamelCase : Any = [[6_8_3, 1_0_2_4]] __lowerCamelCase : Tuple = processor.post_process_masks(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) self.assertEqual(masks[0].shape , (1, 3, 1_7_6_4, 2_6_4_6) ) __lowerCamelCase : Union[str, Any] = processor.post_process_masks( _lowerCamelCase , torch.tensor(_lowerCamelCase ) , torch.tensor(_lowerCamelCase ) ) self.assertEqual(masks[0].shape , (1, 3, 1_7_6_4, 
2_6_4_6) ) # should also work with np __lowerCamelCase : Any = [np.ones((1, 3, 5, 5) )] __lowerCamelCase : int = processor.post_process_masks(_lowerCamelCase , np.array(_lowerCamelCase ) , np.array(_lowerCamelCase ) ) self.assertEqual(masks[0].shape , (1, 3, 1_7_6_4, 2_6_4_6) ) __lowerCamelCase : List[str] = [[1, 0], [0, 1]] with self.assertRaises(_lowerCamelCase ): __lowerCamelCase : Union[str, Any] = processor.post_process_masks(_lowerCamelCase , np.array(_lowerCamelCase ) , np.array(_lowerCamelCase ) ) @require_vision @require_tf class _UpperCamelCase ( unittest.TestCase ): '''simple docstring''' def _snake_case ( self : str ): '''simple docstring''' __lowerCamelCase : Optional[Any] = tempfile.mkdtemp() __lowerCamelCase : int = SamImageProcessor() __lowerCamelCase : Dict = SamProcessor(_lowerCamelCase ) processor.save_pretrained(self.tmpdirname ) def _snake_case ( self : List[str] , **_lowerCamelCase : List[Any] ): '''simple docstring''' return AutoProcessor.from_pretrained(self.tmpdirname , **_lowerCamelCase ).image_processor def _snake_case ( self : str ): '''simple docstring''' shutil.rmtree(self.tmpdirname ) def _snake_case ( self : List[str] ): '''simple docstring''' __lowerCamelCase : Any = [np.random.randint(2_5_5 , size=(3, 3_0, 4_0_0) , dtype=np.uinta )] __lowerCamelCase : str = [Image.fromarray(np.moveaxis(_lowerCamelCase , 0 , -1 ) ) for x in image_inputs] return image_inputs def _snake_case ( self : Union[str, Any] ): '''simple docstring''' __lowerCamelCase : int = SamProcessor(image_processor=self.get_image_processor() ) processor.save_pretrained(self.tmpdirname ) __lowerCamelCase : Dict = self.get_image_processor(do_normalize=_lowerCamelCase , padding_value=1.0 ) __lowerCamelCase : Tuple = SamProcessor.from_pretrained(self.tmpdirname , do_normalize=_lowerCamelCase , padding_value=1.0 ) self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() ) self.assertIsInstance(processor.image_processor , _lowerCamelCase ) def _snake_case ( self : str ): '''simple docstring''' __lowerCamelCase : Union[str, Any] = self.get_image_processor() __lowerCamelCase : Tuple = SamProcessor(image_processor=_lowerCamelCase ) __lowerCamelCase : str = self.prepare_image_inputs() __lowerCamelCase : List[Any] = image_processor(_lowerCamelCase , return_tensors="""np""" ) __lowerCamelCase : Union[str, Any] = processor(images=_lowerCamelCase , return_tensors="""np""" ) input_feat_extract.pop("""original_sizes""" ) # pop original_sizes as it is popped in the processor input_feat_extract.pop("""reshaped_input_sizes""" ) # pop reshaped_input_sizes as it is popped in the processor for key in input_feat_extract.keys(): self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1E-2 ) @require_tf def _snake_case ( self : List[Any] ): '''simple docstring''' __lowerCamelCase : List[Any] = self.get_image_processor() __lowerCamelCase : str = SamProcessor(image_processor=_lowerCamelCase ) __lowerCamelCase : Any = [tf.ones((1, 3, 5, 5) )] __lowerCamelCase : Optional[Any] = [[1_7_6_4, 2_6_4_6]] __lowerCamelCase : List[str] = [[6_8_3, 1_0_2_4]] __lowerCamelCase : Optional[int] = processor.post_process_masks(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , return_tensors="""tf""" ) self.assertEqual(masks[0].shape , (1, 3, 1_7_6_4, 2_6_4_6) ) __lowerCamelCase : Optional[Any] = processor.post_process_masks( _lowerCamelCase , tf.convert_to_tensor(_lowerCamelCase ) , tf.convert_to_tensor(_lowerCamelCase ) , return_tensors="""tf""" , ) 
self.assertEqual(masks[0].shape , (1, 3, 1_7_6_4, 2_6_4_6) ) # should also work with np __lowerCamelCase : Tuple = [np.ones((1, 3, 5, 5) )] __lowerCamelCase : str = processor.post_process_masks( _lowerCamelCase , np.array(_lowerCamelCase ) , np.array(_lowerCamelCase ) , return_tensors="""tf""" ) self.assertEqual(masks[0].shape , (1, 3, 1_7_6_4, 2_6_4_6) ) __lowerCamelCase : Union[str, Any] = [[1, 0], [0, 1]] with self.assertRaises(tf.errors.InvalidArgumentError ): __lowerCamelCase : Optional[Any] = processor.post_process_masks( _lowerCamelCase , np.array(_lowerCamelCase ) , np.array(_lowerCamelCase ) , return_tensors="""tf""" ) @require_vision @require_torchvision class _UpperCamelCase ( unittest.TestCase ): '''simple docstring''' def _snake_case ( self : Tuple ): '''simple docstring''' __lowerCamelCase : Optional[int] = tempfile.mkdtemp() __lowerCamelCase : Union[str, Any] = SamImageProcessor() __lowerCamelCase : Dict = SamProcessor(_lowerCamelCase ) processor.save_pretrained(self.tmpdirname ) def _snake_case ( self : Optional[int] , **_lowerCamelCase : Tuple ): '''simple docstring''' return AutoProcessor.from_pretrained(self.tmpdirname , **_lowerCamelCase ).image_processor def _snake_case ( self : List[str] ): '''simple docstring''' shutil.rmtree(self.tmpdirname ) def _snake_case ( self : str ): '''simple docstring''' __lowerCamelCase : List[str] = [np.random.randint(2_5_5 , size=(3, 3_0, 4_0_0) , dtype=np.uinta )] __lowerCamelCase : str = [Image.fromarray(np.moveaxis(_lowerCamelCase , 0 , -1 ) ) for x in image_inputs] return image_inputs @is_pt_tf_cross_test def _snake_case ( self : int ): '''simple docstring''' __lowerCamelCase : Union[str, Any] = self.get_image_processor() __lowerCamelCase : Tuple = SamProcessor(image_processor=_lowerCamelCase ) __lowerCamelCase : str = np.random.randint(0 , 2 , size=(1, 3, 5, 5) ).astype(np.floataa ) __lowerCamelCase : Tuple = [tf.convert_to_tensor(_lowerCamelCase )] __lowerCamelCase : List[str] = [torch.tensor(_lowerCamelCase )] __lowerCamelCase : Optional[int] = [[1_7_6_4, 2_6_4_6]] __lowerCamelCase : Tuple = [[6_8_3, 1_0_2_4]] __lowerCamelCase : int = processor.post_process_masks( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , return_tensors="""tf""" ) __lowerCamelCase : Any = processor.post_process_masks( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , return_tensors="""pt""" ) self.assertTrue(np.all(tf_masks[0].numpy() == pt_masks[0].numpy() ) ) @is_pt_tf_cross_test def _snake_case ( self : Dict ): '''simple docstring''' __lowerCamelCase : Optional[int] = self.get_image_processor() __lowerCamelCase : int = SamProcessor(image_processor=_lowerCamelCase ) __lowerCamelCase : Any = self.prepare_image_inputs() __lowerCamelCase : Any = image_processor(_lowerCamelCase , return_tensors="""pt""" )["""pixel_values"""].numpy() __lowerCamelCase : Any = processor(images=_lowerCamelCase , return_tensors="""pt""" )["""pixel_values"""].numpy() __lowerCamelCase : Dict = image_processor(_lowerCamelCase , return_tensors="""tf""" )["""pixel_values"""].numpy() __lowerCamelCase : int = processor(images=_lowerCamelCase , return_tensors="""tf""" )["""pixel_values"""].numpy() self.assertTrue(np.allclose(_lowerCamelCase , _lowerCamelCase ) ) self.assertTrue(np.allclose(_lowerCamelCase , _lowerCamelCase ) ) self.assertTrue(np.allclose(_lowerCamelCase , _lowerCamelCase ) )
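All the post_process_masks calls above exercise the same contract: the model predicts low-resolution masks and the processor upscales them back to each image's original size. A minimal sketch with dummy masks under the same shapes the tests use; the checkpoint name is just a convenient public one:

import torch
from transformers import SamProcessor

processor = SamProcessor.from_pretrained("facebook/sam-vit-base")

low_res_masks = [torch.ones((1, 3, 5, 5))]  # one image, 3 candidate masks of 5x5
original_sizes = [[1764, 2646]]             # image size before the processor resized it
reshaped_input_sizes = [[683, 1024]]        # size after the longest-side resize

masks = processor.post_process_masks(low_res_masks, original_sizes, reshaped_input_sizes)
print(masks[0].shape)  # torch.Size([1, 3, 1764, 2646])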
458
1
"""simple docstring""" import os import tempfile import unittest import uuid from pathlib import Path from transformers.testing_utils import get_tests_dir, require_soundfile, require_torch, require_vision from transformers.tools.agent_types import AgentAudio, AgentImage, AgentText from transformers.utils import is_soundfile_availble, is_torch_available, is_vision_available if is_torch_available(): import torch if is_soundfile_availble(): import soundfile as sf if is_vision_available(): from PIL import Image def lowerCamelCase__ ( UpperCAmelCase_="" )-> str: """simple docstring""" UpperCamelCase = tempfile.mkdtemp() return os.path.join(__A , str(uuid.uuida() ) + suffix ) @require_soundfile @require_torch class __a ( unittest.TestCase ): def _SCREAMING_SNAKE_CASE ( self : Any )-> str: """simple docstring""" UpperCamelCase = torch.rand(12 , dtype=torch.floataa ) - 0.5 UpperCamelCase = AgentAudio(lowercase__ ) UpperCamelCase = str(agent_type.to_string() ) # Ensure that the tensor and the agent_type's tensor are the same self.assertTrue(torch.allclose(lowercase__ , agent_type.to_raw() , atol=1e-4 ) ) del agent_type # Ensure the path remains even after the object deletion self.assertTrue(os.path.exists(lowercase__ ) ) # Ensure that the file contains the same value as the original tensor UpperCamelCase , UpperCamelCase = sf.read(lowercase__ ) self.assertTrue(torch.allclose(lowercase__ , torch.tensor(lowercase__ ) , atol=1e-4 ) ) def _SCREAMING_SNAKE_CASE ( self : Any )-> str: """simple docstring""" UpperCamelCase = torch.rand(12 , dtype=torch.floataa ) - 0.5 UpperCamelCase = get_new_path(suffix=".wav" ) sf.write(lowercase__ , lowercase__ , 16_000 ) UpperCamelCase = AgentAudio(lowercase__ ) self.assertTrue(torch.allclose(lowercase__ , agent_type.to_raw() , atol=1e-4 ) ) self.assertEqual(agent_type.to_string() , lowercase__ ) @require_vision @require_torch class __a ( unittest.TestCase ): def _SCREAMING_SNAKE_CASE ( self : Optional[int] )-> Optional[Any]: """simple docstring""" UpperCamelCase = torch.randint(0 , 256 , (64, 64, 3) ) UpperCamelCase = AgentImage(lowercase__ ) UpperCamelCase = str(agent_type.to_string() ) # Ensure that the tensor and the agent_type's tensor are the same self.assertTrue(torch.allclose(lowercase__ , agent_type._tensor , atol=1e-4 ) ) self.assertIsInstance(agent_type.to_raw() , Image.Image ) # Ensure the path remains even after the object deletion del agent_type self.assertTrue(os.path.exists(lowercase__ ) ) def _SCREAMING_SNAKE_CASE ( self : str )-> Optional[Any]: """simple docstring""" UpperCamelCase = Path(get_tests_dir("fixtures/tests_samples/COCO" ) ) / "000000039769.png" UpperCamelCase = Image.open(lowercase__ ) UpperCamelCase = AgentImage(lowercase__ ) self.assertTrue(path.samefile(agent_type.to_string() ) ) self.assertTrue(image == agent_type.to_raw() ) # Ensure the path remains even after the object deletion del agent_type self.assertTrue(os.path.exists(lowercase__ ) ) def _SCREAMING_SNAKE_CASE ( self : Optional[Any] )-> Optional[int]: """simple docstring""" UpperCamelCase = Path(get_tests_dir("fixtures/tests_samples/COCO" ) ) / "000000039769.png" UpperCamelCase = Image.open(lowercase__ ) UpperCamelCase = AgentImage(lowercase__ ) self.assertFalse(path.samefile(agent_type.to_string() ) ) self.assertTrue(image == agent_type.to_raw() ) # Ensure the path remains even after the object deletion del agent_type self.assertTrue(os.path.exists(lowercase__ ) ) class __a ( unittest.TestCase ): def _SCREAMING_SNAKE_CASE ( self : str )-> Tuple: """simple docstring""" 
UpperCamelCase = "Hey!" UpperCamelCase = AgentText(lowercase__ ) self.assertEqual(lowercase__ , agent_type.to_string() ) self.assertEqual(lowercase__ , agent_type.to_raw() ) self.assertEqual(lowercase__ , lowercase__ )
554
import gc
import unittest

import numpy as np
import torch

from diffusers import StableDiffusionKDiffusionPipeline
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu


enable_full_determinism()


@slow
@require_torch_gpu
class StableDiffusionKDiffusionPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_stable_diffusion_1(self):
        sd_pipe = StableDiffusionKDiffusionPipeline.from_pretrained("CompVis/stable-diffusion-v1-4")
        sd_pipe = sd_pipe.to(torch_device)
        sd_pipe.set_progress_bar_config(disable=None)

        sd_pipe.set_scheduler("sample_euler")

        prompt = "A painting of a squirrel eating a burger"
        generator = torch.manual_seed(0)
        output = sd_pipe([prompt], generator=generator, guidance_scale=9.0, num_inference_steps=20, output_type="np")

        image = output.images

        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.0447, 0.0492, 0.0468, 0.0408, 0.0383, 0.0408, 0.0354, 0.0380, 0.0339])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_stable_diffusion_2(self):
        sd_pipe = StableDiffusionKDiffusionPipeline.from_pretrained("stabilityai/stable-diffusion-2-1-base")
        sd_pipe = sd_pipe.to(torch_device)
        sd_pipe.set_progress_bar_config(disable=None)

        sd_pipe.set_scheduler("sample_euler")

        prompt = "A painting of a squirrel eating a burger"
        generator = torch.manual_seed(0)
        output = sd_pipe([prompt], generator=generator, guidance_scale=9.0, num_inference_steps=20, output_type="np")

        image = output.images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.1237, 0.1320, 0.1438, 0.1359, 0.1390, 0.1132, 0.1277, 0.1175, 0.1112])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 5e-1

    def test_stable_diffusion_karras_sigmas(self):
        sd_pipe = StableDiffusionKDiffusionPipeline.from_pretrained("stabilityai/stable-diffusion-2-1-base")
        sd_pipe = sd_pipe.to(torch_device)
        sd_pipe.set_progress_bar_config(disable=None)

        sd_pipe.set_scheduler("sample_dpmpp_2m")

        prompt = "A painting of a squirrel eating a burger"
        generator = torch.manual_seed(0)
        output = sd_pipe(
            [prompt],
            generator=generator,
            guidance_scale=7.5,
            num_inference_steps=15,
            output_type="np",
            use_karras_sigmas=True,
        )

        image = output.images

        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array(
            [0.11381689, 0.12112921, 0.1389457, 0.12549606, 0.1244964, 0.10831517, 0.11562866, 0.10867816, 0.10499048]
        )
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
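Outside the test harness the same pipeline runs in a few lines; every call below appears in the tests above, the prompt is illustrative, and a CUDA device is assumed:

import torch
from diffusers import StableDiffusionKDiffusionPipeline

pipe = StableDiffusionKDiffusionPipeline.from_pretrained("stabilityai/stable-diffusion-2-1-base")
pipe = pipe.to("cuda")
pipe.set_scheduler("sample_dpmpp_2m")  # accepts k-diffusion sampler names, e.g. "sample_euler"

image = pipe(
    "A painting of a squirrel eating a burger",
    generator=torch.manual_seed(0),
    num_inference_steps=20,
    use_karras_sigmas=True,
).images[0]
image.save("squirrel.png")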
184
0
import unittest import numpy as np import timeout_decorator # noqa from transformers import BlenderbotConfig, is_flax_available from transformers.testing_utils import jax_device, require_flax, slow from ...generation.test_flax_utils import FlaxGenerationTesterMixin from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor if is_flax_available(): import os # The slow tests are often failing with OOM error on GPU # This makes JAX allocate exactly what is needed on demand, and deallocate memory that is no longer needed # but will be slower as stated here https://jax.readthedocs.io/en/latest/gpu_memory_allocation.html _lowercase = """platform""" import jax import jax.numpy as jnp from transformers import BlenderbotTokenizer from transformers.models.blenderbot.modeling_flax_blenderbot import ( FlaxBlenderbotForConditionalGeneration, FlaxBlenderbotModel, shift_tokens_right, ) def SCREAMING_SNAKE_CASE_ ( UpperCAmelCase_ : Optional[int] , UpperCAmelCase_ : Dict , UpperCAmelCase_ : Optional[int]=None , UpperCAmelCase_ : Dict=None , UpperCAmelCase_ : Tuple=None , UpperCAmelCase_ : Union[str, Any]=None , UpperCAmelCase_ : str=None , UpperCAmelCase_ : List[str]=None , ) -> List[str]: if attention_mask is None: SCREAMING_SNAKE_CASE_ : Union[str, Any] =np.where(input_ids != config.pad_token_id , 1 , 0 ) if decoder_attention_mask is None: SCREAMING_SNAKE_CASE_ : List[Any] =np.where(decoder_input_ids != config.pad_token_id , 1 , 0 ) if head_mask is None: SCREAMING_SNAKE_CASE_ : Any =np.ones((config.encoder_layers, config.encoder_attention_heads) ) if decoder_head_mask is None: SCREAMING_SNAKE_CASE_ : Any =np.ones((config.decoder_layers, config.decoder_attention_heads) ) if cross_attn_head_mask is None: SCREAMING_SNAKE_CASE_ : Tuple =np.ones((config.decoder_layers, config.decoder_attention_heads) ) return { "input_ids": input_ids, "decoder_input_ids": decoder_input_ids, "attention_mask": attention_mask, "decoder_attention_mask": attention_mask, } class lowercase_ : def __init__( self , __A , __A=13 , __A=7 , __A=True , __A=False , __A=99 , __A=16 , __A=2 , __A=4 , __A=4 , __A="gelu" , __A=0.1 , __A=0.1 , __A=32 , __A=2 , __A=1 , __A=0 , __A=0.02 , ) -> List[Any]: SCREAMING_SNAKE_CASE_ : Tuple =parent SCREAMING_SNAKE_CASE_ : str =batch_size SCREAMING_SNAKE_CASE_ : List[Any] =seq_length SCREAMING_SNAKE_CASE_ : List[Any] =is_training SCREAMING_SNAKE_CASE_ : List[str] =use_labels SCREAMING_SNAKE_CASE_ : Dict =vocab_size SCREAMING_SNAKE_CASE_ : Optional[int] =hidden_size SCREAMING_SNAKE_CASE_ : int =num_hidden_layers SCREAMING_SNAKE_CASE_ : List[str] =num_attention_heads SCREAMING_SNAKE_CASE_ : Optional[int] =intermediate_size SCREAMING_SNAKE_CASE_ : Any =hidden_act SCREAMING_SNAKE_CASE_ : Union[str, Any] =hidden_dropout_prob SCREAMING_SNAKE_CASE_ : Dict =attention_probs_dropout_prob SCREAMING_SNAKE_CASE_ : Optional[int] =max_position_embeddings SCREAMING_SNAKE_CASE_ : List[str] =eos_token_id SCREAMING_SNAKE_CASE_ : Optional[int] =pad_token_id SCREAMING_SNAKE_CASE_ : List[Any] =bos_token_id SCREAMING_SNAKE_CASE_ : Dict =initializer_range def _snake_case ( self ) -> Dict: SCREAMING_SNAKE_CASE_ : Dict =np.clip(ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size ) , 3 , self.vocab_size ) SCREAMING_SNAKE_CASE_ : Tuple =np.concatenate((input_ids, 2 * np.ones((self.batch_size, 1) , dtype=np.intaa )) , -1 ) SCREAMING_SNAKE_CASE_ : Tuple =shift_tokens_right(__A , 1 , 2 ) SCREAMING_SNAKE_CASE_ : List[str] =BlenderbotConfig( vocab_size=self.vocab_size , d_model=self.hidden_size , 
encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_id=self.eos_token_id , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , initializer_range=self.initializer_range , use_cache=__A , ) SCREAMING_SNAKE_CASE_ : Union[str, Any] =prepare_blenderbot_inputs_dict(__A , __A , __A ) return config, inputs_dict def _snake_case ( self ) -> Tuple: SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : str =self.prepare_config_and_inputs() return config, inputs_dict def _snake_case ( self , __A , __A , __A ) -> List[Any]: SCREAMING_SNAKE_CASE_ : Any =20 SCREAMING_SNAKE_CASE_ : Tuple =model_class_name(__A ) SCREAMING_SNAKE_CASE_ : Dict =model.encode(inputs_dict['''input_ids'''] ) SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Optional[int] =( inputs_dict['''decoder_input_ids'''], inputs_dict['''decoder_attention_mask'''], ) SCREAMING_SNAKE_CASE_ : Tuple =model.init_cache(decoder_input_ids.shape[0] , __A , __A ) SCREAMING_SNAKE_CASE_ : Dict =jnp.ones((decoder_input_ids.shape[0], max_decoder_length) , dtype='''i4''' ) SCREAMING_SNAKE_CASE_ : Dict =jnp.broadcast_to( jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , ) SCREAMING_SNAKE_CASE_ : int =model.decode( decoder_input_ids[:, :-1] , __A , decoder_attention_mask=__A , past_key_values=__A , decoder_position_ids=__A , ) SCREAMING_SNAKE_CASE_ : List[str] =jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype='''i4''' ) SCREAMING_SNAKE_CASE_ : Tuple =model.decode( decoder_input_ids[:, -1:] , __A , decoder_attention_mask=__A , past_key_values=outputs_cache.past_key_values , decoder_position_ids=__A , ) SCREAMING_SNAKE_CASE_ : List[Any] =model.decode(__A , __A ) SCREAMING_SNAKE_CASE_ : List[str] =np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) ) self.parent.assertTrue(diff < 1e-3 , msg=F'Max diff is {diff}' ) def _snake_case ( self , __A , __A , __A ) -> Union[str, Any]: SCREAMING_SNAKE_CASE_ : Optional[int] =20 SCREAMING_SNAKE_CASE_ : Union[str, Any] =model_class_name(__A ) SCREAMING_SNAKE_CASE_ : List[Any] =model.encode(inputs_dict['''input_ids'''] ) SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Tuple =( inputs_dict['''decoder_input_ids'''], inputs_dict['''decoder_attention_mask'''], ) SCREAMING_SNAKE_CASE_ : Optional[Any] =jnp.concatenate( [ decoder_attention_mask, jnp.zeros((decoder_attention_mask.shape[0], max_decoder_length - decoder_attention_mask.shape[1]) ), ] , axis=-1 , ) SCREAMING_SNAKE_CASE_ : Optional[Any] =model.init_cache(decoder_input_ids.shape[0] , __A , __A ) SCREAMING_SNAKE_CASE_ : List[str] =jnp.broadcast_to( jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , ) SCREAMING_SNAKE_CASE_ : Optional[int] =model.decode( decoder_input_ids[:, :-1] , __A , decoder_attention_mask=__A , past_key_values=__A , decoder_position_ids=__A , ) SCREAMING_SNAKE_CASE_ : str =jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype='''i4''' ) SCREAMING_SNAKE_CASE_ : int =model.decode( decoder_input_ids[:, -1:] , __A , past_key_values=outputs_cache.past_key_values , 
decoder_attention_mask=__A , decoder_position_ids=__A , ) SCREAMING_SNAKE_CASE_ : Any =model.decode(__A , __A , decoder_attention_mask=__A ) SCREAMING_SNAKE_CASE_ : Tuple =np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) ) self.parent.assertTrue(diff < 1e-3 , msg=F'Max diff is {diff}' ) @require_flax class lowercase_ ( unittest.TestCase ): __lowerCamelCase = 9_9 def _snake_case ( self ) -> Optional[Any]: SCREAMING_SNAKE_CASE_ : int =np.array( [ [71, 82, 18, 33, 46, 91, 2], [68, 34, 26, 58, 30, 82, 2], [5, 97, 17, 39, 94, 40, 2], [76, 83, 94, 25, 70, 78, 2], [87, 59, 41, 35, 48, 66, 2], [55, 13, 16, 58, 5, 2, 1], # note padding [64, 27, 31, 51, 12, 75, 2], [52, 64, 86, 17, 83, 39, 2], [48, 61, 9, 24, 71, 82, 2], [26, 1, 60, 48, 22, 13, 2], [21, 5, 62, 28, 14, 76, 2], [45, 98, 37, 86, 59, 48, 2], [70, 70, 50, 9, 28, 0, 2], ] , dtype=np.intaa , ) SCREAMING_SNAKE_CASE_ : Union[str, Any] =input_ids.shape[0] SCREAMING_SNAKE_CASE_ : Optional[int] =BlenderbotConfig( vocab_size=self.vocab_size , d_model=24 , encoder_layers=2 , decoder_layers=2 , encoder_attention_heads=2 , decoder_attention_heads=2 , encoder_ffn_dim=32 , decoder_ffn_dim=32 , max_position_embeddings=48 , eos_token_id=2 , pad_token_id=1 , bos_token_id=0 , ) return config, input_ids, batch_size def _snake_case ( self ) -> int: SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Dict =self._get_config_and_data() SCREAMING_SNAKE_CASE_ : Tuple =FlaxBlenderbotForConditionalGeneration(__A ) SCREAMING_SNAKE_CASE_ : Optional[int] =lm_model(input_ids=__A ) SCREAMING_SNAKE_CASE_ : List[str] =(batch_size, input_ids.shape[1], config.vocab_size) self.assertEqual(outputs['''logits'''].shape , __A ) def _snake_case ( self ) -> str: SCREAMING_SNAKE_CASE_ : Dict =BlenderbotConfig( vocab_size=self.vocab_size , d_model=14 , encoder_layers=2 , decoder_layers=2 , encoder_attention_heads=2 , decoder_attention_heads=2 , encoder_ffn_dim=8 , decoder_ffn_dim=8 , max_position_embeddings=48 , ) SCREAMING_SNAKE_CASE_ : Union[str, Any] =FlaxBlenderbotForConditionalGeneration(__A ) SCREAMING_SNAKE_CASE_ : Dict =np.array([[71, 82, 18, 33, 46, 91, 2], [68, 34, 26, 58, 30, 2, 1]] , dtype=np.intaa ) SCREAMING_SNAKE_CASE_ : int =np.array([[82, 71, 82, 18, 2], [58, 68, 2, 1, 1]] , dtype=np.intaa ) SCREAMING_SNAKE_CASE_ : Dict =lm_model(input_ids=__A , decoder_input_ids=__A ) SCREAMING_SNAKE_CASE_ : Dict =(*summary.shape, config.vocab_size) self.assertEqual(outputs['''logits'''].shape , __A ) def _snake_case ( self ) -> int: SCREAMING_SNAKE_CASE_ : Dict =np.array([[71, 82, 18, 33, 2, 1, 1], [68, 34, 26, 58, 30, 82, 2]] , dtype=np.intaa ) SCREAMING_SNAKE_CASE_ : List[Any] =shift_tokens_right(__A , 1 , 2 ) SCREAMING_SNAKE_CASE_ : Any =np.equal(__A , 1 ).astype(np.floataa ).sum() SCREAMING_SNAKE_CASE_ : Tuple =np.equal(__A , 1 ).astype(np.floataa ).sum() self.assertEqual(shifted.shape , input_ids.shape ) self.assertEqual(__A , n_pad_before - 1 ) self.assertTrue(np.equal(shifted[:, 0] , 2 ).all() ) @require_flax class lowercase_ ( A , unittest.TestCase , A ): __lowerCamelCase = True __lowerCamelCase = ( ( FlaxBlenderbotModel, FlaxBlenderbotForConditionalGeneration, ) if is_flax_available() else () ) __lowerCamelCase = (FlaxBlenderbotForConditionalGeneration,) if is_flax_available() else () def _snake_case ( self ) -> List[str]: SCREAMING_SNAKE_CASE_ : Any =FlaxBlenderbotModelTester(self ) def _snake_case ( self ) -> Any: SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Optional[Any] =self.model_tester.prepare_config_and_inputs() 
for model_class in self.all_model_classes: self.model_tester.check_use_cache_forward(__A , __A , __A ) def _snake_case ( self ) -> str: SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : List[str] =self.model_tester.prepare_config_and_inputs() for model_class in self.all_model_classes: self.model_tester.check_use_cache_forward_with_attn_mask(__A , __A , __A ) def _snake_case ( self ) -> str: SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : int =self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: with self.subTest(model_class.__name__ ): SCREAMING_SNAKE_CASE_ : Any =self._prepare_for_class(__A , __A ) SCREAMING_SNAKE_CASE_ : List[Any] =model_class(__A ) @jax.jit def encode_jitted(__A , __A=None , **__A ): return model.encode(input_ids=__A , attention_mask=__A ) with self.subTest('''JIT Enabled''' ): SCREAMING_SNAKE_CASE_ : List[str] =encode_jitted(**__A ).to_tuple() with self.subTest('''JIT Disabled''' ): with jax.disable_jit(): SCREAMING_SNAKE_CASE_ : str =encode_jitted(**__A ).to_tuple() self.assertEqual(len(__A ) , len(__A ) ) for jitted_output, output in zip(__A , __A ): self.assertEqual(jitted_output.shape , output.shape ) def _snake_case ( self ) -> Union[str, Any]: SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Optional[Any] =self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: with self.subTest(model_class.__name__ ): SCREAMING_SNAKE_CASE_ : str =model_class(__A ) SCREAMING_SNAKE_CASE_ : int =model.encode(inputs_dict['''input_ids'''] , inputs_dict['''attention_mask'''] ) SCREAMING_SNAKE_CASE_ : Any ={ '''decoder_input_ids''': inputs_dict['''decoder_input_ids'''], '''decoder_attention_mask''': inputs_dict['''decoder_attention_mask'''], '''encoder_outputs''': encoder_outputs, } @jax.jit def decode_jitted(__A , __A , __A ): return model.decode( decoder_input_ids=__A , decoder_attention_mask=__A , encoder_outputs=__A , ) with self.subTest('''JIT Enabled''' ): SCREAMING_SNAKE_CASE_ : str =decode_jitted(**__A ).to_tuple() with self.subTest('''JIT Disabled''' ): with jax.disable_jit(): SCREAMING_SNAKE_CASE_ : List[str] =decode_jitted(**__A ).to_tuple() self.assertEqual(len(__A ) , len(__A ) ) for jitted_output, output in zip(__A , __A ): self.assertEqual(jitted_output.shape , output.shape ) @slow def _snake_case ( self ) -> Dict: for model_class_name in self.all_model_classes: SCREAMING_SNAKE_CASE_ : List[Any] =model_class_name.from_pretrained('''facebook/blenderbot-400M-distill''' ) # FlaxBlenderbotForSequenceClassification expects eos token in input_ids SCREAMING_SNAKE_CASE_ : Dict =np.ones((1, 1) ) * model.config.eos_token_id SCREAMING_SNAKE_CASE_ : List[Any] =model(__A ) self.assertIsNotNone(__A ) @unittest.skipUnless(jax_device != '''cpu''' , '''3B test too slow on CPU.''' ) @slow def _snake_case ( self ) -> Dict: SCREAMING_SNAKE_CASE_ : int ={'''num_beams''': 1, '''early_stopping''': True, '''min_length''': 15, '''max_length''': 25} SCREAMING_SNAKE_CASE_ : List[Any] ={'''skip_special_tokens''': True, '''clean_up_tokenization_spaces''': True} SCREAMING_SNAKE_CASE_ : str =FlaxBlenderbotForConditionalGeneration.from_pretrained('''facebook/blenderbot-3B''' , from_pt=__A ) SCREAMING_SNAKE_CASE_ : Optional[int] =BlenderbotTokenizer.from_pretrained('''facebook/blenderbot-3B''' ) SCREAMING_SNAKE_CASE_ : List[str] =['''Sam'''] SCREAMING_SNAKE_CASE_ : Tuple =tokenizer(__A , return_tensors='''jax''' ) SCREAMING_SNAKE_CASE_ : Any =model.generate(**__A , **__A ) SCREAMING_SNAKE_CASE_ : Any ='''Sam is a 
great name. It means "sun" in Gaelic.''' SCREAMING_SNAKE_CASE_ : Any =tokenizer.batch_decode(__A , **__A ) assert generated_txt[0].strip() == tgt_text
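The shift test above pins down the decoder-input convention: shift_tokens_right writes the decoder start token into position 0, shifts everything right by one, and drops the last position, so exactly one trailing pad disappears. A small worked example using the test's own values:

import numpy as np
from transformers.models.blenderbot.modeling_flax_blenderbot import shift_tokens_right

input_ids = np.array([[71, 82, 18, 33, 2, 1, 1]], dtype=np.int64)
shifted = shift_tokens_right(input_ids, 1, 2)  # pad_token_id=1, decoder_start_token_id=2
print(shifted)  # [[ 2 71 82 18 33  2  1]] -- one pad fewer than the input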
431
import unittest from transformers import DebertaVaConfig, is_torch_available from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( DebertaVaForMaskedLM, DebertaVaForMultipleChoice, DebertaVaForQuestionAnswering, DebertaVaForSequenceClassification, DebertaVaForTokenClassification, DebertaVaModel, ) from transformers.models.deberta_va.modeling_deberta_va import DEBERTA_V2_PRETRAINED_MODEL_ARCHIVE_LIST class lowercase_ ( A ): def __init__( self , __A , __A=13 , __A=7 , __A=True , __A=True , __A=True , __A=True , __A=99 , __A=32 , __A=5 , __A=4 , __A=37 , __A="gelu" , __A=0.1 , __A=0.1 , __A=512 , __A=16 , __A=2 , __A=0.02 , __A=False , __A=True , __A="None" , __A=3 , __A=4 , __A=None , ) -> Optional[Any]: SCREAMING_SNAKE_CASE_ : Any =parent SCREAMING_SNAKE_CASE_ : Tuple =batch_size SCREAMING_SNAKE_CASE_ : Dict =seq_length SCREAMING_SNAKE_CASE_ : Dict =is_training SCREAMING_SNAKE_CASE_ : Any =use_input_mask SCREAMING_SNAKE_CASE_ : Dict =use_token_type_ids SCREAMING_SNAKE_CASE_ : List[Any] =use_labels SCREAMING_SNAKE_CASE_ : Any =vocab_size SCREAMING_SNAKE_CASE_ : Dict =hidden_size SCREAMING_SNAKE_CASE_ : int =num_hidden_layers SCREAMING_SNAKE_CASE_ : Any =num_attention_heads SCREAMING_SNAKE_CASE_ : Union[str, Any] =intermediate_size SCREAMING_SNAKE_CASE_ : Dict =hidden_act SCREAMING_SNAKE_CASE_ : Any =hidden_dropout_prob SCREAMING_SNAKE_CASE_ : Optional[int] =attention_probs_dropout_prob SCREAMING_SNAKE_CASE_ : str =max_position_embeddings SCREAMING_SNAKE_CASE_ : Optional[Any] =type_vocab_size SCREAMING_SNAKE_CASE_ : List[Any] =type_sequence_label_size SCREAMING_SNAKE_CASE_ : Dict =initializer_range SCREAMING_SNAKE_CASE_ : List[str] =num_labels SCREAMING_SNAKE_CASE_ : Optional[int] =num_choices SCREAMING_SNAKE_CASE_ : Optional[Any] =relative_attention SCREAMING_SNAKE_CASE_ : Optional[Any] =position_biased_input SCREAMING_SNAKE_CASE_ : Union[str, Any] =pos_att_type SCREAMING_SNAKE_CASE_ : Tuple =scope def _snake_case ( self ) -> Tuple: SCREAMING_SNAKE_CASE_ : Optional[int] =ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) SCREAMING_SNAKE_CASE_ : int =None if self.use_input_mask: SCREAMING_SNAKE_CASE_ : Optional[int] =ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 ) SCREAMING_SNAKE_CASE_ : Tuple =None if self.use_token_type_ids: SCREAMING_SNAKE_CASE_ : List[Any] =ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) SCREAMING_SNAKE_CASE_ : Optional[Any] =None SCREAMING_SNAKE_CASE_ : List[str] =None SCREAMING_SNAKE_CASE_ : Optional[int] =None if self.use_labels: SCREAMING_SNAKE_CASE_ : Optional[Any] =ids_tensor([self.batch_size] , self.type_sequence_label_size ) SCREAMING_SNAKE_CASE_ : Union[str, Any] =ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) SCREAMING_SNAKE_CASE_ : Optional[Any] =ids_tensor([self.batch_size] , self.num_choices ) SCREAMING_SNAKE_CASE_ : str =self.get_config() return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def _snake_case ( self ) -> Dict: return DebertaVaConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , 
hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , relative_attention=self.relative_attention , position_biased_input=self.position_biased_input , pos_att_type=self.pos_att_type , ) def _snake_case ( self , __A ) -> Tuple: self.parent.assertListEqual(list(result.loss.size() ) , [] ) def _snake_case ( self , __A , __A , __A , __A , __A , __A , __A ) -> List[Any]: SCREAMING_SNAKE_CASE_ : str =DebertaVaModel(config=__A ) model.to(__A ) model.eval() SCREAMING_SNAKE_CASE_ : int =model(__A , attention_mask=__A , token_type_ids=__A )[0] SCREAMING_SNAKE_CASE_ : Optional[Any] =model(__A , token_type_ids=__A )[0] SCREAMING_SNAKE_CASE_ : int =model(__A )[0] self.parent.assertListEqual(list(sequence_output.size() ) , [self.batch_size, self.seq_length, self.hidden_size] ) def _snake_case ( self , __A , __A , __A , __A , __A , __A , __A ) -> str: SCREAMING_SNAKE_CASE_ : List[str] =DebertaVaForMaskedLM(config=__A ) model.to(__A ) model.eval() SCREAMING_SNAKE_CASE_ : int =model(__A , attention_mask=__A , token_type_ids=__A , labels=__A ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def _snake_case ( self , __A , __A , __A , __A , __A , __A , __A ) -> List[Any]: SCREAMING_SNAKE_CASE_ : Tuple =self.num_labels SCREAMING_SNAKE_CASE_ : Dict =DebertaVaForSequenceClassification(__A ) model.to(__A ) model.eval() SCREAMING_SNAKE_CASE_ : List[str] =model(__A , attention_mask=__A , token_type_ids=__A , labels=__A ) self.parent.assertListEqual(list(result.logits.size() ) , [self.batch_size, self.num_labels] ) self.check_loss_output(__A ) def _snake_case ( self , __A , __A , __A , __A , __A , __A , __A ) -> Dict: SCREAMING_SNAKE_CASE_ : str =self.num_labels SCREAMING_SNAKE_CASE_ : int =DebertaVaForTokenClassification(config=__A ) model.to(__A ) model.eval() SCREAMING_SNAKE_CASE_ : List[Any] =model(__A , attention_mask=__A , token_type_ids=__A , labels=__A ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def _snake_case ( self , __A , __A , __A , __A , __A , __A , __A ) -> Tuple: SCREAMING_SNAKE_CASE_ : Any =DebertaVaForQuestionAnswering(config=__A ) model.to(__A ) model.eval() SCREAMING_SNAKE_CASE_ : Dict =model( __A , attention_mask=__A , token_type_ids=__A , start_positions=__A , end_positions=__A , ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def _snake_case ( self , __A , __A , __A , __A , __A , __A , __A ) -> List[str]: SCREAMING_SNAKE_CASE_ : Any =DebertaVaForMultipleChoice(config=__A ) model.to(__A ) model.eval() SCREAMING_SNAKE_CASE_ : Optional[Any] =input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() SCREAMING_SNAKE_CASE_ : Tuple =token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() SCREAMING_SNAKE_CASE_ : Any =input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() SCREAMING_SNAKE_CASE_ : Dict =model( __A , attention_mask=__A , token_type_ids=__A , labels=__A , ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) ) def _snake_case ( self ) -> int: SCREAMING_SNAKE_CASE_ : List[str] =self.prepare_config_and_inputs() ( ( SCREAMING_SNAKE_CASE_ ) , ( 
SCREAMING_SNAKE_CASE_ ) , ( SCREAMING_SNAKE_CASE_ ) , ( SCREAMING_SNAKE_CASE_ ) , ( SCREAMING_SNAKE_CASE_ ) , ( SCREAMING_SNAKE_CASE_ ) , ( SCREAMING_SNAKE_CASE_ ) , ) : Optional[Any] =config_and_inputs SCREAMING_SNAKE_CASE_ : Dict ={'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''attention_mask''': input_mask} return config, inputs_dict @require_torch class lowercase_ ( A , A , unittest.TestCase ): __lowerCamelCase = ( ( DebertaVaModel, DebertaVaForMaskedLM, DebertaVaForSequenceClassification, DebertaVaForTokenClassification, DebertaVaForQuestionAnswering, DebertaVaForMultipleChoice, ) if is_torch_available() else () ) __lowerCamelCase = ( { "feature-extraction": DebertaVaModel, "fill-mask": DebertaVaForMaskedLM, "question-answering": DebertaVaForQuestionAnswering, "text-classification": DebertaVaForSequenceClassification, "token-classification": DebertaVaForTokenClassification, "zero-shot": DebertaVaForSequenceClassification, } if is_torch_available() else {} ) __lowerCamelCase = True __lowerCamelCase = False __lowerCamelCase = False __lowerCamelCase = False __lowerCamelCase = False def _snake_case ( self ) -> List[str]: SCREAMING_SNAKE_CASE_ : int =DebertaVaModelTester(self ) SCREAMING_SNAKE_CASE_ : Union[str, Any] =ConfigTester(self , config_class=__A , hidden_size=37 ) def _snake_case ( self ) -> Tuple: self.config_tester.run_common_tests() def _snake_case ( self ) -> str: SCREAMING_SNAKE_CASE_ : int =self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_deberta_model(*__A ) def _snake_case ( self ) -> Optional[int]: SCREAMING_SNAKE_CASE_ : Dict =self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_deberta_for_sequence_classification(*__A ) def _snake_case ( self ) -> Any: SCREAMING_SNAKE_CASE_ : int =self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_deberta_for_masked_lm(*__A ) def _snake_case ( self ) -> Union[str, Any]: SCREAMING_SNAKE_CASE_ : int =self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_deberta_for_question_answering(*__A ) def _snake_case ( self ) -> List[str]: SCREAMING_SNAKE_CASE_ : Optional[Any] =self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_deberta_for_token_classification(*__A ) def _snake_case ( self ) -> Union[str, Any]: SCREAMING_SNAKE_CASE_ : Dict =self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_deberta_for_multiple_choice(*__A ) @slow def _snake_case ( self ) -> Tuple: for model_name in DEBERTA_V2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: SCREAMING_SNAKE_CASE_ : Union[str, Any] =DebertaVaModel.from_pretrained(__A ) self.assertIsNotNone(__A ) @require_torch @require_sentencepiece @require_tokenizers class lowercase_ ( unittest.TestCase ): @unittest.skip(reason='''Model not available yet''' ) def _snake_case ( self ) -> str: pass @slow def _snake_case ( self ) -> str: SCREAMING_SNAKE_CASE_ : List[Any] =DebertaVaModel.from_pretrained('''microsoft/deberta-v2-xlarge''' ) SCREAMING_SNAKE_CASE_ : List[str] =torch.tensor([[0, 31_414, 232, 328, 740, 1_140, 12_695, 69, 46_078, 1_588, 2]] ) SCREAMING_SNAKE_CASE_ : Tuple =torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] ) with torch.no_grad(): SCREAMING_SNAKE_CASE_ : Optional[Any] =model(__A , attention_mask=__A )[0] # compare the actual values for a slice. 
SCREAMING_SNAKE_CASE_ : int =torch.tensor( [[[0.2_356, 0.1_948, 0.0_369], [-0.1_063, 0.3_586, -0.5_152], [-0.6_399, -0.0_259, -0.2_525]]] ) self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , __A , atol=1e-4 ) , F'{output[:, 1:4, 1:4]}' )
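The integration test at the end reduces to a plain forward pass; a sketch with the public DeBERTa-v2 classes, where the expected hidden size is taken from the xlarge config and should be treated as an assumption:

import torch
from transformers import DebertaV2Model

model = DebertaV2Model.from_pretrained("microsoft/deberta-v2-xlarge")
input_ids = torch.tensor([[0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2]])
attention_mask = torch.ones_like(input_ids)

with torch.no_grad():
    hidden_states = model(input_ids, attention_mask=attention_mask)[0]
print(hidden_states.shape)  # expected (1, 11, 1536) for this checkpoint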
431
1
"""simple docstring""" from ...utils import is_torch_available, is_transformers_available if is_transformers_available() and is_torch_available(): from .pipeline_vq_diffusion import LearnedClassifierFreeSamplingEmbeddings, VQDiffusionPipeline
102
"""simple docstring""" import importlib.metadata import operator import re import sys from typing import Optional from packaging import version lowercase_ = { "<": operator.lt, "<=": operator.le, "==": operator.eq, "!=": operator.ne, ">=": operator.ge, ">": operator.gt, } def lowercase ( lowerCAmelCase__ : Tuple , lowerCAmelCase__ : Optional[int] , lowerCAmelCase__ : Dict , lowerCAmelCase__ : int , lowerCAmelCase__ : Union[str, Any] , lowerCAmelCase__ : Optional[Any] ) -> Dict: if got_ver is None or want_ver is None: raise ValueError( f'''Unable to compare versions for {requirement}: need={want_ver} found={got_ver}. This is unusual. Consider''' f''' reinstalling {pkg}.''' ) if not ops[op](version.parse(lowerCAmelCase__ ) , version.parse(lowerCAmelCase__ ) ): raise ImportError( f'''{requirement} is required for a normal functioning of this module, but found {pkg}=={got_ver}.{hint}''' ) def lowercase ( lowerCAmelCase__ : str , lowerCAmelCase__ : Optional[str] = None ) -> None: __a = f'''\n{hint}''' if hint is not None else '''''' # non-versioned check if re.match(r'''^[\w_\-\d]+$''' , lowerCAmelCase__ ): __a , __a , __a = requirement, None, None else: __a = re.findall(r'''^([^!=<>\s]+)([\s!=<>]{1,2}.+)''' , lowerCAmelCase__ ) if not match: raise ValueError( '''requirement needs to be in the pip package format, .e.g., package_a==1.23, or package_b>=1.23, but''' f''' got {requirement}''' ) __a , __a = match[0] __a = want_full.split(''',''' ) # there could be multiple requirements __a = {} for w in want_range: __a = re.findall(r'''^([\s!=<>]{1,2})(.+)''' , lowerCAmelCase__ ) if not match: raise ValueError( '''requirement needs to be in the pip package format, .e.g., package_a==1.23, or package_b>=1.23,''' f''' but got {requirement}''' ) __a , __a = match[0] __a = want_ver if op not in ops: raise ValueError(f'''{requirement}: need one of {list(ops.keys() )}, but got {op}''' ) # special case if pkg == "python": __a = '''.'''.join([str(lowerCAmelCase__ ) for x in sys.version_info[:3]] ) for op, want_ver in wanted.items(): _compare_versions(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) return # check if any version is installed try: __a = importlib.metadata.version(lowerCAmelCase__ ) except importlib.metadata.PackageNotFoundError: raise importlib.metadata.PackageNotFoundError( f'''The \'{requirement}\' distribution was not found and is required by this application. {hint}''' ) # check that the right version is installed if version number or a range was provided if want_ver is not None: for op, want_ver in wanted.items(): _compare_versions(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) def lowercase ( lowerCAmelCase__ : Tuple ) -> Optional[Any]: __a = '''Try: pip install transformers -U or pip install -e \'.[dev]\' if you\'re working with git main''' return require_version(lowerCAmelCase__ , lowerCAmelCase__ )
695
0
"""simple docstring""" import warnings from ...utils import logging from .image_processing_perceiver import PerceiverImageProcessor UpperCAmelCase__ = logging.get_logger(__name__) class lowerCAmelCase__ ( A_ ): def __init__( self : Optional[Any] , *_lowerCamelCase : Dict , **_lowerCamelCase : Optional[Any] ): warnings.warn( '''The class PerceiverFeatureExtractor is deprecated and will be removed in version 5 of Transformers.''' ''' Please use PerceiverImageProcessor instead.''' , _lowerCamelCase , ) super().__init__(*_lowerCamelCase , **_lowerCamelCase )
430
"""simple docstring""" import inspect import unittest import numpy as np from tests.test_modeling_common import floats_tensor from transformers import MaskaFormerConfig, is_torch_available, is_vision_available from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device from transformers.utils import cached_property from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import MaskaFormerForUniversalSegmentation, MaskaFormerModel if is_vision_available(): from transformers import MaskaFormerImageProcessor if is_vision_available(): from PIL import Image class lowerCAmelCase__ : def __init__( self : Dict , _lowerCamelCase : Dict , _lowerCamelCase : Optional[Any]=2 , _lowerCamelCase : Union[str, Any]=True , _lowerCamelCase : Any=False , _lowerCamelCase : Optional[int]=10 , _lowerCamelCase : List[Any]=3 , _lowerCamelCase : str=32 * 8 , _lowerCamelCase : Optional[int]=32 * 8 , _lowerCamelCase : Optional[Any]=4 , _lowerCamelCase : Union[str, Any]=64 , ): _snake_case = parent _snake_case = batch_size _snake_case = is_training _snake_case = use_auxiliary_loss _snake_case = num_queries _snake_case = num_channels _snake_case = min_size _snake_case = max_size _snake_case = num_labels _snake_case = hidden_dim _snake_case = hidden_dim def lowercase ( self : Optional[int] ): _snake_case = floats_tensor([self.batch_size, self.num_channels, self.min_size, self.max_size] ).to( _lowerCamelCase ) _snake_case = torch.ones([self.batch_size, self.min_size, self.max_size] , device=_lowerCamelCase ) _snake_case = ( torch.rand([self.batch_size, self.num_labels, self.min_size, self.max_size] , device=_lowerCamelCase ) > 0.5 ).float() _snake_case = (torch.rand((self.batch_size, self.num_labels) , device=_lowerCamelCase ) > 0.5).long() _snake_case = self.get_config() return config, pixel_values, pixel_mask, mask_labels, class_labels def lowercase ( self : Union[str, Any] ): _snake_case = MaskaFormerConfig( hidden_size=self.hidden_dim , ) _snake_case = self.num_queries _snake_case = self.num_labels _snake_case = [1, 1, 1, 1] _snake_case = self.num_channels _snake_case = 64 _snake_case = 128 _snake_case = self.hidden_dim _snake_case = self.hidden_dim _snake_case = self.hidden_dim return config def lowercase ( self : Union[str, Any] ): _snake_case , _snake_case , _snake_case , _snake_case , _snake_case = self.prepare_config_and_inputs() _snake_case = {'''pixel_values''': pixel_values, '''pixel_mask''': pixel_mask} return config, inputs_dict def lowercase ( self : Any , _lowerCamelCase : Optional[Any] , _lowerCamelCase : Any ): _snake_case = output.encoder_hidden_states _snake_case = output.pixel_decoder_hidden_states _snake_case = output.transformer_decoder_hidden_states self.parent.assertTrue(len(_lowerCamelCase ) , len(config.backbone_config.depths ) ) self.parent.assertTrue(len(_lowerCamelCase ) , len(config.backbone_config.depths ) ) self.parent.assertTrue(len(_lowerCamelCase ) , config.decoder_layers ) def lowercase ( self : Any , _lowerCamelCase : int , _lowerCamelCase : Tuple , _lowerCamelCase : Tuple , _lowerCamelCase : Any=False ): with torch.no_grad(): _snake_case = MaskaFormerModel(config=_lowerCamelCase ) model.to(_lowerCamelCase ) model.eval() _snake_case = model(pixel_values=_lowerCamelCase , pixel_mask=_lowerCamelCase ) _snake_case = model(_lowerCamelCase , output_hidden_states=_lowerCamelCase 
) self.parent.assertEqual( output.transformer_decoder_last_hidden_state.shape , (self.batch_size, self.num_queries, self.hidden_dim) , ) # let's ensure the other two hidden state exists self.parent.assertTrue(output.pixel_decoder_last_hidden_state is not None ) self.parent.assertTrue(output.encoder_last_hidden_state is not None ) if output_hidden_states: self.check_output_hidden_state(_lowerCamelCase , _lowerCamelCase ) def lowercase ( self : str , _lowerCamelCase : str , _lowerCamelCase : Optional[int] , _lowerCamelCase : Tuple , _lowerCamelCase : str , _lowerCamelCase : List[Any] ): _snake_case = MaskaFormerForUniversalSegmentation(config=_lowerCamelCase ) model.to(_lowerCamelCase ) model.eval() def comm_check_on_output(_lowerCamelCase : List[str] ): # let's still check that all the required stuff is there self.parent.assertTrue(result.transformer_decoder_last_hidden_state is not None ) self.parent.assertTrue(result.pixel_decoder_last_hidden_state is not None ) self.parent.assertTrue(result.encoder_last_hidden_state is not None ) # okay, now we need to check the logits shape # due to the encoder compression, masks have a //4 spatial size self.parent.assertEqual( result.masks_queries_logits.shape , (self.batch_size, self.num_queries, self.min_size // 4, self.max_size // 4) , ) # + 1 for null class self.parent.assertEqual( result.class_queries_logits.shape , (self.batch_size, self.num_queries, self.num_labels + 1) ) with torch.no_grad(): _snake_case = model(pixel_values=_lowerCamelCase , pixel_mask=_lowerCamelCase ) _snake_case = model(_lowerCamelCase ) comm_check_on_output(_lowerCamelCase ) _snake_case = model( pixel_values=_lowerCamelCase , pixel_mask=_lowerCamelCase , mask_labels=_lowerCamelCase , class_labels=_lowerCamelCase ) comm_check_on_output(_lowerCamelCase ) self.parent.assertTrue(result.loss is not None ) self.parent.assertEqual(result.loss.shape , torch.Size([1] ) ) @require_torch class lowerCAmelCase__ ( A_ , A_ , unittest.TestCase ): __a = (MaskaFormerModel, MaskaFormerForUniversalSegmentation) if is_torch_available() else () __a = {"""feature-extraction""": MaskaFormerModel} if is_torch_available() else {} __a = False __a = False __a = False __a = False def lowercase ( self : Any ): _snake_case = MaskaFormerModelTester(self ) _snake_case = ConfigTester(self , config_class=_lowerCamelCase , has_text_modality=_lowerCamelCase ) def lowercase ( self : Dict ): self.config_tester.run_common_tests() def lowercase ( self : List[str] ): _snake_case , _snake_case = self.model_tester.prepare_config_and_inputs_for_common() self.model_tester.create_and_check_maskaformer_model(_lowerCamelCase , **_lowerCamelCase , output_hidden_states=_lowerCamelCase ) def lowercase ( self : int ): _snake_case = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_maskaformer_instance_segmentation_head_model(*_lowerCamelCase ) @unittest.skip(reason='''Mask2Former does not use inputs_embeds''' ) def lowercase ( self : Union[str, Any] ): pass @unittest.skip(reason='''Mask2Former does not have a get_input_embeddings method''' ) def lowercase ( self : Optional[Any] ): pass @unittest.skip(reason='''Mask2Former is not a generative model''' ) def lowercase ( self : Optional[Any] ): pass @unittest.skip(reason='''Mask2Former does not use token embeddings''' ) def lowercase ( self : Dict ): pass @require_torch_multi_gpu @unittest.skip( reason='''Mask2Former has some layers using `add_module` which doesn\'t work well with `nn.DataParallel`''' ) def lowercase ( self : Tuple ): pass 
@unittest.skip('''Will be fixed soon by reducing the size of the model used for common tests.''' ) def lowercase ( self : Union[str, Any] ): pass def lowercase ( self : str ): _snake_case , _snake_case = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: _snake_case = model_class(_lowerCamelCase ) _snake_case = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic _snake_case = [*signature.parameters.keys()] _snake_case = ['''pixel_values'''] self.assertListEqual(arg_names[:1] , _lowerCamelCase ) @slow def lowercase ( self : Dict ): for model_name in ["facebook/mask2former-swin-small-coco-instance"]: _snake_case = MaskaFormerModel.from_pretrained(_lowerCamelCase ) self.assertIsNotNone(_lowerCamelCase ) def lowercase ( self : Tuple ): _snake_case = (self.model_tester.min_size,) * 2 _snake_case = { '''pixel_values''': torch.randn((2, 3, *size) , device=_lowerCamelCase ), '''mask_labels''': torch.randn((2, 10, *size) , device=_lowerCamelCase ), '''class_labels''': torch.zeros(2 , 10 , device=_lowerCamelCase ).long(), } _snake_case = self.model_tester.get_config() _snake_case = MaskaFormerForUniversalSegmentation(_lowerCamelCase ).to(_lowerCamelCase ) _snake_case = model(**_lowerCamelCase ) self.assertTrue(outputs.loss is not None ) def lowercase ( self : List[str] ): _snake_case , _snake_case = self.model_tester.prepare_config_and_inputs_for_common() self.model_tester.create_and_check_maskaformer_model(_lowerCamelCase , **_lowerCamelCase , output_hidden_states=_lowerCamelCase ) def lowercase ( self : Any ): _snake_case , _snake_case = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: _snake_case = model_class(_lowerCamelCase ).to(_lowerCamelCase ) _snake_case = model(**_lowerCamelCase , output_attentions=_lowerCamelCase ) self.assertTrue(outputs.attentions is not None ) def lowercase ( self : int ): if not self.model_tester.is_training: return _snake_case = self.all_model_classes[1] _snake_case , _snake_case , _snake_case , _snake_case , _snake_case = self.model_tester.prepare_config_and_inputs() _snake_case = model_class(_lowerCamelCase ) model.to(_lowerCamelCase ) model.train() _snake_case = model(_lowerCamelCase , mask_labels=_lowerCamelCase , class_labels=_lowerCamelCase ).loss loss.backward() def lowercase ( self : Dict ): _snake_case = self.all_model_classes[1] _snake_case , _snake_case , _snake_case , _snake_case , _snake_case = self.model_tester.prepare_config_and_inputs() _snake_case = True _snake_case = True _snake_case = model_class(_lowerCamelCase ).to(_lowerCamelCase ) model.train() _snake_case = model(_lowerCamelCase , mask_labels=_lowerCamelCase , class_labels=_lowerCamelCase ) _snake_case = outputs.encoder_hidden_states[0] encoder_hidden_states.retain_grad() _snake_case = outputs.pixel_decoder_hidden_states[0] pixel_decoder_hidden_states.retain_grad() _snake_case = outputs.transformer_decoder_hidden_states[0] transformer_decoder_hidden_states.retain_grad() _snake_case = outputs.attentions[0] attentions.retain_grad() outputs.loss.backward(retain_graph=_lowerCamelCase ) self.assertIsNotNone(encoder_hidden_states.grad ) self.assertIsNotNone(pixel_decoder_hidden_states.grad ) self.assertIsNotNone(transformer_decoder_hidden_states.grad ) self.assertIsNotNone(attentions.grad ) UpperCAmelCase__ = 1e-4 def _UpperCAmelCase ( ) -> int: _snake_case = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' ) return 
image @require_vision @slow class lowerCAmelCase__ ( unittest.TestCase ): @cached_property def lowercase ( self : Tuple ): return "facebook/mask2former-swin-small-coco-instance" @cached_property def lowercase ( self : Optional[Any] ): return MaskaFormerImageProcessor.from_pretrained(self.model_checkpoints ) if is_vision_available() else None def lowercase ( self : Any ): _snake_case = MaskaFormerModel.from_pretrained(self.model_checkpoints ).to(_lowerCamelCase ) _snake_case = self.default_image_processor _snake_case = prepare_img() _snake_case = image_processor(_lowerCamelCase , return_tensors='''pt''' ).to(_lowerCamelCase ) _snake_case = inputs['''pixel_values'''].shape # check size is divisible by 32 self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 ) # check size self.assertEqual(_lowerCamelCase , (1, 3, 384, 384) ) with torch.no_grad(): _snake_case = model(**_lowerCamelCase ) _snake_case = torch.tensor( [[-0.2_7_9_0, -1.0_7_1_7, -1.1_6_6_8], [-0.5_1_2_8, -0.3_1_2_8, -0.4_9_8_7], [-0.5_8_3_2, 0.1_9_7_1, -0.0_1_9_7]] ).to(_lowerCamelCase ) self.assertTrue( torch.allclose( outputs.encoder_last_hidden_state[0, 0, :3, :3] , _lowerCamelCase , atol=_lowerCamelCase ) ) _snake_case = torch.tensor( [[0.8_9_7_3, 1.1_8_4_7, 1.1_7_7_6], [1.1_9_3_4, 1.5_0_4_0, 1.5_1_2_8], [1.1_1_5_3, 1.4_4_8_6, 1.4_9_5_1]] ).to(_lowerCamelCase ) self.assertTrue( torch.allclose( outputs.pixel_decoder_last_hidden_state[0, 0, :3, :3] , _lowerCamelCase , atol=_lowerCamelCase ) ) _snake_case = torch.tensor( [[2.1_1_5_2, 1.7_0_0_0, -0.8_6_0_3], [1.5_8_0_8, 1.8_0_0_4, -0.9_3_5_3], [1.6_0_4_3, 1.7_4_9_5, -0.5_9_9_9]] ).to(_lowerCamelCase ) self.assertTrue( torch.allclose( outputs.transformer_decoder_last_hidden_state[0, :3, :3] , _lowerCamelCase , atol=_lowerCamelCase ) ) def lowercase ( self : List[Any] ): _snake_case = MaskaFormerForUniversalSegmentation.from_pretrained(self.model_checkpoints ).to(_lowerCamelCase ).eval() _snake_case = self.default_image_processor _snake_case = prepare_img() _snake_case = image_processor(_lowerCamelCase , return_tensors='''pt''' ).to(_lowerCamelCase ) _snake_case = inputs['''pixel_values'''].shape # check size is divisible by 32 self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 ) # check size self.assertEqual(_lowerCamelCase , (1, 3, 384, 384) ) with torch.no_grad(): _snake_case = model(**_lowerCamelCase ) # masks_queries_logits _snake_case = outputs.masks_queries_logits self.assertEqual( masks_queries_logits.shape , (1, model.config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4) ) _snake_case = [ [-8.7_8_3_9, -9.0_0_5_6, -8.8_1_2_1], [-7.4_1_0_4, -7.0_3_1_3, -6.5_4_0_1], [-6.6_1_0_5, -6.3_4_2_7, -6.4_6_7_5], ] _snake_case = torch.tensor(_lowerCamelCase ).to(_lowerCamelCase ) self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3] , _lowerCamelCase , atol=_lowerCamelCase ) ) # class_queries_logits _snake_case = outputs.class_queries_logits self.assertEqual(class_queries_logits.shape , (1, model.config.num_queries, model.config.num_labels + 1) ) _snake_case = torch.tensor( [ [1.8_3_2_4, -8.0_8_3_5, -4.1_9_2_2], [0.8_4_5_0, -9.0_0_5_0, -3.6_0_5_3], [0.3_0_4_5, -7.7_2_9_3, -3.0_2_7_5], ] ).to(_lowerCamelCase ) self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3] , _lowerCamelCase , atol=_lowerCamelCase ) ) def lowercase ( self : List[str] ): _snake_case = MaskaFormerForUniversalSegmentation.from_pretrained(self.model_checkpoints ).to(_lowerCamelCase ).eval() _snake_case = 
self.default_image_processor _snake_case = image_processor( [np.zeros((3, 800, 1333) ), np.zeros((3, 800, 1333) )] , segmentation_maps=[np.zeros((384, 384) ).astype(np.floataa ), np.zeros((384, 384) ).astype(np.floataa )] , return_tensors='''pt''' , ) _snake_case = inputs['''pixel_values'''].to(_lowerCamelCase ) _snake_case = [el.to(_lowerCamelCase ) for el in inputs['''mask_labels''']] _snake_case = [el.to(_lowerCamelCase ) for el in inputs['''class_labels''']] with torch.no_grad(): _snake_case = model(**_lowerCamelCase ) self.assertTrue(outputs.loss is not None )
430
1
'''simple docstring''' import argparse import os import re import tensorflow as tf import torch from transformers import BertConfig, BertModel from transformers.utils import logging logging.set_verbosity_info() _lowerCamelCase : Dict = logging.get_logger(__name__) def __lowerCamelCase ( A__ , A__ , A__ ) -> Optional[int]: """simple docstring""" UpperCamelCase = os.path.abspath(A__ ) logger.info(F"""Converting TensorFlow checkpoint from {tf_path}""" ) # Load weights from TF model UpperCamelCase = tf.train.list_variables(A__ ) UpperCamelCase = [] UpperCamelCase = [] UpperCamelCase = [] for full_name, shape in init_vars: # logger.info(f"Loading TF weight {name} with shape {shape}") UpperCamelCase = full_name.split('/' ) if full_name == "_CHECKPOINTABLE_OBJECT_GRAPH" or name[0] in ["global_step", "save_counter"]: logger.info(F"""Skipping non-model layer {full_name}""" ) continue if "optimizer" in full_name: logger.info(F"""Skipping optimization layer {full_name}""" ) continue if name[0] == "model": # ignore initial 'model' UpperCamelCase = name[1:] # figure out how many levels deep the name is UpperCamelCase = 0 for _name in name: if _name.startswith('layer_with_weights' ): depth += 1 else: break layer_depth.append(A__ ) # read data UpperCamelCase = tf.train.load_variable(A__ , A__ ) names.append('/'.join(A__ ) ) arrays.append(A__ ) logger.info(F"""Read a total of {len(A__ ):,} layers""" ) # Sanity check if len(set(A__ ) ) != 1: raise ValueError(F"""Found layer names with different depths (layer depth {list(set(A__ ) )})""" ) UpperCamelCase = list(set(A__ ) )[0] if layer_depth != 1: raise ValueError( 'The model contains more than just the embedding/encoder layers. This script does not handle MLM/NSP' ' heads.' ) # convert layers logger.info('Converting weights...' 
) for full_name, array in zip(A__ , A__ ): UpperCamelCase = full_name.split('/' ) UpperCamelCase = model UpperCamelCase = [] for i, m_name in enumerate(A__ ): if m_name == ".ATTRIBUTES": # variable names end with .ATTRIBUTES/VARIABLE_VALUE break if m_name.startswith('layer_with_weights' ): UpperCamelCase = int(m_name.split('-' )[-1] ) if layer_num <= 2: # embedding layers # layer_num 0: word_embeddings # layer_num 1: position_embeddings # layer_num 2: token_type_embeddings continue elif layer_num == 3: # embedding LayerNorm trace.extend(['embeddings', 'LayerNorm'] ) UpperCamelCase = getattr(A__ , 'embeddings' ) UpperCamelCase = getattr(A__ , 'LayerNorm' ) elif layer_num > 3 and layer_num < config.num_hidden_layers + 4: # encoder layers trace.extend(['encoder', 'layer', str(layer_num - 4 )] ) UpperCamelCase = getattr(A__ , 'encoder' ) UpperCamelCase = getattr(A__ , 'layer' ) UpperCamelCase = pointer[layer_num - 4] elif layer_num == config.num_hidden_layers + 4: # pooler layer trace.extend(['pooler', 'dense'] ) UpperCamelCase = getattr(A__ , 'pooler' ) UpperCamelCase = getattr(A__ , 'dense' ) elif m_name == "embeddings": trace.append('embeddings' ) UpperCamelCase = getattr(A__ , 'embeddings' ) if layer_num == 0: trace.append('word_embeddings' ) UpperCamelCase = getattr(A__ , 'word_embeddings' ) elif layer_num == 1: trace.append('position_embeddings' ) UpperCamelCase = getattr(A__ , 'position_embeddings' ) elif layer_num == 2: trace.append('token_type_embeddings' ) UpperCamelCase = getattr(A__ , 'token_type_embeddings' ) else: raise ValueError(F"""Unknown embedding layer with name {full_name}""" ) trace.append('weight' ) UpperCamelCase = getattr(A__ , 'weight' ) elif m_name == "_attention_layer": # self-attention layer trace.extend(['attention', 'self'] ) UpperCamelCase = getattr(A__ , 'attention' ) UpperCamelCase = getattr(A__ , 'self' ) elif m_name == "_attention_layer_norm": # output attention norm trace.extend(['attention', 'output', 'LayerNorm'] ) UpperCamelCase = getattr(A__ , 'attention' ) UpperCamelCase = getattr(A__ , 'output' ) UpperCamelCase = getattr(A__ , 'LayerNorm' ) elif m_name == "_attention_output_dense": # output attention dense trace.extend(['attention', 'output', 'dense'] ) UpperCamelCase = getattr(A__ , 'attention' ) UpperCamelCase = getattr(A__ , 'output' ) UpperCamelCase = getattr(A__ , 'dense' ) elif m_name == "_output_dense": # output dense trace.extend(['output', 'dense'] ) UpperCamelCase = getattr(A__ , 'output' ) UpperCamelCase = getattr(A__ , 'dense' ) elif m_name == "_output_layer_norm": # output dense trace.extend(['output', 'LayerNorm'] ) UpperCamelCase = getattr(A__ , 'output' ) UpperCamelCase = getattr(A__ , 'LayerNorm' ) elif m_name == "_key_dense": # attention key trace.append('key' ) UpperCamelCase = getattr(A__ , 'key' ) elif m_name == "_query_dense": # attention query trace.append('query' ) UpperCamelCase = getattr(A__ , 'query' ) elif m_name == "_value_dense": # attention value trace.append('value' ) UpperCamelCase = getattr(A__ , 'value' ) elif m_name == "_intermediate_dense": # attention intermediate dense trace.extend(['intermediate', 'dense'] ) UpperCamelCase = getattr(A__ , 'intermediate' ) UpperCamelCase = getattr(A__ , 'dense' ) elif m_name == "_output_layer_norm": # output layer norm trace.append('output' ) UpperCamelCase = getattr(A__ , 'output' ) # weights & biases elif m_name in ["bias", "beta"]: trace.append('bias' ) UpperCamelCase = getattr(A__ , 'bias' ) elif m_name in ["kernel", "gamma"]: trace.append('weight' ) UpperCamelCase = 
getattr(A__ , 'weight' ) else: logger.warning(F"""Ignored {m_name}""" ) # for certain layers reshape is necessary UpperCamelCase = '.'.join(A__ ) if re.match(R'(\S+)\.attention\.self\.(key|value|query)\.(bias|weight)' , A__ ) or re.match( R'(\S+)\.attention\.output\.dense\.weight' , A__ ): UpperCamelCase = array.reshape(pointer.data.shape ) if "kernel" in full_name: UpperCamelCase = array.transpose() if pointer.shape == array.shape: UpperCamelCase = torch.from_numpy(A__ ) else: raise ValueError( F"""Shape mismatch in layer {full_name}: Model expects shape {pointer.shape} but layer contains shape:""" F""" {array.shape}""" ) logger.info(F"""Successfully set variable {full_name} to PyTorch layer {trace}""" ) return model def __lowerCamelCase ( A__ , A__ , A__ ) -> Tuple: """simple docstring""" # Instantiate model logger.info(F"""Loading model based on config from {config_path}...""" ) UpperCamelCase = BertConfig.from_json_file(A__ ) UpperCamelCase = BertModel(A__ ) # Load weights from checkpoint logger.info(F"""Loading weights from checkpoint {tf_checkpoint_path}...""" ) load_tfa_weights_in_bert(A__ , A__ , A__ ) # Save pytorch-model logger.info(F"""Saving PyTorch model to {pytorch_dump_path}...""" ) torch.save(model.state_dict() , A__ ) if __name__ == "__main__": _lowerCamelCase : Optional[int] = argparse.ArgumentParser() parser.add_argument( "--tf_checkpoint_path", type=str, required=True, help="Path to the TensorFlow 2.x checkpoint path." ) parser.add_argument( "--bert_config_file", type=str, required=True, help="The config json file corresponding to the BERT model. This specifies the model architecture.", ) parser.add_argument( "--pytorch_dump_path", type=str, required=True, help="Path to the output PyTorch model (must include filename).", ) _lowerCamelCase : str = parser.parse_args() convert_tfa_checkpoint_to_pytorch(args.tf_checkpoint_path, args.bert_config_file, args.pytorch_dump_path)
430
'''simple docstring''' import shutil import tempfile import unittest import numpy as np import pytest from transformers.testing_utils import require_vision from transformers.utils import is_vision_available if is_vision_available(): from PIL import Image from transformers import ( AutoProcessor, BertTokenizerFast, BlipImageProcessor, GPTaTokenizer, InstructBlipProcessor, PreTrainedTokenizerFast, ) @require_vision class SCREAMING_SNAKE_CASE ( unittest.TestCase ): """simple docstring""" def A ( self : Dict ): """simple docstring""" UpperCamelCase = tempfile.mkdtemp() UpperCamelCase = BlipImageProcessor() UpperCamelCase = GPTaTokenizer.from_pretrained('hf-internal-testing/tiny-random-GPT2Model' ) UpperCamelCase = BertTokenizerFast.from_pretrained('hf-internal-testing/tiny-random-bert' ) UpperCamelCase = InstructBlipProcessor(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) processor.save_pretrained(self.tmpdirname ) def A ( self : Tuple , **UpperCamelCase__ : str ): """simple docstring""" return AutoProcessor.from_pretrained(self.tmpdirname , **UpperCamelCase__ ).tokenizer def A ( self : Any , **UpperCamelCase__ : Tuple ): """simple docstring""" return AutoProcessor.from_pretrained(self.tmpdirname , **UpperCamelCase__ ).image_processor def A ( self : List[Any] , **UpperCamelCase__ : Optional[Any] ): """simple docstring""" return AutoProcessor.from_pretrained(self.tmpdirname , **UpperCamelCase__ ).qformer_tokenizer def A ( self : Any ): """simple docstring""" shutil.rmtree(self.tmpdirname ) def A ( self : List[Any] ): """simple docstring""" UpperCamelCase = [np.random.randint(2_5_5 , size=(3, 3_0, 4_0_0) , dtype=np.uinta )] UpperCamelCase = [Image.fromarray(np.moveaxis(UpperCamelCase__ , 0 , -1 ) ) for x in image_inputs] return image_inputs def A ( self : Any ): """simple docstring""" UpperCamelCase = InstructBlipProcessor( tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() , qformer_tokenizer=self.get_qformer_tokenizer() , ) processor.save_pretrained(self.tmpdirname ) UpperCamelCase = self.get_tokenizer(bos_token='(BOS)' , eos_token='(EOS)' ) UpperCamelCase = self.get_image_processor(do_normalize=UpperCamelCase__ , padding_value=1.0 ) UpperCamelCase = InstructBlipProcessor.from_pretrained( self.tmpdirname , bos_token='(BOS)' , eos_token='(EOS)' , do_normalize=UpperCamelCase__ , padding_value=1.0 ) self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() ) self.assertIsInstance(processor.tokenizer , UpperCamelCase__ ) self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() ) self.assertIsInstance(processor.image_processor , UpperCamelCase__ ) self.assertIsInstance(processor.qformer_tokenizer , UpperCamelCase__ ) def A ( self : Tuple ): """simple docstring""" UpperCamelCase = self.get_image_processor() UpperCamelCase = self.get_tokenizer() UpperCamelCase = self.get_qformer_tokenizer() UpperCamelCase = InstructBlipProcessor( tokenizer=UpperCamelCase__ , image_processor=UpperCamelCase__ , qformer_tokenizer=UpperCamelCase__ ) UpperCamelCase = self.prepare_image_inputs() UpperCamelCase = image_processor(UpperCamelCase__ , return_tensors='np' ) UpperCamelCase = processor(images=UpperCamelCase__ , return_tensors='np' ) for key in input_feat_extract.keys(): self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1E-2 ) def A ( self : Optional[int] ): """simple docstring""" UpperCamelCase = self.get_image_processor() UpperCamelCase = self.get_tokenizer() 
UpperCamelCase = self.get_qformer_tokenizer() UpperCamelCase = InstructBlipProcessor( tokenizer=UpperCamelCase__ , image_processor=UpperCamelCase__ , qformer_tokenizer=UpperCamelCase__ ) UpperCamelCase = 'lower newer' UpperCamelCase = processor(text=UpperCamelCase__ ) UpperCamelCase = tokenizer(UpperCamelCase__ , return_token_type_ids=UpperCamelCase__ ) UpperCamelCase = qformer_tokenizer(UpperCamelCase__ , return_token_type_ids=UpperCamelCase__ ) for key in encoded_tokens.keys(): self.assertListEqual(encoded_tokens[key] , encoded_processor[key] ) for key in encoded_tokens_qformer.keys(): self.assertListEqual(encoded_tokens_qformer[key] , encoded_processor['qformer_' + key] ) def A ( self : Optional[Any] ): """simple docstring""" UpperCamelCase = self.get_image_processor() UpperCamelCase = self.get_tokenizer() UpperCamelCase = self.get_qformer_tokenizer() UpperCamelCase = InstructBlipProcessor( tokenizer=UpperCamelCase__ , image_processor=UpperCamelCase__ , qformer_tokenizer=UpperCamelCase__ ) UpperCamelCase = 'lower newer' UpperCamelCase = self.prepare_image_inputs() UpperCamelCase = processor(text=UpperCamelCase__ , images=UpperCamelCase__ ) self.assertListEqual( list(inputs.keys() ) , ['input_ids', 'attention_mask', 'qformer_input_ids', 'qformer_attention_mask', 'pixel_values'] , ) # test if it raises when no input is passed with pytest.raises(UpperCamelCase__ ): processor() def A ( self : List[Any] ): """simple docstring""" UpperCamelCase = self.get_image_processor() UpperCamelCase = self.get_tokenizer() UpperCamelCase = self.get_qformer_tokenizer() UpperCamelCase = InstructBlipProcessor( tokenizer=UpperCamelCase__ , image_processor=UpperCamelCase__ , qformer_tokenizer=UpperCamelCase__ ) UpperCamelCase = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]] UpperCamelCase = processor.batch_decode(UpperCamelCase__ ) UpperCamelCase = tokenizer.batch_decode(UpperCamelCase__ ) self.assertListEqual(UpperCamelCase__ , UpperCamelCase__ ) def A ( self : Optional[int] ): """simple docstring""" UpperCamelCase = self.get_image_processor() UpperCamelCase = self.get_tokenizer() UpperCamelCase = self.get_qformer_tokenizer() UpperCamelCase = InstructBlipProcessor( tokenizer=UpperCamelCase__ , image_processor=UpperCamelCase__ , qformer_tokenizer=UpperCamelCase__ ) UpperCamelCase = 'lower newer' UpperCamelCase = self.prepare_image_inputs() UpperCamelCase = processor(text=UpperCamelCase__ , images=UpperCamelCase__ ) self.assertListEqual( list(inputs.keys() ) , ['input_ids', 'attention_mask', 'qformer_input_ids', 'qformer_attention_mask', 'pixel_values'] , )
430
1
def solution(numerator: int = 3, denominator: int = 7, limit: int = 1_000_000) -> int:
    """
    Project Euler problem 71: find the numerator of the reduced proper fraction
    immediately to the left of numerator/denominator among fractions with
    denominators up to ``limit``.
    """
    max_numerator = 0
    max_denominator = 1
    for current_denominator in range(1, limit + 1):
        current_numerator = current_denominator * numerator // denominator
        if current_denominator % denominator == 0:
            # an exact multiple would equal numerator/denominator; step one below it
            current_numerator -= 1
        if current_numerator * max_denominator > current_denominator * max_numerator:
            max_numerator = current_numerator
            max_denominator = current_denominator
    return max_numerator


if __name__ == "__main__":
    print(solution(numerator=3, denominator=7, limit=1_000_000))
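A quick check of the restored `solution` above (a minimal sketch; the small limit and expected value are illustrative assumptions worked out by hand, not taken from the source):

# for denominators up to 8, the fraction immediately left of 3/7 is 2/5
assert solution(numerator=3, denominator=7, limit=8) == 2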
669
import math
import random
from typing import Any

from .hill_climbing import SearchProblem


def simulated_annealing(
    search_prob,
    find_max: bool = True,
    max_x: float = math.inf,
    min_x: float = -math.inf,
    max_y: float = math.inf,
    min_y: float = -math.inf,
    visualization: bool = False,
    start_temperate: float = 100,
    rate_of_decrease: float = 0.01,
    threshold_temp: float = 1,
) -> Any:
    """Simulated annealing search over a SearchProblem, returning the best state found."""
    search_end = False
    current_state = search_prob
    current_temp = start_temperate
    scores = []
    iterations = 0
    best_state = None

    while not search_end:
        current_score = current_state.score()
        if best_state is None or current_score > best_state.score():
            best_state = current_state
        scores.append(current_score)
        iterations += 1
        next_state = None
        neighbors = current_state.get_neighbors()
        while (
            next_state is None and neighbors
        ):  # till we do not find a neighbor that we can move to
            index = random.randint(0, len(neighbors) - 1)  # picking a random neighbor
            picked_neighbor = neighbors.pop(index)
            change = picked_neighbor.score() - current_score

            if (
                picked_neighbor.x > max_x
                or picked_neighbor.x < min_x
                or picked_neighbor.y > max_y
                or picked_neighbor.y < min_y
            ):
                continue  # neighbor outside our bounds

            if not find_max:
                change = change * -1  # in case we are finding minimum
            if change > 0:  # improves the solution
                next_state = picked_neighbor
            else:
                probability = (math.e) ** (
                    change / current_temp
                )  # probability generation function
                if random.random() < probability:  # random number within probability
                    next_state = picked_neighbor
        current_temp = current_temp - (current_temp * rate_of_decrease)

        if current_temp < threshold_temp or next_state is None:
            # temperature below threshold, or could not find a suitable neighbor
            search_end = True
        else:
            current_state = next_state

    if visualization:
        from matplotlib import pyplot as plt

        plt.plot(range(iterations), scores)
        plt.xlabel("Iterations")
        plt.ylabel("Function values")
        plt.show()
    return best_state


if __name__ == "__main__":

    def test_f1(x, y):
        return (x**2) + (y**2)

    # starting the problem with initial coordinates (12, 47)
    prob = SearchProblem(x=12, y=47, step_size=1, function_to_optimize=test_f1)
    local_min = simulated_annealing(
        prob, find_max=False, max_x=100, min_x=5, max_y=50, min_y=-5, visualization=True
    )
    print(
        "The minimum score for f(x, y) = x^2 + y^2 with the domain 100 > x > 5 "
        f"and 50 > y > - 5 found via hill climbing: {local_min.score()}"
    )

    # starting the problem with initial coordinates (12, 47)
    prob = SearchProblem(x=12, y=47, step_size=1, function_to_optimize=test_f1)
    local_min = simulated_annealing(
        prob, find_max=True, max_x=100, min_x=5, max_y=50, min_y=-5, visualization=True
    )
    print(
        "The maximum score for f(x, y) = x^2 + y^2 with the domain 100 > x > 5 "
        f"and 50 > y > - 5 found via hill climbing: {local_min.score()}"
    )

    def test_f2(x, y):
        return (3 * x**2) - (6 * y)

    prob = SearchProblem(x=3, y=4, step_size=1, function_to_optimize=test_f2)
    local_min = simulated_annealing(prob, find_max=False, visualization=True)
    print(
        "The minimum score for f(x, y) = 3*x^2 - 6*y found via hill climbing: "
        f"{local_min.score()}"
    )

    prob = SearchProblem(x=3, y=4, step_size=1, function_to_optimize=test_f2)
    local_min = simulated_annealing(prob, find_max=True, visualization=True)
    print(
        "The maximum score for f(x, y) = 3*x^2 - 6*y found via hill climbing: "
        f"{local_min.score()}"
    )
669
1
"""
Minimum cost to travel on every given day of the year, choosing between
1-day, 7-day and 30-day passes (the "Minimum Cost For Tickets" problem).
"""
import functools


def minimum_tickets_cost(days: list[int], costs: list[int]) -> int:
    """Return the minimum total cost of passes covering all travel days."""
    # Validation
    if not isinstance(days, list) or not all(isinstance(day, int) for day in days):
        raise ValueError("The parameter days should be a list of integers")
    if len(costs) != 3 or not all(isinstance(cost, int) for cost in costs):
        raise ValueError("The parameter costs should be a list of three integers")
    if len(days) == 0:
        return 0
    if min(days) <= 0:
        raise ValueError("All days elements should be greater than 0")
    if max(days) >= 366:
        raise ValueError("All days elements should be less than 366")

    days_set = set(days)

    @functools.cache
    def dynamic_programming(index: int) -> int:
        if index > 365:
            return 0
        if index not in days_set:
            return dynamic_programming(index + 1)
        return min(
            costs[0] + dynamic_programming(index + 1),
            costs[1] + dynamic_programming(index + 7),
            costs[2] + dynamic_programming(index + 30),
        )

    return dynamic_programming(1)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
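A minimal usage sketch for the function above. The name `minimum_tickets_cost` is a restored identifier chosen during cleanup, and the sample input is the standard travel-days example, with the result verified by hand:

# 1-day pass for day 1 (2) + 7-day pass covering days 4-8 (7) + 1-day pass for day 20 (2) = 11
print(minimum_tickets_cost([1, 4, 6, 7, 8, 20], [2, 7, 15]))  # 11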
517
"""
Project Euler problem 551: Sum of digits sequence.

a(1) = 1; for n > 1, a(n) = a(n-1) + digitsum(a(n-1)). Find a(10**15).
"""
ks = range(2, 20 + 1)
base = [10**k for k in range(ks[-1] + 1)]
memo: dict[int, dict[int, list[list[int]]]] = {}


def next_term(a_i, k, i, n):
    """
    Advance the sequence by a cached "jump", where the current term is encoded
    as the little-endian digit list ``a_i`` (a_i -> b * 10^k + c).
    """
    # ds_b -> digitsum(b)
    ds_b = sum(a_i[j] for j in range(k, len(a_i)))
    c = sum(a_i[j] * base[j] for j in range(min(len(a_i), k)))

    diff, dn = 0, 0
    max_dn = n - i

    sub_memo = memo.get(ds_b)

    if sub_memo is not None:
        jumps = sub_memo.get(c)

        if jumps is not None and len(jumps) > 0:
            # find and make the largest jump without going over
            max_jump = -1
            for _k in range(len(jumps) - 1, -1, -1):
                if jumps[_k][2] <= k and jumps[_k][1] <= max_dn:
                    max_jump = _k
                    break

            if max_jump >= 0:
                diff, dn, _kk = jumps[max_jump]
                # since the difference between jumps is cached, add c
                new_c = diff + c
                for j in range(min(k, len(a_i))):
                    new_c, a_i[j] = divmod(new_c, 10)
                if new_c > 0:
                    add(a_i, k, new_c)
        else:
            sub_memo[c] = []
    else:
        sub_memo = {c: []}
        memo[ds_b] = sub_memo

    if dn >= max_dn or c + diff >= base[k]:
        return diff, dn

    if k > ks[0]:
        while True:
            # keep doing smaller jumps
            _diff, terms_jumped = next_term(a_i, k - 1, i + dn, n)
            diff += _diff
            dn += terms_jumped

            if dn >= max_dn or c + diff >= base[k]:
                break
    else:
        # would be too small a jump, just compute sequential terms instead
        _diff, terms_jumped = compute(a_i, k, i + dn, n)
        diff += _diff
        dn += terms_jumped

    jumps = sub_memo[c]

    # keep jumps sorted by # of terms skipped
    j = 0
    while j < len(jumps):
        if jumps[j][1] > dn:
            break
        j += 1

    # cache the jump for this value digitsum(b) and c
    sub_memo[c].insert(j, (diff, dn, k))
    return (diff, dn)


def compute(a_i, k, i, n):
    """Compute sequential terms directly, updating the digit list in place."""
    if i >= n:
        return 0, i
    if k > len(a_i):
        a_i.extend([0 for _ in range(k - len(a_i))])

    # note: a_i -> b * 10^k + c
    # ds_b -> digitsum(b)
    # ds_c -> digitsum(c)
    start_i = i
    ds_b, ds_c, diff = 0, 0, 0
    for j in range(len(a_i)):
        if j >= k:
            ds_b += a_i[j]
        else:
            ds_c += a_i[j]

    while i < n:
        i += 1
        addend = ds_c + ds_b
        diff += addend
        ds_c = 0
        for j in range(k):
            s = a_i[j] + addend
            addend, a_i[j] = divmod(s, 10)
            ds_c += a_i[j]

        if addend > 0:
            break

    if addend > 0:
        add(a_i, k, addend)
    return diff, i - start_i


def add(digits, k, addend):
    """Add ``addend`` into the digit list starting at position ``k``."""
    for j in range(k, len(digits)):
        s = digits[j] + addend
        if s >= 10:
            quotient, digits[j] = divmod(s, 10)
            addend = addend // 10 + quotient
        else:
            digits[j] = s
            addend = addend // 10

        if addend == 0:
            break

    while addend > 0:
        addend, digit = divmod(addend, 10)
        digits.append(digit)


def solution(n: int = 10**15) -> int:
    """Return a(n) of the digit-sum sequence, starting from a(1) = 1."""
    digits = [1]
    i = 1
    dn = 0  # terms jumped so far
    while True:
        diff, terms_jumped = next_term(digits, 20, i + dn, n)
        dn += terms_jumped
        if dn == n - i:
            break

    a_n = 0
    for j in range(len(digits)):
        a_n += digits[j] * 10**j
    return a_n


if __name__ == "__main__":
    print(f"{solution() = }")
541
0
from __future__ import annotations

__author__ = "Muhammad Umer Farooq"
__license__ = "MIT"
__version__ = "1.0.0"
__maintainer__ = "Muhammad Umer Farooq"
__email__ = "[email protected]"
__status__ = "Alpha"

import re
from html.parser import HTMLParser
from urllib import parse

import requests


class Parser(HTMLParser):
    def __init__(self, domain: str) -> None:
        super().__init__()
        self.urls: list[str] = []
        self.domain = domain

    def handle_starttag(self, tag: str, attrs: list[tuple[str, str | None]]) -> None:
        # Only parse the 'anchor' tag.
        if tag == "a":
            # Check the list of defined attributes.
            for name, value in attrs:
                # If href is defined, and not empty nor # print it.
                if name == "href" and value != "#" and value != "":
                    # If not already in urls.
                    if value not in self.urls:
                        url = parse.urljoin(self.domain, value)
                        self.urls.append(url)


# Get main domain name (example.com)
def get_domain_name(url: str) -> str:
    return ".".join(get_sub_domain_name(url).split(".")[-2:])


# Get sub domain name (sub.example.com)
def get_sub_domain_name(url: str) -> str:
    return parse.urlparse(url).netloc


def emails_from_url(url: str = "https://github.com") -> list[str]:
    domain = get_domain_name(url)

    # Initialize the parser
    parser = Parser(domain)

    try:
        # Open URL
        r = requests.get(url)

        # pass the raw HTML to the parser to get links
        parser.feed(r.text)

        # Get links and loop through
        valid_emails = set()
        for link in parser.urls:
            # open URL.
            # read = requests.get(link)
            try:
                read = requests.get(link)
                # Get the valid email.
                emails = re.findall("[a-zA-Z0-9]+@" + domain, read.text)
                # If not in list then append it.
                for email in emails:
                    valid_emails.add(email)
            except ValueError:
                pass
    except ValueError:
        raise SystemExit(1)

    # Finally return a sorted list of email addresses with no duplicates.
    return sorted(valid_emails)


if __name__ == "__main__":
    emails = emails_from_url("https://github.com")
    print(f"{len(emails)} emails found:")
    print("\n".join(sorted(emails)))
707
from __future__ import annotations


def generate_all_permutations(sequence: list[int | str]) -> None:
    create_state_space_tree(sequence, [], 0, [0 for i in range(len(sequence))])


def create_state_space_tree(
    sequence: list[int | str],
    current_sequence: list[int | str],
    index: int,
    index_used: list[int],
) -> None:
    """Build a state space tree by backtracking, printing one permutation per leaf."""
    if index == len(sequence):
        print(current_sequence)
        return

    for i in range(len(sequence)):
        if not index_used[i]:
            current_sequence.append(sequence[i])
            index_used[i] = True
            create_state_space_tree(sequence, current_sequence, index + 1, index_used)
            current_sequence.pop()
            index_used[i] = False


sequence: list[int | str] = [3, 1, 2, 4]
generate_all_permutations(sequence)

sequence_a: list[int | str] = ["A", "B", "C"]
generate_all_permutations(sequence_a)
689
0
import argparse from collections import OrderedDict from pathlib import Path import requests import torch from PIL import Image from transformers import GLPNConfig, GLPNForDepthEstimation, GLPNImageProcessor from transformers.utils import logging logging.set_verbosity_info() a = logging.get_logger(__name__) def _SCREAMING_SNAKE_CASE ( snake_case ) -> Union[str, Any]: _UpperCAmelCase = OrderedDict() for key, value in state_dict.items(): if key.startswith("""module.encoder""" ): _UpperCAmelCase = key.replace("""module.encoder""" , """glpn.encoder""" ) if key.startswith("""module.decoder""" ): _UpperCAmelCase = key.replace("""module.decoder""" , """decoder.stages""" ) if "patch_embed" in key: # replace for example patch_embed1 by patch_embeddings.0 _UpperCAmelCase = key[key.find("""patch_embed""" ) + len("""patch_embed""" )] _UpperCAmelCase = key.replace(f"patch_embed{idx}" , f"patch_embeddings.{int(snake_case )-1}" ) if "norm" in key: _UpperCAmelCase = key.replace("""norm""" , """layer_norm""" ) if "glpn.encoder.layer_norm" in key: # replace for example layer_norm1 by layer_norm.0 _UpperCAmelCase = key[key.find("""glpn.encoder.layer_norm""" ) + len("""glpn.encoder.layer_norm""" )] _UpperCAmelCase = key.replace(f"layer_norm{idx}" , f"layer_norm.{int(snake_case )-1}" ) if "layer_norm1" in key: _UpperCAmelCase = key.replace("""layer_norm1""" , """layer_norm_1""" ) if "layer_norm2" in key: _UpperCAmelCase = key.replace("""layer_norm2""" , """layer_norm_2""" ) if "block" in key: # replace for example block1 by block.0 _UpperCAmelCase = key[key.find("""block""" ) + len("""block""" )] _UpperCAmelCase = key.replace(f"block{idx}" , f"block.{int(snake_case )-1}" ) if "attn.q" in key: _UpperCAmelCase = key.replace("""attn.q""" , """attention.self.query""" ) if "attn.proj" in key: _UpperCAmelCase = key.replace("""attn.proj""" , """attention.output.dense""" ) if "attn" in key: _UpperCAmelCase = key.replace("""attn""" , """attention.self""" ) if "fc1" in key: _UpperCAmelCase = key.replace("""fc1""" , """dense1""" ) if "fc2" in key: _UpperCAmelCase = key.replace("""fc2""" , """dense2""" ) if "linear_pred" in key: _UpperCAmelCase = key.replace("""linear_pred""" , """classifier""" ) if "linear_fuse" in key: _UpperCAmelCase = key.replace("""linear_fuse.conv""" , """linear_fuse""" ) _UpperCAmelCase = key.replace("""linear_fuse.bn""" , """batch_norm""" ) if "linear_c" in key: # replace for example linear_c4 by linear_c.3 _UpperCAmelCase = key[key.find("""linear_c""" ) + len("""linear_c""" )] _UpperCAmelCase = key.replace(f"linear_c{idx}" , f"linear_c.{int(snake_case )-1}" ) if "bot_conv" in key: _UpperCAmelCase = key.replace("""bot_conv""" , """0.convolution""" ) if "skip_conv1" in key: _UpperCAmelCase = key.replace("""skip_conv1""" , """1.convolution""" ) if "skip_conv2" in key: _UpperCAmelCase = key.replace("""skip_conv2""" , """2.convolution""" ) if "fusion1" in key: _UpperCAmelCase = key.replace("""fusion1""" , """1.fusion""" ) if "fusion2" in key: _UpperCAmelCase = key.replace("""fusion2""" , """2.fusion""" ) if "fusion3" in key: _UpperCAmelCase = key.replace("""fusion3""" , """3.fusion""" ) if "fusion" in key and "conv" in key: _UpperCAmelCase = key.replace("""conv""" , """convolutional_layer""" ) if key.startswith("""module.last_layer_depth""" ): _UpperCAmelCase = key.replace("""module.last_layer_depth""" , """head.head""" ) _UpperCAmelCase = value return new_state_dict def _SCREAMING_SNAKE_CASE ( snake_case , snake_case ) -> Tuple: # for each of the encoder blocks: for i in 
range(config.num_encoder_blocks ): for j in range(config.depths[i] ): # read in weights + bias of keys and values (which is a single matrix in the original implementation) _UpperCAmelCase = state_dict.pop(f"glpn.encoder.block.{i}.{j}.attention.self.kv.weight" ) _UpperCAmelCase = state_dict.pop(f"glpn.encoder.block.{i}.{j}.attention.self.kv.bias" ) # next, add keys and values (in that order) to the state dict _UpperCAmelCase = kv_weight[ : config.hidden_sizes[i], : ] _UpperCAmelCase = kv_bias[: config.hidden_sizes[i]] _UpperCAmelCase = kv_weight[ config.hidden_sizes[i] :, : ] _UpperCAmelCase = kv_bias[config.hidden_sizes[i] :] def _SCREAMING_SNAKE_CASE ( ) -> Union[str, Any]: _UpperCAmelCase = """http://images.cocodataset.org/val2017/000000039769.jpg""" _UpperCAmelCase = Image.open(requests.get(snake_case , stream=snake_case ).raw ) return image @torch.no_grad() def _SCREAMING_SNAKE_CASE ( snake_case , snake_case , snake_case=False , snake_case=None ) -> Optional[Any]: _UpperCAmelCase = GLPNConfig(hidden_sizes=[6_4, 1_2_8, 3_2_0, 5_1_2] , decoder_hidden_size=6_4 , depths=[3, 8, 2_7, 3] ) # load image processor (only resize + rescale) _UpperCAmelCase = GLPNImageProcessor() # prepare image _UpperCAmelCase = prepare_img() _UpperCAmelCase = image_processor(images=snake_case , return_tensors="""pt""" ).pixel_values logger.info("""Converting model...""" ) # load original state dict _UpperCAmelCase = torch.load(snake_case , map_location=torch.device("""cpu""" ) ) # rename keys _UpperCAmelCase = rename_keys(snake_case ) # key and value matrices need special treatment read_in_k_v(snake_case , snake_case ) # create HuggingFace model and load state dict _UpperCAmelCase = GLPNForDepthEstimation(snake_case ) model.load_state_dict(snake_case ) model.eval() # forward pass _UpperCAmelCase = model(snake_case ) _UpperCAmelCase = outputs.predicted_depth # verify output if model_name is not None: if "nyu" in model_name: _UpperCAmelCase = torch.tensor( [[4.4147, 4.0873, 4.0673], [3.7890, 3.2881, 3.1525], [3.7674, 3.5423, 3.4913]] ) elif "kitti" in model_name: _UpperCAmelCase = torch.tensor( [[3.4291, 2.7865, 2.5151], [3.2841, 2.7021, 2.3502], [3.1147, 2.4625, 2.2481]] ) else: raise ValueError(f"Unknown model name: {model_name}" ) _UpperCAmelCase = torch.Size([1, 4_8_0, 6_4_0] ) assert predicted_depth.shape == expected_shape assert torch.allclose(predicted_depth[0, :3, :3] , snake_case , atol=1E-4 ) print("""Looks ok!""" ) # finally, push to hub if required if push_to_hub: logger.info("""Pushing model and image processor to the hub...""" ) model.push_to_hub( repo_path_or_name=Path(snake_case , snake_case ) , organization="""nielsr""" , commit_message="""Add model""" , use_temp_dir=snake_case , ) image_processor.push_to_hub( repo_path_or_name=Path(snake_case , snake_case ) , organization="""nielsr""" , commit_message="""Add image processor""" , use_temp_dir=snake_case , ) if __name__ == "__main__": a = argparse.ArgumentParser() parser.add_argument( "--checkpoint_path", default=None, type=str, help="Path to the original PyTorch checkpoint (.pth file).", ) parser.add_argument( "--pytorch_dump_folder_path", default=None, type=str, help="Path to the folder to output PyTorch model." ) parser.add_argument( "--push_to_hub", action="store_true", help="Whether to upload the model to the HuggingFace hub." 
) parser.add_argument( "--model_name", default="glpn-kitti", type=str, help="Name of the model in case you're pushing to the hub.", ) a = parser.parse_args() convert_glpn_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub, args.model_name)
518
from __future__ import annotations

from collections.abc import Generator

import requests
from bs4 import BeautifulSoup

url = "https://www.indeed.co.in/jobs?q=mobile+app+development&l="


def fetch_jobs(location: str = "mumbai") -> Generator[tuple[str, str], None, None]:
    soup = BeautifulSoup(requests.get(url + location).content, "html.parser")
    # This attribute finds out all the specifics listed in a job
    for job in soup.find_all("div", attrs={"data-tn-component": "organicJob"}):
        job_title = job.find("a", attrs={"data-tn-element": "jobTitle"}).text.strip()
        company_name = job.find("span", {"class": "company"}).text.strip()
        yield job_title, company_name


if __name__ == "__main__":
    for i, job in enumerate(fetch_jobs("Bangalore"), 1):
        print(f"Job {i:>2} is {job[0]} at {job[1]}")
518
1
"""simple docstring""" def A_ ( __lowercase ): if number > 0: raise ValueError('input must be a negative integer' ) UpperCamelCase_ : Optional[Any] =len(bin(__lowercase )[3:] ) UpperCamelCase_ : str =bin(abs(__lowercase ) - (1 << binary_number_length) )[3:] UpperCamelCase_ : Tuple =( ( '1' + '0' * (binary_number_length - len(__lowercase )) + twos_complement_number ) if number < 0 else '0' ) return "0b" + twos_complement_number if __name__ == "__main__": import doctest doctest.testmod()
395
"""simple docstring""" import inspect import unittest from transformers import MobileNetVaConfig from transformers.testing_utils import require_torch, require_vision, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import MobileNetVaForImageClassification, MobileNetVaForSemanticSegmentation, MobileNetVaModel from transformers.models.mobilenet_va.modeling_mobilenet_va import MOBILENET_V2_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import MobileNetVaImageProcessor class a__ ( A__ ): def lowerCamelCase_ ( self :Any ): '''simple docstring''' UpperCamelCase_ : List[str] =self.config_class(**self.inputs_dict ) self.parent.assertTrue(hasattr(_lowerCamelCase , 'tf_padding' ) ) self.parent.assertTrue(hasattr(_lowerCamelCase , 'depth_multiplier' ) ) class a__ : def __init__( self :Tuple , _lowerCamelCase :int , _lowerCamelCase :Optional[Any]=13 , _lowerCamelCase :List[Any]=3 , _lowerCamelCase :Optional[Any]=32 , _lowerCamelCase :str=0.25 , _lowerCamelCase :str=8 , _lowerCamelCase :str=8 , _lowerCamelCase :Tuple=6 , _lowerCamelCase :Optional[Any]=32 , _lowerCamelCase :Union[str, Any]=True , _lowerCamelCase :int=True , _lowerCamelCase :Optional[int]=True , _lowerCamelCase :Tuple="relu6" , _lowerCamelCase :List[Any]=1_280 , _lowerCamelCase :Optional[int]=0.1 , _lowerCamelCase :Optional[Any]=0.02 , _lowerCamelCase :Dict=True , _lowerCamelCase :List[str]=True , _lowerCamelCase :List[str]=10 , _lowerCamelCase :List[Any]=None , ): '''simple docstring''' UpperCamelCase_ : Optional[Any] =parent UpperCamelCase_ : Optional[Any] =batch_size UpperCamelCase_ : List[str] =num_channels UpperCamelCase_ : Union[str, Any] =image_size UpperCamelCase_ : Union[str, Any] =depth_multiplier UpperCamelCase_ : Optional[Any] =depth_divisible_by UpperCamelCase_ : Optional[Any] =min_depth UpperCamelCase_ : List[Any] =expand_ratio UpperCamelCase_ : Any =tf_padding UpperCamelCase_ : List[str] =output_stride UpperCamelCase_ : Tuple =first_layer_is_expansion UpperCamelCase_ : Any =finegrained_output UpperCamelCase_ : Dict =hidden_act UpperCamelCase_ : int =last_hidden_size if finegrained_output else int(last_hidden_size * depth_multiplier ) UpperCamelCase_ : Optional[int] =classifier_dropout_prob UpperCamelCase_ : str =use_labels UpperCamelCase_ : List[Any] =is_training UpperCamelCase_ : Tuple =num_labels UpperCamelCase_ : Optional[int] =initializer_range UpperCamelCase_ : Union[str, Any] =scope def lowerCamelCase_ ( self :str ): '''simple docstring''' UpperCamelCase_ : str =floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) UpperCamelCase_ : Dict =None UpperCamelCase_ : Dict =None if self.use_labels: UpperCamelCase_ : List[str] =ids_tensor([self.batch_size] , self.num_labels ) UpperCamelCase_ : List[Any] =ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels ) UpperCamelCase_ : Any =self.get_config() return config, pixel_values, labels, pixel_labels def lowerCamelCase_ ( self :Any ): '''simple docstring''' return MobileNetVaConfig( num_channels=self.num_channels , image_size=self.image_size , depth_multiplier=self.depth_multiplier , depth_divisible_by=self.depth_divisible_by , min_depth=self.min_depth , 
expand_ratio=self.expand_ratio , output_stride=self.output_stride , first_layer_is_expansion=self.first_layer_is_expansion , finegrained_output=self.finegrained_output , hidden_act=self.hidden_act , tf_padding=self.tf_padding , classifier_dropout_prob=self.classifier_dropout_prob , initializer_range=self.initializer_range , ) def lowerCamelCase_ ( self :List[str] , _lowerCamelCase :Optional[int] , _lowerCamelCase :str , _lowerCamelCase :Tuple , _lowerCamelCase :List[str] ): '''simple docstring''' UpperCamelCase_ : List[Any] =MobileNetVaModel(config=_lowerCamelCase ) model.to(_lowerCamelCase ) model.eval() UpperCamelCase_ : List[Any] =model(_lowerCamelCase ) self.parent.assertEqual( result.last_hidden_state.shape , ( self.batch_size, self.last_hidden_size, self.image_size // self.output_stride, self.image_size // self.output_stride, ) , ) self.parent.assertEqual( result.pooler_output.shape , (self.batch_size, self.last_hidden_size) , ) def lowerCamelCase_ ( self :Dict , _lowerCamelCase :int , _lowerCamelCase :Optional[Any] , _lowerCamelCase :str , _lowerCamelCase :Optional[Any] ): '''simple docstring''' UpperCamelCase_ : Tuple =self.num_labels UpperCamelCase_ : List[str] =MobileNetVaForImageClassification(_lowerCamelCase ) model.to(_lowerCamelCase ) model.eval() UpperCamelCase_ : List[str] =model(_lowerCamelCase , labels=_lowerCamelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def lowerCamelCase_ ( self :Any , _lowerCamelCase :Union[str, Any] , _lowerCamelCase :Union[str, Any] , _lowerCamelCase :str , _lowerCamelCase :Dict ): '''simple docstring''' UpperCamelCase_ : Tuple =self.num_labels UpperCamelCase_ : int =MobileNetVaForSemanticSegmentation(_lowerCamelCase ) model.to(_lowerCamelCase ) model.eval() UpperCamelCase_ : Dict =model(_lowerCamelCase ) self.parent.assertEqual( result.logits.shape , ( self.batch_size, self.num_labels, self.image_size // self.output_stride, self.image_size // self.output_stride, ) , ) UpperCamelCase_ : int =model(_lowerCamelCase , labels=_lowerCamelCase ) self.parent.assertEqual( result.logits.shape , ( self.batch_size, self.num_labels, self.image_size // self.output_stride, self.image_size // self.output_stride, ) , ) def lowerCamelCase_ ( self :Any ): '''simple docstring''' UpperCamelCase_ : str =self.prepare_config_and_inputs() UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ : int =config_and_inputs UpperCamelCase_ : Dict ={'pixel_values': pixel_values} return config, inputs_dict @require_torch class a__ ( A__ , A__ , unittest.TestCase ): UpperCAmelCase__ = ( (MobileNetVaModel, MobileNetVaForImageClassification, MobileNetVaForSemanticSegmentation) if is_torch_available() else () ) UpperCAmelCase__ = ( { '''feature-extraction''': MobileNetVaModel, '''image-classification''': MobileNetVaForImageClassification, '''image-segmentation''': MobileNetVaForSemanticSegmentation, } if is_torch_available() else {} ) UpperCAmelCase__ = False UpperCAmelCase__ = False UpperCAmelCase__ = False UpperCAmelCase__ = False def lowerCamelCase_ ( self :Union[str, Any] ): '''simple docstring''' UpperCamelCase_ : Dict =MobileNetVaModelTester(self ) UpperCamelCase_ : Any =MobileNetVaConfigTester(self , config_class=_lowerCamelCase , has_text_modality=_lowerCamelCase ) def lowerCamelCase_ ( self :int ): '''simple docstring''' self.config_tester.run_common_tests() @unittest.skip(reason='MobileNetV2 does not use inputs_embeds' ) def lowerCamelCase_ ( self :int ): '''simple docstring''' pass 
@unittest.skip(reason='MobileNetV2 does not support input and output embeddings' ) def lowerCamelCase_ ( self :str ): '''simple docstring''' pass @unittest.skip(reason='MobileNetV2 does not output attentions' ) def lowerCamelCase_ ( self :int ): '''simple docstring''' pass def lowerCamelCase_ ( self :List[Any] ): '''simple docstring''' UpperCamelCase_ , UpperCamelCase_ : List[str] =self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: UpperCamelCase_ : Tuple =model_class(_lowerCamelCase ) UpperCamelCase_ : Dict =inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic UpperCamelCase_ : Optional[int] =[*signature.parameters.keys()] UpperCamelCase_ : List[str] =['pixel_values'] self.assertListEqual(arg_names[:1] , _lowerCamelCase ) def lowerCamelCase_ ( self :List[str] ): '''simple docstring''' UpperCamelCase_ : Any =self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*_lowerCamelCase ) def lowerCamelCase_ ( self :Dict ): '''simple docstring''' def check_hidden_states_output(_lowerCamelCase :List[Any] , _lowerCamelCase :List[Any] , _lowerCamelCase :List[Any] ): UpperCamelCase_ : List[str] =model_class(_lowerCamelCase ) model.to(_lowerCamelCase ) model.eval() with torch.no_grad(): UpperCamelCase_ : str =model(**self._prepare_for_class(_lowerCamelCase , _lowerCamelCase ) ) UpperCamelCase_ : Optional[Any] =outputs.hidden_states UpperCamelCase_ : List[str] =16 self.assertEqual(len(_lowerCamelCase ) , _lowerCamelCase ) UpperCamelCase_ , UpperCamelCase_ : str =self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: UpperCamelCase_ : Dict =True check_hidden_states_output(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] UpperCamelCase_ : Dict =True check_hidden_states_output(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) def lowerCamelCase_ ( self :Any ): '''simple docstring''' UpperCamelCase_ : int =self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*_lowerCamelCase ) def lowerCamelCase_ ( self :Optional[Any] ): '''simple docstring''' UpperCamelCase_ : str =self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_semantic_segmentation(*_lowerCamelCase ) @slow def lowerCamelCase_ ( self :str ): '''simple docstring''' for model_name in MOBILENET_V2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: UpperCamelCase_ : List[str] =MobileNetVaModel.from_pretrained(_lowerCamelCase ) self.assertIsNotNone(_lowerCamelCase ) def A_ ( ): UpperCamelCase_ : Dict =Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' ) return image @require_torch @require_vision class a__ ( unittest.TestCase ): @cached_property def lowerCamelCase_ ( self :Tuple ): '''simple docstring''' return ( MobileNetVaImageProcessor.from_pretrained('google/mobilenet_v2_1.0_224' ) if is_vision_available() else None ) @slow def lowerCamelCase_ ( self :Tuple ): '''simple docstring''' UpperCamelCase_ : Optional[int] =MobileNetVaForImageClassification.from_pretrained('google/mobilenet_v2_1.0_224' ).to(_lowerCamelCase ) UpperCamelCase_ : List[Any] =self.default_image_processor UpperCamelCase_ : List[Any] =prepare_img() UpperCamelCase_ : List[str] =image_processor(images=_lowerCamelCase , return_tensors='pt' ).to(_lowerCamelCase ) # forward pass with torch.no_grad(): UpperCamelCase_ : 
List[Any] =model(**_lowerCamelCase ) # verify the logits UpperCamelCase_ : Optional[int] =torch.Size((1, 1_001) ) self.assertEqual(outputs.logits.shape , _lowerCamelCase ) UpperCamelCase_ : Optional[Any] =torch.tensor([0.2445, -1.1993, 0.1905] ).to(_lowerCamelCase ) self.assertTrue(torch.allclose(outputs.logits[0, :3] , _lowerCamelCase , atol=1E-4 ) ) @slow def lowerCamelCase_ ( self :Tuple ): '''simple docstring''' UpperCamelCase_ : Dict =MobileNetVaForSemanticSegmentation.from_pretrained('google/deeplabv3_mobilenet_v2_1.0_513' ) UpperCamelCase_ : Optional[int] =model.to(_lowerCamelCase ) UpperCamelCase_ : Dict =MobileNetVaImageProcessor.from_pretrained('google/deeplabv3_mobilenet_v2_1.0_513' ) UpperCamelCase_ : Any =prepare_img() UpperCamelCase_ : List[Any] =image_processor(images=_lowerCamelCase , return_tensors='pt' ).to(_lowerCamelCase ) # forward pass with torch.no_grad(): UpperCamelCase_ : Any =model(**_lowerCamelCase ) UpperCamelCase_ : Any =outputs.logits # verify the logits UpperCamelCase_ : Optional[int] =torch.Size((1, 21, 65, 65) ) self.assertEqual(logits.shape , _lowerCamelCase ) UpperCamelCase_ : List[Any] =torch.tensor( [ [[17.5790, 17.7581, 18.3355], [18.3257, 18.4230, 18.8973], [18.6169, 18.8650, 19.2187]], [[-2.1595, -2.0977, -2.3741], [-2.4226, -2.3028, -2.6835], [-2.7819, -2.5991, -2.7706]], [[4.2058, 4.8317, 4.7638], [4.4136, 5.0361, 4.9383], [4.5028, 4.9644, 4.8734]], ] , device=_lowerCamelCase , ) self.assertTrue(torch.allclose(logits[0, :3, :3, :3] , _lowerCamelCase , atol=1E-4 ) )
395
1
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available lowerCamelCase__ = { "configuration_blip_2": [ "BLIP_2_PRETRAINED_CONFIG_ARCHIVE_MAP", "Blip2Config", "Blip2QFormerConfig", "Blip2VisionConfig", ], "processing_blip_2": ["Blip2Processor"], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCamelCase__ = [ "BLIP_2_PRETRAINED_MODEL_ARCHIVE_LIST", "Blip2Model", "Blip2QFormerModel", "Blip2PreTrainedModel", "Blip2ForConditionalGeneration", "Blip2VisionModel", ] if TYPE_CHECKING: from .configuration_blip_a import ( BLIP_2_PRETRAINED_CONFIG_ARCHIVE_MAP, BlipaConfig, BlipaQFormerConfig, BlipaVisionConfig, ) from .processing_blip_a import BlipaProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_blip_a import ( BLIP_2_PRETRAINED_MODEL_ARCHIVE_LIST, BlipaForConditionalGeneration, BlipaModel, BlipaPreTrainedModel, BlipaQFormerModel, BlipaVisionModel, ) else: import sys lowerCamelCase__ = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
574
"""simple docstring""" import unittest import numpy as np import timeout_decorator # noqa from transformers import BlenderbotConfig, is_flax_available from transformers.testing_utils import jax_device, require_flax, slow from ...generation.test_flax_utils import FlaxGenerationTesterMixin from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor if is_flax_available(): import os # The slow tests are often failing with OOM error on GPU # This makes JAX allocate exactly what is needed on demand, and deallocate memory that is no longer needed # but will be slower as stated here https://jax.readthedocs.io/en/latest/gpu_memory_allocation.html lowerCamelCase__ = "platform" import jax import jax.numpy as jnp from transformers import BlenderbotTokenizer from transformers.models.blenderbot.modeling_flax_blenderbot import ( FlaxBlenderbotForConditionalGeneration, FlaxBlenderbotModel, shift_tokens_right, ) def _SCREAMING_SNAKE_CASE ( UpperCamelCase : int , UpperCamelCase : Any , UpperCamelCase : Union[str, Any]=None , UpperCamelCase : Any=None , UpperCamelCase : Union[str, Any]=None , UpperCamelCase : Dict=None , UpperCamelCase : Dict=None , UpperCamelCase : Tuple=None , ): if attention_mask is None: A__ = np.where(input_ids != config.pad_token_id , 1 , 0 ) if decoder_attention_mask is None: A__ = np.where(decoder_input_ids != config.pad_token_id , 1 , 0 ) if head_mask is None: A__ = np.ones((config.encoder_layers, config.encoder_attention_heads) ) if decoder_head_mask is None: A__ = np.ones((config.decoder_layers, config.decoder_attention_heads) ) if cross_attn_head_mask is None: A__ = np.ones((config.decoder_layers, config.decoder_attention_heads) ) return { "input_ids": input_ids, "decoder_input_ids": decoder_input_ids, "attention_mask": attention_mask, "decoder_attention_mask": attention_mask, } class _UpperCamelCase : def __init__(self , lowerCamelCase__ , lowerCamelCase__=1_3 , lowerCamelCase__=7 , lowerCamelCase__=True , lowerCamelCase__=False , lowerCamelCase__=9_9 , lowerCamelCase__=1_6 , lowerCamelCase__=2 , lowerCamelCase__=4 , lowerCamelCase__=4 , lowerCamelCase__="gelu" , lowerCamelCase__=0.1 , lowerCamelCase__=0.1 , lowerCamelCase__=3_2 , lowerCamelCase__=2 , lowerCamelCase__=1 , lowerCamelCase__=0 , lowerCamelCase__=0.0_2 , ): """simple docstring""" A__ = parent A__ = batch_size A__ = seq_length A__ = is_training A__ = use_labels A__ = vocab_size A__ = hidden_size A__ = num_hidden_layers A__ = num_attention_heads A__ = intermediate_size A__ = hidden_act A__ = hidden_dropout_prob A__ = attention_probs_dropout_prob A__ = max_position_embeddings A__ = eos_token_id A__ = pad_token_id A__ = bos_token_id A__ = initializer_range def A (self ): """simple docstring""" A__ = np.clip(ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size ) , 3 , self.vocab_size ) A__ = np.concatenate((input_ids, 2 * np.ones((self.batch_size, 1) , dtype=np.intaa )) , -1 ) A__ = shift_tokens_right(lowerCamelCase__ , 1 , 2 ) A__ = BlenderbotConfig( vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_id=self.eos_token_id , bos_token_id=self.bos_token_id , 
pad_token_id=self.pad_token_id , initializer_range=self.initializer_range , use_cache=lowerCamelCase__ , ) A__ = prepare_blenderbot_inputs_dict(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) return config, inputs_dict def A (self ): """simple docstring""" A__ ,A__ = self.prepare_config_and_inputs() return config, inputs_dict def A (self , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ): """simple docstring""" A__ = 2_0 A__ = model_class_name(lowerCamelCase__ ) A__ = model.encode(inputs_dict["""input_ids"""] ) A__ ,A__ = ( inputs_dict["""decoder_input_ids"""], inputs_dict["""decoder_attention_mask"""], ) A__ = model.init_cache(decoder_input_ids.shape[0] , lowerCamelCase__ , lowerCamelCase__ ) A__ = jnp.ones((decoder_input_ids.shape[0], max_decoder_length) , dtype="""i4""" ) A__ = jnp.broadcast_to( jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , ) A__ = model.decode( decoder_input_ids[:, :-1] , lowerCamelCase__ , decoder_attention_mask=lowerCamelCase__ , past_key_values=lowerCamelCase__ , decoder_position_ids=lowerCamelCase__ , ) A__ = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype="""i4""" ) A__ = model.decode( decoder_input_ids[:, -1:] , lowerCamelCase__ , decoder_attention_mask=lowerCamelCase__ , past_key_values=outputs_cache.past_key_values , decoder_position_ids=lowerCamelCase__ , ) A__ = model.decode(lowerCamelCase__ , lowerCamelCase__ ) A__ = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) ) self.parent.assertTrue(diff < 1E-3 , msg=F"""Max diff is {diff}""" ) def A (self , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ): """simple docstring""" A__ = 2_0 A__ = model_class_name(lowerCamelCase__ ) A__ = model.encode(inputs_dict["""input_ids"""] ) A__ ,A__ = ( inputs_dict["""decoder_input_ids"""], inputs_dict["""decoder_attention_mask"""], ) A__ = jnp.concatenate( [ decoder_attention_mask, jnp.zeros((decoder_attention_mask.shape[0], max_decoder_length - decoder_attention_mask.shape[1]) ), ] , axis=-1 , ) A__ = model.init_cache(decoder_input_ids.shape[0] , lowerCamelCase__ , lowerCamelCase__ ) A__ = jnp.broadcast_to( jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , ) A__ = model.decode( decoder_input_ids[:, :-1] , lowerCamelCase__ , decoder_attention_mask=lowerCamelCase__ , past_key_values=lowerCamelCase__ , decoder_position_ids=lowerCamelCase__ , ) A__ = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype="""i4""" ) A__ = model.decode( decoder_input_ids[:, -1:] , lowerCamelCase__ , past_key_values=outputs_cache.past_key_values , decoder_attention_mask=lowerCamelCase__ , decoder_position_ids=lowerCamelCase__ , ) A__ = model.decode(lowerCamelCase__ , lowerCamelCase__ , decoder_attention_mask=lowerCamelCase__ ) A__ = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) ) self.parent.assertTrue(diff < 1E-3 , msg=F"""Max diff is {diff}""" ) @require_flax class _UpperCamelCase ( unittest.TestCase): __lowerCamelCase = 9_9 def A (self ): """simple docstring""" A__ = np.array( [ [7_1, 8_2, 1_8, 3_3, 4_6, 9_1, 2], [6_8, 3_4, 2_6, 5_8, 3_0, 8_2, 2], [5, 9_7, 1_7, 3_9, 9_4, 4_0, 2], [7_6, 8_3, 9_4, 2_5, 7_0, 7_8, 2], [8_7, 5_9, 4_1, 3_5, 4_8, 6_6, 2], [5_5, 1_3, 1_6, 5_8, 5, 2, 1], # note padding [6_4, 2_7, 3_1, 5_1, 1_2, 7_5, 2], [5_2, 6_4, 8_6, 1_7, 8_3, 3_9, 2], [4_8, 6_1, 9, 2_4, 7_1, 8_2, 2], [2_6, 1, 6_0, 4_8, 
2_2, 1_3, 2], [2_1, 5, 6_2, 2_8, 1_4, 7_6, 2], [4_5, 9_8, 3_7, 8_6, 5_9, 4_8, 2], [7_0, 7_0, 5_0, 9, 2_8, 0, 2], ] , dtype=np.intaa , ) A__ = input_ids.shape[0] A__ = BlenderbotConfig( vocab_size=self.vocab_size , d_model=2_4 , encoder_layers=2 , decoder_layers=2 , encoder_attention_heads=2 , decoder_attention_heads=2 , encoder_ffn_dim=3_2 , decoder_ffn_dim=3_2 , max_position_embeddings=4_8 , eos_token_id=2 , pad_token_id=1 , bos_token_id=0 , ) return config, input_ids, batch_size def A (self ): """simple docstring""" A__ ,A__ ,A__ = self._get_config_and_data() A__ = FlaxBlenderbotForConditionalGeneration(lowerCamelCase__ ) A__ = lm_model(input_ids=lowerCamelCase__ ) A__ = (batch_size, input_ids.shape[1], config.vocab_size) self.assertEqual(outputs["""logits"""].shape , lowerCamelCase__ ) def A (self ): """simple docstring""" A__ = BlenderbotConfig( vocab_size=self.vocab_size , d_model=1_4 , encoder_layers=2 , decoder_layers=2 , encoder_attention_heads=2 , decoder_attention_heads=2 , encoder_ffn_dim=8 , decoder_ffn_dim=8 , max_position_embeddings=4_8 , ) A__ = FlaxBlenderbotForConditionalGeneration(lowerCamelCase__ ) A__ = np.array([[7_1, 8_2, 1_8, 3_3, 4_6, 9_1, 2], [6_8, 3_4, 2_6, 5_8, 3_0, 2, 1]] , dtype=np.intaa ) A__ = np.array([[8_2, 7_1, 8_2, 1_8, 2], [5_8, 6_8, 2, 1, 1]] , dtype=np.intaa ) A__ = lm_model(input_ids=lowerCamelCase__ , decoder_input_ids=lowerCamelCase__ ) A__ = (*summary.shape, config.vocab_size) self.assertEqual(outputs["""logits"""].shape , lowerCamelCase__ ) def A (self ): """simple docstring""" A__ = np.array([[7_1, 8_2, 1_8, 3_3, 2, 1, 1], [6_8, 3_4, 2_6, 5_8, 3_0, 8_2, 2]] , dtype=np.intaa ) A__ = shift_tokens_right(lowerCamelCase__ , 1 , 2 ) A__ = np.equal(lowerCamelCase__ , 1 ).astype(np.floataa ).sum() A__ = np.equal(lowerCamelCase__ , 1 ).astype(np.floataa ).sum() self.assertEqual(shifted.shape , input_ids.shape ) self.assertEqual(lowerCamelCase__ , n_pad_before - 1 ) self.assertTrue(np.equal(shifted[:, 0] , 2 ).all() ) @require_flax class _UpperCamelCase ( __snake_case , unittest.TestCase , __snake_case): __lowerCamelCase = True __lowerCamelCase = ( ( FlaxBlenderbotModel, FlaxBlenderbotForConditionalGeneration, ) if is_flax_available() else () ) __lowerCamelCase = (FlaxBlenderbotForConditionalGeneration,) if is_flax_available() else () def A (self ): """simple docstring""" A__ = FlaxBlenderbotModelTester(self ) def A (self ): """simple docstring""" A__ ,A__ = self.model_tester.prepare_config_and_inputs() for model_class in self.all_model_classes: self.model_tester.check_use_cache_forward(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) def A (self ): """simple docstring""" A__ ,A__ = self.model_tester.prepare_config_and_inputs() for model_class in self.all_model_classes: self.model_tester.check_use_cache_forward_with_attn_mask(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) def A (self ): """simple docstring""" A__ ,A__ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: with self.subTest(model_class.__name__ ): A__ = self._prepare_for_class(lowerCamelCase__ , lowerCamelCase__ ) A__ = model_class(lowerCamelCase__ ) @jax.jit def encode_jitted(lowerCamelCase__ , lowerCamelCase__=None , **lowerCamelCase__ ): return model.encode(input_ids=lowerCamelCase__ , attention_mask=lowerCamelCase__ ) with self.subTest("""JIT Enabled""" ): A__ = encode_jitted(**lowerCamelCase__ ).to_tuple() with self.subTest("""JIT Disabled""" ): with jax.disable_jit(): A__ = encode_jitted(**lowerCamelCase__ ).to_tuple() 
self.assertEqual(len(lowerCamelCase__ ) , len(lowerCamelCase__ ) ) for jitted_output, output in zip(lowerCamelCase__ , lowerCamelCase__ ): self.assertEqual(jitted_output.shape , output.shape ) def A (self ): """simple docstring""" A__ ,A__ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: with self.subTest(model_class.__name__ ): A__ = model_class(lowerCamelCase__ ) A__ = model.encode(inputs_dict["""input_ids"""] , inputs_dict["""attention_mask"""] ) A__ = { """decoder_input_ids""": inputs_dict["""decoder_input_ids"""], """decoder_attention_mask""": inputs_dict["""decoder_attention_mask"""], """encoder_outputs""": encoder_outputs, } @jax.jit def decode_jitted(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ): return model.decode( decoder_input_ids=lowerCamelCase__ , decoder_attention_mask=lowerCamelCase__ , encoder_outputs=lowerCamelCase__ , ) with self.subTest("""JIT Enabled""" ): A__ = decode_jitted(**lowerCamelCase__ ).to_tuple() with self.subTest("""JIT Disabled""" ): with jax.disable_jit(): A__ = decode_jitted(**lowerCamelCase__ ).to_tuple() self.assertEqual(len(lowerCamelCase__ ) , len(lowerCamelCase__ ) ) for jitted_output, output in zip(lowerCamelCase__ , lowerCamelCase__ ): self.assertEqual(jitted_output.shape , output.shape ) @slow def A (self ): """simple docstring""" for model_class_name in self.all_model_classes: A__ = model_class_name.from_pretrained("""facebook/blenderbot-400M-distill""" ) # FlaxBlenderbotForSequenceClassification expects eos token in input_ids A__ = np.ones((1, 1) ) * model.config.eos_token_id A__ = model(lowerCamelCase__ ) self.assertIsNotNone(lowerCamelCase__ ) @unittest.skipUnless(jax_device != """cpu""" , """3B test too slow on CPU.""" ) @slow def A (self ): """simple docstring""" A__ = {"""num_beams""": 1, """early_stopping""": True, """min_length""": 1_5, """max_length""": 2_5} A__ = {"""skip_special_tokens""": True, """clean_up_tokenization_spaces""": True} A__ = FlaxBlenderbotForConditionalGeneration.from_pretrained("""facebook/blenderbot-3B""" , from_pt=lowerCamelCase__ ) A__ = BlenderbotTokenizer.from_pretrained("""facebook/blenderbot-3B""" ) A__ = ["""Sam"""] A__ = tokenizer(lowerCamelCase__ , return_tensors="""jax""" ) A__ = model.generate(**lowerCamelCase__ , **lowerCamelCase__ ) A__ = """Sam is a great name. It means \"sun\" in Gaelic.""" A__ = tokenizer.batch_decode(lowerCamelCase__ , **lowerCamelCase__ ) assert generated_txt[0].strip() == tgt_text
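# A short, hedged sketch of the semantics that test_shift_tokens_right pins down, restated
# in plain numpy (the real implementation lives in modeling_flax_blenderbot and uses jnp).
# Each row is shifted one position to the right, the decoder start token is prepended, and
# the last column falls off; in the test batch exactly one trailing token is a pad, hence
# the "n_pad_before - 1" assertion.
import numpy as np


def shift_tokens_right(input_ids: np.ndarray, pad_token_id: int, decoder_start_token_id: int) -> np.ndarray:
    shifted = np.zeros_like(input_ids)
    shifted[:, 1:] = input_ids[:, :-1]
    shifted[:, 0] = decoder_start_token_id
    # labels marked with the ignore index -100 are mapped back to the pad token
    return np.where(shifted == -100, pad_token_id, shifted)


input_ids = np.array([[71, 82, 18, 33, 2, 1, 1], [68, 34, 26, 58, 30, 82, 2]])
shifted = shift_tokens_right(input_ids, pad_token_id=1, decoder_start_token_id=2)
assert (shifted[:, 0] == 2).all()
assert (shifted == 1).sum() == (input_ids == 1).sum() - 1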
574
1
"""simple docstring""" import logging import os from typing import Dict, List, Optional, Union import torch import torch.nn as nn from accelerate.utils.imports import ( is_abit_bnb_available, is_abit_bnb_available, is_bnb_available, ) from ..big_modeling import dispatch_model, init_empty_weights from .dataclasses import BnbQuantizationConfig from .modeling import ( find_tied_parameters, get_balanced_memory, infer_auto_device_map, load_checkpoint_in_model, offload_weight, set_module_tensor_to_device, ) if is_bnb_available(): import bitsandbytes as bnb from copy import deepcopy SCREAMING_SNAKE_CASE_ = logging.getLogger(__name__) def lowerCAmelCase_ ( SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ = None, SCREAMING_SNAKE_CASE__ = None, SCREAMING_SNAKE_CASE__ = None, SCREAMING_SNAKE_CASE__ = None, SCREAMING_SNAKE_CASE__ = None, SCREAMING_SNAKE_CASE__ = False, ) -> Optional[int]: a_ : Dict = bnb_quantization_config.load_in_abit a_ : Dict = bnb_quantization_config.load_in_abit if load_in_abit and not is_abit_bnb_available(): raise ImportError( "You have a version of `bitsandbytes` that is not compatible with 8bit quantization," " make sure you have the latest version of `bitsandbytes` installed." ) if load_in_abit and not is_abit_bnb_available(): raise ValueError( "You have a version of `bitsandbytes` that is not compatible with 4bit quantization," "make sure you have the latest version of `bitsandbytes` installed." ) a_ : str = [] # custom device map if isinstance(SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ ) and len(device_map.keys() ) > 1: a_ : Any = [key for key, value in device_map.items() if value in ["disk", "cpu"]] # We keep some modules such as the lm_head in their original dtype for numerical stability reasons if bnb_quantization_config.skip_modules is None: a_ : Optional[Any] = get_keys_to_not_convert(SCREAMING_SNAKE_CASE__ ) # add cpu modules to skip modules only for 4-bit modules if load_in_abit: bnb_quantization_config.skip_modules.extend(SCREAMING_SNAKE_CASE__ ) a_ : str = bnb_quantization_config.skip_modules # We add the modules we want to keep in full precision if bnb_quantization_config.keep_in_fpaa_modules is None: a_ : Any = [] a_ : Tuple = bnb_quantization_config.keep_in_fpaa_modules modules_to_not_convert.extend(SCREAMING_SNAKE_CASE__ ) # compatibility with peft a_ : int = load_in_abit a_ : Optional[int] = load_in_abit a_ : Optional[Any] = get_parameter_device(SCREAMING_SNAKE_CASE__ ) if model_device.type != "meta": # quantization of an already loaded model logger.warning( "It is not recommended to quantize a loaded model. " "The model should be instantiated under the `init_empty_weights` context manager." 
) a_ : Tuple = replace_with_bnb_layers(SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__, modules_to_not_convert=SCREAMING_SNAKE_CASE__ ) # convert param to the right dtype a_ : Any = bnb_quantization_config.torch_dtype for name, param in model.state_dict().items(): if any(module_to_keep_in_fpaa in name for module_to_keep_in_fpaa in keep_in_fpaa_modules ): param.to(torch.floataa ) if param.dtype != torch.floataa: a_ : Optional[int] = name.replace(".weight", "" ).replace(".bias", "" ) a_ : List[Any] = getattr(SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ ) if param is not None: param.to(torch.floataa ) elif torch.is_floating_point(SCREAMING_SNAKE_CASE__ ): param.to(SCREAMING_SNAKE_CASE__ ) if model_device.type == "cuda": # move everything to cpu in the first place because we can't do quantization if the weights are already on cuda model.cuda(torch.cuda.current_device() ) torch.cuda.empty_cache() elif torch.cuda.is_available(): model.to(torch.cuda.current_device() ) else: raise RuntimeError("No GPU found. A GPU is needed for quantization." ) logger.info( F"""The model device type is {model_device.type}. However, cuda is needed for quantization.""" "We move the model to cuda." ) return model elif weights_location is None: raise RuntimeError( F"""`weights_location` needs to be the folder path containing the weights of the model, but we found {weights_location} """ ) else: with init_empty_weights(): a_ : str = replace_with_bnb_layers( SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__, modules_to_not_convert=SCREAMING_SNAKE_CASE__ ) a_ : Optional[Any] = get_quantized_model_device_map( SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__, max_memory=SCREAMING_SNAKE_CASE__, no_split_module_classes=SCREAMING_SNAKE_CASE__, ) if offload_state_dict is None and device_map is not None and "disk" in device_map.values(): a_ : List[str] = True a_ : str = any(x in list(device_map.values() ) for x in ["cpu", "disk"] ) load_checkpoint_in_model( SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__, dtype=bnb_quantization_config.torch_dtype, offload_folder=SCREAMING_SNAKE_CASE__, offload_state_dict=SCREAMING_SNAKE_CASE__, keep_in_fpaa_modules=bnb_quantization_config.keep_in_fpaa_modules, offload_abit_bnb=load_in_abit and offload, ) return dispatch_model(SCREAMING_SNAKE_CASE__, device_map=SCREAMING_SNAKE_CASE__, offload_dir=SCREAMING_SNAKE_CASE__ ) def lowerCAmelCase_ ( SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__=None, SCREAMING_SNAKE_CASE__=None, SCREAMING_SNAKE_CASE__=None ) -> List[str]: if device_map is None: if torch.cuda.is_available(): a_ : Tuple = {"": torch.cuda.current_device()} else: raise RuntimeError("No GPU found. A GPU is needed for quantization." ) logger.info("The device_map was not initialized." "Setting device_map to `{'':torch.cuda.current_device()}`." ) if isinstance(SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ ): if device_map not in ["auto", "balanced", "balanced_low_0", "sequential"]: raise ValueError( "If passing a string for `device_map`, please choose 'auto', 'balanced', 'balanced_low_0' or " "'sequential'." 
) a_ : Any = {} special_dtypes.update( { name: bnb_quantization_config.torch_dtype for name, _ in model.named_parameters() if any(m in name for m in bnb_quantization_config.skip_modules ) } ) special_dtypes.update( { name: torch.floataa for name, _ in model.named_parameters() if any(m in name for m in bnb_quantization_config.keep_in_fpaa_modules ) } ) a_ : Optional[Any] = {} a_ : Optional[Any] = special_dtypes a_ : Union[str, Any] = no_split_module_classes a_ : Union[str, Any] = bnb_quantization_config.target_dtype # get max_memory for each device. if device_map != "sequential": a_ : int = get_balanced_memory( SCREAMING_SNAKE_CASE__, low_zero=(device_map == "balanced_low_0"), max_memory=SCREAMING_SNAKE_CASE__, **SCREAMING_SNAKE_CASE__, ) a_ : List[Any] = max_memory a_ : Union[str, Any] = infer_auto_device_map(SCREAMING_SNAKE_CASE__, **SCREAMING_SNAKE_CASE__ ) if isinstance(SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ ): # check if don't have any quantized module on the cpu a_ : Union[str, Any] = bnb_quantization_config.skip_modules + bnb_quantization_config.keep_in_fpaa_modules a_ : Optional[Any] = { key: device_map[key] for key in device_map.keys() if key not in modules_not_to_convert } for device in ["cpu", "disk"]: if device in device_map_without_some_modules.values(): if bnb_quantization_config.load_in_abit: raise ValueError( "\n Some modules are dispatched on the CPU or the disk. Make sure you have enough GPU RAM to fit\n the quantized model. If you want to dispatch the model on the CPU or the disk while keeping\n these modules in `torch_dtype`, you need to pass a custom `device_map` to\n `load_and_quantize_model`. Check\n https://huggingface.co/docs/accelerate/main/en/usage_guides/quantization#offload-modules-to-cpu-and-disk\n for more details.\n " ) else: logger.info( "Some modules are are offloaded to the CPU or the disk. Note that these modules will be converted to 8-bit" ) del device_map_without_some_modules return device_map def lowerCAmelCase_ ( SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__=None, SCREAMING_SNAKE_CASE__=None ) -> List[Any]: if modules_to_not_convert is None: a_ : List[Any] = [] a_ , a_ : List[Any] = _replace_with_bnb_layers( SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ ) if not has_been_replaced: logger.warning( "You are loading your model in 8bit or 4bit but no linear modules were found in your model." " this can happen for some architectures such as gpt2 that uses Conv1D instead of Linear layers." " Please double check your model architecture, or submit an issue on github if you think this is" " a bug." ) return model def lowerCAmelCase_ ( SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__=None, SCREAMING_SNAKE_CASE__=None, ) -> Tuple: a_ : Optional[Any] = False for name, module in model.named_children(): if current_key_name is None: a_ : Optional[Any] = [] current_key_name.append(SCREAMING_SNAKE_CASE__ ) if isinstance(SCREAMING_SNAKE_CASE__, nn.Linear ) and name not in modules_to_not_convert: # Check if the current key is not in the `modules_to_not_convert` a_ : str = ".".join(SCREAMING_SNAKE_CASE__ ) a_ : List[str] = True for key in modules_to_not_convert: if ( (key in current_key_name_str) and (key + "." 
in current_key_name_str) ) or key == current_key_name_str: a_ : int = False break if proceed: # Load bnb module with empty weight and replace ``nn.Linear` module if bnb_quantization_config.load_in_abit: a_ : Optional[Any] = bnb.nn.LinearabitLt( module.in_features, module.out_features, module.bias is not None, has_fpaa_weights=SCREAMING_SNAKE_CASE__, threshold=bnb_quantization_config.llm_inta_threshold, ) elif bnb_quantization_config.load_in_abit: a_ : str = bnb.nn.Linearabit( module.in_features, module.out_features, module.bias is not None, bnb_quantization_config.bnb_abit_compute_dtype, compress_statistics=bnb_quantization_config.bnb_abit_use_double_quant, quant_type=bnb_quantization_config.bnb_abit_quant_type, ) else: raise ValueError("load_in_8bit and load_in_4bit can't be both False" ) a_ : Tuple = module.weight.data if module.bias is not None: a_ : str = module.bias.data bnb_module.requires_grad_(SCREAMING_SNAKE_CASE__ ) setattr(SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ ) a_ : str = True if len(list(module.children() ) ) > 0: a_ , a_ : int = _replace_with_bnb_layers( SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ ) a_ : Tuple = has_been_replaced | _has_been_replaced # Remove the last key for recursion current_key_name.pop(-1 ) return model, has_been_replaced def lowerCAmelCase_ ( SCREAMING_SNAKE_CASE__ ) -> Tuple: # Create a copy of the model with init_empty_weights(): a_ : Optional[int] = deepcopy(SCREAMING_SNAKE_CASE__ ) # this has 0 cost since it is done inside `init_empty_weights` context manager` a_ : Tuple = find_tied_parameters(SCREAMING_SNAKE_CASE__ ) # For compatibility with Accelerate < 0.18 if isinstance(SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ ): a_ : Optional[int] = sum(list(tied_params.values() ), [] ) + list(tied_params.keys() ) else: a_ : List[str] = sum(SCREAMING_SNAKE_CASE__, [] ) a_ : List[Any] = len(SCREAMING_SNAKE_CASE__ ) > 0 # Check if it is a base model a_ : Any = False if hasattr(SCREAMING_SNAKE_CASE__, "base_model_prefix" ): a_ : Tuple = not hasattr(SCREAMING_SNAKE_CASE__, model.base_model_prefix ) # Ignore this for base models (BertModel, GPT2Model, etc.) 
if (not has_tied_params) and is_base_model: return [] # otherwise they have an attached head a_ : int = list(model.named_children() ) a_ : str = [list_modules[-1][0]] # add last module together with tied weights a_ : Dict = set(SCREAMING_SNAKE_CASE__ ) - set(SCREAMING_SNAKE_CASE__ ) a_ : Union[str, Any] = list(set(SCREAMING_SNAKE_CASE__ ) ) + list(SCREAMING_SNAKE_CASE__ ) # remove ".weight" from the keys a_ : Union[str, Any] = [".weight", ".bias"] a_ : Optional[int] = [] for name in list_untouched: for name_to_remove in names_to_remove: if name_to_remove in name: a_ : List[Any] = name.replace(SCREAMING_SNAKE_CASE__, "" ) filtered_module_names.append(SCREAMING_SNAKE_CASE__ ) return filtered_module_names def lowerCAmelCase_ ( SCREAMING_SNAKE_CASE__ ) -> Tuple: for m in model.modules(): if isinstance(SCREAMING_SNAKE_CASE__, bnb.nn.Linearabit ): return True return False def lowerCAmelCase_ ( SCREAMING_SNAKE_CASE__ ) -> str: return next(parameter.parameters() ).device def lowerCAmelCase_ ( SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ ) -> Optional[Any]: # if it is not quantized, we quantize and offload the quantized weights and the SCB stats if fpaa_statistics is None: set_module_tensor_to_device(SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__, 0, dtype=SCREAMING_SNAKE_CASE__, value=SCREAMING_SNAKE_CASE__ ) a_ : List[str] = param_name a_ : Dict = model if "." in tensor_name: a_ : Union[str, Any] = tensor_name.split("." ) for split in splits[:-1]: a_ : List[Any] = getattr(SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ ) if new_module is None: raise ValueError(F"""{module} has no attribute {split}.""" ) a_ : List[Any] = new_module a_ : int = splits[-1] # offload weights a_ : Dict = False offload_weight(module._parameters[tensor_name], SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__, index=SCREAMING_SNAKE_CASE__ ) if hasattr(module._parameters[tensor_name], "SCB" ): offload_weight( module._parameters[tensor_name].SCB, param_name.replace("weight", "SCB" ), SCREAMING_SNAKE_CASE__, index=SCREAMING_SNAKE_CASE__, ) else: offload_weight(SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__, index=SCREAMING_SNAKE_CASE__ ) offload_weight(SCREAMING_SNAKE_CASE__, param_name.replace("weight", "SCB" ), SCREAMING_SNAKE_CASE__, index=SCREAMING_SNAKE_CASE__ ) set_module_tensor_to_device(SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__, "meta", dtype=SCREAMING_SNAKE_CASE__, value=torch.empty(*param.size() ) )
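# A hedged usage sketch for the entry point above. The checkpoint id and weights folder are
# illustrative placeholders; `init_empty_weights`, `BnbQuantizationConfig` and
# `load_and_quantize_model` are the accelerate names this module targets, assuming the fixed
# function above is importable from accelerate.utils.
from accelerate import init_empty_weights
from accelerate.utils import BnbQuantizationConfig, load_and_quantize_model
from transformers import AutoConfig, AutoModelForCausalLM

# build the model skeleton on the meta device so no full-precision weights are materialized
config = AutoConfig.from_pretrained("bigscience/bloom-560m")  # illustrative checkpoint
with init_empty_weights():
    empty_model = AutoModelForCausalLM.from_config(config)

bnb_config = BnbQuantizationConfig(load_in_8bit=True, llm_int8_threshold=6.0)
quantized_model = load_and_quantize_model(
    empty_model,
    bnb_quantization_config=bnb_config,
    weights_location="path/to/checkpoint",  # placeholder: folder containing the model weights
    device_map="auto",
)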
370
"""simple docstring""" from ...configuration_utils import PretrainedConfig from ...utils import logging SCREAMING_SNAKE_CASE_ = logging.get_logger(__name__) SCREAMING_SNAKE_CASE_ = { """google/realm-cc-news-pretrained-embedder""": ( """https://huggingface.co/google/realm-cc-news-pretrained-embedder/resolve/main/config.json""" ), """google/realm-cc-news-pretrained-encoder""": ( """https://huggingface.co/google/realm-cc-news-pretrained-encoder/resolve/main/config.json""" ), """google/realm-cc-news-pretrained-scorer""": ( """https://huggingface.co/google/realm-cc-news-pretrained-scorer/resolve/main/config.json""" ), """google/realm-cc-news-pretrained-openqa""": ( """https://huggingface.co/google/realm-cc-news-pretrained-openqa/aresolve/main/config.json""" ), """google/realm-orqa-nq-openqa""": """https://huggingface.co/google/realm-orqa-nq-openqa/resolve/main/config.json""", """google/realm-orqa-nq-reader""": """https://huggingface.co/google/realm-orqa-nq-reader/resolve/main/config.json""", """google/realm-orqa-wq-openqa""": """https://huggingface.co/google/realm-orqa-wq-openqa/resolve/main/config.json""", """google/realm-orqa-wq-reader""": """https://huggingface.co/google/realm-orqa-wq-reader/resolve/main/config.json""", # See all REALM models at https://huggingface.co/models?filter=realm } class snake_case_ ( a_ ): __lowerCAmelCase = "realm" def __init__( self , a_=3_0_5_2_2 , a_=7_6_8 , a_=1_2_8 , a_=1_2 , a_=1_2 , a_=8 , a_=3_0_7_2 , a_="gelu_new" , a_=0.1 , a_=0.1 , a_=5_1_2 , a_=2 , a_=0.02 , a_=1e-12 , a_=2_5_6 , a_=1_0 , a_=1e-3 , a_=5 , a_=3_2_0 , a_=1_3_3_5_3_7_1_8 , a_=5_0_0_0 , a_=1 , a_=0 , a_=2 , **a_ , ): super().__init__(pad_token_id=a_ , bos_token_id=a_ , eos_token_id=a_ , **a_ ) # Common config a_ : Optional[int] = vocab_size a_ : List[Any] = max_position_embeddings a_ : Optional[Any] = hidden_size a_ : Optional[Any] = retriever_proj_size a_ : List[str] = num_hidden_layers a_ : List[Any] = num_attention_heads a_ : Tuple = num_candidates a_ : str = intermediate_size a_ : Optional[int] = hidden_act a_ : List[str] = hidden_dropout_prob a_ : List[str] = attention_probs_dropout_prob a_ : Tuple = initializer_range a_ : Tuple = type_vocab_size a_ : str = layer_norm_eps # Reader config a_ : str = span_hidden_size a_ : Union[str, Any] = max_span_width a_ : Tuple = reader_layer_norm_eps a_ : List[Any] = reader_beam_size a_ : str = reader_seq_len # Retrieval config a_ : str = num_block_records a_ : int = searcher_beam_size
370
1
import argparse
import json
from pathlib import Path

import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image

from transformers import (
    BertTokenizer,
    ViltConfig,
    ViltForImageAndTextRetrieval,
    ViltForImagesAndTextClassification,
    ViltForMaskedLM,
    ViltForQuestionAnswering,
    ViltImageProcessor,
    ViltProcessor,
)
from transformers.utils import logging


logging.set_verbosity_info()
logger = logging.get_logger(__name__)


def create_rename_keys(config, vqa_model=False, nlvr_model=False, irtr_model=False):
    rename_keys = []
    for i in range(config.num_hidden_layers):
        # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
        rename_keys.append((f"transformer.blocks.{i}.norm1.weight", f"vilt.encoder.layer.{i}.layernorm_before.weight"))
        rename_keys.append((f"transformer.blocks.{i}.norm1.bias", f"vilt.encoder.layer.{i}.layernorm_before.bias"))
        rename_keys.append(
            (f"transformer.blocks.{i}.attn.proj.weight", f"vilt.encoder.layer.{i}.attention.output.dense.weight")
        )
        rename_keys.append(
            (f"transformer.blocks.{i}.attn.proj.bias", f"vilt.encoder.layer.{i}.attention.output.dense.bias")
        )
        rename_keys.append((f"transformer.blocks.{i}.norm2.weight", f"vilt.encoder.layer.{i}.layernorm_after.weight"))
        rename_keys.append((f"transformer.blocks.{i}.norm2.bias", f"vilt.encoder.layer.{i}.layernorm_after.bias"))
        rename_keys.append(
            (f"transformer.blocks.{i}.mlp.fc1.weight", f"vilt.encoder.layer.{i}.intermediate.dense.weight")
        )
        rename_keys.append((f"transformer.blocks.{i}.mlp.fc1.bias", f"vilt.encoder.layer.{i}.intermediate.dense.bias"))
        rename_keys.append((f"transformer.blocks.{i}.mlp.fc2.weight", f"vilt.encoder.layer.{i}.output.dense.weight"))
        rename_keys.append((f"transformer.blocks.{i}.mlp.fc2.bias", f"vilt.encoder.layer.{i}.output.dense.bias"))

    # embeddings
    rename_keys.extend(
        [
            # text embeddings
            ("text_embeddings.word_embeddings.weight", "vilt.embeddings.text_embeddings.word_embeddings.weight"),
            (
                "text_embeddings.position_embeddings.weight",
                "vilt.embeddings.text_embeddings.position_embeddings.weight",
            ),
            ("text_embeddings.position_ids", "vilt.embeddings.text_embeddings.position_ids"),
            (
                "text_embeddings.token_type_embeddings.weight",
                "vilt.embeddings.text_embeddings.token_type_embeddings.weight",
            ),
            ("text_embeddings.LayerNorm.weight", "vilt.embeddings.text_embeddings.LayerNorm.weight"),
            ("text_embeddings.LayerNorm.bias", "vilt.embeddings.text_embeddings.LayerNorm.bias"),
            # patch embeddings
            ("transformer.cls_token", "vilt.embeddings.cls_token"),
            ("transformer.patch_embed.proj.weight", "vilt.embeddings.patch_embeddings.projection.weight"),
            ("transformer.patch_embed.proj.bias", "vilt.embeddings.patch_embeddings.projection.bias"),
            ("transformer.pos_embed", "vilt.embeddings.position_embeddings"),
            # token type embeddings
            ("token_type_embeddings.weight", "vilt.embeddings.token_type_embeddings.weight"),
        ]
    )

    # final layernorm + pooler
    rename_keys.extend(
        [
            ("transformer.norm.weight", "vilt.layernorm.weight"),
            ("transformer.norm.bias", "vilt.layernorm.bias"),
            ("pooler.dense.weight", "vilt.pooler.dense.weight"),
            ("pooler.dense.bias", "vilt.pooler.dense.bias"),
        ]
    )

    # classifier head(s)
    if vqa_model:
        # classification head
        rename_keys.extend(
            [
                ("vqa_classifier.0.weight", "classifier.0.weight"),
                ("vqa_classifier.0.bias", "classifier.0.bias"),
                ("vqa_classifier.1.weight", "classifier.1.weight"),
                ("vqa_classifier.1.bias", "classifier.1.bias"),
                ("vqa_classifier.3.weight", "classifier.3.weight"),
                ("vqa_classifier.3.bias", "classifier.3.bias"),
            ]
        )
    elif nlvr_model:
        # classification head
        rename_keys.extend(
            [
                ("nlvr2_classifier.0.weight", "classifier.0.weight"),
                ("nlvr2_classifier.0.bias", "classifier.0.bias"),
                ("nlvr2_classifier.1.weight", "classifier.1.weight"),
                ("nlvr2_classifier.1.bias", "classifier.1.bias"),
                ("nlvr2_classifier.3.weight", "classifier.3.weight"),
                ("nlvr2_classifier.3.bias", "classifier.3.bias"),
            ]
        )
    else:
        pass

    return rename_keys


def read_in_q_k_v(state_dict, config):
    for i in range(config.num_hidden_layers):
        prefix = "vilt."
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"transformer.blocks.{i}.attn.qkv.weight")
        in_proj_bias = state_dict.pop(f"transformer.blocks.{i}.attn.qkv.bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[prefix + f"encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[prefix + f"encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[: config.hidden_size]
        state_dict[prefix + f"encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[prefix + f"encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[prefix + f"encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[prefix + f"encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-config.hidden_size :]


def remove_classification_head_(state_dict):
    ignore_keys = ["head.weight", "head.bias"]
    for k in ignore_keys:
        state_dict.pop(k, None)


def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val


@torch.no_grad()
def convert_vilt_checkpoint(checkpoint_url, pytorch_dump_folder_path):
    config = ViltConfig(image_size=384, patch_size=32, tie_word_embeddings=False)
    mlm_model = False
    vqa_model = False
    nlvr_model = False
    irtr_model = False
    if "vqa" in checkpoint_url:
        vqa_model = True
        config.num_labels = 3129
        repo_id = "huggingface/label-files"
        filename = "vqa2-id2label.json"
        id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}
        model = ViltForQuestionAnswering(config)
    elif "nlvr" in checkpoint_url:
        nlvr_model = True
        config.num_labels = 2
        config.id2label = {0: "False", 1: "True"}
        config.label2id = {v: k for k, v in config.id2label.items()}
        config.modality_type_vocab_size = 3
        model = ViltForImagesAndTextClassification(config)
    elif "irtr" in checkpoint_url:
        irtr_model = True
        model = ViltForImageAndTextRetrieval(config)
    elif "mlm_itm" in checkpoint_url:
        mlm_model = True
        model = ViltForMaskedLM(config)
    else:
        raise ValueError("Unknown model type")

    # load state_dict of original model, remove and rename some keys
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")["state_dict"]
    rename_keys = create_rename_keys(config, vqa_model, nlvr_model, irtr_model)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, config)
    if mlm_model or irtr_model:
        ignore_keys = ["itm_score.fc.weight", "itm_score.fc.bias"]
        for k in ignore_keys:
            state_dict.pop(k, None)

    # load state dict into HuggingFace model
    model.eval()
    if mlm_model:
        missing_keys, unexpected_keys = model.load_state_dict(state_dict, strict=False)
        assert missing_keys == ["mlm_score.decoder.bias"]
    else:
        model.load_state_dict(state_dict)

    # Define processor
    image_processor = ViltImageProcessor(size=384)
    tokenizer = BertTokenizer.from_pretrained("bert-base-uncased")
    processor = ViltProcessor(image_processor, tokenizer)

    # Forward pass on example inputs (image + text)
    if nlvr_model:
        image1 = Image.open(requests.get("https://lil.nlp.cornell.edu/nlvr/exs/ex0_0.jpg", stream=True).raw)
        image2 = Image.open(requests.get("https://lil.nlp.cornell.edu/nlvr/exs/ex0_0.jpg", stream=True).raw)
        text = (
            "The left image contains twice the number of dogs as the right image, and at least two dogs in total are"
            " standing."
        )
        encoding_1 = processor(image1, text, return_tensors="pt")
        encoding_2 = processor(image2, text, return_tensors="pt")
        outputs = model(
            input_ids=encoding_1.input_ids,
            pixel_values=encoding_1.pixel_values,
            pixel_values_2=encoding_2.pixel_values,
        )
    else:
        image = Image.open(requests.get("http://images.cocodataset.org/val2017/000000039769.jpg", stream=True).raw)
        if mlm_model:
            text = "a bunch of [MASK] laying on a [MASK]."
        else:
            text = "How many cats are there?"
        encoding = processor(image, text, return_tensors="pt")
        outputs = model(**encoding)

    # Verify outputs
    if mlm_model:
        expected_shape = torch.Size([1, 11, 30522])
        expected_slice = torch.tensor([-12.5061, -12.5123, -12.5174])
        assert outputs.logits.shape == expected_shape
        assert torch.allclose(outputs.logits[0, 0, :3], expected_slice, atol=1e-4)

        # verify masked token prediction equals "cats"
        predicted_id = outputs.logits[0, 4, :].argmax(-1).item()
        assert tokenizer.decode([predicted_id]) == "cats"
    elif vqa_model:
        expected_shape = torch.Size([1, 3129])
        expected_slice = torch.tensor([-15.9495, -18.1472, -10.3041])
        assert outputs.logits.shape == expected_shape
        assert torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4)

        # verify vqa prediction equals "2"
        predicted_idx = outputs.logits.argmax(-1).item()
        assert model.config.id2label[predicted_idx] == "2"
    elif nlvr_model:
        expected_shape = torch.Size([1, 2])
        expected_slice = torch.tensor([-2.8721, 2.1291])
        assert outputs.logits.shape == expected_shape
        assert torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4)

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model and processor to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    processor.save_pretrained(pytorch_dump_folder_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--checkpoint_url",
        default="https://github.com/dandelin/ViLT/releases/download/200k/vilt_200k_mlm_itm.ckpt",
        type=str,
        help="URL of the checkpoint you'd like to convert.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
    )

    args = parser.parse_args()
    convert_vilt_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
235
import logging
import os
import random
import sys
from dataclasses import dataclass, field
from typing import Optional

import datasets
import evaluate
import numpy as np
from datasets import load_dataset

import transformers
from transformers import (
    AutoConfig,
    AutoModelForSequenceClassification,
    AutoTokenizer,
    DataCollatorWithPadding,
    EvalPrediction,
    HfArgumentParser,
    Trainer,
    TrainingArguments,
    default_data_collator,
    set_seed,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version


# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("4.31.0")

require_version("datasets>=1.8.0", "To fix: pip install -r examples/pytorch/text-classification/requirements.txt")

logger = logging.getLogger(__name__)


@dataclass
class DataTrainingArguments:
    max_seq_length: Optional[int] = field(
        default=128,
        metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached preprocessed datasets or not."}
    )
    pad_to_max_length: bool = field(
        default=True,
        metadata={
            "help": (
                "Whether to pad all samples to `max_seq_length`. "
                "If False, will pad the samples dynamically when batching to the maximum length in the batch."
            )
        },
    )
    max_train_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of training examples to this "
                "value if set."
            )
        },
    )
    max_eval_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of evaluation examples to this "
                "value if set."
            )
        },
    )
    max_predict_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of prediction examples to this "
                "value if set."
            )
        },
    )


@dataclass
class ModelArguments:
    model_name_or_path: str = field(
        default=None, metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}
    )
    language: str = field(
        default=None, metadata={"help": "Evaluation language. Also train language if `train_language` is set to None."}
    )
    train_language: Optional[str] = field(
        default=None, metadata={"help": "Train language if it is different from the evaluation language."}
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
    )
    cache_dir: Optional[str] = field(
        default=None,
        metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
    )
    do_lower_case: Optional[bool] = field(
        default=False,
        metadata={"help": "arg to indicate if tokenizer should do lower case in AutoTokenizer.from_pretrained()"},
    )
    use_fast_tokenizer: bool = field(
        default=True,
        metadata={"help": "Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."},
    )
    model_revision: str = field(
        default="main",
        metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."},
    )
    use_auth_token: bool = field(
        default=False,
        metadata={
            "help": (
                "Will use the token generated when running `huggingface-cli login` (necessary to use this script "
                "with private models)."
            )
        },
    )
    ignore_mismatched_sizes: bool = field(
        default=False,
        metadata={"help": "Will enable to load a pretrained model whose head dimensions are different."},
    )


def main():
    # See all possible arguments in src/transformers/training_args.py
    # or by passing the --help flag to this script.
    # We now keep distinct sets of args, for a cleaner separation of concerns.

    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    # Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
    # information sent is the one passed as arguments along with your Python/PyTorch versions.
    send_example_telemetry("run_xnli", model_args, data_args)

    # Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        handlers=[logging.StreamHandler(sys.stdout)],
    )

    if training_args.should_log:
        # The default of training_args.log_level is passive, so we set log level at info here to have that default.
        transformers.utils.logging.set_verbosity_info()

    log_level = training_args.get_process_log_level()
    logger.setLevel(log_level)
    datasets.utils.logging.set_verbosity(log_level)
    transformers.utils.logging.set_verbosity(log_level)
    transformers.utils.logging.enable_default_handler()
    transformers.utils.logging.enable_explicit_format()

    # Log on each process the small summary:
    logger.warning(
        f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"
        + f"distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}"
    )
    logger.info(f"Training/evaluation parameters {training_args}")

    # Detecting last checkpoint.
    last_checkpoint = None
    if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
        last_checkpoint = get_last_checkpoint(training_args.output_dir)
        if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0:
            raise ValueError(
                f"Output directory ({training_args.output_dir}) already exists and is not empty. "
                "Use --overwrite_output_dir to overcome."
            )
        elif last_checkpoint is not None:
            logger.info(
                f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change "
                "the `--output_dir` or add `--overwrite_output_dir` to train from scratch."
            )

    # Set seed before initializing model.
    set_seed(training_args.seed)

    # In distributed training, the load_dataset function guarantees that only one local process can concurrently
    # download the dataset.
    # Downloading and loading xnli dataset from the hub.
    if training_args.do_train:
        if model_args.train_language is None:
            train_dataset = load_dataset(
                "xnli",
                model_args.language,
                split="train",
                cache_dir=model_args.cache_dir,
                use_auth_token=True if model_args.use_auth_token else None,
            )
        else:
            train_dataset = load_dataset(
                "xnli",
                model_args.train_language,
                split="train",
                cache_dir=model_args.cache_dir,
                use_auth_token=True if model_args.use_auth_token else None,
            )
        label_list = train_dataset.features["label"].names

    if training_args.do_eval:
        eval_dataset = load_dataset(
            "xnli",
            model_args.language,
            split="validation",
            cache_dir=model_args.cache_dir,
            use_auth_token=True if model_args.use_auth_token else None,
        )
        label_list = eval_dataset.features["label"].names

    if training_args.do_predict:
        predict_dataset = load_dataset(
            "xnli",
            model_args.language,
            split="test",
            cache_dir=model_args.cache_dir,
            use_auth_token=True if model_args.use_auth_token else None,
        )
        label_list = predict_dataset.features["label"].names

    # Labels
    num_labels = len(label_list)

    # Load pretrained model and tokenizer
    # In distributed training, the .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.
    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path,
        num_labels=num_labels,
        id2label={str(i): label for i, label in enumerate(label_list)},
        label2id={label: i for i, label in enumerate(label_list)},
        finetuning_task="xnli",
        cache_dir=model_args.cache_dir,
        revision=model_args.model_revision,
        use_auth_token=True if model_args.use_auth_token else None,
    )
    tokenizer = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path,
        do_lower_case=model_args.do_lower_case,
        cache_dir=model_args.cache_dir,
        use_fast=model_args.use_fast_tokenizer,
        revision=model_args.model_revision,
        use_auth_token=True if model_args.use_auth_token else None,
    )
    model = AutoModelForSequenceClassification.from_pretrained(
        model_args.model_name_or_path,
        from_tf=bool(".ckpt" in model_args.model_name_or_path),
        config=config,
        cache_dir=model_args.cache_dir,
        revision=model_args.model_revision,
        use_auth_token=True if model_args.use_auth_token else None,
        ignore_mismatched_sizes=model_args.ignore_mismatched_sizes,
    )

    # Preprocessing the datasets
    # Padding strategy
    if data_args.pad_to_max_length:
        padding = "max_length"
    else:
        # We will pad later, dynamically at batch creation, to the max sequence length in each batch
        padding = False

    def preprocess_function(examples):
        # Tokenize the texts
        return tokenizer(
            examples["premise"],
            examples["hypothesis"],
            padding=padding,
            max_length=data_args.max_seq_length,
            truncation=True,
        )

    if training_args.do_train:
        if data_args.max_train_samples is not None:
            max_train_samples = min(len(train_dataset), data_args.max_train_samples)
            train_dataset = train_dataset.select(range(max_train_samples))
        with training_args.main_process_first(desc="train dataset map pre-processing"):
            train_dataset = train_dataset.map(
                preprocess_function,
                batched=True,
                load_from_cache_file=not data_args.overwrite_cache,
                desc="Running tokenizer on train dataset",
            )
        # Log a few random samples from the training set:
        for index in random.sample(range(len(train_dataset)), 3):
            logger.info(f"Sample {index} of the training set: {train_dataset[index]}.")

    if training_args.do_eval:
        if data_args.max_eval_samples is not None:
            max_eval_samples = min(len(eval_dataset), data_args.max_eval_samples)
            eval_dataset = eval_dataset.select(range(max_eval_samples))
        with training_args.main_process_first(desc="validation dataset map pre-processing"):
            eval_dataset = eval_dataset.map(
                preprocess_function,
                batched=True,
                load_from_cache_file=not data_args.overwrite_cache,
                desc="Running tokenizer on validation dataset",
            )

    if training_args.do_predict:
        if data_args.max_predict_samples is not None:
            max_predict_samples = min(len(predict_dataset), data_args.max_predict_samples)
            predict_dataset = predict_dataset.select(range(max_predict_samples))
        with training_args.main_process_first(desc="prediction dataset map pre-processing"):
            predict_dataset = predict_dataset.map(
                preprocess_function,
                batched=True,
                load_from_cache_file=not data_args.overwrite_cache,
                desc="Running tokenizer on prediction dataset",
            )

    # Get the metric function
    metric = evaluate.load("xnli")

    # You can define your custom compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with a
    # predictions and label_ids field) and has to return a dictionary string to float.
    def compute_metrics(p: EvalPrediction):
        preds = p.predictions[0] if isinstance(p.predictions, tuple) else p.predictions
        preds = np.argmax(preds, axis=1)
        return metric.compute(predictions=preds, references=p.label_ids)

    # Data collator will default to DataCollatorWithPadding, so we change it if we already did the padding.
    if data_args.pad_to_max_length:
        data_collator = default_data_collator
    elif training_args.fp16:
        data_collator = DataCollatorWithPadding(tokenizer, pad_to_multiple_of=8)
    else:
        data_collator = None

    # Initialize our Trainer
    trainer = Trainer(
        model=model,
        args=training_args,
        train_dataset=train_dataset if training_args.do_train else None,
        eval_dataset=eval_dataset if training_args.do_eval else None,
        compute_metrics=compute_metrics,
        tokenizer=tokenizer,
        data_collator=data_collator,
    )

    # Training
    if training_args.do_train:
        checkpoint = None
        if training_args.resume_from_checkpoint is not None:
            checkpoint = training_args.resume_from_checkpoint
        elif last_checkpoint is not None:
            checkpoint = last_checkpoint
        train_result = trainer.train(resume_from_checkpoint=checkpoint)
        metrics = train_result.metrics
        max_train_samples = (
            data_args.max_train_samples if data_args.max_train_samples is not None else len(train_dataset)
        )
        metrics["train_samples"] = min(max_train_samples, len(train_dataset))

        trainer.save_model()  # Saves the tokenizer too for easy upload

        trainer.log_metrics("train", metrics)
        trainer.save_metrics("train", metrics)
        trainer.save_state()

    # Evaluation
    if training_args.do_eval:
        logger.info("*** Evaluate ***")
        metrics = trainer.evaluate(eval_dataset=eval_dataset)

        max_eval_samples = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(eval_dataset)
        metrics["eval_samples"] = min(max_eval_samples, len(eval_dataset))

        trainer.log_metrics("eval", metrics)
        trainer.save_metrics("eval", metrics)

    # Prediction
    if training_args.do_predict:
        logger.info("*** Predict ***")
        predictions, labels, metrics = trainer.predict(predict_dataset, metric_key_prefix="predict")

        max_predict_samples = (
            data_args.max_predict_samples if data_args.max_predict_samples is not None else len(predict_dataset)
        )
        metrics["predict_samples"] = min(max_predict_samples, len(predict_dataset))

        trainer.log_metrics("predict", metrics)
        trainer.save_metrics("predict", metrics)

        predictions = np.argmax(predictions, axis=1)
        output_predict_file = os.path.join(training_args.output_dir, "predictions.txt")
        if trainer.is_world_process_zero():
            with open(output_predict_file, "w") as writer:
                writer.write("index\tprediction\n")
                for index, item in enumerate(predictions):
                    item = label_list[item]
                    writer.write(f"{index}\t{item}\n")


if __name__ == "__main__":
    main()
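# A hedged sketch of driving the same argument parser programmatically instead of from the
# command line; it assumes it runs in the same module as the dataclasses above. The model
# name and paths are illustrative only.
from transformers import HfArgumentParser, TrainingArguments

parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
model_args, data_args, training_args = parser.parse_args_into_dataclasses(
    args=[
        "--model_name_or_path", "bert-base-multilingual-cased",
        "--language", "de",
        "--train_language", "en",
        "--output_dir", "/tmp/debug_xnli",
        "--do_train",
        "--per_device_train_batch_size", "32",
    ]
)
# unset options keep their dataclass defaults
assert model_args.train_language == "en"
assert data_args.max_seq_length == 128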
113
0
"""simple docstring""" from ....utils import logging a__ : List[str] = logging.get_logger(__name__) class UpperCamelCase_ ( UpperCamelCase): """simple docstring""" def __init__( self : Any , UpperCAmelCase__ : Union[str, Any] , UpperCAmelCase__ : Tuple=None , UpperCAmelCase__ : str=2_0_4_8 ) -> int: __SCREAMING_SNAKE_CASE = config.__dict__ __SCREAMING_SNAKE_CASE = modal_hidden_size if num_labels: __SCREAMING_SNAKE_CASE = num_labels
709
"""simple docstring""" from __future__ import annotations def UpperCAmelCase__ (lowerCAmelCase_ ): '''simple docstring''' if len(lowerCAmelCase_ ) < 2: raise ValueError("Monogons and Digons are not polygons in the Euclidean space" ) if any(i <= 0 for i in nums ): raise ValueError("All values must be greater than 0" ) __SCREAMING_SNAKE_CASE = nums.copy() copy_nums.sort() return copy_nums[-1] < sum(copy_nums[:-1] ) if __name__ == "__main__": import doctest doctest.testmod()
553
0
import unittest

import numpy as np
import torch

from .utils_summarization import build_mask, compute_token_type_ids, process_story, truncate_or_pad


class SummarizationDataProcessingTest(unittest.TestCase):
    def setUp(self):
        self.block_size = 10

    def test_fit_to_block_sequence_too_small(self):
        """Pad the sequence with 0 if the sequence is smaller than the block size."""
        sequence = [1, 2, 3, 4]
        expected_output = [1, 2, 3, 4, 0, 0, 0, 0, 0, 0]
        self.assertEqual(truncate_or_pad(sequence, self.block_size, 0), expected_output)

    def test_fit_to_block_sequence_fit_exactly(self):
        sequence = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
        expected_output = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
        self.assertEqual(truncate_or_pad(sequence, self.block_size, 0), expected_output)

    def test_fit_to_block_sequence_too_big(self):
        sequence = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13]
        expected_output = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
        self.assertEqual(truncate_or_pad(sequence, self.block_size, 0), expected_output)

    def test_process_story_no_highlights(self):
        raw_story = (
            "It was the year of Our Lord one thousand seven hundred and\n"
            " seventy-five.\n\nSpiritual revelations were conceded to England at that\n"
            " favoured period, as at this."
        )
        _, summary_lines = process_story(raw_story)
        self.assertEqual(summary_lines, [])

    def test_process_empty_story(self):
        raw_story = ""
        story_lines, summary_lines = process_story(raw_story)
        self.assertEqual(story_lines, [])
        self.assertEqual(summary_lines, [])

    def test_process_story_with_missing_period(self):
        raw_story = (
            "It was the year of Our Lord one thousand seven hundred and "
            "seventy-five\n\nSpiritual revelations were conceded to England "
            "at that favoured period, as at this.\n@highlight\n\nIt was the best of times"
        )
        story_lines, summary_lines = process_story(raw_story)

        expected_story_lines = [
            "It was the year of Our Lord one thousand seven hundred and seventy-five.",
            "Spiritual revelations were conceded to England at that favoured period, as at this.",
        ]
        self.assertEqual(expected_story_lines, story_lines)

        expected_summary_lines = ["It was the best of times."]
        self.assertEqual(expected_summary_lines, summary_lines)

    def test_build_mask_no_padding(self):
        sequence = torch.tensor([1, 2, 3, 4])
        expected = torch.tensor([1, 1, 1, 1])
        np.testing.assert_array_equal(build_mask(sequence, 0).numpy(), expected.numpy())

    def test_build_mask(self):
        sequence = torch.tensor([1, 2, 3, 4, 23, 23, 23])
        expected = torch.tensor([1, 1, 1, 1, 0, 0, 0])
        np.testing.assert_array_equal(build_mask(sequence, 23).numpy(), expected.numpy())

    def test_build_mask_with_padding_equal_to_one(self):
        sequence = torch.tensor([8, 2, 3, 4, 1, 1, 1])
        expected = torch.tensor([1, 1, 1, 1, 0, 0, 0])
        np.testing.assert_array_equal(build_mask(sequence, 1).numpy(), expected.numpy())

    def test_compute_token_type_ids(self):
        separator = 101
        batch = torch.tensor([[1, 2, 3, 4, 5, 6], [1, 2, 3, 101, 5, 6], [1, 101, 3, 4, 101, 6]])
        expected = torch.tensor([[1, 1, 1, 1, 1, 1], [1, 1, 1, 0, 0, 0], [1, 0, 0, 0, 1, 1]])

        result = compute_token_type_ids(batch, separator)
        np.testing.assert_array_equal(result, expected)
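# A minimal sketch of a truncate_or_pad implementation consistent with the tests above; the
# real function lives in utils_summarization and may differ in details, but its observable
# behaviour is pinned down by the three "fit_to_block" cases: clip to block_size, or
# right-pad with the pad token.
def truncate_or_pad(sequence, block_size, pad_token_id):
    if len(sequence) > block_size:
        return sequence[:block_size]
    return sequence + [pad_token_id] * (block_size - len(sequence))


assert truncate_or_pad([1, 2, 3, 4], 10, 0) == [1, 2, 3, 4, 0, 0, 0, 0, 0, 0]
assert truncate_or_pad(list(range(1, 14)), 10, 0) == list(range(1, 11))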
668
'''simple docstring'''
from abc import ABC, abstractmethod
from argparse import ArgumentParser


class BaseTransformersCLICommand(ABC):
    @staticmethod
    @abstractmethod
    def register_subcommand(parser: ArgumentParser):
        """simple docstring"""
        raise NotImplementedError()

    @abstractmethod
    def run(self):
        """simple docstring"""
        raise NotImplementedError()
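# A hypothetical subclass sketch showing the contract the ABC above defines: register a
# subparser, construct the command from the parsed args, then run it. In this pattern,
# `parser` passed to register_subcommand is the sub-parsers action produced by
# ArgumentParser.add_subparsers(), which is why add_parser is available on it.
from argparse import Namespace


class EnvCommand(BaseTransformersCLICommand):
    @staticmethod
    def register_subcommand(parser):
        env_parser = parser.add_parser("env", help="Print environment information.")
        env_parser.set_defaults(func=lambda args: EnvCommand(args))

    def __init__(self, args: Namespace):
        self._args = args

    def run(self):
        import platform

        print(f"python: {platform.python_version()}")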
421
0
"""simple docstring""" from __future__ import annotations def lowerCamelCase (a_ :list , a_ :int , a_ :int , a_ :int) -> list: lowercase :List[Any] = [] lowercase :List[str] = input_list[low:mid], input_list[mid : high + 1] while left and right: result.append((left if left[0] <= right[0] else right).pop(0)) lowercase :Optional[int] = result + left + right return input_list def lowerCamelCase (a_ :list) -> list: if len(a_) <= 1: return input_list lowercase :Optional[Any] = list(a_) # iteration for two-way merging lowercase :Optional[Any] = 2 while p <= len(a_): # getting low, high and middle value for merge-sort of single list for i in range(0 , len(a_) , a_): lowercase :Any = i lowercase :List[Any] = i + p - 1 lowercase :Dict = (low + high + 1) // 2 lowercase :Union[str, Any] = merge(a_ , a_ , a_ , a_) # final merge of last two parts if p * 2 >= len(a_): lowercase :Optional[Any] = i lowercase :int = merge(a_ , 0 , a_ , len(a_) - 1) break p *= 2 return input_list if __name__ == "__main__": UpperCAmelCase = input('''Enter numbers separated by a comma:\n''').strip() if user_input == "": UpperCAmelCase = [] else: UpperCAmelCase = [int(item.strip()) for item in user_input.split(''',''')] print(iter_merge_sort(unsorted))
706
"""simple docstring""" import argparse import torch from torch import nn from transformers import MBartConfig, MBartForConditionalGeneration def lowerCamelCase (a_ :Dict) -> Dict: lowercase :Tuple = [ '''encoder.version''', '''decoder.version''', '''model.encoder.version''', '''model.decoder.version''', '''_float_tensor''', '''decoder.output_projection.weight''', ] for k in ignore_keys: state_dict.pop(a_ , a_) def lowerCamelCase (a_ :Union[str, Any]) -> str: lowercase , lowercase :Tuple = emb.weight.shape lowercase :List[str] = nn.Linear(a_ , a_ , bias=a_) lowercase :List[str] = emb.weight.data return lin_layer def lowerCamelCase (a_ :int , a_ :Union[str, Any]="facebook/mbart-large-en-ro" , a_ :Union[str, Any]=False , a_ :List[Any]=False) -> List[Any]: lowercase :List[Any] = torch.load(a_ , map_location='''cpu''')['''model'''] remove_ignore_keys_(a_) lowercase :Dict = state_dict['''encoder.embed_tokens.weight'''].shape[0] lowercase :Tuple = MBartConfig.from_pretrained(a_ , vocab_size=a_) if mbart_aa and finetuned: lowercase :List[Any] = '''relu''' lowercase :Optional[int] = state_dict['''decoder.embed_tokens.weight'''] lowercase :Union[str, Any] = MBartForConditionalGeneration(a_) model.model.load_state_dict(a_) if finetuned: lowercase :Dict = make_linear_from_emb(model.model.shared) return model if __name__ == "__main__": UpperCAmelCase = argparse.ArgumentParser() # Required parameters parser.add_argument( '''fairseq_path''', type=str, help='''bart.large, bart.large.cnn or a path to a model.pt on local filesystem.''' ) parser.add_argument('''pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''') parser.add_argument( '''--hf_config''', default='''facebook/mbart-large-cc25''', type=str, help='''Which huggingface architecture to use: mbart-large''', ) parser.add_argument('''--mbart_50''', action='''store_true''', help='''whether the model is mMART-50 checkpoint''') parser.add_argument('''--finetuned''', action='''store_true''', help='''whether the model is a fine-tuned checkpoint''') UpperCAmelCase = parser.parse_args() UpperCAmelCase = convert_fairseq_mbart_checkpoint_from_disk( args.fairseq_path, hf_config_path=args.hf_config, finetuned=args.finetuned, mbart_aa=args.mbart_aa ) model.save_pretrained(args.pytorch_dump_folder_path)
475
0
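A quick sanity check for the iterative merge sort above:

from random import sample

data = sample(range(100), 10)
assert iter_merge_sort(data) == sorted(data)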
"""simple docstring""" from __future__ import absolute_import, division, print_function, unicode_literals from torch import nn from torch.nn import CrossEntropyLoss, MSELoss from transformers import RobertaConfig from transformers.file_utils import add_start_docstrings, add_start_docstrings_to_model_forward from transformers.models.roberta.modeling_roberta import ( ROBERTA_INPUTS_DOCSTRING, ROBERTA_START_DOCSTRING, RobertaEmbeddings, ) from .modeling_highway_bert import BertPreTrainedModel, DeeBertModel, HighwayException, entropy @add_start_docstrings( '''The RoBERTa Model transformer with early exiting (DeeRoBERTa). ''' , A , ) class _SCREAMING_SNAKE_CASE( A ): SCREAMING_SNAKE_CASE_ : List[Any] = RobertaConfig SCREAMING_SNAKE_CASE_ : Optional[int] = '''roberta''' def __init__( self ,SCREAMING_SNAKE_CASE__ ) -> List[str]: """simple docstring""" super().__init__(__lowerCamelCase ) __SCREAMING_SNAKE_CASE :List[str] = RobertaEmbeddings(__lowerCamelCase ) self.init_weights() @add_start_docstrings( '''RoBERTa Model (with early exiting - DeeRoBERTa) with a classifier on top, also takes care of multi-layer training. ''' , A , ) class _SCREAMING_SNAKE_CASE( A ): SCREAMING_SNAKE_CASE_ : Any = RobertaConfig SCREAMING_SNAKE_CASE_ : List[str] = '''roberta''' def __init__( self ,SCREAMING_SNAKE_CASE__ ) -> Tuple: """simple docstring""" super().__init__(__lowerCamelCase ) __SCREAMING_SNAKE_CASE :List[Any] = config.num_labels __SCREAMING_SNAKE_CASE :Dict = config.num_hidden_layers __SCREAMING_SNAKE_CASE :Tuple = DeeRobertaModel(__lowerCamelCase ) __SCREAMING_SNAKE_CASE :Optional[int] = nn.Dropout(config.hidden_dropout_prob ) __SCREAMING_SNAKE_CASE :List[str] = nn.Linear(config.hidden_size ,self.config.num_labels ) @add_start_docstrings_to_model_forward(__lowerCamelCase ) def _UpperCamelCase ( self ,SCREAMING_SNAKE_CASE__=None ,SCREAMING_SNAKE_CASE__=None ,SCREAMING_SNAKE_CASE__=None ,SCREAMING_SNAKE_CASE__=None ,SCREAMING_SNAKE_CASE__=None ,SCREAMING_SNAKE_CASE__=None ,SCREAMING_SNAKE_CASE__=None ,SCREAMING_SNAKE_CASE__=-1 ,SCREAMING_SNAKE_CASE__=False ,) -> Any: """simple docstring""" __SCREAMING_SNAKE_CASE :List[Any] = self.num_layers try: __SCREAMING_SNAKE_CASE :Union[str, Any] = self.roberta( __lowerCamelCase ,attention_mask=__lowerCamelCase ,token_type_ids=__lowerCamelCase ,position_ids=__lowerCamelCase ,head_mask=__lowerCamelCase ,inputs_embeds=__lowerCamelCase ,) __SCREAMING_SNAKE_CASE :Dict = outputs[1] __SCREAMING_SNAKE_CASE :Dict = self.dropout(__lowerCamelCase ) __SCREAMING_SNAKE_CASE :Optional[Any] = self.classifier(__lowerCamelCase ) __SCREAMING_SNAKE_CASE :List[Any] = (logits,) + outputs[2:] # add hidden states and attention if they are here except HighwayException as e: __SCREAMING_SNAKE_CASE :Optional[Any] = e.message __SCREAMING_SNAKE_CASE :Optional[int] = e.exit_layer __SCREAMING_SNAKE_CASE :Tuple = outputs[0] if not self.training: __SCREAMING_SNAKE_CASE :Optional[int] = entropy(__lowerCamelCase ) __SCREAMING_SNAKE_CASE :List[str] = [] __SCREAMING_SNAKE_CASE :str = [] if labels is not None: if self.num_labels == 1: # We are doing regression __SCREAMING_SNAKE_CASE :int = MSELoss() __SCREAMING_SNAKE_CASE :Any = loss_fct(logits.view(-1 ) ,labels.view(-1 ) ) else: __SCREAMING_SNAKE_CASE :int = CrossEntropyLoss() __SCREAMING_SNAKE_CASE :Optional[int] = loss_fct(logits.view(-1 ,self.num_labels ) ,labels.view(-1 ) ) # work with highway exits __SCREAMING_SNAKE_CASE :Dict = [] for highway_exit in outputs[-1]: __SCREAMING_SNAKE_CASE :int = highway_exit[0] if not self.training: 
highway_logits_all.append(__lowerCamelCase ) highway_entropy.append(highway_exit[2] ) if self.num_labels == 1: # We are doing regression __SCREAMING_SNAKE_CASE :List[str] = MSELoss() __SCREAMING_SNAKE_CASE :Any = loss_fct(highway_logits.view(-1 ) ,labels.view(-1 ) ) else: __SCREAMING_SNAKE_CASE :List[Any] = CrossEntropyLoss() __SCREAMING_SNAKE_CASE :Any = loss_fct(highway_logits.view(-1 ,self.num_labels ) ,labels.view(-1 ) ) highway_losses.append(__lowerCamelCase ) if train_highway: __SCREAMING_SNAKE_CASE :str = (sum(highway_losses[:-1] ),) + outputs # exclude the final highway, of course else: __SCREAMING_SNAKE_CASE :Union[str, Any] = (loss,) + outputs if not self.training: __SCREAMING_SNAKE_CASE :Tuple = outputs + ((original_entropy, highway_entropy), exit_layer) if output_layer >= 0: __SCREAMING_SNAKE_CASE :str = ( (outputs[0],) + (highway_logits_all[output_layer],) + outputs[2:] ) # use the highway of the last layer return outputs # (loss), logits, (hidden_states), (attentions), entropy
498
from __future__ import annotations

from math import pi

# Define the Reduced Planck Constant ℏ (H bar), speed of light C, value of
# Pi and the function
REDUCED_PLANCK_CONSTANT = 1.054571817e-34  # unit of ℏ : J * s
SPEED_OF_LIGHT = 3e8  # unit of c : m * s^-1


def casimir_force(force: float, area: float, distance: float) -> dict[str, float]:
    if (force, area, distance).count(0) != 1:
        raise ValueError("One and only one argument must be 0")
    if force < 0:
        raise ValueError("Magnitude of force can not be negative")
    if distance < 0:
        raise ValueError("Distance can not be negative")
    if area < 0:
        raise ValueError("Area can not be negative")
    if force == 0:
        force = (REDUCED_PLANCK_CONSTANT * SPEED_OF_LIGHT * pi**2 * area) / (
            240 * (distance) ** 4
        )
        return {"force": force}
    elif area == 0:
        area = (240 * force * (distance) ** 4) / (
            REDUCED_PLANCK_CONSTANT * SPEED_OF_LIGHT * pi**2
        )
        return {"area": area}
    elif distance == 0:
        distance = (
            (REDUCED_PLANCK_CONSTANT * SPEED_OF_LIGHT * pi**2 * area) / (240 * force)
        ) ** (1 / 4)
        return {"distance": distance}
    raise ValueError("One and only one argument must be 0")


# Run doctest
if __name__ == "__main__":
    import doctest

    doctest.testmod()
418
0
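Usage sketch for casimir_force above; the numbers are illustrative only (plates of 4 cm² separated by 1 µm):

result = casimir_force(force=0, area=4e-4, distance=1e-6)
# F = (pi^2 * hbar * c * A) / (240 * d^4) ≈ 5.2e-7 N
print(result)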
"""simple docstring""" def _UpperCamelCase ( UpperCamelCase = 3 , UpperCamelCase = 7 , UpperCamelCase = 100_0000 ) -> Optional[Any]: """simple docstring""" __UpperCAmelCase : Optional[int] = 0 __UpperCAmelCase : Optional[Any] = 1 for current_denominator in range(1 , limit + 1 ): __UpperCAmelCase : str = current_denominator * numerator // denominator if current_denominator % denominator == 0: current_numerator -= 1 if current_numerator * max_denominator > current_denominator * max_numerator: __UpperCAmelCase : str = current_numerator __UpperCAmelCase : List[Any] = current_denominator return max_numerator if __name__ == "__main__": print(solution(numerator=3, denominator=7, limit=1_000_000))
714
"""simple docstring""" from ...configuration_utils import PretrainedConfig from ...utils import logging A = logging.get_logger(__name__) A = { """bigcode/gpt_bigcode-santacoder""": """https://huggingface.co/bigcode/gpt_bigcode-santacoder/resolve/main/config.json""", } class a__ ( __magic_name__ ): lowercase_ = "gpt_bigcode" lowercase_ = ["past_key_values"] lowercase_ = { "hidden_size": "n_embd", "max_position_embeddings": "n_positions", "num_attention_heads": "n_head", "num_hidden_layers": "n_layer", } def __init__( self : Any , UpperCamelCase_ : Tuple=50257 , UpperCamelCase_ : Dict=1024 , UpperCamelCase_ : Optional[Any]=768 , UpperCamelCase_ : Optional[Any]=12 , UpperCamelCase_ : Any=12 , UpperCamelCase_ : Optional[int]=None , UpperCamelCase_ : List[Any]="gelu_pytorch_tanh" , UpperCamelCase_ : Optional[Any]=0.1 , UpperCamelCase_ : Union[str, Any]=0.1 , UpperCamelCase_ : Tuple=0.1 , UpperCamelCase_ : Tuple=1e-5 , UpperCamelCase_ : List[str]=0.02 , UpperCamelCase_ : Dict=True , UpperCamelCase_ : Optional[int]=True , UpperCamelCase_ : str=50256 , UpperCamelCase_ : Union[str, Any]=50256 , UpperCamelCase_ : Dict=True , UpperCamelCase_ : List[str]=True , UpperCamelCase_ : Union[str, Any]=True , **UpperCamelCase_ : Union[str, Any] , ): """simple docstring""" __UpperCAmelCase : Tuple = vocab_size __UpperCAmelCase : Optional[int] = n_positions __UpperCAmelCase : Tuple = n_embd __UpperCAmelCase : str = n_layer __UpperCAmelCase : Dict = n_head __UpperCAmelCase : Optional[Any] = n_inner __UpperCAmelCase : Optional[Any] = activation_function __UpperCAmelCase : List[str] = resid_pdrop __UpperCAmelCase : List[Any] = embd_pdrop __UpperCAmelCase : Optional[Any] = attn_pdrop __UpperCAmelCase : Dict = layer_norm_epsilon __UpperCAmelCase : List[str] = initializer_range __UpperCAmelCase : int = scale_attn_weights __UpperCAmelCase : Tuple = use_cache __UpperCAmelCase : List[Any] = attention_softmax_in_fpaa __UpperCAmelCase : Any = scale_attention_softmax_in_fpaa __UpperCAmelCase : str = multi_query __UpperCAmelCase : int = bos_token_id __UpperCAmelCase : str = eos_token_id super().__init__(bos_token_id=UpperCamelCase_ , eos_token_id=UpperCamelCase_ , **UpperCamelCase_)
487
0
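A short usage sketch for the config as restored above (field names follow the signature shown there):

config = GPTBigCodeConfig(n_layer=2, n_head=4, n_embd=128)
print(config.multi_query)  # True — multi-query attention is on by default
print(config.num_hidden_layers)  # 2, resolved through the attribute_map alias for n_layer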
'''simple docstring''' import gc import unittest import torch from transformers import CLIPTextConfig, CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer from diffusers import ( AutoencoderKL, DDIMScheduler, DDPMScheduler, PriorTransformer, StableUnCLIPPipeline, UNetaDConditionModel, ) from diffusers.pipelines.stable_diffusion.stable_unclip_image_normalizer import StableUnCLIPImageNormalizer from diffusers.utils.testing_utils import enable_full_determinism, load_numpy, require_torch_gpu, slow, torch_device from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS from ..test_pipelines_common import ( PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin, assert_mean_pixel_difference, ) enable_full_determinism() class __a ( _snake_case, _snake_case, _snake_case, unittest.TestCase ): __UpperCamelCase : Any = StableUnCLIPPipeline __UpperCamelCase : Tuple = TEXT_TO_IMAGE_PARAMS __UpperCamelCase : List[str] = TEXT_TO_IMAGE_BATCH_PARAMS __UpperCamelCase : Optional[Any] = TEXT_TO_IMAGE_IMAGE_PARAMS __UpperCamelCase : List[str] = TEXT_TO_IMAGE_IMAGE_PARAMS # TODO(will) Expected attn_bias.stride(1) == 0 to be true, but got false __UpperCamelCase : List[str] = False def UpperCAmelCase__ ( self : int ): '''simple docstring''' __SCREAMING_SNAKE_CASE = 32 __SCREAMING_SNAKE_CASE = embedder_hidden_size # prior components torch.manual_seed(0 ) __SCREAMING_SNAKE_CASE = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" ) torch.manual_seed(0 ) __SCREAMING_SNAKE_CASE = CLIPTextModelWithProjection( CLIPTextConfig( bos_token_id=0 ,eos_token_id=2 ,hidden_size=lowerCamelCase ,projection_dim=lowerCamelCase ,intermediate_size=37 ,layer_norm_eps=1E-0_5 ,num_attention_heads=4 ,num_hidden_layers=5 ,pad_token_id=1 ,vocab_size=1000 ,) ) torch.manual_seed(0 ) __SCREAMING_SNAKE_CASE = PriorTransformer( num_attention_heads=2 ,attention_head_dim=12 ,embedding_dim=lowerCamelCase ,num_layers=1 ,) torch.manual_seed(0 ) __SCREAMING_SNAKE_CASE = DDPMScheduler( variance_type="""fixed_small_log""" ,prediction_type="""sample""" ,num_train_timesteps=1000 ,clip_sample=lowerCamelCase ,clip_sample_range=5.0 ,beta_schedule="""squaredcos_cap_v2""" ,) # regular denoising components torch.manual_seed(0 ) __SCREAMING_SNAKE_CASE = StableUnCLIPImageNormalizer(embedding_dim=lowerCamelCase ) __SCREAMING_SNAKE_CASE = DDPMScheduler(beta_schedule="""squaredcos_cap_v2""" ) torch.manual_seed(0 ) __SCREAMING_SNAKE_CASE = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" ) torch.manual_seed(0 ) __SCREAMING_SNAKE_CASE = CLIPTextModel( CLIPTextConfig( bos_token_id=0 ,eos_token_id=2 ,hidden_size=lowerCamelCase ,projection_dim=32 ,intermediate_size=37 ,layer_norm_eps=1E-0_5 ,num_attention_heads=4 ,num_hidden_layers=5 ,pad_token_id=1 ,vocab_size=1000 ,) ) torch.manual_seed(0 ) __SCREAMING_SNAKE_CASE = UNetaDConditionModel( sample_size=32 ,in_channels=4 ,out_channels=4 ,down_block_types=("""CrossAttnDownBlock2D""", """DownBlock2D""") ,up_block_types=("""UpBlock2D""", """CrossAttnUpBlock2D""") ,block_out_channels=(32, 64) ,attention_head_dim=(2, 4) ,class_embed_type="""projection""" ,projection_class_embeddings_input_dim=embedder_projection_dim * 2 ,cross_attention_dim=lowerCamelCase ,layers_per_block=1 ,upcast_attention=lowerCamelCase ,use_linear_projection=lowerCamelCase ,) torch.manual_seed(0 ) __SCREAMING_SNAKE_CASE = DDIMScheduler( beta_schedule="""scaled_linear""" ,beta_start=0.00_085 ,beta_end=0.012 
,prediction_type="""v_prediction""" ,set_alpha_to_one=lowerCamelCase ,steps_offset=1 ,) torch.manual_seed(0 ) __SCREAMING_SNAKE_CASE = AutoencoderKL() __SCREAMING_SNAKE_CASE = { # prior components """prior_tokenizer""": prior_tokenizer, """prior_text_encoder""": prior_text_encoder, """prior""": prior, """prior_scheduler""": prior_scheduler, # image noising components """image_normalizer""": image_normalizer, """image_noising_scheduler""": image_noising_scheduler, # regular denoising components """tokenizer""": tokenizer, """text_encoder""": text_encoder, """unet""": unet, """scheduler""": scheduler, """vae""": vae, } return components def UpperCAmelCase__ ( self : Dict ,lowerCamelCase : int ,lowerCamelCase : Tuple=0 ): '''simple docstring''' if str(lowerCamelCase ).startswith("""mps""" ): __SCREAMING_SNAKE_CASE = torch.manual_seed(lowerCamelCase ) else: __SCREAMING_SNAKE_CASE = torch.Generator(device=lowerCamelCase ).manual_seed(lowerCamelCase ) __SCREAMING_SNAKE_CASE = { """prompt""": """A painting of a squirrel eating a burger""", """generator""": generator, """num_inference_steps""": 2, """prior_num_inference_steps""": 2, """output_type""": """numpy""", } return inputs def UpperCAmelCase__ ( self : Union[str, Any] ): '''simple docstring''' __SCREAMING_SNAKE_CASE = torch_device == """cpu""" self._test_attention_slicing_forward_pass(test_max_difference=lowerCamelCase ) def UpperCAmelCase__ ( self : Optional[int] ): '''simple docstring''' __SCREAMING_SNAKE_CASE = torch_device in ["""cpu""", """mps"""] self._test_inference_batch_single_identical(test_max_difference=lowerCamelCase ) @slow @require_torch_gpu class __a ( unittest.TestCase ): def UpperCAmelCase__ ( self : List[str] ): '''simple docstring''' super().tearDown() gc.collect() torch.cuda.empty_cache() def UpperCAmelCase__ ( self : Union[str, Any] ): '''simple docstring''' __SCREAMING_SNAKE_CASE = load_numpy( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_l_anime_turtle_fp16.npy""" ) __SCREAMING_SNAKE_CASE = StableUnCLIPPipeline.from_pretrained("""fusing/stable-unclip-2-1-l""" ,torch_dtype=torch.floataa ) pipe.to(lowerCamelCase ) pipe.set_progress_bar_config(disable=lowerCamelCase ) # stable unclip will oom when integration tests are run on a V100, # so turn on memory savings pipe.enable_attention_slicing() pipe.enable_sequential_cpu_offload() __SCREAMING_SNAKE_CASE = torch.Generator(device="""cpu""" ).manual_seed(0 ) __SCREAMING_SNAKE_CASE = pipe("""anime turle""" ,generator=lowerCamelCase ,output_type="""np""" ) __SCREAMING_SNAKE_CASE = output.images[0] assert image.shape == (768, 768, 3) assert_mean_pixel_difference(lowerCamelCase ,lowerCamelCase ) def UpperCAmelCase__ ( self : List[str] ): '''simple docstring''' torch.cuda.empty_cache() torch.cuda.reset_max_memory_allocated() torch.cuda.reset_peak_memory_stats() __SCREAMING_SNAKE_CASE = StableUnCLIPPipeline.from_pretrained("""fusing/stable-unclip-2-1-l""" ,torch_dtype=torch.floataa ) __SCREAMING_SNAKE_CASE = pipe.to(lowerCamelCase ) pipe.set_progress_bar_config(disable=lowerCamelCase ) pipe.enable_attention_slicing() pipe.enable_sequential_cpu_offload() __SCREAMING_SNAKE_CASE = pipe( """anime turtle""" ,prior_num_inference_steps=2 ,num_inference_steps=2 ,output_type="""np""" ,) __SCREAMING_SNAKE_CASE = torch.cuda.max_memory_allocated() # make sure that less than 7 GB is allocated assert mem_bytes < 7 * 10**9
109
import gc import random import unittest import numpy as np import torch from PIL import Image from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer from diffusers import AutoencoderKL, DDIMScheduler, DDPMScheduler, StableDiffusionUpscalePipeline, UNetaDConditionModel from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu enable_full_determinism() class lowerCamelCase__ ( unittest.TestCase ): def lowerCamelCase_ ( self : Dict ): '''simple docstring''' super().tearDown() gc.collect() torch.cuda.empty_cache() @property def lowerCamelCase_ ( self : int ): '''simple docstring''' lowerCamelCase__: Optional[Any] = 1 lowerCamelCase__: Union[str, Any] = 3 lowerCamelCase__: str = (32, 32) lowerCamelCase__: str = floats_tensor((batch_size, num_channels) + sizes , rng=random.Random(0 ) ).to(__a ) return image @property def lowerCamelCase_ ( self : Tuple ): '''simple docstring''' torch.manual_seed(0 ) lowerCamelCase__: Dict = UNetaDConditionModel( block_out_channels=(32, 32, 64) , layers_per_block=2 , sample_size=32 , in_channels=7 , out_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""", """CrossAttnDownBlock2D""") , up_block_types=("""CrossAttnUpBlock2D""", """CrossAttnUpBlock2D""", """UpBlock2D""") , cross_attention_dim=32 , attention_head_dim=8 , use_linear_projection=__a , only_cross_attention=(True, True, False) , num_class_embeds=100 , ) return model @property def lowerCamelCase_ ( self : List[Any] ): '''simple docstring''' torch.manual_seed(0 ) lowerCamelCase__: Dict = AutoencoderKL( block_out_channels=[32, 32, 64] , in_channels=3 , out_channels=3 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=4 , ) return model @property def lowerCamelCase_ ( self : int ): '''simple docstring''' torch.manual_seed(0 ) lowerCamelCase__: str = CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , hidden_act="""gelu""" , projection_dim=512 , ) return CLIPTextModel(__a ) def lowerCamelCase_ ( self : List[str] ): '''simple docstring''' lowerCamelCase__: str = """cpu""" # ensure determinism for the device-dependent torch.Generator lowerCamelCase__: List[str] = self.dummy_cond_unet_upscale lowerCamelCase__: Optional[Any] = DDPMScheduler() lowerCamelCase__: Union[str, Any] = DDIMScheduler(prediction_type="""v_prediction""" ) lowerCamelCase__: Tuple = self.dummy_vae lowerCamelCase__: Optional[int] = self.dummy_text_encoder lowerCamelCase__: Optional[int] = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" ) lowerCamelCase__: Dict = self.dummy_image.cpu().permute(0 , 2 , 3 , 1 )[0] lowerCamelCase__: Optional[Any] = Image.fromarray(np.uinta(__a ) ).convert("""RGB""" ).resize((64, 64) ) # make sure here that pndm scheduler skips prk lowerCamelCase__: List[str] = StableDiffusionUpscalePipeline( unet=__a , low_res_scheduler=__a , scheduler=__a , vae=__a , text_encoder=__a , tokenizer=__a , max_noise_level=350 , ) lowerCamelCase__: Any = sd_pipe.to(__a ) sd_pipe.set_progress_bar_config(disable=__a ) lowerCamelCase__: List[str] = """A painting of a squirrel eating a burger""" lowerCamelCase__: Dict = torch.Generator(device=__a ).manual_seed(0 ) 
lowerCamelCase__: Any = sd_pipe( [prompt] , image=__a , generator=__a , guidance_scale=6.0 , noise_level=20 , num_inference_steps=2 , output_type="""np""" , ) lowerCamelCase__: List[str] = output.images lowerCamelCase__: Union[str, Any] = torch.Generator(device=__a ).manual_seed(0 ) lowerCamelCase__: List[str] = sd_pipe( [prompt] , image=__a , generator=__a , guidance_scale=6.0 , noise_level=20 , num_inference_steps=2 , output_type="""np""" , return_dict=__a , )[0] lowerCamelCase__: Tuple = image[0, -3:, -3:, -1] lowerCamelCase__: int = image_from_tuple[0, -3:, -3:, -1] lowerCamelCase__: int = low_res_image.size[0] * 4 assert image.shape == (1, expected_height_width, expected_height_width, 3) lowerCamelCase__: List[str] = np.array([0.3_113, 0.3_910, 0.4_272, 0.4_859, 0.5_061, 0.4_652, 0.5_362, 0.5_715, 0.5_661] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2 assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2 def lowerCamelCase_ ( self : Any ): '''simple docstring''' lowerCamelCase__: Optional[Any] = """cpu""" # ensure determinism for the device-dependent torch.Generator lowerCamelCase__: List[str] = self.dummy_cond_unet_upscale lowerCamelCase__: Optional[int] = DDPMScheduler() lowerCamelCase__: Any = DDIMScheduler(prediction_type="""v_prediction""" ) lowerCamelCase__: List[str] = self.dummy_vae lowerCamelCase__: Optional[Any] = self.dummy_text_encoder lowerCamelCase__: Tuple = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" ) lowerCamelCase__: str = self.dummy_image.cpu().permute(0 , 2 , 3 , 1 )[0] lowerCamelCase__: List[str] = Image.fromarray(np.uinta(__a ) ).convert("""RGB""" ).resize((64, 64) ) # make sure here that pndm scheduler skips prk lowerCamelCase__: Tuple = StableDiffusionUpscalePipeline( unet=__a , low_res_scheduler=__a , scheduler=__a , vae=__a , text_encoder=__a , tokenizer=__a , max_noise_level=350 , ) lowerCamelCase__: List[str] = sd_pipe.to(__a ) sd_pipe.set_progress_bar_config(disable=__a ) lowerCamelCase__: Any = """A painting of a squirrel eating a burger""" lowerCamelCase__: str = sd_pipe( 2 * [prompt] , image=2 * [low_res_image] , guidance_scale=6.0 , noise_level=20 , num_inference_steps=2 , output_type="""np""" , ) lowerCamelCase__: Any = output.images assert image.shape[0] == 2 lowerCamelCase__: Optional[Any] = torch.Generator(device=__a ).manual_seed(0 ) lowerCamelCase__: Dict = sd_pipe( [prompt] , image=__a , generator=__a , num_images_per_prompt=2 , guidance_scale=6.0 , noise_level=20 , num_inference_steps=2 , output_type="""np""" , ) lowerCamelCase__: Tuple = output.images assert image.shape[0] == 2 @unittest.skipIf(torch_device != """cuda""" , """This test requires a GPU""" ) def lowerCamelCase_ ( self : Union[str, Any] ): '''simple docstring''' lowerCamelCase__: int = self.dummy_cond_unet_upscale lowerCamelCase__: Dict = DDPMScheduler() lowerCamelCase__: Union[str, Any] = DDIMScheduler(prediction_type="""v_prediction""" ) lowerCamelCase__: List[str] = self.dummy_vae lowerCamelCase__: Tuple = self.dummy_text_encoder lowerCamelCase__: int = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" ) lowerCamelCase__: Optional[int] = self.dummy_image.cpu().permute(0 , 2 , 3 , 1 )[0] lowerCamelCase__: Union[str, Any] = Image.fromarray(np.uinta(__a ) ).convert("""RGB""" ).resize((64, 64) ) # put models in fp16, except vae as it overflows in fp16 lowerCamelCase__: Optional[int] = unet.half() lowerCamelCase__: Optional[Any] = text_encoder.half() # make sure here that pndm 
scheduler skips prk lowerCamelCase__: List[str] = StableDiffusionUpscalePipeline( unet=__a , low_res_scheduler=__a , scheduler=__a , vae=__a , text_encoder=__a , tokenizer=__a , max_noise_level=350 , ) lowerCamelCase__: List[Any] = sd_pipe.to(__a ) sd_pipe.set_progress_bar_config(disable=__a ) lowerCamelCase__: Tuple = """A painting of a squirrel eating a burger""" lowerCamelCase__: Optional[int] = torch.manual_seed(0 ) lowerCamelCase__: Optional[Any] = sd_pipe( [prompt] , image=__a , generator=__a , num_inference_steps=2 , output_type="""np""" , ).images lowerCamelCase__: Optional[int] = low_res_image.size[0] * 4 assert image.shape == (1, expected_height_width, expected_height_width, 3) @slow @require_torch_gpu class lowerCamelCase__ ( unittest.TestCase ): def lowerCamelCase_ ( self : Dict ): '''simple docstring''' super().tearDown() gc.collect() torch.cuda.empty_cache() def lowerCamelCase_ ( self : List[Any] ): '''simple docstring''' lowerCamelCase__: Tuple = load_image( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main""" """/sd2-upscale/low_res_cat.png""" ) lowerCamelCase__: List[str] = load_numpy( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-upscale""" """/upsampled_cat.npy""" ) lowerCamelCase__: Dict = """stabilityai/stable-diffusion-x4-upscaler""" lowerCamelCase__: Optional[int] = StableDiffusionUpscalePipeline.from_pretrained(__a ) pipe.to(__a ) pipe.set_progress_bar_config(disable=__a ) pipe.enable_attention_slicing() lowerCamelCase__: List[Any] = """a cat sitting on a park bench""" lowerCamelCase__: Dict = torch.manual_seed(0 ) lowerCamelCase__: Any = pipe( prompt=__a , image=__a , generator=__a , output_type="""np""" , ) lowerCamelCase__: Dict = output.images[0] assert image.shape == (512, 512, 3) assert np.abs(expected_image - image ).max() < 1e-3 def lowerCamelCase_ ( self : List[Any] ): '''simple docstring''' lowerCamelCase__: Tuple = load_image( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main""" """/sd2-upscale/low_res_cat.png""" ) lowerCamelCase__: int = load_numpy( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-upscale""" """/upsampled_cat_fp16.npy""" ) lowerCamelCase__: int = """stabilityai/stable-diffusion-x4-upscaler""" lowerCamelCase__: List[str] = StableDiffusionUpscalePipeline.from_pretrained( __a , torch_dtype=torch.floataa , ) pipe.to(__a ) pipe.set_progress_bar_config(disable=__a ) pipe.enable_attention_slicing() lowerCamelCase__: Any = """a cat sitting on a park bench""" lowerCamelCase__: Tuple = torch.manual_seed(0 ) lowerCamelCase__: Optional[int] = pipe( prompt=__a , image=__a , generator=__a , output_type="""np""" , ) lowerCamelCase__: int = output.images[0] assert image.shape == (512, 512, 3) assert np.abs(expected_image - image ).max() < 5e-1 def lowerCamelCase_ ( self : List[str] ): '''simple docstring''' torch.cuda.empty_cache() torch.cuda.reset_max_memory_allocated() torch.cuda.reset_peak_memory_stats() lowerCamelCase__: Tuple = load_image( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main""" """/sd2-upscale/low_res_cat.png""" ) lowerCamelCase__: Tuple = """stabilityai/stable-diffusion-x4-upscaler""" lowerCamelCase__: Optional[int] = StableDiffusionUpscalePipeline.from_pretrained( __a , torch_dtype=torch.floataa , ) pipe.to(__a ) pipe.set_progress_bar_config(disable=__a ) pipe.enable_attention_slicing(1 ) pipe.enable_sequential_cpu_offload() lowerCamelCase__: str = 
"""a cat sitting on a park bench""" lowerCamelCase__: int = torch.manual_seed(0 ) lowerCamelCase__: Optional[Any] = pipe( prompt=__a , image=__a , generator=__a , num_inference_steps=5 , output_type="""np""" , ) lowerCamelCase__: Optional[int] = torch.cuda.max_memory_allocated() # make sure that less than 2.9 GB is allocated assert mem_bytes < 2.9 * 10**9
306
0
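Both pipeline test suites above repeat the same determinism idiom when building inputs; pulled out on its own, it looks like this (device handling mirrors the tests):

import torch

def get_generator(device: str, seed: int = 0) -> torch.Generator:
    # mps does not support device-bound generators, so fall back to the global seed
    if device.startswith("mps"):
        return torch.manual_seed(seed)
    return torch.Generator(device=device).manual_seed(seed)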
from __future__ import annotations

from fractions import Fraction
from math import gcd, sqrt


def is_sq(number: int) -> bool:
    sq = int(number**0.5)
    return number == sq * sq


def add_three(
    x_num: int, x_den: int, y_num: int, y_den: int, z_num: int, z_den: int
) -> tuple[int, int]:
    top = x_num * y_den * z_den + y_num * x_den * z_den + z_num * x_den * y_den
    bottom = x_den * y_den * z_den
    hcf = gcd(top, bottom)
    top //= hcf
    bottom //= hcf
    return top, bottom


def solution(order: int = 35) -> int:
    unique_s: set = set()
    hcf: int
    total: Fraction = Fraction(0)
    fraction_sum: tuple[int, int]

    for x_num in range(1, order + 1):
        for x_den in range(x_num + 1, order + 1):
            for y_num in range(1, order + 1):
                for y_den in range(y_num + 1, order + 1):
                    # n=1
                    z_num = x_num * y_den + x_den * y_num
                    z_den = x_den * y_den
                    hcf = gcd(z_num, z_den)
                    z_num //= hcf
                    z_den //= hcf
                    if 0 < z_num < z_den <= order:
                        fraction_sum = add_three(x_num, x_den, y_num, y_den, z_num, z_den)
                        unique_s.add(fraction_sum)

                    # n=2
                    z_num = x_num * x_num * y_den * y_den + x_den * x_den * y_num * y_num
                    z_den = x_den * x_den * y_den * y_den
                    if is_sq(z_num) and is_sq(z_den):
                        z_num = int(sqrt(z_num))
                        z_den = int(sqrt(z_den))
                        hcf = gcd(z_num, z_den)
                        z_num //= hcf
                        z_den //= hcf
                        if 0 < z_num < z_den <= order:
                            fraction_sum = add_three(x_num, x_den, y_num, y_den, z_num, z_den)
                            unique_s.add(fraction_sum)

                    # n=-1
                    z_num = x_num * y_num
                    z_den = x_den * y_num + x_num * y_den
                    hcf = gcd(z_num, z_den)
                    z_num //= hcf
                    z_den //= hcf
                    if 0 < z_num < z_den <= order:
                        fraction_sum = add_three(x_num, x_den, y_num, y_den, z_num, z_den)
                        unique_s.add(fraction_sum)

                    # n=-2
                    z_num = x_num * x_num * y_num * y_num
                    z_den = x_den * x_den * y_num * y_num + x_num * x_num * y_den * y_den
                    if is_sq(z_num) and is_sq(z_den):
                        z_num = int(sqrt(z_num))
                        z_den = int(sqrt(z_den))
                        hcf = gcd(z_num, z_den)
                        z_num //= hcf
                        z_den //= hcf
                        if 0 < z_num < z_den <= order:
                            fraction_sum = add_three(x_num, x_den, y_num, y_den, z_num, z_den)
                            unique_s.add(fraction_sum)

    for num, den in unique_s:
        total += Fraction(num, den)

    return total.denominator + total.numerator


if __name__ == "__main__":
    print(f"{solution() = }")
712
# flake8: noqa
# Lint as: python3
__all__ = [
    "VerificationMode",
    "Version",
    "disable_progress_bar",
    "enable_progress_bar",
    "is_progress_bar_enabled",
    "experimental",
]

from .info_utils import VerificationMode
from .logging import disable_progress_bar, enable_progress_bar, is_progress_bar_enabled
from .version import Version
from .experimental import experimental
175
0
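The reduce-by-gcd step that recurs throughout the solution above, shown in isolation:

from fractions import Fraction
from math import gcd

top, bottom = 6, 4
hcf = gcd(top, bottom)
assert (top // hcf, bottom // hcf) == (3, 2)
assert Fraction(6, 4) == Fraction(3, 2)  # Fraction normalises the same way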
from __future__ import annotations


def slowsort(sequence: list, start: int | None = None, end: int | None = None) -> None:
    if start is None:
        start = 0

    if end is None:
        end = len(sequence) - 1

    if start >= end:
        return

    mid = (start + end) // 2

    slowsort(sequence, start, mid)
    slowsort(sequence, mid + 1, end)

    if sequence[end] < sequence[mid]:
        sequence[end], sequence[mid] = sequence[mid], sequence[end]

    slowsort(sequence, start, end - 1)


if __name__ == "__main__":
    from doctest import testmod

    testmod()
233
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


_import_structure = {
    "configuration_blip_2": [
        "BLIP_2_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "Blip2Config",
        "Blip2QFormerConfig",
        "Blip2VisionConfig",
    ],
    "processing_blip_2": ["Blip2Processor"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_blip_2"] = [
        "BLIP_2_PRETRAINED_MODEL_ARCHIVE_LIST",
        "Blip2Model",
        "Blip2QFormerModel",
        "Blip2PreTrainedModel",
        "Blip2ForConditionalGeneration",
        "Blip2VisionModel",
    ]

if TYPE_CHECKING:
    from .configuration_blip_2 import (
        BLIP_2_PRETRAINED_CONFIG_ARCHIVE_MAP,
        Blip2Config,
        Blip2QFormerConfig,
        Blip2VisionConfig,
    )
    from .processing_blip_2 import Blip2Processor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_blip_2 import (
            BLIP_2_PRETRAINED_MODEL_ARCHIVE_LIST,
            Blip2ForConditionalGeneration,
            Blip2Model,
            Blip2PreTrainedModel,
            Blip2QFormerModel,
            Blip2VisionModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
233
1
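Slowsort sorts in place and returns None; a quick check:

seq = [5, 1, 4, 2, 3]
slowsort(seq)
assert seq == [1, 2, 3, 4, 5]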
"""simple docstring""" from __future__ import annotations def _lowerCamelCase ( UpperCAmelCase_ : list[int | float], UpperCAmelCase_ : int, UpperCAmelCase_ : int ) -> int | float: """simple docstring""" if len(UpperCAmelCase_ ) == 0: raise ValueError("find_max() arg is an empty sequence" ) if ( left >= len(UpperCAmelCase_ ) or left < -len(UpperCAmelCase_ ) or right >= len(UpperCAmelCase_ ) or right < -len(UpperCAmelCase_ ) ): raise IndexError("list index out of range" ) if left == right: return nums[left] A__ = (left + right) >> 1 # the middle A__ = find_max(UpperCAmelCase_, UpperCAmelCase_, UpperCAmelCase_ ) # find max in range[left, mid] A__ = find_max(UpperCAmelCase_, mid + 1, UpperCAmelCase_ ) # find max in range[mid + 1, right] return left_max if left_max >= right_max else right_max if __name__ == "__main__": import doctest doctest.testmod(verbose=True)
705
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_tokenizers_available, is_torch_available, ) UpperCamelCase = { """configuration_mobilebert""": [ """MOBILEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """MobileBertConfig""", """MobileBertOnnxConfig""", ], """tokenization_mobilebert""": ["""MobileBertTokenizer"""], } try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCamelCase = ["""MobileBertTokenizerFast"""] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCamelCase = [ """MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST""", """MobileBertForMaskedLM""", """MobileBertForMultipleChoice""", """MobileBertForNextSentencePrediction""", """MobileBertForPreTraining""", """MobileBertForQuestionAnswering""", """MobileBertForSequenceClassification""", """MobileBertForTokenClassification""", """MobileBertLayer""", """MobileBertModel""", """MobileBertPreTrainedModel""", """load_tf_weights_in_mobilebert""", ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCamelCase = [ """TF_MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST""", """TFMobileBertForMaskedLM""", """TFMobileBertForMultipleChoice""", """TFMobileBertForNextSentencePrediction""", """TFMobileBertForPreTraining""", """TFMobileBertForQuestionAnswering""", """TFMobileBertForSequenceClassification""", """TFMobileBertForTokenClassification""", """TFMobileBertMainLayer""", """TFMobileBertModel""", """TFMobileBertPreTrainedModel""", ] if TYPE_CHECKING: from .configuration_mobilebert import ( MOBILEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, MobileBertConfig, MobileBertOnnxConfig, ) from .tokenization_mobilebert import MobileBertTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_mobilebert_fast import MobileBertTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_mobilebert import ( MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST, MobileBertForMaskedLM, MobileBertForMultipleChoice, MobileBertForNextSentencePrediction, MobileBertForPreTraining, MobileBertForQuestionAnswering, MobileBertForSequenceClassification, MobileBertForTokenClassification, MobileBertLayer, MobileBertModel, MobileBertPreTrainedModel, load_tf_weights_in_mobilebert, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_mobilebert import ( TF_MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST, TFMobileBertForMaskedLM, TFMobileBertForMultipleChoice, TFMobileBertForNextSentencePrediction, TFMobileBertForPreTraining, TFMobileBertForQuestionAnswering, TFMobileBertForSequenceClassification, TFMobileBertForTokenClassification, TFMobileBertMainLayer, TFMobileBertModel, TFMobileBertPreTrainedModel, ) else: import sys UpperCamelCase = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
562
0
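Usage check for the divide-and-conquer find_max above:

nums = [3, -1, 7, 2]
assert find_max(nums, 0, len(nums) - 1) == 7
assert find_max([5], 0, 0) == 5  # single-element base case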
from __future__ import annotations

import math

__version__ = "2020.9.26"
__author__ = "xcodz-dot, cclaus, dhruvmanila"


def convert_to_2d(x: float, y: float, z: float, scale: float, distance: float) -> tuple[float, float]:
    if not all(isinstance(val, (float, int)) for val in locals().values()):
        msg = f"Input values must either be float or int: {list(locals().values())}"
        raise TypeError(msg)
    projected_x = ((x * distance) / (z + distance)) * scale
    projected_y = ((y * distance) / (z + distance)) * scale
    return projected_x, projected_y


def rotate(x: float, y: float, z: float, axis: str, angle: float) -> tuple[float, float, float]:
    if not isinstance(axis, str):
        raise TypeError("Axis must be a str")
    input_variables = locals()
    del input_variables["axis"]
    if not all(isinstance(val, (float, int)) for val in input_variables.values()):
        msg = (
            "Input values except axis must either be float or int: "
            f"{list(input_variables.values())}"
        )
        raise TypeError(msg)
    angle = (angle % 360) / 450 * 180 / math.pi
    if axis == "z":
        new_x = x * math.cos(angle) - y * math.sin(angle)
        new_y = y * math.cos(angle) + x * math.sin(angle)
        new_z = z
    elif axis == "x":
        new_y = y * math.cos(angle) - z * math.sin(angle)
        new_z = z * math.cos(angle) + y * math.sin(angle)
        new_x = x
    elif axis == "y":
        new_x = x * math.cos(angle) - z * math.sin(angle)
        new_z = z * math.cos(angle) + x * math.sin(angle)
        new_y = y
    else:
        raise ValueError("not a valid axis, choose one of 'x', 'y', 'z'")
    return new_x, new_y, new_z


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    print(f"{convert_to_2d(1.0, 2.0, 3.0, 10.0, 10.0) = }")
    print(f"{rotate(1.0, 2.0, 3.0, 'y', 90.0) = }")
659
import gc import unittest import numpy as np import torch from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer from diffusers import ( AutoencoderKL, DDIMScheduler, PNDMScheduler, StableDiffusionLDMaDPipeline, UNetaDConditionModel, ) from diffusers.utils import nightly, slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS enable_full_determinism() class __snake_case ( unittest.TestCase ): """simple docstring""" UpperCamelCase_ = StableDiffusionLDMaDPipeline UpperCamelCase_ = TEXT_TO_IMAGE_PARAMS UpperCamelCase_ = TEXT_TO_IMAGE_BATCH_PARAMS UpperCamelCase_ = TEXT_TO_IMAGE_IMAGE_PARAMS def UpperCAmelCase_ ( self : Tuple ) -> str: '''simple docstring''' torch.manual_seed(0 ) lowerCAmelCase_ : Optional[Any] = UNetaDConditionModel( block_out_channels=(32, 64) ,layers_per_block=2 ,sample_size=32 ,in_channels=4 ,out_channels=4 ,down_block_types=("DownBlock2D", "CrossAttnDownBlock2D") ,up_block_types=("CrossAttnUpBlock2D", "UpBlock2D") ,cross_attention_dim=32 ,) lowerCAmelCase_ : Any = DDIMScheduler( beta_start=0.00_085 ,beta_end=0.012 ,beta_schedule="scaled_linear" ,clip_sample=lowerCAmelCase__ ,set_alpha_to_one=lowerCAmelCase__ ,) torch.manual_seed(0 ) lowerCAmelCase_ : str = AutoencoderKL( block_out_channels=[32, 64] ,in_channels=6 ,out_channels=6 ,down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"] ,up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"] ,latent_channels=4 ,) torch.manual_seed(0 ) lowerCAmelCase_ : Optional[Any] = CLIPTextConfig( bos_token_id=0 ,eos_token_id=2 ,hidden_size=32 ,intermediate_size=37 ,layer_norm_eps=1e-0_5 ,num_attention_heads=4 ,num_hidden_layers=5 ,pad_token_id=1 ,vocab_size=10_00 ,) lowerCAmelCase_ : Optional[int] = CLIPTextModel(lowerCAmelCase__ ) lowerCAmelCase_ : Dict = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" ) lowerCAmelCase_ : List[Any] = { "unet": unet, "scheduler": scheduler, "vae": vae, "text_encoder": text_encoder, "tokenizer": tokenizer, "safety_checker": None, "feature_extractor": None, } return components def UpperCAmelCase_ ( self : Tuple ,lowerCAmelCase__ : List[Any] ,lowerCAmelCase__ : List[str]=0 ) -> Dict: '''simple docstring''' if str(lowerCAmelCase__ ).startswith("mps" ): lowerCAmelCase_ : Optional[int] = torch.manual_seed(lowerCAmelCase__ ) else: lowerCAmelCase_ : Dict = torch.Generator(device=lowerCAmelCase__ ).manual_seed(lowerCAmelCase__ ) lowerCAmelCase_ : str = { "prompt": "A painting of a squirrel eating a burger", "generator": generator, "num_inference_steps": 2, "guidance_scale": 6.0, "output_type": "numpy", } return inputs def UpperCAmelCase_ ( self : Any ) -> Optional[int]: '''simple docstring''' lowerCAmelCase_ : Dict = "cpu" # ensure determinism for the device-dependent torch.Generator lowerCAmelCase_ : List[str] = self.get_dummy_components() lowerCAmelCase_ : Union[str, Any] = StableDiffusionLDMaDPipeline(**lowerCAmelCase__ ) lowerCAmelCase_ : List[Any] = ldmad_pipe.to(lowerCAmelCase__ ) ldmad_pipe.set_progress_bar_config(disable=lowerCAmelCase__ ) lowerCAmelCase_ : Any = self.get_dummy_inputs(lowerCAmelCase__ ) lowerCAmelCase_ : Union[str, Any] = ldmad_pipe(**lowerCAmelCase__ ) lowerCAmelCase_ , lowerCAmelCase_ : Any = output.rgb, output.depth lowerCAmelCase_ : Dict = rgb[0, -3:, -3:, -1] lowerCAmelCase_ : Tuple = depth[0, -3:, -1] assert rgb.shape == (1, 64, 64, 3) assert depth.shape == (1, 64, 64) lowerCAmelCase_ : 
Optional[Any] = np.array( [0.37_338_176, 0.70_247, 0.74_203_193, 0.51_643_604, 0.58_256_793, 0.60_932_136, 0.4_181_095, 0.48_355_877, 0.46_535_262] ) lowerCAmelCase_ : Tuple = np.array([103.46_727, 85.812_004, 87.849_236] ) assert np.abs(image_slice_rgb.flatten() - expected_slice_rgb ).max() < 1e-2 assert np.abs(image_slice_depth.flatten() - expected_slice_depth ).max() < 1e-2 def UpperCAmelCase_ ( self : int ) -> Optional[int]: '''simple docstring''' lowerCAmelCase_ : Dict = self.get_dummy_components() lowerCAmelCase_ : List[str] = StableDiffusionLDMaDPipeline(**lowerCAmelCase__ ) lowerCAmelCase_ : List[Any] = ldmad_pipe.to(lowerCAmelCase__ ) ldmad_pipe.set_progress_bar_config(disable=lowerCAmelCase__ ) lowerCAmelCase_ : Union[str, Any] = self.get_dummy_inputs(lowerCAmelCase__ ) lowerCAmelCase_ : str = 3 * [inputs["prompt"]] # forward lowerCAmelCase_ : Union[str, Any] = ldmad_pipe(**lowerCAmelCase__ ) lowerCAmelCase_ , lowerCAmelCase_ : Optional[Any] = output.rgb, output.depth lowerCAmelCase_ : str = rgb_slice_a[0, -3:, -3:, -1] lowerCAmelCase_ : List[str] = depth_slice_a[0, -3:, -1] lowerCAmelCase_ : Union[str, Any] = self.get_dummy_inputs(lowerCAmelCase__ ) lowerCAmelCase_ : Tuple = 3 * [inputs.pop("prompt" )] lowerCAmelCase_ : str = ldmad_pipe.tokenizer( lowerCAmelCase__ ,padding="max_length" ,max_length=ldmad_pipe.tokenizer.model_max_length ,truncation=lowerCAmelCase__ ,return_tensors="pt" ,) lowerCAmelCase_ : Union[str, Any] = text_inputs["input_ids"].to(lowerCAmelCase__ ) lowerCAmelCase_ : Optional[int] = ldmad_pipe.text_encoder(lowerCAmelCase__ )[0] lowerCAmelCase_ : Optional[int] = prompt_embeds # forward lowerCAmelCase_ : str = ldmad_pipe(**lowerCAmelCase__ ) lowerCAmelCase_ , lowerCAmelCase_ : str = output.rgb, output.depth lowerCAmelCase_ : Optional[Any] = rgb_slice_a[0, -3:, -3:, -1] lowerCAmelCase_ : Tuple = depth_slice_a[0, -3:, -1] assert np.abs(rgb_slice_a.flatten() - rgb_slice_a.flatten() ).max() < 1e-4 assert np.abs(depth_slice_a.flatten() - depth_slice_a.flatten() ).max() < 1e-4 def UpperCAmelCase_ ( self : Union[str, Any] ) -> Tuple: '''simple docstring''' lowerCAmelCase_ : Any = "cpu" # ensure determinism for the device-dependent torch.Generator lowerCAmelCase_ : Optional[int] = self.get_dummy_components() lowerCAmelCase_ : Dict = PNDMScheduler(skip_prk_steps=lowerCAmelCase__ ) lowerCAmelCase_ : Union[str, Any] = StableDiffusionLDMaDPipeline(**lowerCAmelCase__ ) lowerCAmelCase_ : Any = ldmad_pipe.to(lowerCAmelCase__ ) ldmad_pipe.set_progress_bar_config(disable=lowerCAmelCase__ ) lowerCAmelCase_ : List[str] = self.get_dummy_inputs(lowerCAmelCase__ ) lowerCAmelCase_ : List[Any] = "french fries" lowerCAmelCase_ : Optional[int] = ldmad_pipe(**lowerCAmelCase__ ,negative_prompt=lowerCAmelCase__ ) lowerCAmelCase_ , lowerCAmelCase_ : Union[str, Any] = output.rgb, output.depth lowerCAmelCase_ : Any = rgb[0, -3:, -3:, -1] lowerCAmelCase_ : Tuple = depth[0, -3:, -1] assert rgb.shape == (1, 64, 64, 3) assert depth.shape == (1, 64, 64) lowerCAmelCase_ : int = np.array( [0.37_044, 0.71_811_503, 0.7_223_251, 0.48_603_675, 0.5_638_391, 0.6_364_948, 0.42_833_704, 0.4_901_315, 0.47_926_217] ) lowerCAmelCase_ : Union[str, Any] = np.array([107.84_738, 84.62_802, 89.962_135] ) assert np.abs(rgb_slice.flatten() - expected_slice_rgb ).max() < 1e-2 assert np.abs(depth_slice.flatten() - expected_slice_depth ).max() < 1e-2 @slow @require_torch_gpu class __snake_case ( unittest.TestCase ): """simple docstring""" def UpperCAmelCase_ ( self : Tuple ) -> Union[str, Any]: '''simple docstring''' 
super().tearDown() gc.collect() torch.cuda.empty_cache() def UpperCAmelCase_ ( self : Any ,lowerCAmelCase__ : Tuple ,lowerCAmelCase__ : Dict="cpu" ,lowerCAmelCase__ : Union[str, Any]=torch.floataa ,lowerCAmelCase__ : List[str]=0 ) -> int: '''simple docstring''' lowerCAmelCase_ : Any = torch.Generator(device=lowerCAmelCase__ ).manual_seed(lowerCAmelCase__ ) lowerCAmelCase_ : List[str] = np.random.RandomState(lowerCAmelCase__ ).standard_normal((1, 4, 64, 64) ) lowerCAmelCase_ : Optional[Any] = torch.from_numpy(lowerCAmelCase__ ).to(device=lowerCAmelCase__ ,dtype=lowerCAmelCase__ ) lowerCAmelCase_ : Union[str, Any] = { "prompt": "a photograph of an astronaut riding a horse", "latents": latents, "generator": generator, "num_inference_steps": 3, "guidance_scale": 7.5, "output_type": "numpy", } return inputs def UpperCAmelCase_ ( self : List[Any] ) -> List[str]: '''simple docstring''' lowerCAmelCase_ : Optional[Any] = StableDiffusionLDMaDPipeline.from_pretrained("Intel/ldm3d" ) lowerCAmelCase_ : List[str] = ldmad_pipe.to(lowerCAmelCase__ ) ldmad_pipe.set_progress_bar_config(disable=lowerCAmelCase__ ) lowerCAmelCase_ : Dict = self.get_inputs(lowerCAmelCase__ ) lowerCAmelCase_ : List[str] = ldmad_pipe(**lowerCAmelCase__ ) lowerCAmelCase_ , lowerCAmelCase_ : Dict = output.rgb, output.depth lowerCAmelCase_ : List[str] = rgb[0, -3:, -3:, -1].flatten() lowerCAmelCase_ : Optional[int] = rgb[0, -3:, -1].flatten() assert rgb.shape == (1, 5_12, 5_12, 3) assert depth.shape == (1, 5_12, 5_12) lowerCAmelCase_ : int = np.array( [0.53_805_465, 0.56_707_305, 0.5_486_515, 0.57_012_236, 0.5_814_511, 0.56_253_487, 0.54_843_014, 0.55_092_263, 0.6_459_706] ) lowerCAmelCase_ : Optional[Any] = np.array( [0.9_263_781, 0.6_678_672, 0.5_486_515, 0.92_202_145, 0.67_831_135, 0.56_253_487, 0.9_241_694, 0.7_551_478, 0.6_459_706] ) assert np.abs(rgb_slice - expected_slice_rgb ).max() < 3e-3 assert np.abs(depth_slice - expected_slice_depth ).max() < 3e-3 @nightly @require_torch_gpu class __snake_case ( unittest.TestCase ): """simple docstring""" def UpperCAmelCase_ ( self : Tuple ) -> Union[str, Any]: '''simple docstring''' super().tearDown() gc.collect() torch.cuda.empty_cache() def UpperCAmelCase_ ( self : Tuple ,lowerCAmelCase__ : Tuple ,lowerCAmelCase__ : Dict="cpu" ,lowerCAmelCase__ : List[str]=torch.floataa ,lowerCAmelCase__ : Optional[int]=0 ) -> int: '''simple docstring''' lowerCAmelCase_ : Dict = torch.Generator(device=lowerCAmelCase__ ).manual_seed(lowerCAmelCase__ ) lowerCAmelCase_ : Tuple = np.random.RandomState(lowerCAmelCase__ ).standard_normal((1, 4, 64, 64) ) lowerCAmelCase_ : Any = torch.from_numpy(lowerCAmelCase__ ).to(device=lowerCAmelCase__ ,dtype=lowerCAmelCase__ ) lowerCAmelCase_ : int = { "prompt": "a photograph of an astronaut riding a horse", "latents": latents, "generator": generator, "num_inference_steps": 50, "guidance_scale": 7.5, "output_type": "numpy", } return inputs def UpperCAmelCase_ ( self : Dict ) -> int: '''simple docstring''' lowerCAmelCase_ : List[Any] = StableDiffusionLDMaDPipeline.from_pretrained("Intel/ldm3d" ).to(lowerCAmelCase__ ) ldmad_pipe.set_progress_bar_config(disable=lowerCAmelCase__ ) lowerCAmelCase_ : Union[str, Any] = self.get_inputs(lowerCAmelCase__ ) lowerCAmelCase_ : Union[str, Any] = ldmad_pipe(**lowerCAmelCase__ ) lowerCAmelCase_ , lowerCAmelCase_ : Any = output.rgb, output.depth lowerCAmelCase_ : Dict = 0.495_586 lowerCAmelCase_ : Optional[Any] = 0.33_795_515 lowerCAmelCase_ : Any = 112.48_518 lowerCAmelCase_ : List[Any] = 98.489_746 assert np.abs(expected_rgb_mean - 
rgb.mean() ) < 1e-3 assert np.abs(expected_rgb_std - rgb.std() ) < 1e-3 assert np.abs(expected_depth_mean - depth.mean() ) < 1e-3 assert np.abs(expected_depth_std - depth.std() ) < 1e-3 def UpperCAmelCase_ ( self : Tuple ) -> List[str]: '''simple docstring''' lowerCAmelCase_ : int = StableDiffusionLDMaDPipeline.from_pretrained("Intel/ldm3d-4c" ).to(lowerCAmelCase__ ) ldmad_pipe.set_progress_bar_config(disable=lowerCAmelCase__ ) lowerCAmelCase_ : str = self.get_inputs(lowerCAmelCase__ ) lowerCAmelCase_ : Tuple = ldmad_pipe(**lowerCAmelCase__ ) lowerCAmelCase_ , lowerCAmelCase_ : Tuple = output.rgb, output.depth lowerCAmelCase_ : List[str] = 0.4_194_127 lowerCAmelCase_ : List[str] = 0.35_375_586 lowerCAmelCase_ : str = 0.5_638_502 lowerCAmelCase_ : Optional[Any] = 0.34_686_103 assert rgb.shape == (1, 5_12, 5_12, 3) assert depth.shape == (1, 5_12, 5_12, 1) assert np.abs(expected_rgb_mean - rgb.mean() ) < 1e-3 assert np.abs(expected_rgb_std - rgb.std() ) < 1e-3 assert np.abs(expected_depth_mean - depth.mean() ) < 1e-3 assert np.abs(expected_depth_std - depth.std() ) < 1e-3
659
1
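Note the unusual normalisation in rotate above: angle % 360 / 450 * 180 / pi means angle=90.0 is not a plain 90° rotation. Rotation still preserves vector length, which gives a handy check:

import math

x, y, z = rotate(1.0, 0.0, 0.0, "z", 90.0)
assert math.isclose(x * x + y * y + z * z, 1.0)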
def is_pangram(
    input_str: str = "The quick brown fox jumps over the lazy dog",
) -> bool:
    frequency = set()
    input_str = input_str.replace(" ", "")  # Replace all the whitespace in our sentence
    for alpha in input_str:
        if "a" <= alpha.lower() <= "z":
            frequency.add(alpha.lower())
    return len(frequency) == 26


def is_pangram_faster(
    input_str: str = "The quick brown fox jumps over the lazy dog",
) -> bool:
    flag = [False] * 26
    for char in input_str:
        if char.islower():
            flag[ord(char) - 97] = True
        elif char.isupper():
            flag[ord(char) - 65] = True
    return all(flag)


def is_pangram_fastest(
    input_str: str = "The quick brown fox jumps over the lazy dog",
) -> bool:
    return len({char for char in input_str.lower() if char.isalpha()}) == 26


def benchmark() -> None:
    from timeit import timeit

    setup = "from __main__ import is_pangram, is_pangram_faster, is_pangram_fastest"
    print(timeit("is_pangram()", setup=setup))
    print(timeit("is_pangram_faster()", setup=setup))
    print(timeit("is_pangram_fastest()", setup=setup))
    # 5.348480500048026, 2.6477354579837993, 1.8470395830227062
    # 5.036091582966037, 2.644472333951853, 1.8869528750656173


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    benchmark()
705
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_flax_available,
    is_tf_available,
    is_tokenizers_available,
    is_torch_available,
)


_import_structure = {
    "configuration_electra": ["ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP", "ElectraConfig", "ElectraOnnxConfig"],
    "tokenization_electra": ["ElectraTokenizer"],
}

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_electra_fast"] = ["ElectraTokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_electra"] = [
        "ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST",
        "ElectraForCausalLM",
        "ElectraForMaskedLM",
        "ElectraForMultipleChoice",
        "ElectraForPreTraining",
        "ElectraForQuestionAnswering",
        "ElectraForSequenceClassification",
        "ElectraForTokenClassification",
        "ElectraModel",
        "ElectraPreTrainedModel",
        "load_tf_weights_in_electra",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_electra"] = [
        "TF_ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFElectraForMaskedLM",
        "TFElectraForMultipleChoice",
        "TFElectraForPreTraining",
        "TFElectraForQuestionAnswering",
        "TFElectraForSequenceClassification",
        "TFElectraForTokenClassification",
        "TFElectraModel",
        "TFElectraPreTrainedModel",
    ]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_electra"] = [
        "FlaxElectraForCausalLM",
        "FlaxElectraForMaskedLM",
        "FlaxElectraForMultipleChoice",
        "FlaxElectraForPreTraining",
        "FlaxElectraForQuestionAnswering",
        "FlaxElectraForSequenceClassification",
        "FlaxElectraForTokenClassification",
        "FlaxElectraModel",
        "FlaxElectraPreTrainedModel",
    ]


if TYPE_CHECKING:
    from .configuration_electra import ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP, ElectraConfig, ElectraOnnxConfig
    from .tokenization_electra import ElectraTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_electra_fast import ElectraTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_electra import (
            ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST,
            ElectraForCausalLM,
            ElectraForMaskedLM,
            ElectraForMultipleChoice,
            ElectraForPreTraining,
            ElectraForQuestionAnswering,
            ElectraForSequenceClassification,
            ElectraForTokenClassification,
            ElectraModel,
            ElectraPreTrainedModel,
            load_tf_weights_in_electra,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_electra import (
            TF_ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFElectraForMaskedLM,
            TFElectraForMultipleChoice,
            TFElectraForPreTraining,
            TFElectraForQuestionAnswering,
            TFElectraForSequenceClassification,
            TFElectraForTokenClassification,
            TFElectraModel,
            TFElectraPreTrainedModel,
        )

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_electra import (
            FlaxElectraForCausalLM,
            FlaxElectraForMaskedLM,
            FlaxElectraForMultipleChoice,
            FlaxElectraForPreTraining,
            FlaxElectraForQuestionAnswering,
            FlaxElectraForSequenceClassification,
            FlaxElectraForTokenClassification,
            FlaxElectraModel,
            FlaxElectraPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
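# A minimal usage sketch of the lazy interface above (an assumption: the
# `transformers` package plus a PyTorch backend are installed). No submodule
# is imported until its attribute is first accessed.
from transformers import ElectraConfig, ElectraModel

config = ElectraConfig()      # resolves configuration_electra on first access
model = ElectraModel(config)  # resolves modeling_electra on first access
print(model.config.hidden_size)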
import json
from typing import List, Optional, Tuple

from tokenizers import normalizers

from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_bert import BertTokenizer


logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "bert-base-uncased": "https://huggingface.co/bert-base-uncased/resolve/main/vocab.txt",
        "bert-large-uncased": "https://huggingface.co/bert-large-uncased/resolve/main/vocab.txt",
        "bert-base-cased": "https://huggingface.co/bert-base-cased/resolve/main/vocab.txt",
        "bert-large-cased": "https://huggingface.co/bert-large-cased/resolve/main/vocab.txt",
        "bert-base-multilingual-uncased": "https://huggingface.co/bert-base-multilingual-uncased/resolve/main/vocab.txt",
        "bert-base-multilingual-cased": "https://huggingface.co/bert-base-multilingual-cased/resolve/main/vocab.txt",
        "bert-base-chinese": "https://huggingface.co/bert-base-chinese/resolve/main/vocab.txt",
        "bert-base-german-cased": "https://huggingface.co/bert-base-german-cased/resolve/main/vocab.txt",
        "bert-large-uncased-whole-word-masking": "https://huggingface.co/bert-large-uncased-whole-word-masking/resolve/main/vocab.txt",
        "bert-large-cased-whole-word-masking": "https://huggingface.co/bert-large-cased-whole-word-masking/resolve/main/vocab.txt",
        "bert-large-uncased-whole-word-masking-finetuned-squad": "https://huggingface.co/bert-large-uncased-whole-word-masking-finetuned-squad/resolve/main/vocab.txt",
        "bert-large-cased-whole-word-masking-finetuned-squad": "https://huggingface.co/bert-large-cased-whole-word-masking-finetuned-squad/resolve/main/vocab.txt",
        "bert-base-cased-finetuned-mrpc": "https://huggingface.co/bert-base-cased-finetuned-mrpc/resolve/main/vocab.txt",
        "bert-base-german-dbmdz-cased": "https://huggingface.co/bert-base-german-dbmdz-cased/resolve/main/vocab.txt",
        "bert-base-german-dbmdz-uncased": "https://huggingface.co/bert-base-german-dbmdz-uncased/resolve/main/vocab.txt",
        "TurkuNLP/bert-base-finnish-cased-v1": "https://huggingface.co/TurkuNLP/bert-base-finnish-cased-v1/resolve/main/vocab.txt",
        "TurkuNLP/bert-base-finnish-uncased-v1": "https://huggingface.co/TurkuNLP/bert-base-finnish-uncased-v1/resolve/main/vocab.txt",
        "wietsedv/bert-base-dutch-cased": "https://huggingface.co/wietsedv/bert-base-dutch-cased/resolve/main/vocab.txt",
    },
    "tokenizer_file": {
        "bert-base-uncased": "https://huggingface.co/bert-base-uncased/resolve/main/tokenizer.json",
        "bert-large-uncased": "https://huggingface.co/bert-large-uncased/resolve/main/tokenizer.json",
        "bert-base-cased": "https://huggingface.co/bert-base-cased/resolve/main/tokenizer.json",
        "bert-large-cased": "https://huggingface.co/bert-large-cased/resolve/main/tokenizer.json",
        "bert-base-multilingual-uncased": "https://huggingface.co/bert-base-multilingual-uncased/resolve/main/tokenizer.json",
        "bert-base-multilingual-cased": "https://huggingface.co/bert-base-multilingual-cased/resolve/main/tokenizer.json",
        "bert-base-chinese": "https://huggingface.co/bert-base-chinese/resolve/main/tokenizer.json",
        "bert-base-german-cased": "https://huggingface.co/bert-base-german-cased/resolve/main/tokenizer.json",
        "bert-large-uncased-whole-word-masking": "https://huggingface.co/bert-large-uncased-whole-word-masking/resolve/main/tokenizer.json",
        "bert-large-cased-whole-word-masking": "https://huggingface.co/bert-large-cased-whole-word-masking/resolve/main/tokenizer.json",
        "bert-large-uncased-whole-word-masking-finetuned-squad": "https://huggingface.co/bert-large-uncased-whole-word-masking-finetuned-squad/resolve/main/tokenizer.json",
        "bert-large-cased-whole-word-masking-finetuned-squad": "https://huggingface.co/bert-large-cased-whole-word-masking-finetuned-squad/resolve/main/tokenizer.json",
        "bert-base-cased-finetuned-mrpc": "https://huggingface.co/bert-base-cased-finetuned-mrpc/resolve/main/tokenizer.json",
        "bert-base-german-dbmdz-cased": "https://huggingface.co/bert-base-german-dbmdz-cased/resolve/main/tokenizer.json",
        "bert-base-german-dbmdz-uncased": "https://huggingface.co/bert-base-german-dbmdz-uncased/resolve/main/tokenizer.json",
        "TurkuNLP/bert-base-finnish-cased-v1": "https://huggingface.co/TurkuNLP/bert-base-finnish-cased-v1/resolve/main/tokenizer.json",
        "TurkuNLP/bert-base-finnish-uncased-v1": "https://huggingface.co/TurkuNLP/bert-base-finnish-uncased-v1/resolve/main/tokenizer.json",
        "wietsedv/bert-base-dutch-cased": "https://huggingface.co/wietsedv/bert-base-dutch-cased/resolve/main/tokenizer.json",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "bert-base-uncased": 512,
    "bert-large-uncased": 512,
    "bert-base-cased": 512,
    "bert-large-cased": 512,
    "bert-base-multilingual-uncased": 512,
    "bert-base-multilingual-cased": 512,
    "bert-base-chinese": 512,
    "bert-base-german-cased": 512,
    "bert-large-uncased-whole-word-masking": 512,
    "bert-large-cased-whole-word-masking": 512,
    "bert-large-uncased-whole-word-masking-finetuned-squad": 512,
    "bert-large-cased-whole-word-masking-finetuned-squad": 512,
    "bert-base-cased-finetuned-mrpc": 512,
    "bert-base-german-dbmdz-cased": 512,
    "bert-base-german-dbmdz-uncased": 512,
    "TurkuNLP/bert-base-finnish-cased-v1": 512,
    "TurkuNLP/bert-base-finnish-uncased-v1": 512,
    "wietsedv/bert-base-dutch-cased": 512,
}

PRETRAINED_INIT_CONFIGURATION = {
    "bert-base-uncased": {"do_lower_case": True},
    "bert-large-uncased": {"do_lower_case": True},
    "bert-base-cased": {"do_lower_case": False},
    "bert-large-cased": {"do_lower_case": False},
    "bert-base-multilingual-uncased": {"do_lower_case": True},
    "bert-base-multilingual-cased": {"do_lower_case": False},
    "bert-base-chinese": {"do_lower_case": False},
    "bert-base-german-cased": {"do_lower_case": False},
    "bert-large-uncased-whole-word-masking": {"do_lower_case": True},
    "bert-large-cased-whole-word-masking": {"do_lower_case": False},
    "bert-large-uncased-whole-word-masking-finetuned-squad": {"do_lower_case": True},
    "bert-large-cased-whole-word-masking-finetuned-squad": {"do_lower_case": False},
    "bert-base-cased-finetuned-mrpc": {"do_lower_case": False},
    "bert-base-german-dbmdz-cased": {"do_lower_case": False},
    "bert-base-german-dbmdz-uncased": {"do_lower_case": True},
    "TurkuNLP/bert-base-finnish-cased-v1": {"do_lower_case": False},
    "TurkuNLP/bert-base-finnish-uncased-v1": {"do_lower_case": True},
    "wietsedv/bert-base-dutch-cased": {"do_lower_case": False},
}


class BertTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = BertTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        tokenize_chinese_chars=True,
        strip_accents=None,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            **kwargs,
        )

        # Re-sync the backend normalizer if the requested options differ from
        # the ones serialized in tokenizer.json.
        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)

        self.do_lower_case = do_lower_case

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]
        return output

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
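# A short usage sketch of the fast tokenizer above (assumes `transformers`
# is installed and the "bert-base-uncased" checkpoint is reachable).
from transformers import BertTokenizerFast

tok = BertTokenizerFast.from_pretrained("bert-base-uncased")
enc = tok("Hello world", "How are you?")
print(enc["input_ids"])
print(enc["token_type_ids"])  # 0s for the first segment, 1s for the second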
import argparse
import re

import numpy as np
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image

from transformers import (
    SamConfig,
    SamImageProcessor,
    SamModel,
    SamProcessor,
    SamVisionConfig,
)


KEYS_TO_MODIFY_MAPPING = {
    "iou_prediction_head.layers.0": "iou_prediction_head.proj_in",
    "iou_prediction_head.layers.1": "iou_prediction_head.layers.0",
    "iou_prediction_head.layers.2": "iou_prediction_head.proj_out",
    "mask_decoder.output_upscaling.0": "mask_decoder.upscale_conv1",
    "mask_decoder.output_upscaling.1": "mask_decoder.upscale_layer_norm",
    "mask_decoder.output_upscaling.3": "mask_decoder.upscale_conv2",
    "mask_downscaling.0": "mask_embed.conv1",
    "mask_downscaling.1": "mask_embed.layer_norm1",
    "mask_downscaling.3": "mask_embed.conv2",
    "mask_downscaling.4": "mask_embed.layer_norm2",
    "mask_downscaling.6": "mask_embed.conv3",
    "point_embeddings": "point_embed",
    "pe_layer.positional_encoding_gaussian_matrix": "shared_embedding.positional_embedding",
    "image_encoder": "vision_encoder",
    "neck.0": "neck.conv1",
    "neck.1": "neck.layer_norm1",
    "neck.2": "neck.conv2",
    "neck.3": "neck.layer_norm2",
    "patch_embed.proj": "patch_embed.projection",
    ".norm": ".layer_norm",
    "blocks": "layers",
}


def replace_keys(state_dict):
    model_state_dict = {}
    state_dict.pop("pixel_mean", None)
    state_dict.pop("pixel_std", None)
    output_hypernetworks_mlps_pattern = r".*.output_hypernetworks_mlps.(\d+).layers.(\d+).*"

    for key, value in state_dict.items():
        for key_to_modify, new_key in KEYS_TO_MODIFY_MAPPING.items():
            if key_to_modify in key:
                key = key.replace(key_to_modify, new_key)

        if re.match(output_hypernetworks_mlps_pattern, key):
            layer_nb = int(re.match(output_hypernetworks_mlps_pattern, key).group(2))
            if layer_nb == 0:
                key = key.replace("layers.0", "proj_in")
            elif layer_nb == 1:
                key = key.replace("layers.1", "layers.0")
            elif layer_nb == 2:
                key = key.replace("layers.2", "proj_out")

        model_state_dict[key] = value

    model_state_dict["shared_image_embedding.positional_embedding"] = model_state_dict[
        "prompt_encoder.shared_embedding.positional_embedding"
    ]

    return model_state_dict


def convert_sam_checkpoint(model_name, pytorch_dump_folder, push_to_hub, model_hub_id="ybelkada/segment-anything"):
    checkpoint_path = hf_hub_download(model_hub_id, f"checkpoints/{model_name}.pth")

    if "sam_vit_b" in model_name:
        config = SamConfig()
    elif "sam_vit_l" in model_name:
        vision_config = SamVisionConfig(
            hidden_size=1024,
            num_hidden_layers=24,
            num_attention_heads=16,
            global_attn_indexes=[5, 11, 17, 23],
        )
        config = SamConfig(vision_config=vision_config)
    elif "sam_vit_h" in model_name:
        vision_config = SamVisionConfig(
            hidden_size=1280,
            num_hidden_layers=32,
            num_attention_heads=16,
            global_attn_indexes=[7, 15, 23, 31],
        )
        config = SamConfig(vision_config=vision_config)

    state_dict = torch.load(checkpoint_path, map_location="cpu")
    state_dict = replace_keys(state_dict)

    image_processor = SamImageProcessor()
    processor = SamProcessor(image_processor=image_processor)
    hf_model = SamModel(config)

    hf_model.load_state_dict(state_dict)
    hf_model = hf_model.to("cuda")

    img_url = "https://huggingface.co/ybelkada/segment-anything/resolve/main/assets/car.png"
    raw_image = Image.open(requests.get(img_url, stream=True).raw).convert("RGB")

    input_points = [[[400, 650]]]
    input_labels = [[1]]

    inputs = processor(images=np.array(raw_image), return_tensors="pt").to("cuda")

    with torch.no_grad():
        output = hf_model(**inputs)
    scores = output.iou_scores.squeeze()

    if model_name == "sam_vit_h_4b8939":
        assert scores[-1].item() == 0.579890251159668

        inputs = processor(
            images=np.array(raw_image), input_points=input_points, input_labels=input_labels, return_tensors="pt"
        ).to("cuda")

        with torch.no_grad():
            output = hf_model(**inputs)
        scores = output.iou_scores.squeeze()

        assert scores[-1].item() == 0.9712603092193604

        input_boxes = ((75, 275, 1725, 850),)

        inputs = processor(images=np.array(raw_image), input_boxes=input_boxes, return_tensors="pt").to("cuda")

        with torch.no_grad():
            output = hf_model(**inputs)
        scores = output.iou_scores.squeeze()

        assert scores[-1].item() == 0.8686015605926514

        # Test with 2 points and 1 image.
        input_points = [[[400, 650], [800, 650]]]
        input_labels = [[1, 1]]

        inputs = processor(
            images=np.array(raw_image), input_points=input_points, input_labels=input_labels, return_tensors="pt"
        ).to("cuda")

        with torch.no_grad():
            output = hf_model(**inputs)
        scores = output.iou_scores.squeeze()

        assert scores[-1].item() == 0.9936047792434692


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    choices = ["sam_vit_b_01ec64", "sam_vit_h_4b8939", "sam_vit_l_0b3195"]
    parser.add_argument(
        "--model_name",
        default="sam_vit_h_4b8939",
        choices=choices,
        type=str,
        help="Path to hf config.json of model to convert",
    )
    parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    parser.add_argument(
        "--push_to_hub",
        action="store_true",
        help="Whether to push the model and processor to the hub after converting",
    )
    parser.add_argument(
        "--model_hub_id",
        default="ybelkada/segment-anything",
        choices=choices,
        type=str,
        help="Path to hf config.json of model to convert",
    )
    args = parser.parse_args()

    convert_sam_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub, args.model_hub_id)
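# A hypothetical direct call of the converter above, bypassing argparse
# (a sketch only: it assumes a CUDA device, network access, and that the
# checkpoint weights are present under the default model hub id).
convert_sam_checkpoint("sam_vit_b_01ec64", pytorch_dump_folder=None, push_to_hub=False)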
__lowercase = """\n# Transformers installation\n! pip install transformers datasets\n# To install from source instead of the last release, comment the command above and uncomment the following one.\n# ! pip install git+https://github.com/huggingface/transformers.git\n""" __lowercase = [{"""type""": """code""", """content""": INSTALL_CONTENT}] __lowercase = { """{processor_class}""": """FakeProcessorClass""", """{model_class}""": """FakeModelClass""", """{object_class}""": """FakeObjectClass""", }
# Lint as: python3
import dataclasses
import re
from dataclasses import dataclass
from functools import total_ordering
from typing import Optional, Union


_VERSION_REG = re.compile(r"^(?P<major>\d+)" r"\.(?P<minor>\d+)" r"\.(?P<patch>\d+)$")


@total_ordering
@dataclass
class Version:
    version_str: str
    description: Optional[str] = None
    major: Optional[Union[str, int]] = None
    minor: Optional[Union[str, int]] = None
    patch: Optional[Union[str, int]] = None

    def __post_init__(self):
        self.major, self.minor, self.patch = _str_to_version_tuple(self.version_str)

    def __repr__(self):
        return f"{self.tuple[0]}.{self.tuple[1]}.{self.tuple[2]}"

    @property
    def tuple(self):
        return self.major, self.minor, self.patch

    def _validate_operand(self, other):
        if isinstance(other, str):
            return Version(other)
        elif isinstance(other, Version):
            return other
        raise TypeError(f"{other} (type {type(other)}) cannot be compared to version.")

    def __eq__(self, other):
        try:
            other = self._validate_operand(other)
        except (TypeError, ValueError):
            return False
        else:
            return self.tuple == other.tuple

    def __lt__(self, other):
        other = self._validate_operand(other)
        return self.tuple < other.tuple

    def __hash__(self):
        return hash(_version_tuple_to_str(self.tuple))

    @classmethod
    def from_dict(cls, dic):
        field_names = {f.name for f in dataclasses.fields(cls)}
        return cls(**{k: v for k, v in dic.items() if k in field_names})

    def _to_yaml_string(self) -> str:
        return self.version_str


def _str_to_version_tuple(version_str):
    res = _VERSION_REG.match(version_str)
    if not res:
        raise ValueError(f"Invalid version '{version_str}'. Format should be x.y.z with {{x,y,z}} being digits.")
    return tuple(int(v) for v in [res.group("major"), res.group("minor"), res.group("patch")])


def _version_tuple_to_str(version_tuple):
    return ".".join(str(v) for v in version_tuple)
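# A short usage sketch of the Version helper above.
v = Version("1.2.3")
print(v.tuple)               # (1, 2, 3)
print(v == "1.2.3")          # True: string operands are coerced via _validate_operand
print(v < Version("2.0.0"))  # True, courtesy of @total_ordering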
import unittest

import numpy as np

from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available

from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs


if is_torch_available():
    import torch

if is_vision_available():
    from PIL import Image

    from transformers import GLPNImageProcessor


class GLPNImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size_divisor=32,
        do_rescale=True,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size_divisor = size_divisor
        self.do_rescale = do_rescale

    def prepare_image_processor_dict(self):
        return {
            "do_resize": self.do_resize,
            "size_divisor": self.size_divisor,
            "do_rescale": self.do_rescale,
        }


@require_torch
@require_vision
class GLPNImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = GLPNImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = GLPNImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size_divisor"))
        self.assertTrue(hasattr(image_processing, "resample"))
        self.assertTrue(hasattr(image_processing, "do_rescale"))

    def test_batch_feature(self):
        pass

    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input (GLPNImageProcessor doesn't support batching)
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertTrue(encoded_images.shape[-1] % self.image_processor_tester.size_divisor == 0)
        self.assertTrue(encoded_images.shape[-2] % self.image_processor_tester.size_divisor == 0)

    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input (GLPNImageProcessor doesn't support batching)
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertTrue(encoded_images.shape[-1] % self.image_processor_tester.size_divisor == 0)
        self.assertTrue(encoded_images.shape[-2] % self.image_processor_tester.size_divisor == 0)

    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input (GLPNImageProcessor doesn't support batching)
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertTrue(encoded_images.shape[-1] % self.image_processor_tester.size_divisor == 0)
        self.assertTrue(encoded_images.shape[-2] % self.image_processor_tester.size_divisor == 0)
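# Standalone run sketch (an assumption: in the transformers repo these tests
# are normally collected by pytest rather than executed directly).
if __name__ == "__main__":
    unittest.main()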
from __future__ import annotations


def all_unique(nums: list[int]) -> bool:
    """
    Return True if every element of `nums` occurs exactly once.

    >>> all_unique([1, 2, 3])
    True
    >>> all_unique([1, 2, 2])
    False
    """
    return len(set(nums)) == len(nums)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
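# A sort-based alternative (not in the original file): trades the O(n) extra
# space of building a set for O(n log n) time when memory is the constraint.
def all_unique_sorted(nums: list[int]) -> bool:
    nums = sorted(nums)
    return all(a != b for a, b in zip(nums, nums[1:]))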
"""simple docstring""" from __future__ import annotations def __UpperCAmelCase ( _snake_case : int, _snake_case : int ): if partitions <= 0: raise ValueError("partitions must be a positive number!" ) if partitions > number_of_bytes: raise ValueError("partitions can not > number_of_bytes!" ) _lowercase = number_of_bytes // partitions _lowercase = [] for i in range(lowerCamelCase_ ): _lowercase = i * bytes_per_partition + 1 _lowercase = ( number_of_bytes if i == partitions - 1 else (i + 1) * bytes_per_partition ) allocation_list.append(f"""{start_bytes}-{end_bytes}""" ) return allocation_list if __name__ == "__main__": import doctest doctest.testmod()
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available __UpperCamelCase : Tuple = { "configuration_poolformer": [ "POOLFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP", "PoolFormerConfig", "PoolFormerOnnxConfig", ] } try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __UpperCamelCase : str = ["PoolFormerFeatureExtractor"] __UpperCamelCase : List[str] = ["PoolFormerImageProcessor"] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __UpperCamelCase : Tuple = [ "POOLFORMER_PRETRAINED_MODEL_ARCHIVE_LIST", "PoolFormerForImageClassification", "PoolFormerModel", "PoolFormerPreTrainedModel", ] if TYPE_CHECKING: from .configuration_poolformer import ( POOLFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, PoolFormerConfig, PoolFormerOnnxConfig, ) try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .feature_extraction_poolformer import PoolFormerFeatureExtractor from .image_processing_poolformer import PoolFormerImageProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_poolformer import ( POOLFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, PoolFormerForImageClassification, PoolFormerModel, PoolFormerPreTrainedModel, ) else: import sys __UpperCamelCase : Any = _LazyModule(__name__, globals()["__file__"], _import_structure)