code
stringlengths
82
54.1k
code_codestyle
int64
0
699
style_context
stringlengths
111
35.6k
style_context_codestyle
int64
0
699
label
int64
0
1
import json from typing import List, Optional, Tuple from tokenizers import pre_tokenizers, processors from ...tokenization_utils_base import AddedToken, BatchEncoding from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import logging from .tokenization_bart import BartTokenizer lowerCamelCase__ : List[Any] = logging.get_logger(__name__) lowerCamelCase__ : Optional[int] = {"""vocab_file""": """vocab.json""", """merges_file""": """merges.txt""", """tokenizer_file""": """tokenizer.json"""} # See all BART models at https://huggingface.co/models?filter=bart lowerCamelCase__ : Optional[Any] = { """vocab_file""": { """facebook/bart-base""": """https://huggingface.co/facebook/bart-base/resolve/main/vocab.json""", """facebook/bart-large""": """https://huggingface.co/facebook/bart-large/resolve/main/vocab.json""", """facebook/bart-large-mnli""": """https://huggingface.co/facebook/bart-large-mnli/resolve/main/vocab.json""", """facebook/bart-large-cnn""": """https://huggingface.co/facebook/bart-large-cnn/resolve/main/vocab.json""", """facebook/bart-large-xsum""": """https://huggingface.co/facebook/bart-large-xsum/resolve/main/vocab.json""", """yjernite/bart_eli5""": """https://huggingface.co/yjernite/bart_eli5/resolve/main/vocab.json""", }, """merges_file""": { """facebook/bart-base""": """https://huggingface.co/facebook/bart-base/resolve/main/merges.txt""", """facebook/bart-large""": """https://huggingface.co/facebook/bart-large/resolve/main/merges.txt""", """facebook/bart-large-mnli""": """https://huggingface.co/facebook/bart-large-mnli/resolve/main/merges.txt""", """facebook/bart-large-cnn""": """https://huggingface.co/facebook/bart-large-cnn/resolve/main/merges.txt""", """facebook/bart-large-xsum""": """https://huggingface.co/facebook/bart-large-xsum/resolve/main/merges.txt""", """yjernite/bart_eli5""": """https://huggingface.co/yjernite/bart_eli5/resolve/main/merges.txt""", }, """tokenizer_file""": { """facebook/bart-base""": 
"""https://huggingface.co/facebook/bart-base/resolve/main/tokenizer.json""", """facebook/bart-large""": """https://huggingface.co/facebook/bart-large/resolve/main/tokenizer.json""", """facebook/bart-large-mnli""": """https://huggingface.co/facebook/bart-large-mnli/resolve/main/tokenizer.json""", """facebook/bart-large-cnn""": """https://huggingface.co/facebook/bart-large-cnn/resolve/main/tokenizer.json""", """facebook/bart-large-xsum""": """https://huggingface.co/facebook/bart-large-xsum/resolve/main/tokenizer.json""", """yjernite/bart_eli5""": """https://huggingface.co/yjernite/bart_eli5/resolve/main/tokenizer.json""", }, } lowerCamelCase__ : Dict = { """facebook/bart-base""": 1_0_2_4, """facebook/bart-large""": 1_0_2_4, """facebook/bart-large-mnli""": 1_0_2_4, """facebook/bart-large-cnn""": 1_0_2_4, """facebook/bart-large-xsum""": 1_0_2_4, """yjernite/bart_eli5""": 1_0_2_4, } class __magic_name__ (snake_case_ ): '''simple docstring''' __lowercase : int = VOCAB_FILES_NAMES __lowercase : List[str] = PRETRAINED_VOCAB_FILES_MAP __lowercase : Tuple = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES __lowercase : int = ['input_ids', 'attention_mask'] __lowercase : int = BartTokenizer def __init__( self:int , _a:Any=None , _a:Dict=None , _a:Any=None , _a:str="replace" , _a:Dict="<s>" , _a:Any="</s>" , _a:List[str]="</s>" , _a:Optional[Any]="<s>" , _a:List[Any]="<unk>" , _a:Optional[Any]="<pad>" , _a:str="<mask>" , _a:Tuple=False , _a:Optional[Any]=True , **_a:List[str] , ): super().__init__( _a , _a , tokenizer_file=_a , errors=_a , bos_token=_a , eos_token=_a , sep_token=_a , cls_token=_a , unk_token=_a , pad_token=_a , mask_token=_a , add_prefix_space=_a , trim_offsets=_a , **_a , ) snake_case__ = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() ) if pre_tok_state.get('''add_prefix_space''' , _a ) != add_prefix_space: snake_case__ = getattr(_a , pre_tok_state.pop('''type''' ) ) snake_case__ = add_prefix_space snake_case__ = pre_tok_class(**_a ) snake_case__ = 
add_prefix_space # the pre_tokenizer is already updated in the GPT2TokenizerFast `__init__` snake_case__ = '''post_processor''' snake_case__ = getattr(self.backend_tokenizer , _a , _a ) if tokenizer_component_instance: snake_case__ = json.loads(tokenizer_component_instance.__getstate__() ) # The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class` if "sep" in state: snake_case__ = tuple(state['''sep'''] ) if "cls" in state: snake_case__ = tuple(state['''cls'''] ) snake_case__ = False if state.get('''add_prefix_space''' , _a ) != add_prefix_space: snake_case__ = add_prefix_space snake_case__ = True if state.get('''trim_offsets''' , _a ) != trim_offsets: snake_case__ = trim_offsets snake_case__ = True if changes_to_apply: snake_case__ = getattr(_a , state.pop('''type''' ) ) snake_case__ = component_class(**_a ) setattr(self.backend_tokenizer , _a , _a ) @property def SCREAMING_SNAKE_CASE__ ( self:Any ): if self._mask_token is None: if self.verbose: logger.error('''Using mask_token, but it is not set yet.''' ) return None return str(self._mask_token ) @mask_token.setter def SCREAMING_SNAKE_CASE__ ( self:Optional[int] , _a:Union[str, Any] ): snake_case__ = AddedToken(_a , lstrip=_a , rstrip=_a ) if isinstance(_a , _a ) else value snake_case__ = value def SCREAMING_SNAKE_CASE__ ( self:Tuple , *_a:int , **_a:Tuple ): snake_case__ = kwargs.get('''is_split_into_words''' , _a ) if is_split_into_words and not self.add_prefix_space: raise ValueError( F"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True """ '''to use it with pretokenized inputs.''' ) return super()._batch_encode_plus(*_a , **_a ) def SCREAMING_SNAKE_CASE__ ( self:str , *_a:Dict , **_a:Tuple ): snake_case__ = kwargs.get('''is_split_into_words''' , _a ) if is_split_into_words and not self.add_prefix_space: raise ValueError( F"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True """ '''to use it with pretokenized inputs.''' ) 
return super()._encode_plus(*_a , **_a ) def SCREAMING_SNAKE_CASE__ ( self:str , _a:str , _a:Optional[str] = None ): snake_case__ = self._tokenizer.model.save(_a , name=_a ) return tuple(_a ) def SCREAMING_SNAKE_CASE__ ( self:Optional[int] , _a:Optional[int] , _a:Tuple=None ): snake_case__ = [self.bos_token_id] + token_ids_a + [self.eos_token_id] if token_ids_a is None: return output return output + [self.eos_token_id] + token_ids_a + [self.eos_token_id] def SCREAMING_SNAKE_CASE__ ( self:str , _a:List[int] , _a:Optional[List[int]] = None ): snake_case__ = [self.sep_token_id] snake_case__ = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
33
from __future__ import annotations import unittest from transformers import BlenderbotConfig, BlenderbotTokenizer, is_tf_available from transformers.testing_utils import require_tf, require_tokenizers, slow from transformers.utils import cached_property from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import TFAutoModelForSeqaSeqLM, TFBlenderbotForConditionalGeneration, TFBlenderbotModel @require_tf class __magic_name__ : '''simple docstring''' __lowercase : int = BlenderbotConfig __lowercase : Any = {} __lowercase : Optional[Any] = 'gelu' def __init__( self:Tuple , _a:Optional[Any] , _a:Optional[Any]=13 , _a:Tuple=7 , _a:Union[str, Any]=True , _a:int=False , _a:int=99 , _a:Optional[int]=32 , _a:List[str]=2 , _a:List[str]=4 , _a:List[Any]=37 , _a:Any=0.1 , _a:int=0.1 , _a:List[Any]=20 , _a:List[str]=2 , _a:int=1 , _a:Dict=0 , ): snake_case__ = parent snake_case__ = batch_size snake_case__ = seq_length snake_case__ = is_training snake_case__ = use_labels snake_case__ = vocab_size snake_case__ = hidden_size snake_case__ = num_hidden_layers snake_case__ = num_attention_heads snake_case__ = intermediate_size snake_case__ = hidden_dropout_prob snake_case__ = attention_probs_dropout_prob snake_case__ = max_position_embeddings snake_case__ = eos_token_id snake_case__ = pad_token_id snake_case__ = bos_token_id def SCREAMING_SNAKE_CASE__ ( self:int ): snake_case__ = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size ) snake_case__ = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ) , 1 ) snake_case__ = tf.concat([input_ids, eos_tensor] , axis=1 ) snake_case__ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) snake_case__ = self.config_cls( vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers 
, decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , **self.config_updates , ) snake_case__ = prepare_blenderbot_inputs_dict(_a , _a , _a ) return config, inputs_dict def SCREAMING_SNAKE_CASE__ ( self:int , _a:Optional[Any] , _a:int ): snake_case__ = TFBlenderbotModel(config=_a ).get_decoder() snake_case__ = inputs_dict['''input_ids'''] snake_case__ = input_ids[:1, :] snake_case__ = inputs_dict['''attention_mask'''][:1, :] snake_case__ = inputs_dict['''head_mask'''] snake_case__ = 1 # first forward pass snake_case__ = model(_a , attention_mask=_a , head_mask=_a , use_cache=_a ) snake_case__ , snake_case__ = outputs.to_tuple() # create hypothetical next token and extent to next_input_ids snake_case__ = ids_tensor((self.batch_size, 3) , config.vocab_size ) snake_case__ = tf.cast(ids_tensor((self.batch_size, 3) , 2 ) , tf.inta ) # append to next input_ids and snake_case__ = tf.concat([input_ids, next_tokens] , axis=-1 ) snake_case__ = tf.concat([attention_mask, next_attn_mask] , axis=-1 ) snake_case__ = model(_a , attention_mask=_a )[0] snake_case__ = model(_a , attention_mask=_a , past_key_values=_a )[0] self.parent.assertEqual(next_tokens.shape[1] , output_from_past.shape[1] ) # select random slice snake_case__ = int(ids_tensor((1,) , output_from_past.shape[-1] ) ) snake_case__ = output_from_no_past[:, -3:, random_slice_idx] snake_case__ = output_from_past[:, :, random_slice_idx] # test that outputs are equal for slice tf.debugging.assert_near(_a , _a , rtol=1e-3 ) def SCREAMING_SNAKE_CASE ( __lowerCAmelCase , __lowerCAmelCase , 
__lowerCAmelCase , __lowerCAmelCase=None , __lowerCAmelCase=None , __lowerCAmelCase=None , __lowerCAmelCase=None , __lowerCAmelCase=None , ) -> Tuple: if attention_mask is None: snake_case__ = tf.cast(tf.math.not_equal(__lowerCAmelCase , config.pad_token_id ) , tf.inta ) if decoder_attention_mask is None: snake_case__ = tf.concat( [ tf.ones(decoder_input_ids[:, :1].shape , dtype=tf.inta ), tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:] , config.pad_token_id ) , tf.inta ), ] , axis=-1 , ) if head_mask is None: snake_case__ = tf.ones((config.encoder_layers, config.encoder_attention_heads) ) if decoder_head_mask is None: snake_case__ = tf.ones((config.decoder_layers, config.decoder_attention_heads) ) if cross_attn_head_mask is None: snake_case__ = tf.ones((config.decoder_layers, config.decoder_attention_heads) ) return { "input_ids": input_ids, "decoder_input_ids": decoder_input_ids, "attention_mask": attention_mask, "decoder_attention_mask": decoder_attention_mask, "head_mask": head_mask, "decoder_head_mask": decoder_head_mask, "cross_attn_head_mask": cross_attn_head_mask, } @require_tf class __magic_name__ (snake_case_ ,snake_case_ ,unittest.TestCase ): '''simple docstring''' __lowercase : List[str] = (TFBlenderbotForConditionalGeneration, TFBlenderbotModel) if is_tf_available() else () __lowercase : Any = (TFBlenderbotForConditionalGeneration,) if is_tf_available() else () __lowercase : Tuple = ( { 'conversational': TFBlenderbotForConditionalGeneration, 'feature-extraction': TFBlenderbotModel, 'summarization': TFBlenderbotForConditionalGeneration, 'text2text-generation': TFBlenderbotForConditionalGeneration, 'translation': TFBlenderbotForConditionalGeneration, } if is_tf_available() else {} ) __lowercase : Any = True __lowercase : int = False __lowercase : int = False def SCREAMING_SNAKE_CASE__ ( self:Union[str, Any] ): snake_case__ = TFBlenderbotModelTester(self ) snake_case__ = ConfigTester(self , config_class=_a ) def SCREAMING_SNAKE_CASE__ ( 
self:Optional[Any] ): self.config_tester.run_common_tests() def SCREAMING_SNAKE_CASE__ ( self:Optional[Any] ): snake_case__ = self.model_tester.prepare_config_and_inputs_for_common() self.model_tester.check_decoder_model_past_large_inputs(*_a ) @require_tokenizers @require_tf class __magic_name__ (unittest.TestCase ): '''simple docstring''' __lowercase : Optional[int] = ['My friends are cool but they eat too many carbs.'] __lowercase : Optional[int] = 'facebook/blenderbot-400M-distill' @cached_property def SCREAMING_SNAKE_CASE__ ( self:Tuple ): return BlenderbotTokenizer.from_pretrained(self.model_name ) @cached_property def SCREAMING_SNAKE_CASE__ ( self:Union[str, Any] ): snake_case__ = TFAutoModelForSeqaSeqLM.from_pretrained(self.model_name ) return model @slow def SCREAMING_SNAKE_CASE__ ( self:List[Any] ): snake_case__ = self.tokenizer(self.src_text , return_tensors='''tf''' ) snake_case__ = self.model.generate( model_inputs.input_ids , ) snake_case__ = self.tokenizer.batch_decode(generated_ids.numpy() , skip_special_tokens=_a )[0] assert ( generated_words == " That's unfortunate. Are they trying to lose weight or are they just trying to be healthier?" )
33
1
import json import os import unittest from transformers import OpenAIGPTTokenizer, OpenAIGPTTokenizerFast from transformers.models.openai.tokenization_openai import VOCAB_FILES_NAMES from transformers.testing_utils import require_ftfy, require_spacy, require_tokenizers from ...test_tokenization_common import TokenizerTesterMixin @require_tokenizers class __magic_name__ (snake_case_ ,unittest.TestCase ): '''simple docstring''' __lowercase : Optional[Any] = OpenAIGPTTokenizer __lowercase : int = OpenAIGPTTokenizerFast __lowercase : str = True __lowercase : Dict = False def SCREAMING_SNAKE_CASE__ ( self:List[str] ): super().setUp() # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt snake_case__ = [ '''l''', '''o''', '''w''', '''e''', '''r''', '''s''', '''t''', '''i''', '''d''', '''n''', '''w</w>''', '''r</w>''', '''t</w>''', '''lo''', '''low''', '''er</w>''', '''low</w>''', '''lowest</w>''', '''newer</w>''', '''wider</w>''', '''<unk>''', ] snake_case__ = dict(zip(_a , range(len(_a ) ) ) ) snake_case__ = ['''#version: 0.2''', '''l o''', '''lo w''', '''e r</w>''', ''''''] snake_case__ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] ) snake_case__ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''merges_file'''] ) with open(self.vocab_file , '''w''' ) as fp: fp.write(json.dumps(_a ) ) with open(self.merges_file , '''w''' ) as fp: fp.write('''\n'''.join(_a ) ) def SCREAMING_SNAKE_CASE__ ( self:int , _a:Dict ): return "lower newer", "lower newer" def SCREAMING_SNAKE_CASE__ ( self:Optional[Any] ): snake_case__ = OpenAIGPTTokenizer(self.vocab_file , self.merges_file ) snake_case__ = '''lower''' snake_case__ = ['''low''', '''er</w>'''] snake_case__ = tokenizer.tokenize(_a ) self.assertListEqual(_a , _a ) snake_case__ = tokens + ['''<unk>'''] snake_case__ = [14, 15, 20] self.assertListEqual(tokenizer.convert_tokens_to_ids(_a ) , _a ) def SCREAMING_SNAKE_CASE__ ( self:Optional[int] , _a:Dict=15 ): for tokenizer, 
pretrained_name, kwargs in self.tokenizers_list: with self.subTest(F"""{tokenizer.__class__.__name__} ({pretrained_name})""" ): snake_case__ = self.rust_tokenizer_class.from_pretrained(_a , **_a ) # Simple input snake_case__ = '''This is a simple input''' snake_case__ = ['''This is a simple input 1''', '''This is a simple input 2'''] snake_case__ = ('''This is a simple input''', '''This is a pair''') snake_case__ = [ ('''This is a simple input 1''', '''This is a simple input 2'''), ('''This is a simple pair 1''', '''This is a simple pair 2'''), ] # Simple input tests self.assertRaises(_a , tokenizer_r.encode , _a , max_length=_a , padding='''max_length''' ) # Simple input self.assertRaises(_a , tokenizer_r.encode_plus , _a , max_length=_a , padding='''max_length''' ) # Simple input self.assertRaises( _a , tokenizer_r.batch_encode_plus , _a , max_length=_a , padding='''max_length''' , ) # Pair input self.assertRaises(_a , tokenizer_r.encode , _a , max_length=_a , padding='''max_length''' ) # Pair input self.assertRaises(_a , tokenizer_r.encode_plus , _a , max_length=_a , padding='''max_length''' ) # Pair input self.assertRaises( _a , tokenizer_r.batch_encode_plus , _a , max_length=_a , padding='''max_length''' , ) def SCREAMING_SNAKE_CASE__ ( self:Any ): pass @require_ftfy @require_spacy @require_tokenizers class __magic_name__ (snake_case_ ): '''simple docstring''' pass
33
import json import sys import tempfile import unittest from pathlib import Path import transformers from transformers import ( CONFIG_MAPPING, IMAGE_PROCESSOR_MAPPING, AutoConfig, AutoImageProcessor, CLIPConfig, CLIPImageProcessor, ) from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER sys.path.append(str(Path(__file__).parent.parent.parent.parent / """utils""")) from test_module.custom_configuration import CustomConfig # noqa E402 from test_module.custom_image_processing import CustomImageProcessor # noqa E402 class __magic_name__ (unittest.TestCase ): '''simple docstring''' def SCREAMING_SNAKE_CASE__ ( self:List[Any] ): snake_case__ = 0 def SCREAMING_SNAKE_CASE__ ( self:Optional[Any] ): snake_case__ = AutoImageProcessor.from_pretrained('''openai/clip-vit-base-patch32''' ) self.assertIsInstance(_a , _a ) def SCREAMING_SNAKE_CASE__ ( self:str ): with tempfile.TemporaryDirectory() as tmpdirname: snake_case__ = Path(_a ) / '''preprocessor_config.json''' snake_case__ = Path(_a ) / '''config.json''' json.dump( {'''image_processor_type''': '''CLIPImageProcessor''', '''processor_class''': '''CLIPProcessor'''} , open(_a , '''w''' ) , ) json.dump({'''model_type''': '''clip'''} , open(_a , '''w''' ) ) snake_case__ = AutoImageProcessor.from_pretrained(_a ) self.assertIsInstance(_a , _a ) def SCREAMING_SNAKE_CASE__ ( self:Dict ): # Ensure we can load the image processor from the feature extractor config with tempfile.TemporaryDirectory() as tmpdirname: snake_case__ = Path(_a ) / '''preprocessor_config.json''' snake_case__ = Path(_a ) / '''config.json''' json.dump( {'''feature_extractor_type''': '''CLIPFeatureExtractor''', '''processor_class''': '''CLIPProcessor'''} , open(_a , '''w''' ) , ) json.dump({'''model_type''': '''clip'''} , open(_a , '''w''' ) ) snake_case__ = AutoImageProcessor.from_pretrained(_a ) self.assertIsInstance(_a , _a ) def SCREAMING_SNAKE_CASE__ ( self:Optional[int] ): with tempfile.TemporaryDirectory() as tmpdirname: snake_case__ = 
CLIPConfig() # Create a dummy config file with image_proceesor_type snake_case__ = Path(_a ) / '''preprocessor_config.json''' snake_case__ = Path(_a ) / '''config.json''' json.dump( {'''image_processor_type''': '''CLIPImageProcessor''', '''processor_class''': '''CLIPProcessor'''} , open(_a , '''w''' ) , ) json.dump({'''model_type''': '''clip'''} , open(_a , '''w''' ) ) # remove image_processor_type to make sure config.json alone is enough to load image processor locally snake_case__ = AutoImageProcessor.from_pretrained(_a ).to_dict() config_dict.pop('''image_processor_type''' ) snake_case__ = CLIPImageProcessor(**_a ) # save in new folder model_config.save_pretrained(_a ) config.save_pretrained(_a ) snake_case__ = AutoImageProcessor.from_pretrained(_a ) # make sure private variable is not incorrectly saved snake_case__ = json.loads(config.to_json_string() ) self.assertTrue('''_processor_class''' not in dict_as_saved ) self.assertIsInstance(_a , _a ) def SCREAMING_SNAKE_CASE__ ( self:List[str] ): with tempfile.TemporaryDirectory() as tmpdirname: snake_case__ = Path(_a ) / '''preprocessor_config.json''' json.dump( {'''image_processor_type''': '''CLIPImageProcessor''', '''processor_class''': '''CLIPProcessor'''} , open(_a , '''w''' ) , ) snake_case__ = AutoImageProcessor.from_pretrained(_a ) self.assertIsInstance(_a , _a ) def SCREAMING_SNAKE_CASE__ ( self:Dict ): with self.assertRaisesRegex( _a , '''clip-base is not a local folder and is not a valid model identifier''' ): snake_case__ = AutoImageProcessor.from_pretrained('''clip-base''' ) def SCREAMING_SNAKE_CASE__ ( self:Union[str, Any] ): with self.assertRaisesRegex( _a , r'''aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)''' ): snake_case__ = AutoImageProcessor.from_pretrained(_a , revision='''aaaaaa''' ) def SCREAMING_SNAKE_CASE__ ( self:List[Any] ): with self.assertRaisesRegex( _a , '''hf-internal-testing/config-no-model does not appear to have a file named 
preprocessor_config.json.''' , ): snake_case__ = AutoImageProcessor.from_pretrained('''hf-internal-testing/config-no-model''' ) def SCREAMING_SNAKE_CASE__ ( self:Dict ): # If remote code is not set, we will time out when asking whether to load the model. with self.assertRaises(_a ): snake_case__ = AutoImageProcessor.from_pretrained('''hf-internal-testing/test_dynamic_image_processor''' ) # If remote code is disabled, we can't load this config. with self.assertRaises(_a ): snake_case__ = AutoImageProcessor.from_pretrained( '''hf-internal-testing/test_dynamic_image_processor''' , trust_remote_code=_a ) snake_case__ = AutoImageProcessor.from_pretrained( '''hf-internal-testing/test_dynamic_image_processor''' , trust_remote_code=_a ) self.assertEqual(image_processor.__class__.__name__ , '''NewImageProcessor''' ) # Test image processor can be reloaded. with tempfile.TemporaryDirectory() as tmp_dir: image_processor.save_pretrained(_a ) snake_case__ = AutoImageProcessor.from_pretrained(_a , trust_remote_code=_a ) self.assertEqual(reloaded_image_processor.__class__.__name__ , '''NewImageProcessor''' ) def SCREAMING_SNAKE_CASE__ ( self:Dict ): try: AutoConfig.register('''custom''' , _a ) AutoImageProcessor.register(_a , _a ) # Trying to register something existing in the Transformers library will raise an error with self.assertRaises(_a ): AutoImageProcessor.register(_a , _a ) with tempfile.TemporaryDirectory() as tmpdirname: snake_case__ = Path(_a ) / '''preprocessor_config.json''' snake_case__ = Path(_a ) / '''config.json''' json.dump( {'''feature_extractor_type''': '''CLIPFeatureExtractor''', '''processor_class''': '''CLIPProcessor'''} , open(_a , '''w''' ) , ) json.dump({'''model_type''': '''clip'''} , open(_a , '''w''' ) ) snake_case__ = CustomImageProcessor.from_pretrained(_a ) # Now that the config is registered, it can be used as any other config with the auto-API with tempfile.TemporaryDirectory() as tmp_dir: image_processor.save_pretrained(_a ) snake_case__ = 
AutoImageProcessor.from_pretrained(_a ) self.assertIsInstance(_a , _a ) finally: if "custom" in CONFIG_MAPPING._extra_content: del CONFIG_MAPPING._extra_content["custom"] if CustomConfig in IMAGE_PROCESSOR_MAPPING._extra_content: del IMAGE_PROCESSOR_MAPPING._extra_content[CustomConfig] def SCREAMING_SNAKE_CASE__ ( self:Optional[int] ): class __magic_name__ (snake_case_ ): '''simple docstring''' __lowercase : List[str] = True try: AutoConfig.register('''custom''' , _a ) AutoImageProcessor.register(_a , _a ) # If remote code is not set, the default is to use local snake_case__ = AutoImageProcessor.from_pretrained('''hf-internal-testing/test_dynamic_image_processor''' ) self.assertEqual(image_processor.__class__.__name__ , '''NewImageProcessor''' ) self.assertTrue(image_processor.is_local ) # If remote code is disabled, we load the local one. snake_case__ = AutoImageProcessor.from_pretrained( '''hf-internal-testing/test_dynamic_image_processor''' , trust_remote_code=_a ) self.assertEqual(image_processor.__class__.__name__ , '''NewImageProcessor''' ) self.assertTrue(image_processor.is_local ) # If remote is enabled, we load from the Hub snake_case__ = AutoImageProcessor.from_pretrained( '''hf-internal-testing/test_dynamic_image_processor''' , trust_remote_code=_a ) self.assertEqual(image_processor.__class__.__name__ , '''NewImageProcessor''' ) self.assertTrue(not hasattr(_a , '''is_local''' ) ) finally: if "custom" in CONFIG_MAPPING._extra_content: del CONFIG_MAPPING._extra_content["custom"] if CustomConfig in IMAGE_PROCESSOR_MAPPING._extra_content: del IMAGE_PROCESSOR_MAPPING._extra_content[CustomConfig]
33
1
import absl # noqa: F401 # Here to have a nice missing dependency error message early on import nltk # noqa: F401 # Here to have a nice missing dependency error message early on import numpy # noqa: F401 # Here to have a nice missing dependency error message early on import six # noqa: F401 # Here to have a nice missing dependency error message early on from rouge_score import rouge_scorer, scoring import datasets lowerCamelCase__ : Tuple = """\ @inproceedings{lin-2004-rouge, title = \"{ROUGE}: A Package for Automatic Evaluation of Summaries\", author = \"Lin, Chin-Yew\", booktitle = \"Text Summarization Branches Out\", month = jul, year = \"2004\", address = \"Barcelona, Spain\", publisher = \"Association for Computational Linguistics\", url = \"https://www.aclweb.org/anthology/W04-1013\", pages = \"74--81\", } """ lowerCamelCase__ : List[str] = """\ ROUGE, or Recall-Oriented Understudy for Gisting Evaluation, is a set of metrics and a software package used for evaluating automatic summarization and machine translation software in natural language processing. The metrics compare an automatically produced summary or translation against a reference or a set of references (human-produced) summary or translation. Note that ROUGE is case insensitive, meaning that upper case letters are treated the same way as lower case letters. This metrics is a wrapper around Google Research reimplementation of ROUGE: https://github.com/google-research/google-research/tree/master/rouge """ lowerCamelCase__ : Optional[int] = """ Calculates average rouge scores for a list of hypotheses and references Args: predictions: list of predictions to score. Each prediction should be a string with tokens separated by spaces. references: list of reference for each prediction. Each reference should be a string with tokens separated by spaces. rouge_types: A list of rouge types to calculate. Valid names: `\"rouge{n}\"` (e.g. 
`\"rouge1\"`, `\"rouge2\"`) where: {n} is the n-gram based scoring, `\"rougeL\"`: Longest common subsequence based scoring. `\"rougeLSum\"`: rougeLsum splits text using `\"\n\"`. See details in https://github.com/huggingface/datasets/issues/617 use_stemmer: Bool indicating whether Porter stemmer should be used to strip word suffixes. use_aggregator: Return aggregates if this is set to True Returns: rouge1: rouge_1 (precision, recall, f1), rouge2: rouge_2 (precision, recall, f1), rougeL: rouge_l (precision, recall, f1), rougeLsum: rouge_lsum (precision, recall, f1) Examples: >>> rouge = datasets.load_metric('rouge') >>> predictions = [\"hello there\", \"general kenobi\"] >>> references = [\"hello there\", \"general kenobi\"] >>> results = rouge.compute(predictions=predictions, references=references) >>> print(list(results.keys())) ['rouge1', 'rouge2', 'rougeL', 'rougeLsum'] >>> print(results[\"rouge1\"]) AggregateScore(low=Score(precision=1.0, recall=1.0, fmeasure=1.0), mid=Score(precision=1.0, recall=1.0, fmeasure=1.0), high=Score(precision=1.0, recall=1.0, fmeasure=1.0)) >>> print(results[\"rouge1\"].mid.fmeasure) 1.0 """ @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION ,_KWARGS_DESCRIPTION ) class __magic_name__ (datasets.Metric ): '''simple docstring''' def SCREAMING_SNAKE_CASE__ ( self:Union[str, Any] ): return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { '''predictions''': datasets.Value('''string''' , id='''sequence''' ), '''references''': datasets.Value('''string''' , id='''sequence''' ), } ) , codebase_urls=['''https://github.com/google-research/google-research/tree/master/rouge'''] , reference_urls=[ '''https://en.wikipedia.org/wiki/ROUGE_(metric)''', '''https://github.com/google-research/google-research/tree/master/rouge''', ] , ) def SCREAMING_SNAKE_CASE__ ( self:str , _a:Union[str, Any] , _a:List[Any] , _a:str=None , _a:Optional[int]=True , 
_a:Optional[Any]=False ): if rouge_types is None: snake_case__ = ['''rouge1''', '''rouge2''', '''rougeL''', '''rougeLsum'''] snake_case__ = rouge_scorer.RougeScorer(rouge_types=_a , use_stemmer=_a ) if use_aggregator: snake_case__ = scoring.BootstrapAggregator() else: snake_case__ = [] for ref, pred in zip(_a , _a ): snake_case__ = scorer.score(_a , _a ) if use_aggregator: aggregator.add_scores(_a ) else: scores.append(_a ) if use_aggregator: snake_case__ = aggregator.aggregate() else: snake_case__ = {} for key in scores[0]: snake_case__ = [score[key] for score in scores] return result
33
import argparse
import json
from pathlib import Path

import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image

from transformers import DeiTImageProcessor, ViTConfig, ViTForImageClassification, ViTImageProcessor, ViTModel
from transformers.utils import logging


logging.set_verbosity_info()
logger = logging.get_logger(__name__)


def create_rename_keys(config, base_model=False):
    """Return (timm_key, hf_key) pairs mapping timm ViT parameter names to HF ViT names.

    Args:
        config: `ViTConfig` describing the architecture (only `num_hidden_layers` is read).
        base_model: if True, build keys for a headless `ViTModel` (no "vit." prefix,
            pooler instead of classification head).
    """
    rename_keys = []
    for i in range(config.num_hidden_layers):
        # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
        rename_keys.append((f"blocks.{i}.norm1.weight", f"vit.encoder.layer.{i}.layernorm_before.weight"))
        rename_keys.append((f"blocks.{i}.norm1.bias", f"vit.encoder.layer.{i}.layernorm_before.bias"))
        rename_keys.append((f"blocks.{i}.attn.proj.weight", f"vit.encoder.layer.{i}.attention.output.dense.weight"))
        rename_keys.append((f"blocks.{i}.attn.proj.bias", f"vit.encoder.layer.{i}.attention.output.dense.bias"))
        rename_keys.append((f"blocks.{i}.norm2.weight", f"vit.encoder.layer.{i}.layernorm_after.weight"))
        rename_keys.append((f"blocks.{i}.norm2.bias", f"vit.encoder.layer.{i}.layernorm_after.bias"))
        rename_keys.append((f"blocks.{i}.mlp.fc1.weight", f"vit.encoder.layer.{i}.intermediate.dense.weight"))
        rename_keys.append((f"blocks.{i}.mlp.fc1.bias", f"vit.encoder.layer.{i}.intermediate.dense.bias"))
        rename_keys.append((f"blocks.{i}.mlp.fc2.weight", f"vit.encoder.layer.{i}.output.dense.weight"))
        rename_keys.append((f"blocks.{i}.mlp.fc2.bias", f"vit.encoder.layer.{i}.output.dense.bias"))

    # projection layer + position embeddings
    rename_keys.extend(
        [
            ("cls_token", "vit.embeddings.cls_token"),
            ("patch_embed.proj.weight", "vit.embeddings.patch_embeddings.projection.weight"),
            ("patch_embed.proj.bias", "vit.embeddings.patch_embeddings.projection.bias"),
            ("pos_embed", "vit.embeddings.position_embeddings"),
        ]
    )

    if base_model:
        # layernorm + pooler
        rename_keys.extend(
            [
                ("norm.weight", "layernorm.weight"),
                ("norm.bias", "layernorm.bias"),
                ("pre_logits.fc.weight", "pooler.dense.weight"),
                ("pre_logits.fc.bias", "pooler.dense.bias"),
            ]
        )
        # if just the base model, we should remove "vit" from all keys that start with "vit"
        rename_keys = [(pair[0], pair[1][4:]) if pair[1].startswith("vit") else pair for pair in rename_keys]
    else:
        # layernorm + classification head
        rename_keys.extend(
            [
                ("norm.weight", "vit.layernorm.weight"),
                ("norm.bias", "vit.layernorm.bias"),
                ("head.weight", "classifier.weight"),
                ("head.bias", "classifier.bias"),
            ]
        )

    return rename_keys


def read_in_q_k_v(state_dict, config, base_model=False):
    """Split timm's fused qkv projection into separate HF query/key/value tensors (mutates state_dict)."""
    for i in range(config.num_hidden_layers):
        prefix = "" if base_model else "vit."
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"blocks.{i}.attn.qkv.weight")
        in_proj_bias = state_dict.pop(f"blocks.{i}.attn.qkv.bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[: config.hidden_size]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-config.hidden_size :]


def remove_classification_head_(state_dict):
    """Drop the classification head weights from the timm state dict (mutates state_dict)."""
    ignore_keys = ["head.weight", "head.bias"]
    for k in ignore_keys:
        state_dict.pop(k, None)


def rename_key(dct, old, new):
    """Move ``dct[old]`` to ``dct[new]`` (mutates dct)."""
    val = dct.pop(old)
    dct[new] = val


def prepare_img():
    """Download the standard COCO cats image used to verify conversion results."""
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im


@torch.no_grad()
def convert_vit_checkpoint(vit_name, pytorch_dump_folder_path):
    """Convert a timm ViT/DeiT checkpoint to the HF format and save it to disk.

    Args:
        vit_name: timm model name, e.g. "vit_base_patch16_224"; names ending in
            "in21k" are converted to a headless `ViTModel`.
        pytorch_dump_folder_path: output directory for model + image processor.
    """
    # define default ViT configuration
    config = ViTConfig()
    base_model = False

    # dataset (ImageNet-21k only or also fine-tuned on ImageNet 2012), patch_size and image_size
    if vit_name[-5:] == "in21k":
        base_model = True
        config.patch_size = int(vit_name[-12:-10])
        config.image_size = int(vit_name[-9:-6])
    else:
        config.num_labels = 1000
        repo_id = "huggingface/label-files"
        filename = "imagenet-1k-id2label.json"
        id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}
        config.patch_size = int(vit_name[-6:-4])
        config.image_size = int(vit_name[-3:])

    # size of the architecture
    if "deit" in vit_name:
        if vit_name[9:].startswith("tiny"):
            config.hidden_size = 192
            config.intermediate_size = 768
            config.num_hidden_layers = 12
            config.num_attention_heads = 3
        elif vit_name[9:].startswith("small"):
            config.hidden_size = 384
            config.intermediate_size = 1536
            config.num_hidden_layers = 12
            config.num_attention_heads = 6
        else:
            pass  # deit-base keeps the default ViT-base configuration
    else:
        if vit_name[4:].startswith("small"):
            config.hidden_size = 768
            config.intermediate_size = 2304
            config.num_hidden_layers = 8
            config.num_attention_heads = 8
        elif vit_name[4:].startswith("base"):
            pass  # defaults already match ViT-base
        elif vit_name[4:].startswith("large"):
            config.hidden_size = 1024
            config.intermediate_size = 4096
            config.num_hidden_layers = 24
            config.num_attention_heads = 16
        elif vit_name[4:].startswith("huge"):
            config.hidden_size = 1280
            config.intermediate_size = 5120
            config.num_hidden_layers = 32
            config.num_attention_heads = 16

    # load original model from timm
    timm_model = timm.create_model(vit_name, pretrained=True)
    timm_model.eval()

    # load state_dict of original model, remove and rename some keys
    state_dict = timm_model.state_dict()
    if base_model:
        remove_classification_head_(state_dict)
    rename_keys = create_rename_keys(config, base_model)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, config, base_model)

    # load HuggingFace model
    if vit_name[-5:] == "in21k":
        model = ViTModel(config).eval()
    else:
        model = ViTForImageClassification(config).eval()
    model.load_state_dict(state_dict)

    # Check outputs on an image, prepared by ViTImageProcessor/DeiTImageProcessor
    if "deit" in vit_name:
        image_processor = DeiTImageProcessor(size=config.image_size)
    else:
        image_processor = ViTImageProcessor(size=config.image_size)
    encoding = image_processor(images=prepare_img(), return_tensors="pt")
    pixel_values = encoding["pixel_values"]
    outputs = model(pixel_values)

    if base_model:
        timm_pooled_output = timm_model.forward_features(pixel_values)
        assert timm_pooled_output.shape == outputs.pooler_output.shape
        assert torch.allclose(timm_pooled_output, outputs.pooler_output, atol=1e-3)
    else:
        timm_logits = timm_model(pixel_values)
        assert timm_logits.shape == outputs.logits.shape
        assert torch.allclose(timm_logits, outputs.logits, atol=1e-3)

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model {vit_name} to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--vit_name",
        default="vit_base_patch16_224",
        type=str,
        help="Name of the ViT timm model you'd like to convert.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
    )

    args = parser.parse_args()
    convert_vit_checkpoint(args.vit_name, args.pytorch_dump_folder_path)
33
1
print((lambda quine: quine % quine)("""print((lambda quine: quine %% quine)(%r))"""))
33
import re
import warnings
from contextlib import contextmanager

from ...processing_utils import ProcessorMixin


class DonutProcessor(ProcessorMixin):
    """Wraps an image processor and a tokenizer into a single Donut processor.

    Forwards images to the image processor and text to the tokenizer, and can
    parse the model's generated token sequence back into a JSON-like structure
    via `token2json`.
    """

    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "AutoImageProcessor"
    tokenizer_class = "AutoTokenizer"

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)
        self.current_processor = self.image_processor
        self._in_target_context_manager = False

    def __call__(self, *args, **kwargs):
        """Process `images` and/or `text`; when both are given, tokenized text is attached as `labels`."""
        # For backward compatibility
        if self._in_target_context_manager:
            return self.current_processor(*args, **kwargs)

        images = kwargs.pop("images", None)
        text = kwargs.pop("text", None)
        if len(args) > 0:
            images = args[0]
            args = args[1:]

        if images is None and text is None:
            raise ValueError("You need to specify either an `images` or `text` input to process.")

        if images is not None:
            inputs = self.image_processor(images, *args, **kwargs)
        if text is not None:
            encodings = self.tokenizer(text, **kwargs)

        if text is None:
            return inputs
        elif images is None:
            return encodings
        else:
            inputs["labels"] = encodings["input_ids"]
            return inputs

    def batch_decode(self, *args, **kwargs):
        """Forward to the tokenizer's `batch_decode`."""
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        """Forward to the tokenizer's `decode`."""
        return self.tokenizer.decode(*args, **kwargs)

    @contextmanager
    def as_target_processor(self):
        """Deprecated context manager that temporarily routes calls to the tokenizer."""
        warnings.warn(
            "`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your "
            "labels by using the argument `text` of the regular `__call__` method (either in the same call as "
            "your images inputs, or in a separate call."
        )
        self._in_target_context_manager = True
        self.current_processor = self.tokenizer
        yield
        self.current_processor = self.image_processor
        self._in_target_context_manager = False

    def token2json(self, tokens, is_inner_value=False, added_vocab=None):
        """Convert a generated token sequence with <s_key>...</s_key> tags into nested dicts/lists.

        Args:
            tokens: decoded string produced by the model.
            is_inner_value: True on recursive calls; changes the return shape to a list.
            added_vocab: tokenizer's added vocabulary; fetched from the tokenizer if None.
        """
        if added_vocab is None:
            added_vocab = self.tokenizer.get_added_vocab()

        output = {}

        while tokens:
            start_token = re.search(r"<s_(.*?)>", tokens, re.IGNORECASE)
            if start_token is None:
                break
            key = start_token.group(1)
            end_token = re.search(rf"</s_{key}>", tokens, re.IGNORECASE)
            start_token = start_token.group()
            if end_token is None:
                # unterminated tag: drop the opening token and keep scanning
                tokens = tokens.replace(start_token, "")
            else:
                end_token = end_token.group()
                start_token_escaped = re.escape(start_token)
                end_token_escaped = re.escape(end_token)
                content = re.search(f"{start_token_escaped}(.*?){end_token_escaped}", tokens, re.IGNORECASE)
                if content is not None:
                    content = content.group(1).strip()
                    if r"<s_" in content and r"</s_" in content:
                        # non-leaf node
                        value = self.token2json(content, is_inner_value=True, added_vocab=added_vocab)
                        if value:
                            if len(value) == 1:
                                value = value[0]
                            output[key] = value
                    else:
                        # leaf nodes
                        output[key] = []
                        for leaf in content.split(r"<sep/>"):
                            leaf = leaf.strip()
                            if leaf in added_vocab and leaf[0] == "<" and leaf[-2:] == "/>":
                                leaf = leaf[1:-2]  # for categorical special tokens
                            output[key].append(leaf)
                        if len(output[key]) == 1:
                            output[key] = output[key][0]

                tokens = tokens[tokens.find(end_token) + len(end_token) :].strip()
                if tokens[:6] == r"<sep/>":  # non-leaf nodes
                    return [output] + self.token2json(tokens[6:], is_inner_value=True, added_vocab=added_vocab)

        if len(output):
            return [output] if is_inner_value else output
        else:
            return [] if is_inner_value else {"text_sequence": tokens}

    @property
    def feature_extractor_class(self):
        """Deprecated alias for `image_processor_class`."""
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
            FutureWarning,
        )
        return self.image_processor_class

    @property
    def feature_extractor(self):
        """Deprecated alias for `image_processor`."""
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.",
            FutureWarning,
        )
        return self.image_processor
33
1
from typing import List, Optional, Union

from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType


class BridgeTowerProcessor(ProcessorMixin):
    """Combines a BridgeTower image processor and a RoBERTa tokenizer into one processor.

    `__call__` tokenizes the text and adds the image processor's `pixel_values`
    (and `pixel_mask`) to the resulting encoding.
    """

    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "BridgeTowerImageProcessor"
    tokenizer_class = ("RobertaTokenizer", "RobertaTokenizerFast")

    def __init__(self, image_processor, tokenizer):
        super().__init__(image_processor, tokenizer)

    def __call__(
        self,
        images,
        text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None,
        add_special_tokens: bool = True,
        padding: Union[bool, str, PaddingStrategy] = False,
        truncation: Union[bool, str, TruncationStrategy] = None,
        max_length: Optional[int] = None,
        stride: int = 0,
        pad_to_multiple_of: Optional[int] = None,
        return_token_type_ids: Optional[bool] = None,
        return_attention_mask: Optional[bool] = None,
        return_overflowing_tokens: bool = False,
        return_special_tokens_mask: bool = False,
        return_offsets_mapping: bool = False,
        return_length: bool = False,
        verbose: bool = True,
        return_tensors: Optional[Union[str, TensorType]] = None,
        **kwargs,
    ) -> BatchEncoding:
        """Tokenize `text` and merge in the processed `images` features."""
        encoding = self.tokenizer(
            text=text,
            add_special_tokens=add_special_tokens,
            padding=padding,
            truncation=truncation,
            max_length=max_length,
            stride=stride,
            pad_to_multiple_of=pad_to_multiple_of,
            return_token_type_ids=return_token_type_ids,
            return_attention_mask=return_attention_mask,
            return_overflowing_tokens=return_overflowing_tokens,
            return_special_tokens_mask=return_special_tokens_mask,
            return_offsets_mapping=return_offsets_mapping,
            return_length=return_length,
            verbose=verbose,
            return_tensors=return_tensors,
            **kwargs,
        )
        # add pixel_values + pixel_mask
        encoding_image_processor = self.image_processor(
            images, return_tensors=return_tensors, do_normalize=True, do_center_crop=True, **kwargs
        )
        encoding.update(encoding_image_processor)

        return encoding

    def batch_decode(self, *args, **kwargs):
        """Forward to the tokenizer's `batch_decode`."""
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        """Forward to the tokenizer's `decode`."""
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        # union of tokenizer and image-processor input names, order-preserving
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))
33
import inspect
import unittest

from transformers import BitConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available

from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin


if is_torch_available():
    import torch
    from torch import nn

    from transformers import BitBackbone, BitForImageClassification, BitImageProcessor, BitModel
    from transformers.models.bit.modeling_bit import BIT_PRETRAINED_MODEL_ARCHIVE_LIST

if is_vision_available():
    from PIL import Image


class BitModelTester:
    """Builds tiny Bit configs/inputs and runs shape checks shared by the unit tests."""

    def __init__(
        self,
        parent,
        batch_size=3,
        image_size=32,
        num_channels=3,
        embeddings_size=10,
        hidden_sizes=[8, 16, 32, 64],
        depths=[1, 1, 2, 1],
        is_training=True,
        use_labels=True,
        hidden_act="relu",
        num_labels=3,
        scope=None,
        out_features=["stage2", "stage3", "stage4"],
        out_indices=[2, 3, 4],
        num_groups=1,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.embeddings_size = embeddings_size
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_act = hidden_act
        self.num_labels = num_labels
        self.scope = scope
        self.num_stages = len(depths)
        self.out_features = out_features
        self.out_indices = out_indices
        self.num_groups = num_groups

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)

        config = self.get_config()

        return config, pixel_values, labels

    def get_config(self):
        return BitConfig(
            num_channels=self.num_channels,
            embeddings_size=self.embeddings_size,
            hidden_sizes=self.hidden_sizes,
            depths=self.depths,
            hidden_act=self.hidden_act,
            num_labels=self.num_labels,
            out_features=self.out_features,
            out_indices=self.out_indices,
            num_groups=self.num_groups,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = BitModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        # final feature map is downsampled 32x relative to the input resolution
        self.parent.assertEqual(
            result.last_hidden_state.shape,
            (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32),
        )

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.num_labels
        model = BitForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_backbone(self, config, pixel_values, labels):
        model = BitBackbone(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)

        # verify feature maps
        self.parent.assertEqual(len(result.feature_maps), len(config.out_features))
        self.parent.assertListEqual(list(result.feature_maps[0].shape), [self.batch_size, self.hidden_sizes[1], 4, 4])

        # verify channels
        self.parent.assertEqual(len(model.channels), len(config.out_features))
        self.parent.assertListEqual(model.channels, config.hidden_sizes[1:])

        # verify backbone works with out_features=None
        config.out_features = None
        model = BitBackbone(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)

        # verify feature maps
        self.parent.assertEqual(len(result.feature_maps), 1)
        self.parent.assertListEqual(list(result.feature_maps[0].shape), [self.batch_size, self.hidden_sizes[-1], 1, 1])

        # verify channels
        self.parent.assertEqual(len(model.channels), 1)
        self.parent.assertListEqual(model.channels, [config.hidden_sizes[-1]])

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict


@require_torch
class BitModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """
    Here we also overwrite some of the tests of test_modeling_common.py, as Bit does not use
    input_ids, inputs_embeds, attention_mask and seq_length.
    """

    all_model_classes = (BitModel, BitForImageClassification, BitBackbone) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": BitModel, "image-classification": BitForImageClassification}
        if is_torch_available()
        else {}
    )

    fx_compatible = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False

    def setUp(self):
        self.model_tester = BitModelTester(self)
        self.config_tester = ConfigTester(self, config_class=BitConfig, has_text_modality=False)

    def test_config(self):
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def create_and_test_config_common_properties(self):
        return

    @unittest.skip(reason="Bit does not output attentions")
    def test_attention_outputs(self):
        pass

    @unittest.skip(reason="Bit does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="Bit does not support input and output embeddings")
    def test_model_common_attributes(self):
        pass

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_backbone(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_backbone(*config_and_inputs)

    def test_initialization(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config=config)
            for name, module in model.named_modules():
                if isinstance(module, (nn.BatchNorm2d, nn.GroupNorm)):
                    self.assertTrue(
                        torch.all(module.weight == 1),
                        msg=f"Parameter {name} of model {model_class} seems not properly initialized",
                    )
                    self.assertTrue(
                        torch.all(module.bias == 0),
                        msg=f"Parameter {name} of model {model_class} seems not properly initialized",
                    )

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states

            expected_num_stages = self.model_tester.num_stages
            self.assertEqual(len(hidden_states), expected_num_stages + 1)

            # Bit's feature maps are of shape (batch_size, num_channels, height, width)
            self.assertListEqual(
                list(hidden_states[0].shape[-2:]),
                [self.model_tester.image_size // 4, self.model_tester.image_size // 4],
            )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        layers_type = ["preactivation", "bottleneck"]
        for model_class in self.all_model_classes:
            for layer_type in layers_type:
                config.layer_type = layer_type
                inputs_dict["output_hidden_states"] = True
                check_hidden_states_output(inputs_dict, config, model_class)

                # check that output_hidden_states also work using config
                del inputs_dict["output_hidden_states"]
                config.output_hidden_states = True

                check_hidden_states_output(inputs_dict, config, model_class)

    @unittest.skip(reason="Bit does not use feedforward chunking")
    def test_feed_forward_chunking(self):
        pass

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in BIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = BitModel.from_pretrained(model_name)
            self.assertIsNotNone(model)


def prepare_img():
    """Load the standard COCO cats fixture used by the integration test."""
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image


@require_torch
@require_vision
class BitModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return (
            BitImageProcessor.from_pretrained(BIT_PRETRAINED_MODEL_ARCHIVE_LIST[0]) if is_vision_available() else None
        )

    @slow
    def test_inference_image_classification_head(self):
        model = BitForImageClassification.from_pretrained(BIT_PRETRAINED_MODEL_ARCHIVE_LIST[0]).to(torch_device)

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([[-0.6526, -0.5263, -1.4398]]).to(torch_device)

        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))


@require_torch
class BitBackboneTest(BackboneTesterMixin, unittest.TestCase):
    all_model_classes = (BitBackbone,) if is_torch_available() else ()
    config_class = BitConfig

    has_attentions = False

    def setUp(self):
        self.model_tester = BitModelTester(self)
33
1
from ...configuration_utils import PretrainedConfig from ...utils import logging lowerCamelCase__ : List[str] = logging.get_logger(__name__) lowerCamelCase__ : Dict = { """google/vivit-b-16x2-kinetics400""": ( """https://huggingface.co/google/vivit-b-16x2-kinetics400/resolve/main/config.json""" ), # See all Vivit models at https://huggingface.co/models?filter=vivit } class __magic_name__ (snake_case_ ): '''simple docstring''' __lowercase : Optional[int] = 'vivit' def __init__( self:Tuple , _a:Any=2_24 , _a:Tuple=32 , _a:Optional[Any]=[2, 16, 16] , _a:str=3 , _a:Union[str, Any]=7_68 , _a:str=12 , _a:str=12 , _a:str=30_72 , _a:str="gelu_fast" , _a:Any=0.0 , _a:int=0.0 , _a:str=0.02 , _a:List[str]=1e-06 , _a:int=True , **_a:int , ): snake_case__ = hidden_size snake_case__ = num_hidden_layers snake_case__ = num_attention_heads snake_case__ = intermediate_size snake_case__ = hidden_act snake_case__ = hidden_dropout_prob snake_case__ = attention_probs_dropout_prob snake_case__ = initializer_range snake_case__ = layer_norm_eps snake_case__ = image_size snake_case__ = num_frames snake_case__ = tubelet_size snake_case__ = num_channels snake_case__ = qkv_bias super().__init__(**_a )
33
import numpy as np
import torch
from torch.nn import CrossEntropyLoss
from transformers import AutoModelForCausalLM, AutoTokenizer

import datasets
from datasets import logging


_CITATION = """\
"""

_DESCRIPTION = """
Perplexity (PPL) is one of the most common metrics for evaluating language models.
It is defined as the exponentiated average negative log-likelihood of a sequence.

For more information, see https://huggingface.co/docs/transformers/perplexity
"""

_KWARGS_DESCRIPTION = """
Args:
    model_id (str): model used for calculating Perplexity
            NOTE: Perplexity can only be calculated for causal language models.
                    This includes models such as gpt2, causal variations of bert,
                    causal versions of t5, and more (the full list can be found
                    in the AutoModelForCausalLM documentation here:
                    https://huggingface.co/docs/transformers/master/en/model_doc/auto#transformers.AutoModelForCausalLM )

    input_texts (list of str): input text, each separate text snippet is one list entry.
    batch_size (int): the batch size to run texts through the model. Defaults to 16.
    add_start_token (bool): whether to add the start token to the texts,
        so the perplexity can include the probability of the first word. Defaults to True.
    device (str): device to run on, defaults to 'cuda' when available
Returns:
    perplexity: dictionary containing the perplexity scores for the texts
        in the input list, as well as the mean perplexity. If one of the input texts is
        longer than the max input length of the model, then it is truncated to the
        max length for the perplexity computation.
Examples:
    Example 1:
        >>> perplexity = datasets.load_metric(\"perplexity\")
        >>> input_texts = [\"lorem ipsum\", \"Happy Birthday!\", \"Bienvenue\"]
        >>> results = perplexity.compute(model_id='gpt2',
        ...                              add_start_token=False,
        ...                              input_texts=input_texts) # doctest:+ELLIPSIS
        >>> print(list(results.keys()))
        ['perplexities', 'mean_perplexity']
        >>> print(round(results[\"mean_perplexity\"], 2))
        78.22
        >>> print(round(results[\"perplexities\"][0], 2))
        11.11

    Example 2:
        >>> perplexity = datasets.load_metric(\"perplexity\")
        >>> input_texts = datasets.load_dataset(\"wikitext\",
        ...                                     \"wikitext-2-raw-v1\",
        ...                                     split=\"test\")[\"text\"][:50] # doctest:+ELLIPSIS
        [...]
        >>> input_texts = [s for s in input_texts if s!='']
        >>> results = perplexity.compute(model_id='gpt2',
        ...                              input_texts=input_texts) # doctest:+ELLIPSIS
        >>> print(list(results.keys()))
        ['perplexities', 'mean_perplexity']
        >>> print(round(results[\"mean_perplexity\"], 2))
        60.35
        >>> print(round(results[\"perplexities\"][0], 2))
        81.12
"""


@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class Perplexity(datasets.Metric):
    """Metric computing per-text and mean perplexity of a causal language model."""

    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "input_texts": datasets.Value("string"),
                }
            ),
            reference_urls=["https://huggingface.co/docs/transformers/perplexity"],
        )

    def _compute(self, input_texts, model_id, batch_size: int = 16, add_start_token: bool = True, device=None):
        """Tokenize the inputs, run the model in batches and exponentiate the masked mean NLL."""
        if device is not None:
            assert device in ["gpu", "cpu", "cuda"], "device should be either gpu or cpu."
            if device == "gpu":
                device = "cuda"
        else:
            device = "cuda" if torch.cuda.is_available() else "cpu"

        model = AutoModelForCausalLM.from_pretrained(model_id)
        model = model.to(device)

        tokenizer = AutoTokenizer.from_pretrained(model_id)

        # if batch_size > 1 (which generally leads to padding being required), and
        # if there is not an already assigned pad_token, assign an existing
        # special token to also be the padding token
        if tokenizer.pad_token is None and batch_size > 1:
            existing_special_tokens = list(tokenizer.special_tokens_map_extended.values())
            # check that the model already has at least one special token defined
            assert (
                len(existing_special_tokens) > 0
            ), "If batch_size > 1, model must have at least one special token to use for padding. Please use a different model or set batch_size=1."
            # assign one of the special tokens to also be the pad token
            tokenizer.add_special_tokens({"pad_token": existing_special_tokens[0]})

        if add_start_token:
            # leave room for <BOS> token to be added:
            assert (
                tokenizer.bos_token is not None
            ), "Input model must already have a BOS token if using add_start_token=True. Please use a different model, or set add_start_token=False"
            max_tokenized_len = model.config.max_length - 1
        else:
            max_tokenized_len = model.config.max_length

        encodings = tokenizer(
            input_texts,
            add_special_tokens=False,
            padding=True,
            truncation=True,
            max_length=max_tokenized_len,
            return_tensors="pt",
            return_attention_mask=True,
        ).to(device)

        encoded_texts = encodings["input_ids"]
        attn_masks = encodings["attention_mask"]

        # check that each input is long enough:
        if add_start_token:
            assert torch.all(torch.ge(attn_masks.sum(1), 1)), "Each input text must be at least one token long."
        else:
            assert torch.all(
                torch.ge(attn_masks.sum(1), 2)
            ), "When add_start_token=False, each input text must be at least two tokens long. Run with add_start_token=True if inputting strings of only one token, and remove all empty input strings."

        ppls = []
        loss_fct = CrossEntropyLoss(reduction="none")

        for start_index in logging.tqdm(range(0, len(encoded_texts), batch_size)):
            end_index = min(start_index + batch_size, len(encoded_texts))
            encoded_batch = encoded_texts[start_index:end_index]
            attn_mask = attn_masks[start_index:end_index]

            if add_start_token:
                bos_tokens_tensor = torch.tensor([[tokenizer.bos_token_id]] * encoded_batch.size(dim=0)).to(device)
                encoded_batch = torch.cat([bos_tokens_tensor, encoded_batch], dim=1)
                attn_mask = torch.cat(
                    [torch.ones(bos_tokens_tensor.size(), dtype=torch.int64).to(device), attn_mask], dim=1
                )

            labels = encoded_batch

            with torch.no_grad():
                out_logits = model(encoded_batch, attention_mask=attn_mask).logits

            # shift so tokens predict the next position; mask out padding
            shift_logits = out_logits[..., :-1, :].contiguous()
            shift_labels = labels[..., 1:].contiguous()
            shift_attention_mask_batch = attn_mask[..., 1:].contiguous()

            # NOTE(review): exp2 exponentiates a nat-based CrossEntropyLoss in base 2;
            # confirm this matches the documented example values (torch.exp would be e-based).
            perplexity_batch = torch.exp2(
                (loss_fct(shift_logits.transpose(1, 2), shift_labels) * shift_attention_mask_batch).sum(1)
                / shift_attention_mask_batch.sum(1)
            )

            ppls += perplexity_batch.tolist()

        return {"perplexities": ppls, "mean_perplexity": np.mean(ppls)}
33
1
import inspect
import os
import unittest

import torch

import accelerate
from accelerate import Accelerator
from accelerate.test_utils import execute_subprocess_async, require_multi_gpu
from accelerate.utils import patch_environment


class MultiGPUTester(unittest.TestCase):
    """Launches the multi-GPU test scripts under torchrun.

    Fixes over the mangled original: the four test methods had the same name
    (so unittest only ran the last one), setUp discarded the computed paths
    instead of storing them on self, and the subprocess calls referenced an
    undefined name instead of the command list.
    """

    def setUp(self):
        # Locate the helper scripts shipped inside accelerate.test_utils.
        mod_file = inspect.getfile(accelerate.test_utils)
        self.test_file_path = os.path.sep.join(mod_file.split(os.path.sep)[:-1] + ["scripts", "test_script.py"])
        self.data_loop_file_path = os.path.sep.join(
            mod_file.split(os.path.sep)[:-1] + ["scripts", "test_distributed_data_loop.py"]
        )
        self.operation_file_path = os.path.sep.join(mod_file.split(os.path.sep)[:-1] + ["scripts", "test_ops.py"])

    @require_multi_gpu
    def test_multi_gpu(self):
        print(f"Found {torch.cuda.device_count()} devices.")
        cmd = ["torchrun", f"--nproc_per_node={torch.cuda.device_count()}", self.test_file_path]
        with patch_environment(omp_num_threads=1):
            execute_subprocess_async(cmd, env=os.environ.copy())

    @require_multi_gpu
    def test_multi_gpu_ops(self):
        print(f"Found {torch.cuda.device_count()} devices.")
        cmd = ["torchrun", f"--nproc_per_node={torch.cuda.device_count()}", self.operation_file_path]
        print(f"Command: {cmd}")
        with patch_environment(omp_num_threads=1):
            execute_subprocess_async(cmd, env=os.environ.copy())

    @require_multi_gpu
    def test_pad_across_processes(self):
        # Re-launches this very file under torchrun; the __main__ block below
        # performs the actual pad_across_processes checks on each rank.
        cmd = ["torchrun", f"--nproc_per_node={torch.cuda.device_count()}", inspect.getfile(self.__class__)]
        with patch_environment(omp_num_threads=1):
            execute_subprocess_async(cmd, env=os.environ.copy())

    @require_multi_gpu
    def test_distributed_data_loop(self):
        print(f"Found {torch.cuda.device_count()} devices, using 2 devices only")
        cmd = ["torchrun", f"--nproc_per_node={torch.cuda.device_count()}", self.data_loop_file_path]
        with patch_environment(omp_num_threads=1, cuda_visible_devices="0,1"):
            execute_subprocess_async(cmd, env=os.environ.copy())


if __name__ == "__main__":
    # Executed on every rank when launched by test_pad_across_processes.
    accelerator = Accelerator()
    shape = (accelerator.state.process_index + 2, 10)
    tensor = torch.randint(0, 10, shape).to(accelerator.device)

    error_msg = ""

    # Pad at the end (default): data first, zeros after.
    tensor1 = accelerator.pad_across_processes(tensor)
    if tensor1.shape[0] != accelerator.state.num_processes + 1:
        error_msg += f"Found shape {tensor1.shape} but should have {accelerator.state.num_processes + 1} at dim 0."
    if not torch.equal(tensor1[: accelerator.state.process_index + 2], tensor):
        error_msg += "Tensors have different values."
    if not torch.all(tensor1[accelerator.state.process_index + 2 :] == 0):
        error_msg += "Padding was not done with the right value (0)."

    # Pad at the beginning: zeros first, data after.
    tensor2 = accelerator.pad_across_processes(tensor, pad_first=True)
    if tensor2.shape[0] != accelerator.state.num_processes + 1:
        error_msg += f"Found shape {tensor2.shape} but should have {accelerator.state.num_processes + 1} at dim 0."
    index = accelerator.state.num_processes - accelerator.state.process_index - 1
    if not torch.equal(tensor2[index:], tensor):
        error_msg += "Tensors have different values."
    if not torch.all(tensor2[:index] == 0):
        error_msg += "Padding was not done with the right value (0)."

    # Raise error at the end to make sure we don't stop at the first failure.
    if len(error_msg) > 0:
        raise ValueError(error_msg)
33
import os
from datetime import datetime as dt

from github import Github


# Issues carrying any of these labels are never touched by the stale bot.
LABELS_TO_EXEMPT = [
    "good first issue",
    "good second issue",
    "good difficult issue",
    "enhancement",
    "new pipeline/model",
    "new scheduler",
    "wip",
]


def main():
    """Close, revive, or mark as stale the open issues of huggingface/diffusers.

    Fixes over the mangled original: the function was not named ``main`` even
    though the ``__main__`` guard calls ``main()``, and the sort key lambda
    read ``i`` while its parameter had a different name (NameError).
    """
    g = Github(os.environ["GITHUB_TOKEN"])
    repo = g.get_repo("huggingface/diffusers")
    open_issues = repo.get_issues(state="open")

    for issue in open_issues:
        # Newest comment first.
        comments = sorted(issue.get_comments(), key=lambda i: i.created_at, reverse=True)
        last_comment = comments[0] if len(comments) > 0 else None
        if (
            last_comment is not None
            and last_comment.user.login == "github-actions[bot]"
            and (dt.utcnow() - issue.updated_at).days > 7
            and (dt.utcnow() - issue.created_at).days >= 30
            and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels())
        ):
            # Closes the issue after 7 days of inactivity since the Stalebot notification.
            issue.edit(state="closed")
        elif (
            "stale" in issue.get_labels()
            and last_comment is not None
            and last_comment.user.login != "github-actions[bot]"
        ):
            # Opens the issue if someone other than Stalebot commented.
            issue.edit(state="open")
            issue.remove_from_labels("stale")
        elif (
            (dt.utcnow() - issue.updated_at).days > 23
            and (dt.utcnow() - issue.created_at).days >= 30
            and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels())
        ):
            # Post a Stalebot notification after 23 days of inactivity.
            issue.create_comment(
                "This issue has been automatically marked as stale because it has not had "
                "recent activity. If you think this still needs to be addressed "
                "please comment on this thread.\n\nPlease note that issues that do not follow the "
                "[contributing guidelines](https://github.com/huggingface/diffusers/blob/main/CONTRIBUTING.md) "
                "are likely to be ignored."
            )
            issue.add_to_labels("stale")


if __name__ == "__main__":
    main()
33
1
import shutil
import tempfile
import unittest

import numpy as np
import pytest

from transformers import is_speech_available, is_vision_available
from transformers.testing_utils import require_torch


if is_vision_available():
    from transformers import TvltImageProcessor
if is_speech_available():
    from transformers import TvltFeatureExtractor

from transformers import TvltProcessor


@require_torch
class TvltProcessorTest(unittest.TestCase):
    """Tests for TvltProcessor (image processor + audio feature extractor).

    Fixes over the mangled original: every test method shared one name (so
    unittest only ran the last), setUp discarded its values instead of
    storing them on self, and processor construction referenced an
    undefined ``_a``.
    """

    def setUp(self):
        self.checkpoint = "ZinengTang/tvlt-base"
        self.tmpdirname = tempfile.mkdtemp()

    def get_image_processor(self, **kwargs):
        return TvltImageProcessor.from_pretrained(self.checkpoint, **kwargs)

    def get_feature_extractor(self, **kwargs):
        return TvltFeatureExtractor.from_pretrained(self.checkpoint, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def test_save_load_pretrained_default(self):
        image_processor = self.get_image_processor()
        feature_extractor = self.get_feature_extractor()

        processor = TvltProcessor(image_processor=image_processor, feature_extractor=feature_extractor)
        processor.save_pretrained(self.tmpdirname)
        processor = TvltProcessor.from_pretrained(self.tmpdirname)

        self.assertIsInstance(processor.feature_extractor, TvltFeatureExtractor)
        self.assertIsInstance(processor.image_processor, TvltImageProcessor)

    def test_feature_extractor(self):
        image_processor = self.get_image_processor()
        feature_extractor = self.get_feature_extractor()

        processor = TvltProcessor(image_processor=image_processor, feature_extractor=feature_extractor)

        audio = np.ones([12000])

        audio_dict = feature_extractor(audio, return_tensors="np")
        input_processor = processor(audio=audio, return_tensors="np")

        # Processor must delegate audio inputs to the feature extractor unchanged.
        for key in audio_dict.keys():
            self.assertAlmostEqual(audio_dict[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_image_processor(self):
        image_processor = self.get_image_processor()
        feature_extractor = self.get_feature_extractor()

        processor = TvltProcessor(image_processor=image_processor, feature_extractor=feature_extractor)

        image = np.ones([3, 224, 224])

        image_dict = image_processor(image, return_tensors="np")
        input_processor = processor(images=image, return_tensors="np")

        # Processor must delegate image inputs to the image processor unchanged.
        for key in image_dict.keys():
            self.assertAlmostEqual(image_dict[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_processor(self):
        image_processor = self.get_image_processor()
        feature_extractor = self.get_feature_extractor()

        processor = TvltProcessor(image_processor=image_processor, feature_extractor=feature_extractor)

        audio = np.ones([12000])
        image = np.ones([3, 224, 224])

        inputs = processor(audio=audio, images=image)

        self.assertListEqual(list(inputs.keys()), ["audio_values", "audio_mask", "pixel_values", "pixel_mask"])

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_model_input_names(self):
        image_processor = self.get_image_processor()
        feature_extractor = self.get_feature_extractor()

        processor = TvltProcessor(image_processor=image_processor, feature_extractor=feature_extractor)

        self.assertListEqual(
            processor.model_input_names,
            image_processor.model_input_names + feature_extractor.model_input_names,
            msg="`processor` and `image_processor`+`feature_extractor` model input names do not match",
        )
33
import pytest

from datasets.utils.sharding import _distribute_shards, _number_of_shards_in_gen_kwargs, _split_gen_kwargs


@pytest.mark.parametrize(
    "kwargs, expected",
    [
        ({"num_shards": 0, "max_num_jobs": 1}, []),
        ({"num_shards": 10, "max_num_jobs": 1}, [range(10)]),
        # One shard per job when num_shards == max_num_jobs.
        # (The mangled original used an undefined name as the range start.)
        ({"num_shards": 10, "max_num_jobs": 10}, [range(i, i + 1) for i in range(10)]),
        ({"num_shards": 1, "max_num_jobs": 10}, [range(1)]),
        ({"num_shards": 10, "max_num_jobs": 3}, [range(0, 4), range(4, 7), range(7, 10)]),
        ({"num_shards": 3, "max_num_jobs": 10}, [range(0, 1), range(1, 2), range(2, 3)]),
    ],
)
def test_distribute_shards(kwargs, expected):
    """Shards are partitioned into at most max_num_jobs contiguous ranges."""
    out = _distribute_shards(**kwargs)
    assert out == expected


@pytest.mark.parametrize(
    "gen_kwargs, max_num_jobs, expected",
    [
        ({"foo": 0}, 10, [{"foo": 0}]),
        ({"shards": [0, 1, 2, 3]}, 1, [{"shards": [0, 1, 2, 3]}]),
        ({"shards": [0, 1, 2, 3]}, 4, [{"shards": [0]}, {"shards": [1]}, {"shards": [2]}, {"shards": [3]}]),
        ({"shards": [0, 1]}, 4, [{"shards": [0]}, {"shards": [1]}]),
        ({"shards": [0, 1, 2, 3]}, 2, [{"shards": [0, 1]}, {"shards": [2, 3]}]),
    ],
)
def test_split_gen_kwargs(gen_kwargs, max_num_jobs, expected):
    """gen_kwargs list values are split across jobs; scalars are replicated."""
    out = _split_gen_kwargs(gen_kwargs, max_num_jobs)
    assert out == expected


@pytest.mark.parametrize(
    "gen_kwargs, expected",
    [
        ({"foo": 0}, 1),
        ({"shards": [0]}, 1),
        ({"shards": [0, 1, 2, 3]}, 4),
        ({"shards": [0, 1, 2, 3], "foo": 0}, 4),
        ({"shards": [0, 1, 2, 3], "other": (0, 1)}, 4),
        # Two list arguments of different lengths are ambiguous -> error.
        ({"shards": [0, 1, 2, 3], "shards2": [0, 1]}, RuntimeError),
    ],
)
def test_number_of_shards_in_gen_kwargs(gen_kwargs, expected):
    """Shard count is inferred from list-valued gen_kwargs; mismatched lists raise."""
    if expected is RuntimeError:
        with pytest.raises(expected):
            _number_of_shards_in_gen_kwargs(gen_kwargs)
    else:
        out = _number_of_shards_in_gen_kwargs(gen_kwargs)
        assert out == expected
33
1
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, List, Mapping, Optional, Union

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import TensorType, logging


if TYPE_CHECKING:
    from ...onnx.config import PatchingSpec
    from ...tokenization_utils_base import PreTrainedTokenizerBase


logger = logging.get_logger(__name__)

LONGFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "allenai/longformer-base-4096": "https://huggingface.co/allenai/longformer-base-4096/resolve/main/config.json",
    "allenai/longformer-large-4096": "https://huggingface.co/allenai/longformer-large-4096/resolve/main/config.json",
    "allenai/longformer-large-4096-finetuned-triviaqa": (
        "https://huggingface.co/allenai/longformer-large-4096-finetuned-triviaqa/resolve/main/config.json"
    ),
    "allenai/longformer-base-4096-extra.pos.embd.only": (
        "https://huggingface.co/allenai/longformer-base-4096-extra.pos.embd.only/resolve/main/config.json"
    ),
    "allenai/longformer-large-4096-extra.pos.embd.only": (
        "https://huggingface.co/allenai/longformer-large-4096-extra.pos.embd.only/resolve/main/config.json"
    ),
}


class LongformerConfig(PretrainedConfig):
    """Configuration for Longformer models.

    Fixes over the mangled original: the class extended an undefined name,
    the second class shadowed the first (both were ``__magic_name__``), and
    ``__init__`` collapsed every argument into one throwaway local instead
    of storing them on the instance.
    """

    model_type = "longformer"

    def __init__(
        self,
        attention_window: Union[List[int], int] = 512,
        sep_token_id: int = 2,
        pad_token_id: int = 1,
        bos_token_id: int = 0,
        eos_token_id: int = 2,
        vocab_size: int = 30522,
        hidden_size: int = 768,
        num_hidden_layers: int = 12,
        num_attention_heads: int = 12,
        intermediate_size: int = 3072,
        hidden_act: str = "gelu",
        hidden_dropout_prob: float = 0.1,
        attention_probs_dropout_prob: float = 0.1,
        max_position_embeddings: int = 512,
        type_vocab_size: int = 2,
        initializer_range: float = 0.02,
        layer_norm_eps: float = 1e-12,
        onnx_export: bool = False,
        **kwargs: Any,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)

        self.attention_window = attention_window
        self.sep_token_id = sep_token_id
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.onnx_export = onnx_export


class LongformerOnnxConfig(OnnxConfig):
    """ONNX export configuration for Longformer."""

    def __init__(self, config: "PretrainedConfig", task: str = "default", patching_specs: "List[PatchingSpec]" = None):
        super().__init__(config, task, patching_specs)
        # Flag the model config so the modeling code takes the export-friendly path.
        config.onnx_export = True

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
                ("global_attention_mask", dynamic_axis),
            ]
        )

    @property
    def outputs(self) -> Mapping[str, Mapping[int, str]]:
        outputs = super().outputs
        if self.task == "default":
            # NOTE(review): the mangled source only shows an assignment of
            # {0: "batch"}; upstream attaches it as the "pooler_output" axis map.
            outputs["pooler_output"] = {0: "batch"}
        return outputs

    @property
    def atol_for_validation(self) -> float:
        return 1e-4

    @property
    def default_onnx_opset(self) -> int:
        # needs to be >= 14 to support tril operator
        return max(super().default_onnx_opset, 14)

    def generate_dummy_inputs(
        self,
        preprocessor: "PreTrainedTokenizerBase",
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        inputs = super().generate_dummy_inputs(
            preprocessor=preprocessor, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
        )
        import torch

        # for some reason, replacing this code by inputs["global_attention_mask"] = torch.randint(2, inputs["input_ids"].shape, dtype=torch.int64)
        # makes the export fail randomly
        inputs["global_attention_mask"] = torch.zeros_like(inputs["input_ids"])
        # make every second token global
        inputs["global_attention_mask"][:, ::2] = 1

        return inputs
33
import random
import unittest

import torch

from diffusers import IFImg2ImgSuperResolutionPipeline
from diffusers.utils import floats_tensor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import skip_mps, torch_device

from ..pipeline_params import TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
from . import IFPipelineTesterMixin


@skip_mps
class IFImg2ImgSuperResolutionPipelineFastTests(PipelineTesterMixin, IFPipelineTesterMixin, unittest.TestCase):
    """Fast tests for the IF img2img super-resolution pipeline.

    Fixes over the mangled original: undefined mixin base-class names, every
    local assigned to the same throwaway name, and all values passed to the
    inputs dict via an undefined ``_a``.
    """

    pipeline_class = IFImg2ImgSuperResolutionPipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"width", "height"}
    batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS.union({"original_image"})
    required_optional_params = PipelineTesterMixin.required_optional_params - {"latents"}

    def get_dummy_components(self):
        return self._get_superresolution_dummy_components()

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)

        # NOTE(review): the mangled source only shows two tensors of shapes
        # (1, 3, 32, 32) and (1, 3, 16, 16); assumed mapping is
        # original_image = 32x32, image (stage-1 output to upscale) = 16x16 —
        # confirm against the upstream test file.
        original_image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        image = floats_tensor((1, 3, 16, 16), rng=random.Random(seed)).to(device)

        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "image": image,
            "original_image": original_image,
            "generator": generator,
            "num_inference_steps": 2,
            "output_type": "numpy",
        }

        return inputs

    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available(),
        reason="XFormers attention is only available with CUDA and `xformers` installed",
    )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3)

    def test_save_load_optional_components(self):
        self._test_save_load_optional_components()

    @unittest.skipIf(torch_device != "cuda", reason="float16 requires CUDA")
    def test_save_load_float16(self):
        # Due to non-determinism in save load of the hf-internal-testing/tiny-random-t5 text encoder
        super().test_save_load_float16(expected_max_diff=1e-1)

    def test_attention_slicing_forward_pass(self):
        self._test_attention_slicing_forward_pass(expected_max_diff=1e-2)

    def test_save_load_local(self):
        self._test_save_load_local()

    def test_inference_batch_single_identical(self):
        self._test_inference_batch_single_identical(
            expected_max_diff=1e-2,
        )
33
1
import gc
import unittest

import numpy as np
import torch

from diffusers import StableDiffusionKDiffusionPipeline
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu


enable_full_determinism()


@slow
@require_torch_gpu
class StableDiffusionKDiffusionPipelineIntegrationTests(unittest.TestCase):
    """Slow GPU integration tests for the k-diffusion Stable Diffusion pipeline.

    Fixes over the mangled original: the three test methods shared one name
    (only the last ran), pipelines were never moved to ``torch_device``
    (an undefined ``_a`` was used), and ``use_karras_sigmas`` received an
    undefined value instead of True.
    """

    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_stable_diffusion_1(self):
        sd_pipe = StableDiffusionKDiffusionPipeline.from_pretrained("CompVis/stable-diffusion-v1-4")
        sd_pipe = sd_pipe.to(torch_device)
        sd_pipe.set_progress_bar_config(disable=None)

        sd_pipe.set_scheduler("sample_euler")

        prompt = "A painting of a squirrel eating a burger"
        generator = torch.manual_seed(0)
        output = sd_pipe([prompt], generator=generator, guidance_scale=9.0, num_inference_steps=20, output_type="np")

        image = output.images

        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.0447, 0.0492, 0.0468, 0.0408, 0.0383, 0.0408, 0.0354, 0.0380, 0.0339])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_stable_diffusion_2(self):
        sd_pipe = StableDiffusionKDiffusionPipeline.from_pretrained("stabilityai/stable-diffusion-2-1-base")
        sd_pipe = sd_pipe.to(torch_device)
        sd_pipe.set_progress_bar_config(disable=None)

        sd_pipe.set_scheduler("sample_euler")

        prompt = "A painting of a squirrel eating a burger"
        generator = torch.manual_seed(0)
        output = sd_pipe([prompt], generator=generator, guidance_scale=9.0, num_inference_steps=20, output_type="np")

        image = output.images

        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.1237, 0.1320, 0.1438, 0.1359, 0.1390, 0.1132, 0.1277, 0.1175, 0.1112])
        # Looser tolerance kept from the original (sampler is less stable here).
        assert np.abs(image_slice.flatten() - expected_slice).max() < 5e-1

    def test_stable_diffusion_karras_sigmas(self):
        sd_pipe = StableDiffusionKDiffusionPipeline.from_pretrained("stabilityai/stable-diffusion-2-1-base")
        sd_pipe = sd_pipe.to(torch_device)
        sd_pipe.set_progress_bar_config(disable=None)

        sd_pipe.set_scheduler("sample_dpmpp_2m")

        prompt = "A painting of a squirrel eating a burger"
        generator = torch.manual_seed(0)
        output = sd_pipe(
            [prompt],
            generator=generator,
            guidance_scale=7.5,
            num_inference_steps=15,
            output_type="np",
            use_karras_sigmas=True,
        )

        image = output.images

        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array(
            [0.11381689, 0.12112921, 0.1389457, 0.12549606, 0.1244964, 0.10831517, 0.11562866, 0.10867816, 0.10499048]
        )
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
33
import math class __magic_name__ : '''simple docstring''' def SCREAMING_SNAKE_CASE__ ( self:Optional[int] , _a:list[list[float]] , _a:list[int] ): snake_case__ = 0.0 snake_case__ = 0.0 for i in range(len(_a ) ): da += math.pow((sample[i] - weights[0][i]) , 2 ) da += math.pow((sample[i] - weights[1][i]) , 2 ) return 0 if da > da else 1 return 0 def SCREAMING_SNAKE_CASE__ ( self:Tuple , _a:list[list[int | float]] , _a:list[int] , _a:int , _a:float ): for i in range(len(_a ) ): weights[j][i] += alpha * (sample[i] - weights[j][i]) return weights def SCREAMING_SNAKE_CASE ( ) -> None: # Training Examples ( m, n ) snake_case__ = [[1, 1, 0, 0], [0, 0, 0, 1], [1, 0, 0, 0], [0, 0, 1, 1]] # weight initialization ( n, C ) snake_case__ = [[0.2, 0.6, 0.5, 0.9], [0.8, 0.4, 0.7, 0.3]] # training snake_case__ = SelfOrganizingMap() snake_case__ = 3 snake_case__ = 0.5 for _ in range(__lowerCAmelCase ): for j in range(len(__lowerCAmelCase ) ): # training sample snake_case__ = training_samples[j] # Compute the winning vector snake_case__ = self_organizing_map.get_winner(__lowerCAmelCase , __lowerCAmelCase ) # Update the winning vector snake_case__ = self_organizing_map.update(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) # classify test sample snake_case__ = [0, 0, 0, 1] snake_case__ = self_organizing_map.get_winner(__lowerCAmelCase , __lowerCAmelCase ) # results print(F"""Clusters that the test sample belongs to : {winner}""" ) print(F"""Weights that have been trained : {weights}""" ) # running the main() function if __name__ == "__main__": main()
33
1
from ....configuration_utils import PretrainedConfig from ....utils import logging lowerCamelCase__ : Union[str, Any] = logging.get_logger(__name__) lowerCamelCase__ : str = { """CarlCochet/trajectory-transformer-halfcheetah-medium-v2""": ( """https://huggingface.co/CarlCochet/trajectory-transformer-halfcheetah-medium-v2/resolve/main/config.json""" ), # See all TrajectoryTransformer models at https://huggingface.co/models?filter=trajectory_transformer } class __magic_name__ (snake_case_ ): '''simple docstring''' __lowercase : Optional[int] = 'trajectory_transformer' __lowercase : List[Any] = ['past_key_values'] __lowercase : Dict = { 'hidden_size': 'n_embd', 'num_attention_heads': 'n_head', 'num_hidden_layers': 'n_layer', } def __init__( self:List[Any] , _a:Any=1_00 , _a:List[str]=5 , _a:Optional[Any]=1 , _a:List[Any]=1 , _a:Optional[int]=2_49 , _a:Dict=6 , _a:int=17 , _a:Union[str, Any]=25 , _a:Optional[Any]=4 , _a:Union[str, Any]=4 , _a:int=1_28 , _a:Optional[Any]=0.1 , _a:Tuple=0.1 , _a:List[Any]=0.1 , _a:List[str]=0.0006 , _a:List[str]=5_12 , _a:Tuple=0.02 , _a:Optional[Any]=1e-12 , _a:Tuple=1 , _a:Dict=True , _a:str=1 , _a:Union[str, Any]=5_02_56 , _a:List[str]=5_02_56 , **_a:List[str] , ): snake_case__ = vocab_size snake_case__ = action_weight snake_case__ = reward_weight snake_case__ = value_weight snake_case__ = max_position_embeddings snake_case__ = block_size snake_case__ = action_dim snake_case__ = observation_dim snake_case__ = transition_dim snake_case__ = learning_rate snake_case__ = n_layer snake_case__ = n_head snake_case__ = n_embd snake_case__ = embd_pdrop snake_case__ = attn_pdrop snake_case__ = resid_pdrop snake_case__ = initializer_range snake_case__ = layer_norm_eps snake_case__ = kaiming_initializer_range snake_case__ = use_cache super().__init__(pad_token_id=_a , bos_token_id=_a , eos_token_id=_a , **_a )
33
from __future__ import annotations

from statistics import mean


def calculate_waitingtime(arrival_time: list[int], burst_time: list[int], no_of_processes: int) -> list[int]:
    """Compute per-process waiting times under non-preemptive shortest-job-first.

    Fixes over the mangled original: both functions were named
    ``SCREAMING_SNAKE_CASE`` (the second shadowed the first, and the script
    below calls ``calculate_waitingtime``), and parameter names were collapsed.
    """
    waiting_time = [0] * no_of_processes
    remaining_time = [0] * no_of_processes

    # Initialize remaining_time to waiting_time.
    for i in range(no_of_processes):
        remaining_time[i] = burst_time[i]

    ready_process: list[int] = []
    completed = 0
    total_time = 0

    # When processes are not completed,
    # A process whose arrival time has passed \
    # and has remaining execution time is put into the ready_process.
    # The shortest process in the ready_process, target_process is executed.
    while completed != no_of_processes:
        ready_process = []
        target_process = -1

        for i in range(no_of_processes):
            if (arrival_time[i] <= total_time) and (remaining_time[i] > 0):
                ready_process.append(i)

        if len(ready_process) > 0:
            target_process = ready_process[0]
            for i in ready_process:
                if remaining_time[i] < remaining_time[target_process]:
                    target_process = i
            # Non-preemptive: run the chosen job to completion.
            total_time += burst_time[target_process]
            completed += 1
            remaining_time[target_process] = 0
            waiting_time[target_process] = (
                total_time - arrival_time[target_process] - burst_time[target_process]
            )
        else:
            # No job has arrived yet; advance the clock.
            total_time += 1

    return waiting_time


def calculate_turnaroundtime(burst_time: list[int], no_of_processes: int, waiting_time: list[int]) -> list[int]:
    """Turnaround time = burst time + waiting time, per process."""
    turn_around_time = [0] * no_of_processes
    for i in range(no_of_processes):
        turn_around_time[i] = burst_time[i] + waiting_time[i]
    return turn_around_time


if __name__ == "__main__":
    print("[TEST CASE 01]")

    no_of_processes = 4
    burst_time = [2, 5, 3, 7]
    arrival_time = [0, 0, 0, 0]
    waiting_time = calculate_waitingtime(arrival_time, burst_time, no_of_processes)
    turn_around_time = calculate_turnaroundtime(burst_time, no_of_processes, waiting_time)

    # Printing the Result
    print("PID\tBurst Time\tArrival Time\tWaiting Time\tTurnaround Time")
    for i, process_id in enumerate(list(range(1, 5))):
        print(
            f"{process_id}\t{burst_time[i]}\t\t\t{arrival_time[i]}\t\t\t\t"
            f"{waiting_time[i]}\t\t\t\t{turn_around_time[i]}"
        )
    print(f"\nAverage waiting time = {mean(waiting_time):.5f}")
    print(f"Average turnaround time = {mean(turn_around_time):.5f}")
33
1
import collections import importlib.util import os import re from pathlib import Path lowerCamelCase__ : str = """src/transformers""" # Matches is_xxx_available() lowerCamelCase__ : List[str] = re.compile(r"""is\_([a-z_]*)_available()""") # Catches a one-line _import_struct = {xxx} lowerCamelCase__ : Any = re.compile(r"""^_import_structure\s+=\s+\{([^\}]+)\}""") # Catches a line with a key-values pattern: "bla": ["foo", "bar"] lowerCamelCase__ : Optional[Any] = re.compile(r"""\s+\"\S*\":\s+\[([^\]]*)\]""") # Catches a line if not is_foo_available lowerCamelCase__ : Dict = re.compile(r"""^\s*if\s+not\s+is\_[a-z_]*\_available\(\)""") # Catches a line _import_struct["bla"].append("foo") lowerCamelCase__ : Tuple = re.compile(r"""^\s*_import_structure\[\"\S*\"\]\.append\(\"(\S*)\"\)""") # Catches a line _import_struct["bla"].extend(["foo", "bar"]) or _import_struct["bla"] = ["foo", "bar"] lowerCamelCase__ : Dict = re.compile(r"""^\s*_import_structure\[\S*\](?:\.extend\(|\s*=\s+)\[([^\]]*)\]""") # Catches a line with an object between quotes and a comma: "MyModel", lowerCamelCase__ : List[Any] = re.compile("""^\s+\"([^\"]+)\",""") # Catches a line with objects between brackets only: ["foo", "bar"], lowerCamelCase__ : Dict = re.compile("""^\s+\[([^\]]+)\]""") # Catches a line with from foo import bar, bla, boo lowerCamelCase__ : List[Any] = re.compile(r"""\s+from\s+\S*\s+import\s+([^\(\s].*)\n""") # Catches a line with try: lowerCamelCase__ : Dict = re.compile(r"""^\s*try:""") # Catches a line with else: lowerCamelCase__ : Dict = re.compile(r"""^\s*else:""") def SCREAMING_SNAKE_CASE ( __lowerCAmelCase ) -> str: if _re_test_backend.search(__lowerCAmelCase ) is None: return None snake_case__ = [b[0] for b in _re_backend.findall(__lowerCAmelCase )] backends.sort() return "_and_".join(__lowerCAmelCase ) def SCREAMING_SNAKE_CASE ( __lowerCAmelCase ) -> List[str]: with open(__lowerCAmelCase , '''r''' , encoding='''utf-8''' , newline='''\n''' ) as f: snake_case__ = f.readlines() 
snake_case__ = 0 while line_index < len(__lowerCAmelCase ) and not lines[line_index].startswith('''_import_structure = {''' ): line_index += 1 # If this is a traditional init, just return. if line_index >= len(__lowerCAmelCase ): return None # First grab the objects without a specific backend in _import_structure snake_case__ = [] while not lines[line_index].startswith('''if TYPE_CHECKING''' ) and find_backend(lines[line_index] ) is None: snake_case__ = lines[line_index] # If we have everything on a single line, let's deal with it. if _re_one_line_import_struct.search(__lowerCAmelCase ): snake_case__ = _re_one_line_import_struct.search(__lowerCAmelCase ).groups()[0] snake_case__ = re.findall('''\[([^\]]+)\]''' , __lowerCAmelCase ) for imp in imports: objects.extend([obj[1:-1] for obj in imp.split(''', ''' )] ) line_index += 1 continue snake_case__ = _re_import_struct_key_value.search(__lowerCAmelCase ) if single_line_import_search is not None: snake_case__ = [obj[1:-1] for obj in single_line_import_search.groups()[0].split(''', ''' ) if len(__lowerCAmelCase ) > 0] objects.extend(__lowerCAmelCase ) elif line.startswith(''' ''' * 8 + '''"''' ): objects.append(line[9:-3] ) line_index += 1 snake_case__ = {'''none''': objects} # Let's continue with backend-specific objects in _import_structure while not lines[line_index].startswith('''if TYPE_CHECKING''' ): # If the line is an if not is_backend_available, we grab all objects associated. 
snake_case__ = find_backend(lines[line_index] ) # Check if the backend declaration is inside a try block: if _re_try.search(lines[line_index - 1] ) is None: snake_case__ = None if backend is not None: line_index += 1 # Scroll until we hit the else block of try-except-else while _re_else.search(lines[line_index] ) is None: line_index += 1 line_index += 1 snake_case__ = [] # Until we unindent, add backend objects to the list while len(lines[line_index] ) <= 1 or lines[line_index].startswith(''' ''' * 4 ): snake_case__ = lines[line_index] if _re_import_struct_add_one.search(__lowerCAmelCase ) is not None: objects.append(_re_import_struct_add_one.search(__lowerCAmelCase ).groups()[0] ) elif _re_import_struct_add_many.search(__lowerCAmelCase ) is not None: snake_case__ = _re_import_struct_add_many.search(__lowerCAmelCase ).groups()[0].split(''', ''' ) snake_case__ = [obj[1:-1] for obj in imports if len(__lowerCAmelCase ) > 0] objects.extend(__lowerCAmelCase ) elif _re_between_brackets.search(__lowerCAmelCase ) is not None: snake_case__ = _re_between_brackets.search(__lowerCAmelCase ).groups()[0].split(''', ''' ) snake_case__ = [obj[1:-1] for obj in imports if len(__lowerCAmelCase ) > 0] objects.extend(__lowerCAmelCase ) elif _re_quote_object.search(__lowerCAmelCase ) is not None: objects.append(_re_quote_object.search(__lowerCAmelCase ).groups()[0] ) elif line.startswith(''' ''' * 8 + '''"''' ): objects.append(line[9:-3] ) elif line.startswith(''' ''' * 12 + '''"''' ): objects.append(line[13:-3] ) line_index += 1 snake_case__ = objects else: line_index += 1 # At this stage we are in the TYPE_CHECKING part, first grab the objects without a specific backend snake_case__ = [] while ( line_index < len(__lowerCAmelCase ) and find_backend(lines[line_index] ) is None and not lines[line_index].startswith('''else''' ) ): snake_case__ = lines[line_index] snake_case__ = _re_import.search(__lowerCAmelCase ) if single_line_import_search is not None: 
objects.extend(single_line_import_search.groups()[0].split(''', ''' ) ) elif line.startswith(''' ''' * 8 ): objects.append(line[8:-2] ) line_index += 1 snake_case__ = {'''none''': objects} # Let's continue with backend-specific objects while line_index < len(__lowerCAmelCase ): # If the line is an if is_backend_available, we grab all objects associated. snake_case__ = find_backend(lines[line_index] ) # Check if the backend declaration is inside a try block: if _re_try.search(lines[line_index - 1] ) is None: snake_case__ = None if backend is not None: line_index += 1 # Scroll until we hit the else block of try-except-else while _re_else.search(lines[line_index] ) is None: line_index += 1 line_index += 1 snake_case__ = [] # Until we unindent, add backend objects to the list while len(lines[line_index] ) <= 1 or lines[line_index].startswith(''' ''' * 8 ): snake_case__ = lines[line_index] snake_case__ = _re_import.search(__lowerCAmelCase ) if single_line_import_search is not None: objects.extend(single_line_import_search.groups()[0].split(''', ''' ) ) elif line.startswith(''' ''' * 12 ): objects.append(line[12:-2] ) line_index += 1 snake_case__ = objects else: line_index += 1 return import_dict_objects, type_hint_objects def SCREAMING_SNAKE_CASE ( __lowerCAmelCase , __lowerCAmelCase ) -> List[str]: def find_duplicates(__lowerCAmelCase ): return [k for k, v in collections.Counter(__lowerCAmelCase ).items() if v > 1] if list(import_dict_objects.keys() ) != list(type_hint_objects.keys() ): return ["Both sides of the init do not have the same backends!"] snake_case__ = [] for key in import_dict_objects.keys(): snake_case__ = find_duplicates(import_dict_objects[key] ) if duplicate_imports: errors.append(F"""Duplicate _import_structure definitions for: {duplicate_imports}""" ) snake_case__ = find_duplicates(type_hint_objects[key] ) if duplicate_type_hints: errors.append(F"""Duplicate TYPE_CHECKING objects for: {duplicate_type_hints}""" ) if 
sorted(set(import_dict_objects[key] ) ) != sorted(set(type_hint_objects[key] ) ): snake_case__ = '''base imports''' if key == '''none''' else F"""{key} backend""" errors.append(F"""Differences for {name}:""" ) for a in type_hint_objects[key]: if a not in import_dict_objects[key]: errors.append(F""" {a} in TYPE_HINT but not in _import_structure.""" ) for a in import_dict_objects[key]: if a not in type_hint_objects[key]: errors.append(F""" {a} in _import_structure but not in TYPE_HINT.""" ) return errors def SCREAMING_SNAKE_CASE ( ) -> str: snake_case__ = [] for root, _, files in os.walk(__lowerCAmelCase ): if "__init__.py" in files: snake_case__ = os.path.join(__lowerCAmelCase , '''__init__.py''' ) snake_case__ = parse_init(__lowerCAmelCase ) if objects is not None: snake_case__ = analyze_results(*__lowerCAmelCase ) if len(__lowerCAmelCase ) > 0: snake_case__ = F"""Problem in {fname}, both halves do not define the same objects.\n{errors[0]}""" failures.append('''\n'''.join(__lowerCAmelCase ) ) if len(__lowerCAmelCase ) > 0: raise ValueError('''\n\n'''.join(__lowerCAmelCase ) ) def SCREAMING_SNAKE_CASE ( ) -> Tuple: snake_case__ = [] for path, directories, files in os.walk(__lowerCAmelCase ): for folder in directories: # Ignore private modules if folder.startswith('''_''' ): directories.remove(__lowerCAmelCase ) continue # Ignore leftovers from branches (empty folders apart from pycache) if len(list((Path(__lowerCAmelCase ) / folder).glob('''*.py''' ) ) ) == 0: continue snake_case__ = str((Path(__lowerCAmelCase ) / folder).relative_to(__lowerCAmelCase ) ) snake_case__ = short_path.replace(os.path.sep , '''.''' ) submodules.append(__lowerCAmelCase ) for fname in files: if fname == "__init__.py": continue snake_case__ = str((Path(__lowerCAmelCase ) / fname).relative_to(__lowerCAmelCase ) ) snake_case__ = short_path.replace('''.py''' , '''''' ).replace(os.path.sep , '''.''' ) if len(submodule.split('''.''' ) ) == 1: submodules.append(__lowerCAmelCase ) return submodules 
lowerCamelCase__ : Optional[int] = [ """convert_pytorch_checkpoint_to_tf2""", """modeling_flax_pytorch_utils""", ] def SCREAMING_SNAKE_CASE ( ) -> Optional[Any]: # This is to make sure the transformers module imported is the one in the repo. snake_case__ = importlib.util.spec_from_file_location( '''transformers''' , os.path.join(__lowerCAmelCase , '''__init__.py''' ) , submodule_search_locations=[PATH_TO_TRANSFORMERS] , ) snake_case__ = spec.loader.load_module() snake_case__ = [ module for module in get_transformers_submodules() if module not in IGNORE_SUBMODULES and module not in transformers._import_structure.keys() ] if len(__lowerCAmelCase ) > 0: snake_case__ = '''\n'''.join(F"""- {module}""" for module in module_not_registered ) raise ValueError( '''The following submodules are not properly registered in the main init of Transformers:\n''' F"""{list_of_modules}\n""" '''Make sure they appear somewhere in the keys of `_import_structure` with an empty list as value.''' ) if __name__ == "__main__": check_all_inits() check_submodules()
33
lowerCamelCase__ : List[str] = """Alexander Joslin""" import operator as op from .stack import Stack def SCREAMING_SNAKE_CASE ( __lowerCAmelCase ) -> int: snake_case__ = {'''*''': op.mul, '''/''': op.truediv, '''+''': op.add, '''-''': op.sub} snake_case__ = Stack() snake_case__ = Stack() for i in equation: if i.isdigit(): # RULE 1 operand_stack.push(int(__lowerCAmelCase ) ) elif i in operators: # RULE 2 operator_stack.push(__lowerCAmelCase ) elif i == ")": # RULE 4 snake_case__ = operator_stack.peek() operator_stack.pop() snake_case__ = operand_stack.peek() operand_stack.pop() snake_case__ = operand_stack.peek() operand_stack.pop() snake_case__ = operators[opr](__lowerCAmelCase , __lowerCAmelCase ) operand_stack.push(__lowerCAmelCase ) # RULE 5 return operand_stack.peek() if __name__ == "__main__": lowerCamelCase__ : Optional[Any] = """(5 + ((4 * 2) * (2 + 3)))""" # answer = 45 print(F"""{equation} = {dijkstras_two_stack_algorithm(equation)}""")
33
1
from collections.abc import Sequence def SCREAMING_SNAKE_CASE ( __lowerCAmelCase , __lowerCAmelCase = False ) -> float: if not arr: return 0 snake_case__ = 0 if allow_empty_subarrays else float('''-inf''' ) snake_case__ = 0.0 for num in arr: snake_case__ = max(0 if allow_empty_subarrays else num , curr_sum + num ) snake_case__ = max(__lowerCAmelCase , __lowerCAmelCase ) return max_sum if __name__ == "__main__": from doctest import testmod testmod() lowerCamelCase__ : Any = [-2, 1, -3, 4, -1, 2, 1, -5, 4] print(F"""{max_subarray_sum(nums) = }""")
33
import warnings from ...utils import logging from .image_processing_perceiver import PerceiverImageProcessor lowerCamelCase__ : int = logging.get_logger(__name__) class __magic_name__ (snake_case_ ): '''simple docstring''' def __init__( self:List[Any] , *_a:Dict , **_a:Tuple ): warnings.warn( '''The class PerceiverFeatureExtractor is deprecated and will be removed in version 5 of Transformers.''' ''' Please use PerceiverImageProcessor instead.''' , _a , ) super().__init__(*_a , **_a )
33
1
from itertools import zip_longest import requests from bsa import BeautifulSoup from pandas import DataFrame def SCREAMING_SNAKE_CASE ( __lowerCAmelCase = "laptop" ) -> DataFrame: snake_case__ = F"""https://www.amazon.in/laptop/s?k={product}""" snake_case__ = { '''User-Agent''': '''Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko)Chrome/44.0.2403.157 Safari/537.36''', '''Accept-Language''': '''en-US, en;q=0.5''', } snake_case__ = BeautifulSoup(requests.get(__lowerCAmelCase , headers=__lowerCAmelCase ).text ) # Initialize a Pandas dataframe with the column titles snake_case__ = DataFrame( columns=[ '''Product Title''', '''Product Link''', '''Current Price of the product''', '''Product Rating''', '''MRP of the product''', '''Discount''', ] ) # Loop through each entry and store them in the dataframe for item, _ in zip_longest( soup.find_all( '''div''' , attrs={'''class''': '''s-result-item''', '''data-component-type''': '''s-search-result'''} , ) , soup.find_all('''div''' , attrs={'''class''': '''a-row a-size-base a-color-base'''} ) , ): try: snake_case__ = item.ha.text snake_case__ = '''https://www.amazon.in/''' + item.ha.a['''href'''] snake_case__ = item.find('''span''' , attrs={'''class''': '''a-offscreen'''} ).text try: snake_case__ = item.find('''span''' , attrs={'''class''': '''a-icon-alt'''} ).text except AttributeError: snake_case__ = '''Not available''' try: snake_case__ = ( '''₹''' + item.find( '''span''' , attrs={'''class''': '''a-price a-text-price'''} ).text.split('''₹''' )[1] ) except AttributeError: snake_case__ = '''''' try: snake_case__ = float( ( ( float(product_mrp.strip('''₹''' ).replace(''',''' , '''''' ) ) - float(product_price.strip('''₹''' ).replace(''',''' , '''''' ) ) ) / float(product_mrp.strip('''₹''' ).replace(''',''' , '''''' ) ) ) * 100 ) except ValueError: snake_case__ = float('''nan''' ) except AttributeError: pass snake_case__ = [ product_title, product_link, product_price, product_rating, product_mrp, discount, ] 
snake_case__ = ''' ''' snake_case__ = ''' ''' data_frame.index += 1 return data_frame if __name__ == "__main__": lowerCamelCase__ : Optional[Any] = """headphones""" get_amazon_product_data(product).to_csv(F"""Amazon Product Data for {product}.csv""")
33
from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_tf_available, is_tokenizers_available, is_torch_available, ) lowerCamelCase__ : Tuple = { """configuration_roberta""": ["""ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP""", """RobertaConfig""", """RobertaOnnxConfig"""], """tokenization_roberta""": ["""RobertaTokenizer"""], } try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCamelCase__ : Tuple = ["""RobertaTokenizerFast"""] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCamelCase__ : Optional[int] = [ """ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST""", """RobertaForCausalLM""", """RobertaForMaskedLM""", """RobertaForMultipleChoice""", """RobertaForQuestionAnswering""", """RobertaForSequenceClassification""", """RobertaForTokenClassification""", """RobertaModel""", """RobertaPreTrainedModel""", ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCamelCase__ : List[str] = [ """TF_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST""", """TFRobertaForCausalLM""", """TFRobertaForMaskedLM""", """TFRobertaForMultipleChoice""", """TFRobertaForQuestionAnswering""", """TFRobertaForSequenceClassification""", """TFRobertaForTokenClassification""", """TFRobertaMainLayer""", """TFRobertaModel""", """TFRobertaPreTrainedModel""", ] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCamelCase__ : str = [ """FlaxRobertaForCausalLM""", """FlaxRobertaForMaskedLM""", """FlaxRobertaForMultipleChoice""", """FlaxRobertaForQuestionAnswering""", """FlaxRobertaForSequenceClassification""", """FlaxRobertaForTokenClassification""", """FlaxRobertaModel""", """FlaxRobertaPreTrainedModel""", ] if TYPE_CHECKING: from .configuration_roberta 
import ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP, RobertaConfig, RobertaOnnxConfig from .tokenization_roberta import RobertaTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_roberta_fast import RobertaTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_roberta import ( ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST, RobertaForCausalLM, RobertaForMaskedLM, RobertaForMultipleChoice, RobertaForQuestionAnswering, RobertaForSequenceClassification, RobertaForTokenClassification, RobertaModel, RobertaPreTrainedModel, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_roberta import ( TF_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST, TFRobertaForCausalLM, TFRobertaForMaskedLM, TFRobertaForMultipleChoice, TFRobertaForQuestionAnswering, TFRobertaForSequenceClassification, TFRobertaForTokenClassification, TFRobertaMainLayer, TFRobertaModel, TFRobertaPreTrainedModel, ) try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_roberta import ( FlaxRobertaForCausalLM, FlaxRobertaForMaskedLM, FlaxRobertaForMultipleChoice, FlaxRobertaForQuestionAnswering, FlaxRobertaForSequenceClassification, FlaxRobertaForTokenClassification, FlaxRobertaModel, FlaxRobertaPreTrainedModel, ) else: import sys lowerCamelCase__ : List[Any] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
33
1
import unittest from huggingface_hub import hf_hub_download from transformers import MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING, VideoMAEFeatureExtractor from transformers.pipelines import VideoClassificationPipeline, pipeline from transformers.testing_utils import ( is_pipeline_test, nested_simplify, require_decord, require_tf, require_torch, require_torch_or_tf, require_vision, ) from .test_pipelines_common import ANY @is_pipeline_test @require_torch_or_tf @require_vision @require_decord class __magic_name__ (unittest.TestCase ): '''simple docstring''' __lowercase : Tuple = MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING def SCREAMING_SNAKE_CASE__ ( self:Optional[Any] , _a:int , _a:str , _a:Optional[Any] ): snake_case__ = hf_hub_download( repo_id='''nateraw/video-demo''' , filename='''archery.mp4''' , repo_type='''dataset''' ) snake_case__ = VideoClassificationPipeline(model=_a , image_processor=_a , top_k=2 ) snake_case__ = [ example_video_filepath, '''https://huggingface.co/datasets/nateraw/video-demo/resolve/main/archery.mp4''', ] return video_classifier, examples def SCREAMING_SNAKE_CASE__ ( self:List[Any] , _a:Optional[Any] , _a:Optional[Any] ): for example in examples: snake_case__ = video_classifier(_a ) self.assertEqual( _a , [ {'''score''': ANY(_a ), '''label''': ANY(_a )}, {'''score''': ANY(_a ), '''label''': ANY(_a )}, ] , ) @require_torch def SCREAMING_SNAKE_CASE__ ( self:Union[str, Any] ): snake_case__ = '''hf-internal-testing/tiny-random-VideoMAEForVideoClassification''' snake_case__ = VideoMAEFeatureExtractor( size={'''shortest_edge''': 10} , crop_size={'''height''': 10, '''width''': 10} ) snake_case__ = pipeline( '''video-classification''' , model=_a , feature_extractor=_a , frame_sampling_rate=4 ) snake_case__ = hf_hub_download(repo_id='''nateraw/video-demo''' , filename='''archery.mp4''' , repo_type='''dataset''' ) snake_case__ = video_classifier(_a , top_k=2 ) self.assertEqual( nested_simplify(_a , decimals=4 ) , [{'''score''': 0.5199, '''label''': 
'''LABEL_0'''}, {'''score''': 0.4801, '''label''': '''LABEL_1'''}] , ) snake_case__ = video_classifier( [ video_file_path, video_file_path, ] , top_k=2 , ) self.assertEqual( nested_simplify(_a , decimals=4 ) , [ [{'''score''': 0.5199, '''label''': '''LABEL_0'''}, {'''score''': 0.4801, '''label''': '''LABEL_1'''}], [{'''score''': 0.5199, '''label''': '''LABEL_0'''}, {'''score''': 0.4801, '''label''': '''LABEL_1'''}], ] , ) @require_tf def SCREAMING_SNAKE_CASE__ ( self:Optional[int] ): pass
33
import gc import random import unittest import numpy as np import torch from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer import diffusers from diffusers import ( AutoencoderKL, EulerDiscreteScheduler, StableDiffusionLatentUpscalePipeline, StableDiffusionPipeline, UNetaDConditionModel, ) from diffusers.schedulers import KarrasDiffusionSchedulers from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu from ..pipeline_params import TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin enable_full_determinism() def SCREAMING_SNAKE_CASE ( __lowerCAmelCase ) -> List[Any]: snake_case__ = [tensor.shape for tensor in tensor_list] return all(shape == shapes[0] for shape in shapes[1:] ) class __magic_name__ (snake_case_ ,snake_case_ ,snake_case_ ,unittest.TestCase ): '''simple docstring''' __lowercase : Dict = StableDiffusionLatentUpscalePipeline __lowercase : List[str] = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - { 'height', 'width', 'cross_attention_kwargs', 'negative_prompt_embeds', 'prompt_embeds', } __lowercase : List[Any] = PipelineTesterMixin.required_optional_params - {'num_images_per_prompt'} __lowercase : Any = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS __lowercase : int = frozenset( [] ) # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess __lowercase : List[Any] = frozenset([] ) __lowercase : Any = True @property def SCREAMING_SNAKE_CASE__ ( self:List[str] ): snake_case__ = 1 snake_case__ = 4 snake_case__ = (16, 16) snake_case__ = floats_tensor((batch_size, num_channels) + sizes , rng=random.Random(0 ) ).to(_a ) return image def SCREAMING_SNAKE_CASE__ ( self:Union[str, Any] ): torch.manual_seed(0 ) snake_case__ = UNetaDConditionModel( act_fn='''gelu''' , 
attention_head_dim=8 , norm_num_groups=_a , block_out_channels=[32, 32, 64, 64] , time_cond_proj_dim=1_60 , conv_in_kernel=1 , conv_out_kernel=1 , cross_attention_dim=32 , down_block_types=( '''KDownBlock2D''', '''KCrossAttnDownBlock2D''', '''KCrossAttnDownBlock2D''', '''KCrossAttnDownBlock2D''', ) , in_channels=8 , mid_block_type=_a , only_cross_attention=_a , out_channels=5 , resnet_time_scale_shift='''scale_shift''' , time_embedding_type='''fourier''' , timestep_post_act='''gelu''' , up_block_types=('''KCrossAttnUpBlock2D''', '''KCrossAttnUpBlock2D''', '''KCrossAttnUpBlock2D''', '''KUpBlock2D''') , ) snake_case__ = AutoencoderKL( block_out_channels=[32, 32, 64, 64] , in_channels=3 , out_channels=3 , down_block_types=[ '''DownEncoderBlock2D''', '''DownEncoderBlock2D''', '''DownEncoderBlock2D''', '''DownEncoderBlock2D''', ] , up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D''', '''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] , latent_channels=4 , ) snake_case__ = EulerDiscreteScheduler(prediction_type='''sample''' ) snake_case__ = CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=10_00 , hidden_act='''quick_gelu''' , projection_dim=5_12 , ) snake_case__ = CLIPTextModel(_a ) snake_case__ = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' ) snake_case__ = { '''unet''': model.eval(), '''vae''': vae.eval(), '''scheduler''': scheduler, '''text_encoder''': text_encoder, '''tokenizer''': tokenizer, } return components def SCREAMING_SNAKE_CASE__ ( self:List[Any] , _a:Optional[Any] , _a:List[str]=0 ): if str(_a ).startswith('''mps''' ): snake_case__ = torch.manual_seed(_a ) else: snake_case__ = torch.Generator(device=_a ).manual_seed(_a ) snake_case__ = { '''prompt''': '''A painting of a squirrel eating a burger''', '''image''': self.dummy_image.cpu(), '''generator''': generator, 
'''num_inference_steps''': 2, '''output_type''': '''numpy''', } return inputs def SCREAMING_SNAKE_CASE__ ( self:str ): snake_case__ = '''cpu''' snake_case__ = self.get_dummy_components() snake_case__ = self.pipeline_class(**_a ) pipe.to(_a ) pipe.set_progress_bar_config(disable=_a ) snake_case__ = self.get_dummy_inputs(_a ) snake_case__ = pipe(**_a ).images snake_case__ = image[0, -3:, -3:, -1] self.assertEqual(image.shape , (1, 2_56, 2_56, 3) ) snake_case__ = np.array( [0.47222412, 0.41921633, 0.44717434, 0.46874192, 0.42588258, 0.46150726, 0.4677534, 0.45583832, 0.48579055] ) snake_case__ = np.abs(image_slice.flatten() - expected_slice ).max() self.assertLessEqual(_a , 1e-3 ) def SCREAMING_SNAKE_CASE__ ( self:Union[str, Any] ): super().test_attention_slicing_forward_pass(expected_max_diff=7e-3 ) def SCREAMING_SNAKE_CASE__ ( self:List[Any] ): super().test_cpu_offload_forward_pass(expected_max_diff=3e-3 ) def SCREAMING_SNAKE_CASE__ ( self:str ): super().test_dict_tuple_outputs_equivalent(expected_max_difference=3e-3 ) def SCREAMING_SNAKE_CASE__ ( self:Any ): super().test_inference_batch_single_identical(expected_max_diff=7e-3 ) def SCREAMING_SNAKE_CASE__ ( self:Tuple ): super().test_pt_np_pil_outputs_equivalent(expected_max_diff=3e-3 ) def SCREAMING_SNAKE_CASE__ ( self:Dict ): super().test_save_load_local(expected_max_difference=3e-3 ) def SCREAMING_SNAKE_CASE__ ( self:str ): super().test_save_load_optional_components(expected_max_difference=3e-3 ) def SCREAMING_SNAKE_CASE__ ( self:Any ): snake_case__ = [ '''DDIMScheduler''', '''DDPMScheduler''', '''PNDMScheduler''', '''HeunDiscreteScheduler''', '''EulerAncestralDiscreteScheduler''', '''KDPM2DiscreteScheduler''', '''KDPM2AncestralDiscreteScheduler''', '''DPMSolverSDEScheduler''', ] snake_case__ = self.get_dummy_components() snake_case__ = self.pipeline_class(**_a ) # make sure that PNDM does not need warm-up pipe.scheduler.register_to_config(skip_prk_steps=_a ) pipe.to(_a ) pipe.set_progress_bar_config(disable=_a ) 
snake_case__ = self.get_dummy_inputs(_a ) snake_case__ = 2 snake_case__ = [] for scheduler_enum in KarrasDiffusionSchedulers: if scheduler_enum.name in skip_schedulers: # no sigma schedulers are not supported # no schedulers continue snake_case__ = getattr(_a , scheduler_enum.name ) snake_case__ = scheduler_cls.from_config(pipe.scheduler.config ) snake_case__ = pipe(**_a )[0] outputs.append(_a ) assert check_same_shape(_a ) @require_torch_gpu @slow class __magic_name__ (unittest.TestCase ): '''simple docstring''' def SCREAMING_SNAKE_CASE__ ( self:Optional[Any] ): super().tearDown() gc.collect() torch.cuda.empty_cache() def SCREAMING_SNAKE_CASE__ ( self:str ): snake_case__ = torch.manual_seed(33 ) snake_case__ = StableDiffusionPipeline.from_pretrained('''CompVis/stable-diffusion-v1-4''' , torch_dtype=torch.floataa ) pipe.to('''cuda''' ) snake_case__ = StableDiffusionLatentUpscalePipeline.from_pretrained( '''stabilityai/sd-x2-latent-upscaler''' , torch_dtype=torch.floataa ) upscaler.to('''cuda''' ) snake_case__ = '''a photo of an astronaut high resolution, unreal engine, ultra realistic''' snake_case__ = pipe(_a , generator=_a , output_type='''latent''' ).images snake_case__ = upscaler( prompt=_a , image=_a , num_inference_steps=20 , guidance_scale=0 , generator=_a , output_type='''np''' , ).images[0] snake_case__ = load_numpy( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/latent-upscaler/astronaut_1024.npy''' ) assert np.abs((expected_image - image).mean() ) < 5e-2 def SCREAMING_SNAKE_CASE__ ( self:Optional[int] ): snake_case__ = torch.manual_seed(33 ) snake_case__ = StableDiffusionLatentUpscalePipeline.from_pretrained( '''stabilityai/sd-x2-latent-upscaler''' , torch_dtype=torch.floataa ) upscaler.to('''cuda''' ) snake_case__ = '''the temple of fire by Ross Tran and Gerardo Dottori, oil on canvas''' snake_case__ = load_image( 
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/latent-upscaler/fire_temple_512.png''' ) snake_case__ = upscaler( prompt=_a , image=_a , num_inference_steps=20 , guidance_scale=0 , generator=_a , output_type='''np''' , ).images[0] snake_case__ = load_numpy( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/latent-upscaler/fire_temple_1024.npy''' ) assert np.abs((expected_image - image).max() ) < 5e-2
33
1
from collections import OrderedDict from typing import Mapping from packaging import version from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging lowerCamelCase__ : List[Any] = logging.get_logger(__name__) lowerCamelCase__ : Optional[int] = { """facebook/data2vec-vision-base-ft""": ( """https://huggingface.co/facebook/data2vec-vision-base-ft/resolve/main/config.json""" ), } class __magic_name__ (snake_case_ ): '''simple docstring''' __lowercase : Optional[int] = 'data2vec-vision' def __init__( self:int , _a:Tuple=7_68 , _a:int=12 , _a:Any=12 , _a:Optional[int]=30_72 , _a:Optional[int]="gelu" , _a:Any=0.0 , _a:Any=0.0 , _a:List[str]=0.02 , _a:Dict=1e-12 , _a:Tuple=2_24 , _a:Any=16 , _a:str=3 , _a:str=False , _a:Union[str, Any]=False , _a:Optional[int]=False , _a:Any=False , _a:Dict=0.1 , _a:Dict=0.1 , _a:str=True , _a:str=[3, 5, 7, 11] , _a:List[str]=[1, 2, 3, 6] , _a:List[str]=True , _a:Any=0.4 , _a:str=2_56 , _a:Union[str, Any]=1 , _a:int=False , _a:Optional[int]=2_55 , **_a:Dict , ): super().__init__(**_a ) snake_case__ = hidden_size snake_case__ = num_hidden_layers snake_case__ = num_attention_heads snake_case__ = intermediate_size snake_case__ = hidden_act snake_case__ = hidden_dropout_prob snake_case__ = attention_probs_dropout_prob snake_case__ = initializer_range snake_case__ = layer_norm_eps snake_case__ = image_size snake_case__ = patch_size snake_case__ = num_channels snake_case__ = use_mask_token snake_case__ = use_absolute_position_embeddings snake_case__ = use_relative_position_bias snake_case__ = use_shared_relative_position_bias snake_case__ = layer_scale_init_value snake_case__ = drop_path_rate snake_case__ = use_mean_pooling # decode head attributes (semantic segmentation) snake_case__ = out_indices snake_case__ = pool_scales # auxiliary head attributes (semantic segmentation) snake_case__ = use_auxiliary_head snake_case__ = auxiliary_loss_weight snake_case__ = auxiliary_channels 
snake_case__ = auxiliary_num_convs snake_case__ = auxiliary_concat_input snake_case__ = semantic_loss_ignore_index class __magic_name__ (snake_case_ ): '''simple docstring''' __lowercase : Any = version.parse('1.11' ) @property def SCREAMING_SNAKE_CASE__ ( self:List[str] ): return OrderedDict( [ ('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}), ] ) @property def SCREAMING_SNAKE_CASE__ ( self:Tuple ): return 1e-4
33
import shutil
import tempfile
import unittest

import numpy as np
import pytest

from transformers import is_speech_available, is_vision_available
from transformers.testing_utils import require_torch


if is_vision_available():
    from transformers import TvltImageProcessor
if is_speech_available():
    from transformers import TvltFeatureExtractor

from transformers import TvltProcessor


@require_torch
class __magic_name__ (unittest.TestCase ):
    """Tests for TvltProcessor: save/load round-trip and delegation to the
    image processor and audio feature extractor.

    NOTE(review): every method below is obfuscated to the same name
    ``SCREAMING_SNAKE_CASE__``, so on a real class only the last definition
    would survive; bodies are kept byte-identical.
    """

    def SCREAMING_SNAKE_CASE__ ( self:str ):
        # Fixture setup: checkpoint id and a scratch directory.
        snake_case__ = '''ZinengTang/tvlt-base'''
        snake_case__ = tempfile.mkdtemp()

    def SCREAMING_SNAKE_CASE__ ( self:Dict , **_a:List[Any] ):
        # Helper: image processor loaded from the test checkpoint.
        return TvltImageProcessor.from_pretrained(self.checkpoint , **_a )

    def SCREAMING_SNAKE_CASE__ ( self:Optional[Any] , **_a:Tuple ):
        # Helper: audio feature extractor loaded from the test checkpoint.
        return TvltFeatureExtractor.from_pretrained(self.checkpoint , **_a )

    def SCREAMING_SNAKE_CASE__ ( self:Dict ):
        # Fixture teardown: remove the scratch directory.
        shutil.rmtree(self.tmpdirname )

    def SCREAMING_SNAKE_CASE__ ( self:List[str] ):
        # save_pretrained/from_pretrained round-trip preserves component types.
        snake_case__ = self.get_image_processor()
        snake_case__ = self.get_feature_extractor()
        snake_case__ = TvltProcessor(image_processor=_a , feature_extractor=_a )
        processor.save_pretrained(self.tmpdirname )
        snake_case__ = TvltProcessor.from_pretrained(self.tmpdirname )
        self.assertIsInstance(processor.feature_extractor , _a )
        self.assertIsInstance(processor.image_processor , _a )

    def SCREAMING_SNAKE_CASE__ ( self:Dict ):
        # Audio-only call matches the feature extractor's direct output.
        snake_case__ = self.get_image_processor()
        snake_case__ = self.get_feature_extractor()
        snake_case__ = TvltProcessor(image_processor=_a , feature_extractor=_a )
        snake_case__ = np.ones([1_20_00] )
        snake_case__ = feature_extractor(_a , return_tensors='''np''' )
        snake_case__ = processor(audio=_a , return_tensors='''np''' )
        for key in audio_dict.keys():
            self.assertAlmostEqual(audio_dict[key].sum() , input_processor[key].sum() , delta=1e-2 )

    def SCREAMING_SNAKE_CASE__ ( self:List[Any] ):
        # Image-only call matches the image processor's direct output.
        snake_case__ = self.get_image_processor()
        snake_case__ = self.get_feature_extractor()
        snake_case__ = TvltProcessor(image_processor=_a , feature_extractor=_a )
        snake_case__ = np.ones([3, 2_24, 2_24] )
        snake_case__ = image_processor(_a , return_tensors='''np''' )
        snake_case__ = processor(images=_a , return_tensors='''np''' )
        for key in image_dict.keys():
            self.assertAlmostEqual(image_dict[key].sum() , input_processor[key].sum() , delta=1e-2 )

    def SCREAMING_SNAKE_CASE__ ( self:Any ):
        # Joint audio+image call yields the expected keys; empty call raises.
        snake_case__ = self.get_image_processor()
        snake_case__ = self.get_feature_extractor()
        snake_case__ = TvltProcessor(image_processor=_a , feature_extractor=_a )
        snake_case__ = np.ones([1_20_00] )
        snake_case__ = np.ones([3, 2_24, 2_24] )
        snake_case__ = processor(audio=_a , images=_a )
        self.assertListEqual(list(inputs.keys() ) , ['''audio_values''', '''audio_mask''', '''pixel_values''', '''pixel_mask'''] )
        # test if it raises when no input is passed
        with pytest.raises(_a ):
            processor()

    def SCREAMING_SNAKE_CASE__ ( self:List[Any] ):
        # model_input_names is the concatenation of both components' names.
        snake_case__ = self.get_image_processor()
        snake_case__ = self.get_feature_extractor()
        snake_case__ = TvltProcessor(image_processor=_a , feature_extractor=_a )
        self.assertListEqual(
            processor.model_input_names ,
            image_processor.model_input_names + feature_extractor.model_input_names ,
            msg='''`processor` and `image_processor`+`feature_extractor` model input names do not match''' , )
33
1
# DISCLAIMER: This file is strongly influenced by https://github.com/yang-song/score_sde_pytorch

import math
from dataclasses import dataclass
from typing import Optional, Tuple, Union

import torch

from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, randn_tensor
from .scheduling_utils import SchedulerMixin, SchedulerOutput


@dataclass
class __magic_name__ (snake_case_ ):
    """Step output for the SDE-VE scheduler: the sampled previous state and
    its mean before the stochastic (diffusion) term is added."""

    # Sampled previous state x_{t-1}.
    __lowercase : torch.FloatTensor
    # Mean of x_{t-1} prior to adding the diffusion noise.
    __lowercase : torch.FloatTensor


class __magic_name__ (snake_case_ ,snake_case_ ):
    """Variance-exploding (VE) SDE scheduler for score-based generative
    modelling.

    NOTE(review): method names are obfuscated to a shared identifier and the
    ``__init__`` parameters all share the name ``_a`` (duplicate-argument
    SyntaxError); bodies reference the original parameter names and are kept
    byte-identical.
    """

    __lowercase : Any = 1

    @register_to_config
    def __init__( self:str , _a:int = 20_00 , _a:float = 0.15 , _a:float = 0.01 , _a:float = 1348.0 , _a:float = 1e-5 , _a:int = 1 , ):
        # standard deviation of the initial noise distribution
        snake_case__ = sigma_max
        # setable values
        snake_case__ = None
        self.set_sigmas(_a , _a , _a , _a )

    def SCREAMING_SNAKE_CASE__ ( self:List[str] , _a:torch.FloatTensor , _a:Optional[int] = None ):
        # scale_model_input: the VE SDE formulation requires no input scaling.
        return sample

    def SCREAMING_SNAKE_CASE__ ( self:str , _a:int , _a:float = None , _a:Union[str, torch.device] = None ):
        # set_timesteps: continuous timesteps from 1 down to sampling_eps.
        snake_case__ = sampling_eps if sampling_eps is not None else self.config.sampling_eps
        snake_case__ = torch.linspace(1 , _a , _a , device=_a )

    def SCREAMING_SNAKE_CASE__ ( self:int , _a:int , _a:float = None , _a:float = None , _a:float = None ):
        # set_sigmas: geometric noise schedule between sigma_min and sigma_max.
        snake_case__ = sigma_min if sigma_min is not None else self.config.sigma_min
        snake_case__ = sigma_max if sigma_max is not None else self.config.sigma_max
        snake_case__ = sampling_eps if sampling_eps is not None else self.config.sampling_eps
        if self.timesteps is None:
            self.set_timesteps(_a , _a )
        snake_case__ = sigma_min * (sigma_max / sigma_min) ** (self.timesteps / sampling_eps)
        snake_case__ = torch.exp(torch.linspace(math.log(_a ) , math.log(_a ) , _a ) )
        snake_case__ = torch.tensor([sigma_min * (sigma_max / sigma_min) ** t for t in self.timesteps] )

    def SCREAMING_SNAKE_CASE__ ( self:int , _a:Optional[Any] , _a:Optional[Any] ):
        # get_adjacent_sigma: sigma at t-1; zero at the first timestep.
        return torch.where(
            timesteps == 0 ,
            torch.zeros_like(t.to(timesteps.device ) ) ,
            self.discrete_sigmas[timesteps - 1].to(timesteps.device ) ,
        )

    def SCREAMING_SNAKE_CASE__ ( self:Tuple , _a:torch.FloatTensor , _a:int , _a:torch.FloatTensor , _a:Optional[torch.Generator] = None , _a:bool = True , ):
        """Predictor step: one reverse-SDE update (eq. 6 of the paper).

        Raises ValueError if ``set_timesteps`` was not called first.
        """
        if self.timesteps is None:
            raise ValueError(
                '''`self.timesteps` is not set, you need to run \'set_timesteps\' after creating the scheduler''' )
        snake_case__ = timestep * torch.ones(
            sample.shape[0] , device=sample.device )  # torch.repeat_interleave(timestep, sample.shape[0])
        snake_case__ = (timestep * (len(self.timesteps ) - 1)).long()
        # mps requires indices to be in the same device, so we use cpu as is the default with cuda
        snake_case__ = timesteps.to(self.discrete_sigmas.device )
        snake_case__ = self.discrete_sigmas[timesteps].to(sample.device )
        snake_case__ = self.get_adjacent_sigma(_a , _a ).to(sample.device )
        snake_case__ = torch.zeros_like(_a )
        snake_case__ = (sigma**2 - adjacent_sigma**2) ** 0.5
        # equation 6 in the paper: the model_output modeled by the network is grad_x log pt(x)
        # also equation 47 shows the analog from SDE models to ancestral sampling methods
        snake_case__ = diffusion.flatten()
        # Broadcast the per-batch diffusion coefficient to the sample's rank.
        while len(diffusion.shape ) < len(sample.shape ):
            snake_case__ = diffusion.unsqueeze(-1 )
        snake_case__ = drift - diffusion**2 * model_output
        # equation 6: sample noise for the diffusion term of
        snake_case__ = randn_tensor(
            sample.shape , layout=sample.layout , generator=_a , device=sample.device , dtype=sample.dtype )
        snake_case__ = sample - drift  # subtract because `dt` is a small negative timestep
        # TODO is the variable diffusion the correct scaling term for the noise?
        snake_case__ = prev_sample_mean + diffusion * noise  # add impact of diffusion field g
        if not return_dict:
            return (prev_sample, prev_sample_mean)
        return SdeVeOutput(prev_sample=_a , prev_sample_mean=_a )

    def SCREAMING_SNAKE_CASE__ ( self:int , _a:torch.FloatTensor , _a:torch.FloatTensor , _a:Optional[torch.Generator] = None , _a:bool = True , ):
        """Corrector step: Langevin-style correction with an SNR-derived step
        size. Raises ValueError if ``set_timesteps`` was not called first."""
        if self.timesteps is None:
            raise ValueError(
                '''`self.timesteps` is not set, you need to run \'set_timesteps\' after creating the scheduler''' )
        # For small batch sizes, the paper "suggest replacing norm(z) with sqrt(d), where d is the dim. of z"
        # sample noise for correction
        snake_case__ = randn_tensor(sample.shape , layout=sample.layout , generator=_a ).to(sample.device )
        # compute step size from the model_output, the noise, and the snr
        snake_case__ = torch.norm(model_output.reshape(model_output.shape[0] , -1 ) , dim=-1 ).mean()
        snake_case__ = torch.norm(noise.reshape(noise.shape[0] , -1 ) , dim=-1 ).mean()
        snake_case__ = (self.config.snr * noise_norm / grad_norm) ** 2 * 2
        snake_case__ = step_size * torch.ones(sample.shape[0] ).to(sample.device )
        # self.repeat_scalar(step_size, sample.shape[0])
        # compute corrected sample: model_output term and noise term
        snake_case__ = step_size.flatten()
        while len(step_size.shape ) < len(sample.shape ):
            snake_case__ = step_size.unsqueeze(-1 )
        snake_case__ = sample + step_size * model_output
        snake_case__ = prev_sample_mean + ((step_size * 2) ** 0.5) * noise
        if not return_dict:
            return (prev_sample,)
        return SchedulerOutput(prev_sample=_a )

    def SCREAMING_SNAKE_CASE__ ( self:Dict , _a:torch.FloatTensor , _a:torch.FloatTensor , _a:torch.FloatTensor , ):
        # add_noise: perturb original samples with sigma-scaled noise.
        # Make sure sigmas and timesteps have the same device and dtype as original_samples
        snake_case__ = timesteps.to(original_samples.device )
        snake_case__ = self.discrete_sigmas.to(original_samples.device )[timesteps]
        snake_case__ = (
            noise * sigmas[:, None, None, None]
            if noise is not None
            else torch.randn_like(_a ) * sigmas[:, None, None, None]
        )
        snake_case__ = noise + original_samples
        return noisy_samples

    def __len__( self:Tuple ):
        # Length is the number of training timesteps from the registered config.
        return self.config.num_train_timesteps
33
from collections import OrderedDict
from typing import Mapping

from packaging import version

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging


lowerCamelCase__ : List[Any] = logging.get_logger(__name__)

# Map of pretrained checkpoint name -> hosted config URL.
# NOTE(review): rebinds the same obfuscated name as the logger above, making
# the logger unreachable afterwards.
lowerCamelCase__ : Optional[int] = {
    """facebook/data2vec-vision-base-ft""": (
        """https://huggingface.co/facebook/data2vec-vision-base-ft/resolve/main/config.json"""
    ),
}


class __magic_name__ (snake_case_ ):
    """Configuration for a ``data2vec-vision`` model: vision-transformer
    backbone hyper-parameters plus decode/auxiliary-head settings for
    semantic segmentation."""

    # Model-type identifier consumed by the Auto* config machinery.
    __lowercase : Optional[int] = 'data2vec-vision'

    # NOTE(review): every positional parameter below is obfuscated to the same
    # name ``_a`` (a duplicate-argument SyntaxError) and the body reads the
    # original, now-missing parameter names; kept byte-identical.
    def __init__( self:int , _a:Tuple=7_68 , _a:int=12 , _a:Any=12 , _a:Optional[int]=30_72 , _a:Optional[int]="gelu" , _a:Any=0.0 , _a:Any=0.0 , _a:List[str]=0.02 , _a:Dict=1e-12 , _a:Tuple=2_24 , _a:Any=16 , _a:str=3 , _a:str=False , _a:Union[str, Any]=False , _a:Optional[int]=False , _a:Any=False , _a:Dict=0.1 , _a:Dict=0.1 , _a:str=True , _a:str=[3, 5, 7, 11] , _a:List[str]=[1, 2, 3, 6] , _a:List[str]=True , _a:Any=0.4 , _a:str=2_56 , _a:Union[str, Any]=1 , _a:int=False , _a:Optional[int]=2_55 , **_a:Dict , ):
        super().__init__(**_a )
        # Transformer backbone hyper-parameters.
        snake_case__ = hidden_size
        snake_case__ = num_hidden_layers
        snake_case__ = num_attention_heads
        snake_case__ = intermediate_size
        snake_case__ = hidden_act
        snake_case__ = hidden_dropout_prob
        snake_case__ = attention_probs_dropout_prob
        snake_case__ = initializer_range
        snake_case__ = layer_norm_eps
        # Image / patch geometry and embedding options.
        snake_case__ = image_size
        snake_case__ = patch_size
        snake_case__ = num_channels
        snake_case__ = use_mask_token
        snake_case__ = use_absolute_position_embeddings
        snake_case__ = use_relative_position_bias
        snake_case__ = use_shared_relative_position_bias
        snake_case__ = layer_scale_init_value
        snake_case__ = drop_path_rate
        snake_case__ = use_mean_pooling
        # decode head attributes (semantic segmentation)
        snake_case__ = out_indices
        snake_case__ = pool_scales
        # auxiliary head attributes (semantic segmentation)
        snake_case__ = use_auxiliary_head
        snake_case__ = auxiliary_loss_weight
        snake_case__ = auxiliary_channels
        snake_case__ = auxiliary_num_convs
        snake_case__ = auxiliary_concat_input
        snake_case__ = semantic_loss_ignore_index


class __magic_name__ (snake_case_ ):
    """ONNX export configuration for data2vec-vision."""

    # Minimum supported torch/ONNX tooling version for export.
    __lowercase : Any = version.parse('1.11' )

    @property
    def SCREAMING_SNAKE_CASE__ ( self:List[str] ):
        # Named ONNX inputs with their dynamic axes.
        return OrderedDict(
            [
                ('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}),
            ]
        )

    @property
    def SCREAMING_SNAKE_CASE__ ( self:Tuple ):
        # Absolute tolerance used when validating exported model outputs.
        return 1e-4
33
1
import tempfile

import torch

from diffusers import (
    DEISMultistepScheduler,
    DPMSolverMultistepScheduler,
    DPMSolverSinglestepScheduler,
    UniPCMultistepScheduler,
)

from .test_schedulers import SchedulerCommonTest


class __magic_name__ (snake_case_ ):
    """Test suite for UniPCMultistepScheduler (config round-trips, solver
    options, full denoising loops, fp16, and switching between compatible
    scheduler classes).

    NOTE(review): every method is obfuscated to the same name
    ``SCREAMING_SNAKE_CASE__`` and bodies reference the original local names;
    kept byte-identical.
    """

    # Scheduler classes under test.
    __lowercase : Dict = (UniPCMultistepScheduler,)
    # Default forward kwargs used by the common-test harness.
    __lowercase : Dict = (('num_inference_steps', 25),)

    def SCREAMING_SNAKE_CASE__ ( self:Dict , **_a:int ):
        # Default scheduler config, overridable via kwargs.
        snake_case__ = {
            '''num_train_timesteps''': 10_00,
            '''beta_start''': 0.0001,
            '''beta_end''': 0.02,
            '''beta_schedule''': '''linear''',
            '''solver_order''': 2,
            '''solver_type''': '''bh2''',
        }
        config.update(**_a )
        return config

    def SCREAMING_SNAKE_CASE__ ( self:str , _a:Dict=0 , **_a:Tuple ):
        # check_over_configs: save/from_pretrained round-trip must reproduce
        # identical step outputs.
        snake_case__ = dict(self.forward_default_kwargs )
        snake_case__ = kwargs.pop('''num_inference_steps''' , _a )
        snake_case__ = self.dummy_sample
        snake_case__ = 0.1 * sample
        snake_case__ = [residual + 0.2, residual + 0.15, residual + 0.10]
        for scheduler_class in self.scheduler_classes:
            snake_case__ = self.get_scheduler_config(**_a )
            snake_case__ = scheduler_class(**_a )
            scheduler.set_timesteps(_a )
            # copy over dummy past residuals
            snake_case__ = dummy_past_residuals[: scheduler.config.solver_order]
            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(_a )
                snake_case__ = scheduler_class.from_pretrained(_a )
                new_scheduler.set_timesteps(_a )
                # copy over dummy past residuals
                snake_case__ = dummy_past_residuals[: new_scheduler.config.solver_order]
            snake_case__ , snake_case__ = sample, sample
            for t in range(_a , time_step + scheduler.config.solver_order + 1 ):
                snake_case__ = scheduler.step(_a , _a , _a , **_a ).prev_sample
                snake_case__ = new_scheduler.step(_a , _a , _a , **_a ).prev_sample
            assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"

    def SCREAMING_SNAKE_CASE__ ( self:Dict , _a:str=0 , **_a:List[str] ):
        # check_over_forward: same round-trip check with default config.
        snake_case__ = dict(self.forward_default_kwargs )
        snake_case__ = kwargs.pop('''num_inference_steps''' , _a )
        snake_case__ = self.dummy_sample
        snake_case__ = 0.1 * sample
        snake_case__ = [residual + 0.2, residual + 0.15, residual + 0.10]
        for scheduler_class in self.scheduler_classes:
            snake_case__ = self.get_scheduler_config()
            snake_case__ = scheduler_class(**_a )
            scheduler.set_timesteps(_a )
            # copy over dummy past residuals (must be after setting timesteps)
            snake_case__ = dummy_past_residuals[: scheduler.config.solver_order]
            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(_a )
                snake_case__ = scheduler_class.from_pretrained(_a )
                # copy over dummy past residuals
                new_scheduler.set_timesteps(_a )
                # copy over dummy past residual (must be after setting timesteps)
                snake_case__ = dummy_past_residuals[: new_scheduler.config.solver_order]
            snake_case__ = scheduler.step(_a , _a , _a , **_a ).prev_sample
            snake_case__ = new_scheduler.step(_a , _a , _a , **_a ).prev_sample
            assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"

    def SCREAMING_SNAKE_CASE__ ( self:Tuple , _a:List[Any]=None , **_a:List[str] ):
        # full_loop: run a complete 10-step denoising loop and return the sample.
        if scheduler is None:
            snake_case__ = self.scheduler_classes[0]
            snake_case__ = self.get_scheduler_config(**_a )
            snake_case__ = scheduler_class(**_a )
        snake_case__ = self.scheduler_classes[0]
        snake_case__ = self.get_scheduler_config(**_a )
        snake_case__ = scheduler_class(**_a )
        snake_case__ = 10
        snake_case__ = self.dummy_model()
        snake_case__ = self.dummy_sample_deter
        scheduler.set_timesteps(_a )
        for i, t in enumerate(scheduler.timesteps ):
            snake_case__ = model(_a , _a )
            snake_case__ = scheduler.step(_a , _a , _a ).prev_sample
        return sample

    def SCREAMING_SNAKE_CASE__ ( self:int ):
        # Two consecutive steps must produce same-shaped outputs.
        snake_case__ = dict(self.forward_default_kwargs )
        snake_case__ = kwargs.pop('''num_inference_steps''' , _a )
        for scheduler_class in self.scheduler_classes:
            snake_case__ = self.get_scheduler_config()
            snake_case__ = scheduler_class(**_a )
            snake_case__ = self.dummy_sample
            snake_case__ = 0.1 * sample
            if num_inference_steps is not None and hasattr(_a , '''set_timesteps''' ):
                scheduler.set_timesteps(_a )
            elif num_inference_steps is not None and not hasattr(_a , '''set_timesteps''' ):
                snake_case__ = num_inference_steps
            # copy over dummy past residuals (must be done after set_timesteps)
            snake_case__ = [residual + 0.2, residual + 0.15, residual + 0.10]
            snake_case__ = dummy_past_residuals[: scheduler.config.solver_order]
            snake_case__ = scheduler.timesteps[5]
            snake_case__ = scheduler.timesteps[6]
            snake_case__ = scheduler.step(_a , _a , _a , **_a ).prev_sample
            snake_case__ = scheduler.step(_a , _a , _a , **_a ).prev_sample
            self.assertEqual(output_a.shape , sample.shape )
            self.assertEqual(output_a.shape , output_a.shape )

    def SCREAMING_SNAKE_CASE__ ( self:int ):
        # make sure that iterating over schedulers with same config names gives same results
        # for defaults
        snake_case__ = UniPCMultistepScheduler(**self.get_scheduler_config() )
        snake_case__ = self.full_loop(scheduler=_a )
        snake_case__ = torch.mean(torch.abs(_a ) )
        assert abs(result_mean.item() - 0.2464 ) < 1e-3
        snake_case__ = DPMSolverSinglestepScheduler.from_config(scheduler.config )
        snake_case__ = DEISMultistepScheduler.from_config(scheduler.config )
        snake_case__ = DPMSolverMultistepScheduler.from_config(scheduler.config )
        snake_case__ = UniPCMultistepScheduler.from_config(scheduler.config )
        snake_case__ = self.full_loop(scheduler=_a )
        snake_case__ = torch.mean(torch.abs(_a ) )
        assert abs(result_mean.item() - 0.2464 ) < 1e-3

    def SCREAMING_SNAKE_CASE__ ( self:Optional[Any] ):
        # Sweep num_train_timesteps.
        for timesteps in [25, 50, 1_00, 9_99, 10_00]:
            self.check_over_configs(num_train_timesteps=_a )

    def SCREAMING_SNAKE_CASE__ ( self:Optional[Any] ):
        # Sweep thresholding / solver options.
        self.check_over_configs(thresholding=_a )
        for order in [1, 2, 3]:
            for solver_type in ["bh1", "bh2"]:
                for threshold in [0.5, 1.0, 2.0]:
                    for prediction_type in ["epsilon", "sample"]:
                        self.check_over_configs(
                            thresholding=_a ,
                            prediction_type=_a ,
                            sample_max_value=_a ,
                            solver_order=_a ,
                            solver_type=_a ,
                        )

    def SCREAMING_SNAKE_CASE__ ( self:int ):
        # Sweep prediction types.
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=_a )

    def SCREAMING_SNAKE_CASE__ ( self:Optional[Any] ):
        # Sweep solver types/orders and check the loop produces no NaNs.
        for solver_type in ["bh1", "bh2"]:
            for order in [1, 2, 3]:
                for prediction_type in ["epsilon", "sample"]:
                    self.check_over_configs(
                        solver_order=_a ,
                        solver_type=_a ,
                        prediction_type=_a ,
                    )
                    snake_case__ = self.full_loop(
                        solver_order=_a ,
                        solver_type=_a ,
                        prediction_type=_a ,
                    )
                    assert not torch.isnan(_a ).any(), "Samples have nan numbers"

    def SCREAMING_SNAKE_CASE__ ( self:Any ):
        # Sweep lower_order_final flag.
        self.check_over_configs(lower_order_final=_a )
        self.check_over_configs(lower_order_final=_a )

    def SCREAMING_SNAKE_CASE__ ( self:Union[str, Any] ):
        # Sweep num_inference_steps.
        for num_inference_steps in [1, 2, 3, 5, 10, 50, 1_00, 9_99, 10_00]:
            self.check_over_forward(num_inference_steps=_a , time_step=0 )

    def SCREAMING_SNAKE_CASE__ ( self:str ):
        # Full loop with defaults: checks the mean against a golden value.
        snake_case__ = self.full_loop()
        snake_case__ = torch.mean(torch.abs(_a ) )
        assert abs(result_mean.item() - 0.2464 ) < 1e-3

    def SCREAMING_SNAKE_CASE__ ( self:Union[str, Any] ):
        # Full loop with v-prediction: checks the mean against a golden value.
        snake_case__ = self.full_loop(prediction_type='''v_prediction''' )
        snake_case__ = torch.mean(torch.abs(_a ) )
        assert abs(result_mean.item() - 0.1014 ) < 1e-3

    def SCREAMING_SNAKE_CASE__ ( self:List[str] ):
        # fp16 loop must keep the sample in half precision throughout.
        snake_case__ = self.scheduler_classes[0]
        snake_case__ = self.get_scheduler_config(thresholding=_a , dynamic_thresholding_ratio=0 )
        snake_case__ = scheduler_class(**_a )
        snake_case__ = 10
        snake_case__ = self.dummy_model()
        snake_case__ = self.dummy_sample_deter.half()
        scheduler.set_timesteps(_a )
        for i, t in enumerate(scheduler.timesteps ):
            snake_case__ = model(_a , _a )
            snake_case__ = scheduler.step(_a , _a , _a ).prev_sample
        assert sample.dtype == torch.floataa

    def SCREAMING_SNAKE_CASE__ ( self:List[str] , **_a:List[str] ):
        # Using num_train_timesteps as the step count must yield unique timesteps.
        for scheduler_class in self.scheduler_classes:
            snake_case__ = self.get_scheduler_config(**_a )
            snake_case__ = scheduler_class(**_a )
            scheduler.set_timesteps(scheduler.config.num_train_timesteps )
            assert len(scheduler.timesteps.unique() ) == scheduler.num_inference_steps
33
import os
import sys


# Prepend the repository's ./src directory so the in-tree `transformers`
# package is importable when running from a source checkout.
lowerCamelCase__ : Optional[int] = os.path.join(os.path.dirname(__file__), """src""")
# NOTE(review): the assignment above is obfuscated; this line still reads the
# original name SRC_DIR, which no longer exists.
sys.path.append(SRC_DIR)

from transformers import (
    AutoConfig,
    AutoModel,
    AutoModelForCausalLM,
    AutoModelForMaskedLM,
    AutoModelForQuestionAnswering,
    AutoModelForSequenceClassification,
    AutoTokenizer,
    add_start_docstrings,
)


# Runtime dependency names used by this bootstrap module.
lowerCamelCase__ : Optional[int] = [
    """torch""",
    """numpy""",
    """tokenizers""",
    """filelock""",
    """requests""",
    """tqdm""",
    """regex""",
    """sentencepiece""",
    """sacremoses""",
    """importlib_metadata""",
    """huggingface_hub""",
]


# Thin wrappers around the Auto* `from_pretrained` factories, re-using the
# factories' docstrings via `add_start_docstrings`.
# NOTE(review): each wrapper's *args and **kwargs share one obfuscated name
# (a duplicate-argument SyntaxError) and all wrappers share one function
# name; kept byte-identical.
@add_start_docstrings(AutoConfig.__doc__ )
def SCREAMING_SNAKE_CASE ( *__lowerCAmelCase , **__lowerCAmelCase ) -> Any:
    return AutoConfig.from_pretrained(*__lowerCAmelCase , **__lowerCAmelCase )


@add_start_docstrings(AutoTokenizer.__doc__ )
def SCREAMING_SNAKE_CASE ( *__lowerCAmelCase , **__lowerCAmelCase ) -> List[str]:
    return AutoTokenizer.from_pretrained(*__lowerCAmelCase , **__lowerCAmelCase )


@add_start_docstrings(AutoModel.__doc__ )
def SCREAMING_SNAKE_CASE ( *__lowerCAmelCase , **__lowerCAmelCase ) -> Tuple:
    return AutoModel.from_pretrained(*__lowerCAmelCase , **__lowerCAmelCase )


@add_start_docstrings(AutoModelForCausalLM.__doc__ )
def SCREAMING_SNAKE_CASE ( *__lowerCAmelCase , **__lowerCAmelCase ) -> Union[str, Any]:
    return AutoModelForCausalLM.from_pretrained(*__lowerCAmelCase , **__lowerCAmelCase )


@add_start_docstrings(AutoModelForMaskedLM.__doc__ )
def SCREAMING_SNAKE_CASE ( *__lowerCAmelCase , **__lowerCAmelCase ) -> List[Any]:
    return AutoModelForMaskedLM.from_pretrained(*__lowerCAmelCase , **__lowerCAmelCase )


@add_start_docstrings(AutoModelForSequenceClassification.__doc__ )
def SCREAMING_SNAKE_CASE ( *__lowerCAmelCase , **__lowerCAmelCase ) -> List[str]:
    return AutoModelForSequenceClassification.from_pretrained(*__lowerCAmelCase , **__lowerCAmelCase )


@add_start_docstrings(AutoModelForQuestionAnswering.__doc__ )
def SCREAMING_SNAKE_CASE ( *__lowerCAmelCase , **__lowerCAmelCase ) -> Union[str, Any]:
    return AutoModelForQuestionAnswering.from_pretrained(*__lowerCAmelCase , **__lowerCAmelCase )
33
1
class __magic_name__ :
    """Prefix-tree (trie) node: maps characters to child nodes and flags
    word endings.

    NOTE(review): all methods below share one obfuscated name, bodies read
    the original parameter names, and ``TrieNode`` no longer exists at module
    level; kept byte-identical.
    """

    def __init__( self:Optional[Any] ):
        snake_case__ = {}  # Mapping from char to TrieNode
        snake_case__ = False  # True when a word ends at this node

    def SCREAMING_SNAKE_CASE__ ( self:Optional[int] , _a:list[str] ):
        # insert_many: insert each word of the list into the trie.
        for word in words:
            self.insert(_a )

    def SCREAMING_SNAKE_CASE__ ( self:Optional[int] , _a:str ):
        # insert: walk/create child nodes per character; mark the final node.
        snake_case__ = self
        for char in word:
            if char not in curr.nodes:
                snake_case__ = TrieNode()
            snake_case__ = curr.nodes[char]
        snake_case__ = True

    def SCREAMING_SNAKE_CASE__ ( self:Optional[int] , _a:str ):
        # find: True iff the exact word was previously inserted.
        snake_case__ = self
        for char in word:
            if char not in curr.nodes:
                return False
            snake_case__ = curr.nodes[char]
        return curr.is_leaf

    def SCREAMING_SNAKE_CASE__ ( self:Union[str, Any] , _a:str ):
        # delete: remove a word, pruning nodes that become empty bottom-up.
        def _delete(_a:TrieNode , _a:str , _a:int ) -> bool:
            if index == len(_a ):
                # If word does not exist
                if not curr.is_leaf:
                    return False
                snake_case__ = False
                return len(curr.nodes ) == 0
            snake_case__ = word[index]
            snake_case__ = curr.nodes.get(_a )
            # If char not in current trie node
            if not char_node:
                return False
            # Flag to check if node can be deleted
            snake_case__ = _delete(_a , _a , index + 1 )
            if delete_curr:
                del curr.nodes[char]
                return len(curr.nodes ) == 0
            return delete_curr

        _delete(self , _a , 0 )


# print_words: depth-first dump of all words stored under `node`.
# NOTE(review): both parameters share one obfuscated name (duplicate-argument
# SyntaxError) and all module functions below share one function name.
def SCREAMING_SNAKE_CASE ( __lowerCAmelCase , __lowerCAmelCase ) -> None:
    if node.is_leaf:
        print(__lowerCAmelCase , end=''' ''' )
    for key, value in node.nodes.items():
        print_words(__lowerCAmelCase , word + key )


# test_trie: exercise insert/find/delete; returns True when all checks pass.
def SCREAMING_SNAKE_CASE ( ) -> bool:
    snake_case__ = '''banana bananas bandana band apple all beast'''.split()
    snake_case__ = TrieNode()
    root.insert_many(__lowerCAmelCase )
    # print_words(root, "")
    assert all(root.find(__lowerCAmelCase ) for word in words )
    assert root.find('''banana''' )
    assert not root.find('''bandanas''' )
    assert not root.find('''apps''' )
    assert root.find('''apple''' )
    assert root.find('''all''' )
    root.delete('''all''' )
    assert not root.find('''all''' )
    root.delete('''banana''' )
    assert not root.find('''banana''' )
    assert root.find('''bananas''' )
    return True


# print_results: human-readable pass/fail line for a named check.
def SCREAMING_SNAKE_CASE ( __lowerCAmelCase , __lowerCAmelCase ) -> None:
    print(str(__lowerCAmelCase ) , '''works!''' if passes else '''doesn\'t work :(''' )


# pytest-style entry: asserts the trie self-test passes.
def SCREAMING_SNAKE_CASE ( ) -> None:
    assert test_trie()


# main: run the self-test and print the outcome.
def SCREAMING_SNAKE_CASE ( ) -> None:
    print_results('''Testing trie functionality''' , test_trie() )


if __name__ == "__main__":
    main()
33
import torch

from diffusers import CMStochasticIterativeScheduler

from .test_schedulers import SchedulerCommonTest


class __magic_name__ (snake_case_ ):
    """Tests for CMStochasticIterativeScheduler (consistency models): shape
    checks, full loops against golden statistics, and invalid-timestep
    errors.

    NOTE(review): every method is obfuscated to the same name
    ``SCREAMING_SNAKE_CASE__``; bodies are kept byte-identical.
    """

    # Scheduler classes under test.
    __lowercase : str = (CMStochasticIterativeScheduler,)
    # Default number of inference steps used by the common harness.
    __lowercase : List[str] = 10

    def SCREAMING_SNAKE_CASE__ ( self:int , **_a:Optional[int] ):
        # Default scheduler config, overridable via kwargs.
        snake_case__ = {
            '''num_train_timesteps''': 2_01,
            '''sigma_min''': 0.002,
            '''sigma_max''': 80.0,
        }
        config.update(**_a )
        return config

    def SCREAMING_SNAKE_CASE__ ( self:Optional[int] ):
        # Two consecutive steps must produce same-shaped outputs.
        snake_case__ = 10
        snake_case__ = self.get_scheduler_config()
        snake_case__ = self.scheduler_classes[0](**_a )
        scheduler.set_timesteps(_a )
        snake_case__ = scheduler.timesteps[0]
        snake_case__ = scheduler.timesteps[1]
        snake_case__ = self.dummy_sample
        snake_case__ = 0.1 * sample
        snake_case__ = scheduler.step(_a , _a , _a ).prev_sample
        snake_case__ = scheduler.step(_a , _a , _a ).prev_sample
        self.assertEqual(output_a.shape , sample.shape )
        self.assertEqual(output_a.shape , output_a.shape )

    def SCREAMING_SNAKE_CASE__ ( self:Any ):
        # Sweep num_train_timesteps.
        for timesteps in [10, 50, 1_00, 10_00]:
            self.check_over_configs(num_train_timesteps=_a )

    def SCREAMING_SNAKE_CASE__ ( self:List[str] ):
        # Sweep clip_denoised flag.
        for clip_denoised in [True, False]:
            self.check_over_configs(clip_denoised=_a )

    def SCREAMING_SNAKE_CASE__ ( self:int ):
        # One-step full loop; compares sum/mean against golden values.
        snake_case__ = self.scheduler_classes[0]
        snake_case__ = self.get_scheduler_config()
        snake_case__ = scheduler_class(**_a )
        snake_case__ = 1
        scheduler.set_timesteps(_a )
        snake_case__ = scheduler.timesteps
        snake_case__ = torch.manual_seed(0 )
        snake_case__ = self.dummy_model()
        snake_case__ = self.dummy_sample_deter * scheduler.init_noise_sigma
        for i, t in enumerate(_a ):
            # 1. scale model input
            snake_case__ = scheduler.scale_model_input(_a , _a )
            # 2. predict noise residual
            snake_case__ = model(_a , _a )
            # 3. predict previous sample x_t-1
            snake_case__ = scheduler.step(_a , _a , _a , generator=_a ).prev_sample
            snake_case__ = pred_prev_sample
        snake_case__ = torch.sum(torch.abs(_a ) )
        snake_case__ = torch.mean(torch.abs(_a ) )
        assert abs(result_sum.item() - 192.7614 ) < 1e-2
        assert abs(result_mean.item() - 0.2510 ) < 1e-3

    def SCREAMING_SNAKE_CASE__ ( self:Tuple ):
        # Custom timestep schedule loop; compares against golden values.
        snake_case__ = self.scheduler_classes[0]
        snake_case__ = self.get_scheduler_config()
        snake_case__ = scheduler_class(**_a )
        snake_case__ = [1_06, 0]
        scheduler.set_timesteps(timesteps=_a )
        snake_case__ = scheduler.timesteps
        snake_case__ = torch.manual_seed(0 )
        snake_case__ = self.dummy_model()
        snake_case__ = self.dummy_sample_deter * scheduler.init_noise_sigma
        for t in timesteps:
            # 1. scale model input
            snake_case__ = scheduler.scale_model_input(_a , _a )
            # 2. predict noise residual
            snake_case__ = model(_a , _a )
            # 3. predict previous sample x_t-1
            snake_case__ = scheduler.step(_a , _a , _a , generator=_a ).prev_sample
            snake_case__ = pred_prev_sample
        snake_case__ = torch.sum(torch.abs(_a ) )
        snake_case__ = torch.mean(torch.abs(_a ) )
        assert abs(result_sum.item() - 347.6357 ) < 1e-2
        assert abs(result_mean.item() - 0.4527 ) < 1e-3

    def SCREAMING_SNAKE_CASE__ ( self:Any ):
        # Non-descending custom timesteps must raise.
        snake_case__ = self.scheduler_classes[0]
        snake_case__ = self.get_scheduler_config()
        snake_case__ = scheduler_class(**_a )
        snake_case__ = [39, 30, 12, 15, 0]
        with self.assertRaises(_a , msg='''`timesteps` must be in descending order.''' ):
            scheduler.set_timesteps(timesteps=_a )

    def SCREAMING_SNAKE_CASE__ ( self:Any ):
        # Passing both num_inference_steps and timesteps must raise.
        snake_case__ = self.scheduler_classes[0]
        snake_case__ = self.get_scheduler_config()
        snake_case__ = scheduler_class(**_a )
        snake_case__ = [39, 30, 12, 1, 0]
        snake_case__ = len(_a )
        with self.assertRaises(_a , msg='''Can only pass one of `num_inference_steps` or `timesteps`.''' ):
            scheduler.set_timesteps(num_inference_steps=_a , timesteps=_a )

    def SCREAMING_SNAKE_CASE__ ( self:Tuple ):
        # Timesteps >= num_train_timesteps must raise.
        snake_case__ = self.scheduler_classes[0]
        snake_case__ = self.get_scheduler_config()
        snake_case__ = scheduler_class(**_a )
        snake_case__ = [scheduler.config.num_train_timesteps]
        with self.assertRaises(
            _a , msg='''`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}}''' , ):
            scheduler.set_timesteps(timesteps=_a )
33
1
from copy import deepcopy


class __magic_name__ :
    """Binary indexed (Fenwick) tree over a fixed-size integer array,
    supporting point updates and prefix/range sums.

    NOTE(review): all methods below share one obfuscated name (only the final
    definition would survive on a real class), several take two parameters
    both named ``_a`` (duplicate-argument SyntaxError), and bodies read the
    original parameter names; kept byte-identical.
    """

    def __init__( self:int , _a:list[int] | None = None , _a:int | None = None ):
        # Construct from an explicit array, or zero-filled from a size.
        if arr is None and size is not None:
            snake_case__ = size
            snake_case__ = [0] * size
        elif arr is not None:
            self.init(_a )
        else:
            raise ValueError('''Either arr or size must be specified''' )

    def SCREAMING_SNAKE_CASE__ ( self:Any , _a:list[int] ):
        # init: build the tree in O(n) from an input array.
        snake_case__ = len(_a )
        snake_case__ = deepcopy(_a )
        for i in range(1 , self.size ):
            snake_case__ = self.next_(_a )
            if j < self.size:
                self.tree[j] += self.tree[i]

    def SCREAMING_SNAKE_CASE__ ( self:Any ):
        # get_array: reconstruct the original array from the tree.
        snake_case__ = self.tree[:]
        for i in range(self.size - 1 , 0 , -1 ):
            snake_case__ = self.next_(_a )
            if j < self.size:
                arr[j] -= arr[i]
        return arr

    @staticmethod
    def SCREAMING_SNAKE_CASE__ ( _a:int ):
        # next_: next Fenwick node (add the lowest set bit).
        return index + (index & (-index))

    @staticmethod
    def SCREAMING_SNAKE_CASE__ ( _a:int ):
        # prev: parent Fenwick node (clear the lowest set bit).
        return index - (index & (-index))

    def SCREAMING_SNAKE_CASE__ ( self:List[Any] , _a:int , _a:int ):
        # add: point update — add `value` at `index` (index 0 kept separately).
        if index == 0:
            self.tree[0] += value
            return
        while index < self.size:
            self.tree[index] += value
            snake_case__ = self.next_(_a )

    def SCREAMING_SNAKE_CASE__ ( self:Dict , _a:int , _a:int ):
        # update: set a position to `value` via a delta add.
        self.add(_a , value - self.get(_a ) )

    def SCREAMING_SNAKE_CASE__ ( self:Optional[Any] , _a:int ):
        # prefix: sum of elements in the half-open range [0, right).
        if right == 0:
            return 0
        snake_case__ = self.tree[0]
        right -= 1  # make right inclusive
        while right > 0:
            result += self.tree[right]
            snake_case__ = self.prev(_a )
        return result

    def SCREAMING_SNAKE_CASE__ ( self:Dict , _a:int , _a:int ):
        # query: sum over the half-open range [left, right).
        return self.prefix(_a ) - self.prefix(_a )

    def SCREAMING_SNAKE_CASE__ ( self:str , _a:int ):
        # get: value of a single element.
        return self.query(_a , index + 1 )

    def SCREAMING_SNAKE_CASE__ ( self:str , _a:int ):
        # rank_query: largest index with prefix sum <= value (binary lifting).
        value -= self.tree[0]
        if value < 0:
            return -1
        snake_case__ = 1  # Largest power of 2 <= size
        while j * 2 < self.size:
            j *= 2
        snake_case__ = 0
        while j > 0:
            if i + j < self.size and self.tree[i + j] <= value:
                value -= self.tree[i + j]
                i += j
            j //= 2
        return i


if __name__ == "__main__":
    import doctest

    doctest.testmod()
33
import numpy as np


def sigmoid(__lowerCAmelCase) -> np.ndarray:
    """Elementwise logistic sigmoid: 1 / (1 + e^-x).

    Args:
        __lowerCAmelCase: input array (or scalar broadcastable by NumPy).

    Returns:
        Array of the same shape with values in (0, 1).
    """
    # Bug fix: the original body referenced the undefined name `vector`
    # instead of the function's parameter, raising NameError on every call.
    return 1 / (1 + np.exp(-__lowerCAmelCase))


def SCREAMING_SNAKE_CASE(__lowerCAmelCase) -> np.ndarray:
    """Elementwise swish / SiLU activation: x * sigmoid(x).

    Args:
        __lowerCAmelCase: input array (or scalar broadcastable by NumPy).

    Returns:
        Array of the same shape as the input.
    """
    # Bug fix: the original body referenced the undefined names `vector` and
    # `sigmoid`; defining `sigmoid` above and using the parameter makes the
    # module's surviving public binding (this function) actually callable.
    return __lowerCAmelCase * sigmoid(__lowerCAmelCase)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
33
1
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

# NOTE(review): module-level names restored — the original bound both the
# logger and this map to the same identifier ``lowerCamelCase__`` (the second
# shadowed the first) and annotated it with the un-imported ``List[Any]``,
# which is evaluated at module level and would raise NameError.
SWITCH_TRANSFORMERS_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/switch-base-8": "https://huggingface.co/google/switch-base-8/blob/main/config.json",
}


class __magic_name__(PretrainedConfig):
    """Configuration for a SwitchTransformers (mixture-of-experts T5) model.

    NOTE(review): the original ``__init__`` declared every parameter as ``_a``
    — a duplicate-argument SyntaxError — while the body referenced the real
    names (``vocab_size``, ``d_model``, …).  The real parameter names are
    restored in the order of the original defaults; the base class is the
    ``PretrainedConfig`` imported above (the placeholder ``snake_case_`` was
    undefined).
    """

    model_type = "switch_transformers"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"hidden_size": "d_model", "num_attention_heads": "num_heads", "num_hidden_layers": "num_layers"}

    def __init__(
        self,
        vocab_size=32128,
        d_model=768,
        d_kv=64,
        d_ff=2048,
        expert_capacity=64,
        num_layers=12,
        num_sparse_encoder_layers=3,
        num_decoder_layers=12,
        num_sparse_decoder_layers=3,
        num_heads=12,
        num_experts=8,
        router_bias=False,
        router_jitter_noise=0.01,
        router_dtype="float32",
        router_ignore_padding_tokens=False,
        relative_attention_num_buckets=32,
        relative_attention_max_distance=128,
        dropout_rate=0.1,
        layer_norm_epsilon=1e-6,
        router_z_loss_coef=0.001,
        router_aux_loss_coef=0.001,
        initializer_factor=1.0,
        feed_forward_proj="relu",
        is_encoder_decoder=True,
        add_router_probs=False,
        use_cache=True,
        pad_token_id=0,
        eos_token_id=1,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.d_kv = d_kv
        self.d_ff = d_ff

        self.num_sparse_encoder_layers = num_sparse_encoder_layers

        self.num_layers = num_layers
        self.num_decoder_layers = (
            num_decoder_layers if num_decoder_layers is not None else self.num_layers
        )  # default = symmetry
        self.num_sparse_decoder_layers = num_sparse_decoder_layers

        # This tells us, each how many encoder layer we'll have to set a sparse layer.
        if self.num_sparse_encoder_layers > 0:
            self.encoder_sparse_step = self.num_layers // self.num_sparse_encoder_layers
        else:
            self.encoder_sparse_step = self.num_layers  # HACK: this will create 0 sparse layers

        # This tells us, each how many decoder layer we'll have to set a sparse layer.
        if self.num_sparse_decoder_layers > 0:
            self.decoder_sparse_step = self.num_decoder_layers // self.num_sparse_decoder_layers
        else:
            self.decoder_sparse_step = self.num_decoder_layers  # HACK: this will create 0 sparse layers

        self.num_heads = num_heads
        self.num_experts = num_experts
        self.expert_capacity = expert_capacity
        self.router_bias = router_bias
        self.router_jitter_noise = router_jitter_noise
        if router_dtype not in ["float32", "float16", "bfloat16"]:
            raise ValueError(f"`router_dtype` must be one of 'float32', 'float16' or 'bfloat16', got {router_dtype}")
        self.router_dtype = router_dtype

        self.router_ignore_padding_tokens = router_ignore_padding_tokens
        self.relative_attention_num_buckets = relative_attention_num_buckets
        self.relative_attention_max_distance = relative_attention_max_distance

        self.dropout_rate = dropout_rate
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_factor = initializer_factor
        self.feed_forward_proj = feed_forward_proj
        self.use_cache = use_cache
        self.add_router_probs = add_router_probs

        self.router_z_loss_coef = router_z_loss_coef
        self.router_aux_loss_coef = router_aux_loss_coef

        act_info = self.feed_forward_proj.split("-")
        self.dense_act_fn = act_info[-1]
        self.is_gated_act = act_info[0] == "gated"

        if len(act_info) > 1 and act_info[0] != "gated" or len(act_info) > 2:
            raise ValueError(
                f"`feed_forward_proj`: {feed_forward_proj} is not a valid activation function of the dense layer."
                "Please make sure `feed_forward_proj` is of the format `gated-{ACT_FN}` or `{ACT_FN}`, e.g. "
                "'gated-gelu' or 'relu'"
            )

        # for backwards compatibility
        if feed_forward_proj == "gated-gelu":
            self.dense_act_fn = "gelu_new"

        super().__init__(
            pad_token_id=pad_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            **kwargs,
        )
33
def solution(n: int = 100) -> int:
    """Count distinct values of a**b for 2 <= a <= n and 2 <= b <= n
    (Project Euler #29; solution(100) == 9183).

    NOTE(review): the original referenced an undefined name ``n`` (its only
    parameter was ``__lowerCAmelCase``), used the un-incremented bound in both
    ``range`` calls (excluding a == n and b == n), and its ``__main__`` guard
    called an undefined ``solution``.  The function is renamed to match the
    call site and the inclusive bound is restored.
    """
    collect_powers = set()
    limit = n + 1  # maximum limit (ranges are exclusive at the top)
    for a in range(2, limit):
        for b in range(2, limit):
            current_pow = a**b  # calculates the current power
            collect_powers.add(current_pow)  # adds the result to the set
    return len(collect_powers)


if __name__ == "__main__":
    print("Number of terms ", solution(int(str(input()).strip())))
33
1
def ugly_numbers(n: int) -> int:
    """Return the n-th ugly number — positive integers whose only prime
    factors are 2, 3 and 5 — with ugly_numbers(1) == 1.

    Uses the classic three-pointer merge over the growing sequence, O(n).

    NOTE(review): the original collapsed the three pointers and the three
    candidate values into reused names, leaving ``ia`` undefined, and its
    ``__main__`` guard called ``ugly_numbers`` while the function was named
    ``SCREAMING_SNAKE_CASE``.  Names are restored so the algorithm works.
    """
    ugly_nums = [1]
    i2 = i3 = i5 = 0
    next_2 = ugly_nums[i2] * 2
    next_3 = ugly_nums[i3] * 3
    next_5 = ugly_nums[i5] * 5

    for _ in range(1, n):
        next_num = min(next_2, next_3, next_5)
        ugly_nums.append(next_num)
        # advance every pointer that produced next_num (avoids duplicates)
        if next_num == next_2:
            i2 += 1
            next_2 = ugly_nums[i2] * 2
        if next_num == next_3:
            i3 += 1
            next_3 = ugly_nums[i3] * 3
        if next_num == next_5:
            i5 += 1
            next_5 = ugly_nums[i5] * 5
    return ugly_nums[-1]


if __name__ == "__main__":
    from doctest import testmod

    testmod(verbose=True)
    print(f"{ugly_numbers(2_0_0) = }")
33
from copy import deepcopy class __magic_name__ : '''simple docstring''' def __init__( self:int , _a:list[int] | None = None , _a:int | None = None ): if arr is None and size is not None: snake_case__ = size snake_case__ = [0] * size elif arr is not None: self.init(_a ) else: raise ValueError('''Either arr or size must be specified''' ) def SCREAMING_SNAKE_CASE__ ( self:Any , _a:list[int] ): snake_case__ = len(_a ) snake_case__ = deepcopy(_a ) for i in range(1 , self.size ): snake_case__ = self.next_(_a ) if j < self.size: self.tree[j] += self.tree[i] def SCREAMING_SNAKE_CASE__ ( self:Any ): snake_case__ = self.tree[:] for i in range(self.size - 1 , 0 , -1 ): snake_case__ = self.next_(_a ) if j < self.size: arr[j] -= arr[i] return arr @staticmethod def SCREAMING_SNAKE_CASE__ ( _a:int ): return index + (index & (-index)) @staticmethod def SCREAMING_SNAKE_CASE__ ( _a:int ): return index - (index & (-index)) def SCREAMING_SNAKE_CASE__ ( self:List[Any] , _a:int , _a:int ): if index == 0: self.tree[0] += value return while index < self.size: self.tree[index] += value snake_case__ = self.next_(_a ) def SCREAMING_SNAKE_CASE__ ( self:Dict , _a:int , _a:int ): self.add(_a , value - self.get(_a ) ) def SCREAMING_SNAKE_CASE__ ( self:Optional[Any] , _a:int ): if right == 0: return 0 snake_case__ = self.tree[0] right -= 1 # make right inclusive while right > 0: result += self.tree[right] snake_case__ = self.prev(_a ) return result def SCREAMING_SNAKE_CASE__ ( self:Dict , _a:int , _a:int ): return self.prefix(_a ) - self.prefix(_a ) def SCREAMING_SNAKE_CASE__ ( self:str , _a:int ): return self.query(_a , index + 1 ) def SCREAMING_SNAKE_CASE__ ( self:str , _a:int ): value -= self.tree[0] if value < 0: return -1 snake_case__ = 1 # Largest power of 2 <= size while j * 2 < self.size: j *= 2 snake_case__ = 0 while j > 0: if i + j < self.size and self.tree[i + j] <= value: value -= self.tree[i + j] i += j j //= 2 return i if __name__ == "__main__": import doctest doctest.testmod()
33
1
def solution() -> str:
    """Return the last ten digits of 1**1 + 2**2 + ... + 1000**1000
    (Project Euler #48).

    NOTE(review): the original accumulated into ``total`` but initialised
    ``snake_case__`` and returned ``str(__lowerCAmelCase)`` — both undefined —
    and its ``__main__`` guard called ``solution`` while the function was
    named ``SCREAMING_SNAKE_CASE``.  Names are restored.
    """
    total = 0
    for i in range(1, 1001):
        total += i**i
    return str(total)[-10:]


if __name__ == "__main__":
    print(solution())
33
from __future__ import annotations import unittest from transformers import BlenderbotConfig, BlenderbotTokenizer, is_tf_available from transformers.testing_utils import require_tf, require_tokenizers, slow from transformers.utils import cached_property from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import TFAutoModelForSeqaSeqLM, TFBlenderbotForConditionalGeneration, TFBlenderbotModel @require_tf class __magic_name__ : '''simple docstring''' __lowercase : int = BlenderbotConfig __lowercase : Any = {} __lowercase : Optional[Any] = 'gelu' def __init__( self:Tuple , _a:Optional[Any] , _a:Optional[Any]=13 , _a:Tuple=7 , _a:Union[str, Any]=True , _a:int=False , _a:int=99 , _a:Optional[int]=32 , _a:List[str]=2 , _a:List[str]=4 , _a:List[Any]=37 , _a:Any=0.1 , _a:int=0.1 , _a:List[Any]=20 , _a:List[str]=2 , _a:int=1 , _a:Dict=0 , ): snake_case__ = parent snake_case__ = batch_size snake_case__ = seq_length snake_case__ = is_training snake_case__ = use_labels snake_case__ = vocab_size snake_case__ = hidden_size snake_case__ = num_hidden_layers snake_case__ = num_attention_heads snake_case__ = intermediate_size snake_case__ = hidden_dropout_prob snake_case__ = attention_probs_dropout_prob snake_case__ = max_position_embeddings snake_case__ = eos_token_id snake_case__ = pad_token_id snake_case__ = bos_token_id def SCREAMING_SNAKE_CASE__ ( self:int ): snake_case__ = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size ) snake_case__ = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ) , 1 ) snake_case__ = tf.concat([input_ids, eos_tensor] , axis=1 ) snake_case__ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) snake_case__ = self.config_cls( vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers 
, decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , **self.config_updates , ) snake_case__ = prepare_blenderbot_inputs_dict(_a , _a , _a ) return config, inputs_dict def SCREAMING_SNAKE_CASE__ ( self:int , _a:Optional[Any] , _a:int ): snake_case__ = TFBlenderbotModel(config=_a ).get_decoder() snake_case__ = inputs_dict['''input_ids'''] snake_case__ = input_ids[:1, :] snake_case__ = inputs_dict['''attention_mask'''][:1, :] snake_case__ = inputs_dict['''head_mask'''] snake_case__ = 1 # first forward pass snake_case__ = model(_a , attention_mask=_a , head_mask=_a , use_cache=_a ) snake_case__ , snake_case__ = outputs.to_tuple() # create hypothetical next token and extent to next_input_ids snake_case__ = ids_tensor((self.batch_size, 3) , config.vocab_size ) snake_case__ = tf.cast(ids_tensor((self.batch_size, 3) , 2 ) , tf.inta ) # append to next input_ids and snake_case__ = tf.concat([input_ids, next_tokens] , axis=-1 ) snake_case__ = tf.concat([attention_mask, next_attn_mask] , axis=-1 ) snake_case__ = model(_a , attention_mask=_a )[0] snake_case__ = model(_a , attention_mask=_a , past_key_values=_a )[0] self.parent.assertEqual(next_tokens.shape[1] , output_from_past.shape[1] ) # select random slice snake_case__ = int(ids_tensor((1,) , output_from_past.shape[-1] ) ) snake_case__ = output_from_no_past[:, -3:, random_slice_idx] snake_case__ = output_from_past[:, :, random_slice_idx] # test that outputs are equal for slice tf.debugging.assert_near(_a , _a , rtol=1e-3 ) def SCREAMING_SNAKE_CASE ( __lowerCAmelCase , __lowerCAmelCase , 
__lowerCAmelCase , __lowerCAmelCase=None , __lowerCAmelCase=None , __lowerCAmelCase=None , __lowerCAmelCase=None , __lowerCAmelCase=None , ) -> Tuple: if attention_mask is None: snake_case__ = tf.cast(tf.math.not_equal(__lowerCAmelCase , config.pad_token_id ) , tf.inta ) if decoder_attention_mask is None: snake_case__ = tf.concat( [ tf.ones(decoder_input_ids[:, :1].shape , dtype=tf.inta ), tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:] , config.pad_token_id ) , tf.inta ), ] , axis=-1 , ) if head_mask is None: snake_case__ = tf.ones((config.encoder_layers, config.encoder_attention_heads) ) if decoder_head_mask is None: snake_case__ = tf.ones((config.decoder_layers, config.decoder_attention_heads) ) if cross_attn_head_mask is None: snake_case__ = tf.ones((config.decoder_layers, config.decoder_attention_heads) ) return { "input_ids": input_ids, "decoder_input_ids": decoder_input_ids, "attention_mask": attention_mask, "decoder_attention_mask": decoder_attention_mask, "head_mask": head_mask, "decoder_head_mask": decoder_head_mask, "cross_attn_head_mask": cross_attn_head_mask, } @require_tf class __magic_name__ (snake_case_ ,snake_case_ ,unittest.TestCase ): '''simple docstring''' __lowercase : List[str] = (TFBlenderbotForConditionalGeneration, TFBlenderbotModel) if is_tf_available() else () __lowercase : Any = (TFBlenderbotForConditionalGeneration,) if is_tf_available() else () __lowercase : Tuple = ( { 'conversational': TFBlenderbotForConditionalGeneration, 'feature-extraction': TFBlenderbotModel, 'summarization': TFBlenderbotForConditionalGeneration, 'text2text-generation': TFBlenderbotForConditionalGeneration, 'translation': TFBlenderbotForConditionalGeneration, } if is_tf_available() else {} ) __lowercase : Any = True __lowercase : int = False __lowercase : int = False def SCREAMING_SNAKE_CASE__ ( self:Union[str, Any] ): snake_case__ = TFBlenderbotModelTester(self ) snake_case__ = ConfigTester(self , config_class=_a ) def SCREAMING_SNAKE_CASE__ ( 
self:Optional[Any] ): self.config_tester.run_common_tests() def SCREAMING_SNAKE_CASE__ ( self:Optional[Any] ): snake_case__ = self.model_tester.prepare_config_and_inputs_for_common() self.model_tester.check_decoder_model_past_large_inputs(*_a ) @require_tokenizers @require_tf class __magic_name__ (unittest.TestCase ): '''simple docstring''' __lowercase : Optional[int] = ['My friends are cool but they eat too many carbs.'] __lowercase : Optional[int] = 'facebook/blenderbot-400M-distill' @cached_property def SCREAMING_SNAKE_CASE__ ( self:Tuple ): return BlenderbotTokenizer.from_pretrained(self.model_name ) @cached_property def SCREAMING_SNAKE_CASE__ ( self:Union[str, Any] ): snake_case__ = TFAutoModelForSeqaSeqLM.from_pretrained(self.model_name ) return model @slow def SCREAMING_SNAKE_CASE__ ( self:List[Any] ): snake_case__ = self.tokenizer(self.src_text , return_tensors='''tf''' ) snake_case__ = self.model.generate( model_inputs.input_ids , ) snake_case__ = self.tokenizer.batch_decode(generated_ids.numpy() , skip_special_tokens=_a )[0] assert ( generated_words == " That's unfortunate. Are they trying to lose weight or are they just trying to be healthier?" )
33
1
import warnings

from ...utils import logging
from .image_processing_flava import FlavaImageProcessor


logger = logging.get_logger(__name__)


class __magic_name__(FlavaImageProcessor):
    """Deprecated alias of `FlavaImageProcessor`; warns on construction.

    NOTE(review): the original declared ``def __init__(self, *_a, **_a)`` —
    a duplicate-argument SyntaxError — passed the bogus ``_a`` where the
    warning category belongs, and subclassed the undefined placeholder
    ``snake_case_``.  The base is the ``FlavaImageProcessor`` imported above
    and the standard ``FutureWarning`` category is restored.
    """

    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class FlavaFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use FlavaImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
33
import json
import sys
import tempfile
import unittest
from pathlib import Path

import transformers
from transformers import (
    CONFIG_MAPPING,
    IMAGE_PROCESSOR_MAPPING,
    AutoConfig,
    AutoImageProcessor,
    CLIPConfig,
    CLIPImageProcessor,
)
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER


sys.path.append(str(Path(__file__).parent.parent.parent.parent / "utils"))

from test_module.custom_configuration import CustomConfig  # noqa E402
from test_module.custom_image_processing import CustomImageProcessor  # noqa E402


# NOTE(review): in the original every test method was named
# ``SCREAMING_SNAKE_CASE__`` (so only the last survived), locals were collapsed
# to ``snake_case__``, and the expected exception classes were the undefined
# ``_a``.  Names and exception types are restored; the restored
# EnvironmentError / ValueError choices mirror the upstream auto-image-processor
# tests — confirm against the project's error contract.
class AutoImageProcessorTest(unittest.TestCase):
    def setUp(self):
        # Disable the interactive remote-code prompt so tests fail fast.
        transformers.dynamic_module_utils.TIME_OUT_REMOTE_CODE = 0

    def test_image_processor_from_model_shortcut(self):
        config = AutoImageProcessor.from_pretrained("openai/clip-vit-base-patch32")
        self.assertIsInstance(config, CLIPImageProcessor)

    def test_image_processor_from_local_directory_from_key(self):
        with tempfile.TemporaryDirectory() as tmpdirname:
            processor_tmpfile = Path(tmpdirname) / "preprocessor_config.json"
            config_tmpfile = Path(tmpdirname) / "config.json"
            json.dump(
                {"image_processor_type": "CLIPImageProcessor", "processor_class": "CLIPProcessor"},
                open(processor_tmpfile, "w"),
            )
            json.dump({"model_type": "clip"}, open(config_tmpfile, "w"))

            config = AutoImageProcessor.from_pretrained(tmpdirname)
            self.assertIsInstance(config, CLIPImageProcessor)

    def test_image_processor_from_local_directory_from_feature_extractor_key(self):
        # Ensure we can load the image processor from the feature extractor config
        with tempfile.TemporaryDirectory() as tmpdirname:
            processor_tmpfile = Path(tmpdirname) / "preprocessor_config.json"
            config_tmpfile = Path(tmpdirname) / "config.json"
            json.dump(
                {"feature_extractor_type": "CLIPFeatureExtractor", "processor_class": "CLIPProcessor"},
                open(processor_tmpfile, "w"),
            )
            json.dump({"model_type": "clip"}, open(config_tmpfile, "w"))

            config = AutoImageProcessor.from_pretrained(tmpdirname)
            self.assertIsInstance(config, CLIPImageProcessor)

    def test_image_processor_from_local_directory_from_config(self):
        with tempfile.TemporaryDirectory() as tmpdirname:
            model_config = CLIPConfig()

            # Create a dummy config file with image_proceesor_type
            processor_tmpfile = Path(tmpdirname) / "preprocessor_config.json"
            config_tmpfile = Path(tmpdirname) / "config.json"
            json.dump(
                {"image_processor_type": "CLIPImageProcessor", "processor_class": "CLIPProcessor"},
                open(processor_tmpfile, "w"),
            )
            json.dump({"model_type": "clip"}, open(config_tmpfile, "w"))

            # remove image_processor_type to make sure config.json alone is enough to load image processor locally
            config_dict = AutoImageProcessor.from_pretrained(tmpdirname).to_dict()

            config_dict.pop("image_processor_type")
            config = CLIPImageProcessor(**config_dict)

            # save in new folder
            model_config.save_pretrained(tmpdirname)
            config.save_pretrained(tmpdirname)

            config = AutoImageProcessor.from_pretrained(tmpdirname)

            # make sure private variable is not incorrectly saved
            dict_as_saved = json.loads(config.to_json_string())
            self.assertTrue("_processor_class" not in dict_as_saved)

        self.assertIsInstance(config, CLIPImageProcessor)

    def test_image_processor_from_local_file(self):
        with tempfile.TemporaryDirectory() as tmpdirname:
            processor_tmpfile = Path(tmpdirname) / "preprocessor_config.json"
            json.dump(
                {"image_processor_type": "CLIPImageProcessor", "processor_class": "CLIPProcessor"},
                open(processor_tmpfile, "w"),
            )

            config = AutoImageProcessor.from_pretrained(processor_tmpfile)
            self.assertIsInstance(config, CLIPImageProcessor)

    def test_repo_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError, "clip-base is not a local folder and is not a valid model identifier"
        ):
            _ = AutoImageProcessor.from_pretrained("clip-base")

    def test_revision_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError, r"aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)"
        ):
            _ = AutoImageProcessor.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER, revision="aaaaaa")

    def test_image_processor_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError,
            "hf-internal-testing/config-no-model does not appear to have a file named preprocessor_config.json.",
        ):
            _ = AutoImageProcessor.from_pretrained("hf-internal-testing/config-no-model")

    def test_from_pretrained_dynamic_image_processor(self):
        # If remote code is not set, we will time out when asking whether to load the model.
        with self.assertRaises(ValueError):
            image_processor = AutoImageProcessor.from_pretrained("hf-internal-testing/test_dynamic_image_processor")
        # If remote code is disabled, we can't load this config.
        with self.assertRaises(ValueError):
            image_processor = AutoImageProcessor.from_pretrained(
                "hf-internal-testing/test_dynamic_image_processor", trust_remote_code=False
            )

        image_processor = AutoImageProcessor.from_pretrained(
            "hf-internal-testing/test_dynamic_image_processor", trust_remote_code=True
        )
        self.assertEqual(image_processor.__class__.__name__, "NewImageProcessor")

        # Test image processor can be reloaded.
        with tempfile.TemporaryDirectory() as tmp_dir:
            image_processor.save_pretrained(tmp_dir)
            reloaded_image_processor = AutoImageProcessor.from_pretrained(tmp_dir, trust_remote_code=True)
        self.assertEqual(reloaded_image_processor.__class__.__name__, "NewImageProcessor")

    def test_new_image_processor_registration(self):
        try:
            AutoConfig.register("custom", CustomConfig)
            AutoImageProcessor.register(CustomConfig, CustomImageProcessor)
            # Trying to register something existing in the Transformers library will raise an error
            with self.assertRaises(ValueError):
                AutoImageProcessor.register(CLIPConfig, CLIPImageProcessor)

            with tempfile.TemporaryDirectory() as tmpdirname:
                processor_tmpfile = Path(tmpdirname) / "preprocessor_config.json"
                config_tmpfile = Path(tmpdirname) / "config.json"
                json.dump(
                    {"feature_extractor_type": "CLIPFeatureExtractor", "processor_class": "CLIPProcessor"},
                    open(processor_tmpfile, "w"),
                )
                json.dump({"model_type": "clip"}, open(config_tmpfile, "w"))

                image_processor = CustomImageProcessor.from_pretrained(tmpdirname)

            # Now that the config is registered, it can be used as any other config with the auto-API
            with tempfile.TemporaryDirectory() as tmp_dir:
                image_processor.save_pretrained(tmp_dir)
                new_image_processor = AutoImageProcessor.from_pretrained(tmp_dir)
                self.assertIsInstance(new_image_processor, CustomImageProcessor)

        finally:
            if "custom" in CONFIG_MAPPING._extra_content:
                del CONFIG_MAPPING._extra_content["custom"]
            if CustomConfig in IMAGE_PROCESSOR_MAPPING._extra_content:
                del IMAGE_PROCESSOR_MAPPING._extra_content[CustomConfig]

    def test_from_pretrained_dynamic_image_processor_conflict(self):
        class NewImageProcessor(CLIPImageProcessor):
            is_local = True

        try:
            AutoConfig.register("custom", CustomConfig)
            AutoImageProcessor.register(CustomConfig, NewImageProcessor)
            # If remote code is not set, the default is to use local
            image_processor = AutoImageProcessor.from_pretrained("hf-internal-testing/test_dynamic_image_processor")
            self.assertEqual(image_processor.__class__.__name__, "NewImageProcessor")
            self.assertTrue(image_processor.is_local)
            # If remote code is disabled, we load the local one.
            image_processor = AutoImageProcessor.from_pretrained(
                "hf-internal-testing/test_dynamic_image_processor", trust_remote_code=False
            )
            self.assertEqual(image_processor.__class__.__name__, "NewImageProcessor")
            self.assertTrue(image_processor.is_local)
            # If remote is enabled, we load from the Hub
            image_processor = AutoImageProcessor.from_pretrained(
                "hf-internal-testing/test_dynamic_image_processor", trust_remote_code=True
            )
            self.assertEqual(image_processor.__class__.__name__, "NewImageProcessor")
            self.assertTrue(not hasattr(image_processor, "is_local"))
        finally:
            if "custom" in CONFIG_MAPPING._extra_content:
                del CONFIG_MAPPING._extra_content["custom"]
            if CustomConfig in IMAGE_PROCESSOR_MAPPING._extra_content:
                del IMAGE_PROCESSOR_MAPPING._extra_content[CustomConfig]
33
1
from typing import List, Optional, Union

from ...image_utils import ImageInput
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType


class __magic_name__(ProcessorMixin):
    """Wraps a BLIP image processor and a tokenizer into a single processor.

    NOTE(review): every parameter of the original was named ``_a`` (a
    duplicate-argument SyntaxError), the base was the undefined placeholder
    ``snake_case_`` (restored to the ``ProcessorMixin`` imported above), and
    local assignments were collapsed.  Names are restored following the BLIP-2
    processor this mirrors — confirm parameter defaults against the tokenizer
    contract.
    """

    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "BlipImageProcessor"
    tokenizer_class = "AutoTokenizer"

    def __init__(self, image_processor, tokenizer):
        # NOTE(review): the original's bare ``= False`` restored as disabling
        # token_type_ids on the tokenizer (text inputs here never use them).
        tokenizer.return_token_type_ids = False
        super().__init__(image_processor, tokenizer)
        self.current_processor = self.image_processor

    def __call__(
        self,
        images: ImageInput = None,
        text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None,
        add_special_tokens: bool = True,
        padding: Union[bool, str, PaddingStrategy] = False,
        truncation: Union[bool, str, TruncationStrategy] = None,
        max_length: Optional[int] = None,
        stride: int = 0,
        pad_to_multiple_of: Optional[int] = None,
        return_attention_mask: Optional[bool] = None,
        return_overflowing_tokens: bool = False,
        return_special_tokens_mask: bool = False,
        return_offsets_mapping: bool = False,
        return_token_type_ids: bool = False,
        return_length: bool = False,
        verbose: bool = True,
        return_tensors: Optional[Union[str, TensorType]] = None,
        **kwargs,
    ) -> BatchEncoding:
        """Tokenize ``text`` and/or preprocess ``images``; at least one required.

        Returns the tokenizer encoding when only text is given, otherwise the
        image-processor encoding (with the text encoding merged in when both
        are given).

        Raises:
            ValueError: if both ``images`` and ``text`` are None.
        """
        if images is None and text is None:
            raise ValueError("You have to specify either images or text.")

        # Get only text
        if images is None:
            self.current_processor = self.tokenizer
            text_encoding = self.tokenizer(
                text=text,
                add_special_tokens=add_special_tokens,
                padding=padding,
                truncation=truncation,
                max_length=max_length,
                stride=stride,
                pad_to_multiple_of=pad_to_multiple_of,
                return_attention_mask=return_attention_mask,
                return_overflowing_tokens=return_overflowing_tokens,
                return_special_tokens_mask=return_special_tokens_mask,
                return_offsets_mapping=return_offsets_mapping,
                return_token_type_ids=return_token_type_ids,
                return_length=return_length,
                verbose=verbose,
                return_tensors=return_tensors,
                **kwargs,
            )
            return text_encoding

        # add pixel_values
        encoding_image_processor = self.image_processor(images, return_tensors=return_tensors)

        if text is not None:
            text_encoding = self.tokenizer(
                text=text,
                add_special_tokens=add_special_tokens,
                padding=padding,
                truncation=truncation,
                max_length=max_length,
                stride=stride,
                pad_to_multiple_of=pad_to_multiple_of,
                return_attention_mask=return_attention_mask,
                return_overflowing_tokens=return_overflowing_tokens,
                return_special_tokens_mask=return_special_tokens_mask,
                return_offsets_mapping=return_offsets_mapping,
                return_token_type_ids=return_token_type_ids,
                return_length=return_length,
                verbose=verbose,
                return_tensors=return_tensors,
                **kwargs,
            )
        else:
            text_encoding = None

        if text_encoding is not None:
            encoding_image_processor.update(text_encoding)

        return encoding_image_processor

    def batch_decode(self, *args, **kwargs):
        """Forward to the tokenizer's ``batch_decode``."""
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        """Forward to the tokenizer's ``decode``."""
        return self.tokenizer.decode(*args, **kwargs)

    @property
    # Copied from transformers.models.blip.processing_blip.BlipProcessor.model_input_names
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))
33
import argparse
import json
from pathlib import Path

import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image

from transformers import DeiTImageProcessor, ViTConfig, ViTForImageClassification, ViTImageProcessor, ViTModel
from transformers.utils import logging


logging.set_verbosity_info()
logger = logging.get_logger(__name__)


# NOTE(review): in the original, every function parameter was ``__lowerCAmelCase``
# (duplicate-argument SyntaxErrors) and all locals were collapsed to
# ``snake_case__`` (e.g. id2label/label2id became one name).  Names restored.
def create_rename_keys(config, base_model=False):
    """List (old_key, new_key) pairs mapping timm ViT weights to our ViT."""
    rename_keys = []
    for i in range(config.num_hidden_layers):
        # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
        rename_keys.append((f"blocks.{i}.norm1.weight", f"vit.encoder.layer.{i}.layernorm_before.weight"))
        rename_keys.append((f"blocks.{i}.norm1.bias", f"vit.encoder.layer.{i}.layernorm_before.bias"))
        rename_keys.append((f"blocks.{i}.attn.proj.weight", f"vit.encoder.layer.{i}.attention.output.dense.weight"))
        rename_keys.append((f"blocks.{i}.attn.proj.bias", f"vit.encoder.layer.{i}.attention.output.dense.bias"))
        rename_keys.append((f"blocks.{i}.norm2.weight", f"vit.encoder.layer.{i}.layernorm_after.weight"))
        rename_keys.append((f"blocks.{i}.norm2.bias", f"vit.encoder.layer.{i}.layernorm_after.bias"))
        rename_keys.append((f"blocks.{i}.mlp.fc1.weight", f"vit.encoder.layer.{i}.intermediate.dense.weight"))
        rename_keys.append((f"blocks.{i}.mlp.fc1.bias", f"vit.encoder.layer.{i}.intermediate.dense.bias"))
        rename_keys.append((f"blocks.{i}.mlp.fc2.weight", f"vit.encoder.layer.{i}.output.dense.weight"))
        rename_keys.append((f"blocks.{i}.mlp.fc2.bias", f"vit.encoder.layer.{i}.output.dense.bias"))

    # projection layer + position embeddings
    rename_keys.extend(
        [
            ("cls_token", "vit.embeddings.cls_token"),
            ("patch_embed.proj.weight", "vit.embeddings.patch_embeddings.projection.weight"),
            ("patch_embed.proj.bias", "vit.embeddings.patch_embeddings.projection.bias"),
            ("pos_embed", "vit.embeddings.position_embeddings"),
        ]
    )

    if base_model:
        # layernorm + pooler
        rename_keys.extend(
            [
                ("norm.weight", "layernorm.weight"),
                ("norm.bias", "layernorm.bias"),
                ("pre_logits.fc.weight", "pooler.dense.weight"),
                ("pre_logits.fc.bias", "pooler.dense.bias"),
            ]
        )
        # if just the base model, we should remove "vit" from all keys that start with "vit"
        rename_keys = [(pair[0], pair[1][4:]) if pair[1].startswith("vit") else pair for pair in rename_keys]
    else:
        # layernorm + classification head
        rename_keys.extend(
            [
                ("norm.weight", "vit.layernorm.weight"),
                ("norm.bias", "vit.layernorm.bias"),
                ("head.weight", "classifier.weight"),
                ("head.bias", "classifier.bias"),
            ]
        )

    return rename_keys


def read_in_q_k_v(state_dict, config, base_model=False):
    """Split timm's fused qkv projection into separate q/k/v weights in place."""
    for i in range(config.num_hidden_layers):
        if base_model:
            prefix = ""
        else:
            prefix = "vit."
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"blocks.{i}.attn.qkv.weight")
        in_proj_bias = state_dict.pop(f"blocks.{i}.attn.qkv.bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[: config.hidden_size]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-config.hidden_size :]


def remove_classification_head_(state_dict):
    """Drop timm's classification head weights (used for the base model)."""
    ignore_keys = ["head.weight", "head.bias"]
    for k in ignore_keys:
        state_dict.pop(k, None)


def rename_key(dct, old, new):
    """Move ``dct[old]`` to ``dct[new]``."""
    val = dct.pop(old)
    dct[new] = val


def prepare_img():
    """Download the standard COCO cats image used to verify conversions."""
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im


@torch.no_grad()
def convert_vit_checkpoint(vit_name, pytorch_dump_folder_path):
    """Copy/paste/tweak a timm ViT checkpoint into our ViT structure and save it."""
    # define default ViT configuration
    config = ViTConfig()
    base_model = False
    # dataset (ImageNet-21k only or also fine-tuned on ImageNet 2012), patch_size and image_size
    if vit_name[-5:] == "in21k":
        base_model = True
        config.patch_size = int(vit_name[-12:-10])
        config.image_size = int(vit_name[-9:-6])
    else:
        config.num_labels = 1000
        repo_id = "huggingface/label-files"
        filename = "imagenet-1k-id2label.json"
        id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}
        config.patch_size = int(vit_name[-6:-4])
        config.image_size = int(vit_name[-3:])
    # size of the architecture
    if "deit" in vit_name:
        if vit_name[9:].startswith("tiny"):
            config.hidden_size = 192
            config.intermediate_size = 768
            config.num_hidden_layers = 12
            config.num_attention_heads = 3
        elif vit_name[9:].startswith("small"):
            config.hidden_size = 384
            config.intermediate_size = 1536
            config.num_hidden_layers = 12
            config.num_attention_heads = 6
        else:
            pass
    else:
        if vit_name[4:].startswith("small"):
            config.hidden_size = 768
            config.intermediate_size = 2304
            config.num_hidden_layers = 8
            config.num_attention_heads = 8
        elif vit_name[4:].startswith("base"):
            pass
        elif vit_name[4:].startswith("large"):
            config.hidden_size = 1024
            config.intermediate_size = 4096
            config.num_hidden_layers = 24
            config.num_attention_heads = 16
        elif vit_name[4:].startswith("huge"):
            config.hidden_size = 1280
            config.intermediate_size = 5120
            config.num_hidden_layers = 32
            config.num_attention_heads = 16

    # load original model from timm
    timm_model = timm.create_model(vit_name, pretrained=True)
    timm_model.eval()

    # load state_dict of original model, remove and rename some keys
    state_dict = timm_model.state_dict()
    if base_model:
        remove_classification_head_(state_dict)
    rename_keys = create_rename_keys(config, base_model)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, config, base_model)

    # load HuggingFace model
    if vit_name[-5:] == "in21k":
        model = ViTModel(config).eval()
    else:
        model = ViTForImageClassification(config).eval()
    model.load_state_dict(state_dict)

    # Check outputs on an image, prepared by ViTImageProcessor/DeiTImageProcessor
    if "deit" in vit_name:
        image_processor = DeiTImageProcessor(size=config.image_size)
    else:
        image_processor = ViTImageProcessor(size=config.image_size)
    encoding = image_processor(images=prepare_img(), return_tensors="pt")
    pixel_values = encoding["pixel_values"]
    outputs = model(pixel_values)

    if base_model:
        timm_pooled_output = timm_model.forward_features(pixel_values)
        assert timm_pooled_output.shape == outputs.pooler_output.shape
        assert torch.allclose(timm_pooled_output, outputs.pooler_output, atol=1e-3)
    else:
        timm_logits = timm_model(pixel_values)
        assert timm_logits.shape == outputs.logits.shape
        assert torch.allclose(timm_logits, outputs.logits, atol=1e-3)

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model {vit_name} to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--vit_name",
        default="vit_base_patch16_224",
        type=str,
        help="Name of the ViT timm model you'd like to convert.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
    )

    args = parser.parse_args()
    convert_vit_checkpoint(args.vit_name, args.pytorch_dump_folder_path)
33
1
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_sentencepiece_available,
    is_tf_available,
    is_tokenizers_available,
    is_torch_available,
)


# Lazy import structure: maps submodule name -> list of public names it exports.
# Entries guarded by a backend probe are only registered when the optional
# dependency (sentencepiece / tokenizers / torch / tensorflow) is installed.
_import_structure = {"configuration_xlnet": ["XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP", "XLNetConfig"]}

try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_xlnet"] = ["XLNetTokenizer"]

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_xlnet_fast"] = ["XLNetTokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_xlnet"] = [
        "XLNET_PRETRAINED_MODEL_ARCHIVE_LIST",
        "XLNetForMultipleChoice",
        "XLNetForQuestionAnswering",
        "XLNetForQuestionAnsweringSimple",
        "XLNetForSequenceClassification",
        "XLNetForTokenClassification",
        "XLNetLMHeadModel",
        "XLNetModel",
        "XLNetPreTrainedModel",
        "load_tf_weights_in_xlnet",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_xlnet"] = [
        "TF_XLNET_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFXLNetForMultipleChoice",
        "TFXLNetForQuestionAnsweringSimple",
        "TFXLNetForSequenceClassification",
        "TFXLNetForTokenClassification",
        "TFXLNetLMHeadModel",
        "TFXLNetMainLayer",
        "TFXLNetModel",
        "TFXLNetPreTrainedModel",
    ]


if TYPE_CHECKING:
    # Static type checkers see the real (eager) imports.
    from .configuration_xlnet import XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP, XLNetConfig

    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_xlnet import XLNetTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_xlnet_fast import XLNetTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_xlnet import (
            XLNET_PRETRAINED_MODEL_ARCHIVE_LIST,
            XLNetForMultipleChoice,
            XLNetForQuestionAnswering,
            XLNetForQuestionAnsweringSimple,
            XLNetForSequenceClassification,
            XLNetForTokenClassification,
            XLNetLMHeadModel,
            XLNetModel,
            XLNetPreTrainedModel,
            load_tf_weights_in_xlnet,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_xlnet import (
            TF_XLNET_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFXLNetForMultipleChoice,
            TFXLNetForQuestionAnsweringSimple,
            TFXLNetForSequenceClassification,
            TFXLNetForTokenClassification,
            TFXLNetLMHeadModel,
            TFXLNetMainLayer,
            TFXLNetModel,
            TFXLNetPreTrainedModel,
        )

else:
    import sys

    # At runtime, replace this module with a lazy proxy so the heavy backends
    # above are only imported on first attribute access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
33
import re
import warnings
from contextlib import contextmanager

from ...processing_utils import ProcessorMixin


class __magic_name__(ProcessorMixin):
    """Donut-style processor wrapping an image processor and a tokenizer.

    `__call__` routes `images` to the image processor and `text` to the
    tokenizer; when both are given, the tokenized ids are attached to the
    image features as `labels`.  `tokenajson` parses a generated
    `<s_key>...</s_key>` token sequence back into a (possibly nested) dict.
    """

    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "AutoImageProcessor"
    tokenizer_class = "AutoTokenizer"

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        # Backward compatibility: accept the deprecated `feature_extractor` kwarg.
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)
        # `current_processor` is what `__call__` delegates to while inside the
        # deprecated `as_target_processor` context manager.
        self.current_processor = self.image_processor
        self._in_target_context_manager = False

    def __call__(self, *args, **kwargs):
        # For backward compatibility
        if self._in_target_context_manager:
            return self.current_processor(*args, **kwargs)

        images = kwargs.pop("images", None)
        text = kwargs.pop("text", None)
        if len(args) > 0:
            # Positional fallback: first positional argument is the images.
            images = args[0]
            args = args[1:]

        if images is None and text is None:
            raise ValueError("You need to specify either an `images` or `text` input to process.")

        if images is not None:
            inputs = self.image_processor(images, *args, **kwargs)
        if text is not None:
            encodings = self.tokenizer(text, **kwargs)

        if text is None:
            return inputs
        elif images is None:
            return encodings
        else:
            # Both modalities: token ids become the training labels.
            inputs["labels"] = encodings["input_ids"]
            return inputs

    def batch_decode(self, *args, **kwargs):
        """Forward everything to the tokenizer's `batch_decode`."""
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        """Forward everything to the tokenizer's `decode`."""
        return self.tokenizer.decode(*args, **kwargs)

    @contextmanager
    def as_target_processor(self):
        """Deprecated: temporarily make `__call__` delegate to the tokenizer."""
        warnings.warn(
            "`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your "
            "labels by using the argument `text` of the regular `__call__` method (either in the same call as "
            "your images inputs, or in a separate call."
        )
        self._in_target_context_manager = True
        self.current_processor = self.tokenizer
        yield
        self.current_processor = self.image_processor
        self._in_target_context_manager = False

    def tokenajson(self, tokens, is_inner_value=False, added_vocab=None):
        """Convert a `<s_key>value</s_key>`-tagged token string into a dict.

        Nested tags recurse; `<sep/>`-separated leaves become lists; special
        tokens from `added_vocab` of the form `<x/>` are unwrapped to `x`.
        Returns `{"text_sequence": tokens}` when no tags are found at the top
        level, and a list when called on an inner value.
        """
        if added_vocab is None:
            added_vocab = self.tokenizer.get_added_vocab()

        output = {}

        while tokens:
            start_token = re.search(r"<s_(.*?)>", tokens, re.IGNORECASE)
            if start_token is None:
                break
            key = start_token.group(1)
            end_token = re.search(rf"</s_{key}>", tokens, re.IGNORECASE)
            start_token = start_token.group()
            if end_token is None:
                # Unbalanced opening tag: drop it and keep scanning.
                tokens = tokens.replace(start_token, "")
            else:
                end_token = end_token.group()
                start_token_escaped = re.escape(start_token)
                end_token_escaped = re.escape(end_token)
                content = re.search(f"{start_token_escaped}(.*?){end_token_escaped}", tokens, re.IGNORECASE)
                if content is not None:
                    content = content.group(1).strip()
                    if r"<s_" in content and r"</s_" in content:
                        # non-leaf node: recurse into the nested structure
                        value = self.tokenajson(content, is_inner_value=True, added_vocab=added_vocab)
                        if value:
                            if len(value) == 1:
                                value = value[0]
                            output[key] = value
                    else:  # leaf nodes
                        output[key] = []
                        for leaf in content.split(r"<sep/>"):
                            leaf = leaf.strip()
                            if leaf in added_vocab and leaf[0] == "<" and leaf[-2:] == "/>":
                                leaf = leaf[1:-2]  # for categorical special tokens
                            output[key].append(leaf)
                        if len(output[key]) == 1:
                            output[key] = output[key][0]

                tokens = tokens[tokens.find(end_token) + len(end_token) :].strip()
                if tokens[:6] == r"<sep/>":  # non-leaf nodes
                    return [output] + self.tokenajson(tokens[6:], is_inner_value=True, added_vocab=added_vocab)

        if len(output):
            return [output] if is_inner_value else output
        else:
            return [] if is_inner_value else {"text_sequence": tokens}

    @property
    def feature_extractor_class(self):
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
            FutureWarning,
        )
        return self.image_processor_class

    @property
    def feature_extractor(self):
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.",
            FutureWarning,
        )
        return self.image_processor
33
1
import pytest

from datasets.utils.sharding import _distribute_shards, _number_of_shards_in_gen_kwargs, _split_gen_kwargs


@pytest.mark.parametrize(
    "kwargs, expected",
    [
        ({"num_shards": 0, "max_num_jobs": 1}, []),
        ({"num_shards": 10, "max_num_jobs": 1}, [range(10)]),
        ({"num_shards": 10, "max_num_jobs": 10}, [range(i, i + 1) for i in range(10)]),
        ({"num_shards": 1, "max_num_jobs": 10}, [range(1)]),
        ({"num_shards": 10, "max_num_jobs": 3}, [range(0, 4), range(4, 7), range(7, 10)]),
        ({"num_shards": 3, "max_num_jobs": 10}, [range(0, 1), range(1, 2), range(2, 3)]),
    ],
)
def test_distribute_shards(kwargs, expected):
    """Shards are split into at most `max_num_jobs` contiguous ranges."""
    out = _distribute_shards(**kwargs)
    assert out == expected


@pytest.mark.parametrize(
    "gen_kwargs, max_num_jobs, expected",
    [
        ({"foo": 0}, 10, [{"foo": 0}]),
        ({"shards": [0, 1, 2, 3]}, 1, [{"shards": [0, 1, 2, 3]}]),
        ({"shards": [0, 1, 2, 3]}, 4, [{"shards": [0]}, {"shards": [1]}, {"shards": [2]}, {"shards": [3]}]),
        ({"shards": [0, 1]}, 4, [{"shards": [0]}, {"shards": [1]}]),
        ({"shards": [0, 1, 2, 3]}, 2, [{"shards": [0, 1]}, {"shards": [2, 3]}]),
    ],
)
def test_split_gen_kwargs(gen_kwargs, max_num_jobs, expected):
    """List-valued gen_kwargs are sharded; scalar kwargs are replicated."""
    out = _split_gen_kwargs(gen_kwargs, max_num_jobs)
    assert out == expected


@pytest.mark.parametrize(
    "gen_kwargs, expected",
    [
        ({"foo": 0}, 1),
        ({"shards": [0]}, 1),
        ({"shards": [0, 1, 2, 3]}, 4),
        ({"shards": [0, 1, 2, 3], "foo": 0}, 4),
        ({"shards": [0, 1, 2, 3], "other": (0, 1)}, 4),
        # Two list arguments of different lengths are ambiguous -> error.
        ({"shards": [0, 1, 2, 3], "shards2": [0, 1]}, RuntimeError),
    ],
)
def test_number_of_shards_in_gen_kwargs(gen_kwargs, expected):
    if expected is RuntimeError:
        with pytest.raises(expected):
            _number_of_shards_in_gen_kwargs(gen_kwargs)
    else:
        out = _number_of_shards_in_gen_kwargs(gen_kwargs)
        assert out == expected
33
import inspect
import unittest

from transformers import BitConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available

from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin


if is_torch_available():
    import torch
    from torch import nn

    from transformers import BitBackbone, BitForImageClassification, BitImageProcessor, BitModel
    from transformers.models.bit.modeling_bit import BIT_PRETRAINED_MODEL_ARCHIVE_LIST

if is_vision_available():
    from PIL import Image


# NOTE(review): identifiers in this file look mechanically mangled — every
# class is `__magic_name__`, every method `SCREAMING_SNAKE_CASE__`, every local
# `snake_case__`, and several signatures repeat the parameter name `_a`
# (duplicate parameter names are a SyntaxError in Python).  Bodies reference
# names the signatures never bind (e.g. `parent`, `batch_size` below), so this
# file cannot run as-is.  Comments describe the apparent intent only.
# Helper that builds a BitConfig plus dummy inputs for the model tests.
class __magic_name__ :
    '''simple docstring'''

    # NOTE(review): parameters were presumably (parent, batch_size=3,
    # image_size=32, num_channels=3, embeddings_size=10, hidden_sizes=...,
    # depths=..., is_training, use_labels, hidden_act, num_labels, scope,
    # out_features, out_indices, num_groups) — TODO confirm against upstream.
    def __init__( self:Optional[Any] , _a:int , _a:str=3 , _a:Optional[int]=32 , _a:Optional[Any]=3 , _a:Tuple=10 , _a:List[Any]=[8, 16, 32, 64] , _a:str=[1, 1, 2, 1] , _a:Any=True , _a:List[Any]=True , _a:List[str]="relu" , _a:int=3 , _a:Tuple=None , _a:Tuple=["stage2", "stage3", "stage4"] , _a:List[Any]=[2, 3, 4] , _a:Union[str, Any]=1 , ):
        # Store the tester configuration used to build configs and dummy inputs.
        snake_case__ = parent
        snake_case__ = batch_size
        snake_case__ = image_size
        snake_case__ = num_channels
        snake_case__ = embeddings_size
        snake_case__ = hidden_sizes
        snake_case__ = depths
        snake_case__ = is_training
        snake_case__ = use_labels
        snake_case__ = hidden_act
        snake_case__ = num_labels
        snake_case__ = scope
        snake_case__ = len(_a )
        snake_case__ = out_features
        snake_case__ = out_indices
        snake_case__ = num_groups

    # Builds random pixel_values (and labels when use_labels) plus a config.
    def SCREAMING_SNAKE_CASE__ ( self:int ):
        snake_case__ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
        snake_case__ = None
        if self.use_labels:
            snake_case__ = ids_tensor([self.batch_size] , self.num_labels )
        snake_case__ = self.get_config()
        return config, pixel_values, labels

    # Returns a BitConfig assembled from the tester attributes.
    def SCREAMING_SNAKE_CASE__ ( self:List[Any] ):
        return BitConfig(
            num_channels=self.num_channels , embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , out_features=self.out_features , out_indices=self.out_indices , num_groups=self.num_groups , )

    # Checks BitModel's last_hidden_state shape (stride-32 downsampling).
    def SCREAMING_SNAKE_CASE__ ( self:Any , _a:Optional[int] , _a:Tuple , _a:int ):
        snake_case__ = BitModel(config=_a )
        model.to(_a )
        model.eval()
        snake_case__ = model(_a )
        self.parent.assertEqual(
            result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , )

    # Checks the classification head's logits shape.
    def SCREAMING_SNAKE_CASE__ ( self:int , _a:Tuple , _a:Any , _a:Union[str, Any] ):
        snake_case__ = self.num_labels
        snake_case__ = BitForImageClassification(_a )
        model.to(_a )
        model.eval()
        snake_case__ = model(_a , labels=_a )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )

    # Checks BitBackbone feature maps / channels, with and without out_features.
    def SCREAMING_SNAKE_CASE__ ( self:str , _a:str , _a:List[str] , _a:Any ):
        snake_case__ = BitBackbone(config=_a )
        model.to(_a )
        model.eval()
        snake_case__ = model(_a )
        # verify feature maps
        self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) )
        self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[1], 4, 4] )
        # verify channels
        self.parent.assertEqual(len(model.channels ) , len(config.out_features ) )
        self.parent.assertListEqual(model.channels , config.hidden_sizes[1:] )
        # verify backbone works with out_features=None
        snake_case__ = None
        snake_case__ = BitBackbone(config=_a )
        model.to(_a )
        model.eval()
        snake_case__ = model(_a )
        # verify feature maps
        self.parent.assertEqual(len(result.feature_maps ) , 1 )
        self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[-1], 1, 1] )
        # verify channels
        self.parent.assertEqual(len(model.channels ) , 1 )
        self.parent.assertListEqual(model.channels , [config.hidden_sizes[-1]] )

    # Repackages prepare_config_and_inputs() into the (config, inputs_dict)
    # shape expected by the common mixin tests.
    # NOTE(review): the triple unpack into one name keeps only the last value.
    def SCREAMING_SNAKE_CASE__ ( self:str ):
        snake_case__ = self.prepare_config_and_inputs()
        snake_case__ , snake_case__ , snake_case__ = config_and_inputs
        snake_case__ = {'''pixel_values''': pixel_values}
        return config, inputs_dict


# Common model-tester suite for the Bit family.
# NOTE(review): base classes `snake_case_` are unresolved mangled names —
# presumably ModelTesterMixin and PipelineTesterMixin. TODO confirm.
@require_torch
class __magic_name__ (snake_case_ ,snake_case_ ,unittest.TestCase ):
    '''simple docstring'''

    __lowercase : Any = (BitModel, BitForImageClassification, BitBackbone) if is_torch_available() else ()
    __lowercase : int = (
        {'feature-extraction': BitModel, 'image-classification': BitForImageClassification} if is_torch_available() else {}
    )
    # NOTE(review): these five flags were presumably distinct attributes
    # (fx_compatible, test_pruning, ...) — rebinding `__lowercase` keeps only
    # the last one.
    __lowercase : Tuple = False
    __lowercase : Optional[Any] = False
    __lowercase : str = False
    __lowercase : Tuple = False
    __lowercase : Tuple = False

    # setUp: build the model tester and a config tester.
    # NOTE(review): `BitModelTester` is not defined under that name here (the
    # tester class above was renamed to `__magic_name__`); `_a` is unbound.
    def SCREAMING_SNAKE_CASE__ ( self:Optional[Any] ):
        snake_case__ = BitModelTester(self )
        snake_case__ = ConfigTester(self , config_class=_a , has_text_modality=_a )

    # Runs the standard BitConfig round-trip checks.
    def SCREAMING_SNAKE_CASE__ ( self:Union[str, Any] ):
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def SCREAMING_SNAKE_CASE__ ( self:List[Any] ):
        return

    @unittest.skip(reason='''Bit does not output attentions''' )
    def SCREAMING_SNAKE_CASE__ ( self:Optional[int] ):
        pass

    @unittest.skip(reason='''Bit does not use inputs_embeds''' )
    def SCREAMING_SNAKE_CASE__ ( self:Tuple ):
        pass

    @unittest.skip(reason='''Bit does not support input and output embeddings''' )
    def SCREAMING_SNAKE_CASE__ ( self:Optional[int] ):
        pass

    # Checks that every model's forward signature starts with pixel_values.
    def SCREAMING_SNAKE_CASE__ ( self:str ):
        snake_case__ , snake_case__ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            snake_case__ = model_class(_a )
            snake_case__ = inspect.signature(model.forward )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            snake_case__ = [*signature.parameters.keys()]
            snake_case__ = ['''pixel_values''']
            self.assertListEqual(arg_names[:1] , _a )

    def SCREAMING_SNAKE_CASE__ ( self:List[Any] ):
        snake_case__ = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*_a )

    def SCREAMING_SNAKE_CASE__ ( self:Union[str, Any] ):
        snake_case__ = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_backbone(*_a )

    # Checks that normalization layers are initialized to weight=1 / bias=0.
    # NOTE(review): `nn.BatchNormad` does not exist — digit-mangled
    # `nn.BatchNorm2d` (the mangling replaces digits with 'a').
    def SCREAMING_SNAKE_CASE__ ( self:List[Any] ):
        snake_case__ , snake_case__ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            snake_case__ = model_class(config=_a )
            for name, module in model.named_modules():
                if isinstance(_a , (nn.BatchNormad, nn.GroupNorm) ):
                    self.assertTrue(
                        torch.all(module.weight == 1 ) , msg=F"""Parameter {name} of model {model_class} seems not properly initialized""" , )
                    self.assertTrue(
                        torch.all(module.bias == 0 ) , msg=F"""Parameter {name} of model {model_class} seems not properly initialized""" , )

    # Checks hidden-state count and spatial shape for both layer types.
    def SCREAMING_SNAKE_CASE__ ( self:List[Any] ):
        def check_hidden_states_output(_a:List[Any] , _a:int , _a:Union[str, Any] ):
            snake_case__ = model_class(_a )
            model.to(_a )
            model.eval()
            with torch.no_grad():
                snake_case__ = model(**self._prepare_for_class(_a , _a ) )
            snake_case__ = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
            snake_case__ = self.model_tester.num_stages
            self.assertEqual(len(_a ) , expected_num_stages + 1 )
            # Bit's feature maps are of shape (batch_size, num_channels, height, width)
            self.assertListEqual(
                list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , )

        snake_case__ , snake_case__ = self.model_tester.prepare_config_and_inputs_for_common()
        snake_case__ = ['''preactivation''', '''bottleneck''']
        for model_class in self.all_model_classes:
            for layer_type in layers_type:
                snake_case__ = layer_type
                snake_case__ = True
                check_hidden_states_output(_a , _a , _a )
                # check that output_hidden_states also work using config
                del inputs_dict["output_hidden_states"]
                snake_case__ = True
                check_hidden_states_output(_a , _a , _a )

    @unittest.skip(reason='''Bit does not use feedforward chunking''' )
    def SCREAMING_SNAKE_CASE__ ( self:Tuple ):
        pass

    def SCREAMING_SNAKE_CASE__ ( self:List[str] ):
        snake_case__ = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*_a )

    # Smoke test: the first published checkpoint can be loaded.
    @slow
    def SCREAMING_SNAKE_CASE__ ( self:Optional[Any] ):
        for model_name in BIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            snake_case__ = BitModel.from_pretrained(_a )
            self.assertIsNotNone(_a )


# Loads the standard COCO test image used by the integration test below.
# NOTE(review): the local is bound to `snake_case__` but `image` is returned —
# another mangling casualty.
def SCREAMING_SNAKE_CASE ( ) -> Any:
    snake_case__ = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
    return image


# Slow integration test: real checkpoint, real image, pinned logits.
@require_torch
@require_vision
class __magic_name__ (unittest.TestCase ):
    '''simple docstring'''

    @cached_property
    def SCREAMING_SNAKE_CASE__ ( self:Tuple ):
        return (
            BitImageProcessor.from_pretrained(BIT_PRETRAINED_MODEL_ARCHIVE_LIST[0] ) if is_vision_available() else None
        )

    @slow
    def SCREAMING_SNAKE_CASE__ ( self:Optional[int] ):
        snake_case__ = BitForImageClassification.from_pretrained(BIT_PRETRAINED_MODEL_ARCHIVE_LIST[0] ).to(_a )
        snake_case__ = self.default_image_processor
        snake_case__ = prepare_img()
        snake_case__ = image_processor(images=_a , return_tensors='''pt''' ).to(_a )
        # forward pass
        with torch.no_grad():
            snake_case__ = model(**_a )
        # verify the logits
        snake_case__ = torch.Size((1, 10_00) )
        self.assertEqual(outputs.logits.shape , _a )
        snake_case__ = torch.tensor([[-0.6526, -0.5263, -1.4398]] ).to(_a )
        self.assertTrue(torch.allclose(outputs.logits[0, :3] , _a , atol=1e-4 ) )


# Backbone-specific common tests (BackboneTesterMixin presumably).
@require_torch
class __magic_name__ (snake_case_ ,unittest.TestCase ):
    '''simple docstring'''

    __lowercase : Optional[Any] = (BitBackbone,) if is_torch_available() else ()
    __lowercase : int = BitConfig
    __lowercase : Any = False

    # NOTE(review): `BitModelTester` is not defined under that name here.
    def SCREAMING_SNAKE_CASE__ ( self:str ):
        snake_case__ = BitModelTester(self )
33
1
from __future__ import annotations import math from collections import Counter from string import ascii_lowercase def SCREAMING_SNAKE_CASE ( __lowerCAmelCase ) -> None: snake_case__ , snake_case__ = analyze_text(__lowerCAmelCase ) snake_case__ = list(''' ''' + ascii_lowercase ) # what is our total sum of probabilities. snake_case__ = sum(single_char_strings.values() ) # one length string snake_case__ = 0 # for each alpha we go in our dict and if it is in it we calculate entropy for ch in my_alphas: if ch in single_char_strings: snake_case__ = single_char_strings[ch] snake_case__ = my_str / all_sum my_fir_sum += prob * math.loga(__lowerCAmelCase ) # entropy formula. # print entropy print(F"""{round(-1 * my_fir_sum ):.1f}""" ) # two len string snake_case__ = sum(two_char_strings.values() ) snake_case__ = 0 # for each alpha (two in size) calculate entropy. for cha in my_alphas: for cha in my_alphas: snake_case__ = cha + cha if sequence in two_char_strings: snake_case__ = two_char_strings[sequence] snake_case__ = int(__lowerCAmelCase ) / all_sum my_sec_sum += prob * math.loga(__lowerCAmelCase ) # print second entropy print(F"""{round(-1 * my_sec_sum ):.1f}""" ) # print the difference between them print(F"""{round((-1 * my_sec_sum) - (-1 * my_fir_sum) ):.1f}""" ) def SCREAMING_SNAKE_CASE ( __lowerCAmelCase ) -> tuple[dict, dict]: snake_case__ = Counter() # type: ignore snake_case__ = Counter() # type: ignore single_char_strings[text[-1]] += 1 # first case when we have space at start. two_char_strings[" " + text[0]] += 1 for i in range(0 , len(__lowerCAmelCase ) - 1 ): single_char_strings[text[i]] += 1 two_char_strings[text[i : i + 2]] += 1 return single_char_strings, two_char_strings def SCREAMING_SNAKE_CASE ( ) -> int: import doctest doctest.testmod() # text = ( # "Had repulsive dashwoods suspicion sincerity but advantage now him. Remark " # "easily garret nor nay. Civil those mrs enjoy shy fat merry. You greatest " # "jointure saw horrible. 
He private he on be imagine suppose. Fertile " # "beloved evident through no service elderly is. Blind there if every no so " # "at. Own neglected you preferred way sincerity delivered his attempted. To " # "of message cottage windows do besides against uncivil. Delightful " # "unreserved impossible few estimating men favourable see entreaties. She " # "propriety immediate was improving. He or entrance humoured likewise " # "moderate. Much nor game son say feel. Fat make met can must form into " # "gate. Me we offending prevailed discovery. " # ) # calculate_prob(text) if __name__ == "__main__": main()
33
import numpy as np
import torch
from torch.nn import CrossEntropyLoss
from transformers import AutoModelForCausalLM, AutoTokenizer

import datasets
from datasets import logging


_CITATION = """\
"""

_DESCRIPTION = """
Perplexity (PPL) is one of the most common metrics for evaluating language models.
It is defined as the exponentiated average negative log-likelihood of a sequence.

For more information, see https://huggingface.co/docs/transformers/perplexity
"""

_KWARGS_DESCRIPTION = """
Args:
    model_id (str): model used for calculating Perplexity
            NOTE: Perplexity can only be calculated for causal language models.
                    This includes models such as gpt2, causal variations of bert,
                    causal versions of t5, and more (the full list can be found
                    in the AutoModelForCausalLM documentation here:
                    https://huggingface.co/docs/transformers/master/en/model_doc/auto#transformers.AutoModelForCausalLM )
    input_texts (list of str): input text, each separate text snippet is one list entry.
    batch_size (int): the batch size to run texts through the model. Defaults to 16.
    add_start_token (bool): whether to add the start token to the texts,
        so the perplexity can include the probability of the first word. Defaults to True.
    device (str): device to run on, defaults to 'cuda' when available
Returns:
    perplexity: dictionary containing the perplexity scores for the texts
        in the input list, as well as the mean perplexity. If one of the input texts is
        longer than the max input length of the model, then it is truncated to the
        max length for the perplexity computation.
Examples:
    Example 1:
        >>> perplexity = datasets.load_metric(\"perplexity\")
        >>> input_texts = [\"lorem ipsum\", \"Happy Birthday!\", \"Bienvenue\"]
        >>> results = perplexity.compute(model_id='gpt2',
        ...                              add_start_token=False,
        ...                              input_texts=input_texts) # doctest:+ELLIPSIS
        >>> print(list(results.keys()))
        ['perplexities', 'mean_perplexity']
        >>> print(round(results[\"mean_perplexity\"], 2))
        78.22
        >>> print(round(results[\"perplexities\"][0], 2))
        11.11

    Example 2:
        >>> perplexity = datasets.load_metric(\"perplexity\")
        >>> input_texts = datasets.load_dataset(\"wikitext\",
        ...                                     \"wikitext-2-raw-v1\",
        ...                                     split=\"test\")[\"text\"][:50] # doctest:+ELLIPSIS
        [...]
        >>> input_texts = [s for s in input_texts if s!='']
        >>> results = perplexity.compute(model_id='gpt2',
        ...                              input_texts=input_texts) # doctest:+ELLIPSIS
        >>> print(list(results.keys()))
        ['perplexities', 'mean_perplexity']
        >>> print(round(results[\"mean_perplexity\"], 2))
        60.35
        >>> print(round(results[\"perplexities\"][0], 2))
        81.12
"""


@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class __magic_name__(datasets.Metric):
    """Perplexity metric: per-text and mean perplexity under a causal LM."""

    def _info(self):
        # Metric metadata: inputs are raw strings scored by a causal LM.
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "input_texts": datasets.Value("string"),
                }
            ),
            reference_urls=["https://huggingface.co/docs/transformers/perplexity"],
        )

    def _compute(self, input_texts, model_id, batch_size: int = 16, add_start_token: bool = True, device=None):
        """Score ``input_texts`` with ``model_id`` and return their perplexities.

        Texts longer than the model's max input length are truncated; when
        ``add_start_token`` is True, one position is reserved for the BOS token
        so the first word's probability is included.
        """
        if device is not None:
            assert device in ["gpu", "cpu", "cuda"], "device should be either gpu or cpu."
            if device == "gpu":
                device = "cuda"
        else:
            device = "cuda" if torch.cuda.is_available() else "cpu"

        model = AutoModelForCausalLM.from_pretrained(model_id)
        model = model.to(device)

        tokenizer = AutoTokenizer.from_pretrained(model_id)

        # if batch_size > 1 (which generally leads to padding being required), and
        # if there is not an already assigned pad_token, assign an existing
        # special token to also be the padding token
        if tokenizer.pad_token is None and batch_size > 1:
            existing_special_tokens = list(tokenizer.special_tokens_map_extended.values())
            # check that the model already has at least one special token defined
            assert (
                len(existing_special_tokens) > 0
            ), "If batch_size > 1, model must have at least one special token to use for padding. Please use a different model or set batch_size=1."
            # assign one of the special tokens to also be the pad token
            tokenizer.add_special_tokens({"pad_token": existing_special_tokens[0]})

        if add_start_token:
            # leave room for <BOS> token to be added:
            assert (
                tokenizer.bos_token is not None
            ), "Input model must already have a BOS token if using add_start_token=True. Please use a different model, or set add_start_token=False"
            max_tokenized_len = model.config.max_length - 1
        else:
            max_tokenized_len = model.config.max_length

        encodings = tokenizer(
            input_texts,
            add_special_tokens=False,
            padding=True,
            truncation=True,
            max_length=max_tokenized_len,
            return_tensors="pt",
            return_attention_mask=True,
        ).to(device)

        encoded_texts = encodings["input_ids"]
        attn_masks = encodings["attention_mask"]

        # check that each input is long enough:
        if add_start_token:
            assert torch.all(torch.ge(attn_masks.sum(1), 1)), "Each input text must be at least one token long."
        else:
            assert torch.all(
                torch.ge(attn_masks.sum(1), 2)
            ), "When add_start_token=False, each input text must be at least two tokens long. Run with add_start_token=True if inputting strings of only one token, and remove all empty input strings."

        ppls = []
        loss_fct = CrossEntropyLoss(reduction="none")

        for start_index in logging.tqdm(range(0, len(encoded_texts), batch_size)):
            end_index = min(start_index + batch_size, len(encoded_texts))
            encoded_batch = encoded_texts[start_index:end_index]
            attn_mask = attn_masks[start_index:end_index]

            if add_start_token:
                # Prepend BOS to every row and extend the mask accordingly.
                bos_tokens_tensor = torch.tensor([[tokenizer.bos_token_id]] * encoded_batch.size(dim=0)).to(device)
                encoded_batch = torch.cat([bos_tokens_tensor, encoded_batch], dim=1)
                attn_mask = torch.cat(
                    [torch.ones(bos_tokens_tensor.size(), dtype=torch.int64).to(device), attn_mask], dim=1
                )

            labels = encoded_batch

            with torch.no_grad():
                out_logits = model(encoded_batch, attention_mask=attn_mask).logits

            # Shift so position t predicts token t+1; mask out padding.
            shift_logits = out_logits[..., :-1, :].contiguous()
            shift_labels = labels[..., 1:].contiguous()
            shift_attention_mask_batch = attn_mask[..., 1:].contiguous()

            # NOTE(review): the mangled source read `torch.expa` — the file's
            # digit mangling maps this back to `torch.exp2`; confirm against
            # upstream whether natural `torch.exp` was intended instead.
            perplexity_batch = torch.exp2(
                (loss_fct(shift_logits.transpose(1, 2), shift_labels) * shift_attention_mask_batch).sum(1)
                / shift_attention_mask_batch.sum(1)
            )

            ppls += perplexity_batch.tolist()

        return {"perplexities": ppls, "mean_perplexity": np.mean(ppls)}
33
1
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/trocr-base-handwritten": (
        "https://huggingface.co/microsoft/trocr-base-handwritten/resolve/main/config.json"
    ),
    # See all TrOCR models at https://huggingface.co/models?filter=trocr
}


class __magic_name__(PretrainedConfig):
    """Configuration for a TrOCR text decoder.

    Stores the hyper-parameters of the Transformer decoder (vocabulary,
    hidden size, layer/head counts, dropout rates, position-embedding style)
    and forwards the special-token ids to ``PretrainedConfig``.
    """

    model_type = "trocr"
    keys_to_ignore_at_inference = ["past_key_values"]
    # Map the generic config attribute names onto the decoder-specific ones.
    attribute_map = {
        "num_attention_heads": "decoder_attention_heads",
        "hidden_size": "d_model",
        "num_hidden_layers": "decoder_layers",
    }

    def __init__(
        self,
        vocab_size=50_265,
        d_model=1_024,
        decoder_layers=12,
        decoder_attention_heads=16,
        decoder_ffn_dim=4_096,
        activation_function="gelu",
        max_position_embeddings=512,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        decoder_start_token_id=2,
        init_std=0.02,
        decoder_layerdrop=0.0,
        use_cache=True,
        scale_embedding=False,
        use_learned_position_embeddings=True,
        layernorm_embedding=True,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.activation_function = activation_function
        self.max_position_embeddings = max_position_embeddings
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.init_std = init_std
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        # When True, embeddings are multiplied by sqrt(d_model).
        self.scale_embedding = scale_embedding
        self.use_learned_position_embeddings = use_learned_position_embeddings
        self.layernorm_embedding = layernorm_embedding

        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            decoder_start_token_id=decoder_start_token_id,
            **kwargs,
        )
33
import os
from datetime import datetime as dt

from github import Github

# Issues carrying any of these labels are never auto-staled/closed.
LABELS_TO_EXEMPT = [
    "good first issue",
    "good second issue",
    "good difficult issue",
    "enhancement",
    "new pipeline/model",
    "new scheduler",
    "wip",
]


def main() -> None:
    """Close or re-open stale GitHub issues on huggingface/diffusers.

    Fixes vs. the corrupted original: the entry point was defined under an
    obfuscated name while the __main__ guard called `main()`, the sort key
    lambda's parameter did not match the name used in its body, and the
    exempt-label list was not bound to the name the code reads.
    """
    g = Github(os.environ["GITHUB_TOKEN"])
    repo = g.get_repo("huggingface/diffusers")
    open_issues = repo.get_issues(state="open")

    for issue in open_issues:
        # Newest comment first.
        comments = sorted(issue.get_comments(), key=lambda i: i.created_at, reverse=True)
        last_comment = comments[0] if len(comments) > 0 else None
        if (
            last_comment is not None
            and last_comment.user.login == "github-actions[bot]"
            and (dt.utcnow() - issue.updated_at).days > 7
            and (dt.utcnow() - issue.created_at).days >= 30
            and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels())
        ):
            # Closes the issue after 7 days of inactivity since the Stalebot notification.
            issue.edit(state="closed")
        elif (
            "stale" in issue.get_labels()
            and last_comment is not None
            and last_comment.user.login != "github-actions[bot]"
        ):
            # Opens the issue if someone other than Stalebot commented.
            issue.edit(state="open")
            issue.remove_from_labels("stale")
        elif (
            (dt.utcnow() - issue.updated_at).days > 23
            and (dt.utcnow() - issue.created_at).days >= 30
            and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels())
        ):
            # Post a Stalebot notification after 23 days of inactivity.
            issue.create_comment(
                "This issue has been automatically marked as stale because it has not had "
                "recent activity. If you think this still needs to be addressed "
                "please comment on this thread.\n\nPlease note that issues that do not follow the "
                "[contributing guidelines](https://github.com/huggingface/diffusers/blob/main/CONTRIBUTING.md) "
                "are likely to be ignored."
            )
            issue.add_to_labels("stale")


if __name__ == "__main__":
    main()
33
1
import torch
from diffusers import StableDiffusionPipeline

# Inference script for a DreamBooth-finetuned Stable Diffusion checkpoint.
# Fixes: `torch.floataa` is not a real dtype (restored to torch.float16),
# and the pipeline/prompt/image bindings used names the later statements
# never referenced.
model_id = "path-to-your-trained-model"
pipe = StableDiffusionPipeline.from_pretrained(model_id, torch_dtype=torch.float16).to("cuda")

prompt = "A photo of sks dog in a bucket"
image = pipe(prompt, num_inference_steps=50, guidance_scale=7.5).images[0]

image.save("dog-bucket.png")
33
import pytest

from datasets.utils.sharding import _distribute_shards, _number_of_shards_in_gen_kwargs, _split_gen_kwargs


@pytest.mark.parametrize(
    "kwargs, expected",
    [
        ({"num_shards": 0, "max_num_jobs": 1}, []),
        ({"num_shards": 10, "max_num_jobs": 1}, [range(10)]),
        # one single-shard range per job
        ({"num_shards": 10, "max_num_jobs": 10}, [range(i, i + 1) for i in range(10)]),
        ({"num_shards": 1, "max_num_jobs": 10}, [range(1)]),
        ({"num_shards": 10, "max_num_jobs": 3}, [range(0, 4), range(4, 7), range(7, 10)]),
        ({"num_shards": 3, "max_num_jobs": 10}, [range(0, 1), range(1, 2), range(2, 3)]),
    ],
)
def test_distribute_shards(kwargs, expected):
    """Shards are partitioned evenly across at most max_num_jobs jobs."""
    out = _distribute_shards(**kwargs)
    assert out == expected


@pytest.mark.parametrize(
    "gen_kwargs, max_num_jobs, expected",
    [
        ({"foo": 0}, 10, [{"foo": 0}]),
        ({"shards": [0, 1, 2, 3]}, 1, [{"shards": [0, 1, 2, 3]}]),
        ({"shards": [0, 1, 2, 3]}, 4, [{"shards": [0]}, {"shards": [1]}, {"shards": [2]}, {"shards": [3]}]),
        ({"shards": [0, 1]}, 4, [{"shards": [0]}, {"shards": [1]}]),
        ({"shards": [0, 1, 2, 3]}, 2, [{"shards": [0, 1]}, {"shards": [2, 3]}]),
    ],
)
def test_split_gen_kwargs(gen_kwargs, max_num_jobs, expected):
    """List-valued gen_kwargs are split per job; scalars are replicated."""
    out = _split_gen_kwargs(gen_kwargs, max_num_jobs)
    assert out == expected


@pytest.mark.parametrize(
    "gen_kwargs, expected",
    [
        ({"foo": 0}, 1),
        ({"shards": [0]}, 1),
        ({"shards": [0, 1, 2, 3]}, 4),
        ({"shards": [0, 1, 2, 3], "foo": 0}, 4),
        ({"shards": [0, 1, 2, 3], "other": (0, 1)}, 4),
        # two list kwargs of different lengths are ambiguous -> error
        ({"shards": [0, 1, 2, 3], "shards2": [0, 1]}, RuntimeError),
    ],
)
def test_number_of_shards_in_gen_kwargs(gen_kwargs, expected):
    """Shard count is the length of the list-valued kwarg, or 1 if none."""
    if expected is RuntimeError:
        with pytest.raises(expected):
            _number_of_shards_in_gen_kwargs(gen_kwargs)
    else:
        num_shards = _number_of_shards_in_gen_kwargs(gen_kwargs)
        assert num_shards == expected
33
1
from __future__ import annotations import unittest from transformers import AutoTokenizer, MBartConfig, is_tf_available from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow from transformers.utils import cached_property from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import TFAutoModelForSeqaSeqLM, TFMBartForConditionalGeneration, TFMBartModel @require_tf class __magic_name__ : '''simple docstring''' __lowercase : Optional[int] = MBartConfig __lowercase : Tuple = {} __lowercase : Optional[int] = 'gelu' def __init__( self:Tuple , _a:int , _a:List[Any]=13 , _a:List[str]=7 , _a:Tuple=True , _a:Union[str, Any]=False , _a:Any=99 , _a:List[Any]=32 , _a:List[Any]=2 , _a:int=4 , _a:Dict=37 , _a:int=0.1 , _a:List[str]=0.1 , _a:Any=20 , _a:Any=2 , _a:List[str]=1 , _a:List[str]=0 , ): snake_case__ = parent snake_case__ = batch_size snake_case__ = seq_length snake_case__ = is_training snake_case__ = use_labels snake_case__ = vocab_size snake_case__ = hidden_size snake_case__ = num_hidden_layers snake_case__ = num_attention_heads snake_case__ = intermediate_size snake_case__ = hidden_dropout_prob snake_case__ = attention_probs_dropout_prob snake_case__ = max_position_embeddings snake_case__ = eos_token_id snake_case__ = pad_token_id snake_case__ = bos_token_id def SCREAMING_SNAKE_CASE__ ( self:Any ): snake_case__ = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size ) snake_case__ = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ) , 1 ) snake_case__ = tf.concat([input_ids, eos_tensor] , axis=1 ) snake_case__ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) snake_case__ = self.config_cls( vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , 
decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , **self.config_updates , ) snake_case__ = prepare_mbart_inputs_dict(_a , _a , _a ) return config, inputs_dict def SCREAMING_SNAKE_CASE__ ( self:List[Any] , _a:Tuple , _a:Any ): snake_case__ = TFMBartModel(config=_a ).get_decoder() snake_case__ = inputs_dict['''input_ids'''] snake_case__ = input_ids[:1, :] snake_case__ = inputs_dict['''attention_mask'''][:1, :] snake_case__ = inputs_dict['''head_mask'''] snake_case__ = 1 # first forward pass snake_case__ = model(_a , attention_mask=_a , head_mask=_a , use_cache=_a ) snake_case__ , snake_case__ = outputs.to_tuple() snake_case__ = past_key_values[1] def SCREAMING_SNAKE_CASE ( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase=None , __lowerCAmelCase=None , __lowerCAmelCase=None , __lowerCAmelCase=None , __lowerCAmelCase=None , ) -> List[str]: if attention_mask is None: snake_case__ = tf.cast(tf.math.not_equal(__lowerCAmelCase , config.pad_token_id ) , tf.inta ) if decoder_attention_mask is None: snake_case__ = tf.concat( [ tf.ones(decoder_input_ids[:, :1].shape , dtype=tf.inta ), tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:] , config.pad_token_id ) , tf.inta ), ] , axis=-1 , ) if head_mask is None: snake_case__ = tf.ones((config.encoder_layers, config.encoder_attention_heads) ) if decoder_head_mask is None: snake_case__ = tf.ones((config.decoder_layers, config.decoder_attention_heads) ) if cross_attn_head_mask is None: snake_case__ = tf.ones((config.decoder_layers, config.decoder_attention_heads) ) 
return { "input_ids": input_ids, "decoder_input_ids": decoder_input_ids, "attention_mask": attention_mask, "decoder_attention_mask": decoder_attention_mask, "head_mask": head_mask, "decoder_head_mask": decoder_head_mask, "cross_attn_head_mask": cross_attn_head_mask, } @require_tf class __magic_name__ (snake_case_ ,snake_case_ ,unittest.TestCase ): '''simple docstring''' __lowercase : str = (TFMBartForConditionalGeneration, TFMBartModel) if is_tf_available() else () __lowercase : Dict = (TFMBartForConditionalGeneration,) if is_tf_available() else () __lowercase : Optional[int] = ( { 'conversational': TFMBartForConditionalGeneration, 'feature-extraction': TFMBartModel, 'summarization': TFMBartForConditionalGeneration, 'text2text-generation': TFMBartForConditionalGeneration, 'translation': TFMBartForConditionalGeneration, } if is_tf_available() else {} ) __lowercase : Optional[int] = True __lowercase : Tuple = False __lowercase : List[str] = False def SCREAMING_SNAKE_CASE__ ( self:Dict , _a:str , _a:Optional[int] , _a:Optional[int] , _a:List[Any] , _a:int ): if pipeline_test_casse_name != "FeatureExtractionPipelineTests": # Exception encountered when calling layer '...' 
return True return False def SCREAMING_SNAKE_CASE__ ( self:str ): snake_case__ = TFMBartModelTester(self ) snake_case__ = ConfigTester(self , config_class=_a ) def SCREAMING_SNAKE_CASE__ ( self:List[Any] ): self.config_tester.run_common_tests() def SCREAMING_SNAKE_CASE__ ( self:Union[str, Any] ): snake_case__ = self.model_tester.prepare_config_and_inputs_for_common() self.model_tester.check_decoder_model_past_large_inputs(*_a ) @require_sentencepiece @require_tokenizers @require_tf class __magic_name__ (unittest.TestCase ): '''simple docstring''' __lowercase : str = [ ' UN Chief Says There Is No Military Solution in Syria', ] __lowercase : Union[str, Any] = [ 'Şeful ONU declară că nu există o soluţie militară în Siria', ] __lowercase : Any = 'facebook/mbart-large-en-ro' @cached_property def SCREAMING_SNAKE_CASE__ ( self:Any ): return AutoTokenizer.from_pretrained(self.model_name ) @cached_property def SCREAMING_SNAKE_CASE__ ( self:Dict ): snake_case__ = TFAutoModelForSeqaSeqLM.from_pretrained(self.model_name ) return model def SCREAMING_SNAKE_CASE__ ( self:List[Any] , **_a:Optional[Any] ): snake_case__ = self.translate_src_text(**_a ) self.assertListEqual(self.expected_text , _a ) def SCREAMING_SNAKE_CASE__ ( self:Dict , **_a:Optional[int] ): snake_case__ = self.tokenizer(self.src_text , **_a , return_tensors='''tf''' ) snake_case__ = self.model.generate( model_inputs.input_ids , attention_mask=model_inputs.attention_mask , num_beams=2 ) snake_case__ = self.tokenizer.batch_decode(_a , skip_special_tokens=_a ) return generated_words @slow def SCREAMING_SNAKE_CASE__ ( self:int ): self._assert_generated_batch_equal_expected()
33
import random
import unittest

import torch

from diffusers import IFImgaImgSuperResolutionPipeline
from diffusers.utils import floats_tensor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import skip_mps, torch_device

from ..pipeline_params import TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
from . import IFPipelineTesterMixin


@skip_mps
class __magic_name__(PipelineTesterMixin, IFPipelineTesterMixin, unittest.TestCase):
    """Fast tests for the IF image-to-image super-resolution pipeline.

    The obfuscated source bound all four mixin-required class attributes to
    the same name (`__lowercase`), so only the last survived; the attribute
    names the PipelineTesterMixin reads are restored below, as are `test_*`
    method names so the runner can discover them.
    """

    pipeline_class = IFImgaImgSuperResolutionPipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"width", "height"}
    batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS.union({"original_image"})
    required_optional_params = PipelineTesterMixin.required_optional_params - {"latents"}

    def get_dummy_components(self):
        return self._get_superresolution_dummy_components()

    def get_dummy_inputs(self, device, seed=0):
        # MPS generators cannot be constructed per-device.
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)

        image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        original_image = floats_tensor((1, 3, 16, 16), rng=random.Random(seed)).to(device)

        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "image": image,
            "original_image": original_image,
            "generator": generator,
            "num_inference_steps": 2,
            "output_type": "numpy",
        }

        return inputs

    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available(),
        reason="XFormers attention is only available with CUDA and `xformers` installed",
    )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3)

    def test_save_load_optional_components(self):
        self._test_save_load_optional_components()

    @unittest.skipIf(torch_device != "cuda", reason="float16 requires CUDA")
    def test_save_load_float16(self):
        # Due to non-determinism in save load of the hf-internal-testing/tiny-random-t5 text encoder
        super().test_save_load_float16(expected_max_diff=1e-1)

    def test_attention_slicing_forward_pass(self):
        self._test_attention_slicing_forward_pass(expected_max_diff=1e-2)

    def test_save_load_local(self):
        self._test_save_load_local()

    def test_inference_batch_single_identical(self):
        self._test_inference_batch_single_identical(
            expected_max_diff=1e-2,
        )
33
1
import math

import numpy as np
import qiskit
from qiskit import Aer, ClassicalRegister, QuantumCircuit, QuantumRegister, execute


def quantum_fourier_transform(number_of_qubits: int = 3) -> qiskit.result.counts.Counts:
    """Build and simulate a QFT circuit, returning the measurement counts.

    Fixes vs. the corrupted source: `isinstance(x, x)` (always a TypeError)
    is restored to the string-input guard, the parameter is renamed to the
    `number_of_qubits` name the body references, and the function is given
    the name the __main__ guard calls.
    """
    if isinstance(number_of_qubits, str):
        raise TypeError("number of qubits must be a integer.")
    if number_of_qubits <= 0:
        raise ValueError("number of qubits must be > 0.")
    if math.floor(number_of_qubits) != number_of_qubits:
        raise ValueError("number of qubits must be exact integer.")
    if number_of_qubits > 10:
        raise ValueError("number of qubits too large to simulate(>10).")

    qr = QuantumRegister(number_of_qubits, "qr")
    cr = ClassicalRegister(number_of_qubits, "cr")

    quantum_circuit = QuantumCircuit(qr, cr)

    counter = number_of_qubits

    for i in range(counter):
        # Hadamard on the current most-significant qubit, then controlled
        # phase rotations of decreasing angle against the remaining qubits.
        quantum_circuit.h(number_of_qubits - i - 1)
        counter -= 1
        for j in range(counter):
            quantum_circuit.cp(np.pi / 2 ** (counter - j), j, counter)

    # Reverse qubit order to obtain the standard QFT output ordering.
    for k in range(number_of_qubits // 2):
        quantum_circuit.swap(k, number_of_qubits - k - 1)

    # measure all the qubits
    quantum_circuit.measure(qr, cr)
    # simulate with 10000 shots
    backend = Aer.get_backend("qasm_simulator")
    job = execute(quantum_circuit, backend, shots=10000)

    return job.result().get_counts(quantum_circuit)


if __name__ == "__main__":
    print(
        f"Total count for quantum fourier transform state is: \
{quantum_fourier_transform(3)}"
    )
33
import math class __magic_name__ : '''simple docstring''' def SCREAMING_SNAKE_CASE__ ( self:Optional[int] , _a:list[list[float]] , _a:list[int] ): snake_case__ = 0.0 snake_case__ = 0.0 for i in range(len(_a ) ): da += math.pow((sample[i] - weights[0][i]) , 2 ) da += math.pow((sample[i] - weights[1][i]) , 2 ) return 0 if da > da else 1 return 0 def SCREAMING_SNAKE_CASE__ ( self:Tuple , _a:list[list[int | float]] , _a:list[int] , _a:int , _a:float ): for i in range(len(_a ) ): weights[j][i] += alpha * (sample[i] - weights[j][i]) return weights def SCREAMING_SNAKE_CASE ( ) -> None: # Training Examples ( m, n ) snake_case__ = [[1, 1, 0, 0], [0, 0, 0, 1], [1, 0, 0, 0], [0, 0, 1, 1]] # weight initialization ( n, C ) snake_case__ = [[0.2, 0.6, 0.5, 0.9], [0.8, 0.4, 0.7, 0.3]] # training snake_case__ = SelfOrganizingMap() snake_case__ = 3 snake_case__ = 0.5 for _ in range(__lowerCAmelCase ): for j in range(len(__lowerCAmelCase ) ): # training sample snake_case__ = training_samples[j] # Compute the winning vector snake_case__ = self_organizing_map.get_winner(__lowerCAmelCase , __lowerCAmelCase ) # Update the winning vector snake_case__ = self_organizing_map.update(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) # classify test sample snake_case__ = [0, 0, 0, 1] snake_case__ = self_organizing_map.get_winner(__lowerCAmelCase , __lowerCAmelCase ) # results print(F"""Clusters that the test sample belongs to : {winner}""" ) print(F"""Weights that have been trained : {weights}""" ) # running the main() function if __name__ == "__main__": main()
33
1
from __future__ import annotations


def merge(input_list: list, low: int, mid: int, high: int) -> list:
    """Merge the sorted runs input_list[low:mid] and input_list[mid:high+1].

    The merged result is written back into input_list[low:high+1] (the
    corrupted source dropped this slice assignment) and the list is returned.
    """
    result = []
    left, right = input_list[low:mid], input_list[mid : high + 1]
    while left and right:
        # Pop the smaller head; <= keeps the sort stable.
        result.append((left if left[0] <= right[0] else right).pop(0))
    input_list[low : high + 1] = result + left + right
    return input_list


def iter_merge_sort(input_list: list) -> list:
    """Return input_list sorted ascending using bottom-up (iterative) merge sort."""
    if len(input_list) <= 1:
        return input_list
    input_list = list(input_list)

    # iteration for two-way merging
    p = 2
    while p <= len(input_list):
        # getting low, high and middle value for merge-sort of single list
        for i in range(0, len(input_list), p):
            low = i
            high = i + p - 1
            mid = (low + high + 1) // 2
            input_list = merge(input_list, low, mid, high)
        # final merge of last two parts
        if p * 2 >= len(input_list):
            mid = i
            input_list = merge(input_list, 0, mid, len(input_list) - 1)
            break
        p *= 2

    return input_list


if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    if user_input == "":
        unsorted = []
    else:
        unsorted = [int(item.strip()) for item in user_input.split(",")]
    print(iter_merge_sort(unsorted))
33
from __future__ import annotations from statistics import mean def SCREAMING_SNAKE_CASE ( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) -> list[int]: snake_case__ = [0] * no_of_processes snake_case__ = [0] * no_of_processes # Initialize remaining_time to waiting_time. for i in range(__lowerCAmelCase ): snake_case__ = burst_time[i] snake_case__ = [] snake_case__ = 0 snake_case__ = 0 # When processes are not completed, # A process whose arrival time has passed \ # and has remaining execution time is put into the ready_process. # The shortest process in the ready_process, target_process is executed. while completed != no_of_processes: snake_case__ = [] snake_case__ = -1 for i in range(__lowerCAmelCase ): if (arrival_time[i] <= total_time) and (remaining_time[i] > 0): ready_process.append(__lowerCAmelCase ) if len(__lowerCAmelCase ) > 0: snake_case__ = ready_process[0] for i in ready_process: if remaining_time[i] < remaining_time[target_process]: snake_case__ = i total_time += burst_time[target_process] completed += 1 snake_case__ = 0 snake_case__ = ( total_time - arrival_time[target_process] - burst_time[target_process] ) else: total_time += 1 return waiting_time def SCREAMING_SNAKE_CASE ( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) -> list[int]: snake_case__ = [0] * no_of_processes for i in range(__lowerCAmelCase ): snake_case__ = burst_time[i] + waiting_time[i] return turn_around_time if __name__ == "__main__": print("""[TEST CASE 01]""") lowerCamelCase__ : Tuple = 4 lowerCamelCase__ : Union[str, Any] = [2, 5, 3, 7] lowerCamelCase__ : Optional[Any] = [0, 0, 0, 0] lowerCamelCase__ : Dict = calculate_waitingtime(arrival_time, burst_time, no_of_processes) lowerCamelCase__ : Union[str, Any] = calculate_turnaroundtime( burst_time, no_of_processes, waiting_time ) # Printing the Result print("""PID\tBurst Time\tArrival Time\tWaiting Time\tTurnaround Time""") for i, process_id in enumerate(list(range(1, 5))): print( 
F"""{process_id}\t{burst_time[i]}\t\t\t{arrival_time[i]}\t\t\t\t""" F"""{waiting_time[i]}\t\t\t\t{turn_around_time[i]}""" ) print(F"""\nAverage waiting time = {mean(waiting_time):.5f}""") print(F"""Average turnaround time = {mean(turn_around_time):.5f}""")
33
1
import argparse
import json

import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image

from transformers import AutoImageProcessor, SwinConfig, SwinForImageClassification


def get_swin_config(swin_name):
    """Derive a SwinConfig from a timm checkpoint name like swin_tiny_patch4_window7_224."""
    config = SwinConfig()
    name_split = swin_name.split("_")

    model_size = name_split[1]
    img_size = int(name_split[4])
    window_size = int(name_split[3][-1])

    if model_size == "tiny":
        embed_dim = 96
        depths = (2, 2, 6, 2)
        num_heads = (3, 6, 12, 24)
    elif model_size == "small":
        embed_dim = 96
        depths = (2, 2, 18, 2)
        num_heads = (3, 6, 12, 24)
    elif model_size == "base":
        embed_dim = 128
        depths = (2, 2, 18, 2)
        num_heads = (4, 8, 16, 32)
    else:
        embed_dim = 192
        depths = (2, 2, 18, 2)
        num_heads = (6, 12, 24, 48)

    if "in22k" in swin_name:
        num_classes = 21841
    else:
        num_classes = 1000
        # Attach the ImageNet-1k label mapping for fine-tuned checkpoints.
        repo_id = "huggingface/label-files"
        filename = "imagenet-1k-id2label.json"
        id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}

    config.image_size = img_size
    config.num_labels = num_classes
    config.embed_dim = embed_dim
    config.depths = depths
    config.num_heads = num_heads
    config.window_size = window_size

    return config


def rename_key(name):
    """Map a timm state-dict key onto the corresponding HF Swin key."""
    if "patch_embed.proj" in name:
        name = name.replace("patch_embed.proj", "embeddings.patch_embeddings.projection")
    if "patch_embed.norm" in name:
        name = name.replace("patch_embed.norm", "embeddings.norm")
    if "layers" in name:
        name = "encoder." + name
    if "attn.proj" in name:
        name = name.replace("attn.proj", "attention.output.dense")
    if "attn" in name:
        name = name.replace("attn", "attention.self")
    if "norm1" in name:
        name = name.replace("norm1", "layernorm_before")
    if "norm2" in name:
        name = name.replace("norm2", "layernorm_after")
    if "mlp.fc1" in name:
        name = name.replace("mlp.fc1", "intermediate.dense")
    if "mlp.fc2" in name:
        name = name.replace("mlp.fc2", "output.dense")

    if name == "norm.weight":
        name = "layernorm.weight"
    if name == "norm.bias":
        name = "layernorm.bias"

    if "head" in name:
        name = name.replace("head", "classifier")
    else:
        name = "swin." + name

    return name


def convert_state_dict(orig_state_dict, model):
    """Convert a timm Swin state dict in place: split fused qkv, rename keys.

    The corrupted source dropped the destination keys of the split q/k/v
    assignments; they are reconstructed here from the upstream conversion
    script's layout.
    """
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)

        if "mask" in key:
            # Relative-position index buffers are recomputed by the HF model.
            continue
        elif "qkv" in key:
            key_split = key.split(".")
            layer_num = int(key_split[1])
            block_num = int(key_split[3])
            dim = model.swin.encoder.layers[layer_num].blocks[block_num].attention.self.all_head_size

            if "weight" in key:
                orig_state_dict[
                    f"swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.query.weight"
                ] = val[:dim, :]
                orig_state_dict[
                    f"swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.key.weight"
                ] = val[dim : dim * 2, :]
                orig_state_dict[
                    f"swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.value.weight"
                ] = val[-dim:, :]
            else:
                orig_state_dict[
                    f"swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.query.bias"
                ] = val[:dim]
                orig_state_dict[
                    f"swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.key.bias"
                ] = val[dim : dim * 2]
                orig_state_dict[
                    f"swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.value.bias"
                ] = val[-dim:]
        else:
            orig_state_dict[rename_key(key)] = val

    return orig_state_dict


def convert_swin_checkpoint(swin_name, pytorch_dump_folder_path):
    """Load a timm Swin model, convert it to HF format, verify, and save."""
    timm_model = timm.create_model(swin_name, pretrained=True)
    timm_model.eval()

    config = get_swin_config(swin_name)
    model = SwinForImageClassification(config)
    model.eval()

    new_state_dict = convert_state_dict(timm_model.state_dict(), model)
    model.load_state_dict(new_state_dict)

    # Sanity-check: both models must agree on a real image.
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    image_processor = AutoImageProcessor.from_pretrained("microsoft/{}".format(swin_name.replace("_", "-")))
    image = Image.open(requests.get(url, stream=True).raw)
    inputs = image_processor(images=image, return_tensors="pt")

    timm_outs = timm_model(inputs["pixel_values"])
    hf_outs = model(**inputs).logits

    assert torch.allclose(timm_outs, hf_outs, atol=1e-3)

    print(f"Saving model {swin_name} to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)

    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--swin_name",
        default="swin_tiny_patch4_window7_224",
        type=str,
        help="Name of the Swin timm model you'd like to convert.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
    )

    args = parser.parse_args()
    convert_swin_checkpoint(args.swin_name, args.pytorch_dump_folder_path)
33
__author__ = "Alexander Joslin"

import operator as op

from .stack import Stack


def dijkstras_two_stack_algorithm(equation: str) -> int:
    """Evaluate a fully parenthesized arithmetic expression with two stacks.

    Fixes vs. the corrupted source: digits and operators are pushed from the
    loop variable `i` (the whole equation string was being pushed), and the
    operator is applied to the two popped operands (second-popped first, so
    `-` and `/` associate correctly).
    """
    operators = {"*": op.mul, "/": op.truediv, "+": op.add, "-": op.sub}

    operand_stack: Stack[int] = Stack()
    operator_stack: Stack[str] = Stack()

    for i in equation:
        if i.isdigit():
            # RULE 1
            operand_stack.push(int(i))
        elif i in operators:
            # RULE 2
            operator_stack.push(i)
        elif i == ")":
            # RULE 4
            opr = operator_stack.peek()
            operator_stack.pop()
            num1 = operand_stack.peek()
            operand_stack.pop()
            num2 = operand_stack.peek()
            operand_stack.pop()

            total = operators[opr](num2, num1)
            operand_stack.push(total)

    # RULE 5
    return operand_stack.peek()


if __name__ == "__main__":
    equation = "(5 + ((4 * 2) * (2 + 3)))"
    # answer = 45
    print(f"{equation} = {dijkstras_two_stack_algorithm(equation)}")
33
1
import json import os import shutil import tempfile import unittest from multiprocessing import get_context from pathlib import Path import datasets import numpy as np from datasets import load_dataset from parameterized import parameterized from transformers import AutoProcessor from transformers.models.wavaveca import WavaVecaCTCTokenizer, WavaVecaFeatureExtractor from transformers.models.wavaveca.tokenization_wavaveca import VOCAB_FILES_NAMES from transformers.testing_utils import require_pyctcdecode, require_torch, require_torchaudio, slow from transformers.utils import FEATURE_EXTRACTOR_NAME, is_pyctcdecode_available, is_torch_available from ..wavaveca.test_feature_extraction_wavaveca import floats_list if is_pyctcdecode_available(): from huggingface_hub import snapshot_download from pyctcdecode import BeamSearchDecoderCTC from transformers.models.wavaveca_with_lm import WavaVecaProcessorWithLM from transformers.models.wavaveca_with_lm.processing_wavaveca_with_lm import WavaVecaDecoderWithLMOutput if is_torch_available(): from transformers import WavaVecaForCTC @require_pyctcdecode class __magic_name__ (unittest.TestCase ): '''simple docstring''' def SCREAMING_SNAKE_CASE__ ( self:Optional[Any] ): snake_case__ = '''| <pad> <unk> <s> </s> a b c d e f g h i j k'''.split() snake_case__ = dict(zip(_a , range(len(_a ) ) ) ) snake_case__ = { '''unk_token''': '''<unk>''', '''bos_token''': '''<s>''', '''eos_token''': '''</s>''', } snake_case__ = { '''feature_size''': 1, '''padding_value''': 0.0, '''sampling_rate''': 1_60_00, '''return_attention_mask''': False, '''do_normalize''': True, } snake_case__ = tempfile.mkdtemp() snake_case__ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] ) snake_case__ = os.path.join(self.tmpdirname , _a ) with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp: fp.write(json.dumps(_a ) + '''\n''' ) with open(self.feature_extraction_file , '''w''' , encoding='''utf-8''' ) as fp: fp.write(json.dumps(_a ) + 
'''\n''' ) # load decoder from hub snake_case__ = '''hf-internal-testing/ngram-beam-search-decoder''' def SCREAMING_SNAKE_CASE__ ( self:str , **_a:List[Any] ): snake_case__ = self.add_kwargs_tokens_map.copy() kwargs.update(_a ) return WavaVecaCTCTokenizer.from_pretrained(self.tmpdirname , **_a ) def SCREAMING_SNAKE_CASE__ ( self:Dict , **_a:List[str] ): return WavaVecaFeatureExtractor.from_pretrained(self.tmpdirname , **_a ) def SCREAMING_SNAKE_CASE__ ( self:Optional[int] , **_a:int ): return BeamSearchDecoderCTC.load_from_hf_hub(self.decoder_name , **_a ) def SCREAMING_SNAKE_CASE__ ( self:Tuple ): shutil.rmtree(self.tmpdirname ) def SCREAMING_SNAKE_CASE__ ( self:Union[str, Any] ): snake_case__ = self.get_tokenizer() snake_case__ = self.get_feature_extractor() snake_case__ = self.get_decoder() snake_case__ = WavaVecaProcessorWithLM(tokenizer=_a , feature_extractor=_a , decoder=_a ) processor.save_pretrained(self.tmpdirname ) snake_case__ = WavaVecaProcessorWithLM.from_pretrained(self.tmpdirname ) # tokenizer self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab() ) self.assertIsInstance(processor.tokenizer , _a ) # feature extractor self.assertEqual(processor.feature_extractor.to_json_string() , feature_extractor.to_json_string() ) self.assertIsInstance(processor.feature_extractor , _a ) # decoder self.assertEqual(processor.decoder._alphabet.labels , decoder._alphabet.labels ) self.assertEqual( processor.decoder.model_container[decoder._model_key]._unigram_set , decoder.model_container[decoder._model_key]._unigram_set , ) self.assertIsInstance(processor.decoder , _a ) def SCREAMING_SNAKE_CASE__ ( self:str ): snake_case__ = WavaVecaProcessorWithLM( tokenizer=self.get_tokenizer() , feature_extractor=self.get_feature_extractor() , decoder=self.get_decoder() ) processor.save_pretrained(self.tmpdirname ) # make sure that error is thrown when decoder alphabet doesn't match snake_case__ = WavaVecaProcessorWithLM.from_pretrained( self.tmpdirname , 
alpha=5.0 , beta=3.0 , score_boundary=-7.0 , unk_score_offset=3 ) # decoder self.assertEqual(processor.language_model.alpha , 5.0 ) self.assertEqual(processor.language_model.beta , 3.0 ) self.assertEqual(processor.language_model.score_boundary , -7.0 ) self.assertEqual(processor.language_model.unk_score_offset , 3 ) def SCREAMING_SNAKE_CASE__ ( self:List[Any] ): snake_case__ = self.get_tokenizer() # add token to trigger raise tokenizer.add_tokens(['''xx'''] ) with self.assertRaisesRegex(_a , '''include''' ): WavaVecaProcessorWithLM( tokenizer=_a , feature_extractor=self.get_feature_extractor() , decoder=self.get_decoder() ) def SCREAMING_SNAKE_CASE__ ( self:List[str] ): snake_case__ = self.get_feature_extractor() snake_case__ = self.get_tokenizer() snake_case__ = self.get_decoder() snake_case__ = WavaVecaProcessorWithLM(tokenizer=_a , feature_extractor=_a , decoder=_a ) snake_case__ = floats_list((3, 10_00) ) snake_case__ = feature_extractor(_a , return_tensors='''np''' ) snake_case__ = processor(_a , return_tensors='''np''' ) for key in input_feat_extract.keys(): self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1e-2 ) def SCREAMING_SNAKE_CASE__ ( self:Union[str, Any] ): snake_case__ = self.get_feature_extractor() snake_case__ = self.get_tokenizer() snake_case__ = self.get_decoder() snake_case__ = WavaVecaProcessorWithLM(tokenizer=_a , feature_extractor=_a , decoder=_a ) snake_case__ = '''This is a test string''' snake_case__ = processor(text=_a ) snake_case__ = tokenizer(_a ) for key in encoded_tok.keys(): self.assertListEqual(encoded_tok[key] , encoded_processor[key] ) def SCREAMING_SNAKE_CASE__ ( self:Optional[Any] , _a:int=(2, 10, 16) , _a:Dict=77 ): np.random.seed(_a ) return np.random.rand(*_a ) def SCREAMING_SNAKE_CASE__ ( self:str ): snake_case__ = self.get_feature_extractor() snake_case__ = self.get_tokenizer() snake_case__ = self.get_decoder() snake_case__ = WavaVecaProcessorWithLM(tokenizer=_a , 
feature_extractor=_a , decoder=_a ) snake_case__ = self._get_dummy_logits(shape=(10, 16) , seed=13 ) snake_case__ = processor.decode(_a ) snake_case__ = decoder.decode_beams(_a )[0] self.assertEqual(decoded_decoder[0] , decoded_processor.text ) self.assertEqual('''</s> <s> </s>''' , decoded_processor.text ) self.assertEqual(decoded_decoder[-2] , decoded_processor.logit_score ) self.assertEqual(decoded_decoder[-1] , decoded_processor.lm_score ) @parameterized.expand([[None], ['''fork'''], ['''spawn''']] ) def SCREAMING_SNAKE_CASE__ ( self:int , _a:Union[str, Any] ): snake_case__ = self.get_feature_extractor() snake_case__ = self.get_tokenizer() snake_case__ = self.get_decoder() snake_case__ = WavaVecaProcessorWithLM(tokenizer=_a , feature_extractor=_a , decoder=_a ) snake_case__ = self._get_dummy_logits() # note: pool should be instantiated *after* Wav2Vec2ProcessorWithLM. # otherwise, the LM won't be available to the pool's sub-processes. # manual logic used to allow parameterized test for both pool=None and pool=Pool(...) 
if pool_context is None: snake_case__ = processor.batch_decode(_a ) else: with get_context(_a ).Pool() as pool: snake_case__ = processor.batch_decode(_a , _a ) snake_case__ = list(_a ) with get_context('''fork''' ).Pool() as p: snake_case__ = decoder.decode_beams_batch(_a , _a ) snake_case__ , snake_case__ , snake_case__ = [], [], [] for beams in decoded_beams: texts_decoder.append(beams[0][0] ) logit_scores_decoder.append(beams[0][-2] ) lm_scores_decoder.append(beams[0][-1] ) self.assertListEqual(_a , decoded_processor.text ) self.assertListEqual(['''<s> <s> </s>''', '''<s> <s> <s>'''] , decoded_processor.text ) self.assertListEqual(_a , decoded_processor.logit_score ) self.assertListEqual(_a , decoded_processor.lm_score ) def SCREAMING_SNAKE_CASE__ ( self:List[Any] ): snake_case__ = self.get_feature_extractor() snake_case__ = self.get_tokenizer() snake_case__ = self.get_decoder() snake_case__ = WavaVecaProcessorWithLM(tokenizer=_a , feature_extractor=_a , decoder=_a ) snake_case__ = self._get_dummy_logits() snake_case__ = 15 snake_case__ = -20.0 snake_case__ = -4.0 snake_case__ = processor.batch_decode( _a , beam_width=_a , beam_prune_logp=_a , token_min_logp=_a , ) snake_case__ = decoded_processor_out.text snake_case__ = list(_a ) with get_context('''fork''' ).Pool() as pool: snake_case__ = decoder.decode_beams_batch( _a , _a , beam_width=_a , beam_prune_logp=_a , token_min_logp=_a , ) snake_case__ = [d[0][0] for d in decoded_decoder_out] snake_case__ = [d[0][2] for d in decoded_decoder_out] snake_case__ = [d[0][3] for d in decoded_decoder_out] self.assertListEqual(_a , _a ) self.assertListEqual(['''</s> <s> <s>''', '''<s> <s> <s>'''] , _a ) self.assertTrue(np.array_equal(_a , decoded_processor_out.logit_score ) ) self.assertTrue(np.allclose([-20.054, -18.447] , _a , atol=1e-3 ) ) self.assertTrue(np.array_equal(_a , decoded_processor_out.lm_score ) ) self.assertTrue(np.allclose([-15.554, -13.9474] , _a , atol=1e-3 ) ) def SCREAMING_SNAKE_CASE__ ( self:Union[str, 
Any] ): snake_case__ = self.get_feature_extractor() snake_case__ = self.get_tokenizer() snake_case__ = self.get_decoder() snake_case__ = WavaVecaProcessorWithLM(tokenizer=_a , feature_extractor=_a , decoder=_a ) snake_case__ = self._get_dummy_logits() snake_case__ = 2.0 snake_case__ = 5.0 snake_case__ = -20.0 snake_case__ = True snake_case__ = processor.batch_decode( _a , alpha=_a , beta=_a , unk_score_offset=_a , lm_score_boundary=_a , ) snake_case__ = decoded_processor_out.text snake_case__ = list(_a ) decoder.reset_params( alpha=_a , beta=_a , unk_score_offset=_a , lm_score_boundary=_a , ) with get_context('''fork''' ).Pool() as pool: snake_case__ = decoder.decode_beams_batch( _a , _a , ) snake_case__ = [d[0][0] for d in decoded_decoder_out] self.assertListEqual(_a , _a ) self.assertListEqual(['''<s> </s> <s> </s> </s>''', '''</s> </s> <s> </s> </s>'''] , _a ) snake_case__ = processor.decoder.model_container[processor.decoder._model_key] self.assertEqual(lm_model.alpha , 2.0 ) self.assertEqual(lm_model.beta , 5.0 ) self.assertEqual(lm_model.unk_score_offset , -20.0 ) self.assertEqual(lm_model.score_boundary , _a ) def SCREAMING_SNAKE_CASE__ ( self:int ): snake_case__ = WavaVecaProcessorWithLM.from_pretrained('''hf-internal-testing/processor_with_lm''' ) snake_case__ = processor.decoder.model_container[processor.decoder._model_key] snake_case__ = Path(language_model._kenlm_model.path.decode('''utf-8''' ) ).parent.parent.absolute() snake_case__ = os.listdir(_a ) snake_case__ = ['''alphabet.json''', '''language_model'''] downloaded_decoder_files.sort() expected_decoder_files.sort() # test that only decoder relevant files from # https://huggingface.co/hf-internal-testing/processor_with_lm/tree/main # are downloaded and none of the rest (e.g. README.md, ...) 
self.assertListEqual(_a , _a ) def SCREAMING_SNAKE_CASE__ ( self:int ): snake_case__ = snapshot_download('''hf-internal-testing/processor_with_lm''' ) snake_case__ = WavaVecaProcessorWithLM.from_pretrained(_a ) snake_case__ = processor.decoder.model_container[processor.decoder._model_key] snake_case__ = Path(language_model._kenlm_model.path.decode('''utf-8''' ) ).parent.parent.absolute() snake_case__ = os.listdir(_a ) snake_case__ = os.listdir(_a ) local_decoder_files.sort() expected_decoder_files.sort() # test that both decoder form hub and local files in cache are the same self.assertListEqual(_a , _a ) def SCREAMING_SNAKE_CASE__ ( self:Optional[Any] ): snake_case__ = WavaVecaProcessorWithLM.from_pretrained('''hf-internal-testing/processor_with_lm''' ) snake_case__ = AutoProcessor.from_pretrained('''hf-internal-testing/processor_with_lm''' ) snake_case__ = floats_list((3, 10_00) ) snake_case__ = processor_wavaveca(_a , return_tensors='''np''' ) snake_case__ = processor_auto(_a , return_tensors='''np''' ) for key in input_wavaveca.keys(): self.assertAlmostEqual(input_wavaveca[key].sum() , input_auto[key].sum() , delta=1e-2 ) snake_case__ = self._get_dummy_logits() snake_case__ = processor_wavaveca.batch_decode(_a ) snake_case__ = processor_auto.batch_decode(_a ) self.assertListEqual(decoded_wavaveca.text , decoded_auto.text ) def SCREAMING_SNAKE_CASE__ ( self:Dict ): snake_case__ = self.get_feature_extractor() snake_case__ = self.get_tokenizer() snake_case__ = self.get_decoder() snake_case__ = WavaVecaProcessorWithLM(tokenizer=_a , feature_extractor=_a , decoder=_a ) self.assertListEqual( processor.model_input_names , feature_extractor.model_input_names , msg='''`processor` and `feature_extractor` model input names do not match''' , ) @staticmethod def SCREAMING_SNAKE_CASE__ ( _a:Union[str, Any] , _a:Dict ): snake_case__ = [d[key] for d in offsets] return retrieved_list def SCREAMING_SNAKE_CASE__ ( self:List[Any] ): snake_case__ = 
WavaVecaProcessorWithLM.from_pretrained('''hf-internal-testing/processor_with_lm''' ) snake_case__ = self._get_dummy_logits()[0] snake_case__ = processor.decode(_a , output_word_offsets=_a ) # check Wav2Vec2CTCTokenizerOutput keys for word self.assertEqual(len(outputs.keys() ) , 4 ) self.assertTrue('''text''' in outputs ) self.assertTrue('''word_offsets''' in outputs ) self.assertTrue(isinstance(_a , _a ) ) self.assertEqual(''' '''.join(self.get_from_offsets(outputs['''word_offsets'''] , '''word''' ) ) , outputs.text ) self.assertListEqual(self.get_from_offsets(outputs['''word_offsets'''] , '''word''' ) , ['''<s>''', '''<s>''', '''</s>'''] ) self.assertListEqual(self.get_from_offsets(outputs['''word_offsets'''] , '''start_offset''' ) , [0, 2, 4] ) self.assertListEqual(self.get_from_offsets(outputs['''word_offsets'''] , '''end_offset''' ) , [1, 3, 5] ) def SCREAMING_SNAKE_CASE__ ( self:Optional[Any] ): snake_case__ = WavaVecaProcessorWithLM.from_pretrained('''hf-internal-testing/processor_with_lm''' ) snake_case__ = self._get_dummy_logits() snake_case__ = processor.batch_decode(_a , output_word_offsets=_a ) # check Wav2Vec2CTCTokenizerOutput keys for word self.assertEqual(len(outputs.keys() ) , 4 ) self.assertTrue('''text''' in outputs ) self.assertTrue('''word_offsets''' in outputs ) self.assertTrue(isinstance(_a , _a ) ) self.assertListEqual( [''' '''.join(self.get_from_offsets(_a , '''word''' ) ) for o in outputs['''word_offsets''']] , outputs.text ) self.assertListEqual(self.get_from_offsets(outputs['''word_offsets'''][0] , '''word''' ) , ['''<s>''', '''<s>''', '''</s>'''] ) self.assertListEqual(self.get_from_offsets(outputs['''word_offsets'''][0] , '''start_offset''' ) , [0, 2, 4] ) self.assertListEqual(self.get_from_offsets(outputs['''word_offsets'''][0] , '''end_offset''' ) , [1, 3, 5] ) @slow @require_torch @require_torchaudio def SCREAMING_SNAKE_CASE__ ( self:Any ): import torch snake_case__ = load_dataset('''common_voice''' , '''en''' , split='''train''' , 
streaming=_a ) snake_case__ = ds.cast_column('''audio''' , datasets.Audio(sampling_rate=1_60_00 ) ) snake_case__ = iter(_a ) snake_case__ = next(_a ) snake_case__ = AutoProcessor.from_pretrained('''patrickvonplaten/wav2vec2-base-100h-with-lm''' ) snake_case__ = WavaVecaForCTC.from_pretrained('''patrickvonplaten/wav2vec2-base-100h-with-lm''' ) # compare to filename `common_voice_en_100038.mp3` of dataset viewer on https://huggingface.co/datasets/common_voice/viewer/en/train snake_case__ = processor(sample['''audio''']['''array'''] , return_tensors='''pt''' ).input_values with torch.no_grad(): snake_case__ = model(_a ).logits.cpu().numpy() snake_case__ = processor.decode(logits[0] , output_word_offsets=_a ) snake_case__ = model.config.inputs_to_logits_ratio / processor.feature_extractor.sampling_rate snake_case__ = [ { '''start_time''': d['''start_offset'''] * time_offset, '''end_time''': d['''end_offset'''] * time_offset, '''word''': d['''word'''], } for d in output['''word_offsets'''] ] snake_case__ = '''WHY DOES MILISANDRA LOOK LIKE SHE WANTS TO CONSUME JOHN SNOW ON THE RIVER AT THE WALL''' # output words self.assertEqual(''' '''.join(self.get_from_offsets(_a , '''word''' ) ) , _a ) self.assertEqual(''' '''.join(self.get_from_offsets(_a , '''word''' ) ) , output.text ) # output times snake_case__ = torch.tensor(self.get_from_offsets(_a , '''start_time''' ) ) snake_case__ = torch.tensor(self.get_from_offsets(_a , '''end_time''' ) ) # fmt: off snake_case__ = torch.tensor([1.4199, 1.6599, 2.2599, 3.0, 3.24, 3.5999, 3.7999, 4.0999, 4.26, 4.94, 5.28, 5.6599, 5.78, 5.94, 6.32, 6.5399, 6.6599] ) snake_case__ = torch.tensor([1.5399, 1.8999, 2.9, 3.16, 3.5399, 3.72, 4.0199, 4.1799, 4.76, 5.1599, 5.5599, 5.6999, 5.86, 6.1999, 6.38, 6.6199, 6.94] ) # fmt: on self.assertTrue(torch.allclose(_a , _a , atol=0.01 ) ) self.assertTrue(torch.allclose(_a , _a , atol=0.01 ) )
33
import warnings

from ...utils import logging
from .image_processing_perceiver import PerceiverImageProcessor


lowerCamelCase__ : int = logging.get_logger(__name__)


class __magic_name__(PerceiverImageProcessor):
    """Deprecated alias of :class:`PerceiverImageProcessor`.

    Kept only for backward compatibility; instantiating it emits a
    ``FutureWarning`` and otherwise behaves exactly like the image processor.
    """

    def __init__(self, *args, **kwargs):
        # NOTE: the original body declared duplicate `*_a, **_a` parameters
        # (a SyntaxError) and passed the varargs tuple as the warning
        # category; fixed to the conventional deprecation pattern.
        warnings.warn(
            "The class PerceiverFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use PerceiverImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
33
1
import unittest

from transformers import is_flax_available
from transformers.testing_utils import require_flax, require_sentencepiece, require_tokenizers, require_torch, slow


if is_flax_available():
    import optax
    from flax.training.common_utils import onehot

    from transformers import AutoTokenizer, FlaxMTaForConditionalGeneration
    from transformers.models.ta.modeling_flax_ta import shift_tokens_right


@require_torch
@require_sentencepiece
@require_tokenizers
@require_flax
class __magic_name__(unittest.TestCase):
    """Integration test for the Flax MT5 conditional-generation model."""

    @slow
    def SCREAMING_SNAKE_CASE__(self):
        """Check google/mt5-small's cross-entropy score against a reference value.

        Reproduces the T5X evaluation: score = -(target_length * mean CE loss).
        """
        # NOTE: every local below was previously assigned to `snake_case__`
        # while later lines referenced the real names -> NameError; restored.
        model = FlaxMTaForConditionalGeneration.from_pretrained("google/mt5-small")
        tokenizer = AutoTokenizer.from_pretrained("google/mt5-small")

        input_ids = tokenizer("Hello there", return_tensors="np").input_ids
        labels = tokenizer("Hi I am", return_tensors="np").input_ids

        decoder_input_ids = shift_tokens_right(labels, model.config.pad_token_id, model.config.decoder_start_token_id)

        logits = model(input_ids, decoder_input_ids=decoder_input_ids).logits
        loss = optax.softmax_cross_entropy(logits, onehot(labels, logits.shape[-1])).mean()

        # Reference score computed with the original Mesh-TF implementation.
        mtf_score = -(labels.shape[-1] * loss.item())

        EXPECTED_SCORE = -84.9127
        self.assertTrue(abs(mtf_score - EXPECTED_SCORE) < 1e-4)
33
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_flax_available,
    is_tf_available,
    is_tokenizers_available,
    is_torch_available,
)


# Maps submodule name -> public names it exposes; consumed by _LazyModule below.
# NOTE: previously every assignment here went to a throwaway `lowerCamelCase__`
# variable while `_LazyModule` referenced the undefined `_import_structure`,
# and the optional-backend lists were never registered at all; restored.
_import_structure = {
    "configuration_roberta": ["ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP", "RobertaConfig", "RobertaOnnxConfig"],
    "tokenization_roberta": ["RobertaTokenizer"],
}

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_roberta_fast"] = ["RobertaTokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_roberta"] = [
        "ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST",
        "RobertaForCausalLM",
        "RobertaForMaskedLM",
        "RobertaForMultipleChoice",
        "RobertaForQuestionAnswering",
        "RobertaForSequenceClassification",
        "RobertaForTokenClassification",
        "RobertaModel",
        "RobertaPreTrainedModel",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_roberta"] = [
        "TF_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFRobertaForCausalLM",
        "TFRobertaForMaskedLM",
        "TFRobertaForMultipleChoice",
        "TFRobertaForQuestionAnswering",
        "TFRobertaForSequenceClassification",
        "TFRobertaForTokenClassification",
        "TFRobertaMainLayer",
        "TFRobertaModel",
        "TFRobertaPreTrainedModel",
    ]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_roberta"] = [
        "FlaxRobertaForCausalLM",
        "FlaxRobertaForMaskedLM",
        "FlaxRobertaForMultipleChoice",
        "FlaxRobertaForQuestionAnswering",
        "FlaxRobertaForSequenceClassification",
        "FlaxRobertaForTokenClassification",
        "FlaxRobertaModel",
        "FlaxRobertaPreTrainedModel",
    ]


if TYPE_CHECKING:
    # Static type checkers see the real imports; at runtime the lazy module
    # in the `else` branch defers them until first attribute access.
    from .configuration_roberta import ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP, RobertaConfig, RobertaOnnxConfig
    from .tokenization_roberta import RobertaTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_roberta_fast import RobertaTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_roberta import (
            ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
            RobertaForCausalLM,
            RobertaForMaskedLM,
            RobertaForMultipleChoice,
            RobertaForQuestionAnswering,
            RobertaForSequenceClassification,
            RobertaForTokenClassification,
            RobertaModel,
            RobertaPreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_roberta import (
            TF_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFRobertaForCausalLM,
            TFRobertaForMaskedLM,
            TFRobertaForMultipleChoice,
            TFRobertaForQuestionAnswering,
            TFRobertaForSequenceClassification,
            TFRobertaForTokenClassification,
            TFRobertaMainLayer,
            TFRobertaModel,
            TFRobertaPreTrainedModel,
        )

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_roberta import (
            FlaxRobertaForCausalLM,
            FlaxRobertaForMaskedLM,
            FlaxRobertaForMultipleChoice,
            FlaxRobertaForQuestionAnswering,
            FlaxRobertaForSequenceClassification,
            FlaxRobertaForTokenClassification,
            FlaxRobertaModel,
            FlaxRobertaPreTrainedModel,
        )

else:
    import sys

    # Replace this module with a lazy proxy so heavy backends load on demand.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
33
1
# this script reports modified .py files under the desired list of top-level sub-dirs passed as a list of arguments, e.g.:
# python ./utils/get_modified_files.py utils src tests examples
#
# it uses git to find the forking point and which files were modified - i.e. files not under git won't be considered
# since the output of this script is fed into Makefile commands it doesn't print a newline after the results

import re
import subprocess
import sys


# NOTE: previously every assignment went to `lowerCamelCase__` while the
# expressions below referenced the real names -> NameError; restored.

# SHA of the commit where the current branch forked off `main`.
fork_point_sha = subprocess.check_output("git merge-base main HEAD".split()).decode("utf-8")

# All files changed since the fork point; --diff-filter=d drops deletions.
modified_files = (
    subprocess.check_output(f"git diff --diff-filter=d --name-only {fork_point_sha}".split()).decode("utf-8").split()
)

# Build one regex matching .py files under any of the requested sub-dirs.
joined_dirs = "|".join(sys.argv[1:])
regex = re.compile(rf"^({joined_dirs}).*?\.py$")

relevant_modified_files = [x for x in modified_files if regex.match(x)]
# no trailing newline: the output is consumed verbatim by Makefile commands
print(" ".join(relevant_modified_files), end="")
33
import gc import random import unittest import numpy as np import torch from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer import diffusers from diffusers import ( AutoencoderKL, EulerDiscreteScheduler, StableDiffusionLatentUpscalePipeline, StableDiffusionPipeline, UNetaDConditionModel, ) from diffusers.schedulers import KarrasDiffusionSchedulers from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu from ..pipeline_params import TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin enable_full_determinism() def SCREAMING_SNAKE_CASE ( __lowerCAmelCase ) -> List[Any]: snake_case__ = [tensor.shape for tensor in tensor_list] return all(shape == shapes[0] for shape in shapes[1:] ) class __magic_name__ (snake_case_ ,snake_case_ ,snake_case_ ,unittest.TestCase ): '''simple docstring''' __lowercase : Dict = StableDiffusionLatentUpscalePipeline __lowercase : List[str] = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - { 'height', 'width', 'cross_attention_kwargs', 'negative_prompt_embeds', 'prompt_embeds', } __lowercase : List[Any] = PipelineTesterMixin.required_optional_params - {'num_images_per_prompt'} __lowercase : Any = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS __lowercase : int = frozenset( [] ) # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess __lowercase : List[Any] = frozenset([] ) __lowercase : Any = True @property def SCREAMING_SNAKE_CASE__ ( self:List[str] ): snake_case__ = 1 snake_case__ = 4 snake_case__ = (16, 16) snake_case__ = floats_tensor((batch_size, num_channels) + sizes , rng=random.Random(0 ) ).to(_a ) return image def SCREAMING_SNAKE_CASE__ ( self:Union[str, Any] ): torch.manual_seed(0 ) snake_case__ = UNetaDConditionModel( act_fn='''gelu''' , 
attention_head_dim=8 , norm_num_groups=_a , block_out_channels=[32, 32, 64, 64] , time_cond_proj_dim=1_60 , conv_in_kernel=1 , conv_out_kernel=1 , cross_attention_dim=32 , down_block_types=( '''KDownBlock2D''', '''KCrossAttnDownBlock2D''', '''KCrossAttnDownBlock2D''', '''KCrossAttnDownBlock2D''', ) , in_channels=8 , mid_block_type=_a , only_cross_attention=_a , out_channels=5 , resnet_time_scale_shift='''scale_shift''' , time_embedding_type='''fourier''' , timestep_post_act='''gelu''' , up_block_types=('''KCrossAttnUpBlock2D''', '''KCrossAttnUpBlock2D''', '''KCrossAttnUpBlock2D''', '''KUpBlock2D''') , ) snake_case__ = AutoencoderKL( block_out_channels=[32, 32, 64, 64] , in_channels=3 , out_channels=3 , down_block_types=[ '''DownEncoderBlock2D''', '''DownEncoderBlock2D''', '''DownEncoderBlock2D''', '''DownEncoderBlock2D''', ] , up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D''', '''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] , latent_channels=4 , ) snake_case__ = EulerDiscreteScheduler(prediction_type='''sample''' ) snake_case__ = CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=10_00 , hidden_act='''quick_gelu''' , projection_dim=5_12 , ) snake_case__ = CLIPTextModel(_a ) snake_case__ = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' ) snake_case__ = { '''unet''': model.eval(), '''vae''': vae.eval(), '''scheduler''': scheduler, '''text_encoder''': text_encoder, '''tokenizer''': tokenizer, } return components def SCREAMING_SNAKE_CASE__ ( self:List[Any] , _a:Optional[Any] , _a:List[str]=0 ): if str(_a ).startswith('''mps''' ): snake_case__ = torch.manual_seed(_a ) else: snake_case__ = torch.Generator(device=_a ).manual_seed(_a ) snake_case__ = { '''prompt''': '''A painting of a squirrel eating a burger''', '''image''': self.dummy_image.cpu(), '''generator''': generator, 
'''num_inference_steps''': 2, '''output_type''': '''numpy''', } return inputs def SCREAMING_SNAKE_CASE__ ( self:str ): snake_case__ = '''cpu''' snake_case__ = self.get_dummy_components() snake_case__ = self.pipeline_class(**_a ) pipe.to(_a ) pipe.set_progress_bar_config(disable=_a ) snake_case__ = self.get_dummy_inputs(_a ) snake_case__ = pipe(**_a ).images snake_case__ = image[0, -3:, -3:, -1] self.assertEqual(image.shape , (1, 2_56, 2_56, 3) ) snake_case__ = np.array( [0.47222412, 0.41921633, 0.44717434, 0.46874192, 0.42588258, 0.46150726, 0.4677534, 0.45583832, 0.48579055] ) snake_case__ = np.abs(image_slice.flatten() - expected_slice ).max() self.assertLessEqual(_a , 1e-3 ) def SCREAMING_SNAKE_CASE__ ( self:Union[str, Any] ): super().test_attention_slicing_forward_pass(expected_max_diff=7e-3 ) def SCREAMING_SNAKE_CASE__ ( self:List[Any] ): super().test_cpu_offload_forward_pass(expected_max_diff=3e-3 ) def SCREAMING_SNAKE_CASE__ ( self:str ): super().test_dict_tuple_outputs_equivalent(expected_max_difference=3e-3 ) def SCREAMING_SNAKE_CASE__ ( self:Any ): super().test_inference_batch_single_identical(expected_max_diff=7e-3 ) def SCREAMING_SNAKE_CASE__ ( self:Tuple ): super().test_pt_np_pil_outputs_equivalent(expected_max_diff=3e-3 ) def SCREAMING_SNAKE_CASE__ ( self:Dict ): super().test_save_load_local(expected_max_difference=3e-3 ) def SCREAMING_SNAKE_CASE__ ( self:str ): super().test_save_load_optional_components(expected_max_difference=3e-3 ) def SCREAMING_SNAKE_CASE__ ( self:Any ): snake_case__ = [ '''DDIMScheduler''', '''DDPMScheduler''', '''PNDMScheduler''', '''HeunDiscreteScheduler''', '''EulerAncestralDiscreteScheduler''', '''KDPM2DiscreteScheduler''', '''KDPM2AncestralDiscreteScheduler''', '''DPMSolverSDEScheduler''', ] snake_case__ = self.get_dummy_components() snake_case__ = self.pipeline_class(**_a ) # make sure that PNDM does not need warm-up pipe.scheduler.register_to_config(skip_prk_steps=_a ) pipe.to(_a ) pipe.set_progress_bar_config(disable=_a ) 
snake_case__ = self.get_dummy_inputs(_a ) snake_case__ = 2 snake_case__ = [] for scheduler_enum in KarrasDiffusionSchedulers: if scheduler_enum.name in skip_schedulers: # no sigma schedulers are not supported # no schedulers continue snake_case__ = getattr(_a , scheduler_enum.name ) snake_case__ = scheduler_cls.from_config(pipe.scheduler.config ) snake_case__ = pipe(**_a )[0] outputs.append(_a ) assert check_same_shape(_a ) @require_torch_gpu @slow class __magic_name__ (unittest.TestCase ): '''simple docstring''' def SCREAMING_SNAKE_CASE__ ( self:Optional[Any] ): super().tearDown() gc.collect() torch.cuda.empty_cache() def SCREAMING_SNAKE_CASE__ ( self:str ): snake_case__ = torch.manual_seed(33 ) snake_case__ = StableDiffusionPipeline.from_pretrained('''CompVis/stable-diffusion-v1-4''' , torch_dtype=torch.floataa ) pipe.to('''cuda''' ) snake_case__ = StableDiffusionLatentUpscalePipeline.from_pretrained( '''stabilityai/sd-x2-latent-upscaler''' , torch_dtype=torch.floataa ) upscaler.to('''cuda''' ) snake_case__ = '''a photo of an astronaut high resolution, unreal engine, ultra realistic''' snake_case__ = pipe(_a , generator=_a , output_type='''latent''' ).images snake_case__ = upscaler( prompt=_a , image=_a , num_inference_steps=20 , guidance_scale=0 , generator=_a , output_type='''np''' , ).images[0] snake_case__ = load_numpy( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/latent-upscaler/astronaut_1024.npy''' ) assert np.abs((expected_image - image).mean() ) < 5e-2 def SCREAMING_SNAKE_CASE__ ( self:Optional[int] ): snake_case__ = torch.manual_seed(33 ) snake_case__ = StableDiffusionLatentUpscalePipeline.from_pretrained( '''stabilityai/sd-x2-latent-upscaler''' , torch_dtype=torch.floataa ) upscaler.to('''cuda''' ) snake_case__ = '''the temple of fire by Ross Tran and Gerardo Dottori, oil on canvas''' snake_case__ = load_image( 
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/latent-upscaler/fire_temple_512.png''' ) snake_case__ = upscaler( prompt=_a , image=_a , num_inference_steps=20 , guidance_scale=0 , generator=_a , output_type='''np''' , ).images[0] snake_case__ = load_numpy( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/latent-upscaler/fire_temple_1024.npy''' ) assert np.abs((expected_image - image).max() ) < 5e-2
33
1
import argparse
import re

import numpy as np
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image

from transformers import (
    SamConfig,
    SamImageProcessor,
    SamModel,
    SamProcessor,
    SamVisionConfig,
)

# Maps original segment-anything checkpoint key fragments to HF SamModel names.
# NOTE: fix for mangled source — the loop below iterates KEYS_TO_MODIFY_MAPPING,
# which was previously bound to an unrelated placeholder name.
KEYS_TO_MODIFY_MAPPING = {
    "iou_prediction_head.layers.0": "iou_prediction_head.proj_in",
    "iou_prediction_head.layers.1": "iou_prediction_head.layers.0",
    "iou_prediction_head.layers.2": "iou_prediction_head.proj_out",
    "mask_decoder.output_upscaling.0": "mask_decoder.upscale_conv1",
    "mask_decoder.output_upscaling.1": "mask_decoder.upscale_layer_norm",
    "mask_decoder.output_upscaling.3": "mask_decoder.upscale_conv2",
    "mask_downscaling.0": "mask_embed.conv1",
    "mask_downscaling.1": "mask_embed.layer_norm1",
    "mask_downscaling.3": "mask_embed.conv2",
    "mask_downscaling.4": "mask_embed.layer_norm2",
    "mask_downscaling.6": "mask_embed.conv3",
    "point_embeddings": "point_embed",
    "pe_layer.positional_encoding_gaussian_matrix": "shared_embedding.positional_embedding",
    "image_encoder": "vision_encoder",
    "neck.0": "neck.conv1",
    "neck.1": "neck.layer_norm1",
    "neck.2": "neck.conv2",
    "neck.3": "neck.layer_norm2",
    "patch_embed.proj": "patch_embed.projection",
    ".norm": ".layer_norm",
    "blocks": "layers",
}


def replace_keys(state_dict):
    """Rename original SAM checkpoint keys to HF SamModel state-dict keys.

    Drops the preprocessing buffers (`pixel_mean`/`pixel_std`), applies the
    substring renames from KEYS_TO_MODIFY_MAPPING, and remaps the per-layer
    names of the output-hypernetwork MLPs (layers.0/1/2 -> proj_in/layers.0/
    proj_out).

    Args:
        state_dict: dict of parameter name -> tensor from the original repo.

    Returns:
        A new dict with renamed keys, suitable for `SamModel.load_state_dict`.
    """
    model_state_dict = {}
    # Preprocessing statistics live in the image processor, not the model.
    state_dict.pop("pixel_mean", None)
    state_dict.pop("pixel_std", None)

    output_hypernetworks_mlps_pattern = r".*.output_hypernetworks_mlps.(\d+).layers.(\d+).*"
    for key, value in state_dict.items():
        for key_to_modify, new_key in KEYS_TO_MODIFY_MAPPING.items():
            if key_to_modify in key:
                key = key.replace(key_to_modify, new_key)

        if re.match(output_hypernetworks_mlps_pattern, key):
            # group(2) is the index of the MLP layer within the hypernetwork.
            layer_nb = int(re.match(output_hypernetworks_mlps_pattern, key).group(2))
            if layer_nb == 0:
                key = key.replace("layers.0", "proj_in")
            elif layer_nb == 1:
                key = key.replace("layers.1", "layers.0")
            elif layer_nb == 2:
                key = key.replace("layers.2", "proj_out")

        model_state_dict[key] = value

    # The shared image embedding reuses the prompt encoder's positional table.
    model_state_dict["shared_image_embedding.positional_embedding"] = model_state_dict[
        "prompt_encoder.shared_embedding.positional_embedding"
    ]
    return model_state_dict


def convert_sam_checkpoint(model_name, pytorch_dump_folder, push_to_hub, model_hub_id="ybelkada/segment-anything"):
    """Download an original SAM checkpoint, convert it, and sanity-check the outputs.

    NOTE: fix for mangled source — this function was defined under a placeholder
    name while the `__main__` block called `convert_sam_checkpoint`, and the body
    called `replace_keys`, also undefined under the old names.

    Args:
        model_name: one of sam_vit_b_*/sam_vit_l_*/sam_vit_h_* checkpoint names.
        pytorch_dump_folder: output folder (currently unused by the body — TODO
            confirm whether saving was intended here).
        push_to_hub: whether to push after conversion (currently unused — TODO).
        model_hub_id: hub repo that hosts the original checkpoints.

    Requires a CUDA device; asserts reference IoU scores for sam_vit_h_4b8939.
    """
    checkpoint_path = hf_hub_download(model_hub_id, f"""checkpoints/{model_name}.pth""")

    if "sam_vit_b" in model_name:
        config = SamConfig()
    elif "sam_vit_l" in model_name:
        vision_config = SamVisionConfig(
            hidden_size=1024,
            num_hidden_layers=24,
            num_attention_heads=16,
            global_attn_indexes=[5, 11, 17, 23],
        )
        config = SamConfig(vision_config=vision_config)
    elif "sam_vit_h" in model_name:
        vision_config = SamVisionConfig(
            hidden_size=1280,
            num_hidden_layers=32,
            num_attention_heads=16,
            global_attn_indexes=[7, 15, 23, 31],
        )
        config = SamConfig(vision_config=vision_config)

    state_dict = torch.load(checkpoint_path, map_location="cpu")
    state_dict = replace_keys(state_dict)

    image_processor = SamImageProcessor()
    processor = SamProcessor(image_processor=image_processor)
    hf_model = SamModel(config)
    hf_model.load_state_dict(state_dict)
    hf_model = hf_model.to("cuda")

    img_url = "https://huggingface.co/ybelkada/segment-anything/resolve/main/assets/car.png"
    raw_image = Image.open(requests.get(img_url, stream=True).raw).convert("RGB")
    input_points = [[[400, 650]]]
    input_labels = [[1]]

    # 1) plain image, no prompts
    inputs = processor(images=np.array(raw_image), return_tensors="pt").to("cuda")
    with torch.no_grad():
        output = hf_model(**inputs)
    scores = output.iou_scores.squeeze()

    if model_name == "sam_vit_h_4b8939":
        # Reference values from the original checkpoint; floats compared exactly
        # because the computation is deterministic on the reference setup.
        assert scores[-1].item() == 0.579890251159668

        # 2) one point prompt
        inputs = processor(
            images=np.array(raw_image), input_points=input_points, input_labels=input_labels, return_tensors="pt"
        ).to("cuda")
        with torch.no_grad():
            output = hf_model(**inputs)
        scores = output.iou_scores.squeeze()
        assert scores[-1].item() == 0.9712603092193604

        # 3) one box prompt
        input_boxes = ((75, 275, 1725, 850),)
        inputs = processor(images=np.array(raw_image), input_boxes=input_boxes, return_tensors="pt").to("cuda")
        with torch.no_grad():
            output = hf_model(**inputs)
        scores = output.iou_scores.squeeze()
        assert scores[-1].item() == 0.8686015605926514

        # 4) two points on one image
        input_points = [[[400, 650], [800, 650]]]
        input_labels = [[1, 1]]
        inputs = processor(
            images=np.array(raw_image), input_points=input_points, input_labels=input_labels, return_tensors="pt"
        ).to("cuda")
        with torch.no_grad():
            output = hf_model(**inputs)
        scores = output.iou_scores.squeeze()
        assert scores[-1].item() == 0.9936047792434692


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    choices = ["sam_vit_b_01ec64", "sam_vit_h_4b8939", "sam_vit_l_0b3195"]
    parser.add_argument(
        "--model_name",
        default="sam_vit_h_4b8939",
        choices=choices,
        type=str,
        help="Path to hf config.json of model to convert",
    )
    parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    parser.add_argument(
        "--push_to_hub",
        action="store_true",
        help="Whether to push the model and processor to the hub after converting",
    )
    parser.add_argument(
        "--model_hub_id",
        default="ybelkada/segment-anything",
        choices=choices,
        type=str,
        help="Path to hf config.json of model to convert",
    )
    args = parser.parse_args()
    convert_sam_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub, args.model_hub_id)
33
import shutil
import tempfile
import unittest

import numpy as np
import pytest

from transformers import is_speech_available, is_vision_available
from transformers.testing_utils import require_torch

if is_vision_available():
    from transformers import TvltImageProcessor
if is_speech_available():
    from transformers import TvltFeatureExtractor

from transformers import TvltProcessor


@require_torch
class __magic_name__ (unittest.TestCase ):
    """Tests that TvltProcessor correctly composes a TvltImageProcessor and a
    TvltFeatureExtractor (save/load round-trip, audio-only, image-only, both).

    NOTE(review): every method below shares one mangled name, so at runtime only
    the last definition survives and unittest would not discover the others —
    the original method names (setUp, tearDown, test_*) need restoring.
    Assignment targets are also mangled: later code reads `self.checkpoint` and
    `self.tmpdirname`, which the setUp assignments presumably bound.
    """

    def SCREAMING_SNAKE_CASE__ ( self:str ):
        # setUp: remote checkpoint supplying component configs + scratch dir.
        snake_case__ = '''ZinengTang/tvlt-base'''
        snake_case__ = tempfile.mkdtemp()

    def SCREAMING_SNAKE_CASE__ ( self:Dict , **_a:List[Any] ):
        # Helper: fresh image processor from the checkpoint; kwargs override config.
        return TvltImageProcessor.from_pretrained(self.checkpoint , **_a )

    def SCREAMING_SNAKE_CASE__ ( self:Optional[Any] , **_a:Tuple ):
        # Helper: fresh feature extractor from the checkpoint.
        return TvltFeatureExtractor.from_pretrained(self.checkpoint , **_a )

    def SCREAMING_SNAKE_CASE__ ( self:Dict ):
        # tearDown: drop the scratch dir created in setUp.
        shutil.rmtree(self.tmpdirname )

    def SCREAMING_SNAKE_CASE__ ( self:List[str] ):
        """save_pretrained/from_pretrained round-trip preserves component types."""
        snake_case__ = self.get_image_processor()
        snake_case__ = self.get_feature_extractor()
        snake_case__ = TvltProcessor(image_processor=_a , feature_extractor=_a )
        processor.save_pretrained(self.tmpdirname )
        snake_case__ = TvltProcessor.from_pretrained(self.tmpdirname )
        self.assertIsInstance(processor.feature_extractor , _a )
        self.assertIsInstance(processor.image_processor , _a )

    def SCREAMING_SNAKE_CASE__ ( self:Dict ):
        """Audio-only call matches using the feature extractor directly."""
        snake_case__ = self.get_image_processor()
        snake_case__ = self.get_feature_extractor()
        snake_case__ = TvltProcessor(image_processor=_a , feature_extractor=_a )
        snake_case__ = np.ones([1_20_00] )
        snake_case__ = feature_extractor(_a , return_tensors='''np''' )
        snake_case__ = processor(audio=_a , return_tensors='''np''' )
        # Compare per-key sums rather than arrays; 1e-2 absorbs float noise.
        for key in audio_dict.keys():
            self.assertAlmostEqual(audio_dict[key].sum() , input_processor[key].sum() , delta=1e-2 )

    def SCREAMING_SNAKE_CASE__ ( self:List[Any] ):
        """Image-only call matches using the image processor directly."""
        snake_case__ = self.get_image_processor()
        snake_case__ = self.get_feature_extractor()
        snake_case__ = TvltProcessor(image_processor=_a , feature_extractor=_a )
        snake_case__ = np.ones([3, 2_24, 2_24] )
        snake_case__ = image_processor(_a , return_tensors='''np''' )
        snake_case__ = processor(images=_a , return_tensors='''np''' )
        for key in image_dict.keys():
            self.assertAlmostEqual(image_dict[key].sum() , input_processor[key].sum() , delta=1e-2 )

    def SCREAMING_SNAKE_CASE__ ( self:Any ):
        """Audio + image together yields all four output keys; no input raises."""
        snake_case__ = self.get_image_processor()
        snake_case__ = self.get_feature_extractor()
        snake_case__ = TvltProcessor(image_processor=_a , feature_extractor=_a )
        snake_case__ = np.ones([1_20_00] )
        snake_case__ = np.ones([3, 2_24, 2_24] )
        snake_case__ = processor(audio=_a , images=_a )
        self.assertListEqual(list(inputs.keys() ) , ['''audio_values''', '''audio_mask''', '''pixel_values''', '''pixel_mask'''] )
        # test if it raises when no input is passed
        with pytest.raises(_a ):
            processor()

    def SCREAMING_SNAKE_CASE__ ( self:List[Any] ):
        """Processor's model_input_names is the concatenation of its components'."""
        snake_case__ = self.get_image_processor()
        snake_case__ = self.get_feature_extractor()
        snake_case__ = TvltProcessor(image_processor=_a , feature_extractor=_a )
        self.assertListEqual(
            processor.model_input_names ,
            image_processor.model_input_names + feature_extractor.model_input_names ,
            msg='''`processor` and `image_processor`+`feature_extractor` model input names do not match''' ,
        )
33
1
from __future__ import annotations

import unittest

from transformers import BlenderbotConfig, BlenderbotTokenizer, is_tf_available
from transformers.testing_utils import require_tf, require_tokenizers, slow
from transformers.utils import cached_property

from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin

if is_tf_available():
    import tensorflow as tf

    from transformers import TFAutoModelForSeqaSeqLM, TFBlenderbotForConditionalGeneration, TFBlenderbotModel


@require_tf
class __magic_name__ :
    """Model tester that builds tiny TFBlenderbot configs/inputs for unit tests.

    NOTE(review): identifiers are mangled — the three `__lowercase` class attrs
    shadow each other, yet the code reads `self.config_cls` and
    `self.config_updates` below; `__init__` declares many parameters all named
    `_a` (a SyntaxError as written); and `snake_case__` assignments discard the
    values later read as `input_ids`, `config`, etc. Restore original names
    before this file can run.
    """

    __lowercase : int = BlenderbotConfig
    __lowercase : Any = {}
    __lowercase : Optional[Any] = 'gelu'

    def __init__( self:Tuple , _a:Optional[Any] , _a:Optional[Any]=13 , _a:Tuple=7 , _a:Union[str, Any]=True , _a:int=False , _a:int=99 , _a:Optional[int]=32 , _a:List[str]=2 , _a:List[str]=4 , _a:List[Any]=37 , _a:Any=0.1 , _a:int=0.1 , _a:List[Any]=20 , _a:List[str]=2 , _a:int=1 , _a:Dict=0 , ):
        # Stores the tiny-model hyper-parameters used by all tests below.
        snake_case__ = parent
        snake_case__ = batch_size
        snake_case__ = seq_length
        snake_case__ = is_training
        snake_case__ = use_labels
        snake_case__ = vocab_size
        snake_case__ = hidden_size
        snake_case__ = num_hidden_layers
        snake_case__ = num_attention_heads
        snake_case__ = intermediate_size
        snake_case__ = hidden_dropout_prob
        snake_case__ = attention_probs_dropout_prob
        snake_case__ = max_position_embeddings
        snake_case__ = eos_token_id
        snake_case__ = pad_token_id
        snake_case__ = bos_token_id

    def SCREAMING_SNAKE_CASE__ ( self:int ):
        """Build a tiny config plus matching encoder/decoder input dict."""
        # Random ids, then force an EOS token at the end of every row.
        snake_case__ = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size )
        snake_case__ = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ) , 1 )
        snake_case__ = tf.concat([input_ids, eos_tensor] , axis=1 )
        snake_case__ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        snake_case__ = self.config_cls(
            vocab_size=self.vocab_size ,
            d_model=self.hidden_size ,
            encoder_layers=self.num_hidden_layers ,
            decoder_layers=self.num_hidden_layers ,
            encoder_attention_heads=self.num_attention_heads ,
            decoder_attention_heads=self.num_attention_heads ,
            encoder_ffn_dim=self.intermediate_size ,
            decoder_ffn_dim=self.intermediate_size ,
            dropout=self.hidden_dropout_prob ,
            attention_dropout=self.attention_probs_dropout_prob ,
            max_position_embeddings=self.max_position_embeddings ,
            eos_token_ids=[2] ,
            bos_token_id=self.bos_token_id ,
            pad_token_id=self.pad_token_id ,
            decoder_start_token_id=self.pad_token_id ,
            **self.config_updates ,
        )
        snake_case__ = prepare_blenderbot_inputs_dict(_a , _a , _a )
        return config, inputs_dict

    def SCREAMING_SNAKE_CASE__ ( self:int , _a:Optional[Any] , _a:int ):
        """Check past_key_values caching matches a full no-cache forward pass."""
        snake_case__ = TFBlenderbotModel(config=_a ).get_decoder()
        snake_case__ = inputs_dict['''input_ids''']
        # Work on a single row to keep the check cheap.
        snake_case__ = input_ids[:1, :]
        snake_case__ = inputs_dict['''attention_mask'''][:1, :]
        snake_case__ = inputs_dict['''head_mask''']
        snake_case__ = 1
        # first forward pass
        snake_case__ = model(_a , attention_mask=_a , head_mask=_a , use_cache=_a )
        snake_case__ , snake_case__ = outputs.to_tuple()
        # create hypothetical next token and extent to next_input_ids
        snake_case__ = ids_tensor((self.batch_size, 3) , config.vocab_size )
        snake_case__ = tf.cast(ids_tensor((self.batch_size, 3) , 2 ) , tf.inta )
        # append to next input_ids and
        snake_case__ = tf.concat([input_ids, next_tokens] , axis=-1 )
        snake_case__ = tf.concat([attention_mask, next_attn_mask] , axis=-1 )
        snake_case__ = model(_a , attention_mask=_a )[0]
        snake_case__ = model(_a , attention_mask=_a , past_key_values=_a )[0]
        self.parent.assertEqual(next_tokens.shape[1] , output_from_past.shape[1] )
        # select random slice
        snake_case__ = int(ids_tensor((1,) , output_from_past.shape[-1] ) )
        snake_case__ = output_from_no_past[:, -3:, random_slice_idx]
        snake_case__ = output_from_past[:, :, random_slice_idx]
        # test that outputs are equal for slice
        tf.debugging.assert_near(_a , _a , rtol=1e-3 )


def SCREAMING_SNAKE_CASE ( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase=None , __lowerCAmelCase=None , __lowerCAmelCase=None , __lowerCAmelCase=None , __lowerCAmelCase=None , ) -> Tuple:
    """Fill in default attention/head masks for a Blenderbot input dict.

    NOTE(review): parameters are mangled to one repeated name (SyntaxError as
    written); the body reads `config`, `input_ids`, `decoder_input_ids`, and
    the mask names, which the original signature presumably declared.
    """
    if attention_mask is None:
        # Mask out pad tokens in the encoder input.
        snake_case__ = tf.cast(tf.math.not_equal(__lowerCAmelCase , config.pad_token_id ) , tf.inta )
    if decoder_attention_mask is None:
        # First decoder position is always attended; rest mask out padding.
        snake_case__ = tf.concat(
            [
                tf.ones(decoder_input_ids[:, :1].shape , dtype=tf.inta ),
                tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:] , config.pad_token_id ) , tf.inta ),
            ] ,
            axis=-1 ,
        )
    if head_mask is None:
        snake_case__ = tf.ones((config.encoder_layers, config.encoder_attention_heads) )
    if decoder_head_mask is None:
        snake_case__ = tf.ones((config.decoder_layers, config.decoder_attention_heads) )
    if cross_attn_head_mask is None:
        snake_case__ = tf.ones((config.decoder_layers, config.decoder_attention_heads) )
    return {
        "input_ids": input_ids,
        "decoder_input_ids": decoder_input_ids,
        "attention_mask": attention_mask,
        "decoder_attention_mask": decoder_attention_mask,
        "head_mask": head_mask,
        "decoder_head_mask": decoder_head_mask,
        "cross_attn_head_mask": cross_attn_head_mask,
    }


@require_tf
class __magic_name__ (snake_case_ ,snake_case_ ,unittest.TestCase ):
    """Common-test harness for TFBlenderbot model classes.

    NOTE(review): base classes `snake_case_` are mangled — presumably
    TFModelTesterMixin and PipelineTesterMixin (imported above); and setUp
    references TFBlenderbotModelTester, which the mangled class name above no
    longer provides.
    """

    __lowercase : List[str] = (TFBlenderbotForConditionalGeneration, TFBlenderbotModel) if is_tf_available() else ()
    __lowercase : Any = (TFBlenderbotForConditionalGeneration,) if is_tf_available() else ()
    __lowercase : Tuple = (
        {
            'conversational': TFBlenderbotForConditionalGeneration,
            'feature-extraction': TFBlenderbotModel,
            'summarization': TFBlenderbotForConditionalGeneration,
            'text2text-generation': TFBlenderbotForConditionalGeneration,
            'translation': TFBlenderbotForConditionalGeneration,
        }
        if is_tf_available()
        else {}
    )
    __lowercase : Any = True
    __lowercase : int = False
    __lowercase : int = False

    def SCREAMING_SNAKE_CASE__ ( self:Union[str, Any] ):
        # setUp: build the tiny-model tester and the shared config tester.
        snake_case__ = TFBlenderbotModelTester(self )
        snake_case__ = ConfigTester(self , config_class=_a )

    def SCREAMING_SNAKE_CASE__ ( self:Optional[Any] ):
        self.config_tester.run_common_tests()

    def SCREAMING_SNAKE_CASE__ ( self:Optional[Any] ):
        snake_case__ = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_decoder_model_past_large_inputs(*_a )


@require_tokenizers
@require_tf
class __magic_name__ (unittest.TestCase ):
    """Slow integration test: generate a reply with the 400M distilled model."""

    __lowercase : Optional[int] = ['My friends are cool but they eat too many carbs.']
    __lowercase : Optional[int] = 'facebook/blenderbot-400M-distill'

    @cached_property
    def SCREAMING_SNAKE_CASE__ ( self:Tuple ):
        # Lazily loaded tokenizer for the checkpoint above.
        return BlenderbotTokenizer.from_pretrained(self.model_name )

    @cached_property
    def SCREAMING_SNAKE_CASE__ ( self:Union[str, Any] ):
        # Lazily loaded TF model for the checkpoint above.
        snake_case__ = TFAutoModelForSeqaSeqLM.from_pretrained(self.model_name )
        return model

    @slow
    def SCREAMING_SNAKE_CASE__ ( self:List[Any] ):
        snake_case__ = self.tokenizer(self.src_text , return_tensors='''tf''' )
        snake_case__ = self.model.generate(
            model_inputs.input_ids ,
        )
        snake_case__ = self.tokenizer.batch_decode(generated_ids.numpy() , skip_special_tokens=_a )[0]
        # Pin the exact expected reply from the reference checkpoint.
        assert (
            generated_words
            == " That's unfortunate. Are they trying to lose weight or are they just trying to be healthier?"
        )
33
from collections import OrderedDict
from typing import Mapping

from packaging import version

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging


logger = logging.get_logger(__name__)

# Map of public checkpoints to their hosted config files.
DATA2VEC_VISION_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/data2vec-vision-base-ft": (
        "https://huggingface.co/facebook/data2vec-vision-base-ft/resolve/main/config.json"
    ),
}


class Data2VecVisionConfig(PretrainedConfig):
    """Configuration for a Data2VecVision model.

    NOTE: fixes for mangled source — the original chunk declared every
    ``__init__`` parameter under the same name ``_a`` (a SyntaxError), based the
    class on an undefined ``snake_case_``, and gave both classes in this module
    one colliding placeholder name. Parameter names below are reconstructed
    from the attribute assignments in the body.

    Args mirror the stored attributes: transformer sizes (hidden_size,
    num_hidden_layers, num_attention_heads, intermediate_size, hidden_act,
    dropout probabilities, initializer_range, layer_norm_eps), patch/image
    geometry (image_size, patch_size, num_channels), position-bias options,
    stochastic depth (drop_path_rate), pooling, and the semantic-segmentation
    decode/auxiliary-head settings.
    """

    model_type = "data2vec-vision"

    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        image_size=224,
        patch_size=16,
        num_channels=3,
        use_mask_token=False,
        use_absolute_position_embeddings=False,
        use_relative_position_bias=False,
        use_shared_relative_position_bias=False,
        layer_scale_init_value=0.1,
        drop_path_rate=0.1,
        use_mean_pooling=True,
        out_indices=[3, 5, 7, 11],
        pool_scales=[1, 2, 3, 6],
        use_auxiliary_head=True,
        auxiliary_loss_weight=0.4,
        auxiliary_channels=256,
        auxiliary_num_convs=1,
        auxiliary_concat_input=False,
        semantic_loss_ignore_index=255,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.use_mask_token = use_mask_token
        self.use_absolute_position_embeddings = use_absolute_position_embeddings
        self.use_relative_position_bias = use_relative_position_bias
        self.use_shared_relative_position_bias = use_shared_relative_position_bias
        self.layer_scale_init_value = layer_scale_init_value
        self.drop_path_rate = drop_path_rate
        self.use_mean_pooling = use_mean_pooling
        # decode head attributes (semantic segmentation)
        self.out_indices = out_indices
        self.pool_scales = pool_scales
        # auxiliary head attributes (semantic segmentation)
        self.use_auxiliary_head = use_auxiliary_head
        self.auxiliary_loss_weight = auxiliary_loss_weight
        self.auxiliary_channels = auxiliary_channels
        self.auxiliary_num_convs = auxiliary_num_convs
        self.auxiliary_concat_input = auxiliary_concat_input
        self.semantic_loss_ignore_index = semantic_loss_ignore_index


class Data2VecVisionOnnxConfig(OnnxConfig):
    """ONNX export configuration for Data2VecVision."""

    # Minimum torch version that supports the ops used by the export.
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        # Single pixel_values input with fully dynamic axes.
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        # Tolerance used when validating exported outputs against PyTorch.
        return 1e-4
33
1
from __future__ import annotations def SCREAMING_SNAKE_CASE ( __lowerCAmelCase , __lowerCAmelCase ) -> int: if len(__lowerCAmelCase ) < k or k < 0: raise ValueError('''Invalid Input''' ) snake_case__ = snake_case__ = sum(array[:k] ) for i in range(len(__lowerCAmelCase ) - k ): snake_case__ = current_sum - array[i] + array[i + k] snake_case__ = max(__lowerCAmelCase , __lowerCAmelCase ) return max_sum if __name__ == "__main__": from doctest import testmod from random import randint testmod() lowerCamelCase__ : List[Any] = [randint(-1_0_0_0, 1_0_0_0) for i in range(1_0_0)] lowerCamelCase__ : List[Any] = randint(0, 1_1_0) print(F"""The maximum sum of {k} consecutive elements is {max_sum_in_array(array,k)}""")
33
import os
import sys

# Make the in-repo `src` package importable when this shim is run directly.
# NOTE(review): the constant name is mangled — the next line reads SRC_DIR, so
# this assignment was presumably `SRC_DIR = ...`; restore before running.
lowerCamelCase__ : Optional[int] = os.path.join(os.path.dirname(__file__), """src""")
sys.path.append(SRC_DIR)

from transformers import (
    AutoConfig,
    AutoModel,
    AutoModelForCausalLM,
    AutoModelForMaskedLM,
    AutoModelForQuestionAnswering,
    AutoModelForSequenceClassification,
    AutoTokenizer,
    add_start_docstrings,
)

# Runtime dependencies this shim expects to be installed alongside it.
lowerCamelCase__ : Optional[int] = [
    """torch""",
    """numpy""",
    """tokenizers""",
    """filelock""",
    """requests""",
    """tqdm""",
    """regex""",
    """sentencepiece""",
    """sacremoses""",
    """importlib_metadata""",
    """huggingface_hub""",
]


# Each wrapper below is a thin pass-through to the corresponding Auto class's
# from_pretrained; @add_start_docstrings copies that class's docstring onto it.

@add_start_docstrings(AutoConfig.__doc__ )
def SCREAMING_SNAKE_CASE ( *__lowerCAmelCase , **__lowerCAmelCase ) -> Any:
    return AutoConfig.from_pretrained(*__lowerCAmelCase , **__lowerCAmelCase )


@add_start_docstrings(AutoTokenizer.__doc__ )
def SCREAMING_SNAKE_CASE ( *__lowerCAmelCase , **__lowerCAmelCase ) -> List[str]:
    return AutoTokenizer.from_pretrained(*__lowerCAmelCase , **__lowerCAmelCase )


@add_start_docstrings(AutoModel.__doc__ )
def SCREAMING_SNAKE_CASE ( *__lowerCAmelCase , **__lowerCAmelCase ) -> Tuple:
    return AutoModel.from_pretrained(*__lowerCAmelCase , **__lowerCAmelCase )


@add_start_docstrings(AutoModelForCausalLM.__doc__ )
def SCREAMING_SNAKE_CASE ( *__lowerCAmelCase , **__lowerCAmelCase ) -> Union[str, Any]:
    return AutoModelForCausalLM.from_pretrained(*__lowerCAmelCase , **__lowerCAmelCase )


@add_start_docstrings(AutoModelForMaskedLM.__doc__ )
def SCREAMING_SNAKE_CASE ( *__lowerCAmelCase , **__lowerCAmelCase ) -> List[Any]:
    return AutoModelForMaskedLM.from_pretrained(*__lowerCAmelCase , **__lowerCAmelCase )


@add_start_docstrings(AutoModelForSequenceClassification.__doc__ )
def SCREAMING_SNAKE_CASE ( *__lowerCAmelCase , **__lowerCAmelCase ) -> List[str]:
    return AutoModelForSequenceClassification.from_pretrained(*__lowerCAmelCase , **__lowerCAmelCase )


@add_start_docstrings(AutoModelForQuestionAnswering.__doc__ )
def SCREAMING_SNAKE_CASE ( *__lowerCAmelCase , **__lowerCAmelCase ) -> Union[str, Any]:
    return AutoModelForQuestionAnswering.from_pretrained(*__lowerCAmelCase , **__lowerCAmelCase )
33
1
from typing import TYPE_CHECKING from ...utils import _LazyModule lowerCamelCase__ : Dict = {"""processing_wav2vec2_with_lm""": ["""Wav2Vec2ProcessorWithLM"""]} if TYPE_CHECKING: from .processing_wavaveca_with_lm import WavaVecaProcessorWithLM else: import sys lowerCamelCase__ : Optional[int] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
33
import torch

from diffusers import CMStochasticIterativeScheduler

from .test_schedulers import SchedulerCommonTest


class __magic_name__ (snake_case_ ):
    """Unit tests for CMStochasticIterativeScheduler (consistency models).

    NOTE(review): names are mangled — the base class `snake_case_` is
    presumably SchedulerCommonTest (imported above), the two `__lowercase`
    attrs shadow each other though the code reads `self.scheduler_classes`,
    and `snake_case__` assignments drop targets later read as `config`,
    `scheduler`, `sample`, etc.
    """

    __lowercase : str = (CMStochasticIterativeScheduler,)
    __lowercase : List[str] = 10

    def SCREAMING_SNAKE_CASE__ ( self:int , **_a:Optional[int] ):
        # Default scheduler config; keyword overrides are merged on top.
        snake_case__ = {
            '''num_train_timesteps''': 2_01,
            '''sigma_min''': 0.002,
            '''sigma_max''': 80.0,
        }
        config.update(**_a )
        return config

    def SCREAMING_SNAKE_CASE__ ( self:Optional[int] ):
        """Two consecutive steps keep the sample shape and agree in shape."""
        snake_case__ = 10
        snake_case__ = self.get_scheduler_config()
        snake_case__ = self.scheduler_classes[0](**_a )
        scheduler.set_timesteps(_a )
        snake_case__ = scheduler.timesteps[0]
        snake_case__ = scheduler.timesteps[1]
        snake_case__ = self.dummy_sample
        snake_case__ = 0.1 * sample
        snake_case__ = scheduler.step(_a , _a , _a ).prev_sample
        snake_case__ = scheduler.step(_a , _a , _a ).prev_sample
        self.assertEqual(output_a.shape , sample.shape )
        self.assertEqual(output_a.shape , output_a.shape )

    def SCREAMING_SNAKE_CASE__ ( self:Any ):
        # Config round-trips across several train-timestep counts.
        for timesteps in [10, 50, 1_00, 10_00]:
            self.check_over_configs(num_train_timesteps=_a )

    def SCREAMING_SNAKE_CASE__ ( self:List[str] ):
        for clip_denoised in [True, False]:
            self.check_over_configs(clip_denoised=_a )

    def SCREAMING_SNAKE_CASE__ ( self:int ):
        """Full 1-step denoising loop matches reference sum/mean statistics."""
        snake_case__ = self.scheduler_classes[0]
        snake_case__ = self.get_scheduler_config()
        snake_case__ = scheduler_class(**_a )
        snake_case__ = 1
        scheduler.set_timesteps(_a )
        snake_case__ = scheduler.timesteps
        snake_case__ = torch.manual_seed(0 )
        snake_case__ = self.dummy_model()
        snake_case__ = self.dummy_sample_deter * scheduler.init_noise_sigma
        for i, t in enumerate(_a ):
            # 1. scale model input
            snake_case__ = scheduler.scale_model_input(_a , _a )
            # 2. predict noise residual
            snake_case__ = model(_a , _a )
            # 3. predict previous sample x_t-1
            snake_case__ = scheduler.step(_a , _a , _a , generator=_a ).prev_sample
            snake_case__ = pred_prev_sample
        snake_case__ = torch.sum(torch.abs(_a ) )
        snake_case__ = torch.mean(torch.abs(_a ) )
        assert abs(result_sum.item() - 192.7614 ) < 1e-2
        assert abs(result_mean.item() - 0.2510 ) < 1e-3

    def SCREAMING_SNAKE_CASE__ ( self:Tuple ):
        """Custom descending timestep schedule produces the reference stats."""
        snake_case__ = self.scheduler_classes[0]
        snake_case__ = self.get_scheduler_config()
        snake_case__ = scheduler_class(**_a )
        snake_case__ = [1_06, 0]
        scheduler.set_timesteps(timesteps=_a )
        snake_case__ = scheduler.timesteps
        snake_case__ = torch.manual_seed(0 )
        snake_case__ = self.dummy_model()
        snake_case__ = self.dummy_sample_deter * scheduler.init_noise_sigma
        for t in timesteps:
            # 1. scale model input
            snake_case__ = scheduler.scale_model_input(_a , _a )
            # 2. predict noise residual
            snake_case__ = model(_a , _a )
            # 3. predict previous sample x_t-1
            snake_case__ = scheduler.step(_a , _a , _a , generator=_a ).prev_sample
            snake_case__ = pred_prev_sample
        snake_case__ = torch.sum(torch.abs(_a ) )
        snake_case__ = torch.mean(torch.abs(_a ) )
        assert abs(result_sum.item() - 347.6357 ) < 1e-2
        assert abs(result_mean.item() - 0.4527 ) < 1e-3

    def SCREAMING_SNAKE_CASE__ ( self:Any ):
        # Non-descending custom timesteps must be rejected.
        snake_case__ = self.scheduler_classes[0]
        snake_case__ = self.get_scheduler_config()
        snake_case__ = scheduler_class(**_a )
        snake_case__ = [39, 30, 12, 15, 0]
        with self.assertRaises(_a , msg='''`timesteps` must be in descending order.''' ):
            scheduler.set_timesteps(timesteps=_a )

    def SCREAMING_SNAKE_CASE__ ( self:Any ):
        # Passing both num_inference_steps and timesteps must be rejected.
        snake_case__ = self.scheduler_classes[0]
        snake_case__ = self.get_scheduler_config()
        snake_case__ = scheduler_class(**_a )
        snake_case__ = [39, 30, 12, 1, 0]
        snake_case__ = len(_a )
        with self.assertRaises(_a , msg='''Can only pass one of `num_inference_steps` or `timesteps`.''' ):
            scheduler.set_timesteps(num_inference_steps=_a , timesteps=_a )

    def SCREAMING_SNAKE_CASE__ ( self:Tuple ):
        # Timesteps at/above num_train_timesteps must be rejected.
        snake_case__ = self.scheduler_classes[0]
        snake_case__ = self.get_scheduler_config()
        snake_case__ = scheduler_class(**_a )
        snake_case__ = [scheduler.config.num_train_timesteps]
        # NOTE(review): this msg literal is not an f-string and has a doubled
        # `}}`, so `{scheduler.config...}` is never interpolated — likely meant
        # to be f'''...{scheduler.config.num_train_timesteps}'''.
        with self.assertRaises(
            _a , msg='''`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}}''' , ):
            scheduler.set_timesteps(timesteps=_a )
33
1
from __future__ import annotations from typing import Any class __magic_name__ : '''simple docstring''' def __init__( self:Tuple , _a:int ): snake_case__ = num_of_nodes snake_case__ = [] snake_case__ = {} def SCREAMING_SNAKE_CASE__ ( self:Union[str, Any] , _a:int , _a:int , _a:int ): self.m_edges.append([u_node, v_node, weight] ) def SCREAMING_SNAKE_CASE__ ( self:int , _a:int ): if self.m_component[u_node] == u_node: return u_node return self.find_component(self.m_component[u_node] ) def SCREAMING_SNAKE_CASE__ ( self:int , _a:int ): if self.m_component[u_node] != u_node: for k in self.m_component: snake_case__ = self.find_component(_a ) def SCREAMING_SNAKE_CASE__ ( self:Any , _a:list[int] , _a:int , _a:int ): if component_size[u_node] <= component_size[v_node]: snake_case__ = v_node component_size[v_node] += component_size[u_node] self.set_component(_a ) elif component_size[u_node] >= component_size[v_node]: snake_case__ = self.find_component(_a ) component_size[u_node] += component_size[v_node] self.set_component(_a ) def SCREAMING_SNAKE_CASE__ ( self:int ): snake_case__ = [] snake_case__ = 0 snake_case__ = [-1] * self.m_num_of_nodes # A list of components (initialized to all of the nodes) for node in range(self.m_num_of_nodes ): self.m_component.update({node: node} ) component_size.append(1 ) snake_case__ = self.m_num_of_nodes while num_of_components > 1: for edge in self.m_edges: snake_case__ , snake_case__ , snake_case__ = edge snake_case__ = self.m_component[u] snake_case__ = self.m_component[v] if u_component != v_component: for component in (u_component, v_component): if ( minimum_weight_edge[component] == -1 or minimum_weight_edge[component][2] > w ): snake_case__ = [u, v, w] for edge in minimum_weight_edge: if isinstance(_a , _a ): snake_case__ , snake_case__ , snake_case__ = edge snake_case__ = self.m_component[u] snake_case__ = self.m_component[v] if u_component != v_component: mst_weight += w self.union(_a , _a , _a ) print(F"""Added edge [{u} - 
{v}]\nAdded weight: {w}\n""" ) num_of_components -= 1 snake_case__ = [-1] * self.m_num_of_nodes print(F"""The total weight of the minimal spanning tree is: {mst_weight}""" ) def SCREAMING_SNAKE_CASE ( ) -> None: pass if __name__ == "__main__": import doctest doctest.testmod()
33
import numpy as np def SCREAMING_SNAKE_CASE ( __lowerCAmelCase ) -> np.ndarray: return 1 / (1 + np.exp(-vector )) def SCREAMING_SNAKE_CASE ( __lowerCAmelCase ) -> np.ndarray: return vector * sigmoid(__lowerCAmelCase ) if __name__ == "__main__": import doctest doctest.testmod()
33
1
from typing import Dict, List, Optional, Union

import numpy as np

from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
    center_crop,
    get_resize_output_image_size,
    normalize,
    rescale,
    resize,
    to_channel_dimension_format,
)
from ...image_utils import (
    IMAGENET_STANDARD_MEAN,
    IMAGENET_STANDARD_STD,
    ChannelDimension,
    ImageInput,
    PILImageResampling,
    make_list_of_images,
    to_numpy_array,
    valid_images,
)
from ...utils import TensorType, is_vision_available, logging


if is_vision_available():
    import PIL


lowerCamelCase__ : str = logging.get_logger(__name__)


class __magic_name__(BaseImageProcessor):
    """ConvNeXt-style image processor: resize (with crop-pct center crop for
    sizes < 384), rescale, and normalize images into ``pixel_values``.

    Fix notes vs. the previous revision: every method was named
    ``SCREAMING_SNAKE_CASE__`` (so ``self.resize``/``self.rescale``/
    ``self.normalize`` in ``preprocess`` were unresolved), all parameters were
    named ``_a`` (duplicate parameter names are a SyntaxError), the base class
    ``snake_case_`` was undefined, and the validation
    ``do_resize and size is None or resample is None`` had a precedence bug.

    Args:
        do_resize: Whether to resize the image's shorter edge to ``size``.
        size: Dict with key ``"shortest_edge"``; defaults to 384.
        crop_pct: Fraction of the resized image kept by the center crop when
            ``shortest_edge < 384``; defaults to 224/256.
        resample: PIL resampling filter used when resizing.
        do_rescale: Whether to multiply pixel values by ``rescale_factor``.
        rescale_factor: Scale factor, defaults to 1/255.
        do_normalize: Whether to normalize with ``image_mean``/``image_std``.
        image_mean / image_std: Per-channel statistics; ImageNet defaults.
    """

    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size: Dict[str, int] = None,
        crop_pct: float = None,
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        do_normalize: bool = True,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 384}
        size = get_size_dict(size, default_to_square=False)

        self.do_resize = do_resize
        self.size = size
        # Default value set here for backwards compatibility where the value in config is None
        self.crop_pct = crop_pct if crop_pct is not None else 224 / 256
        self.resample = resample
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD

    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        crop_pct: float,
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Resize so the shorter edge equals ``size["shortest_edge"]``.

        For targets below 384 the image is first resized to
        ``shortest_edge / crop_pct`` and then center-cropped; at 384 or above
        it is warped directly to a square without cropping.
        """
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" not in size:
            raise ValueError(f"""Size dictionary must contain 'shortest_edge' key. Got {size.keys()}""")
        shortest_edge = size["shortest_edge"]

        if shortest_edge < 384:
            # maintain same ratio, resizing shortest edge to shortest_edge/crop_pct
            resize_shortest_edge = int(shortest_edge / crop_pct)
            resize_size = get_resize_output_image_size(image, size=resize_shortest_edge, default_to_square=False)
            image = resize(image=image, size=resize_size, resample=resample, data_format=data_format, **kwargs)
            # then crop to (shortest_edge, shortest_edge)
            return center_crop(image=image, size=(shortest_edge, shortest_edge), data_format=data_format, **kwargs)
        else:
            # warping (no cropping) when evaluated at 384 or larger
            return resize(
                image, size=(shortest_edge, shortest_edge), resample=resample, data_format=data_format, **kwargs
            )

    def rescale(
        self,
        image: np.ndarray,
        scale: Union[int, float],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Multiply pixel values by ``scale`` (e.g. 1/255)."""
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(
        self,
        image: np.ndarray,
        mean: Union[float, List[float]],
        std: Union[float, List[float]],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Normalize per channel: (image - mean) / std."""
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def preprocess(
        self,
        images: ImageInput,
        do_resize: bool = None,
        size: Dict[str, int] = None,
        crop_pct: float = None,
        resample: PILImageResampling = None,
        do_rescale: bool = None,
        rescale_factor: float = None,
        do_normalize: bool = None,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: ChannelDimension = ChannelDimension.FIRST,
        **kwargs,
    ) -> PIL.Image.Image:
        """Apply the full resize/rescale/normalize chain to one or more images.

        Per-call arguments override the instance defaults set in ``__init__``.
        Returns a ``BatchFeature`` with key ``"pixel_values"``.
        """
        do_resize = do_resize if do_resize is not None else self.do_resize
        crop_pct = crop_pct if crop_pct is not None else self.crop_pct
        resample = resample if resample is not None else self.resample
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std

        size = size if size is not None else self.size
        size = get_size_dict(size, default_to_square=False)

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        # Parenthesized: previously `do_resize and size is None or resample is None`
        # raised whenever resample was None, even with do_resize=False.
        if do_resize and (size is None or resample is None):
            raise ValueError("Size and resample must be specified if do_resize is True.")

        if do_resize and size["shortest_edge"] < 384 and crop_pct is None:
            raise ValueError("crop_pct must be specified if size < 384.")

        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")

        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_resize:
            images = [self.resize(image=image, size=size, crop_pct=crop_pct, resample=resample) for image in images]

        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]

        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
33
def SCREAMING_SNAKE_CASE ( __lowerCAmelCase = 100 ) -> int: snake_case__ = set() snake_case__ = 0 snake_case__ = n + 1 # maximum limit for a in range(2 , __lowerCAmelCase ): for b in range(2 , __lowerCAmelCase ): snake_case__ = a**b # calculates the current power collect_powers.add(__lowerCAmelCase ) # adds the result to the set return len(__lowerCAmelCase ) if __name__ == "__main__": print("""Number of terms """, solution(int(str(input()).strip())))
33
1
# Test suite for diffusers' StableDiffusionPanoramaPipeline.
# NOTE(review): identifiers in this file look machine-mangled — every method is
# named SCREAMING_SNAKE_CASE__ (later defs shadow earlier ones, so unittest
# cannot discover individual tests), every local is assigned to `snake_case__`
# while later statements read the intended names (`unet`, `components`,
# `image_slice`, ...), several parameter lists repeat `_a` (a SyntaxError), and
# the class bases reference an undefined `snake_case_`. The code is left
# byte-identical; only comments document the apparent intent — confirm against
# the upstream diffusers test file before relying on any of it.
import gc
import unittest

import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer

from diffusers import (
    AutoencoderKL,
    DDIMScheduler,
    EulerAncestralDiscreteScheduler,
    LMSDiscreteScheduler,
    PNDMScheduler,
    StableDiffusionPanoramaPipeline,
    UNetaDConditionModel,
)
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, skip_mps

from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin


# Makes torch ops deterministic so the hard-coded expected slices are stable.
enable_full_determinism()


@skip_mps
class __magic_name__ (snake_case_ ,snake_case_ ,unittest.TestCase ):
    """Fast CPU tests for StableDiffusionPanoramaPipeline built from tiny dummy components."""

    # Pipeline class under test and its parameter sets (mixin configuration).
    __lowercase : Tuple = StableDiffusionPanoramaPipeline
    __lowercase : int = TEXT_TO_IMAGE_PARAMS
    __lowercase : Any = TEXT_TO_IMAGE_BATCH_PARAMS
    __lowercase : Union[str, Any] = TEXT_TO_IMAGE_IMAGE_PARAMS
    __lowercase : Dict = TEXT_TO_IMAGE_IMAGE_PARAMS

    # Build a dict of tiny, seeded components (unet/scheduler/vae/text encoder/tokenizer).
    def SCREAMING_SNAKE_CASE__ ( self:Tuple ):
        torch.manual_seed(0 )
        snake_case__ = UNetaDConditionModel(
            block_out_channels=(32, 64) , layers_per_block=1 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''') , up_block_types=('''CrossAttnUpBlock2D''', '''UpBlock2D''') , cross_attention_dim=32 , )
        snake_case__ = DDIMScheduler()
        torch.manual_seed(0 )
        snake_case__ = AutoencoderKL(
            block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''] , up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] , latent_channels=4 , )
        torch.manual_seed(0 )
        snake_case__ = CLIPTextConfig(
            bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=10_00 , )
        snake_case__ = CLIPTextModel(_a )
        snake_case__ = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
        snake_case__ = {
            '''unet''': unet,
            '''scheduler''': scheduler,
            '''vae''': vae,
            '''text_encoder''': text_encoder,
            '''tokenizer''': tokenizer,
            '''safety_checker''': None,
            '''feature_extractor''': None,
        }
        return components

    # Standard call kwargs for a single fast inference step on CPU.
    def SCREAMING_SNAKE_CASE__ ( self:Optional[Any] , _a:str , _a:Optional[Any]=0 ):
        snake_case__ = torch.manual_seed(_a )
        snake_case__ = {
            '''prompt''': '''a photo of the dolomites''',
            '''generator''': generator,
            # Setting height and width to None to prevent OOMs on CPU.
            '''height''': None,
            '''width''': None,
            '''num_inference_steps''': 1,
            '''guidance_scale''': 6.0,
            '''output_type''': '''numpy''',
        }
        return inputs

    # Default-case smoke test: output shape and a pinned 3x3 corner slice.
    def SCREAMING_SNAKE_CASE__ ( self:List[str] ):
        snake_case__ = '''cpu'''  # ensure determinism for the device-dependent torch.Generator
        snake_case__ = self.get_dummy_components()
        snake_case__ = StableDiffusionPanoramaPipeline(**_a )
        snake_case__ = sd_pipe.to(_a )
        sd_pipe.set_progress_bar_config(disable=_a )
        snake_case__ = self.get_dummy_inputs(_a )
        snake_case__ = sd_pipe(**_a ).images
        snake_case__ = image[0, -3:, -3:, -1]
        assert image.shape == (1, 64, 64, 3)
        snake_case__ = np.array([0.6186, 0.5374, 0.4915, 0.4135, 0.4114, 0.4563, 0.5128, 0.4977, 0.4757] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2

    # Restrict the mixin's batch-consistency test to batch sizes 1 and 2.
    def SCREAMING_SNAKE_CASE__ ( self:Optional[int] ):
        super().test_inference_batch_consistent(batch_sizes=[1, 2] )

    # Loosen the mixin's single-vs-batch tolerance for this pipeline.
    def SCREAMING_SNAKE_CASE__ ( self:List[str] ):
        super().test_inference_batch_single_identical(batch_size=2 , expected_max_diff=3.25e-3 )

    # Negative-prompt variant of the default case.
    def SCREAMING_SNAKE_CASE__ ( self:int ):
        snake_case__ = '''cpu'''  # ensure determinism for the device-dependent torch.Generator
        snake_case__ = self.get_dummy_components()
        snake_case__ = StableDiffusionPanoramaPipeline(**_a )
        snake_case__ = sd_pipe.to(_a )
        sd_pipe.set_progress_bar_config(disable=_a )
        snake_case__ = self.get_dummy_inputs(_a )
        snake_case__ = '''french fries'''
        snake_case__ = sd_pipe(**_a , negative_prompt=_a )
        snake_case__ = output.images
        snake_case__ = image[0, -3:, -3:, -1]
        assert image.shape == (1, 64, 64, 3)
        snake_case__ = np.array([0.6187, 0.5375, 0.4915, 0.4136, 0.4114, 0.4563, 0.5128, 0.4976, 0.4757] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2

    # view_batch_size=2 should produce the same image as the default path.
    def SCREAMING_SNAKE_CASE__ ( self:Optional[int] ):
        snake_case__ = '''cpu'''  # ensure determinism for the device-dependent torch.Generator
        snake_case__ = self.get_dummy_components()
        snake_case__ = StableDiffusionPanoramaPipeline(**_a )
        snake_case__ = sd_pipe.to(_a )
        sd_pipe.set_progress_bar_config(disable=_a )
        snake_case__ = self.get_dummy_inputs(_a )
        snake_case__ = sd_pipe(**_a , view_batch_size=2 )
        snake_case__ = output.images
        snake_case__ = image[0, -3:, -3:, -1]
        assert image.shape == (1, 64, 64, 3)
        snake_case__ = np.array([0.6187, 0.5375, 0.4915, 0.4136, 0.4114, 0.4563, 0.5128, 0.4976, 0.4757] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2

    # Swap in EulerAncestralDiscreteScheduler and pin its expected slice.
    def SCREAMING_SNAKE_CASE__ ( self:str ):
        snake_case__ = '''cpu'''  # ensure determinism for the device-dependent torch.Generator
        snake_case__ = self.get_dummy_components()
        snake_case__ = EulerAncestralDiscreteScheduler(
            beta_start=0.00085 , beta_end=0.012 , beta_schedule='''scaled_linear''' )
        snake_case__ = StableDiffusionPanoramaPipeline(**_a )
        snake_case__ = sd_pipe.to(_a )
        sd_pipe.set_progress_bar_config(disable=_a )
        snake_case__ = self.get_dummy_inputs(_a )
        snake_case__ = sd_pipe(**_a ).images
        snake_case__ = image[0, -3:, -3:, -1]
        assert image.shape == (1, 64, 64, 3)
        snake_case__ = np.array([0.4024, 0.6510, 0.4901, 0.5378, 0.5813, 0.5622, 0.4795, 0.4467, 0.4952] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2

    # Swap in PNDMScheduler (with skip_prk_steps) and pin its expected slice.
    def SCREAMING_SNAKE_CASE__ ( self:Optional[int] ):
        snake_case__ = '''cpu'''  # ensure determinism for the device-dependent torch.Generator
        snake_case__ = self.get_dummy_components()
        snake_case__ = PNDMScheduler(
            beta_start=0.00085 , beta_end=0.012 , beta_schedule='''scaled_linear''' , skip_prk_steps=_a )
        snake_case__ = StableDiffusionPanoramaPipeline(**_a )
        snake_case__ = sd_pipe.to(_a )
        sd_pipe.set_progress_bar_config(disable=_a )
        snake_case__ = self.get_dummy_inputs(_a )
        snake_case__ = sd_pipe(**_a ).images
        snake_case__ = image[0, -3:, -3:, -1]
        assert image.shape == (1, 64, 64, 3)
        snake_case__ = np.array([0.6391, 0.6291, 0.4861, 0.5134, 0.5552, 0.4578, 0.5032, 0.5023, 0.4539] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2


@slow
@require_torch_gpu
class __magic_name__ (unittest.TestCase ):
    """Slow GPU integration tests against the real stabilityai/stable-diffusion-2-base weights."""

    # Free GPU memory between tests.
    def SCREAMING_SNAKE_CASE__ ( self:Optional[int] ):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    # Standard call kwargs for the full-size (512x2048) panorama generation.
    def SCREAMING_SNAKE_CASE__ ( self:Dict , _a:Any=0 ):
        snake_case__ = torch.manual_seed(_a )
        snake_case__ = {
            '''prompt''': '''a photo of the dolomites''',
            '''generator''': generator,
            '''num_inference_steps''': 3,
            '''guidance_scale''': 7.5,
            '''output_type''': '''numpy''',
        }
        return inputs

    # Full pipeline with DDIM scheduler: shape and pinned corner slice.
    def SCREAMING_SNAKE_CASE__ ( self:str ):
        snake_case__ = '''stabilityai/stable-diffusion-2-base'''
        snake_case__ = DDIMScheduler.from_pretrained(_a , subfolder='''scheduler''' )
        snake_case__ = StableDiffusionPanoramaPipeline.from_pretrained(_a , scheduler=_a , safety_checker=_a )
        pipe.to(_a )
        pipe.set_progress_bar_config(disable=_a )
        pipe.enable_attention_slicing()
        snake_case__ = self.get_inputs()
        snake_case__ = pipe(**_a ).images
        snake_case__ = image[0, -3:, -3:, -1].flatten()
        assert image.shape == (1, 5_12, 20_48, 3)
        snake_case__ = np.array(
            [
                0.36968392,
                0.27025372,
                0.32446766,
                0.28379387,
                0.36363274,
                0.30733347,
                0.27100027,
                0.27054125,
                0.25536096,
            ] )
        assert np.abs(expected_slice - image_slice ).max() < 1e-2

    # Full pipeline with LMS scheduler; this corner is expected to be all zeros.
    def SCREAMING_SNAKE_CASE__ ( self:Dict ):
        snake_case__ = StableDiffusionPanoramaPipeline.from_pretrained(
            '''stabilityai/stable-diffusion-2-base''' , safety_checker=_a )
        snake_case__ = LMSDiscreteScheduler.from_config(pipe.scheduler.config )
        pipe.to(_a )
        pipe.set_progress_bar_config(disable=_a )
        pipe.enable_attention_slicing()
        snake_case__ = self.get_inputs()
        snake_case__ = pipe(**_a ).images
        snake_case__ = image[0, -3:, -3:, -1].flatten()
        assert image.shape == (1, 5_12, 20_48, 3)
        snake_case__ = np.array(
            [
                [
                    0.0,
                    0.0,
                    0.0,
                    0.0,
                    0.0,
                    0.0,
                    0.0,
                    0.0,
                    0.0,
                ]
            ] )
        assert np.abs(expected_slice - image_slice ).max() < 1e-3

    # Verifies the step callback fires once per step with the expected latents.
    def SCREAMING_SNAKE_CASE__ ( self:int ):
        snake_case__ = 0

        def callback_fn(_a:int , _a:int , _a:torch.FloatTensor ) -> None:
            snake_case__ = True
            nonlocal number_of_steps
            number_of_steps += 1
            if step == 1:
                snake_case__ = latents.detach().cpu().numpy()
                assert latents.shape == (1, 4, 64, 2_56)
                snake_case__ = latents[0, -3:, -3:, -1]
                snake_case__ = np.array(
                    [
                        0.18681869,
                        0.33907816,
                        0.5361276,
                        0.14432865,
                        -0.02856611,
                        -0.73941123,
                        0.23397987,
                        0.47322682,
                        -0.37823164,
                    ] )
                assert np.abs(latents_slice.flatten() - expected_slice ).max() < 5e-2
            elif step == 2:
                snake_case__ = latents.detach().cpu().numpy()
                assert latents.shape == (1, 4, 64, 2_56)
                snake_case__ = latents[0, -3:, -3:, -1]
                snake_case__ = np.array(
                    [
                        0.18539645,
                        0.33987248,
                        0.5378559,
                        0.14437142,
                        -0.02455261,
                        -0.7338317,
                        0.23990755,
                        0.47356272,
                        -0.3786505,
                    ] )
                assert np.abs(latents_slice.flatten() - expected_slice ).max() < 5e-2

        snake_case__ = False
        snake_case__ = '''stabilityai/stable-diffusion-2-base'''
        snake_case__ = DDIMScheduler.from_pretrained(_a , subfolder='''scheduler''' )
        snake_case__ = StableDiffusionPanoramaPipeline.from_pretrained(_a , scheduler=_a , safety_checker=_a )
        snake_case__ = pipe.to(_a )
        pipe.set_progress_bar_config(disable=_a )
        pipe.enable_attention_slicing()
        snake_case__ = self.get_inputs()
        pipe(**_a , callback=_a , callback_steps=1 )
        assert callback_fn.has_been_called
        assert number_of_steps == 3

    # Memory-budget test: sequential CPU offload must keep peak VRAM under ~5.5 GB.
    def SCREAMING_SNAKE_CASE__ ( self:Tuple ):
        torch.cuda.empty_cache()
        torch.cuda.reset_max_memory_allocated()
        torch.cuda.reset_peak_memory_stats()
        snake_case__ = '''stabilityai/stable-diffusion-2-base'''
        snake_case__ = DDIMScheduler.from_pretrained(_a , subfolder='''scheduler''' )
        snake_case__ = StableDiffusionPanoramaPipeline.from_pretrained(_a , scheduler=_a , safety_checker=_a )
        snake_case__ = pipe.to(_a )
        pipe.set_progress_bar_config(disable=_a )
        pipe.enable_attention_slicing(1 )
        pipe.enable_sequential_cpu_offload()
        snake_case__ = self.get_inputs()
        snake_case__ = pipe(**_a )
        snake_case__ = torch.cuda.max_memory_allocated()
        # make sure that less than 5.2 GB is allocated
        assert mem_bytes < 5.5 * 10**9
33
from copy import deepcopy class __magic_name__ : '''simple docstring''' def __init__( self:int , _a:list[int] | None = None , _a:int | None = None ): if arr is None and size is not None: snake_case__ = size snake_case__ = [0] * size elif arr is not None: self.init(_a ) else: raise ValueError('''Either arr or size must be specified''' ) def SCREAMING_SNAKE_CASE__ ( self:Any , _a:list[int] ): snake_case__ = len(_a ) snake_case__ = deepcopy(_a ) for i in range(1 , self.size ): snake_case__ = self.next_(_a ) if j < self.size: self.tree[j] += self.tree[i] def SCREAMING_SNAKE_CASE__ ( self:Any ): snake_case__ = self.tree[:] for i in range(self.size - 1 , 0 , -1 ): snake_case__ = self.next_(_a ) if j < self.size: arr[j] -= arr[i] return arr @staticmethod def SCREAMING_SNAKE_CASE__ ( _a:int ): return index + (index & (-index)) @staticmethod def SCREAMING_SNAKE_CASE__ ( _a:int ): return index - (index & (-index)) def SCREAMING_SNAKE_CASE__ ( self:List[Any] , _a:int , _a:int ): if index == 0: self.tree[0] += value return while index < self.size: self.tree[index] += value snake_case__ = self.next_(_a ) def SCREAMING_SNAKE_CASE__ ( self:Dict , _a:int , _a:int ): self.add(_a , value - self.get(_a ) ) def SCREAMING_SNAKE_CASE__ ( self:Optional[Any] , _a:int ): if right == 0: return 0 snake_case__ = self.tree[0] right -= 1 # make right inclusive while right > 0: result += self.tree[right] snake_case__ = self.prev(_a ) return result def SCREAMING_SNAKE_CASE__ ( self:Dict , _a:int , _a:int ): return self.prefix(_a ) - self.prefix(_a ) def SCREAMING_SNAKE_CASE__ ( self:str , _a:int ): return self.query(_a , index + 1 ) def SCREAMING_SNAKE_CASE__ ( self:str , _a:int ): value -= self.tree[0] if value < 0: return -1 snake_case__ = 1 # Largest power of 2 <= size while j * 2 < self.size: j *= 2 snake_case__ = 0 while j > 0: if i + j < self.size and self.tree[i + j] <= value: value -= self.tree[i + j] i += j j //= 2 return i if __name__ == "__main__": import doctest doctest.testmod()
33
1
# Text-classification pipeline (transformers): tokenizes text, runs a sequence
# classification model, and maps logits to labeled scores.
# NOTE(review): identifiers here look machine-mangled — both helper functions
# share the name SCREAMING_SNAKE_CASE (the second shadows the first), their
# bodies read `_outputs` while the declared parameter is `__lowerCAmelCase`
# (NameError at call time), locals are assigned to `snake_case__` but read by
# their intended names, several parameter lists repeat `_a` (a SyntaxError),
# the sort key `lambda _a : x["score"]` reads an undefined `x`, and
# `config.idalabel` is presumably `config.id2label`. Code is left
# byte-identical; comments only record the apparent intent — confirm against
# the upstream transformers source before relying on any of it.
import warnings
from typing import Dict

import numpy as np

from ..utils import ExplicitEnum, add_end_docstrings, is_tf_available, is_torch_available
from .base import PIPELINE_INIT_ARGS, GenericTensor, Pipeline


if is_tf_available():
    from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING

if is_torch_available():
    from ..models.auto.modeling_auto import MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING


# Intended: element-wise logistic sigmoid over raw logits.
def SCREAMING_SNAKE_CASE ( __lowerCAmelCase ) -> int:
    return 1.0 / (1.0 + np.exp(-_outputs ))


# Intended: numerically-stable softmax over the last axis (max-shifted).
def SCREAMING_SNAKE_CASE ( __lowerCAmelCase ) -> Any:
    snake_case__ = np.max(_outputs , axis=-1 , keepdims=__lowerCAmelCase )
    snake_case__ = np.exp(_outputs - maxes )
    return shifted_exp / shifted_exp.sum(axis=-1 , keepdims=__lowerCAmelCase )


class __magic_name__ (snake_case_ ):
    """Enum of post-processing functions applied to model logits (sigmoid/softmax/none)."""

    __lowercase : Optional[int] = 'sigmoid'
    __lowercase : int = 'softmax'
    __lowercase : Optional[Any] = 'none'


@add_end_docstrings(
    snake_case_ ,R'\n return_all_scores (`bool`, *optional*, defaults to `False`):\n Whether to return all prediction scores or just the one of the predicted class.\n function_to_apply (`str`, *optional*, defaults to `"default"`):\n The function to apply to the model outputs in order to retrieve the scores. Accepts four different values:\n\n - `"default"`: if the model has a single label, will apply the sigmoid function on the output. If the model\n has several labels, will apply the softmax function on the output.\n - `"sigmoid"`: Applies the sigmoid function on the output.\n - `"softmax"`: Applies the softmax function on the output.\n - `"none"`: Does not apply any function on the output.\n ' ,)
class __magic_name__ (snake_case_ ):
    """Sequence-classification pipeline: preprocess -> model forward -> postprocess to label/score dicts."""

    # Flags consumed by the base Pipeline (return_all_scores default, default scoring fn).
    __lowercase : List[str] = False
    __lowercase : List[str] = ClassificationFunction.NONE

    def __init__( self:Optional[Any] , **_a:Union[str, Any] ):
        super().__init__(**_a )
        # Restrict to sequence-classification model classes for the active framework.
        self.check_model_type(
            TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
            if self.framework == '''tf'''
            else MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING )

    # Splits user kwargs into (preprocess, forward, postprocess) parameter dicts;
    # handles the deprecated `return_all_scores` in favor of `top_k`.
    def SCREAMING_SNAKE_CASE__ ( self:str , _a:int=None , _a:str=None , _a:Union[str, Any]="" , **_a:Tuple ):
        # Using "" as default argument because we're going to use `top_k=None` in user code to declare
        # "No top_k"
        snake_case__ = tokenizer_kwargs
        snake_case__ = {}
        if hasattr(self.model.config , '''return_all_scores''' ) and return_all_scores is None:
            snake_case__ = self.model.config.return_all_scores
        if isinstance(_a , _a ) or top_k is None:
            snake_case__ = top_k
            snake_case__ = False
        elif return_all_scores is not None:
            warnings.warn(
                '''`return_all_scores` is now deprecated,  if want a similar functionality use `top_k=None` instead of'''
                ''' `return_all_scores=True` or `top_k=1` instead of `return_all_scores=False`.''' , _a , )
            if return_all_scores:
                snake_case__ = None
            else:
                snake_case__ = 1
        if isinstance(_a , _a ):
            snake_case__ = ClassificationFunction[function_to_apply.upper()]
        if function_to_apply is not None:
            snake_case__ = function_to_apply
        return preprocess_params, {}, postprocess_params

    def __call__( self:List[str] , *_a:str , **_a:Optional[int] ):
        """Run classification; in legacy mode a single input still returns a list."""
        snake_case__ = super().__call__(*_a , **_a )
        # TODO try and retrieve it in a nicer way from _sanitize_parameters.
        snake_case__ = '''top_k''' not in kwargs
        if isinstance(args[0] , _a ) and _legacy:
            # This pipeline is odd, and return a list when single item is run
            return [result]
        else:
            return result

    # Tokenizes raw input (str, dict of tokenizer kwargs, or legacy [[text, pair]]).
    def SCREAMING_SNAKE_CASE__ ( self:Optional[int] , _a:Optional[int] , **_a:str ):
        snake_case__ = self.framework
        if isinstance(_a , _a ):
            return self.tokenizer(**_a , return_tensors=_a , **_a )
        elif isinstance(_a , _a ) and len(_a ) == 1 and isinstance(inputs[0] , _a ) and len(inputs[0] ) == 2:
            # It used to be valid to use a list of list of list for text pairs, keeping this path for BC
            return self.tokenizer(
                text=inputs[0][0] , text_pair=inputs[0][1] , return_tensors=_a , **_a )
        elif isinstance(_a , _a ):
            # This is likely an invalid usage of the pipeline attempting to pass text pairs.
            raise ValueError(
                '''The pipeline received invalid inputs, if you are trying to send text pairs, you can try to send a'''
                ''' dictionary `{"text": "My text", "text_pair": "My pair"}` in order to send a text pair.''' )
        return self.tokenizer(_a , return_tensors=_a , **_a )

    # Forward pass: feed tokenized tensors straight into the model.
    def SCREAMING_SNAKE_CASE__ ( self:List[str] , _a:Optional[int] ):
        return self.model(**_a )

    # Converts logits into {label, score} dicts, applying sigmoid/softmax/none
    # (chosen from args, model config, or problem_type) and optional top_k.
    def SCREAMING_SNAKE_CASE__ ( self:Union[str, Any] , _a:List[Any] , _a:List[Any]=None , _a:Tuple=1 , _a:Tuple=True ):
        # `_legacy` is used to determine if we're running the naked pipeline and in backward
        # compatibility mode, or if running the pipeline with `pipeline(..., top_k=1)` we're running
        # the more natural result containing the list.
        # Default value before `set_parameters`
        if function_to_apply is None:
            if self.model.config.problem_type == "multi_label_classification" or self.model.config.num_labels == 1:
                snake_case__ = ClassificationFunction.SIGMOID
            elif self.model.config.problem_type == "single_label_classification" or self.model.config.num_labels > 1:
                snake_case__ = ClassificationFunction.SOFTMAX
            elif hasattr(self.model.config , '''function_to_apply''' ) and function_to_apply is None:
                snake_case__ = self.model.config.function_to_apply
            else:
                snake_case__ = ClassificationFunction.NONE
        snake_case__ = model_outputs['''logits'''][0]
        snake_case__ = outputs.numpy()
        if function_to_apply == ClassificationFunction.SIGMOID:
            snake_case__ = sigmoid(_a )
        elif function_to_apply == ClassificationFunction.SOFTMAX:
            snake_case__ = softmax(_a )
        elif function_to_apply == ClassificationFunction.NONE:
            snake_case__ = outputs
        else:
            raise ValueError(F"""Unrecognized `function_to_apply` argument: {function_to_apply}""" )
        if top_k == 1 and _legacy:
            return {"label": self.model.config.idalabel[scores.argmax().item()], "score": scores.max().item()}
        snake_case__ = [
            {'''label''': self.model.config.idalabel[i], '''score''': score.item()} for i, score in enumerate(_a )
        ]
        if not _legacy:
            dict_scores.sort(key=lambda _a : x["score"] , reverse=_a )
            if top_k is not None:
                snake_case__ = dict_scores[:top_k]
        return dict_scores
# Test suite for the TensorFlow Blenderbot model (transformers).
# NOTE(review): identifiers look machine-mangled — every method is named
# SCREAMING_SNAKE_CASE__ (later defs shadow earlier ones, so unittest cannot
# discover individual tests), parameter lists repeat `_a` (a SyntaxError),
# locals are assigned to `snake_case__` but read by their intended names, and
# `tf.inta` / `TFAutoModelForSeqaSeqLM` / `eos_token_ids` are presumably
# tf.int8 (or int32), TFAutoModelForSeq2SeqLM and eos_token_id upstream.
# Code is left byte-identical; comments only record the apparent intent —
# confirm against the upstream transformers test file before relying on it.
from __future__ import annotations

import unittest

from transformers import BlenderbotConfig, BlenderbotTokenizer, is_tf_available
from transformers.testing_utils import require_tf, require_tokenizers, slow
from transformers.utils import cached_property

from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin


if is_tf_available():
    import tensorflow as tf

    from transformers import TFAutoModelForSeqaSeqLM, TFBlenderbotForConditionalGeneration, TFBlenderbotModel


@require_tf
class __magic_name__ :
    """Model tester: builds tiny Blenderbot configs/inputs and checks past-key-values decoding."""

    # Config class under test, config overrides, and activation name.
    __lowercase : int = BlenderbotConfig
    __lowercase : Any = {}
    __lowercase : Optional[Any] = 'gelu'

    def __init__( self:Tuple , _a:Optional[Any] , _a:Optional[Any]=13 , _a:Tuple=7 , _a:Union[str, Any]=True , _a:int=False , _a:int=99 , _a:Optional[int]=32 , _a:List[str]=2 , _a:List[str]=4 , _a:List[Any]=37 , _a:Any=0.1 , _a:int=0.1 , _a:List[Any]=20 , _a:List[str]=2 , _a:int=1 , _a:Dict=0 , ):
        # Store all hyperparameters of the tiny model used by the tests.
        snake_case__ = parent
        snake_case__ = batch_size
        snake_case__ = seq_length
        snake_case__ = is_training
        snake_case__ = use_labels
        snake_case__ = vocab_size
        snake_case__ = hidden_size
        snake_case__ = num_hidden_layers
        snake_case__ = num_attention_heads
        snake_case__ = intermediate_size
        snake_case__ = hidden_dropout_prob
        snake_case__ = attention_probs_dropout_prob
        snake_case__ = max_position_embeddings
        snake_case__ = eos_token_id
        snake_case__ = pad_token_id
        snake_case__ = bos_token_id

    # Produces (config, inputs_dict) with input ids terminated by the EOS token.
    def SCREAMING_SNAKE_CASE__ ( self:int ):
        snake_case__ = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size )
        snake_case__ = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ) , 1 )
        snake_case__ = tf.concat([input_ids, eos_tensor] , axis=1 )
        snake_case__ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        snake_case__ = self.config_cls(
            vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , **self.config_updates , )
        snake_case__ = prepare_blenderbot_inputs_dict(_a , _a , _a )
        return config, inputs_dict

    # Checks cached (past_key_values) decoding matches a full forward pass.
    def SCREAMING_SNAKE_CASE__ ( self:int , _a:Optional[Any] , _a:int ):
        snake_case__ = TFBlenderbotModel(config=_a ).get_decoder()
        snake_case__ = inputs_dict['''input_ids''']
        snake_case__ = input_ids[:1, :]
        snake_case__ = inputs_dict['''attention_mask'''][:1, :]
        snake_case__ = inputs_dict['''head_mask''']
        snake_case__ = 1
        # first forward pass
        snake_case__ = model(_a , attention_mask=_a , head_mask=_a , use_cache=_a )
        snake_case__ , snake_case__ = outputs.to_tuple()
        # create hypothetical next token and extent to next_input_ids
        snake_case__ = ids_tensor((self.batch_size, 3) , config.vocab_size )
        snake_case__ = tf.cast(ids_tensor((self.batch_size, 3) , 2 ) , tf.inta )
        # append to next input_ids and
        snake_case__ = tf.concat([input_ids, next_tokens] , axis=-1 )
        snake_case__ = tf.concat([attention_mask, next_attn_mask] , axis=-1 )
        snake_case__ = model(_a , attention_mask=_a )[0]
        snake_case__ = model(_a , attention_mask=_a , past_key_values=_a )[0]
        self.parent.assertEqual(next_tokens.shape[1] , output_from_past.shape[1] )
        # select random slice
        snake_case__ = int(ids_tensor((1,) , output_from_past.shape[-1] ) )
        snake_case__ = output_from_no_past[:, -3:, random_slice_idx]
        snake_case__ = output_from_past[:, :, random_slice_idx]
        # test that outputs are equal for slice
        tf.debugging.assert_near(_a , _a , rtol=1e-3 )


# Fills in default attention/head masks for the seq2seq inputs dict.
def SCREAMING_SNAKE_CASE ( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase=None , __lowerCAmelCase=None , __lowerCAmelCase=None , __lowerCAmelCase=None , __lowerCAmelCase=None , ) -> Tuple:
    if attention_mask is None:
        # mask out pad tokens in the encoder input
        snake_case__ = tf.cast(tf.math.not_equal(__lowerCAmelCase , config.pad_token_id ) , tf.inta )
    if decoder_attention_mask is None:
        # first decoder position is always attended; rest masks pad tokens
        snake_case__ = tf.concat(
            [
                tf.ones(decoder_input_ids[:, :1].shape , dtype=tf.inta ),
                tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:] , config.pad_token_id ) , tf.inta ),
            ] , axis=-1 , )
    if head_mask is None:
        snake_case__ = tf.ones((config.encoder_layers, config.encoder_attention_heads) )
    if decoder_head_mask is None:
        snake_case__ = tf.ones((config.decoder_layers, config.decoder_attention_heads) )
    if cross_attn_head_mask is None:
        snake_case__ = tf.ones((config.decoder_layers, config.decoder_attention_heads) )
    return {
        "input_ids": input_ids,
        "decoder_input_ids": decoder_input_ids,
        "attention_mask": attention_mask,
        "decoder_attention_mask": decoder_attention_mask,
        "head_mask": head_mask,
        "decoder_head_mask": decoder_head_mask,
        "cross_attn_head_mask": cross_attn_head_mask,
    }


@require_tf
class __magic_name__ (snake_case_ ,snake_case_ ,unittest.TestCase ):
    """Common-API test class wiring TFBlenderbot models into the shared TF model tests."""

    # Model classes under test and pipeline-task mapping for the mixins.
    __lowercase : List[str] = (TFBlenderbotForConditionalGeneration, TFBlenderbotModel) if is_tf_available() else ()
    __lowercase : Any = (TFBlenderbotForConditionalGeneration,) if is_tf_available() else ()
    __lowercase : Tuple = (
        {
            'conversational': TFBlenderbotForConditionalGeneration,
            'feature-extraction': TFBlenderbotModel,
            'summarization': TFBlenderbotForConditionalGeneration,
            'text2text-generation': TFBlenderbotForConditionalGeneration,
            'translation': TFBlenderbotForConditionalGeneration,
        }
        if is_tf_available()
        else {}
    )
    __lowercase : Any = True
    __lowercase : int = False
    __lowercase : int = False

    # Instantiate the model tester and the generic config tester.
    def SCREAMING_SNAKE_CASE__ ( self:Union[str, Any] ):
        snake_case__ = TFBlenderbotModelTester(self )
        snake_case__ = ConfigTester(self , config_class=_a )

    # Run the shared config sanity checks.
    def SCREAMING_SNAKE_CASE__ ( self:Optional[Any] ):
        self.config_tester.run_common_tests()

    # Exercise cached decoding via the model tester.
    def SCREAMING_SNAKE_CASE__ ( self:Optional[Any] ):
        snake_case__ = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_decoder_model_past_large_inputs(*_a )


@require_tokenizers
@require_tf
class __magic_name__ (unittest.TestCase ):
    """Slow integration test generating a reply with the real 400M distilled checkpoint."""

    __lowercase : Optional[int] = ['My friends are cool but they eat too many carbs.']
    __lowercase : Optional[int] = 'facebook/blenderbot-400M-distill'

    @cached_property
    def SCREAMING_SNAKE_CASE__ ( self:Tuple ):
        # Lazily load the tokenizer for the reference checkpoint.
        return BlenderbotTokenizer.from_pretrained(self.model_name )

    @cached_property
    def SCREAMING_SNAKE_CASE__ ( self:Union[str, Any] ):
        # Lazily load the seq2seq model for the reference checkpoint.
        snake_case__ = TFAutoModelForSeqaSeqLM.from_pretrained(self.model_name )
        return model

    @slow
    def SCREAMING_SNAKE_CASE__ ( self:List[Any] ):
        # End-to-end generate + decode; pins the exact expected reply text.
        snake_case__ = self.tokenizer(self.src_text , return_tensors='''tf''' )
        snake_case__ = self.model.generate(
            model_inputs.input_ids , )
        snake_case__ = self.tokenizer.batch_decode(generated_ids.numpy() , skip_special_tokens=_a )[0]
        assert (
            generated_words
            == " That's unfortunate. Are they trying to lose weight or are they just trying to be healthier?"
        )
1
import enum import warnings from .. import MODEL_FOR_CAUSAL_LM_MAPPING, TF_MODEL_FOR_CAUSAL_LM_MAPPING from ..utils import add_end_docstrings, is_tf_available from .base import PIPELINE_INIT_ARGS, Pipeline if is_tf_available(): import tensorflow as tf class __magic_name__ (enum.Enum ): '''simple docstring''' __lowercase : Dict = 0 __lowercase : Tuple = 1 __lowercase : List[Any] = 2 @add_end_docstrings(snake_case_ ) class __magic_name__ (snake_case_ ): '''simple docstring''' __lowercase : List[Any] = '\n In 1991, the remains of Russian Tsar Nicholas II and his family (except for Alexei and Maria) are discovered. The\n voice of Nicholas\'s young son, Tsarevich Alexei Nikolaevich, narrates the remainder of the story. 1883 Western\n Siberia, a young Grigori Rasputin is asked by his father and a group of men to perform magic. Rasputin has a vision\n and denounces one of the men as a horse thief. Although his father initially slaps him for making such an\n accusation, Rasputin watches as the man is chased outside and beaten. Twenty years later, Rasputin sees a vision of\n the Virgin Mary, prompting him to become a priest. Rasputin quickly becomes famous, with people, even a bishop,\n begging for his blessing. <eod> </s> <eos>\n ' def __init__( self:Dict , *_a:int , **_a:Dict ): super().__init__(*_a , **_a ) self.check_model_type( TF_MODEL_FOR_CAUSAL_LM_MAPPING if self.framework == '''tf''' else MODEL_FOR_CAUSAL_LM_MAPPING ) if "prefix" not in self._preprocess_params: # This is very specific. The logic is quite complex and needs to be done # as a "default". # It also defines both some preprocess_kwargs and generate_kwargs # which is why we cannot put them in their respective methods. 
snake_case__ = None if self.model.config.prefix is not None: snake_case__ = self.model.config.prefix if prefix is None and self.model.__class__.__name__ in [ "XLNetLMHeadModel", "TransfoXLLMHeadModel", "TFXLNetLMHeadModel", "TFTransfoXLLMHeadModel", ]: # For XLNet and TransformerXL we add an article to the prompt to give more state to the model. snake_case__ = self.XL_PREFIX if prefix is not None: # Recalculate some generate_kwargs linked to prefix. snake_case__ , snake_case__ , snake_case__ = self._sanitize_parameters(prefix=_a , **self._forward_params ) snake_case__ = {**self._preprocess_params, **preprocess_params} snake_case__ = {**self._forward_params, **forward_params} def SCREAMING_SNAKE_CASE__ ( self:Any , _a:str=None , _a:Tuple=None , _a:Optional[Any]=None , _a:str=None , _a:Union[str, Any]=None , _a:Union[str, Any]=None , _a:str=None , _a:int=None , **_a:int , ): snake_case__ = {} if prefix is not None: snake_case__ = prefix if prefix: snake_case__ = self.tokenizer( _a , padding=_a , add_special_tokens=_a , return_tensors=self.framework ) snake_case__ = prefix_inputs['''input_ids'''].shape[-1] if handle_long_generation is not None: if handle_long_generation not in {"hole"}: raise ValueError( F"""{handle_long_generation} is not a valid value for `handle_long_generation` parameter expected""" ''' [None, \'hole\']''' ) snake_case__ = handle_long_generation preprocess_params.update(_a ) snake_case__ = generate_kwargs snake_case__ = {} if return_full_text is not None and return_type is None: if return_text is not None: raise ValueError('''`return_text` is mutually exclusive with `return_full_text`''' ) if return_tensors is not None: raise ValueError('''`return_full_text` is mutually exclusive with `return_tensors`''' ) snake_case__ = ReturnType.FULL_TEXT if return_full_text else ReturnType.NEW_TEXT if return_tensors is not None and return_type is None: if return_text is not None: raise ValueError('''`return_text` is mutually exclusive with `return_tensors`''' 
) snake_case__ = ReturnType.TENSORS if return_type is not None: snake_case__ = return_type if clean_up_tokenization_spaces is not None: snake_case__ = clean_up_tokenization_spaces if stop_sequence is not None: snake_case__ = self.tokenizer.encode(_a , add_special_tokens=_a ) if len(_a ) > 1: warnings.warn( '''Stopping on a multiple token sequence is not yet supported on transformers. The first token of''' ''' the stop sequence will be used as the stop sequence string in the interim.''' ) snake_case__ = stop_sequence_ids[0] return preprocess_params, forward_params, postprocess_params def SCREAMING_SNAKE_CASE__ ( self:List[str] , *_a:List[str] , **_a:Optional[int] ): # Parse arguments if self.model.__class__.__name__ in ["TransfoXLLMHeadModel"]: kwargs.update({'''add_space_before_punct_symbol''': True} ) return super()._parse_and_tokenize(*_a , **_a ) def __call__( self:List[str] , _a:str , **_a:int ): return super().__call__(_a , **_a ) def SCREAMING_SNAKE_CASE__ ( self:Union[str, Any] , _a:int , _a:int="" , _a:int=None , **_a:int ): snake_case__ = self.tokenizer( prefix + prompt_text , padding=_a , add_special_tokens=_a , return_tensors=self.framework ) snake_case__ = prompt_text if handle_long_generation == "hole": snake_case__ = inputs['''input_ids'''].shape[-1] if "max_new_tokens" in generate_kwargs: snake_case__ = generate_kwargs['''max_new_tokens'''] else: snake_case__ = generate_kwargs.get('''max_length''' , self.model.config.max_length ) - cur_len if new_tokens < 0: raise ValueError('''We cannot infer how many new tokens are expected''' ) if cur_len + new_tokens > self.tokenizer.model_max_length: snake_case__ = self.tokenizer.model_max_length - new_tokens if keep_length <= 0: raise ValueError( '''We cannot use `hole` to handle this generation the number of desired tokens exceeds the''' ''' models max length''' ) snake_case__ = inputs['''input_ids'''][:, -keep_length:] if "attention_mask" in inputs: snake_case__ = inputs['''attention_mask'''][:, 
-keep_length:] return inputs def SCREAMING_SNAKE_CASE__ ( self:List[str] , _a:List[str] , **_a:Any ): snake_case__ = model_inputs['''input_ids'''] snake_case__ = model_inputs.get('''attention_mask''' , _a ) # Allow empty prompts if input_ids.shape[1] == 0: snake_case__ = None snake_case__ = None snake_case__ = 1 else: snake_case__ = input_ids.shape[0] snake_case__ = model_inputs.pop('''prompt_text''' ) # If there is a prefix, we may need to adjust the generation length. Do so without permanently modifying # generate_kwargs, as some of the parameterization may come from the initialization of the pipeline. snake_case__ = generate_kwargs.pop('''prefix_length''' , 0 ) if prefix_length > 0: snake_case__ = '''max_new_tokens''' in generate_kwargs or ( '''generation_config''' in generate_kwargs and generate_kwargs['''generation_config'''].max_new_tokens is not None ) if not has_max_new_tokens: snake_case__ = generate_kwargs.get('''max_length''' ) or self.model.config.max_length generate_kwargs["max_length"] += prefix_length snake_case__ = '''min_new_tokens''' in generate_kwargs or ( '''generation_config''' in generate_kwargs and generate_kwargs['''generation_config'''].min_new_tokens is not None ) if not has_min_new_tokens and "min_length" in generate_kwargs: generate_kwargs["min_length"] += prefix_length # BS x SL snake_case__ = self.model.generate(input_ids=_a , attention_mask=_a , **_a ) snake_case__ = generated_sequence.shape[0] if self.framework == "pt": snake_case__ = generated_sequence.reshape(_a , out_b // in_b , *generated_sequence.shape[1:] ) elif self.framework == "tf": snake_case__ = tf.reshape(_a , (in_b, out_b // in_b, *generated_sequence.shape[1:]) ) return {"generated_sequence": generated_sequence, "input_ids": input_ids, "prompt_text": prompt_text} def SCREAMING_SNAKE_CASE__ ( self:Dict , _a:Tuple , _a:Union[str, Any]=ReturnType.FULL_TEXT , _a:Optional[int]=True ): snake_case__ = model_outputs['''generated_sequence'''][0] snake_case__ = 
model_outputs['''input_ids'''] snake_case__ = model_outputs['''prompt_text'''] snake_case__ = generated_sequence.numpy().tolist() snake_case__ = [] for sequence in generated_sequence: if return_type == ReturnType.TENSORS: snake_case__ = {'''generated_token_ids''': sequence} elif return_type in {ReturnType.NEW_TEXT, ReturnType.FULL_TEXT}: # Decode text snake_case__ = self.tokenizer.decode( _a , skip_special_tokens=_a , clean_up_tokenization_spaces=_a , ) # Remove PADDING prompt of the sequence if XLNet or Transfo-XL model is used if input_ids is None: snake_case__ = 0 else: snake_case__ = len( self.tokenizer.decode( input_ids[0] , skip_special_tokens=_a , clean_up_tokenization_spaces=_a , ) ) if return_type == ReturnType.FULL_TEXT: snake_case__ = prompt_text + text[prompt_length:] else: snake_case__ = text[prompt_length:] snake_case__ = {'''generated_text''': all_text} records.append(_a ) return records
33
import json import sys import tempfile import unittest from pathlib import Path import transformers from transformers import ( CONFIG_MAPPING, IMAGE_PROCESSOR_MAPPING, AutoConfig, AutoImageProcessor, CLIPConfig, CLIPImageProcessor, ) from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER sys.path.append(str(Path(__file__).parent.parent.parent.parent / """utils""")) from test_module.custom_configuration import CustomConfig # noqa E402 from test_module.custom_image_processing import CustomImageProcessor # noqa E402 class __magic_name__ (unittest.TestCase ): '''simple docstring''' def SCREAMING_SNAKE_CASE__ ( self:List[Any] ): snake_case__ = 0 def SCREAMING_SNAKE_CASE__ ( self:Optional[Any] ): snake_case__ = AutoImageProcessor.from_pretrained('''openai/clip-vit-base-patch32''' ) self.assertIsInstance(_a , _a ) def SCREAMING_SNAKE_CASE__ ( self:str ): with tempfile.TemporaryDirectory() as tmpdirname: snake_case__ = Path(_a ) / '''preprocessor_config.json''' snake_case__ = Path(_a ) / '''config.json''' json.dump( {'''image_processor_type''': '''CLIPImageProcessor''', '''processor_class''': '''CLIPProcessor'''} , open(_a , '''w''' ) , ) json.dump({'''model_type''': '''clip'''} , open(_a , '''w''' ) ) snake_case__ = AutoImageProcessor.from_pretrained(_a ) self.assertIsInstance(_a , _a ) def SCREAMING_SNAKE_CASE__ ( self:Dict ): # Ensure we can load the image processor from the feature extractor config with tempfile.TemporaryDirectory() as tmpdirname: snake_case__ = Path(_a ) / '''preprocessor_config.json''' snake_case__ = Path(_a ) / '''config.json''' json.dump( {'''feature_extractor_type''': '''CLIPFeatureExtractor''', '''processor_class''': '''CLIPProcessor'''} , open(_a , '''w''' ) , ) json.dump({'''model_type''': '''clip'''} , open(_a , '''w''' ) ) snake_case__ = AutoImageProcessor.from_pretrained(_a ) self.assertIsInstance(_a , _a ) def SCREAMING_SNAKE_CASE__ ( self:Optional[int] ): with tempfile.TemporaryDirectory() as tmpdirname: snake_case__ = 
CLIPConfig() # Create a dummy config file with image_proceesor_type snake_case__ = Path(_a ) / '''preprocessor_config.json''' snake_case__ = Path(_a ) / '''config.json''' json.dump( {'''image_processor_type''': '''CLIPImageProcessor''', '''processor_class''': '''CLIPProcessor'''} , open(_a , '''w''' ) , ) json.dump({'''model_type''': '''clip'''} , open(_a , '''w''' ) ) # remove image_processor_type to make sure config.json alone is enough to load image processor locally snake_case__ = AutoImageProcessor.from_pretrained(_a ).to_dict() config_dict.pop('''image_processor_type''' ) snake_case__ = CLIPImageProcessor(**_a ) # save in new folder model_config.save_pretrained(_a ) config.save_pretrained(_a ) snake_case__ = AutoImageProcessor.from_pretrained(_a ) # make sure private variable is not incorrectly saved snake_case__ = json.loads(config.to_json_string() ) self.assertTrue('''_processor_class''' not in dict_as_saved ) self.assertIsInstance(_a , _a ) def SCREAMING_SNAKE_CASE__ ( self:List[str] ): with tempfile.TemporaryDirectory() as tmpdirname: snake_case__ = Path(_a ) / '''preprocessor_config.json''' json.dump( {'''image_processor_type''': '''CLIPImageProcessor''', '''processor_class''': '''CLIPProcessor'''} , open(_a , '''w''' ) , ) snake_case__ = AutoImageProcessor.from_pretrained(_a ) self.assertIsInstance(_a , _a ) def SCREAMING_SNAKE_CASE__ ( self:Dict ): with self.assertRaisesRegex( _a , '''clip-base is not a local folder and is not a valid model identifier''' ): snake_case__ = AutoImageProcessor.from_pretrained('''clip-base''' ) def SCREAMING_SNAKE_CASE__ ( self:Union[str, Any] ): with self.assertRaisesRegex( _a , r'''aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)''' ): snake_case__ = AutoImageProcessor.from_pretrained(_a , revision='''aaaaaa''' ) def SCREAMING_SNAKE_CASE__ ( self:List[Any] ): with self.assertRaisesRegex( _a , '''hf-internal-testing/config-no-model does not appear to have a file named 
preprocessor_config.json.''' , ): snake_case__ = AutoImageProcessor.from_pretrained('''hf-internal-testing/config-no-model''' ) def SCREAMING_SNAKE_CASE__ ( self:Dict ): # If remote code is not set, we will time out when asking whether to load the model. with self.assertRaises(_a ): snake_case__ = AutoImageProcessor.from_pretrained('''hf-internal-testing/test_dynamic_image_processor''' ) # If remote code is disabled, we can't load this config. with self.assertRaises(_a ): snake_case__ = AutoImageProcessor.from_pretrained( '''hf-internal-testing/test_dynamic_image_processor''' , trust_remote_code=_a ) snake_case__ = AutoImageProcessor.from_pretrained( '''hf-internal-testing/test_dynamic_image_processor''' , trust_remote_code=_a ) self.assertEqual(image_processor.__class__.__name__ , '''NewImageProcessor''' ) # Test image processor can be reloaded. with tempfile.TemporaryDirectory() as tmp_dir: image_processor.save_pretrained(_a ) snake_case__ = AutoImageProcessor.from_pretrained(_a , trust_remote_code=_a ) self.assertEqual(reloaded_image_processor.__class__.__name__ , '''NewImageProcessor''' ) def SCREAMING_SNAKE_CASE__ ( self:Dict ): try: AutoConfig.register('''custom''' , _a ) AutoImageProcessor.register(_a , _a ) # Trying to register something existing in the Transformers library will raise an error with self.assertRaises(_a ): AutoImageProcessor.register(_a , _a ) with tempfile.TemporaryDirectory() as tmpdirname: snake_case__ = Path(_a ) / '''preprocessor_config.json''' snake_case__ = Path(_a ) / '''config.json''' json.dump( {'''feature_extractor_type''': '''CLIPFeatureExtractor''', '''processor_class''': '''CLIPProcessor'''} , open(_a , '''w''' ) , ) json.dump({'''model_type''': '''clip'''} , open(_a , '''w''' ) ) snake_case__ = CustomImageProcessor.from_pretrained(_a ) # Now that the config is registered, it can be used as any other config with the auto-API with tempfile.TemporaryDirectory() as tmp_dir: image_processor.save_pretrained(_a ) snake_case__ = 
AutoImageProcessor.from_pretrained(_a ) self.assertIsInstance(_a , _a ) finally: if "custom" in CONFIG_MAPPING._extra_content: del CONFIG_MAPPING._extra_content["custom"] if CustomConfig in IMAGE_PROCESSOR_MAPPING._extra_content: del IMAGE_PROCESSOR_MAPPING._extra_content[CustomConfig] def SCREAMING_SNAKE_CASE__ ( self:Optional[int] ): class __magic_name__ (snake_case_ ): '''simple docstring''' __lowercase : List[str] = True try: AutoConfig.register('''custom''' , _a ) AutoImageProcessor.register(_a , _a ) # If remote code is not set, the default is to use local snake_case__ = AutoImageProcessor.from_pretrained('''hf-internal-testing/test_dynamic_image_processor''' ) self.assertEqual(image_processor.__class__.__name__ , '''NewImageProcessor''' ) self.assertTrue(image_processor.is_local ) # If remote code is disabled, we load the local one. snake_case__ = AutoImageProcessor.from_pretrained( '''hf-internal-testing/test_dynamic_image_processor''' , trust_remote_code=_a ) self.assertEqual(image_processor.__class__.__name__ , '''NewImageProcessor''' ) self.assertTrue(image_processor.is_local ) # If remote is enabled, we load from the Hub snake_case__ = AutoImageProcessor.from_pretrained( '''hf-internal-testing/test_dynamic_image_processor''' , trust_remote_code=_a ) self.assertEqual(image_processor.__class__.__name__ , '''NewImageProcessor''' ) self.assertTrue(not hasattr(_a , '''is_local''' ) ) finally: if "custom" in CONFIG_MAPPING._extra_content: del CONFIG_MAPPING._extra_content["custom"] if CustomConfig in IMAGE_PROCESSOR_MAPPING._extra_content: del IMAGE_PROCESSOR_MAPPING._extra_content[CustomConfig]
33
1
import inspect import unittest from transformers import ViTConfig from transformers.testing_utils import ( require_accelerate, require_torch, require_torch_gpu, require_vision, slow, torch_device, ) from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import ViTForImageClassification, ViTForMaskedImageModeling, ViTModel from transformers.models.vit.modeling_vit import VIT_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import ViTImageProcessor class __magic_name__ : '''simple docstring''' def __init__( self:Tuple , _a:int , _a:str=13 , _a:Optional[int]=30 , _a:List[str]=2 , _a:Dict=3 , _a:Tuple=True , _a:str=True , _a:Dict=32 , _a:str=5 , _a:Dict=4 , _a:Dict=37 , _a:Any="gelu" , _a:Union[str, Any]=0.1 , _a:Dict=0.1 , _a:Any=10 , _a:Union[str, Any]=0.02 , _a:Union[str, Any]=None , _a:str=2 , ): snake_case__ = parent snake_case__ = batch_size snake_case__ = image_size snake_case__ = patch_size snake_case__ = num_channels snake_case__ = is_training snake_case__ = use_labels snake_case__ = hidden_size snake_case__ = num_hidden_layers snake_case__ = num_attention_heads snake_case__ = intermediate_size snake_case__ = hidden_act snake_case__ = hidden_dropout_prob snake_case__ = attention_probs_dropout_prob snake_case__ = type_sequence_label_size snake_case__ = initializer_range snake_case__ = scope snake_case__ = encoder_stride # in ViT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token) snake_case__ = (image_size // patch_size) ** 2 snake_case__ = num_patches + 1 def SCREAMING_SNAKE_CASE__ ( self:int ): snake_case__ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] 
) snake_case__ = None if self.use_labels: snake_case__ = ids_tensor([self.batch_size] , self.type_sequence_label_size ) snake_case__ = self.get_config() return config, pixel_values, labels def SCREAMING_SNAKE_CASE__ ( self:List[str] ): return ViTConfig( image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=_a , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , ) def SCREAMING_SNAKE_CASE__ ( self:Dict , _a:Tuple , _a:Any , _a:Optional[int] ): snake_case__ = ViTModel(config=_a ) model.to(_a ) model.eval() snake_case__ = model(_a ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def SCREAMING_SNAKE_CASE__ ( self:Optional[Any] , _a:Optional[Any] , _a:Dict , _a:Union[str, Any] ): snake_case__ = ViTForMaskedImageModeling(config=_a ) model.to(_a ) model.eval() snake_case__ = model(_a ) self.parent.assertEqual( result.reconstruction.shape , (self.batch_size, self.num_channels, self.image_size, self.image_size) ) # test greyscale images snake_case__ = 1 snake_case__ = ViTForMaskedImageModeling(_a ) model.to(_a ) model.eval() snake_case__ = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] ) snake_case__ = model(_a ) self.parent.assertEqual(result.reconstruction.shape , (self.batch_size, 1, self.image_size, self.image_size) ) def SCREAMING_SNAKE_CASE__ ( self:Dict , _a:Tuple , _a:str , _a:Optional[int] ): snake_case__ = self.type_sequence_label_size snake_case__ = ViTForImageClassification(_a ) model.to(_a ) model.eval() snake_case__ = model(_a , labels=_a ) self.parent.assertEqual(result.logits.shape , 
(self.batch_size, self.type_sequence_label_size) ) # test greyscale images snake_case__ = 1 snake_case__ = ViTForImageClassification(_a ) model.to(_a ) model.eval() snake_case__ = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] ) snake_case__ = model(_a ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) def SCREAMING_SNAKE_CASE__ ( self:List[str] ): snake_case__ = self.prepare_config_and_inputs() ( ( snake_case__ ) , ( snake_case__ ) , ( snake_case__ ) , ) = config_and_inputs snake_case__ = {'''pixel_values''': pixel_values} return config, inputs_dict @require_torch class __magic_name__ (snake_case_ ,snake_case_ ,unittest.TestCase ): '''simple docstring''' __lowercase : Tuple = ( ( ViTModel, ViTForImageClassification, ViTForMaskedImageModeling, ) if is_torch_available() else () ) __lowercase : Dict = ( {'feature-extraction': ViTModel, 'image-classification': ViTForImageClassification} if is_torch_available() else {} ) __lowercase : Any = True __lowercase : Union[str, Any] = False __lowercase : int = False __lowercase : List[str] = False def SCREAMING_SNAKE_CASE__ ( self:int ): snake_case__ = ViTModelTester(self ) snake_case__ = ConfigTester(self , config_class=_a , has_text_modality=_a , hidden_size=37 ) def SCREAMING_SNAKE_CASE__ ( self:Dict ): self.config_tester.run_common_tests() @unittest.skip(reason='''ViT does not use inputs_embeds''' ) def SCREAMING_SNAKE_CASE__ ( self:Any ): pass def SCREAMING_SNAKE_CASE__ ( self:Any ): snake_case__ , snake_case__ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: snake_case__ = model_class(_a ) self.assertIsInstance(model.get_input_embeddings() , (nn.Module) ) snake_case__ = model.get_output_embeddings() self.assertTrue(x is None or isinstance(_a , nn.Linear ) ) def SCREAMING_SNAKE_CASE__ ( self:Optional[Any] ): snake_case__ , snake_case__ = self.model_tester.prepare_config_and_inputs_for_common() for 
model_class in self.all_model_classes: snake_case__ = model_class(_a ) snake_case__ = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic snake_case__ = [*signature.parameters.keys()] snake_case__ = ['''pixel_values'''] self.assertListEqual(arg_names[:1] , _a ) def SCREAMING_SNAKE_CASE__ ( self:List[Any] ): snake_case__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*_a ) def SCREAMING_SNAKE_CASE__ ( self:Union[str, Any] ): snake_case__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_image_modeling(*_a ) def SCREAMING_SNAKE_CASE__ ( self:Tuple ): snake_case__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*_a ) @slow def SCREAMING_SNAKE_CASE__ ( self:Tuple ): for model_name in VIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: snake_case__ = ViTModel.from_pretrained(_a ) self.assertIsNotNone(_a ) def SCREAMING_SNAKE_CASE ( ) -> Union[str, Any]: snake_case__ = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' ) return image @require_torch @require_vision class __magic_name__ (unittest.TestCase ): '''simple docstring''' @cached_property def SCREAMING_SNAKE_CASE__ ( self:List[str] ): return ViTImageProcessor.from_pretrained('''google/vit-base-patch16-224''' ) if is_vision_available() else None @slow def SCREAMING_SNAKE_CASE__ ( self:int ): snake_case__ = ViTForImageClassification.from_pretrained('''google/vit-base-patch16-224''' ).to(_a ) snake_case__ = self.default_image_processor snake_case__ = prepare_img() snake_case__ = image_processor(images=_a , return_tensors='''pt''' ).to(_a ) # forward pass with torch.no_grad(): snake_case__ = model(**_a ) # verify the logits snake_case__ = torch.Size((1, 10_00) ) self.assertEqual(outputs.logits.shape , _a ) snake_case__ = torch.tensor([-0.2744, 0.8215, -0.0836] ).to(_a ) 
self.assertTrue(torch.allclose(outputs.logits[0, :3] , _a , atol=1e-4 ) ) @slow def SCREAMING_SNAKE_CASE__ ( self:List[Any] ): # ViT models have an `interpolate_pos_encoding` argument in their forward method, # allowing to interpolate the pre-trained position embeddings in order to use # the model on higher resolutions. The DINO model by Facebook AI leverages this # to visualize self-attention on higher resolution images. snake_case__ = ViTModel.from_pretrained('''facebook/dino-vits8''' ).to(_a ) snake_case__ = ViTImageProcessor.from_pretrained('''facebook/dino-vits8''' , size=4_80 ) snake_case__ = prepare_img() snake_case__ = image_processor(images=_a , return_tensors='''pt''' ) snake_case__ = inputs.pixel_values.to(_a ) # forward pass with torch.no_grad(): snake_case__ = model(_a , interpolate_pos_encoding=_a ) # verify the logits snake_case__ = torch.Size((1, 36_01, 3_84) ) self.assertEqual(outputs.last_hidden_state.shape , _a ) snake_case__ = torch.tensor( [[4.2340, 4.3906, -6.6692], [4.5463, 1.8928, -6.7257], [4.4429, 0.8496, -5.8585]] ).to(_a ) self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :3, :3] , _a , atol=1e-4 ) ) @slow @require_accelerate @require_torch_gpu def SCREAMING_SNAKE_CASE__ ( self:Optional[Any] ): snake_case__ = ViTModel.from_pretrained('''facebook/dino-vits8''' , torch_dtype=torch.floataa , device_map='''auto''' ) snake_case__ = self.default_image_processor snake_case__ = prepare_img() snake_case__ = image_processor(images=_a , return_tensors='''pt''' ) snake_case__ = inputs.pixel_values.to(_a ) # forward pass to make sure inference works in fp16 with torch.no_grad(): snake_case__ = model(_a )
33
import argparse import json from pathlib import Path import requests import timm import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import DeiTImageProcessor, ViTConfig, ViTForImageClassification, ViTImageProcessor, ViTModel from transformers.utils import logging logging.set_verbosity_info() lowerCamelCase__ : int = logging.get_logger(__name__) def SCREAMING_SNAKE_CASE ( __lowerCAmelCase , __lowerCAmelCase=False ) -> int: snake_case__ = [] for i in range(config.num_hidden_layers ): # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms rename_keys.append((F"""blocks.{i}.norm1.weight""", F"""vit.encoder.layer.{i}.layernorm_before.weight""") ) rename_keys.append((F"""blocks.{i}.norm1.bias""", F"""vit.encoder.layer.{i}.layernorm_before.bias""") ) rename_keys.append((F"""blocks.{i}.attn.proj.weight""", F"""vit.encoder.layer.{i}.attention.output.dense.weight""") ) rename_keys.append((F"""blocks.{i}.attn.proj.bias""", F"""vit.encoder.layer.{i}.attention.output.dense.bias""") ) rename_keys.append((F"""blocks.{i}.norm2.weight""", F"""vit.encoder.layer.{i}.layernorm_after.weight""") ) rename_keys.append((F"""blocks.{i}.norm2.bias""", F"""vit.encoder.layer.{i}.layernorm_after.bias""") ) rename_keys.append((F"""blocks.{i}.mlp.fc1.weight""", F"""vit.encoder.layer.{i}.intermediate.dense.weight""") ) rename_keys.append((F"""blocks.{i}.mlp.fc1.bias""", F"""vit.encoder.layer.{i}.intermediate.dense.bias""") ) rename_keys.append((F"""blocks.{i}.mlp.fc2.weight""", F"""vit.encoder.layer.{i}.output.dense.weight""") ) rename_keys.append((F"""blocks.{i}.mlp.fc2.bias""", F"""vit.encoder.layer.{i}.output.dense.bias""") ) # projection layer + position embeddings rename_keys.extend( [ ('''cls_token''', '''vit.embeddings.cls_token'''), ('''patch_embed.proj.weight''', '''vit.embeddings.patch_embeddings.projection.weight'''), ('''patch_embed.proj.bias''', '''vit.embeddings.patch_embeddings.projection.bias'''), 
('''pos_embed''', '''vit.embeddings.position_embeddings'''), ] ) if base_model: # layernorm + pooler rename_keys.extend( [ ('''norm.weight''', '''layernorm.weight'''), ('''norm.bias''', '''layernorm.bias'''), ('''pre_logits.fc.weight''', '''pooler.dense.weight'''), ('''pre_logits.fc.bias''', '''pooler.dense.bias'''), ] ) # if just the base model, we should remove "vit" from all keys that start with "vit" snake_case__ = [(pair[0], pair[1][4:]) if pair[1].startswith('''vit''' ) else pair for pair in rename_keys] else: # layernorm + classification head rename_keys.extend( [ ('''norm.weight''', '''vit.layernorm.weight'''), ('''norm.bias''', '''vit.layernorm.bias'''), ('''head.weight''', '''classifier.weight'''), ('''head.bias''', '''classifier.bias'''), ] ) return rename_keys def SCREAMING_SNAKE_CASE ( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase=False ) -> Dict: for i in range(config.num_hidden_layers ): if base_model: snake_case__ = '''''' else: snake_case__ = '''vit.''' # read in weights + bias of input projection layer (in timm, this is a single matrix + bias) snake_case__ = state_dict.pop(F"""blocks.{i}.attn.qkv.weight""" ) snake_case__ = state_dict.pop(F"""blocks.{i}.attn.qkv.bias""" ) # next, add query, keys and values (in that order) to the state dict snake_case__ = in_proj_weight[ : config.hidden_size, : ] snake_case__ = in_proj_bias[: config.hidden_size] snake_case__ = in_proj_weight[ config.hidden_size : config.hidden_size * 2, : ] snake_case__ = in_proj_bias[ config.hidden_size : config.hidden_size * 2 ] snake_case__ = in_proj_weight[ -config.hidden_size :, : ] snake_case__ = in_proj_bias[-config.hidden_size :] def SCREAMING_SNAKE_CASE ( __lowerCAmelCase ) -> Optional[Any]: snake_case__ = ['''head.weight''', '''head.bias'''] for k in ignore_keys: state_dict.pop(__lowerCAmelCase , __lowerCAmelCase ) def SCREAMING_SNAKE_CASE ( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) -> Union[str, Any]: snake_case__ = dct.pop(__lowerCAmelCase ) 
snake_case__ = val def SCREAMING_SNAKE_CASE ( ) -> str: snake_case__ = '''http://images.cocodataset.org/val2017/000000039769.jpg''' snake_case__ = Image.open(requests.get(__lowerCAmelCase , stream=__lowerCAmelCase ).raw ) return im @torch.no_grad() def SCREAMING_SNAKE_CASE ( __lowerCAmelCase , __lowerCAmelCase ) -> Dict: snake_case__ = ViTConfig() snake_case__ = False # dataset (ImageNet-21k only or also fine-tuned on ImageNet 2012), patch_size and image_size if vit_name[-5:] == "in21k": snake_case__ = True snake_case__ = int(vit_name[-12:-10] ) snake_case__ = int(vit_name[-9:-6] ) else: snake_case__ = 1000 snake_case__ = '''huggingface/label-files''' snake_case__ = '''imagenet-1k-id2label.json''' snake_case__ = json.load(open(hf_hub_download(__lowerCAmelCase , __lowerCAmelCase , repo_type='''dataset''' ) , '''r''' ) ) snake_case__ = {int(__lowerCAmelCase ): v for k, v in idalabel.items()} snake_case__ = idalabel snake_case__ = {v: k for k, v in idalabel.items()} snake_case__ = int(vit_name[-6:-4] ) snake_case__ = int(vit_name[-3:] ) # size of the architecture if "deit" in vit_name: if vit_name[9:].startswith('''tiny''' ): snake_case__ = 192 snake_case__ = 768 snake_case__ = 12 snake_case__ = 3 elif vit_name[9:].startswith('''small''' ): snake_case__ = 384 snake_case__ = 1536 snake_case__ = 12 snake_case__ = 6 else: pass else: if vit_name[4:].startswith('''small''' ): snake_case__ = 768 snake_case__ = 2304 snake_case__ = 8 snake_case__ = 8 elif vit_name[4:].startswith('''base''' ): pass elif vit_name[4:].startswith('''large''' ): snake_case__ = 1024 snake_case__ = 4096 snake_case__ = 24 snake_case__ = 16 elif vit_name[4:].startswith('''huge''' ): snake_case__ = 1280 snake_case__ = 5120 snake_case__ = 32 snake_case__ = 16 # load original model from timm snake_case__ = timm.create_model(__lowerCAmelCase , pretrained=__lowerCAmelCase ) timm_model.eval() # load state_dict of original model, remove and rename some keys snake_case__ = timm_model.state_dict() if 
base_model: remove_classification_head_(__lowerCAmelCase ) snake_case__ = create_rename_keys(__lowerCAmelCase , __lowerCAmelCase ) for src, dest in rename_keys: rename_key(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) read_in_q_k_v(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) # load HuggingFace model if vit_name[-5:] == "in21k": snake_case__ = ViTModel(__lowerCAmelCase ).eval() else: snake_case__ = ViTForImageClassification(__lowerCAmelCase ).eval() model.load_state_dict(__lowerCAmelCase ) # Check outputs on an image, prepared by ViTImageProcessor/DeiTImageProcessor if "deit" in vit_name: snake_case__ = DeiTImageProcessor(size=config.image_size ) else: snake_case__ = ViTImageProcessor(size=config.image_size ) snake_case__ = image_processor(images=prepare_img() , return_tensors='''pt''' ) snake_case__ = encoding['''pixel_values'''] snake_case__ = model(__lowerCAmelCase ) if base_model: snake_case__ = timm_model.forward_features(__lowerCAmelCase ) assert timm_pooled_output.shape == outputs.pooler_output.shape assert torch.allclose(__lowerCAmelCase , outputs.pooler_output , atol=1e-3 ) else: snake_case__ = timm_model(__lowerCAmelCase ) assert timm_logits.shape == outputs.logits.shape assert torch.allclose(__lowerCAmelCase , outputs.logits , atol=1e-3 ) Path(__lowerCAmelCase ).mkdir(exist_ok=__lowerCAmelCase ) print(F"""Saving model {vit_name} to {pytorch_dump_folder_path}""" ) model.save_pretrained(__lowerCAmelCase ) print(F"""Saving image processor to {pytorch_dump_folder_path}""" ) image_processor.save_pretrained(__lowerCAmelCase ) if __name__ == "__main__": lowerCamelCase__ : Optional[Any] = argparse.ArgumentParser() # Required parameters parser.add_argument( """--vit_name""", default="""vit_base_patch16_224""", type=str, help="""Name of the ViT timm model you'd like to convert.""", ) parser.add_argument( """--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory.""" ) lowerCamelCase__ : str = 
parser.parse_args() convert_vit_checkpoint(args.vit_name, args.pytorch_dump_folder_path)
33
1
def SCREAMING_SNAKE_CASE(__lowerCAmelCase=100) -> int:
    """Project Euler #6: difference between the square of the sum and the sum
    of the squares of the first ``n`` natural numbers.

    Uses the closed forms sum(1..n) = n(n+1)/2 and sum(k^2) = n(n+1)(2n+1)/6,
    so the whole computation is O(1).

    :param __lowerCAmelCase: inclusive upper bound ``n`` (default 100).
    :return: (1 + 2 + ... + n)**2 - (1**2 + 2**2 + ... + n**2)
    """
    # BUG FIX: the body previously used the undefined names `n`, `sum_cubes`
    # and `sum_squares` (the parameter was renamed without updating the body),
    # raising NameError on every call.
    n = __lowerCAmelCase
    square_of_sum = (n * (n + 1) // 2) ** 2
    sum_of_squares = n * (n + 1) * (2 * n + 1) // 6
    return square_of_sum - sum_of_squares


if __name__ == "__main__":
    # BUG FIX: previously called the undefined name `solution`.
    print(f"{SCREAMING_SNAKE_CASE() = }")
33
import re
import warnings
from contextlib import contextmanager

from ...processing_utils import ProcessorMixin


class __magic_name__(ProcessorMixin):
    """Processor bundling an image processor and a tokenizer (Donut-style).

    Images are routed to the image processor and text to the tokenizer; a
    generated ``<s_key>...</s_key>`` token sequence can be parsed back into a
    JSON-like structure with :meth:`tokenajson`.

    BUG FIX: the previous version renamed the ``ProcessorMixin`` hook
    attributes to ``__lowercase`` and gave every method the same name
    (``SCREAMING_SNAKE_CASE__``), so later definitions shadowed earlier ones
    and all locals referenced undefined names. The original contract is
    restored below.
    """

    # Names ProcessorMixin uses to locate/serialize the two sub-processors.
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "AutoImageProcessor"
    tokenizer_class = "AutoTokenizer"

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        # `feature_extractor` is only honoured as a deprecated alias.
        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)
        self.current_processor = self.image_processor
        self._in_target_context_manager = False

    def __call__(self, *args, **kwargs):
        """Dispatch images to the image processor and text to the tokenizer.

        Returns the image features, the text encodings, or — when both are
        given — the image features with the tokenized text attached as
        ``labels``.
        """
        # For backward compatibility with `as_target_processor`.
        if self._in_target_context_manager:
            return self.current_processor(*args, **kwargs)

        images = kwargs.pop("images", None)
        text = kwargs.pop("text", None)
        if len(args) > 0:
            images = args[0]
            args = args[1:]

        if images is None and text is None:
            raise ValueError("You need to specify either an `images` or `text` input to process.")

        if images is not None:
            inputs = self.image_processor(images, *args, **kwargs)
        if text is not None:
            encodings = self.tokenizer(text, **kwargs)

        if text is None:
            return inputs
        elif images is None:
            return encodings
        else:
            inputs["labels"] = encodings["input_ids"]
            return inputs

    def batch_decode(self, *args, **kwargs):
        """Forward everything to the tokenizer's ``batch_decode``."""
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        """Forward everything to the tokenizer's ``decode``."""
        return self.tokenizer.decode(*args, **kwargs)

    @contextmanager
    def as_target_processor(self):
        """Deprecated: temporarily route ``__call__`` to the tokenizer."""
        warnings.warn(
            "`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your "
            "labels by using the argument `text` of the regular `__call__` method (either in the same call as "
            "your images inputs, or in a separate call."
        )
        self._in_target_context_manager = True
        self.current_processor = self.tokenizer
        yield
        self.current_processor = self.image_processor
        self._in_target_context_manager = False

    def tokenajson(self, tokens, is_inner_value=False, added_vocab=None):
        """Convert a generated ``<s_key>...</s_key>`` token string into a
        JSON-like dict: nested spans become nested dicts, ``<sep/>``-separated
        siblings become lists, and categorical special tokens from
        ``added_vocab`` are unwrapped.
        """
        if added_vocab is None:
            added_vocab = self.tokenizer.get_added_vocab()

        output = {}

        while tokens:
            start_token = re.search(r"<s_(.*?)>", tokens, re.IGNORECASE)
            if start_token is None:
                break
            key = start_token.group(1)
            end_token = re.search(rf"</s_{key}>", tokens, re.IGNORECASE)
            start_token = start_token.group()
            if end_token is None:
                # Unclosed span: drop the opening tag and keep scanning.
                tokens = tokens.replace(start_token, "")
            else:
                end_token = end_token.group()
                start_token_escaped = re.escape(start_token)
                end_token_escaped = re.escape(end_token)
                content = re.search(
                    f"{start_token_escaped}(.*?){end_token_escaped}", tokens, re.IGNORECASE
                )
                if content is not None:
                    content = content.group(1).strip()
                    if r"<s_" in content and r"</s_" in content:  # non-leaf node
                        value = self.tokenajson(content, is_inner_value=True, added_vocab=added_vocab)
                        if value:
                            if len(value) == 1:
                                value = value[0]
                            output[key] = value
                    else:  # leaf nodes
                        output[key] = []
                        for leaf in content.split(r"<sep/>"):
                            leaf = leaf.strip()
                            if leaf in added_vocab and leaf[0] == "<" and leaf[-2:] == "/>":
                                leaf = leaf[1:-2]  # for categorical special tokens
                            output[key].append(leaf)
                        if len(output[key]) == 1:
                            output[key] = output[key][0]

                tokens = tokens[tokens.find(end_token) + len(end_token) :].strip()
                if tokens[:6] == r"<sep/>":  # non-leaf nodes
                    return [output] + self.tokenajson(tokens[6:], is_inner_value=True, added_vocab=added_vocab)

        if len(output):
            return [output] if is_inner_value else output
        else:
            return [] if is_inner_value else {"text_sequence": tokens}

    @property
    def feature_extractor_class(self):
        """Deprecated alias for ``image_processor_class``."""
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
            FutureWarning,
        )
        return self.image_processor_class

    @property
    def feature_extractor(self):
        """Deprecated alias for ``image_processor``."""
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.",
            FutureWarning,
        )
        return self.image_processor
33
1
import numpy as np


def sigmoid(vector: np.ndarray) -> np.ndarray:
    """Element-wise logistic sigmoid: 1 / (1 + e^-x)."""
    return 1 / (1 + np.exp(-vector))


def SCREAMING_SNAKE_CASE(vector: np.ndarray) -> np.ndarray:
    """Element-wise sigmoid linear unit (SiLU / swish): x * sigmoid(x).

    BUG FIX: previously both functions shared the single name
    ``SCREAMING_SNAKE_CASE`` (the second definition shadowed the first) and
    both bodies referenced names that no longer existed (`vector`,
    `sigmoid`), raising NameError on every call. The helper is restored as
    ``sigmoid`` and the public name keeps referring to the SiLU, exactly as
    the last definition did before.
    """
    return vector * sigmoid(vector)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
33
# NOTE(review): unit-test module for the BiT (Big Transfer) model family from
# HuggingFace transformers. The code below is kept byte-identical; only these
# review comments are added. The whole chunk has been mechanically mangled:
# every statement was collapsed onto a handful of physical lines, every local
# was renamed to `snake_case__` (so consecutive assignments clobber one
# throwaway name instead of binding `config`, `model`, `result`, ...), every
# test method was renamed to `SCREAMING_SNAKE_CASE__` (later defs shadow
# earlier ones within a class), and both tester classes are named
# `__magic_name__` with undefined bases `snake_case_`. A behavior-preserving
# reformat is unsafe here; reconcile against the upstream
# tests/models/bit/test_modeling_bit.py before touching logic.
# --- imports + BitModelTester: ctor stores the hyper-parameters; helpers
# --- build pixel_values/labels fixtures and a BitConfig.
import inspect import unittest from transformers import BitConfig from transformers.testing_utils import require_torch, require_vision, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_backbone_common import BackboneTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import BitBackbone, BitForImageClassification, BitImageProcessor, BitModel from transformers.models.bit.modeling_bit import BIT_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image class __magic_name__ : '''simple docstring''' def __init__( self:Optional[Any] , _a:int , _a:str=3 , _a:Optional[int]=32 , _a:Optional[Any]=3 , _a:Tuple=10 , _a:List[Any]=[8, 16, 32, 64] , _a:str=[1, 1, 2, 1] , _a:Any=True , _a:List[Any]=True , _a:List[str]="relu" , _a:int=3 , _a:Tuple=None , _a:Tuple=["stage2", "stage3", "stage4"] , _a:List[Any]=[2, 3, 4] , _a:Union[str, Any]=1 , ): snake_case__ = parent snake_case__ = batch_size snake_case__ = image_size snake_case__ = num_channels snake_case__ = embeddings_size snake_case__ = hidden_sizes snake_case__ = depths snake_case__ = is_training snake_case__ = use_labels snake_case__ = hidden_act snake_case__ = num_labels snake_case__ = scope snake_case__ = len(_a ) snake_case__ = out_features snake_case__ = out_indices snake_case__ = num_groups def SCREAMING_SNAKE_CASE__ ( self:int ): snake_case__ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) snake_case__ = None if self.use_labels: snake_case__ = ids_tensor([self.batch_size] , self.num_labels ) snake_case__ = self.get_config() return config, pixel_values, labels def SCREAMING_SNAKE_CASE__ ( self:List[Any] ): return BitConfig( num_channels=self.num_channels , 
# --- create_and_check_model / create_and_check_for_image_classification /
# --- create_and_check_backbone: instantiate BitModel, BitForImageClassification
# --- and BitBackbone, run a forward pass, and assert output shapes/channels.
# --- NOTE(review): inline `#` markers inside these collapsed physical lines
# --- comment out everything to their right — further evidence this chunk was
# --- flattened from multi-line source.
embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , out_features=self.out_features , out_indices=self.out_indices , num_groups=self.num_groups , ) def SCREAMING_SNAKE_CASE__ ( self:Any , _a:Optional[int] , _a:Tuple , _a:int ): snake_case__ = BitModel(config=_a ) model.to(_a ) model.eval() snake_case__ = model(_a ) self.parent.assertEqual( result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , ) def SCREAMING_SNAKE_CASE__ ( self:int , _a:Tuple , _a:Any , _a:Union[str, Any] ): snake_case__ = self.num_labels snake_case__ = BitForImageClassification(_a ) model.to(_a ) model.eval() snake_case__ = model(_a , labels=_a ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def SCREAMING_SNAKE_CASE__ ( self:str , _a:str , _a:List[str] , _a:Any ): snake_case__ = BitBackbone(config=_a ) model.to(_a ) model.eval() snake_case__ = model(_a ) # verify feature maps self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) ) self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[1], 4, 4] ) # verify channels self.parent.assertEqual(len(model.channels ) , len(config.out_features ) ) self.parent.assertListEqual(model.channels , config.hidden_sizes[1:] ) # verify backbone works with out_features=None snake_case__ = None snake_case__ = BitBackbone(config=_a ) model.to(_a ) model.eval() snake_case__ = model(_a ) # verify feature maps self.parent.assertEqual(len(result.feature_maps ) , 1 ) self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[-1], 1, 1] ) # verify channels self.parent.assertEqual(len(model.channels ) , 1 ) self.parent.assertListEqual(model.channels , [config.hidden_sizes[-1]] ) def SCREAMING_SNAKE_CASE__ ( self:str ): snake_case__ = self.prepare_config_and_inputs() 
# --- prepare_config_and_inputs_for_common + the ModelTesterMixin-based test
# --- class: model list, pipeline mapping, config tests, and skips for
# --- attentions/inputs_embeds/embeddings that BiT does not support.
snake_case__ , snake_case__ , snake_case__ = config_and_inputs snake_case__ = {'''pixel_values''': pixel_values} return config, inputs_dict @require_torch class __magic_name__ (snake_case_ ,snake_case_ ,unittest.TestCase ): '''simple docstring''' __lowercase : Any = (BitModel, BitForImageClassification, BitBackbone) if is_torch_available() else () __lowercase : int = ( {'feature-extraction': BitModel, 'image-classification': BitForImageClassification} if is_torch_available() else {} ) __lowercase : Tuple = False __lowercase : Optional[Any] = False __lowercase : str = False __lowercase : Tuple = False __lowercase : Tuple = False def SCREAMING_SNAKE_CASE__ ( self:Optional[Any] ): snake_case__ = BitModelTester(self ) snake_case__ = ConfigTester(self , config_class=_a , has_text_modality=_a ) def SCREAMING_SNAKE_CASE__ ( self:Union[str, Any] ): self.create_and_test_config_common_properties() self.config_tester.create_and_test_config_to_json_string() self.config_tester.create_and_test_config_to_json_file() self.config_tester.create_and_test_config_from_and_save_pretrained() self.config_tester.create_and_test_config_with_num_labels() self.config_tester.check_config_can_be_init_without_params() self.config_tester.check_config_arguments_init() def SCREAMING_SNAKE_CASE__ ( self:List[Any] ): return @unittest.skip(reason='''Bit does not output attentions''' ) def SCREAMING_SNAKE_CASE__ ( self:Optional[int] ): pass @unittest.skip(reason='''Bit does not use inputs_embeds''' ) def SCREAMING_SNAKE_CASE__ ( self:Tuple ): pass @unittest.skip(reason='''Bit does not support input and output embeddings''' ) def SCREAMING_SNAKE_CASE__ ( self:Optional[int] ): pass def SCREAMING_SNAKE_CASE__ ( self:str ): snake_case__ , snake_case__ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: snake_case__ = model_class(_a ) snake_case__ = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is 
# --- forward-signature check continuation, model/backbone delegation tests,
# --- weight-initialisation check (NOTE(review): `nn.BatchNormad` looks like a
# --- mangled `nn.BatchNorm2d` — confirm against upstream), and the
# --- hidden-states-output helper run over both BiT layer types.
deterministic snake_case__ = [*signature.parameters.keys()] snake_case__ = ['''pixel_values'''] self.assertListEqual(arg_names[:1] , _a ) def SCREAMING_SNAKE_CASE__ ( self:List[Any] ): snake_case__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*_a ) def SCREAMING_SNAKE_CASE__ ( self:Union[str, Any] ): snake_case__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_backbone(*_a ) def SCREAMING_SNAKE_CASE__ ( self:List[Any] ): snake_case__ , snake_case__ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: snake_case__ = model_class(config=_a ) for name, module in model.named_modules(): if isinstance(_a , (nn.BatchNormad, nn.GroupNorm) ): self.assertTrue( torch.all(module.weight == 1 ) , msg=F"""Parameter {name} of model {model_class} seems not properly initialized""" , ) self.assertTrue( torch.all(module.bias == 0 ) , msg=F"""Parameter {name} of model {model_class} seems not properly initialized""" , ) def SCREAMING_SNAKE_CASE__ ( self:List[Any] ): def check_hidden_states_output(_a:List[Any] , _a:int , _a:Union[str, Any] ): snake_case__ = model_class(_a ) model.to(_a ) model.eval() with torch.no_grad(): snake_case__ = model(**self._prepare_for_class(_a , _a ) ) snake_case__ = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states snake_case__ = self.model_tester.num_stages self.assertEqual(len(_a ) , expected_num_stages + 1 ) # Bit's feature maps are of shape (batch_size, num_channels, height, width) self.assertListEqual( list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , ) snake_case__ , snake_case__ = self.model_tester.prepare_config_and_inputs_for_common() snake_case__ = ['''preactivation''', '''bottleneck'''] for model_class in self.all_model_classes: for layer_type in layers_type: snake_case__ = layer_type snake_case__ = True 
# --- remaining tests: hidden-states via config, image-classification test,
# --- slow from_pretrained smoke test, `prepare_img` fixture, the @slow
# --- integration test (asserts logits against hard-coded values), and the
# --- BackboneTesterMixin-based backbone test class.
check_hidden_states_output(_a , _a , _a ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] snake_case__ = True check_hidden_states_output(_a , _a , _a ) @unittest.skip(reason='''Bit does not use feedforward chunking''' ) def SCREAMING_SNAKE_CASE__ ( self:Tuple ): pass def SCREAMING_SNAKE_CASE__ ( self:List[str] ): snake_case__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*_a ) @slow def SCREAMING_SNAKE_CASE__ ( self:Optional[Any] ): for model_name in BIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: snake_case__ = BitModel.from_pretrained(_a ) self.assertIsNotNone(_a ) def SCREAMING_SNAKE_CASE ( ) -> Any: snake_case__ = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' ) return image @require_torch @require_vision class __magic_name__ (unittest.TestCase ): '''simple docstring''' @cached_property def SCREAMING_SNAKE_CASE__ ( self:Tuple ): return ( BitImageProcessor.from_pretrained(BIT_PRETRAINED_MODEL_ARCHIVE_LIST[0] ) if is_vision_available() else None ) @slow def SCREAMING_SNAKE_CASE__ ( self:Optional[int] ): snake_case__ = BitForImageClassification.from_pretrained(BIT_PRETRAINED_MODEL_ARCHIVE_LIST[0] ).to(_a ) snake_case__ = self.default_image_processor snake_case__ = prepare_img() snake_case__ = image_processor(images=_a , return_tensors='''pt''' ).to(_a ) # forward pass with torch.no_grad(): snake_case__ = model(**_a ) # verify the logits snake_case__ = torch.Size((1, 10_00) ) self.assertEqual(outputs.logits.shape , _a ) snake_case__ = torch.tensor([[-0.6526, -0.5263, -1.4398]] ).to(_a ) self.assertTrue(torch.allclose(outputs.logits[0, :3] , _a , atol=1e-4 ) ) @require_torch class __magic_name__ (snake_case_ ,unittest.TestCase ): '''simple docstring''' __lowercase : Optional[Any] = (BitBackbone,) if is_torch_available() else () __lowercase : int = BitConfig __lowercase : Any = False def SCREAMING_SNAKE_CASE__ ( self:str ): snake_case__ = 
# --- completion of the backbone tester's setUp (statement split across lines).
BitModelTester(self )
33
1
# DISCLAIMER: This file is strongly influenced by https://github.com/yang-song/score_sde_pytorch

import math
from typing import Union

import torch

from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import randn_tensor
from .scheduling_utils import SchedulerMixin


class __magic_name__(SchedulerMixin, ConfigMixin):
    """Variance-preserving (VP) SDE scheduler for score-based generative
    models (Song et al., "Score-Based Generative Modeling through SDEs").

    BUG FIX: the previous version declared four ``__init__`` parameters all
    named ``_a`` (a SyntaxError), assigned the three instance attributes to a
    single throwaway local, inherited from the undefined name ``snake_case_``
    and gave both public methods the same name. The intended contract is
    restored below; the numerical update is unchanged.
    """

    # Solver order of the scheduler (used by pipelines).
    order = 1

    @register_to_config
    def __init__(self, num_train_timesteps=2000, beta_min=0.1, beta_max=20, sampling_eps=1e-3):
        # Config values are captured by @register_to_config; only mutable
        # schedule state is initialised here.
        self.sigmas = None
        self.discrete_sigmas = None
        self.timesteps = None

    def set_timesteps(self, num_inference_steps, device: Union[str, torch.device] = None):
        """Create the continuous timestep schedule, descending from 1 to
        ``sampling_eps``, on the requested device."""
        self.timesteps = torch.linspace(1, self.config.sampling_eps, num_inference_steps, device=device)

    def step_pred(self, score, x, t, generator=None):
        """One Euler–Maruyama predictor step of the reverse-time VP-SDE.

        :param score: model score estimate for the current sample.
        :param x: current sample.
        :param t: current continuous timestep(s).
        :param generator: optional RNG for the added noise.
        :return: tuple ``(x, x_mean)`` — the noisy next sample and its mean.
        :raises ValueError: if ``set_timesteps`` has not been called.
        """
        if self.timesteps is None:
            raise ValueError(
                "`self.timesteps` is not set, you need to run 'set_timesteps' after creating the scheduler"
            )

        # TODO(Patrick) better comments + non-PyTorch
        # postprocess model score: rescale by the marginal std of the VP-SDE.
        log_mean_coeff = (
            -0.25 * t**2 * (self.config.beta_max - self.config.beta_min) - 0.5 * t * self.config.beta_min
        )
        std = torch.sqrt(1.0 - torch.exp(2.0 * log_mean_coeff))
        std = std.flatten()
        while len(std.shape) < len(score.shape):
            std = std.unsqueeze(-1)
        score = -score / std

        # compute the (negative) step size over the schedule.
        dt = -1.0 / len(self.timesteps)

        beta_t = self.config.beta_min + t * (self.config.beta_max - self.config.beta_min)
        beta_t = beta_t.flatten()
        while len(beta_t.shape) < len(x.shape):
            beta_t = beta_t.unsqueeze(-1)
        drift = -0.5 * beta_t * x

        diffusion = torch.sqrt(beta_t)
        drift = drift - diffusion**2 * score
        x_mean = x + drift * dt

        # add noise
        noise = randn_tensor(x.shape, layout=x.layout, generator=generator, device=x.device, dtype=x.dtype)
        x = x_mean + diffusion * math.sqrt(-dt) * noise

        return x, x_mean

    def __len__(self):
        return self.config.num_train_timesteps
33
# NOTE(review): a `datasets` Metric computing perplexity of input texts under
# a causal LM. Code below is kept byte-identical; only review comments are
# added. The chunk shows the same mechanical mangling as the rest of the file:
# the three module docstring constants were all renamed `lowerCamelCase__`
# (clobbering each other) while the decorator still references the now
# undefined `_DESCRIPTION` / `_KWARGS_DESCRIPTION` / `_CITATION`; all locals
# were renamed `snake_case__`. Reconcile against the upstream
# metrics/perplexity/perplexity.py before touching logic.
# --- imports + citation/description/usage docstring constants.
import numpy as np import torch from torch.nn import CrossEntropyLoss from transformers import AutoModelForCausalLM, AutoTokenizer import datasets from datasets import logging lowerCamelCase__ : Any = """\ """ lowerCamelCase__ : List[str] = """ Perplexity (PPL) is one of the most common metrics for evaluating language models. It is defined as the exponentiated average negative log-likelihood of a sequence. For more information, see https://huggingface.co/docs/transformers/perplexity """ lowerCamelCase__ : Any = """ Args: model_id (str): model used for calculating Perplexity NOTE: Perplexity can only be calculated for causal language models. This includes models such as gpt2, causal variations of bert, causal versions of t5, and more (the full list can be found in the AutoModelForCausalLM documentation here: https://huggingface.co/docs/transformers/master/en/model_doc/auto#transformers.AutoModelForCausalLM ) input_texts (list of str): input text, each separate text snippet is one list entry. batch_size (int): the batch size to run texts through the model. Defaults to 16. add_start_token (bool): whether to add the start token to the texts, so the perplexity can include the probability of the first word. Defaults to True. device (str): device to run on, defaults to 'cuda' when available Returns: perplexity: dictionary containing the perplexity scores for the texts in the input list, as well as the mean perplexity. If one of the input texts is longer than the max input length of the model, then it is truncated to the max length for the perplexity computation. Examples: Example 1: >>> perplexity = datasets.load_metric(\"perplexity\") >>> input_texts = [\"lorem ipsum\", \"Happy Birthday!\", \"Bienvenue\"] >>> results = perplexity.compute(model_id='gpt2', ... add_start_token=False, ... 
# --- continuation of the usage docstring, the Metric subclass declaration
# --- (`_info` providing features/reference URLs) and the start of `_compute`
# --- (device validation).
input_texts=input_texts) # doctest:+ELLIPSIS >>> print(list(results.keys())) ['perplexities', 'mean_perplexity'] >>> print(round(results[\"mean_perplexity\"], 2)) 78.22 >>> print(round(results[\"perplexities\"][0], 2)) 11.11 Example 2: >>> perplexity = datasets.load_metric(\"perplexity\") >>> input_texts = datasets.load_dataset(\"wikitext\", ... \"wikitext-2-raw-v1\", ... split=\"test\")[\"text\"][:50] # doctest:+ELLIPSIS [...] >>> input_texts = [s for s in input_texts if s!=''] >>> results = perplexity.compute(model_id='gpt2', ... input_texts=input_texts) # doctest:+ELLIPSIS >>> print(list(results.keys())) ['perplexities', 'mean_perplexity'] >>> print(round(results[\"mean_perplexity\"], 2)) 60.35 >>> print(round(results[\"perplexities\"][0], 2)) 81.12 """ @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION ,_KWARGS_DESCRIPTION ) class __magic_name__ (datasets.Metric ): '''simple docstring''' def SCREAMING_SNAKE_CASE__ ( self:int ): return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { '''input_texts''': datasets.Value('''string''' ), } ) , reference_urls=['''https://huggingface.co/docs/transformers/perplexity'''] , ) def SCREAMING_SNAKE_CASE__ ( self:List[Any] , _a:int , _a:List[Any] , _a:int = 16 , _a:bool = True , _a:Any=None ): if device is not None: assert device in ["gpu", "cpu", "cuda"], "device should be either gpu or cpu." 
# --- model/tokenizer loading, pad-token fallback (reuses an existing special
# --- token when batch_size > 1), max-length selection depending on
# --- add_start_token, tokenization, and minimum-length sanity asserts.
if device == "gpu": snake_case__ = '''cuda''' else: snake_case__ = '''cuda''' if torch.cuda.is_available() else '''cpu''' snake_case__ = AutoModelForCausalLM.from_pretrained(_a ) snake_case__ = model.to(_a ) snake_case__ = AutoTokenizer.from_pretrained(_a ) # if batch_size > 1 (which generally leads to padding being required), and # if there is not an already assigned pad_token, assign an existing # special token to also be the padding token if tokenizer.pad_token is None and batch_size > 1: snake_case__ = list(tokenizer.special_tokens_map_extended.values() ) # check that the model already has at least one special token defined assert ( len(_a ) > 0 ), "If batch_size > 1, model must have at least one special token to use for padding. Please use a different model or set batch_size=1." # assign one of the special tokens to also be the pad token tokenizer.add_special_tokens({'''pad_token''': existing_special_tokens[0]} ) if add_start_token: # leave room for <BOS> token to be added: assert ( tokenizer.bos_token is not None ), "Input model must already have a BOS token if using add_start_token=True. Please use a different model, or set add_start_token=False" snake_case__ = model.config.max_length - 1 else: snake_case__ = model.config.max_length snake_case__ = tokenizer( _a , add_special_tokens=_a , padding=_a , truncation=_a , max_length=_a , return_tensors='''pt''' , return_attention_mask=_a , ).to(_a ) snake_case__ = encodings['''input_ids'''] snake_case__ = encodings['''attention_mask'''] # check that each input is long enough: if add_start_token: assert torch.all(torch.ge(attn_masks.sum(1 ) , 1 ) ), "Each input text must be at least one token long." else: assert torch.all( torch.ge(attn_masks.sum(1 ) , 2 ) ), "When add_start_token=False, each input text must be at least two tokens long. Run with add_start_token=True if inputting strings of only one token, and remove all empty input strings." 
# --- batched loop: optional BOS prepend, forward pass, shifted-logits
# --- cross-entropy, per-text exponentiated mean NLL, returned with the mean.
# --- NOTE(review): `torch.expa` / `torch.intaa` look like mangled
# --- `torch.exp` / `torch.int64` (digit-to-letter renaming) — confirm
# --- against the upstream metric before running.
snake_case__ = [] snake_case__ = CrossEntropyLoss(reduction='''none''' ) for start_index in logging.tqdm(range(0 , len(_a ) , _a ) ): snake_case__ = min(start_index + batch_size , len(_a ) ) snake_case__ = encoded_texts[start_index:end_index] snake_case__ = attn_masks[start_index:end_index] if add_start_token: snake_case__ = torch.tensor([[tokenizer.bos_token_id]] * encoded_batch.size(dim=0 ) ).to(_a ) snake_case__ = torch.cat([bos_tokens_tensor, encoded_batch] , dim=1 ) snake_case__ = torch.cat( [torch.ones(bos_tokens_tensor.size() , dtype=torch.intaa ).to(_a ), attn_mask] , dim=1 ) snake_case__ = encoded_batch with torch.no_grad(): snake_case__ = model(_a , attention_mask=_a ).logits snake_case__ = out_logits[..., :-1, :].contiguous() snake_case__ = labels[..., 1:].contiguous() snake_case__ = attn_mask[..., 1:].contiguous() snake_case__ = torch.expa( (loss_fct(shift_logits.transpose(1 , 2 ) , _a ) * shift_attention_mask_batch).sum(1 ) / shift_attention_mask_batch.sum(1 ) ) ppls += perplexity_batch.tolist() return {"perplexities": ppls, "mean_perplexity": np.mean(_a )}
33
1
# NOTE(review): DistilBERT-style knowledge-distillation training entry point
# (transformers/examples research_projects/distillation/train.py). Code is
# kept byte-identical; only review comments are added. Same mechanical
# mangling as elsewhere in this file: locals renamed `snake_case__`,
# constants `lowerCamelCase__` (the MODEL_CLASSES table is referenced by the
# now-mismatched name later), and several attribute assignments collapsed to a
# throwaway local (e.g. the freeze_* helpers presumably set
# `...weight.requires_grad = False` upstream — confirm before running).
# --- imports, (student config / model / tokenizer) class table, and
# --- sanity_checks(): cross-validates mlm/clm flags, file paths and
# --- loss-weight non-negativity.
import argparse import json import os import pickle import shutil import numpy as np import torch from distiller import Distiller from lm_seqs_dataset import LmSeqsDataset from transformers import ( BertConfig, BertForMaskedLM, BertTokenizer, DistilBertConfig, DistilBertForMaskedLM, DistilBertTokenizer, GPTaConfig, GPTaLMHeadModel, GPTaTokenizer, RobertaConfig, RobertaForMaskedLM, RobertaTokenizer, ) from utils import git_log, init_gpu_params, logger, set_seed lowerCamelCase__ : str = { """distilbert""": (DistilBertConfig, DistilBertForMaskedLM, DistilBertTokenizer), """roberta""": (RobertaConfig, RobertaForMaskedLM, RobertaTokenizer), """bert""": (BertConfig, BertForMaskedLM, BertTokenizer), """gpt2""": (GPTaConfig, GPTaLMHeadModel, GPTaTokenizer), } def SCREAMING_SNAKE_CASE ( __lowerCAmelCase ) -> List[str]: assert (args.mlm and args.alpha_mlm > 0.0) or (not args.mlm and args.alpha_mlm == 0.0) assert (args.alpha_mlm > 0.0 and args.alpha_clm == 0.0) or (args.alpha_mlm == 0.0 and args.alpha_clm > 0.0) if args.mlm: assert os.path.isfile(args.token_counts ) assert (args.student_type in ["roberta", "distilbert"]) and (args.teacher_type in ["roberta", "bert"]) else: assert (args.student_type in ["gpt2"]) and (args.teacher_type in ["gpt2"]) assert args.teacher_type == args.student_type or ( args.student_type == "distilbert" and args.teacher_type == "bert" ) assert os.path.isfile(args.student_config ) if args.student_pretrained_weights is not None: assert os.path.isfile(args.student_pretrained_weights ) if args.freeze_token_type_embds: assert args.student_type in ["roberta"] assert args.alpha_ce >= 0.0 assert args.alpha_mlm >= 0.0 assert args.alpha_clm >= 0.0 assert args.alpha_mse >= 0.0 assert args.alpha_cos >= 0.0 assert args.alpha_ce + args.alpha_mlm + args.alpha_clm + args.alpha_mse + args.alpha_cos > 0.0 def SCREAMING_SNAKE_CASE ( __lowerCAmelCase , __lowerCAmelCase ) -> int: if args.student_type == "roberta": snake_case__ = False elif args.student_type == "gpt2": 
# --- freeze_pos_embeddings / freeze_token_type_embeddings tails, then main():
# --- argparse declarations begin (paths, student/teacher types, temperature,
# --- loss weights).
snake_case__ = False def SCREAMING_SNAKE_CASE ( __lowerCAmelCase , __lowerCAmelCase ) -> Any: if args.student_type == "roberta": snake_case__ = False def SCREAMING_SNAKE_CASE ( ) -> Union[str, Any]: snake_case__ = argparse.ArgumentParser(description='''Training''' ) parser.add_argument('''--force''' , action='''store_true''' , help='''Overwrite dump_path if it already exists.''' ) parser.add_argument( '''--dump_path''' , type=__lowerCAmelCase , required=__lowerCAmelCase , help='''The output directory (log, checkpoints, parameters, etc.)''' ) parser.add_argument( '''--data_file''' , type=__lowerCAmelCase , required=__lowerCAmelCase , help='''The binarized file (tokenized + tokens_to_ids) and grouped by sequence.''' , ) parser.add_argument( '''--student_type''' , type=__lowerCAmelCase , choices=['''distilbert''', '''roberta''', '''gpt2'''] , required=__lowerCAmelCase , help='''The student type (DistilBERT, RoBERTa).''' , ) parser.add_argument('''--student_config''' , type=__lowerCAmelCase , required=__lowerCAmelCase , help='''Path to the student configuration.''' ) parser.add_argument( '''--student_pretrained_weights''' , default=__lowerCAmelCase , type=__lowerCAmelCase , help='''Load student initialization checkpoint.''' ) parser.add_argument( '''--teacher_type''' , choices=['''bert''', '''roberta''', '''gpt2'''] , required=__lowerCAmelCase , help='''Teacher type (BERT, RoBERTa).''' ) parser.add_argument('''--teacher_name''' , type=__lowerCAmelCase , required=__lowerCAmelCase , help='''The teacher model.''' ) parser.add_argument('''--temperature''' , default=2.0 , type=__lowerCAmelCase , help='''Temperature for the softmax temperature.''' ) parser.add_argument( '''--alpha_ce''' , default=0.5 , type=__lowerCAmelCase , help='''Linear weight for the distillation loss. Must be >=0.''' ) parser.add_argument( '''--alpha_mlm''' , default=0.0 , type=__lowerCAmelCase , help='''Linear weight for the MLM loss. Must be >=0. 
# --- argparse continuation: loss weights, MLM masking proportions, smoothing,
# --- token counts, freeze flags.
Should be used in conjunction with `mlm` flag.''' , ) parser.add_argument('''--alpha_clm''' , default=0.5 , type=__lowerCAmelCase , help='''Linear weight for the CLM loss. Must be >=0.''' ) parser.add_argument('''--alpha_mse''' , default=0.0 , type=__lowerCAmelCase , help='''Linear weight of the MSE loss. Must be >=0.''' ) parser.add_argument( '''--alpha_cos''' , default=0.0 , type=__lowerCAmelCase , help='''Linear weight of the cosine embedding loss. Must be >=0.''' ) parser.add_argument( '''--mlm''' , action='''store_true''' , help='''The LM step: MLM or CLM. If `mlm` is True, the MLM is used over CLM.''' ) parser.add_argument( '''--mlm_mask_prop''' , default=0.15 , type=__lowerCAmelCase , help='''Proportion of tokens for which we need to make a prediction.''' , ) parser.add_argument('''--word_mask''' , default=0.8 , type=__lowerCAmelCase , help='''Proportion of tokens to mask out.''' ) parser.add_argument('''--word_keep''' , default=0.1 , type=__lowerCAmelCase , help='''Proportion of tokens to keep.''' ) parser.add_argument('''--word_rand''' , default=0.1 , type=__lowerCAmelCase , help='''Proportion of tokens to randomly replace.''' ) parser.add_argument( '''--mlm_smoothing''' , default=0.7 , type=__lowerCAmelCase , help='''Smoothing parameter to emphasize more rare tokens (see XLM, similar to word2vec).''' , ) parser.add_argument('''--token_counts''' , type=__lowerCAmelCase , help='''The token counts in the data_file for MLM.''' ) parser.add_argument( '''--restrict_ce_to_mask''' , action='''store_true''' , help='''If true, compute the distillation loss only the [MLM] prediction distribution.''' , ) parser.add_argument( '''--freeze_pos_embs''' , action='''store_true''' , help='''Freeze positional embeddings during distillation. For student_type in [\'roberta\', \'gpt2\'] only.''' , ) parser.add_argument( '''--freeze_token_type_embds''' , action='''store_true''' , help='''Freeze token type embeddings during distillation if existent. 
# --- argparse continuation: optimisation hyper-parameters, fp16 options,
# --- distributed-training flags.
For student_type in [\'roberta\'] only.''' , ) parser.add_argument('''--n_epoch''' , type=__lowerCAmelCase , default=3 , help='''Number of pass on the whole dataset.''' ) parser.add_argument('''--batch_size''' , type=__lowerCAmelCase , default=5 , help='''Batch size (for each process).''' ) parser.add_argument( '''--group_by_size''' , action='''store_false''' , help='''If true, group sequences that have similar length into the same batch. Default is true.''' , ) parser.add_argument( '''--gradient_accumulation_steps''' , type=__lowerCAmelCase , default=50 , help='''Gradient accumulation for larger training batches.''' , ) parser.add_argument('''--warmup_prop''' , default=0.05 , type=__lowerCAmelCase , help='''Linear warmup proportion.''' ) parser.add_argument('''--weight_decay''' , default=0.0 , type=__lowerCAmelCase , help='''Weight decay if we apply some.''' ) parser.add_argument('''--learning_rate''' , default=5e-4 , type=__lowerCAmelCase , help='''The initial learning rate for Adam.''' ) parser.add_argument('''--adam_epsilon''' , default=1e-6 , type=__lowerCAmelCase , help='''Epsilon for Adam optimizer.''' ) parser.add_argument('''--max_grad_norm''' , default=5.0 , type=__lowerCAmelCase , help='''Max gradient norm.''' ) parser.add_argument('''--initializer_range''' , default=0.02 , type=__lowerCAmelCase , help='''Random initialization range.''' ) parser.add_argument( '''--fp16''' , action='''store_true''' , help='''Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit''' , ) parser.add_argument( '''--fp16_opt_level''' , type=__lowerCAmelCase , default='''O1''' , help=( '''For fp16: Apex AMP optimization level selected in [\'O0\', \'O1\', \'O2\', and \'O3\'].''' '''See details at https://nvidia.github.io/apex/amp.html''' ) , ) parser.add_argument('''--n_gpu''' , type=__lowerCAmelCase , default=1 , help='''Number of GPUs in the node.''' ) parser.add_argument('''--local_rank''' , type=__lowerCAmelCase , default=-1 , help='''Distributed 
# --- end of argparse; GPU/seed init, dump-path handling, parameter dump,
# --- tokenizer setup with special-token id map, and binarized-data loading.
training - Local rank''' ) parser.add_argument('''--seed''' , type=__lowerCAmelCase , default=56 , help='''Random seed''' ) parser.add_argument('''--log_interval''' , type=__lowerCAmelCase , default=500 , help='''Tensorboard logging interval.''' ) parser.add_argument('''--checkpoint_interval''' , type=__lowerCAmelCase , default=4000 , help='''Checkpoint interval.''' ) snake_case__ = parser.parse_args() sanity_checks(__lowerCAmelCase ) # ARGS # init_gpu_params(__lowerCAmelCase ) set_seed(__lowerCAmelCase ) if args.is_master: if os.path.exists(args.dump_path ): if not args.force: raise ValueError( F"""Serialization dir {args.dump_path} already exists, but you have not precised wheter to overwrite""" ''' itUse `--force` if you want to overwrite it''' ) else: shutil.rmtree(args.dump_path ) if not os.path.exists(args.dump_path ): os.makedirs(args.dump_path ) logger.info(F"""Experiment will be dumped and logged in {args.dump_path}""" ) # SAVE PARAMS # logger.info(F"""Param: {args}""" ) with open(os.path.join(args.dump_path , '''parameters.json''' ) , '''w''' ) as f: json.dump(vars(__lowerCAmelCase ) , __lowerCAmelCase , indent=4 ) git_log(args.dump_path ) snake_case__ , snake_case__ , snake_case__ = MODEL_CLASSES[args.student_type] snake_case__ , snake_case__ , snake_case__ = MODEL_CLASSES[args.teacher_type] # TOKENIZER # snake_case__ = teacher_tokenizer_class.from_pretrained(args.teacher_name ) snake_case__ = {} for tok_name, tok_symbol in tokenizer.special_tokens_map.items(): snake_case__ = tokenizer.all_special_tokens.index(__lowerCAmelCase ) snake_case__ = tokenizer.all_special_ids[idx] logger.info(F"""Special tokens {special_tok_ids}""" ) snake_case__ = special_tok_ids snake_case__ = tokenizer.max_model_input_sizes[args.teacher_name] # DATA LOADER # logger.info(F"""Loading data from {args.data_file}""" ) with open(args.data_file , '''rb''' ) as fp: snake_case__ = pickle.load(__lowerCAmelCase ) if args.mlm: logger.info(F"""Loading token counts from 
# --- MLM token-probability table, dataset construction, student/teacher
# --- loading, freezing, sanity checks and Distiller construction.
# --- NOTE(review): `pickle.load` on the data/token-count files executes
# --- arbitrary code if those files are untrusted — run only on trusted data.
{args.token_counts} (already pre-computed)""" ) with open(args.token_counts , '''rb''' ) as fp: snake_case__ = pickle.load(__lowerCAmelCase ) snake_case__ = np.maximum(__lowerCAmelCase , 1 ) ** -args.mlm_smoothing for idx in special_tok_ids.values(): snake_case__ = 0.0 # do not predict special tokens snake_case__ = torch.from_numpy(__lowerCAmelCase ) else: snake_case__ = None snake_case__ = LmSeqsDataset(params=__lowerCAmelCase , data=__lowerCAmelCase ) logger.info('''Data loader created.''' ) # STUDENT # logger.info(F"""Loading student config from {args.student_config}""" ) snake_case__ = student_config_class.from_pretrained(args.student_config ) snake_case__ = True if args.student_pretrained_weights is not None: logger.info(F"""Loading pretrained weights from {args.student_pretrained_weights}""" ) snake_case__ = student_model_class.from_pretrained(args.student_pretrained_weights , config=__lowerCAmelCase ) else: snake_case__ = student_model_class(__lowerCAmelCase ) if args.n_gpu > 0: student.to(F"""cuda:{args.local_rank}""" ) logger.info('''Student loaded.''' ) # TEACHER # snake_case__ = teacher_model_class.from_pretrained(args.teacher_name , output_hidden_states=__lowerCAmelCase ) if args.n_gpu > 0: teacher.to(F"""cuda:{args.local_rank}""" ) logger.info(F"""Teacher loaded from {args.teacher_name}.""" ) # FREEZING # if args.freeze_pos_embs: freeze_pos_embeddings(__lowerCAmelCase , __lowerCAmelCase ) if args.freeze_token_type_embds: freeze_token_type_embeddings(__lowerCAmelCase , __lowerCAmelCase ) # SANITY CHECKS # assert student.config.vocab_size == teacher.config.vocab_size assert student.config.hidden_size == teacher.config.hidden_size assert student.config.max_position_embeddings == teacher.config.max_position_embeddings if args.mlm: assert token_probs.size(0 ) == stu_architecture_config.vocab_size # DISTILLER # torch.cuda.empty_cache() snake_case__ = Distiller( params=__lowerCAmelCase , dataset=__lowerCAmelCase , token_probs=__lowerCAmelCase , 
# --- Distiller call completion, training kick-off, __main__ guard
# --- (NOTE(review): calls `main`, which no longer exists under this name).
student=__lowerCAmelCase , teacher=__lowerCAmelCase ) distiller.train() logger.info('''Let\'s go get some drinks.''' ) if __name__ == "__main__": main()
33
import os
from datetime import datetime as dt

from github import Github

# Issues carrying any of these labels are exempt from the stale-bot lifecycle.
# (Previously bound to an obfuscated name while the loop read LABELS_TO_EXEMPT,
# which was undefined.)
LABELS_TO_EXEMPT = [
    "good first issue",
    "good second issue",
    "good difficult issue",
    "enhancement",
    "new pipeline/model",
    "new scheduler",
    "wip",
]


def SCREAMING_SNAKE_CASE() -> None:
    """Close, revive, or mark-as-stale open issues on huggingface/diffusers.

    Requires a ``GITHUB_TOKEN`` environment variable with repo access.
    """
    g = Github(os.environ["GITHUB_TOKEN"])
    repo = g.get_repo("huggingface/diffusers")
    open_issues = repo.get_issues(state="open")

    for issue in open_issues:
        # Newest comment first. The original lambda's parameter was named
        # `__lowerCAmelCase` while its body read `i` (NameError), and
        # `reverse` was set to the same undefined sentinel.
        comments = sorted(issue.get_comments(), key=lambda c: c.created_at, reverse=True)
        last_comment = comments[0] if len(comments) > 0 else None
        if (
            last_comment is not None
            and last_comment.user.login == "github-actions[bot]"
            and (dt.utcnow() - issue.updated_at).days > 7
            and (dt.utcnow() - issue.created_at).days >= 30
            and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels())
        ):
            # Closes the issue after 7 days of inactivity since the Stalebot notification.
            issue.edit(state="closed")
        elif (
            # NOTE(review): compares the string "stale" against Label objects;
            # looks like it should test label *names* — confirm against PyGithub.
            "stale" in issue.get_labels()
            and last_comment is not None
            and last_comment.user.login != "github-actions[bot]"
        ):
            # Opens the issue if someone other than Stalebot commented.
            issue.edit(state="open")
            issue.remove_from_labels("stale")
        elif (
            (dt.utcnow() - issue.updated_at).days > 23
            and (dt.utcnow() - issue.created_at).days >= 30
            and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels())
        ):
            # Post a Stalebot notification after 23 days of inactivity.
            issue.create_comment(
                "This issue has been automatically marked as stale because it has not had "
                "recent activity. If you think this still needs to be addressed "
                "please comment on this thread.\n\nPlease note that issues that do not follow the "
                "[contributing guidelines](https://github.com/huggingface/diffusers/blob/main/CONTRIBUTING.md) "
                "are likely to be ignored."
            )
            issue.add_to_labels("stale")


if __name__ == "__main__":
    SCREAMING_SNAKE_CASE()
33
1
import argparse import torch from transformers import GPTaConfig, GPTaModel, load_tf_weights_in_gpta from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging logging.set_verbosity_info() def SCREAMING_SNAKE_CASE ( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) -> Tuple: # Construct model if gpta_config_file == "": snake_case__ = GPTaConfig() else: snake_case__ = GPTaConfig.from_json_file(__lowerCAmelCase ) snake_case__ = GPTaModel(__lowerCAmelCase ) # Load weights from numpy load_tf_weights_in_gpta(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) # Save pytorch-model snake_case__ = pytorch_dump_folder_path + '''/''' + WEIGHTS_NAME snake_case__ = pytorch_dump_folder_path + '''/''' + CONFIG_NAME print(F"""Save PyTorch model to {pytorch_weights_dump_path}""" ) torch.save(model.state_dict() , __lowerCAmelCase ) print(F"""Save configuration file to {pytorch_config_dump_path}""" ) with open(__lowerCAmelCase , '''w''' , encoding='''utf-8''' ) as f: f.write(config.to_json_string() ) if __name__ == "__main__": lowerCamelCase__ : Optional[Any] = argparse.ArgumentParser() # Required parameters parser.add_argument( """--gpt2_checkpoint_path""", default=None, type=str, required=True, help="""Path to the TensorFlow checkpoint path.""" ) parser.add_argument( """--pytorch_dump_folder_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model.""" ) parser.add_argument( """--gpt2_config_file""", default="""""", type=str, help=( """An optional config json file corresponding to the pre-trained OpenAI model. \n""" """This specifies the model architecture.""" ), ) lowerCamelCase__ : int = parser.parse_args() convert_gpta_checkpoint_to_pytorch(args.gpta_checkpoint_path, args.gpta_config_file, args.pytorch_dump_folder_path)
33
import pytest

from datasets.utils.sharding import _distribute_shards, _number_of_shards_in_gen_kwargs, _split_gen_kwargs


@pytest.mark.parametrize(
    "kwargs, expected",
    [
        ({"num_shards": 0, "max_num_jobs": 1}, []),
        ({"num_shards": 10, "max_num_jobs": 1}, [range(10)]),
        # Was `range(__lowerCAmelCase, i + 1)` — an undefined name that crashed
        # at collection time; with 10 jobs each job owns exactly one shard.
        ({"num_shards": 10, "max_num_jobs": 10}, [range(i, i + 1) for i in range(10)]),
        ({"num_shards": 1, "max_num_jobs": 10}, [range(1)]),
        ({"num_shards": 10, "max_num_jobs": 3}, [range(0, 4), range(4, 7), range(7, 10)]),
        ({"num_shards": 3, "max_num_jobs": 10}, [range(0, 1), range(1, 2), range(2, 3)]),
    ],
)
def test_distribute_shards(kwargs, expected):
    """`_distribute_shards` splits num_shards into at most max_num_jobs contiguous ranges."""
    out = _distribute_shards(**kwargs)
    assert out == expected


@pytest.mark.parametrize(
    "gen_kwargs, max_num_jobs, expected",
    [
        ({"foo": 0}, 10, [{"foo": 0}]),
        ({"shards": [0, 1, 2, 3]}, 1, [{"shards": [0, 1, 2, 3]}]),
        ({"shards": [0, 1, 2, 3]}, 4, [{"shards": [0]}, {"shards": [1]}, {"shards": [2]}, {"shards": [3]}]),
        ({"shards": [0, 1]}, 4, [{"shards": [0]}, {"shards": [1]}]),
        ({"shards": [0, 1, 2, 3]}, 2, [{"shards": [0, 1]}, {"shards": [2, 3]}]),
    ],
)
def test_split_gen_kwargs(gen_kwargs, max_num_jobs, expected):
    """`_split_gen_kwargs` fans one kwargs dict out into per-job dicts."""
    out = _split_gen_kwargs(gen_kwargs, max_num_jobs)
    assert out == expected


@pytest.mark.parametrize(
    "gen_kwargs, expected",
    [
        ({"foo": 0}, 1),
        ({"shards": [0]}, 1),
        ({"shards": [0, 1, 2, 3]}, 4),
        ({"shards": [0, 1, 2, 3], "foo": 0}, 4),
        ({"shards": [0, 1, 2, 3], "other": (0, 1)}, 4),
        # Two list arguments of different lengths is an error.
        ({"shards": [0, 1, 2, 3], "shards2": [0, 1]}, RuntimeError),
    ],
)
def test_number_of_shards_in_gen_kwargs(gen_kwargs, expected):
    """Shard count is the length of the list-valued kwarg; mismatched lists raise.

    The three tests were all previously named `SCREAMING_SNAKE_CASE`, so each
    definition shadowed the previous one and none matched pytest's
    `test_*` collection pattern — effectively no test ran.
    """
    if expected is RuntimeError:
        with pytest.raises(expected):
            _number_of_shards_in_gen_kwargs(gen_kwargs)
    else:
        out = _number_of_shards_in_gen_kwargs(gen_kwargs)
        assert out == expected
33
1
import gc import random import unittest import numpy as np import torch from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer import diffusers from diffusers import ( AutoencoderKL, EulerDiscreteScheduler, StableDiffusionLatentUpscalePipeline, StableDiffusionPipeline, UNetaDConditionModel, ) from diffusers.schedulers import KarrasDiffusionSchedulers from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu from ..pipeline_params import TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin enable_full_determinism() def SCREAMING_SNAKE_CASE ( __lowerCAmelCase ) -> List[Any]: snake_case__ = [tensor.shape for tensor in tensor_list] return all(shape == shapes[0] for shape in shapes[1:] ) class __magic_name__ (snake_case_ ,snake_case_ ,snake_case_ ,unittest.TestCase ): '''simple docstring''' __lowercase : Dict = StableDiffusionLatentUpscalePipeline __lowercase : List[str] = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - { 'height', 'width', 'cross_attention_kwargs', 'negative_prompt_embeds', 'prompt_embeds', } __lowercase : List[Any] = PipelineTesterMixin.required_optional_params - {'num_images_per_prompt'} __lowercase : Any = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS __lowercase : int = frozenset( [] ) # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess __lowercase : List[Any] = frozenset([] ) __lowercase : Any = True @property def SCREAMING_SNAKE_CASE__ ( self:List[str] ): snake_case__ = 1 snake_case__ = 4 snake_case__ = (16, 16) snake_case__ = floats_tensor((batch_size, num_channels) + sizes , rng=random.Random(0 ) ).to(_a ) return image def SCREAMING_SNAKE_CASE__ ( self:Union[str, Any] ): torch.manual_seed(0 ) snake_case__ = UNetaDConditionModel( act_fn='''gelu''' , 
attention_head_dim=8 , norm_num_groups=_a , block_out_channels=[32, 32, 64, 64] , time_cond_proj_dim=1_60 , conv_in_kernel=1 , conv_out_kernel=1 , cross_attention_dim=32 , down_block_types=( '''KDownBlock2D''', '''KCrossAttnDownBlock2D''', '''KCrossAttnDownBlock2D''', '''KCrossAttnDownBlock2D''', ) , in_channels=8 , mid_block_type=_a , only_cross_attention=_a , out_channels=5 , resnet_time_scale_shift='''scale_shift''' , time_embedding_type='''fourier''' , timestep_post_act='''gelu''' , up_block_types=('''KCrossAttnUpBlock2D''', '''KCrossAttnUpBlock2D''', '''KCrossAttnUpBlock2D''', '''KUpBlock2D''') , ) snake_case__ = AutoencoderKL( block_out_channels=[32, 32, 64, 64] , in_channels=3 , out_channels=3 , down_block_types=[ '''DownEncoderBlock2D''', '''DownEncoderBlock2D''', '''DownEncoderBlock2D''', '''DownEncoderBlock2D''', ] , up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D''', '''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] , latent_channels=4 , ) snake_case__ = EulerDiscreteScheduler(prediction_type='''sample''' ) snake_case__ = CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=10_00 , hidden_act='''quick_gelu''' , projection_dim=5_12 , ) snake_case__ = CLIPTextModel(_a ) snake_case__ = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' ) snake_case__ = { '''unet''': model.eval(), '''vae''': vae.eval(), '''scheduler''': scheduler, '''text_encoder''': text_encoder, '''tokenizer''': tokenizer, } return components def SCREAMING_SNAKE_CASE__ ( self:List[Any] , _a:Optional[Any] , _a:List[str]=0 ): if str(_a ).startswith('''mps''' ): snake_case__ = torch.manual_seed(_a ) else: snake_case__ = torch.Generator(device=_a ).manual_seed(_a ) snake_case__ = { '''prompt''': '''A painting of a squirrel eating a burger''', '''image''': self.dummy_image.cpu(), '''generator''': generator, 
'''num_inference_steps''': 2, '''output_type''': '''numpy''', } return inputs def SCREAMING_SNAKE_CASE__ ( self:str ): snake_case__ = '''cpu''' snake_case__ = self.get_dummy_components() snake_case__ = self.pipeline_class(**_a ) pipe.to(_a ) pipe.set_progress_bar_config(disable=_a ) snake_case__ = self.get_dummy_inputs(_a ) snake_case__ = pipe(**_a ).images snake_case__ = image[0, -3:, -3:, -1] self.assertEqual(image.shape , (1, 2_56, 2_56, 3) ) snake_case__ = np.array( [0.47222412, 0.41921633, 0.44717434, 0.46874192, 0.42588258, 0.46150726, 0.4677534, 0.45583832, 0.48579055] ) snake_case__ = np.abs(image_slice.flatten() - expected_slice ).max() self.assertLessEqual(_a , 1e-3 ) def SCREAMING_SNAKE_CASE__ ( self:Union[str, Any] ): super().test_attention_slicing_forward_pass(expected_max_diff=7e-3 ) def SCREAMING_SNAKE_CASE__ ( self:List[Any] ): super().test_cpu_offload_forward_pass(expected_max_diff=3e-3 ) def SCREAMING_SNAKE_CASE__ ( self:str ): super().test_dict_tuple_outputs_equivalent(expected_max_difference=3e-3 ) def SCREAMING_SNAKE_CASE__ ( self:Any ): super().test_inference_batch_single_identical(expected_max_diff=7e-3 ) def SCREAMING_SNAKE_CASE__ ( self:Tuple ): super().test_pt_np_pil_outputs_equivalent(expected_max_diff=3e-3 ) def SCREAMING_SNAKE_CASE__ ( self:Dict ): super().test_save_load_local(expected_max_difference=3e-3 ) def SCREAMING_SNAKE_CASE__ ( self:str ): super().test_save_load_optional_components(expected_max_difference=3e-3 ) def SCREAMING_SNAKE_CASE__ ( self:Any ): snake_case__ = [ '''DDIMScheduler''', '''DDPMScheduler''', '''PNDMScheduler''', '''HeunDiscreteScheduler''', '''EulerAncestralDiscreteScheduler''', '''KDPM2DiscreteScheduler''', '''KDPM2AncestralDiscreteScheduler''', '''DPMSolverSDEScheduler''', ] snake_case__ = self.get_dummy_components() snake_case__ = self.pipeline_class(**_a ) # make sure that PNDM does not need warm-up pipe.scheduler.register_to_config(skip_prk_steps=_a ) pipe.to(_a ) pipe.set_progress_bar_config(disable=_a ) 
snake_case__ = self.get_dummy_inputs(_a ) snake_case__ = 2 snake_case__ = [] for scheduler_enum in KarrasDiffusionSchedulers: if scheduler_enum.name in skip_schedulers: # no sigma schedulers are not supported # no schedulers continue snake_case__ = getattr(_a , scheduler_enum.name ) snake_case__ = scheduler_cls.from_config(pipe.scheduler.config ) snake_case__ = pipe(**_a )[0] outputs.append(_a ) assert check_same_shape(_a ) @require_torch_gpu @slow class __magic_name__ (unittest.TestCase ): '''simple docstring''' def SCREAMING_SNAKE_CASE__ ( self:Optional[Any] ): super().tearDown() gc.collect() torch.cuda.empty_cache() def SCREAMING_SNAKE_CASE__ ( self:str ): snake_case__ = torch.manual_seed(33 ) snake_case__ = StableDiffusionPipeline.from_pretrained('''CompVis/stable-diffusion-v1-4''' , torch_dtype=torch.floataa ) pipe.to('''cuda''' ) snake_case__ = StableDiffusionLatentUpscalePipeline.from_pretrained( '''stabilityai/sd-x2-latent-upscaler''' , torch_dtype=torch.floataa ) upscaler.to('''cuda''' ) snake_case__ = '''a photo of an astronaut high resolution, unreal engine, ultra realistic''' snake_case__ = pipe(_a , generator=_a , output_type='''latent''' ).images snake_case__ = upscaler( prompt=_a , image=_a , num_inference_steps=20 , guidance_scale=0 , generator=_a , output_type='''np''' , ).images[0] snake_case__ = load_numpy( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/latent-upscaler/astronaut_1024.npy''' ) assert np.abs((expected_image - image).mean() ) < 5e-2 def SCREAMING_SNAKE_CASE__ ( self:Optional[int] ): snake_case__ = torch.manual_seed(33 ) snake_case__ = StableDiffusionLatentUpscalePipeline.from_pretrained( '''stabilityai/sd-x2-latent-upscaler''' , torch_dtype=torch.floataa ) upscaler.to('''cuda''' ) snake_case__ = '''the temple of fire by Ross Tran and Gerardo Dottori, oil on canvas''' snake_case__ = load_image( 
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/latent-upscaler/fire_temple_512.png''' ) snake_case__ = upscaler( prompt=_a , image=_a , num_inference_steps=20 , guidance_scale=0 , generator=_a , output_type='''np''' , ).images[0] snake_case__ = load_numpy( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/latent-upscaler/fire_temple_1024.npy''' ) assert np.abs((expected_image - image).max() ) < 5e-2
33
import random
import unittest

import torch

from diffusers import IFImgaImgSuperResolutionPipeline
from diffusers.utils import floats_tensor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import skip_mps, torch_device

from ..pipeline_params import TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
from . import IFPipelineTesterMixin


@skip_mps
class __magic_name__(PipelineTesterMixin, IFPipelineTesterMixin, unittest.TestCase):
    """Fast tests for the IF image-to-image super-resolution pipeline.

    Previously every class attribute was named ``__lowercase`` and every method
    ``SCREAMING_SNAKE_CASE__`` (later defs shadowed earlier ones, and the
    tester mixins could locate none of them); ``get_dummy_inputs`` also
    declared two parameters both named ``_a`` — a SyntaxError.  The base
    classes were the undefined placeholder ``snake_case_``; the two imported
    mixins are restored.
    """

    # Attributes read by PipelineTesterMixin.
    pipeline_class = IFImgaImgSuperResolutionPipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"width", "height"}
    batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS.union({"original_image"})
    required_optional_params = PipelineTesterMixin.required_optional_params - {"latents"}

    def get_dummy_components(self):
        # Shared tiny component set supplied by IFPipelineTesterMixin.
        return self._get_superresolution_dummy_components()

    def get_dummy_inputs(self, device, seed=0):
        """Deterministic pipeline inputs; MPS cannot host a torch.Generator."""
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)

        image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        original_image = floats_tensor((1, 3, 16, 16), rng=random.Random(seed)).to(device)

        return {
            "prompt": "A painting of a squirrel eating a burger",
            "image": image,
            "original_image": original_image,
            "generator": generator,
            "num_inference_steps": 2,
            "output_type": "numpy",
        }

    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available(),
        reason="XFormers attention is only available with CUDA and `xformers` installed",
    )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3)

    def test_save_load_optional_components(self):
        self._test_save_load_optional_components()

    @unittest.skipIf(torch_device != "cuda", reason="float16 requires CUDA")
    def test_save_load_floataa(self):
        # Due to non-determinism in save load of the hf-internal-testing/tiny-random-t5 text encoder
        super().test_save_load_floataa(expected_max_diff=1e-1)

    def test_attention_slicing_forward_pass(self):
        self._test_attention_slicing_forward_pass(expected_max_diff=1e-2)

    def test_save_load_local(self):
        self._test_save_load_local()

    def test_inference_batch_single_identical(self):
        self._test_inference_batch_single_identical(
            expected_max_diff=1e-2,
        )
33
1
from math import factorial def SCREAMING_SNAKE_CASE ( __lowerCAmelCase , __lowerCAmelCase ) -> int: # If either of the conditions are true, the function is being asked # to calculate a factorial of a negative number, which is not possible if n < k or k < 0: raise ValueError('''Please enter positive integers for n and k where n >= k''' ) return factorial(__lowerCAmelCase ) // (factorial(__lowerCAmelCase ) * factorial(n - k )) if __name__ == "__main__": print( """The number of five-card hands possible from a standard""", F"""fifty-two card deck is: {combinations(5_2, 5)}\n""", ) print( """If a class of 40 students must be arranged into groups of""", F"""4 for group projects, there are {combinations(4_0, 4)} ways""", """to arrange them.\n""", ) print( """If 10 teams are competing in a Formula One race, there""", F"""are {combinations(1_0, 3)} ways that first, second and""", """third place can be awarded.""", )
33
import math class __magic_name__ : '''simple docstring''' def SCREAMING_SNAKE_CASE__ ( self:Optional[int] , _a:list[list[float]] , _a:list[int] ): snake_case__ = 0.0 snake_case__ = 0.0 for i in range(len(_a ) ): da += math.pow((sample[i] - weights[0][i]) , 2 ) da += math.pow((sample[i] - weights[1][i]) , 2 ) return 0 if da > da else 1 return 0 def SCREAMING_SNAKE_CASE__ ( self:Tuple , _a:list[list[int | float]] , _a:list[int] , _a:int , _a:float ): for i in range(len(_a ) ): weights[j][i] += alpha * (sample[i] - weights[j][i]) return weights def SCREAMING_SNAKE_CASE ( ) -> None: # Training Examples ( m, n ) snake_case__ = [[1, 1, 0, 0], [0, 0, 0, 1], [1, 0, 0, 0], [0, 0, 1, 1]] # weight initialization ( n, C ) snake_case__ = [[0.2, 0.6, 0.5, 0.9], [0.8, 0.4, 0.7, 0.3]] # training snake_case__ = SelfOrganizingMap() snake_case__ = 3 snake_case__ = 0.5 for _ in range(__lowerCAmelCase ): for j in range(len(__lowerCAmelCase ) ): # training sample snake_case__ = training_samples[j] # Compute the winning vector snake_case__ = self_organizing_map.get_winner(__lowerCAmelCase , __lowerCAmelCase ) # Update the winning vector snake_case__ = self_organizing_map.update(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) # classify test sample snake_case__ = [0, 0, 0, 1] snake_case__ = self_organizing_map.get_winner(__lowerCAmelCase , __lowerCAmelCase ) # results print(F"""Clusters that the test sample belongs to : {winner}""" ) print(F"""Weights that have been trained : {weights}""" ) # running the main() function if __name__ == "__main__": main()
33
1
def SCREAMING_SNAKE_CASE ( __lowerCAmelCase ) -> int: if not grid or not grid[0]: raise TypeError('''The grid does not contain the appropriate information''' ) for cell_n in range(1 , len(grid[0] ) ): grid[0][cell_n] += grid[0][cell_n - 1] snake_case__ = grid[0] for row_n in range(1 , len(__lowerCAmelCase ) ): snake_case__ = grid[row_n] snake_case__ = fill_row(__lowerCAmelCase , __lowerCAmelCase ) snake_case__ = grid[row_n] return grid[-1][-1] def SCREAMING_SNAKE_CASE ( __lowerCAmelCase , __lowerCAmelCase ) -> list: current_row[0] += row_above[0] for cell_n in range(1 , len(__lowerCAmelCase ) ): current_row[cell_n] += min(current_row[cell_n - 1] , row_above[cell_n] ) return current_row if __name__ == "__main__": import doctest doctest.testmod()
33
from __future__ import annotations from statistics import mean def SCREAMING_SNAKE_CASE ( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) -> list[int]: snake_case__ = [0] * no_of_processes snake_case__ = [0] * no_of_processes # Initialize remaining_time to waiting_time. for i in range(__lowerCAmelCase ): snake_case__ = burst_time[i] snake_case__ = [] snake_case__ = 0 snake_case__ = 0 # When processes are not completed, # A process whose arrival time has passed \ # and has remaining execution time is put into the ready_process. # The shortest process in the ready_process, target_process is executed. while completed != no_of_processes: snake_case__ = [] snake_case__ = -1 for i in range(__lowerCAmelCase ): if (arrival_time[i] <= total_time) and (remaining_time[i] > 0): ready_process.append(__lowerCAmelCase ) if len(__lowerCAmelCase ) > 0: snake_case__ = ready_process[0] for i in ready_process: if remaining_time[i] < remaining_time[target_process]: snake_case__ = i total_time += burst_time[target_process] completed += 1 snake_case__ = 0 snake_case__ = ( total_time - arrival_time[target_process] - burst_time[target_process] ) else: total_time += 1 return waiting_time def SCREAMING_SNAKE_CASE ( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) -> list[int]: snake_case__ = [0] * no_of_processes for i in range(__lowerCAmelCase ): snake_case__ = burst_time[i] + waiting_time[i] return turn_around_time if __name__ == "__main__": print("""[TEST CASE 01]""") lowerCamelCase__ : Tuple = 4 lowerCamelCase__ : Union[str, Any] = [2, 5, 3, 7] lowerCamelCase__ : Optional[Any] = [0, 0, 0, 0] lowerCamelCase__ : Dict = calculate_waitingtime(arrival_time, burst_time, no_of_processes) lowerCamelCase__ : Union[str, Any] = calculate_turnaroundtime( burst_time, no_of_processes, waiting_time ) # Printing the Result print("""PID\tBurst Time\tArrival Time\tWaiting Time\tTurnaround Time""") for i, process_id in enumerate(list(range(1, 5))): print( 
F"""{process_id}\t{burst_time[i]}\t\t\t{arrival_time[i]}\t\t\t\t""" F"""{waiting_time[i]}\t\t\t\t{turn_around_time[i]}""" ) print(F"""\nAverage waiting time = {mean(waiting_time):.5f}""") print(F"""Average turnaround time = {mean(turn_around_time):.5f}""")
33
1
import os
from datetime import datetime as dt

from github import Github

# Issues carrying any of these labels are exempt from the stale-bot lifecycle.
# (Previously bound to an obfuscated name while the loop read LABELS_TO_EXEMPT,
# which was undefined.)
LABELS_TO_EXEMPT = [
    "good first issue",
    "good second issue",
    "good difficult issue",
    "enhancement",
    "new pipeline/model",
    "new scheduler",
    "wip",
]


def SCREAMING_SNAKE_CASE() -> None:
    """Close, revive, or mark-as-stale open issues on huggingface/diffusers.

    Requires a ``GITHUB_TOKEN`` environment variable with repo access.
    """
    g = Github(os.environ["GITHUB_TOKEN"])
    repo = g.get_repo("huggingface/diffusers")
    open_issues = repo.get_issues(state="open")

    for issue in open_issues:
        # Newest comment first. The original lambda's parameter was named
        # `__lowerCAmelCase` while its body read `i` (NameError), and
        # `reverse` was set to the same undefined sentinel.
        comments = sorted(issue.get_comments(), key=lambda c: c.created_at, reverse=True)
        last_comment = comments[0] if len(comments) > 0 else None
        if (
            last_comment is not None
            and last_comment.user.login == "github-actions[bot]"
            and (dt.utcnow() - issue.updated_at).days > 7
            and (dt.utcnow() - issue.created_at).days >= 30
            and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels())
        ):
            # Closes the issue after 7 days of inactivity since the Stalebot notification.
            issue.edit(state="closed")
        elif (
            # NOTE(review): compares the string "stale" against Label objects;
            # looks like it should test label *names* — confirm against PyGithub.
            "stale" in issue.get_labels()
            and last_comment is not None
            and last_comment.user.login != "github-actions[bot]"
        ):
            # Opens the issue if someone other than Stalebot commented.
            issue.edit(state="open")
            issue.remove_from_labels("stale")
        elif (
            (dt.utcnow() - issue.updated_at).days > 23
            and (dt.utcnow() - issue.created_at).days >= 30
            and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels())
        ):
            # Post a Stalebot notification after 23 days of inactivity.
            issue.create_comment(
                "This issue has been automatically marked as stale because it has not had "
                "recent activity. If you think this still needs to be addressed "
                "please comment on this thread.\n\nPlease note that issues that do not follow the "
                "[contributing guidelines](https://github.com/huggingface/diffusers/blob/main/CONTRIBUTING.md) "
                "are likely to be ignored."
            )
            issue.add_to_labels("stale")


if __name__ == "__main__":
    SCREAMING_SNAKE_CASE()
33
lowerCamelCase__ : List[str] = """Alexander Joslin""" import operator as op from .stack import Stack def SCREAMING_SNAKE_CASE ( __lowerCAmelCase ) -> int: snake_case__ = {'''*''': op.mul, '''/''': op.truediv, '''+''': op.add, '''-''': op.sub} snake_case__ = Stack() snake_case__ = Stack() for i in equation: if i.isdigit(): # RULE 1 operand_stack.push(int(__lowerCAmelCase ) ) elif i in operators: # RULE 2 operator_stack.push(__lowerCAmelCase ) elif i == ")": # RULE 4 snake_case__ = operator_stack.peek() operator_stack.pop() snake_case__ = operand_stack.peek() operand_stack.pop() snake_case__ = operand_stack.peek() operand_stack.pop() snake_case__ = operators[opr](__lowerCAmelCase , __lowerCAmelCase ) operand_stack.push(__lowerCAmelCase ) # RULE 5 return operand_stack.peek() if __name__ == "__main__": lowerCamelCase__ : Optional[Any] = """(5 + ((4 * 2) * (2 + 3)))""" # answer = 45 print(F"""{equation} = {dijkstras_two_stack_algorithm(equation)}""")
33
1
import torch from diffusers import CMStochasticIterativeScheduler from .test_schedulers import SchedulerCommonTest class __magic_name__ (snake_case_ ): '''simple docstring''' __lowercase : str = (CMStochasticIterativeScheduler,) __lowercase : List[str] = 10 def SCREAMING_SNAKE_CASE__ ( self:int , **_a:Optional[int] ): snake_case__ = { '''num_train_timesteps''': 2_01, '''sigma_min''': 0.002, '''sigma_max''': 80.0, } config.update(**_a ) return config def SCREAMING_SNAKE_CASE__ ( self:Optional[int] ): snake_case__ = 10 snake_case__ = self.get_scheduler_config() snake_case__ = self.scheduler_classes[0](**_a ) scheduler.set_timesteps(_a ) snake_case__ = scheduler.timesteps[0] snake_case__ = scheduler.timesteps[1] snake_case__ = self.dummy_sample snake_case__ = 0.1 * sample snake_case__ = scheduler.step(_a , _a , _a ).prev_sample snake_case__ = scheduler.step(_a , _a , _a ).prev_sample self.assertEqual(output_a.shape , sample.shape ) self.assertEqual(output_a.shape , output_a.shape ) def SCREAMING_SNAKE_CASE__ ( self:Any ): for timesteps in [10, 50, 1_00, 10_00]: self.check_over_configs(num_train_timesteps=_a ) def SCREAMING_SNAKE_CASE__ ( self:List[str] ): for clip_denoised in [True, False]: self.check_over_configs(clip_denoised=_a ) def SCREAMING_SNAKE_CASE__ ( self:int ): snake_case__ = self.scheduler_classes[0] snake_case__ = self.get_scheduler_config() snake_case__ = scheduler_class(**_a ) snake_case__ = 1 scheduler.set_timesteps(_a ) snake_case__ = scheduler.timesteps snake_case__ = torch.manual_seed(0 ) snake_case__ = self.dummy_model() snake_case__ = self.dummy_sample_deter * scheduler.init_noise_sigma for i, t in enumerate(_a ): # 1. scale model input snake_case__ = scheduler.scale_model_input(_a , _a ) # 2. predict noise residual snake_case__ = model(_a , _a ) # 3. 
predict previous sample x_t-1 snake_case__ = scheduler.step(_a , _a , _a , generator=_a ).prev_sample snake_case__ = pred_prev_sample snake_case__ = torch.sum(torch.abs(_a ) ) snake_case__ = torch.mean(torch.abs(_a ) ) assert abs(result_sum.item() - 192.7614 ) < 1e-2 assert abs(result_mean.item() - 0.2510 ) < 1e-3 def SCREAMING_SNAKE_CASE__ ( self:Tuple ): snake_case__ = self.scheduler_classes[0] snake_case__ = self.get_scheduler_config() snake_case__ = scheduler_class(**_a ) snake_case__ = [1_06, 0] scheduler.set_timesteps(timesteps=_a ) snake_case__ = scheduler.timesteps snake_case__ = torch.manual_seed(0 ) snake_case__ = self.dummy_model() snake_case__ = self.dummy_sample_deter * scheduler.init_noise_sigma for t in timesteps: # 1. scale model input snake_case__ = scheduler.scale_model_input(_a , _a ) # 2. predict noise residual snake_case__ = model(_a , _a ) # 3. predict previous sample x_t-1 snake_case__ = scheduler.step(_a , _a , _a , generator=_a ).prev_sample snake_case__ = pred_prev_sample snake_case__ = torch.sum(torch.abs(_a ) ) snake_case__ = torch.mean(torch.abs(_a ) ) assert abs(result_sum.item() - 347.6357 ) < 1e-2 assert abs(result_mean.item() - 0.4527 ) < 1e-3 def SCREAMING_SNAKE_CASE__ ( self:Any ): snake_case__ = self.scheduler_classes[0] snake_case__ = self.get_scheduler_config() snake_case__ = scheduler_class(**_a ) snake_case__ = [39, 30, 12, 15, 0] with self.assertRaises(_a , msg='''`timesteps` must be in descending order.''' ): scheduler.set_timesteps(timesteps=_a ) def SCREAMING_SNAKE_CASE__ ( self:Any ): snake_case__ = self.scheduler_classes[0] snake_case__ = self.get_scheduler_config() snake_case__ = scheduler_class(**_a ) snake_case__ = [39, 30, 12, 1, 0] snake_case__ = len(_a ) with self.assertRaises(_a , msg='''Can only pass one of `num_inference_steps` or `timesteps`.''' ): scheduler.set_timesteps(num_inference_steps=_a , timesteps=_a ) def SCREAMING_SNAKE_CASE__ ( self:Tuple ): snake_case__ = self.scheduler_classes[0] snake_case__ = 
self.get_scheduler_config() snake_case__ = scheduler_class(**_a ) snake_case__ = [scheduler.config.num_train_timesteps] with self.assertRaises( _a , msg='''`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}}''' , ): scheduler.set_timesteps(timesteps=_a )
33
import warnings from ...utils import logging from .image_processing_perceiver import PerceiverImageProcessor lowerCamelCase__ : int = logging.get_logger(__name__) class __magic_name__ (snake_case_ ): '''simple docstring''' def __init__( self:List[Any] , *_a:Dict , **_a:Tuple ): warnings.warn( '''The class PerceiverFeatureExtractor is deprecated and will be removed in version 5 of Transformers.''' ''' Please use PerceiverImageProcessor instead.''' , _a , ) super().__init__(*_a , **_a )
33
1
def SCREAMING_SNAKE_CASE ( __lowerCAmelCase ) -> int: if not isinstance(__lowerCAmelCase , __lowerCAmelCase ): snake_case__ = F"""Input value of [number={number}] must be an integer""" raise TypeError(__lowerCAmelCase ) if number < 1: snake_case__ = F"""Input value of [number={number}] must be > 0""" raise ValueError(__lowerCAmelCase ) snake_case__ = 1 for i in range(1 , __lowerCAmelCase ): current_number *= 4 * i - 2 current_number //= i + 1 return current_number if __name__ == "__main__": import doctest doctest.testmod()
33
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_flax_available,
    is_tf_available,
    is_tokenizers_available,
    is_torch_available,
)


# Map of submodule -> public names, consumed by _LazyModule below.  In the
# obfuscated original every optional-backend branch rebound an unrelated
# module-level name instead of extending this dict, and the final _LazyModule
# call referenced an undefined `_import_structure`.
_import_structure = {
    "configuration_roberta": ["ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP", "RobertaConfig", "RobertaOnnxConfig"],
    "tokenization_roberta": ["RobertaTokenizer"],
}

# Each backend group is registered only when its dependency is importable.
try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_roberta_fast"] = ["RobertaTokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_roberta"] = [
        "ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST",
        "RobertaForCausalLM",
        "RobertaForMaskedLM",
        "RobertaForMultipleChoice",
        "RobertaForQuestionAnswering",
        "RobertaForSequenceClassification",
        "RobertaForTokenClassification",
        "RobertaModel",
        "RobertaPreTrainedModel",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_roberta"] = [
        "TF_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFRobertaForCausalLM",
        "TFRobertaForMaskedLM",
        "TFRobertaForMultipleChoice",
        "TFRobertaForQuestionAnswering",
        "TFRobertaForSequenceClassification",
        "TFRobertaForTokenClassification",
        "TFRobertaMainLayer",
        "TFRobertaModel",
        "TFRobertaPreTrainedModel",
    ]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_roberta"] = [
        "FlaxRobertaForCausalLM",
        "FlaxRobertaForMaskedLM",
        "FlaxRobertaForMultipleChoice",
        "FlaxRobertaForQuestionAnswering",
        "FlaxRobertaForSequenceClassification",
        "FlaxRobertaForTokenClassification",
        "FlaxRobertaModel",
        "FlaxRobertaPreTrainedModel",
    ]

if TYPE_CHECKING:
    # Real imports for static type checkers only; mirrors _import_structure.
    from .configuration_roberta import ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP, RobertaConfig, RobertaOnnxConfig
    from .tokenization_roberta import RobertaTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_roberta_fast import RobertaTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_roberta import (
            ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
            RobertaForCausalLM,
            RobertaForMaskedLM,
            RobertaForMultipleChoice,
            RobertaForQuestionAnswering,
            RobertaForSequenceClassification,
            RobertaForTokenClassification,
            RobertaModel,
            RobertaPreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_roberta import (
            TF_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFRobertaForCausalLM,
            TFRobertaForMaskedLM,
            TFRobertaForMultipleChoice,
            TFRobertaForQuestionAnswering,
            TFRobertaForSequenceClassification,
            TFRobertaForTokenClassification,
            TFRobertaMainLayer,
            TFRobertaModel,
            TFRobertaPreTrainedModel,
        )

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_roberta import (
            FlaxRobertaForCausalLM,
            FlaxRobertaForMaskedLM,
            FlaxRobertaForMultipleChoice,
            FlaxRobertaForQuestionAnswering,
            FlaxRobertaForSequenceClassification,
            FlaxRobertaForTokenClassification,
            FlaxRobertaModel,
            FlaxRobertaPreTrainedModel,
        )

else:
    import sys

    # Replace this module with a lazy proxy so heavy backends import on demand.
    # The original assigned the proxy to a throwaway name, leaving the module
    # non-lazy and its public names unresolvable.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
33
1
from __future__ import annotations

from statistics import mean


def calculate_waitingtime(arrival_time: list[int], burst_time: list[int], no_of_processes: int) -> list[int]:
    """Compute per-process waiting times for non-preemptive shortest-job-first.

    Among the processes that have arrived and still have remaining work, the one
    with the smallest remaining time runs to completion; the clock idles by one
    unit when nothing is ready.

    Args:
        arrival_time: arrival time of each process.
        burst_time: CPU burst length of each process.
        no_of_processes: number of processes.

    Returns:
        Waiting time of each process (completion - arrival - burst).
    """
    # Bug fix: the original defined this function under a name that shadowed its
    # sibling, while __main__ called `calculate_waitingtime` -> NameError.
    waiting_time = [0] * no_of_processes
    remaining_time = [0] * no_of_processes
    # Initialize remaining_time to the full burst of each process.
    for i in range(no_of_processes):
        remaining_time[i] = burst_time[i]

    ready_process: list[int] = []
    completed = 0
    total_time = 0
    # While processes are not completed: a process whose arrival time has passed
    # and that has remaining execution time is put into ready_process, and the
    # shortest process in ready_process (target_process) is executed.
    while completed != no_of_processes:
        ready_process = []
        target_process = -1

        for i in range(no_of_processes):
            if (arrival_time[i] <= total_time) and (remaining_time[i] > 0):
                ready_process.append(i)

        if len(ready_process) > 0:
            target_process = ready_process[0]
            for i in ready_process:
                if remaining_time[i] < remaining_time[target_process]:
                    target_process = i
            # Run the chosen process to completion (non-preemptive).
            total_time += burst_time[target_process]
            completed += 1
            remaining_time[target_process] = 0
            waiting_time[target_process] = (
                total_time - arrival_time[target_process] - burst_time[target_process]
            )
        else:
            # Nothing ready yet: let the clock advance.
            total_time += 1
    return waiting_time


def calculate_turnaroundtime(burst_time: list[int], no_of_processes: int, waiting_time: list[int]) -> list[int]:
    """Return turnaround times: burst time plus waiting time, per process."""
    turn_around_time = [0] * no_of_processes
    for i in range(no_of_processes):
        turn_around_time[i] = burst_time[i] + waiting_time[i]
    return turn_around_time


if __name__ == "__main__":
    print("""[TEST CASE 01]""")

    no_of_processes = 4
    burst_time = [2, 5, 3, 7]
    arrival_time = [0, 0, 0, 0]
    waiting_time = calculate_waitingtime(arrival_time, burst_time, no_of_processes)
    turn_around_time = calculate_turnaroundtime(burst_time, no_of_processes, waiting_time)

    # Printing the Result
    print("""PID\tBurst Time\tArrival Time\tWaiting Time\tTurnaround Time""")
    for i, process_id in enumerate(list(range(1, 5))):
        print(
            f"""{process_id}\t{burst_time[i]}\t\t\t{arrival_time[i]}\t\t\t\t"""
            f"""{waiting_time[i]}\t\t\t\t{turn_around_time[i]}"""
        )
    print(f"""\nAverage waiting time = {mean(waiting_time):.5f}""")
    print(f"""Average turnaround time = {mean(turn_around_time):.5f}""")
33
import gc
import random
import unittest

import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer

import diffusers
from diffusers import (
    AutoencoderKL,
    EulerDiscreteScheduler,
    StableDiffusionLatentUpscalePipeline,
    StableDiffusionPipeline,
    UNet2DConditionModel,
)
from diffusers.schedulers import KarrasDiffusionSchedulers
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu

from ..pipeline_params import TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin


enable_full_determinism()


def check_same_shape(tensor_list) -> bool:
    """Return True when every tensor in *tensor_list* has the same shape."""
    # Bug fix: the original read undefined names `tensor_list` / `shapes`
    # (obfuscation discarded the bindings).
    shapes = [tensor.shape for tensor in tensor_list]
    return all(shape == shapes[0] for shape in shapes[1:])


class StableDiffusionLatentUpscalePipelineFastTests(
    PipelineLatentTesterMixin, PipelineKarrasSchedulerTesterMixin, PipelineTesterMixin, unittest.TestCase
):
    """Fast (CPU, tiny-model) tests for StableDiffusionLatentUpscalePipeline."""

    pipeline_class = StableDiffusionLatentUpscalePipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {
        "height",
        "width",
        "cross_attention_kwargs",
        "negative_prompt_embeds",
        "prompt_embeds",
    }
    required_optional_params = PipelineTesterMixin.required_optional_params - {"num_images_per_prompt"}
    batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
    image_params = frozenset(
        []
    )  # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
    image_latents_params = frozenset([])
    test_cpu_offload = True  # NOTE(review): original flag name lost to obfuscation — confirm against the tester mixin

    @property
    def dummy_image(self):
        # Small latent-space input (batch, channels, h, w) with a fixed RNG seed.
        batch_size = 1
        num_channels = 4
        sizes = (16, 16)

        image = floats_tensor((batch_size, num_channels) + sizes, rng=random.Random(0)).to(torch_device)
        return image

    def get_dummy_components(self):
        """Build the tiny model components the pipeline needs."""
        torch.manual_seed(0)
        model = UNet2DConditionModel(
            act_fn="gelu",
            attention_head_dim=8,
            norm_num_groups=None,
            block_out_channels=[32, 32, 64, 64],
            time_cond_proj_dim=160,
            conv_in_kernel=1,
            conv_out_kernel=1,
            cross_attention_dim=32,
            down_block_types=(
                "KDownBlock2D",
                "KCrossAttnDownBlock2D",
                "KCrossAttnDownBlock2D",
                "KCrossAttnDownBlock2D",
            ),
            in_channels=8,
            mid_block_type=None,
            only_cross_attention=False,
            out_channels=5,
            resnet_time_scale_shift="scale_shift",
            time_embedding_type="fourier",
            timestep_post_act="gelu",
            up_block_types=("KCrossAttnUpBlock2D", "KCrossAttnUpBlock2D", "KCrossAttnUpBlock2D", "KUpBlock2D"),
        )
        vae = AutoencoderKL(
            block_out_channels=[32, 32, 64, 64],
            in_channels=3,
            out_channels=3,
            down_block_types=[
                "DownEncoderBlock2D",
                "DownEncoderBlock2D",
                "DownEncoderBlock2D",
                "DownEncoderBlock2D",
            ],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=4,
        )
        scheduler = EulerDiscreteScheduler(prediction_type="sample")
        text_config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
            hidden_act="quick_gelu",
            projection_dim=512,
        )
        text_encoder = CLIPTextModel(text_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        components = {
            "unet": model.eval(),
            "vae": vae.eval(),
            "scheduler": scheduler,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        """Pipeline call kwargs with a device-appropriate deterministic generator."""
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "image": self.dummy_image.cpu(),
            "generator": generator,
            "num_inference_steps": 2,
            "output_type": "numpy",
        }
        return inputs

    def test_inference(self):
        device = "cpu"

        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        self.assertEqual(image.shape, (1, 256, 256, 3))
        expected_slice = np.array(
            [0.47222412, 0.41921633, 0.44717434, 0.46874192, 0.42588258, 0.46150726, 0.4677534, 0.45583832, 0.48579055]
        )
        max_diff = np.abs(image_slice.flatten() - expected_slice).max()
        self.assertLessEqual(max_diff, 1e-3)

    def test_attention_slicing_forward_pass(self):
        super().test_attention_slicing_forward_pass(expected_max_diff=7e-3)

    def test_cpu_offload_forward_pass(self):
        super().test_cpu_offload_forward_pass(expected_max_diff=3e-3)

    def test_dict_tuple_outputs_equivalent(self):
        super().test_dict_tuple_outputs_equivalent(expected_max_difference=3e-3)

    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(expected_max_diff=7e-3)

    def test_pt_np_pil_outputs_equivalent(self):
        super().test_pt_np_pil_outputs_equivalent(expected_max_diff=3e-3)

    def test_save_load_local(self):
        super().test_save_load_local(expected_max_difference=3e-3)

    def test_save_load_optional_components(self):
        super().test_save_load_optional_components(expected_max_difference=3e-3)

    def test_karras_schedulers_shape(self):
        # Schedulers without a sigma schedule are not supported by this pipeline.
        skip_schedulers = [
            "DDIMScheduler",
            "DDPMScheduler",
            "PNDMScheduler",
            "HeunDiscreteScheduler",
            "EulerAncestralDiscreteScheduler",
            "KDPM2DiscreteScheduler",
            "KDPM2AncestralDiscreteScheduler",
            "DPMSolverSDEScheduler",
        ]
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)

        # make sure that PNDM does not need warm-up
        pipe.scheduler.register_to_config(skip_prk_steps=True)

        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs(torch_device)
        inputs["num_inference_steps"] = 2
        outputs = []
        for scheduler_enum in KarrasDiffusionSchedulers:
            if scheduler_enum.name in skip_schedulers:
                continue

            scheduler_cls = getattr(diffusers, scheduler_enum.name)
            pipe.scheduler = scheduler_cls.from_config(pipe.scheduler.config)
            output = pipe(**inputs)[0]
            outputs.append(output)

        # All supported schedulers must yield outputs of identical shape.
        assert check_same_shape(outputs)


@require_torch_gpu
@slow
class StableDiffusionLatentUpscalePipelineIntegrationTests(unittest.TestCase):
    """Slow GPU integration tests against the real sd-x2-latent-upscaler weights."""

    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_latent_upscaler_fp16(self):
        generator = torch.manual_seed(33)

        pipe = StableDiffusionPipeline.from_pretrained("CompVis/stable-diffusion-v1-4", torch_dtype=torch.float16)
        pipe.to("cuda")

        upscaler = StableDiffusionLatentUpscalePipeline.from_pretrained(
            "stabilityai/sd-x2-latent-upscaler", torch_dtype=torch.float16
        )
        upscaler.to("cuda")

        prompt = "a photo of an astronaut high resolution, unreal engine, ultra realistic"

        low_res_latents = pipe(prompt, generator=generator, output_type="latent").images

        image = upscaler(
            prompt=prompt,
            image=low_res_latents,
            num_inference_steps=20,
            guidance_scale=0,
            generator=generator,
            output_type="np",
        ).images[0]

        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/latent-upscaler/astronaut_1024.npy"
        )
        assert np.abs((expected_image - image).mean()) < 5e-2

    def test_latent_upscaler_fp16_image(self):
        generator = torch.manual_seed(33)

        upscaler = StableDiffusionLatentUpscalePipeline.from_pretrained(
            "stabilityai/sd-x2-latent-upscaler", torch_dtype=torch.float16
        )
        upscaler.to("cuda")

        prompt = "the temple of fire by Ross Tran and Gerardo Dottori, oil on canvas"

        image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/latent-upscaler/fire_temple_512.png"
        )

        image = upscaler(
            prompt=prompt,
            image=image,
            num_inference_steps=20,
            guidance_scale=0,
            generator=generator,
            output_type="np",
        ).images[0]

        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/latent-upscaler/fire_temple_1024.npy"
        )
        assert np.abs((expected_image - image).max()) < 5e-2
33
1
from .testing import ( are_the_same_tensors, execute_subprocess_async, require_bnb, require_cpu, require_cuda, require_huggingface_suite, require_mps, require_multi_gpu, require_multi_xpu, require_safetensors, require_single_gpu, require_single_xpu, require_torch_min_version, require_tpu, require_xpu, skip, slow, ) from .training import RegressionDataset, RegressionModel, RegressionModelaXPU from .scripts import test_script, test_sync, test_ops # isort: skip
33
import shutil
import tempfile
import unittest

import numpy as np
import pytest

from transformers import is_speech_available, is_vision_available
from transformers.testing_utils import require_torch


if is_vision_available():
    from transformers import TvltImageProcessor

if is_speech_available():
    from transformers import TvltFeatureExtractor

from transformers import TvltProcessor


@require_torch
class TvltProcessorTest(unittest.TestCase):
    """Tests that TvltProcessor correctly wraps its image processor and feature extractor."""

    def setUp(self):
        # Bug fix: the original discarded these bindings into a throwaway name,
        # so self.checkpoint / self.tmpdirname (read below) were never set.
        self.checkpoint = "ZinengTang/tvlt-base"
        self.tmpdirname = tempfile.mkdtemp()

    def get_image_processor(self, **kwargs):
        return TvltImageProcessor.from_pretrained(self.checkpoint, **kwargs)

    def get_feature_extractor(self, **kwargs):
        return TvltFeatureExtractor.from_pretrained(self.checkpoint, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def test_save_load_pretrained_default(self):
        image_processor = self.get_image_processor()
        feature_extractor = self.get_feature_extractor()

        processor = TvltProcessor(image_processor=image_processor, feature_extractor=feature_extractor)
        processor.save_pretrained(self.tmpdirname)
        processor = TvltProcessor.from_pretrained(self.tmpdirname)

        self.assertIsInstance(processor.feature_extractor, TvltFeatureExtractor)
        self.assertIsInstance(processor.image_processor, TvltImageProcessor)

    def test_feature_extractor(self):
        # Audio-only input must match a direct feature-extractor call.
        image_processor = self.get_image_processor()
        feature_extractor = self.get_feature_extractor()

        processor = TvltProcessor(image_processor=image_processor, feature_extractor=feature_extractor)

        audio = np.ones([12000])

        audio_dict = feature_extractor(audio, return_tensors="np")
        input_processor = processor(audio=audio, return_tensors="np")

        for key in audio_dict.keys():
            self.assertAlmostEqual(audio_dict[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_image_processor(self):
        # Image-only input must match a direct image-processor call.
        image_processor = self.get_image_processor()
        feature_extractor = self.get_feature_extractor()

        processor = TvltProcessor(image_processor=image_processor, feature_extractor=feature_extractor)

        images = np.ones([3, 224, 224])

        image_dict = image_processor(images, return_tensors="np")
        input_processor = processor(images=images, return_tensors="np")

        for key in image_dict.keys():
            self.assertAlmostEqual(image_dict[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_processor(self):
        image_processor = self.get_image_processor()
        feature_extractor = self.get_feature_extractor()

        processor = TvltProcessor(image_processor=image_processor, feature_extractor=feature_extractor)

        audio = np.ones([12000])
        images = np.ones([3, 224, 224])

        inputs = processor(audio=audio, images=images)

        self.assertListEqual(list(inputs.keys()), ["audio_values", "audio_mask", "pixel_values", "pixel_mask"])

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_model_input_names(self):
        image_processor = self.get_image_processor()
        feature_extractor = self.get_feature_extractor()

        processor = TvltProcessor(image_processor=image_processor, feature_extractor=feature_extractor)

        self.assertListEqual(
            processor.model_input_names,
            image_processor.model_input_names + feature_extractor.model_input_names,
            msg="`processor` and `image_processor`+`feature_extractor` model input names do not match",
        )
33
1
"""Convert a fairseq Wav2Vec2 checkpoint to the Hugging Face Transformers format."""

import argparse
import json
import os

import fairseq
import torch
from fairseq.data import Dictionary

from transformers import (
    Wav2Vec2Config,
    Wav2Vec2CTCTokenizer,
    Wav2Vec2FeatureExtractor,
    Wav2Vec2ForCTC,
    Wav2Vec2ForPreTraining,
    Wav2Vec2Processor,
    logging,
)
from transformers.models.wav2vec2.modeling_wav2vec2 import Wav2Vec2ForSequenceClassification


logging.set_verbosity_info()
logger = logging.get_logger(__name__)

# fairseq parameter name -> HF parameter name (relative to the wav2vec2 submodule,
# except for the TOP_LEVEL_KEYS below).
MAPPING = {
    "post_extract_proj": "feature_projection.projection",
    "encoder.pos_conv.0": "encoder.pos_conv_embed.conv",
    "self_attn.k_proj": "encoder.layers.*.attention.k_proj",
    "self_attn.v_proj": "encoder.layers.*.attention.v_proj",
    "self_attn.q_proj": "encoder.layers.*.attention.q_proj",
    "self_attn.out_proj": "encoder.layers.*.attention.out_proj",
    "self_attn_layer_norm": "encoder.layers.*.layer_norm",
    "fc1": "encoder.layers.*.feed_forward.intermediate_dense",
    "fc2": "encoder.layers.*.feed_forward.output_dense",
    "final_layer_norm": "encoder.layers.*.final_layer_norm",
    "encoder.layer_norm": "encoder.layer_norm",
    "adapter_layer": "encoder.layers.*.adapter_layer",
    "w2v_model.layer_norm": "feature_projection.layer_norm",
    "quantizer.weight_proj": "quantizer.weight_proj",
    "quantizer.vars": "quantizer.codevectors",
    "project_q": "project_q",
    "final_proj": "project_hid",
    "w2v_encoder.proj": "lm_head",
    "mask_emb": "masked_spec_embed",
    "pooling_layer.linear": "projector",
    "pooling_layer.projection": "classifier",
}
# Keys that live on the model root rather than under `wav2vec2.`.
TOP_LEVEL_KEYS = [
    "lm_head",
    "quantizer.weight_proj",
    "quantizer.codevectors",
    "project_q",
    "project_hid",
    "projector",
    "classifier",
]


def read_txt_into_dict(filename):
    """Read a label file: one label per line, mapped as {line_number: first token}."""
    result = {}
    with open(filename, "r") as file:
        for line_number, line in enumerate(file):
            line = line.strip()
            if line:
                words = line.split()
                key = line_number
                value = words[0]
                result[key] = value
    return result


def set_recursively(hf_pointer, key, value, full_name, weight_type):
    """Copy `value` into the HF module attribute addressed by dotted `key`/`weight_type`."""
    for attribute in key.split("."):
        hf_pointer = getattr(hf_pointer, attribute)

    hf_param_name = None
    for param_key in PARAM_MAPPING.keys():
        if full_name.endswith(param_key):
            hf_param_name = PARAM_MAPPING[full_name.split(".")[-1]]
            weight_type = "param"

    if weight_type is not None and weight_type != "param":
        hf_shape = getattr(hf_pointer, weight_type).shape
    elif weight_type is not None and weight_type == "param":
        shape_pointer = hf_pointer
        for attribute in hf_param_name.split("."):
            shape_pointer = getattr(shape_pointer, attribute)
        hf_shape = shape_pointer.shape

        # let's reduce dimension
        value = value[0]
    else:
        hf_shape = hf_pointer.shape

    if hf_shape != value.shape:
        raise ValueError(
            f"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
            f" {value.shape} for {full_name}"
        )

    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    elif weight_type == "param":
        for attribute in hf_param_name.split("."):
            hf_pointer = getattr(hf_pointer, attribute)
        hf_pointer.data = value
    else:
        hf_pointer.data = value

    logger.info(f"{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.")


def rename_dict(key, value, full_name, weight_type, hf_dict):
    """Record `value` in `hf_dict` under the fully-qualified HF key."""
    hf_param_name = None
    for param_key in PARAM_MAPPING.keys():
        if full_name.endswith(param_key):
            hf_param_name = PARAM_MAPPING[full_name.split(".")[-1]]
            weight_type = "param"

    if weight_type is not None and weight_type != "param":
        full_key = ".".join([key, weight_type])
    elif weight_type is not None and weight_type == "param":
        full_key = ".".join([key, hf_param_name])
    else:
        full_key = key

    hf_dict[full_key] = value if "lm_head" in full_key else value[0]


# Adapter parameter names (fairseq) -> HF adapter submodule parameter names.
PARAM_MAPPING = {
    "W_a": "linear_1.weight",
    "W_b": "linear_2.weight",
    "b_a": "linear_1.bias",
    "b_b": "linear_2.bias",
    "ln_W": "norm.weight",
    "ln_b": "norm.bias",
}


def load_wav2vec2_layer(name, value, hf_model=None, hf_dict=None):
    """Map one fairseq tensor onto the HF model (or dict); return whether it was used."""
    is_used = False
    for key, mapped_key in MAPPING.items():
        mapped_key = "wav2vec2." + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
        if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
            is_used = True
            if "*" in mapped_key:
                layer_index = name.split(key)[0].split(".")[-2]
                mapped_key = mapped_key.replace("*", layer_index)
            if "weight_g" in name:
                weight_type = "weight_g"
            elif "weight_v" in name:
                weight_type = "weight_v"
            elif "bias" in name:
                weight_type = "bias"
            elif "weight" in name:
                # TODO: don't match quantizer.weight_proj
                weight_type = "weight"
            else:
                weight_type = None
            if hf_dict is not None:
                rename_dict(mapped_key, value, name, weight_type, hf_dict)
            else:
                set_recursively(hf_model, mapped_key, value, name, weight_type)
            return is_used
    return is_used


def recursively_load_weights(fairseq_model, hf_model, is_headless):
    """Copy every fairseq weight into `hf_model`, warning about unused tensors."""
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()

    feature_extractor = hf_model.wav2vec2.feature_extractor

    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name,
                value,
                feature_extractor,
                unused_weights,
                hf_model.config.feat_extract_norm == "group",
            )
            is_used = True
        else:
            is_used = load_wav2vec2_layer(name, value, hf_model)
        if not is_used:
            unused_weights.append(name)

    logger.warning(f"Unused weights: {unused_weights}")


def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    """Copy one convolutional feature-extractor tensor, validating its shape."""
    name = full_name.split("conv_layers.")[-1]
    items = name.split(".")
    layer_id = int(items[0])
    type_id = int(items[1])

    if type_id == 0:
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
    else:
        unused_weights.append(full_name)


@torch.no_grad()
def convert_wav2vec2_checkpoint(
    checkpoint_path, pytorch_dump_folder_path, config_path=None, dict_path=None, is_finetuned=True, is_seq_class=False
):
    """Copy/paste/tweak a fairseq wav2vec2 model's weights into the Transformers design.

    Depending on the flags, the target is a sequence-classification head, a
    fine-tuned CTC model (with tokenizer/processor export), or a pretraining model.
    """
    if config_path is not None:
        config = Wav2Vec2Config.from_pretrained(config_path)
    else:
        config = Wav2Vec2Config()

    if is_seq_class:
        id2label = read_txt_into_dict(dict_path)
        config.id2label = id2label
        hf_wav2vec = Wav2Vec2ForSequenceClassification(config)
        feature_extractor = Wav2Vec2FeatureExtractor(
            feature_size=1,
            sampling_rate=16000,
            padding_value=0,
            do_normalize=True,
            return_attention_mask=True,
        )
        feature_extractor.save_pretrained(pytorch_dump_folder_path)
    elif is_finetuned:
        if dict_path:
            target_dict = Dictionary.load(dict_path)

            # important change bos & pad token id since CTC symbol is <pad> and
            # not <s> as in fairseq
            config.bos_token_id = target_dict.pad_index
            config.pad_token_id = target_dict.bos_index
            config.eos_token_id = target_dict.eos_index
            config.vocab_size = len(target_dict.symbols)
            vocab_path = os.path.join(pytorch_dump_folder_path, "vocab.json")
            if not os.path.isdir(pytorch_dump_folder_path):
                logger.error("--pytorch_dump_folder_path ({}) should be a directory".format(pytorch_dump_folder_path))
                return
            os.makedirs(pytorch_dump_folder_path, exist_ok=True)
            vocab_dict = target_dict.indices

            # fairseq has the <pad> and <s> switched
            vocab_dict["<pad>"] = 0
            vocab_dict["<s>"] = 1
            with open(vocab_path, "w", encoding="utf-8") as vocab_handle:
                json.dump(vocab_dict, vocab_handle)
            tokenizer = Wav2Vec2CTCTokenizer(
                vocab_path,
                unk_token=target_dict.unk_word,
                pad_token=target_dict.pad_word,
                bos_token=target_dict.bos_word,
                eos_token=target_dict.eos_word,
                word_delimiter_token="|",
                do_lower_case=False,
            )
            return_attention_mask = True if config.feat_extract_norm == "layer" else False
            feature_extractor = Wav2Vec2FeatureExtractor(
                feature_size=1,
                sampling_rate=16000,
                padding_value=0,
                do_normalize=True,
                return_attention_mask=return_attention_mask,
            )
            processor = Wav2Vec2Processor(feature_extractor=feature_extractor, tokenizer=tokenizer)
            processor.save_pretrained(pytorch_dump_folder_path)

        hf_wav2vec = Wav2Vec2ForCTC(config)
    else:
        hf_wav2vec = Wav2Vec2ForPreTraining(config)

    if is_finetuned or is_seq_class:
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
            [checkpoint_path], arg_overrides={"data": "/".join(dict_path.split("/")[:-1])}
        )
    else:
        task_arg = argparse.Namespace(task="audio_pretraining")
        task = fairseq.tasks.setup_task(task_arg)
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path], task=task)

    model = model[0].eval()

    recursively_load_weights(model, hf_wav2vec, not is_finetuned)

    hf_wav2vec.save_pretrained(pytorch_dump_folder_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint")
    parser.add_argument("--dict_path", default=None, type=str, help="Path to dict of fine-tuned model")
    parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
    parser.add_argument(
        "--not_finetuned", action="store_true", help="Whether the model to convert is a fine-tuned model or not"
    )
    parser.add_argument(
        "--is_seq_class",
        action="store_true",
        help="Whether the model to convert is a fine-tuned sequence classification model or not",
    )
    args = parser.parse_args()

    is_finetuned = not args.not_finetuned and not args.is_seq_class
    convert_wav2vec2_checkpoint(
        args.checkpoint_path,
        args.pytorch_dump_folder_path,
        args.config_path,
        args.dict_path,
        is_finetuned,
        args.is_seq_class,
    )
33
from collections import OrderedDict
from typing import Mapping

from packaging import version

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging


logger = logging.get_logger(__name__)

DATA2VEC_VISION_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/data2vec-vision-base-ft": (
        "https://huggingface.co/facebook/data2vec-vision-base-ft/resolve/main/config.json"
    ),
}


class Data2VecVisionConfig(PretrainedConfig):
    """Configuration for the Data2Vec vision model.

    Holds the transformer hyper-parameters plus the decode/auxiliary-head
    settings used for semantic segmentation. Unknown kwargs are forwarded
    to `PretrainedConfig`.
    """

    model_type = "data2vec-vision"

    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        image_size=224,
        patch_size=16,
        num_channels=3,
        use_mask_token=False,
        use_absolute_position_embeddings=False,
        use_relative_position_bias=False,
        use_shared_relative_position_bias=False,
        layer_scale_init_value=0.1,
        drop_path_rate=0.1,
        use_mean_pooling=True,
        out_indices=[3, 5, 7, 11],
        pool_scales=[1, 2, 3, 6],
        use_auxiliary_head=True,
        auxiliary_loss_weight=0.4,
        auxiliary_channels=256,
        auxiliary_num_convs=1,
        auxiliary_concat_input=False,
        semantic_loss_ignore_index=255,
        **kwargs,
    ):
        super().__init__(**kwargs)

        # Bug fix: the original discarded every argument into a throwaway local
        # instead of storing it on the config instance.
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps

        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.use_mask_token = use_mask_token
        self.use_absolute_position_embeddings = use_absolute_position_embeddings
        self.use_relative_position_bias = use_relative_position_bias
        self.use_shared_relative_position_bias = use_shared_relative_position_bias
        self.layer_scale_init_value = layer_scale_init_value
        self.drop_path_rate = drop_path_rate
        self.use_mean_pooling = use_mean_pooling
        # decode head attributes (semantic segmentation)
        self.out_indices = out_indices
        self.pool_scales = pool_scales
        # auxiliary head attributes (semantic segmentation)
        self.use_auxiliary_head = use_auxiliary_head
        self.auxiliary_loss_weight = auxiliary_loss_weight
        self.auxiliary_channels = auxiliary_channels
        self.auxiliary_num_convs = auxiliary_num_convs
        self.auxiliary_concat_input = auxiliary_concat_input
        self.semantic_loss_ignore_index = semantic_loss_ignore_index


class Data2VecVisionOnnxConfig(OnnxConfig):
    """ONNX export configuration for Data2Vec vision."""

    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        # Single image input; dynamic axes for batch/channels/spatial dims.
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4
33
1
import random import unittest import numpy as np from diffusers import ( DPMSolverMultistepScheduler, EulerAncestralDiscreteScheduler, EulerDiscreteScheduler, LMSDiscreteScheduler, OnnxStableDiffusionImgaImgPipeline, PNDMScheduler, ) from diffusers.utils import floats_tensor from diffusers.utils.testing_utils import ( is_onnx_available, load_image, nightly, require_onnxruntime, require_torch_gpu, ) from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin if is_onnx_available(): import onnxruntime as ort class __magic_name__ (snake_case_ ,unittest.TestCase ): '''simple docstring''' __lowercase : Tuple = 'hf-internal-testing/tiny-random-OnnxStableDiffusionPipeline' def SCREAMING_SNAKE_CASE__ ( self:Optional[int] , _a:Optional[int]=0 ): snake_case__ = floats_tensor((1, 3, 1_28, 1_28) , rng=random.Random(_a ) ) snake_case__ = np.random.RandomState(_a ) snake_case__ = { '''prompt''': '''A painting of a squirrel eating a burger''', '''image''': image, '''generator''': generator, '''num_inference_steps''': 3, '''strength''': 0.75, '''guidance_scale''': 7.5, '''output_type''': '''numpy''', } return inputs def SCREAMING_SNAKE_CASE__ ( self:int ): snake_case__ = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider='''CPUExecutionProvider''' ) pipe.set_progress_bar_config(disable=_a ) snake_case__ = self.get_dummy_inputs() snake_case__ = pipe(**_a ).images snake_case__ = image[0, -3:, -3:, -1].flatten() assert image.shape == (1, 1_28, 1_28, 3) snake_case__ = np.array([0.69643, 0.58484, 0.50314, 0.58760, 0.55368, 0.59643, 0.51529, 0.41217, 0.49087] ) assert np.abs(image_slice - expected_slice ).max() < 1e-1 def SCREAMING_SNAKE_CASE__ ( self:Tuple ): snake_case__ = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider='''CPUExecutionProvider''' ) snake_case__ = PNDMScheduler.from_config(pipe.scheduler.config , skip_prk_steps=_a ) pipe.set_progress_bar_config(disable=_a ) snake_case__ = self.get_dummy_inputs() 
snake_case__ = pipe(**_a ).images snake_case__ = image[0, -3:, -3:, -1] assert image.shape == (1, 1_28, 1_28, 3) snake_case__ = np.array([0.61737, 0.54642, 0.53183, 0.54465, 0.52742, 0.60525, 0.49969, 0.40655, 0.48154] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1 def SCREAMING_SNAKE_CASE__ ( self:Union[str, Any] ): snake_case__ = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider='''CPUExecutionProvider''' ) snake_case__ = LMSDiscreteScheduler.from_config(pipe.scheduler.config ) pipe.set_progress_bar_config(disable=_a ) # warmup pass to apply optimizations snake_case__ = pipe(**self.get_dummy_inputs() ) snake_case__ = self.get_dummy_inputs() snake_case__ = pipe(**_a ).images snake_case__ = image[0, -3:, -3:, -1] assert image.shape == (1, 1_28, 1_28, 3) snake_case__ = np.array([0.52761, 0.59977, 0.49033, 0.49619, 0.54282, 0.50311, 0.47600, 0.40918, 0.45203] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1 def SCREAMING_SNAKE_CASE__ ( self:str ): snake_case__ = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider='''CPUExecutionProvider''' ) snake_case__ = EulerDiscreteScheduler.from_config(pipe.scheduler.config ) pipe.set_progress_bar_config(disable=_a ) snake_case__ = self.get_dummy_inputs() snake_case__ = pipe(**_a ).images snake_case__ = image[0, -3:, -3:, -1] assert image.shape == (1, 1_28, 1_28, 3) snake_case__ = np.array([0.52911, 0.60004, 0.49229, 0.49805, 0.54502, 0.50680, 0.47777, 0.41028, 0.45304] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1 def SCREAMING_SNAKE_CASE__ ( self:List[str] ): snake_case__ = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider='''CPUExecutionProvider''' ) snake_case__ = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config ) pipe.set_progress_bar_config(disable=_a ) snake_case__ = self.get_dummy_inputs() snake_case__ = pipe(**_a ).images snake_case__ = 
image[0, -3:, -3:, -1] assert image.shape == (1, 1_28, 1_28, 3) snake_case__ = np.array([0.52911, 0.60004, 0.49229, 0.49805, 0.54502, 0.50680, 0.47777, 0.41028, 0.45304] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1 def SCREAMING_SNAKE_CASE__ ( self:str ): snake_case__ = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider='''CPUExecutionProvider''' ) snake_case__ = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config ) pipe.set_progress_bar_config(disable=_a ) snake_case__ = self.get_dummy_inputs() snake_case__ = pipe(**_a ).images snake_case__ = image[0, -3:, -3:, -1] assert image.shape == (1, 1_28, 1_28, 3) snake_case__ = np.array([0.65331, 0.58277, 0.48204, 0.56059, 0.53665, 0.56235, 0.50969, 0.40009, 0.46552] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1 @nightly @require_onnxruntime @require_torch_gpu class __magic_name__ (unittest.TestCase ): '''simple docstring''' @property def SCREAMING_SNAKE_CASE__ ( self:List[Any] ): return ( "CUDAExecutionProvider", { "gpu_mem_limit": "15000000000", # 15GB "arena_extend_strategy": "kSameAsRequested", }, ) @property def SCREAMING_SNAKE_CASE__ ( self:Tuple ): snake_case__ = ort.SessionOptions() snake_case__ = False return options def SCREAMING_SNAKE_CASE__ ( self:Optional[Any] ): snake_case__ = load_image( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main''' '''/img2img/sketch-mountains-input.jpg''' ) snake_case__ = init_image.resize((7_68, 5_12) ) # using the PNDM scheduler by default snake_case__ = OnnxStableDiffusionImgaImgPipeline.from_pretrained( '''CompVis/stable-diffusion-v1-4''' , revision='''onnx''' , safety_checker=_a , feature_extractor=_a , provider=self.gpu_provider , sess_options=self.gpu_options , ) pipe.set_progress_bar_config(disable=_a ) snake_case__ = '''A fantasy landscape, trending on artstation''' snake_case__ = np.random.RandomState(0 ) snake_case__ = pipe( prompt=_a , image=_a , 
strength=0.75 , guidance_scale=7.5 , num_inference_steps=10 , generator=_a , output_type='''np''' , ) snake_case__ = output.images snake_case__ = images[0, 2_55:2_58, 3_83:3_86, -1] assert images.shape == (1, 5_12, 7_68, 3) snake_case__ = np.array([0.4909, 0.5059, 0.5372, 0.4623, 0.4876, 0.5049, 0.4820, 0.4956, 0.5019] ) # TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues assert np.abs(image_slice.flatten() - expected_slice ).max() < 2e-2 def SCREAMING_SNAKE_CASE__ ( self:int ): snake_case__ = load_image( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main''' '''/img2img/sketch-mountains-input.jpg''' ) snake_case__ = init_image.resize((7_68, 5_12) ) snake_case__ = LMSDiscreteScheduler.from_pretrained( '''runwayml/stable-diffusion-v1-5''' , subfolder='''scheduler''' , revision='''onnx''' ) snake_case__ = OnnxStableDiffusionImgaImgPipeline.from_pretrained( '''runwayml/stable-diffusion-v1-5''' , revision='''onnx''' , scheduler=_a , safety_checker=_a , feature_extractor=_a , provider=self.gpu_provider , sess_options=self.gpu_options , ) pipe.set_progress_bar_config(disable=_a ) snake_case__ = '''A fantasy landscape, trending on artstation''' snake_case__ = np.random.RandomState(0 ) snake_case__ = pipe( prompt=_a , image=_a , strength=0.75 , guidance_scale=7.5 , num_inference_steps=20 , generator=_a , output_type='''np''' , ) snake_case__ = output.images snake_case__ = images[0, 2_55:2_58, 3_83:3_86, -1] assert images.shape == (1, 5_12, 7_68, 3) snake_case__ = np.array([0.8043, 0.926, 0.9581, 0.8119, 0.8954, 0.913, 0.7209, 0.7463, 0.7431] ) # TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues assert np.abs(image_slice.flatten() - expected_slice ).max() < 2e-2
33
import os
import sys

# Make the repository's ``src`` directory importable so ``transformers`` can be
# loaded straight from a source checkout (this is a torch.hub entry file).
SRC_DIR = os.path.join(os.path.dirname(__file__), "src")
sys.path.append(SRC_DIR)

from transformers import (
    AutoConfig,
    AutoModel,
    AutoModelForCausalLM,
    AutoModelForMaskedLM,
    AutoModelForQuestionAnswering,
    AutoModelForSequenceClassification,
    AutoTokenizer,
    add_start_docstrings,
)

# Packages torch.hub must install before this entry file can be used.
dependencies = [
    "torch",
    "numpy",
    "tokenizers",
    "filelock",
    "requests",
    "tqdm",
    "regex",
    "sentencepiece",
    "sacremoses",
    "importlib_metadata",
    "huggingface_hub",
]


@add_start_docstrings(AutoConfig.__doc__)
def config(*args, **kwargs):
    """Hub entry point: build a configuration via ``AutoConfig.from_pretrained``."""
    return AutoConfig.from_pretrained(*args, **kwargs)


@add_start_docstrings(AutoTokenizer.__doc__)
def tokenizer(*args, **kwargs):
    """Hub entry point: build a tokenizer via ``AutoTokenizer.from_pretrained``."""
    return AutoTokenizer.from_pretrained(*args, **kwargs)


@add_start_docstrings(AutoModel.__doc__)
def model(*args, **kwargs):
    """Hub entry point: build a base model via ``AutoModel.from_pretrained``."""
    return AutoModel.from_pretrained(*args, **kwargs)


@add_start_docstrings(AutoModelForCausalLM.__doc__)
def modelForCausalLM(*args, **kwargs):
    """Hub entry point: causal-LM head model."""
    return AutoModelForCausalLM.from_pretrained(*args, **kwargs)


@add_start_docstrings(AutoModelForMaskedLM.__doc__)
def modelForMaskedLM(*args, **kwargs):
    """Hub entry point: masked-LM head model."""
    return AutoModelForMaskedLM.from_pretrained(*args, **kwargs)


@add_start_docstrings(AutoModelForSequenceClassification.__doc__)
def modelForSequenceClassification(*args, **kwargs):
    """Hub entry point: sequence-classification head model."""
    return AutoModelForSequenceClassification.from_pretrained(*args, **kwargs)


@add_start_docstrings(AutoModelForQuestionAnswering.__doc__)
def modelForQuestionAnswering(*args, **kwargs):
    """Hub entry point: question-answering head model."""
    return AutoModelForQuestionAnswering.from_pretrained(*args, **kwargs)
33
1
from collections.abc import Sequence
from queue import Queue


class SegmentTreeNode:
    """One node of the segment tree, covering the interval ``[start, end]`` inclusive."""

    def __init__(self, start, end, val, left=None, right=None):
        self.start = start  # left bound of the covered interval
        self.end = end  # right bound (inclusive)
        self.val = val  # aggregated value for the interval
        self.mid = (start + end) // 2
        self.left = left  # child covering [start, mid]
        self.right = right  # child covering [mid + 1, end]

    def __repr__(self):
        return f"SegmentTreeNode(start={self.start}, end={self.end}, val={self.val})"


class SegmentTree:
    """Segment tree over ``collection`` aggregated with a binary ``function``.

    Supports point update and range query in O(log n) each.
    """

    def __init__(self, collection: Sequence, function):
        self.collection = collection
        self.fn = function
        if self.collection:
            self.root = self._build_tree(0, len(collection) - 1)

    def update(self, i, val):
        """Set element ``i`` to ``val`` and refresh all affected aggregates."""
        self._update_tree(self.root, i, val)

    def query_range(self, i, j):
        """Return the aggregate of elements ``i..j`` (both inclusive)."""
        return self._query_range(self.root, i, j)

    def _build_tree(self, start, end):
        # Leaf: a single element of the collection.
        if start == end:
            return SegmentTreeNode(start, end, self.collection[start])
        mid = (start + end) // 2
        left = self._build_tree(start, mid)
        right = self._build_tree(mid + 1, end)
        return SegmentTreeNode(start, end, self.fn(left.val, right.val), left, right)

    def _update_tree(self, node, i, val):
        if node.start == i and node.end == i:
            node.val = val
            return
        if i <= node.mid:
            self._update_tree(node.left, i, val)
        else:
            self._update_tree(node.right, i, val)
        # Recompute this interval's aggregate from the refreshed children.
        node.val = self.fn(node.left.val, node.right.val)

    def _query_range(self, node, i, j):
        if node.start == i and node.end == j:
            return node.val
        if i <= node.mid:
            if j <= node.mid:
                # range entirely in the left child tree
                return self._query_range(node.left, i, j)
            # range straddles the left and right child trees
            return self.fn(
                self._query_range(node.left, i, node.mid),
                self._query_range(node.right, node.mid + 1, j),
            )
        # range entirely in the right child tree
        return self._query_range(node.right, i, j)

    def traverse(self):
        """Yield nodes in breadth-first order (mainly for debugging/printing)."""
        if self.root is not None:
            queue = Queue()
            queue.put(self.root)
            while not queue.empty():
                node = queue.get()
                yield node
                if node.left is not None:
                    queue.put(node.left)
                if node.right is not None:
                    queue.put(node.right)


if __name__ == "__main__":
    import operator

    for fn in [operator.add, max, min]:
        print("*" * 50)
        arr = SegmentTree([2, 1, 5, 3, 4], fn)
        for node in arr.traverse():
            print(node)
        print()

        arr.update(1, 5)
        for node in arr.traverse():
            print(node)
        print()

        print(arr.query_range(3, 4))  # 7
        print(arr.query_range(2, 2))  # 5
        print(arr.query_range(1, 3))  # 13
        print()
33
import torch from diffusers import CMStochasticIterativeScheduler from .test_schedulers import SchedulerCommonTest class __magic_name__ (snake_case_ ): '''simple docstring''' __lowercase : str = (CMStochasticIterativeScheduler,) __lowercase : List[str] = 10 def SCREAMING_SNAKE_CASE__ ( self:int , **_a:Optional[int] ): snake_case__ = { '''num_train_timesteps''': 2_01, '''sigma_min''': 0.002, '''sigma_max''': 80.0, } config.update(**_a ) return config def SCREAMING_SNAKE_CASE__ ( self:Optional[int] ): snake_case__ = 10 snake_case__ = self.get_scheduler_config() snake_case__ = self.scheduler_classes[0](**_a ) scheduler.set_timesteps(_a ) snake_case__ = scheduler.timesteps[0] snake_case__ = scheduler.timesteps[1] snake_case__ = self.dummy_sample snake_case__ = 0.1 * sample snake_case__ = scheduler.step(_a , _a , _a ).prev_sample snake_case__ = scheduler.step(_a , _a , _a ).prev_sample self.assertEqual(output_a.shape , sample.shape ) self.assertEqual(output_a.shape , output_a.shape ) def SCREAMING_SNAKE_CASE__ ( self:Any ): for timesteps in [10, 50, 1_00, 10_00]: self.check_over_configs(num_train_timesteps=_a ) def SCREAMING_SNAKE_CASE__ ( self:List[str] ): for clip_denoised in [True, False]: self.check_over_configs(clip_denoised=_a ) def SCREAMING_SNAKE_CASE__ ( self:int ): snake_case__ = self.scheduler_classes[0] snake_case__ = self.get_scheduler_config() snake_case__ = scheduler_class(**_a ) snake_case__ = 1 scheduler.set_timesteps(_a ) snake_case__ = scheduler.timesteps snake_case__ = torch.manual_seed(0 ) snake_case__ = self.dummy_model() snake_case__ = self.dummy_sample_deter * scheduler.init_noise_sigma for i, t in enumerate(_a ): # 1. scale model input snake_case__ = scheduler.scale_model_input(_a , _a ) # 2. predict noise residual snake_case__ = model(_a , _a ) # 3. 
predict previous sample x_t-1 snake_case__ = scheduler.step(_a , _a , _a , generator=_a ).prev_sample snake_case__ = pred_prev_sample snake_case__ = torch.sum(torch.abs(_a ) ) snake_case__ = torch.mean(torch.abs(_a ) ) assert abs(result_sum.item() - 192.7614 ) < 1e-2 assert abs(result_mean.item() - 0.2510 ) < 1e-3 def SCREAMING_SNAKE_CASE__ ( self:Tuple ): snake_case__ = self.scheduler_classes[0] snake_case__ = self.get_scheduler_config() snake_case__ = scheduler_class(**_a ) snake_case__ = [1_06, 0] scheduler.set_timesteps(timesteps=_a ) snake_case__ = scheduler.timesteps snake_case__ = torch.manual_seed(0 ) snake_case__ = self.dummy_model() snake_case__ = self.dummy_sample_deter * scheduler.init_noise_sigma for t in timesteps: # 1. scale model input snake_case__ = scheduler.scale_model_input(_a , _a ) # 2. predict noise residual snake_case__ = model(_a , _a ) # 3. predict previous sample x_t-1 snake_case__ = scheduler.step(_a , _a , _a , generator=_a ).prev_sample snake_case__ = pred_prev_sample snake_case__ = torch.sum(torch.abs(_a ) ) snake_case__ = torch.mean(torch.abs(_a ) ) assert abs(result_sum.item() - 347.6357 ) < 1e-2 assert abs(result_mean.item() - 0.4527 ) < 1e-3 def SCREAMING_SNAKE_CASE__ ( self:Any ): snake_case__ = self.scheduler_classes[0] snake_case__ = self.get_scheduler_config() snake_case__ = scheduler_class(**_a ) snake_case__ = [39, 30, 12, 15, 0] with self.assertRaises(_a , msg='''`timesteps` must be in descending order.''' ): scheduler.set_timesteps(timesteps=_a ) def SCREAMING_SNAKE_CASE__ ( self:Any ): snake_case__ = self.scheduler_classes[0] snake_case__ = self.get_scheduler_config() snake_case__ = scheduler_class(**_a ) snake_case__ = [39, 30, 12, 1, 0] snake_case__ = len(_a ) with self.assertRaises(_a , msg='''Can only pass one of `num_inference_steps` or `timesteps`.''' ): scheduler.set_timesteps(num_inference_steps=_a , timesteps=_a ) def SCREAMING_SNAKE_CASE__ ( self:Tuple ): snake_case__ = self.scheduler_classes[0] snake_case__ = 
self.get_scheduler_config() snake_case__ = scheduler_class(**_a ) snake_case__ = [scheduler.config.num_train_timesteps] with self.assertRaises( _a , msg='''`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}}''' , ): scheduler.set_timesteps(timesteps=_a )
33
1
import os
import sys

# Make the repository's ``src`` directory importable so ``transformers`` can be
# loaded straight from a source checkout (this is a torch.hub entry file).
SRC_DIR = os.path.join(os.path.dirname(__file__), "src")
sys.path.append(SRC_DIR)

from transformers import (
    AutoConfig,
    AutoModel,
    AutoModelForCausalLM,
    AutoModelForMaskedLM,
    AutoModelForQuestionAnswering,
    AutoModelForSequenceClassification,
    AutoTokenizer,
    add_start_docstrings,
)

# Packages torch.hub must install before this entry file can be used.
dependencies = [
    "torch",
    "numpy",
    "tokenizers",
    "filelock",
    "requests",
    "tqdm",
    "regex",
    "sentencepiece",
    "sacremoses",
    "importlib_metadata",
    "huggingface_hub",
]


@add_start_docstrings(AutoConfig.__doc__)
def config(*args, **kwargs):
    """Hub entry point: build a configuration via ``AutoConfig.from_pretrained``."""
    return AutoConfig.from_pretrained(*args, **kwargs)


@add_start_docstrings(AutoTokenizer.__doc__)
def tokenizer(*args, **kwargs):
    """Hub entry point: build a tokenizer via ``AutoTokenizer.from_pretrained``."""
    return AutoTokenizer.from_pretrained(*args, **kwargs)


@add_start_docstrings(AutoModel.__doc__)
def model(*args, **kwargs):
    """Hub entry point: build a base model via ``AutoModel.from_pretrained``."""
    return AutoModel.from_pretrained(*args, **kwargs)


@add_start_docstrings(AutoModelForCausalLM.__doc__)
def modelForCausalLM(*args, **kwargs):
    """Hub entry point: causal-LM head model."""
    return AutoModelForCausalLM.from_pretrained(*args, **kwargs)


@add_start_docstrings(AutoModelForMaskedLM.__doc__)
def modelForMaskedLM(*args, **kwargs):
    """Hub entry point: masked-LM head model."""
    return AutoModelForMaskedLM.from_pretrained(*args, **kwargs)


@add_start_docstrings(AutoModelForSequenceClassification.__doc__)
def modelForSequenceClassification(*args, **kwargs):
    """Hub entry point: sequence-classification head model."""
    return AutoModelForSequenceClassification.from_pretrained(*args, **kwargs)


@add_start_docstrings(AutoModelForQuestionAnswering.__doc__)
def modelForQuestionAnswering(*args, **kwargs):
    """Hub entry point: question-answering head model."""
    return AutoModelForQuestionAnswering.from_pretrained(*args, **kwargs)
33
import numpy as np def SCREAMING_SNAKE_CASE ( __lowerCAmelCase ) -> np.ndarray: return 1 / (1 + np.exp(-vector )) def SCREAMING_SNAKE_CASE ( __lowerCAmelCase ) -> np.ndarray: return vector * sigmoid(__lowerCAmelCase ) if __name__ == "__main__": import doctest doctest.testmod()
33
1
import os import tempfile import unittest from transformers import is_torch_available from transformers.testing_utils import require_torch if is_torch_available(): import torch from torch import nn from transformers import ( Adafactor, AdamW, get_constant_schedule, get_constant_schedule_with_warmup, get_cosine_schedule_with_warmup, get_cosine_with_hard_restarts_schedule_with_warmup, get_inverse_sqrt_schedule, get_linear_schedule_with_warmup, get_polynomial_decay_schedule_with_warmup, ) def SCREAMING_SNAKE_CASE ( __lowerCAmelCase , __lowerCAmelCase=10 ) -> List[str]: snake_case__ = [] for _ in range(__lowerCAmelCase ): lrs.append(scheduler.get_lr()[0] ) scheduler.step() return lrs def SCREAMING_SNAKE_CASE ( __lowerCAmelCase , __lowerCAmelCase=10 ) -> Dict: snake_case__ = [] for step in range(__lowerCAmelCase ): lrs.append(scheduler.get_lr()[0] ) scheduler.step() if step == num_steps // 2: with tempfile.TemporaryDirectory() as tmpdirname: snake_case__ = os.path.join(__lowerCAmelCase , '''schedule.bin''' ) torch.save(scheduler.state_dict() , __lowerCAmelCase ) snake_case__ = torch.load(__lowerCAmelCase ) scheduler.load_state_dict(__lowerCAmelCase ) return lrs @require_torch class __magic_name__ (unittest.TestCase ): '''simple docstring''' def SCREAMING_SNAKE_CASE__ ( self:int , _a:List[Any] , _a:Optional[Any] , _a:int ): self.assertEqual(len(_a ) , len(_a ) ) for a, b in zip(_a , _a ): self.assertAlmostEqual(_a , _a , delta=_a ) def SCREAMING_SNAKE_CASE__ ( self:Optional[Any] ): snake_case__ = torch.tensor([0.1, -0.2, -0.1] , requires_grad=_a ) snake_case__ = torch.tensor([0.4, 0.2, -0.5] ) snake_case__ = nn.MSELoss() # No warmup, constant schedule, no gradient clipping snake_case__ = AdamW(params=[w] , lr=2e-1 , weight_decay=0.0 ) for _ in range(1_00 ): snake_case__ = criterion(_a , _a ) loss.backward() optimizer.step() w.grad.detach_() # No zero_grad() function on simple tensors. we do it ourselves. 
w.grad.zero_() self.assertListAlmostEqual(w.tolist() , [0.4, 0.2, -0.5] , tol=1e-2 ) def SCREAMING_SNAKE_CASE__ ( self:Any ): snake_case__ = torch.tensor([0.1, -0.2, -0.1] , requires_grad=_a ) snake_case__ = torch.tensor([0.4, 0.2, -0.5] ) snake_case__ = nn.MSELoss() # No warmup, constant schedule, no gradient clipping snake_case__ = Adafactor( params=[w] , lr=1e-2 , eps=(1e-30, 1e-3) , clip_threshold=1.0 , decay_rate=-0.8 , betaa=_a , weight_decay=0.0 , relative_step=_a , scale_parameter=_a , warmup_init=_a , ) for _ in range(10_00 ): snake_case__ = criterion(_a , _a ) loss.backward() optimizer.step() w.grad.detach_() # No zero_grad() function on simple tensors. we do it ourselves. w.grad.zero_() self.assertListAlmostEqual(w.tolist() , [0.4, 0.2, -0.5] , tol=1e-2 ) @require_torch class __magic_name__ (unittest.TestCase ): '''simple docstring''' __lowercase : int = nn.Linear(50 ,50 ) if is_torch_available() else None __lowercase : Optional[Any] = AdamW(m.parameters() ,lr=10.0 ) if is_torch_available() else None __lowercase : List[str] = 10 def SCREAMING_SNAKE_CASE__ ( self:List[str] , _a:Optional[int] , _a:Optional[int] , _a:Union[str, Any] , _a:int=None ): self.assertEqual(len(_a ) , len(_a ) ) for a, b in zip(_a , _a ): self.assertAlmostEqual(_a , _a , delta=_a , msg=_a ) def SCREAMING_SNAKE_CASE__ ( self:int ): snake_case__ = {'''num_warmup_steps''': 2, '''num_training_steps''': 10} # schedulers doct format # function: (sched_args_dict, expected_learning_rates) snake_case__ = { get_constant_schedule: ({}, [10.0] * self.num_steps), get_constant_schedule_with_warmup: ( {'''num_warmup_steps''': 4}, [0.0, 2.5, 5.0, 7.5, 10.0, 10.0, 10.0, 10.0, 10.0, 10.0], ), get_linear_schedule_with_warmup: ( {**common_kwargs}, [0.0, 5.0, 10.0, 8.75, 7.5, 6.25, 5.0, 3.75, 2.5, 1.25], ), get_cosine_schedule_with_warmup: ( {**common_kwargs}, [0.0, 5.0, 10.0, 9.61, 8.53, 6.91, 5.0, 3.08, 1.46, 0.38], ), get_cosine_with_hard_restarts_schedule_with_warmup: ( {**common_kwargs, 
'''num_cycles''': 2}, [0.0, 5.0, 10.0, 8.53, 5.0, 1.46, 10.0, 8.53, 5.0, 1.46], ), get_polynomial_decay_schedule_with_warmup: ( {**common_kwargs, '''power''': 2.0, '''lr_end''': 1e-7}, [0.0, 5.0, 10.0, 7.656, 5.625, 3.906, 2.5, 1.406, 0.625, 0.156], ), get_inverse_sqrt_schedule: ( {'''num_warmup_steps''': 2}, [0.0, 5.0, 10.0, 8.165, 7.071, 6.325, 5.774, 5.345, 5.0, 4.714], ), } for scheduler_func, data in scheds.items(): snake_case__ , snake_case__ = data snake_case__ = scheduler_func(self.optimizer , **_a ) self.assertEqual(len([scheduler.get_lr()[0]] ) , 1 ) snake_case__ = unwrap_schedule(_a , self.num_steps ) self.assertListAlmostEqual( _a , _a , tol=1e-2 , msg=F"""failed for {scheduler_func} in normal scheduler""" , ) snake_case__ = scheduler_func(self.optimizer , **_a ) if scheduler_func.__name__ != "get_constant_schedule": LambdaScheduleWrapper.wrap_scheduler(_a ) # wrap to test picklability of the schedule snake_case__ = unwrap_and_save_reload_schedule(_a , self.num_steps ) self.assertListEqual(_a , _a , msg=F"""failed for {scheduler_func} in save and reload""" ) class __magic_name__ : '''simple docstring''' def __init__( self:Any , _a:Dict ): snake_case__ = fn def __call__( self:Any , *_a:Dict , **_a:List[str] ): return self.fn(*_a , **_a ) @classmethod def SCREAMING_SNAKE_CASE__ ( self:Optional[Any] , _a:Tuple ): snake_case__ = list(map(self , scheduler.lr_lambdas ) )
33
def SCREAMING_SNAKE_CASE ( __lowerCAmelCase = 100 ) -> int: snake_case__ = set() snake_case__ = 0 snake_case__ = n + 1 # maximum limit for a in range(2 , __lowerCAmelCase ): for b in range(2 , __lowerCAmelCase ): snake_case__ = a**b # calculates the current power collect_powers.add(__lowerCAmelCase ) # adds the result to the set return len(__lowerCAmelCase ) if __name__ == "__main__": print("""Number of terms """, solution(int(str(input()).strip())))
33
1
import os import shutil import tempfile import unittest import numpy as np from transformers import AutoTokenizer, BarkProcessor from transformers.testing_utils import require_torch, slow @require_torch class __magic_name__ (unittest.TestCase ): '''simple docstring''' def SCREAMING_SNAKE_CASE__ ( self:Union[str, Any] ): snake_case__ = '''ylacombe/bark-small''' snake_case__ = tempfile.mkdtemp() snake_case__ = '''en_speaker_1''' snake_case__ = '''This is a test string''' snake_case__ = '''speaker_embeddings_path.json''' snake_case__ = '''speaker_embeddings''' def SCREAMING_SNAKE_CASE__ ( self:Any , **_a:List[Any] ): return AutoTokenizer.from_pretrained(self.checkpoint , **_a ) def SCREAMING_SNAKE_CASE__ ( self:Any ): shutil.rmtree(self.tmpdirname ) def SCREAMING_SNAKE_CASE__ ( self:Optional[Any] ): snake_case__ = self.get_tokenizer() snake_case__ = BarkProcessor(tokenizer=_a ) processor.save_pretrained(self.tmpdirname ) snake_case__ = BarkProcessor.from_pretrained(self.tmpdirname ) self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab() ) @slow def SCREAMING_SNAKE_CASE__ ( self:str ): snake_case__ = BarkProcessor.from_pretrained( pretrained_processor_name_or_path=self.checkpoint , speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , ) processor.save_pretrained( self.tmpdirname , speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , speaker_embeddings_directory=self.speaker_embeddings_directory , ) snake_case__ = self.get_tokenizer(bos_token='''(BOS)''' , eos_token='''(EOS)''' ) snake_case__ = BarkProcessor.from_pretrained( self.tmpdirname , self.speaker_embeddings_dict_path , bos_token='''(BOS)''' , eos_token='''(EOS)''' , ) self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() ) def SCREAMING_SNAKE_CASE__ ( self:Optional[int] ): snake_case__ = BarkProcessor.from_pretrained( pretrained_processor_name_or_path=self.checkpoint , speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , ) 
snake_case__ = 35 snake_case__ = 2 snake_case__ = 8 snake_case__ = { '''semantic_prompt''': np.ones(_a ), '''coarse_prompt''': np.ones((nb_codebooks_coarse, seq_len) ), '''fine_prompt''': np.ones((nb_codebooks_total, seq_len) ), } # test providing already loaded voice_preset snake_case__ = processor(text=self.input_string , voice_preset=_a ) snake_case__ = inputs['''history_prompt'''] for key in voice_preset: self.assertListEqual(voice_preset[key].tolist() , processed_voice_preset.get(_a , np.array([] ) ).tolist() ) # test loading voice preset from npz file snake_case__ = os.path.join(self.tmpdirname , '''file.npz''' ) np.savez(_a , **_a ) snake_case__ = processor(text=self.input_string , voice_preset=_a ) snake_case__ = inputs['''history_prompt'''] for key in voice_preset: self.assertListEqual(voice_preset[key].tolist() , processed_voice_preset.get(_a , np.array([] ) ).tolist() ) # test loading voice preset from the hub snake_case__ = processor(text=self.input_string , voice_preset=self.voice_preset ) def SCREAMING_SNAKE_CASE__ ( self:Optional[int] ): snake_case__ = self.get_tokenizer() snake_case__ = BarkProcessor(tokenizer=_a ) snake_case__ = processor(text=self.input_string ) snake_case__ = tokenizer( self.input_string , padding='''max_length''' , max_length=2_56 , add_special_tokens=_a , return_attention_mask=_a , return_token_type_ids=_a , ) for key in encoded_tok.keys(): self.assertListEqual(encoded_tok[key] , encoded_processor[key].squeeze().tolist() )
33
from copy import deepcopy class __magic_name__ : '''simple docstring''' def __init__( self:int , _a:list[int] | None = None , _a:int | None = None ): if arr is None and size is not None: snake_case__ = size snake_case__ = [0] * size elif arr is not None: self.init(_a ) else: raise ValueError('''Either arr or size must be specified''' ) def SCREAMING_SNAKE_CASE__ ( self:Any , _a:list[int] ): snake_case__ = len(_a ) snake_case__ = deepcopy(_a ) for i in range(1 , self.size ): snake_case__ = self.next_(_a ) if j < self.size: self.tree[j] += self.tree[i] def SCREAMING_SNAKE_CASE__ ( self:Any ): snake_case__ = self.tree[:] for i in range(self.size - 1 , 0 , -1 ): snake_case__ = self.next_(_a ) if j < self.size: arr[j] -= arr[i] return arr @staticmethod def SCREAMING_SNAKE_CASE__ ( _a:int ): return index + (index & (-index)) @staticmethod def SCREAMING_SNAKE_CASE__ ( _a:int ): return index - (index & (-index)) def SCREAMING_SNAKE_CASE__ ( self:List[Any] , _a:int , _a:int ): if index == 0: self.tree[0] += value return while index < self.size: self.tree[index] += value snake_case__ = self.next_(_a ) def SCREAMING_SNAKE_CASE__ ( self:Dict , _a:int , _a:int ): self.add(_a , value - self.get(_a ) ) def SCREAMING_SNAKE_CASE__ ( self:Optional[Any] , _a:int ): if right == 0: return 0 snake_case__ = self.tree[0] right -= 1 # make right inclusive while right > 0: result += self.tree[right] snake_case__ = self.prev(_a ) return result def SCREAMING_SNAKE_CASE__ ( self:Dict , _a:int , _a:int ): return self.prefix(_a ) - self.prefix(_a ) def SCREAMING_SNAKE_CASE__ ( self:str , _a:int ): return self.query(_a , index + 1 ) def SCREAMING_SNAKE_CASE__ ( self:str , _a:int ): value -= self.tree[0] if value < 0: return -1 snake_case__ = 1 # Largest power of 2 <= size while j * 2 < self.size: j *= 2 snake_case__ = 0 while j > 0: if i + j < self.size and self.tree[i + j] <= value: value -= self.tree[i + j] i += j j //= 2 return i if __name__ == "__main__": import doctest doctest.testmod()
33
1
lowerCamelCase__ : Any = { """meter""": """m""", """kilometer""": """km""", """megametre""": """Mm""", """gigametre""": """Gm""", """terametre""": """Tm""", """petametre""": """Pm""", """exametre""": """Em""", """zettametre""": """Zm""", """yottametre""": """Ym""", } # Exponent of the factor(meter) lowerCamelCase__ : int = { """m""": 0, """km""": 3, """Mm""": 6, """Gm""": 9, """Tm""": 1_2, """Pm""": 1_5, """Em""": 1_8, """Zm""": 2_1, """Ym""": 2_4, } def SCREAMING_SNAKE_CASE ( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) -> float: snake_case__ = from_type.lower().strip('''s''' ) snake_case__ = to_type.lower().strip('''s''' ) snake_case__ = UNIT_SYMBOL.get(__lowerCAmelCase , __lowerCAmelCase ) snake_case__ = UNIT_SYMBOL.get(__lowerCAmelCase , __lowerCAmelCase ) if from_sanitized not in METRIC_CONVERSION: snake_case__ = ( F"""Invalid 'from_type' value: {from_type!r}.\n""" F"""Conversion abbreviations are: {', '.join(__lowerCAmelCase )}""" ) raise ValueError(__lowerCAmelCase ) if to_sanitized not in METRIC_CONVERSION: snake_case__ = ( F"""Invalid 'to_type' value: {to_type!r}.\n""" F"""Conversion abbreviations are: {', '.join(__lowerCAmelCase )}""" ) raise ValueError(__lowerCAmelCase ) snake_case__ = METRIC_CONVERSION[from_sanitized] snake_case__ = METRIC_CONVERSION[to_sanitized] snake_case__ = 1 if from_exponent > to_exponent: snake_case__ = from_exponent - to_exponent else: snake_case__ = -(to_exponent - from_exponent) return value * pow(10 , __lowerCAmelCase ) if __name__ == "__main__": from doctest import testmod testmod()
33
# NOTE(review): this test module appears mechanically obfuscated: every class is
# named `__magic_name__`, helpers `SCREAMING_SNAKE_CASE(__)`, parameters are all
# `_a` / `__lowerCAmelCase` (duplicate parameter names are a SyntaxError), class
# bases are the undefined name `snake_case_`, and attribute assignments were
# collapsed to `snake_case__ = ...` — each rebinding discards the previous value
# instead of setting `self.<attr>` / a distinct local. Later code reads the real
# names (`self.batch_size`, `input_ids`, `TFBlenderbotModelTester`, ...), so
# this file cannot run as-is. Restore from the upstream
# tests/models/blenderbot/test_modeling_tf_blenderbot.py before trusting it.
from __future__ import annotations

import unittest

from transformers import BlenderbotConfig, BlenderbotTokenizer, is_tf_available
from transformers.testing_utils import require_tf, require_tokenizers, slow
from transformers.utils import cached_property

from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin


if is_tf_available():
    import tensorflow as tf

    from transformers import TFAutoModelForSeqaSeqLM, TFBlenderbotForConditionalGeneration, TFBlenderbotModel


@require_tf
class __magic_name__:
    """Model tester building a tiny Blenderbot config + inputs for the suite below.

    Upstream name is presumably ``TFBlenderbotModelTester`` (the test class
    further down instantiates that name) — confirm against upstream.
    """

    __lowercase : int = BlenderbotConfig
    __lowercase : Any = {}  # presumably `config_updates` — confirm
    __lowercase : Optional[Any] = 'gelu'  # presumably `hidden_act` — confirm

    # NOTE(review): duplicate `_a` parameters below are a SyntaxError; the body
    # reads parent/batch_size/seq_length/... so those were the original names.
    def __init__( self:Tuple , _a:Optional[Any] , _a:Optional[Any]=13 , _a:Tuple=7 , _a:Union[str, Any]=True , _a:int=False , _a:int=99 , _a:Optional[int]=32 , _a:List[str]=2 , _a:List[str]=4 , _a:List[Any]=37 , _a:Any=0.1 , _a:int=0.1 , _a:List[Any]=20 , _a:List[str]=2 , _a:int=1 , _a:Dict=0 , ):
        # NOTE(review): each line below should be `self.<name> = <name>`.
        snake_case__ = parent
        snake_case__ = batch_size
        snake_case__ = seq_length
        snake_case__ = is_training
        snake_case__ = use_labels
        snake_case__ = vocab_size
        snake_case__ = hidden_size
        snake_case__ = num_hidden_layers
        snake_case__ = num_attention_heads
        snake_case__ = intermediate_size
        snake_case__ = hidden_dropout_prob
        snake_case__ = attention_probs_dropout_prob
        snake_case__ = max_position_embeddings
        snake_case__ = eos_token_id
        snake_case__ = pad_token_id
        snake_case__ = bos_token_id

    def SCREAMING_SNAKE_CASE__ ( self:int ):
        # Presumably `prepare_config_and_inputs_for_common` — builds input ids
        # ending in EOS, a small config, and the shared inputs dict.
        snake_case__ = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size )
        snake_case__ = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ) , 1 )
        snake_case__ = tf.concat([input_ids, eos_tensor] , axis=1 )
        snake_case__ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        snake_case__ = self.config_cls(
            vocab_size=self.vocab_size ,
            d_model=self.hidden_size ,
            encoder_layers=self.num_hidden_layers ,
            decoder_layers=self.num_hidden_layers ,
            encoder_attention_heads=self.num_attention_heads ,
            decoder_attention_heads=self.num_attention_heads ,
            encoder_ffn_dim=self.intermediate_size ,
            decoder_ffn_dim=self.intermediate_size ,
            dropout=self.hidden_dropout_prob ,
            attention_dropout=self.attention_probs_dropout_prob ,
            max_position_embeddings=self.max_position_embeddings ,
            eos_token_ids=[2] ,
            bos_token_id=self.bos_token_id ,
            pad_token_id=self.pad_token_id ,
            decoder_start_token_id=self.pad_token_id ,
            **self.config_updates ,
        )
        snake_case__ = prepare_blenderbot_inputs_dict(_a , _a , _a )
        return config, inputs_dict

    def SCREAMING_SNAKE_CASE__ ( self:int , _a:Optional[Any] , _a:int ):
        # Presumably `check_decoder_model_past_large_inputs`: verifies that
        # decoding with cached past_key_values matches a full forward pass.
        snake_case__ = TFBlenderbotModel(config=_a ).get_decoder()
        snake_case__ = inputs_dict['''input_ids''']
        snake_case__ = input_ids[:1, :]
        snake_case__ = inputs_dict['''attention_mask'''][:1, :]
        snake_case__ = inputs_dict['''head_mask''']
        snake_case__ = 1
        # first forward pass
        snake_case__ = model(_a , attention_mask=_a , head_mask=_a , use_cache=_a )
        snake_case__ , snake_case__ = outputs.to_tuple()
        # create hypothetical next token and extent to next_input_ids
        snake_case__ = ids_tensor((self.batch_size, 3) , config.vocab_size )
        snake_case__ = tf.cast(ids_tensor((self.batch_size, 3) , 2 ) , tf.inta )
        # append to next input_ids and
        snake_case__ = tf.concat([input_ids, next_tokens] , axis=-1 )
        snake_case__ = tf.concat([attention_mask, next_attn_mask] , axis=-1 )
        snake_case__ = model(_a , attention_mask=_a )[0]
        snake_case__ = model(_a , attention_mask=_a , past_key_values=_a )[0]
        self.parent.assertEqual(next_tokens.shape[1] , output_from_past.shape[1] )
        # select random slice
        snake_case__ = int(ids_tensor((1,) , output_from_past.shape[-1] ) )
        snake_case__ = output_from_no_past[:, -3:, random_slice_idx]
        snake_case__ = output_from_past[:, :, random_slice_idx]
        # test that outputs are equal for slice
        tf.debugging.assert_near(_a , _a , rtol=1e-3 )


# Presumably `prepare_blenderbot_inputs_dict(config, input_ids,
# decoder_input_ids, attention_mask=None, decoder_attention_mask=None,
# head_mask=None, decoder_head_mask=None, cross_attn_head_mask=None)`:
# fills in default masks derived from pad_token_id and layer/head counts.
def SCREAMING_SNAKE_CASE ( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase=None , __lowerCAmelCase=None , __lowerCAmelCase=None , __lowerCAmelCase=None , __lowerCAmelCase=None , ) -> Tuple:
    if attention_mask is None:
        snake_case__ = tf.cast(tf.math.not_equal(__lowerCAmelCase , config.pad_token_id ) , tf.inta )
    if decoder_attention_mask is None:
        # first decoder position is always attended; rest mask out padding
        snake_case__ = tf.concat(
            [
                tf.ones(decoder_input_ids[:, :1].shape , dtype=tf.inta ),
                tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:] , config.pad_token_id ) , tf.inta ),
            ] ,
            axis=-1 ,
        )
    if head_mask is None:
        snake_case__ = tf.ones((config.encoder_layers, config.encoder_attention_heads) )
    if decoder_head_mask is None:
        snake_case__ = tf.ones((config.decoder_layers, config.decoder_attention_heads) )
    if cross_attn_head_mask is None:
        snake_case__ = tf.ones((config.decoder_layers, config.decoder_attention_heads) )
    return {
        "input_ids": input_ids,
        "decoder_input_ids": decoder_input_ids,
        "attention_mask": attention_mask,
        "decoder_attention_mask": decoder_attention_mask,
        "head_mask": head_mask,
        "decoder_head_mask": decoder_head_mask,
        "cross_attn_head_mask": cross_attn_head_mask,
    }


# NOTE(review): `snake_case_` bases are undefined — upstream is presumably
# (TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase).
@require_tf
class __magic_name__ (snake_case_ ,snake_case_ ,unittest.TestCase ):
    """Common-API test suite for the TF Blenderbot model classes."""

    __lowercase : List[str] = (TFBlenderbotForConditionalGeneration, TFBlenderbotModel) if is_tf_available() else ()
    __lowercase : Any = (TFBlenderbotForConditionalGeneration,) if is_tf_available() else ()
    __lowercase : Tuple = (
        {
            'conversational': TFBlenderbotForConditionalGeneration,
            'feature-extraction': TFBlenderbotModel,
            'summarization': TFBlenderbotForConditionalGeneration,
            'text2text-generation': TFBlenderbotForConditionalGeneration,
            'translation': TFBlenderbotForConditionalGeneration,
        }
        if is_tf_available()
        else {}
    )
    __lowercase : Any = True
    __lowercase : int = False
    __lowercase : int = False

    def SCREAMING_SNAKE_CASE__ ( self:Union[str, Any] ):
        # setUp: should bind self.model_tester / self.config_tester.
        # NOTE(review): `TFBlenderbotModelTester` is not defined in this
        # obfuscated file (the tester class above lost its name).
        snake_case__ = TFBlenderbotModelTester(self )
        snake_case__ = ConfigTester(self , config_class=_a )

    def SCREAMING_SNAKE_CASE__ ( self:Optional[Any] ):
        self.config_tester.run_common_tests()

    def SCREAMING_SNAKE_CASE__ ( self:Optional[Any] ):
        snake_case__ = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_decoder_model_past_large_inputs(*_a )


@require_tokenizers
@require_tf
class __magic_name__ (unittest.TestCase ):
    """Slow integration test against the 400M distilled Blenderbot checkpoint."""

    __lowercase : Optional[int] = ['My friends are cool but they eat too many carbs.']  # presumably src_text
    __lowercase : Optional[int] = 'facebook/blenderbot-400M-distill'  # presumably model_name

    @cached_property
    def SCREAMING_SNAKE_CASE__ ( self:Tuple ):
        # presumably the `tokenizer` property
        return BlenderbotTokenizer.from_pretrained(self.model_name )

    @cached_property
    def SCREAMING_SNAKE_CASE__ ( self:Union[str, Any] ):
        # presumably the `model` property
        snake_case__ = TFAutoModelForSeqaSeqLM.from_pretrained(self.model_name )
        return model

    @slow
    def SCREAMING_SNAKE_CASE__ ( self:List[Any] ):
        # Round-trips one prompt through generate() and pins the exact reply.
        snake_case__ = self.tokenizer(self.src_text , return_tensors='''tf''' )
        snake_case__ = self.model.generate(
            model_inputs.input_ids ,
        )
        snake_case__ = self.tokenizer.batch_decode(generated_ids.numpy() , skip_special_tokens=_a )[0]
        assert (
            generated_words
            == " That's unfortunate. Are they trying to lose weight or are they just trying to be healthier?"
        )
33
1
"""Deprecated feature-extractor alias for the Perceiver image processor."""
import warnings

from ...utils import logging
from .image_processing_perceiver import PerceiverImageProcessor


logger = logging.get_logger(__name__)


class PerceiverFeatureExtractor(PerceiverImageProcessor):
    """Deprecated alias of :class:`PerceiverImageProcessor`.

    Kept only for backward compatibility; emits a ``FutureWarning`` on
    construction and otherwise behaves exactly like the image processor.

    Original (obfuscated) code subclassed the undefined name ``snake_case_``
    (a NameError at import time) and passed ``_a`` — the first positional
    argument — as the warning *category*; restored to the upstream form.
    """

    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class PerceiverFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use PerceiverImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)


# Backward-compatible alias for the obfuscated original class name.
__magic_name__ = PerceiverFeatureExtractor
33
# NOTE(review): mechanically obfuscated test module — classes are
# `__magic_name__`, every test method is `SCREAMING_SNAKE_CASE__`, and local
# assignments were collapsed to `snake_case__ = ...` (each rebinding discards
# the previous value), while later lines read the real names
# (`config_dict`, `image_processor`, ...). Assertion arguments are all the
# placeholder `_a`. Restore from upstream
# tests/models/auto/test_image_processing_auto.py before trusting behavior.
import json
import sys
import tempfile
import unittest
from pathlib import Path

import transformers
from transformers import (
    CONFIG_MAPPING,
    IMAGE_PROCESSOR_MAPPING,
    AutoConfig,
    AutoImageProcessor,
    CLIPConfig,
    CLIPImageProcessor,
)
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER


sys.path.append(str(Path(__file__).parent.parent.parent.parent / """utils"""))

from test_module.custom_configuration import CustomConfig  # noqa E402
from test_module.custom_image_processing import CustomImageProcessor  # noqa E402


class __magic_name__ (unittest.TestCase ):
    """Tests for AutoImageProcessor resolution, registration and remote code."""

    def SCREAMING_SNAKE_CASE__ ( self:List[Any] ):
        # presumably setUp initialising a counter — confirm upstream
        snake_case__ = 0

    def SCREAMING_SNAKE_CASE__ ( self:Optional[Any] ):
        # Load by hub model id.
        snake_case__ = AutoImageProcessor.from_pretrained('''openai/clip-vit-base-patch32''' )
        self.assertIsInstance(_a , _a )

    def SCREAMING_SNAKE_CASE__ ( self:str ):
        # Load from a local directory containing preprocessor_config.json.
        with tempfile.TemporaryDirectory() as tmpdirname:
            snake_case__ = Path(_a ) / '''preprocessor_config.json'''
            snake_case__ = Path(_a ) / '''config.json'''
            json.dump(
                {'''image_processor_type''': '''CLIPImageProcessor''', '''processor_class''': '''CLIPProcessor'''} ,
                open(_a , '''w''' ) ,
            )
            json.dump({'''model_type''': '''clip'''} , open(_a , '''w''' ) )
            snake_case__ = AutoImageProcessor.from_pretrained(_a )
            self.assertIsInstance(_a , _a )

    def SCREAMING_SNAKE_CASE__ ( self:Dict ):
        # Ensure we can load the image processor from the feature extractor config
        with tempfile.TemporaryDirectory() as tmpdirname:
            snake_case__ = Path(_a ) / '''preprocessor_config.json'''
            snake_case__ = Path(_a ) / '''config.json'''
            json.dump(
                {'''feature_extractor_type''': '''CLIPFeatureExtractor''', '''processor_class''': '''CLIPProcessor'''} ,
                open(_a , '''w''' ) ,
            )
            json.dump({'''model_type''': '''clip'''} , open(_a , '''w''' ) )
            snake_case__ = AutoImageProcessor.from_pretrained(_a )
            self.assertIsInstance(_a , _a )

    def SCREAMING_SNAKE_CASE__ ( self:Optional[int] ):
        with tempfile.TemporaryDirectory() as tmpdirname:
            snake_case__ = CLIPConfig()
            # Create a dummy config file with image_proceesor_type
            snake_case__ = Path(_a ) / '''preprocessor_config.json'''
            snake_case__ = Path(_a ) / '''config.json'''
            json.dump(
                {'''image_processor_type''': '''CLIPImageProcessor''', '''processor_class''': '''CLIPProcessor'''} ,
                open(_a , '''w''' ) ,
            )
            json.dump({'''model_type''': '''clip'''} , open(_a , '''w''' ) )
            # remove image_processor_type to make sure config.json alone is enough to load image processor locally
            snake_case__ = AutoImageProcessor.from_pretrained(_a ).to_dict()
            config_dict.pop('''image_processor_type''' )
            snake_case__ = CLIPImageProcessor(**_a )
            # save in new folder
            model_config.save_pretrained(_a )
            config.save_pretrained(_a )
            snake_case__ = AutoImageProcessor.from_pretrained(_a )
            # make sure private variable is not incorrectly saved
            snake_case__ = json.loads(config.to_json_string() )
            self.assertTrue('''_processor_class''' not in dict_as_saved )
            self.assertIsInstance(_a , _a )

    def SCREAMING_SNAKE_CASE__ ( self:List[str] ):
        # Load with only the preprocessor config present (no config.json).
        with tempfile.TemporaryDirectory() as tmpdirname:
            snake_case__ = Path(_a ) / '''preprocessor_config.json'''
            json.dump(
                {'''image_processor_type''': '''CLIPImageProcessor''', '''processor_class''': '''CLIPProcessor'''} ,
                open(_a , '''w''' ) ,
            )
            snake_case__ = AutoImageProcessor.from_pretrained(_a )
            self.assertIsInstance(_a , _a )

    def SCREAMING_SNAKE_CASE__ ( self:Dict ):
        # Invalid model identifier should raise with a helpful message.
        with self.assertRaisesRegex(
            _a , '''clip-base is not a local folder and is not a valid model identifier''' ):
            snake_case__ = AutoImageProcessor.from_pretrained('''clip-base''' )

    def SCREAMING_SNAKE_CASE__ ( self:Union[str, Any] ):
        # Invalid revision should raise.
        with self.assertRaisesRegex(
            _a , r'''aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)''' ):
            snake_case__ = AutoImageProcessor.from_pretrained(_a , revision='''aaaaaa''' )

    def SCREAMING_SNAKE_CASE__ ( self:List[Any] ):
        # Repo without a preprocessor config should raise.
        # NOTE(review): this string was split across a collapsed line break in
        # the source; single-space join assumed — confirm upstream.
        with self.assertRaisesRegex(
            _a ,
            '''hf-internal-testing/config-no-model does not appear to have a file named preprocessor_config.json.''' ,
        ):
            snake_case__ = AutoImageProcessor.from_pretrained('''hf-internal-testing/config-no-model''' )

    def SCREAMING_SNAKE_CASE__ ( self:Dict ):
        # If remote code is not set, we will time out when asking whether to load the model.
        with self.assertRaises(_a ):
            snake_case__ = AutoImageProcessor.from_pretrained('''hf-internal-testing/test_dynamic_image_processor''' )
        # If remote code is disabled, we can't load this config.
        with self.assertRaises(_a ):
            snake_case__ = AutoImageProcessor.from_pretrained(
                '''hf-internal-testing/test_dynamic_image_processor''' , trust_remote_code=_a )
        snake_case__ = AutoImageProcessor.from_pretrained(
            '''hf-internal-testing/test_dynamic_image_processor''' , trust_remote_code=_a )
        self.assertEqual(image_processor.__class__.__name__ , '''NewImageProcessor''' )
        # Test image processor can be reloaded.
        with tempfile.TemporaryDirectory() as tmp_dir:
            image_processor.save_pretrained(_a )
            snake_case__ = AutoImageProcessor.from_pretrained(_a , trust_remote_code=_a )
        self.assertEqual(reloaded_image_processor.__class__.__name__ , '''NewImageProcessor''' )

    def SCREAMING_SNAKE_CASE__ ( self:Dict ):
        # Custom config/image-processor registration round trip.
        try:
            AutoConfig.register('''custom''' , _a )
            AutoImageProcessor.register(_a , _a )
            # Trying to register something existing in the Transformers library will raise an error
            with self.assertRaises(_a ):
                AutoImageProcessor.register(_a , _a )
            with tempfile.TemporaryDirectory() as tmpdirname:
                snake_case__ = Path(_a ) / '''preprocessor_config.json'''
                snake_case__ = Path(_a ) / '''config.json'''
                json.dump(
                    {'''feature_extractor_type''': '''CLIPFeatureExtractor''', '''processor_class''': '''CLIPProcessor'''} ,
                    open(_a , '''w''' ) ,
                )
                json.dump({'''model_type''': '''clip'''} , open(_a , '''w''' ) )
                snake_case__ = CustomImageProcessor.from_pretrained(_a )
            # Now that the config is registered, it can be used as any other config with the auto-API
            with tempfile.TemporaryDirectory() as tmp_dir:
                image_processor.save_pretrained(_a )
                snake_case__ = AutoImageProcessor.from_pretrained(_a )
                self.assertIsInstance(_a , _a )
        finally:
            # Always deregister so other tests see a clean mapping.
            if "custom" in CONFIG_MAPPING._extra_content:
                del CONFIG_MAPPING._extra_content["custom"]
            if CustomConfig in IMAGE_PROCESSOR_MAPPING._extra_content:
                del IMAGE_PROCESSOR_MAPPING._extra_content[CustomConfig]

    def SCREAMING_SNAKE_CASE__ ( self:Optional[int] ):
        # Local registered class should win over remote code unless trust_remote_code
        # is explicitly enabled.
        class __magic_name__ (snake_case_ ):
            """Local stand-in image processor; flags itself via `is_local`."""

            __lowercase : List[str] = True

        try:
            AutoConfig.register('''custom''' , _a )
            AutoImageProcessor.register(_a , _a )
            # If remote code is not set, the default is to use local
            snake_case__ = AutoImageProcessor.from_pretrained('''hf-internal-testing/test_dynamic_image_processor''' )
            self.assertEqual(image_processor.__class__.__name__ , '''NewImageProcessor''' )
            self.assertTrue(image_processor.is_local )
            # If remote code is disabled, we load the local one.
            snake_case__ = AutoImageProcessor.from_pretrained(
                '''hf-internal-testing/test_dynamic_image_processor''' , trust_remote_code=_a )
            self.assertEqual(image_processor.__class__.__name__ , '''NewImageProcessor''' )
            self.assertTrue(image_processor.is_local )
            # If remote is enabled, we load from the Hub
            snake_case__ = AutoImageProcessor.from_pretrained(
                '''hf-internal-testing/test_dynamic_image_processor''' , trust_remote_code=_a )
            self.assertEqual(image_processor.__class__.__name__ , '''NewImageProcessor''' )
            self.assertTrue(not hasattr(_a , '''is_local''' ) )
        finally:
            if "custom" in CONFIG_MAPPING._extra_content:
                del CONFIG_MAPPING._extra_content["custom"]
            if CustomConfig in IMAGE_PROCESSOR_MAPPING._extra_content:
                del IMAGE_PROCESSOR_MAPPING._extra_content[CustomConfig]
33
1
# NOTE(review): mechanically obfuscated preprocessing script for BigBird on
# Natural Questions. All module constants were renamed to the same identifier
# `lowerCamelCase__`, yet later code reads DOC_STRIDE / MAX_LENGTH / SEED /
# PROCESS_TRAIN / CATEGORY_MAPPING (presumably the original names, in that
# order — confirm upstream). Function names collide (`SCREAMING_SNAKE_CASE`
# redefined four times at module level, each def shadowing the previous) while
# bodies call `_get_single_answer`, `get_context_and_ans`,
# `get_strided_contexts_and_ans`, `prepare_inputs`, `save_to_disk` — the real
# names. Locals were collapsed to `snake_case__ = ...`. Cannot run as-is.
import os

import jsonlines
import numpy as np
from tqdm import tqdm


lowerCamelCase__ : Optional[Any] = 2_0_4_8  # presumably DOC_STRIDE
lowerCamelCase__ : Tuple = 4_0_9_6  # presumably MAX_LENGTH
lowerCamelCase__ : Any = 4_2  # presumably SEED
lowerCamelCase__ : str = os.environ.pop("""PROCESS_TRAIN""", """false""")
lowerCamelCase__ : Any = {"""null""": 0, """short""": 1, """long""": 2, """yes""": 3, """no""": 4}  # presumably CATEGORY_MAPPING


def SCREAMING_SNAKE_CASE ( __lowerCAmelCase ) -> Union[str, Any]:
    # Presumably `_get_single_answer(example)`: collapse NQ annotations into a
    # single answer record (yes/no, short, or long).
    def choose_first(__lowerCAmelCase , __lowerCAmelCase=False ):
        assert isinstance(__lowerCAmelCase , __lowerCAmelCase )
        if len(__lowerCAmelCase ) == 1:
            snake_case__ = answer[0]
            return {k: [answer[k]] for k in answer} if is_long_answer else answer
        for a in answer:
            if is_long_answer:
                snake_case__ = {k: [a[k]] for k in a}
            if len(a['''start_token'''] ) > 0:
                break
        return a

    snake_case__ = {'''id''': example['''id''']}
    snake_case__ = example['''annotations''']
    snake_case__ = annotation['''yes_no_answer''']
    if 0 in yes_no_answer or 1 in yes_no_answer:
        snake_case__ = ['''yes'''] if 1 in yes_no_answer else ['''no''']
        snake_case__ = snake_case__ = []
        snake_case__ = snake_case__ = []
        snake_case__ = ['''<cls>''']
    else:
        snake_case__ = ['''short''']
        snake_case__ = choose_first(annotation['''short_answers'''] )
        if len(out['''start_token'''] ) == 0:
            # answer will be long if short is not available
            snake_case__ = ['''long''']
            snake_case__ = choose_first(annotation['''long_answer'''] , is_long_answer=__lowerCAmelCase )
            snake_case__ = []
        answer.update(__lowerCAmelCase )
    # disregard some samples
    if len(answer['''start_token'''] ) > 1 or answer["start_token"] == answer["end_token"]:
        snake_case__ = True
    else:
        snake_case__ = False
    snake_case__ = ['''start_token''', '''end_token''', '''start_byte''', '''end_byte''', '''text''']
    if not all(isinstance(answer[k] , __lowerCAmelCase ) for k in cols ):
        raise ValueError('''Issue in ID''' , example['''id'''] )
    return answer


def SCREAMING_SNAKE_CASE ( __lowerCAmelCase , __lowerCAmelCase=False ) -> Dict:
    # Presumably `get_context_and_ans(example, assertion=False)`: strip HTML
    # tokens from the document and re-index the answer span accordingly.
    snake_case__ = _get_single_answer(__lowerCAmelCase )
    # bytes are of no use
    del answer["start_byte"]
    del answer["end_byte"]
    # handle yes_no answers explicitly
    if answer["category"][0] in ["yes", "no"]:  # category is list with one element
        snake_case__ = example['''document''']['''tokens''']
        snake_case__ = []
        for i in range(len(doc['''token'''] ) ):
            if not doc["is_html"][i]:
                context.append(doc['''token'''][i] )
        return {
            "context": " ".join(__lowerCAmelCase ),
            "answer": {
                "start_token": -100,  # ignore index in cross-entropy
                "end_token": -100,  # ignore index in cross-entropy
                "category": answer["category"],
                "span": answer["category"],  # extra
            },
        }
    # later, help in removing all no answers
    if answer["start_token"] == [-1]:
        return {
            "context": "None",
            "answer": {
                "start_token": -1,
                "end_token": -1,
                "category": "null",
                "span": "None",  # extra
            },
        }
    # handling normal samples
    snake_case__ = ['''start_token''', '''end_token''']
    answer.update({k: answer[k][0] if len(answer[k] ) > 0 else answer[k] for k in cols} )  # e.g. [10] == 10
    snake_case__ = example['''document''']['''tokens''']
    snake_case__ = answer['''start_token''']
    snake_case__ = answer['''end_token''']
    snake_case__ = []
    for i in range(len(doc['''token'''] ) ):
        if not doc["is_html"][i]:
            context.append(doc['''token'''][i] )
        else:
            # shift the span left for every HTML token removed before it
            if answer["start_token"] > i:
                start_token -= 1
            if answer["end_token"] > i:
                end_token -= 1
    snake_case__ = ''' '''.join(context[start_token:end_token] )
    # checking above code
    if assertion:
        snake_case__ = doc['''is_html'''][answer['''start_token'''] : answer['''end_token''']]
        snake_case__ = doc['''token'''][answer['''start_token'''] : answer['''end_token''']]
        snake_case__ = ''' '''.join([old[i] for i in range(len(__lowerCAmelCase ) ) if not is_html[i]] )
        if new != old:
            print('''ID:''' , example['''id'''] )
            print('''New:''' , __lowerCAmelCase , end='''\n''' )
            print('''Old:''' , __lowerCAmelCase , end='''\n\n''' )
    return {
        "context": " ".join(__lowerCAmelCase ),
        "answer": {
            "start_token": start_token,
            "end_token": end_token - 1,  # this makes it inclusive
            "category": answer["category"],  # either long or short
            "span": new,  # extra
        },
    }


def SCREAMING_SNAKE_CASE ( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase=2048 , __lowerCAmelCase=4096 , __lowerCAmelCase=True ) -> Dict:
    # Presumably `get_strided_contexts_and_ans(example, tokenizer,
    # doc_stride=2048, max_length=4096, assertion=True)`: tokenize and window
    # the context with overlap, re-anchoring the answer span in each window.
    # overlap will be of doc_stride - q_len
    snake_case__ = get_context_and_ans(__lowerCAmelCase , assertion=__lowerCAmelCase )
    snake_case__ = out['''answer''']
    # later, removing these samples
    if answer["start_token"] == -1:
        return {
            "example_id": example["id"],
            "input_ids": [[-1]],
            "labels": {
                "start_token": [-1],
                "end_token": [-1],
                "category": ["null"],
            },
        }
    snake_case__ = tokenizer(example['''question''']['''text'''] , out['''context'''] ).input_ids
    snake_case__ = input_ids.index(tokenizer.sep_token_id ) + 1
    # return yes/no
    if answer["category"][0] in ["yes", "no"]:  # category is list with one element
        snake_case__ = []
        snake_case__ = []
        snake_case__ = input_ids[:q_len]
        snake_case__ = range(__lowerCAmelCase , len(__lowerCAmelCase ) , max_length - doc_stride )
        for i in doc_start_indices:
            snake_case__ = i + max_length - q_len
            snake_case__ = input_ids[i:end_index]
            inputs.append(q_indices + slice )
            category.append(answer['''category'''][0] )
            if slice[-1] == tokenizer.sep_token_id:
                break
        return {
            "example_id": example["id"],
            "input_ids": inputs,
            "labels": {
                "start_token": [-100] * len(__lowerCAmelCase ),
                "end_token": [-100] * len(__lowerCAmelCase ),
                "category": category,
            },
        }
    snake_case__ = out['''context'''].split()
    snake_case__ = splitted_context[answer['''end_token''']]
    snake_case__ = len(
        tokenizer(
            ''' '''.join(splitted_context[: answer['''start_token''']] ) ,
            add_special_tokens=__lowerCAmelCase ,
        ).input_ids )
    snake_case__ = len(
        tokenizer(''' '''.join(splitted_context[: answer['''end_token''']] ) , add_special_tokens=__lowerCAmelCase ).input_ids )
    answer["start_token"] += q_len
    answer["end_token"] += q_len
    # fixing end token
    snake_case__ = len(tokenizer(__lowerCAmelCase , add_special_tokens=__lowerCAmelCase ).input_ids )
    if num_sub_tokens > 1:
        answer["end_token"] += num_sub_tokens - 1
    snake_case__ = input_ids[answer['''start_token'''] : answer['''end_token'''] + 1]  # right & left are inclusive
    snake_case__ = answer['''start_token''']
    snake_case__ = answer['''end_token''']
    if assertion:
        snake_case__ = tokenizer.decode(__lowerCAmelCase )
        if answer["span"] != new:
            print('''ISSUE IN TOKENIZATION''' )
            print('''OLD:''' , answer['''span'''] )
            print('''NEW:''' , __lowerCAmelCase , end='''\n\n''' )
    if len(__lowerCAmelCase ) <= max_length:
        return {
            "example_id": example["id"],
            "input_ids": [input_ids],
            "labels": {
                "start_token": [answer["start_token"]],
                "end_token": [answer["end_token"]],
                "category": answer["category"],
            },
        }
    snake_case__ = input_ids[:q_len]
    snake_case__ = range(__lowerCAmelCase , len(__lowerCAmelCase ) , max_length - doc_stride )
    snake_case__ = []
    snake_case__ = []
    snake_case__ = []
    snake_case__ = []  # null, yes, no, long, short
    for i in doc_start_indices:
        snake_case__ = i + max_length - q_len
        snake_case__ = input_ids[i:end_index]
        inputs.append(q_indices + slice )
        assert len(inputs[-1] ) <= max_length, "Issue in truncating length"
        if start_token >= i and end_token <= end_index - 1:
            # window contains the whole answer: re-anchor the span
            snake_case__ = start_token - i + q_len
            snake_case__ = end_token - i + q_len
            answers_category.append(answer['''category'''][0] )  # ["short"] -> "short"
        else:
            snake_case__ = -100
            snake_case__ = -100
            answers_category.append('''null''' )
        snake_case__ = inputs[-1][start_token : end_token + 1]
        answers_start_token.append(__lowerCAmelCase )
        answers_end_token.append(__lowerCAmelCase )
        if assertion:
            if new != old and new != [tokenizer.cls_token_id]:
                print('''ISSUE in strided for ID:''' , example['''id'''] )
                print('''New:''' , tokenizer.decode(__lowerCAmelCase ) )
                print('''Old:''' , tokenizer.decode(__lowerCAmelCase ) , end='''\n\n''' )
        if slice[-1] == tokenizer.sep_token_id:
            break
    return {
        "example_id": example["id"],
        "input_ids": inputs,
        "labels": {
            "start_token": answers_start_token,
            "end_token": answers_end_token,
            "category": answers_category,
        },
    }


def SCREAMING_SNAKE_CASE ( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase=2048 , __lowerCAmelCase=4096 , __lowerCAmelCase=False ) -> List[Any]:
    # Presumably `prepare_inputs` — the `datasets.map` adapter.
    snake_case__ = get_strided_contexts_and_ans(
        __lowerCAmelCase ,
        __lowerCAmelCase ,
        doc_stride=__lowerCAmelCase ,
        max_length=__lowerCAmelCase ,
        assertion=__lowerCAmelCase ,
    )
    return example


def SCREAMING_SNAKE_CASE ( __lowerCAmelCase , __lowerCAmelCase ) -> List[str]:
    # Presumably `save_to_disk(hf_data, file_name)`: stream windows to jsonlines,
    # dropping unanswered windows and downsampling "null" windows.
    with jsonlines.open(__lowerCAmelCase , '''a''' ) as writer:
        for example in tqdm(__lowerCAmelCase , total=len(__lowerCAmelCase ) , desc='''Saving samples ... ''' ):
            snake_case__ = example['''labels''']
            for ids, start, end, cat in zip(
                example['''input_ids'''] ,
                labels['''start_token'''] ,
                labels['''end_token'''] ,
                labels['''category'''] ,
            ):
                if start == -1 and end == -1:
                    continue  # leave waste samples with no answer
                if cat == "null" and np.random.rand() < 0.6:
                    continue  # removing 50 % samples
                    # NOTE(review): comment says 50% but threshold is 0.6 (60%) — confirm intent
                writer.write(
                    {
                        '''input_ids''': ids,
                        '''start_token''': start,
                        '''end_token''': end,
                        '''category''': CATEGORY_MAPPING[cat],
                    } )


if __name__ == "__main__":
    from datasets import load_dataset

    from transformers import BigBirdTokenizer

    lowerCamelCase__ : Union[str, Any] = load_dataset("""natural_questions""")
    lowerCamelCase__ : str = BigBirdTokenizer.from_pretrained("""google/bigbird-roberta-base""")
    lowerCamelCase__ : List[str] = data["""train""" if PROCESS_TRAIN == """true""" else """validation"""]
    lowerCamelCase__ : List[str] = {
        """tokenizer""": tokenizer,
        """doc_stride""": DOC_STRIDE,
        """max_length""": MAX_LENGTH,
        """assertion""": False,
    }
    lowerCamelCase__ : List[Any] = data.map(prepare_inputs, fn_kwargs=fn_kwargs)
    lowerCamelCase__ : Any = data.remove_columns(["""annotations""", """document""", """id""", """question"""])
    print(data)
    np.random.seed(SEED)
    lowerCamelCase__ : Tuple = """nq-training.jsonl""" if PROCESS_TRAIN == """true""" else """nq-validation.jsonl"""
    save_to_disk(data, file_name=cache_file_name)
33
# NOTE(review): mechanically obfuscated checkpoint-conversion script
# (timm ViT/DeiT -> HuggingFace ViT). All six module-level functions share the
# name `SCREAMING_SNAKE_CASE` (each def shadows the previous), yet the driver
# calls `create_rename_keys`, `read_in_q_k_v`, `remove_classification_head_`,
# `rename_key`, `prepare_img`, `convert_vit_checkpoint` — presumably the
# original names, in definition order. Locals/targets were collapsed to
# `snake_case__ = ...`, so e.g. the q/k/v state-dict writes below assign and
# immediately discard instead of writing `state_dict[...] = ...`. Restore from
# upstream convert_vit_timm_to_pytorch.py before trusting behavior.
import argparse
import json
from pathlib import Path

import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image

from transformers import DeiTImageProcessor, ViTConfig, ViTForImageClassification, ViTImageProcessor, ViTModel
from transformers.utils import logging


logging.set_verbosity_info()
lowerCamelCase__ : int = logging.get_logger(__name__)  # presumably `logger`


def SCREAMING_SNAKE_CASE ( __lowerCAmelCase , __lowerCAmelCase=False ) -> int:
    # Presumably `create_rename_keys(config, base_model=False)`: list of
    # (timm key, HF key) pairs for a plain key rename.
    snake_case__ = []
    for i in range(config.num_hidden_layers ):
        # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
        rename_keys.append((F"""blocks.{i}.norm1.weight""", F"""vit.encoder.layer.{i}.layernorm_before.weight""") )
        rename_keys.append((F"""blocks.{i}.norm1.bias""", F"""vit.encoder.layer.{i}.layernorm_before.bias""") )
        rename_keys.append((F"""blocks.{i}.attn.proj.weight""", F"""vit.encoder.layer.{i}.attention.output.dense.weight""") )
        rename_keys.append((F"""blocks.{i}.attn.proj.bias""", F"""vit.encoder.layer.{i}.attention.output.dense.bias""") )
        rename_keys.append((F"""blocks.{i}.norm2.weight""", F"""vit.encoder.layer.{i}.layernorm_after.weight""") )
        rename_keys.append((F"""blocks.{i}.norm2.bias""", F"""vit.encoder.layer.{i}.layernorm_after.bias""") )
        rename_keys.append((F"""blocks.{i}.mlp.fc1.weight""", F"""vit.encoder.layer.{i}.intermediate.dense.weight""") )
        rename_keys.append((F"""blocks.{i}.mlp.fc1.bias""", F"""vit.encoder.layer.{i}.intermediate.dense.bias""") )
        rename_keys.append((F"""blocks.{i}.mlp.fc2.weight""", F"""vit.encoder.layer.{i}.output.dense.weight""") )
        rename_keys.append((F"""blocks.{i}.mlp.fc2.bias""", F"""vit.encoder.layer.{i}.output.dense.bias""") )
    # projection layer + position embeddings
    rename_keys.extend(
        [
            ('''cls_token''', '''vit.embeddings.cls_token'''),
            ('''patch_embed.proj.weight''', '''vit.embeddings.patch_embeddings.projection.weight'''),
            ('''patch_embed.proj.bias''', '''vit.embeddings.patch_embeddings.projection.bias'''),
            ('''pos_embed''', '''vit.embeddings.position_embeddings'''),
        ] )
    if base_model:
        # layernorm + pooler
        rename_keys.extend(
            [
                ('''norm.weight''', '''layernorm.weight'''),
                ('''norm.bias''', '''layernorm.bias'''),
                ('''pre_logits.fc.weight''', '''pooler.dense.weight'''),
                ('''pre_logits.fc.bias''', '''pooler.dense.bias'''),
            ] )
        # if just the base model, we should remove "vit" from all keys that start with "vit"
        snake_case__ = [(pair[0], pair[1][4:]) if pair[1].startswith('''vit''' ) else pair for pair in rename_keys]
    else:
        # layernorm + classification head
        rename_keys.extend(
            [
                ('''norm.weight''', '''vit.layernorm.weight'''),
                ('''norm.bias''', '''vit.layernorm.bias'''),
                ('''head.weight''', '''classifier.weight'''),
                ('''head.bias''', '''classifier.bias'''),
            ] )
    return rename_keys


def SCREAMING_SNAKE_CASE ( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase=False ) -> Dict:
    # Presumably `read_in_q_k_v(state_dict, config, base_model=False)`: split
    # timm's fused qkv matrix into separate HF query/key/value tensors.
    # NOTE(review): the `snake_case__ =` targets below should be
    # `state_dict[f"{prefix}encoder.layer.{i}.attention.attention.<q|k|v>.<weight|bias>"]`.
    for i in range(config.num_hidden_layers ):
        if base_model:
            snake_case__ = ''''''
        else:
            snake_case__ = '''vit.'''
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        snake_case__ = state_dict.pop(F"""blocks.{i}.attn.qkv.weight""" )
        snake_case__ = state_dict.pop(F"""blocks.{i}.attn.qkv.bias""" )
        # next, add query, keys and values (in that order) to the state dict
        snake_case__ = in_proj_weight[
            : config.hidden_size, :
        ]
        snake_case__ = in_proj_bias[: config.hidden_size]
        snake_case__ = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        snake_case__ = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        snake_case__ = in_proj_weight[
            -config.hidden_size :, :
        ]
        snake_case__ = in_proj_bias[-config.hidden_size :]


def SCREAMING_SNAKE_CASE ( __lowerCAmelCase ) -> Optional[Any]:
    # Presumably `remove_classification_head_(state_dict)`.
    snake_case__ = ['''head.weight''', '''head.bias''']
    for k in ignore_keys:
        state_dict.pop(__lowerCAmelCase , __lowerCAmelCase )


def SCREAMING_SNAKE_CASE ( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) -> Union[str, Any]:
    # Presumably `rename_key(dct, old, new)`.
    snake_case__ = dct.pop(__lowerCAmelCase )
    snake_case__ = val


def SCREAMING_SNAKE_CASE ( ) -> str:
    # Presumably `prepare_img()`: fetch the standard COCO cats test image.
    snake_case__ = '''http://images.cocodataset.org/val2017/000000039769.jpg'''
    snake_case__ = Image.open(requests.get(__lowerCAmelCase , stream=__lowerCAmelCase ).raw )
    return im


@torch.no_grad()
def SCREAMING_SNAKE_CASE ( __lowerCAmelCase , __lowerCAmelCase ) -> Dict:
    # Presumably `convert_vit_checkpoint(vit_name, pytorch_dump_folder_path)`:
    # build config from the timm model name, convert weights, sanity-check
    # against timm outputs, and save model + image processor.
    snake_case__ = ViTConfig()
    snake_case__ = False
    # dataset (ImageNet-21k only or also fine-tuned on ImageNet 2012), patch_size and image_size
    if vit_name[-5:] == "in21k":
        snake_case__ = True
        snake_case__ = int(vit_name[-12:-10] )
        snake_case__ = int(vit_name[-9:-6] )
    else:
        snake_case__ = 1000
        snake_case__ = '''huggingface/label-files'''
        snake_case__ = '''imagenet-1k-id2label.json'''
        snake_case__ = json.load(open(hf_hub_download(__lowerCAmelCase , __lowerCAmelCase , repo_type='''dataset''' ) , '''r''' ) )
        snake_case__ = {int(__lowerCAmelCase ): v for k, v in idalabel.items()}
        snake_case__ = idalabel
        snake_case__ = {v: k for k, v in idalabel.items()}
        snake_case__ = int(vit_name[-6:-4] )
        snake_case__ = int(vit_name[-3:] )
    # size of the architecture
    if "deit" in vit_name:
        if vit_name[9:].startswith('''tiny''' ):
            snake_case__ = 192
            snake_case__ = 768
            snake_case__ = 12
            snake_case__ = 3
        elif vit_name[9:].startswith('''small''' ):
            snake_case__ = 384
            snake_case__ = 1536
            snake_case__ = 12
            snake_case__ = 6
        else:
            pass  # base config already matches deit-base
    else:
        if vit_name[4:].startswith('''small''' ):
            snake_case__ = 768
            snake_case__ = 2304
            snake_case__ = 8
            snake_case__ = 8
        elif vit_name[4:].startswith('''base''' ):
            pass  # defaults already match vit-base
        elif vit_name[4:].startswith('''large''' ):
            snake_case__ = 1024
            snake_case__ = 4096
            snake_case__ = 24
            snake_case__ = 16
        elif vit_name[4:].startswith('''huge''' ):
            snake_case__ = 1280
            snake_case__ = 5120
            snake_case__ = 32
            snake_case__ = 16
    # load original model from timm
    snake_case__ = timm.create_model(__lowerCAmelCase , pretrained=__lowerCAmelCase )
    timm_model.eval()
    # load state_dict of original model, remove and rename some keys
    snake_case__ = timm_model.state_dict()
    if base_model:
        remove_classification_head_(__lowerCAmelCase )
    snake_case__ = create_rename_keys(__lowerCAmelCase , __lowerCAmelCase )
    for src, dest in rename_keys:
        rename_key(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
    read_in_q_k_v(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
    # load HuggingFace model
    if vit_name[-5:] == "in21k":
        snake_case__ = ViTModel(__lowerCAmelCase ).eval()
    else:
        snake_case__ = ViTForImageClassification(__lowerCAmelCase ).eval()
    model.load_state_dict(__lowerCAmelCase )
    # Check outputs on an image, prepared by ViTImageProcessor/DeiTImageProcessor
    if "deit" in vit_name:
        snake_case__ = DeiTImageProcessor(size=config.image_size )
    else:
        snake_case__ = ViTImageProcessor(size=config.image_size )
    snake_case__ = image_processor(images=prepare_img() , return_tensors='''pt''' )
    snake_case__ = encoding['''pixel_values''']
    snake_case__ = model(__lowerCAmelCase )
    if base_model:
        snake_case__ = timm_model.forward_features(__lowerCAmelCase )
        assert timm_pooled_output.shape == outputs.pooler_output.shape
        assert torch.allclose(__lowerCAmelCase , outputs.pooler_output , atol=1e-3 )
    else:
        snake_case__ = timm_model(__lowerCAmelCase )
        assert timm_logits.shape == outputs.logits.shape
        assert torch.allclose(__lowerCAmelCase , outputs.logits , atol=1e-3 )
    Path(__lowerCAmelCase ).mkdir(exist_ok=__lowerCAmelCase )
    print(F"""Saving model {vit_name} to {pytorch_dump_folder_path}""" )
    model.save_pretrained(__lowerCAmelCase )
    print(F"""Saving image processor to {pytorch_dump_folder_path}""" )
    image_processor.save_pretrained(__lowerCAmelCase )


if __name__ == "__main__":
    lowerCamelCase__ : Optional[Any] = argparse.ArgumentParser()  # presumably `parser`
    # Required parameters
    parser.add_argument(
        """--vit_name""",
        default="""vit_base_patch16_224""",
        type=str,
        help="""Name of the ViT timm model you'd like to convert.""",
    )
    parser.add_argument(
        """--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
    )
    lowerCamelCase__ : str = parser.parse_args()  # presumably `args`
    convert_vit_checkpoint(args.vit_name, args.pytorch_dump_folder_path)
33
1
import pickle

import numpy as np
from matplotlib import pyplot as plt


class CNN:
    """A small convolution + back-propagation network (1 conv layer, 1 pooling
    layer, 2 fully-connected BP layers), implemented with numpy matrices.

    NOTE(review): reconstructed from name-collapsed code; attribute names are
    grounded by the pickle keys written in ``save_model``.
    """

    def __init__(self, conv1_get, size_p1, bp_num1, bp_num2, bp_num3, rate_w=0.2, rate_t=0.2):
        """
        :param conv1_get: [kernel_size, kernel_count, conv_step] of the conv layer
        :param size_p1: pooling window size
        :param bp_num1: unit count of the flattened input layer
        :param bp_num2: unit count of the hidden layer
        :param bp_num3: unit count of the output layer
        :param rate_w: learning rate for weights
        :param rate_t: learning rate for thresholds
        """
        self.num_bp1 = bp_num1
        self.num_bp2 = bp_num2
        self.num_bp3 = bp_num3
        self.conv1 = conv1_get[:2]
        self.step_conv1 = conv1_get[2]
        self.size_pooling1 = size_p1
        self.rate_weight = rate_w
        self.rate_thre = rate_t
        # weights start uniform in (-0.5, 0.5); thresholds in (-1, 1)
        self.w_conv1 = [
            np.mat(-1 * np.random.rand(self.conv1[0], self.conv1[0]) + 0.5)
            for i in range(self.conv1[1])
        ]
        self.wkj = np.mat(-1 * np.random.rand(self.num_bp3, self.num_bp2) + 0.5)
        self.vji = np.mat(-1 * np.random.rand(self.num_bp2, self.num_bp1) + 0.5)
        self.thre_conv1 = -2 * np.random.rand(self.conv1[1]) + 1
        self.thre_bp2 = -2 * np.random.rand(self.num_bp2) + 1
        self.thre_bp3 = -2 * np.random.rand(self.num_bp3) + 1

    def save_model(self, save_path):
        """Serialize all hyper-parameters and learned parameters with pickle."""
        model_dic = {
            "num_bp1": self.num_bp1,
            "num_bp2": self.num_bp2,
            "num_bp3": self.num_bp3,
            "conv1": self.conv1,
            "step_conv1": self.step_conv1,
            "size_pooling1": self.size_pooling1,
            "rate_weight": self.rate_weight,
            "rate_thre": self.rate_thre,
            "w_conv1": self.w_conv1,
            "wkj": self.wkj,
            "vji": self.vji,
            "thre_conv1": self.thre_conv1,
            "thre_bp2": self.thre_bp2,
            "thre_bp3": self.thre_bp3,
        }
        with open(save_path, "wb") as f:
            pickle.dump(model_dic, f)
        print(f"Model saved: {save_path}")

    @classmethod
    def read_model(cls, model_path):
        """Rebuild a CNN instance from a file written by ``save_model``."""
        with open(model_path, "rb") as f:
            model_dic = pickle.load(f)  # noqa: S301

        conv_get = model_dic.get("conv1")
        conv_get.append(model_dic.get("step_conv1"))
        size_p1 = model_dic.get("size_pooling1")
        bp1 = model_dic.get("num_bp1")
        bp2 = model_dic.get("num_bp2")
        bp3 = model_dic.get("num_bp3")
        r_w = model_dic.get("rate_weight")
        r_t = model_dic.get("rate_thre")
        # create model instance
        conv_ins = CNN(conv_get, size_p1, bp1, bp2, bp3, r_w, r_t)
        # overwrite the random initial parameters with the saved ones
        conv_ins.w_conv1 = model_dic.get("w_conv1")
        conv_ins.wkj = model_dic.get("wkj")
        conv_ins.vji = model_dic.get("vji")
        conv_ins.thre_conv1 = model_dic.get("thre_conv1")
        conv_ins.thre_bp2 = model_dic.get("thre_bp2")
        conv_ins.thre_bp3 = model_dic.get("thre_bp3")
        return conv_ins

    def sig(self, x):
        """Sigmoid activation."""
        return 1 / (1 + np.exp(-1 * x))

    def do_round(self, x):
        """Round a prediction to 3 decimal places."""
        return round(x, 3)

    def convolute(self, data, convs, w_convs, thre_convs, conv_step):
        """Convolve ``data`` with every kernel in ``w_convs``.

        Returns the flattened input slices and the list of feature maps.
        """
        size_conv = convs[0]
        num_conv = convs[1]
        size_data = np.shape(data)[0]
        # get the data slice of original image data, data_focus
        data_focus = []
        for i_focus in range(0, size_data - size_conv + 1, conv_step):
            for j_focus in range(0, size_data - size_conv + 1, conv_step):
                focus = data[i_focus : i_focus + size_conv, j_focus : j_focus + size_conv]
                data_focus.append(focus)
        # calculate the feature map of every single kernel, and saved as list of matrix
        data_featuremap = []
        size_feature_map = int((size_data - size_conv) / conv_step + 1)
        for i_map in range(num_conv):
            featuremap = []
            for i_focus in range(len(data_focus)):
                net_focus = (
                    np.sum(np.multiply(data_focus[i_focus], w_convs[i_map]))
                    - thre_convs[i_map]
                )
                featuremap.append(self.sig(net_focus))
            featuremap = np.asmatrix(featuremap).reshape(size_feature_map, size_feature_map)
            data_featuremap.append(featuremap)

        # expanding the data slices to one dimension
        # BUGFIX: original called the nonexistent self.Expand_Mat
        focus1_list = []
        for each_focus in data_focus:
            focus1_list.extend(self._expand_mat(each_focus))
        focus_list = np.asarray(focus1_list)
        return focus_list, data_featuremap

    def pooling(self, featuremaps, size_pooling, pooling_type="average_pool"):
        """Pool every feature map with a non-overlapping window."""
        size_map = len(featuremaps[0])
        size_pooled = int(size_map / size_pooling)
        featuremap_pooled = []
        for i_map in range(len(featuremaps)):
            feature_map = featuremaps[i_map]
            map_pooled = []
            for i_focus in range(0, size_map, size_pooling):
                for j_focus in range(0, size_map, size_pooling):
                    focus = feature_map[
                        i_focus : i_focus + size_pooling,
                        j_focus : j_focus + size_pooling,
                    ]
                    if pooling_type == "average_pool":
                        # average pooling
                        map_pooled.append(np.average(focus))
                    elif pooling_type == "max_pooling":
                        # max pooling
                        map_pooled.append(np.max(focus))
            map_pooled = np.asmatrix(map_pooled).reshape(size_pooled, size_pooled)
            featuremap_pooled.append(map_pooled)
        return featuremap_pooled

    def _expand(self, data):
        """Flatten a list of matrices into one 1-D numpy array."""
        data_expanded = []
        for i in range(len(data)):
            shapes = np.shape(data[i])
            data_listed = data[i].reshape(1, shapes[0] * shapes[1])
            data_listed = data_listed.getA().tolist()[0]
            data_expanded.extend(data_listed)
        return np.asarray(data_expanded)

    def _expand_mat(self, data_mat):
        """Flatten a single matrix into a 1-row array."""
        data_mat = np.asarray(data_mat)
        shapes = np.shape(data_mat)
        data_expanded = data_mat.reshape(1, shapes[0] * shapes[1])
        return data_expanded

    def _calculate_gradient_from_pool(self, out_map, pd_pool, num_map, size_map, size_pooling):
        """Up-sample the pooled gradients and apply the sigmoid derivative."""
        pd_all = []
        i_pool = 0
        for i_map in range(num_map):
            pd_conv1 = np.ones((size_map, size_map))
            for i in range(0, size_map, size_pooling):
                for j in range(0, size_map, size_pooling):
                    pd_conv1[i : i + size_pooling, j : j + size_pooling] = pd_pool[i_pool]
                    i_pool = i_pool + 1
            pd_conv2 = np.multiply(pd_conv1, np.multiply(out_map[i_map], (1 - out_map[i_map])))
            pd_all.append(pd_conv2)
        return pd_all

    def train(self, patterns, datas_train, datas_teach, n_repeat, error_accuracy, draw_e=bool):
        """Train until ``n_repeat`` epochs or the mean error drops below
        ``error_accuracy``; returns the final mse.

        NOTE(review): ``draw_e=bool`` (the type, always truthy) is preserved
        from the original interface.
        """
        print("----------------------Start Training-------------------------")
        print((" - - Shape: Train_Data ", np.shape(datas_train)))
        print((" - - Shape: Teach_Data ", np.shape(datas_teach)))
        rp = 0
        all_mse = []
        mse = 1_00_00
        while rp < n_repeat and mse >= error_accuracy:
            error_count = 0
            print(f"-------------Learning Time {rp}--------------")
            for p in range(len(datas_train)):
                # print('------------Learning Image: %d--------------'%p)
                data_train = np.asmatrix(datas_train[p])
                data_teach = np.asarray(datas_teach[p])
                data_focus1, data_conved1 = self.convolute(
                    data_train,
                    self.conv1,
                    self.w_conv1,
                    self.thre_conv1,
                    conv_step=self.step_conv1,
                )
                data_pooled1 = self.pooling(data_conved1, self.size_pooling1)
                shape_featuremap1 = np.shape(data_conved1)
                data_bp_input = self._expand(data_pooled1)
                bp_out1 = data_bp_input
                bp_net_j = np.dot(bp_out1, self.vji.T) - self.thre_bp2
                bp_out2 = self.sig(bp_net_j)
                bp_net_k = np.dot(bp_out2, self.wkj.T) - self.thre_bp3
                bp_out3 = self.sig(bp_net_k)

                # --------------Model Learning ------------------------
                # calculate error and gradient---------------
                pd_k_all = np.multiply(
                    (data_teach - bp_out3), np.multiply(bp_out3, (1 - bp_out3))
                )
                pd_j_all = np.multiply(
                    np.dot(pd_k_all, self.wkj), np.multiply(bp_out2, (1 - bp_out2))
                )
                pd_i_all = np.dot(pd_j_all, self.vji)
                pd_conv1_pooled = pd_i_all / (self.size_pooling1 * self.size_pooling1)
                pd_conv1_pooled = pd_conv1_pooled.T.getA().tolist()
                pd_conv1_all = self._calculate_gradient_from_pool(
                    data_conved1,
                    pd_conv1_pooled,
                    shape_featuremap1[0],
                    shape_featuremap1[1],
                    self.size_pooling1,
                )
                # weight and threshold learning process---------
                # convolution layer
                for k_conv in range(self.conv1[1]):
                    pd_conv_list = self._expand_mat(pd_conv1_all[k_conv])
                    delta_w = self.rate_weight * np.dot(pd_conv_list, data_focus1)
                    self.w_conv1[k_conv] = self.w_conv1[k_conv] + delta_w.reshape(
                        (self.conv1[0], self.conv1[0])
                    )
                    self.thre_conv1[k_conv] = (
                        self.thre_conv1[k_conv]
                        - np.sum(pd_conv1_all[k_conv]) * self.rate_thre
                    )
                # all connected layer
                self.wkj = self.wkj + pd_k_all.T * bp_out2 * self.rate_weight
                self.vji = self.vji + pd_j_all.T * bp_out1 * self.rate_weight
                self.thre_bp3 = self.thre_bp3 - pd_k_all * self.rate_thre
                self.thre_bp2 = self.thre_bp2 - pd_j_all * self.rate_thre
                # calculate the sum error of all single image
                errors = np.sum(abs(data_teach - bp_out3))
                error_count += errors
                # print(' ----Teach ',data_teach)
                # print(' ----BP_output ',bp_out3)
            rp = rp + 1
            mse = error_count / patterns
            all_mse.append(mse)

        def draw_error():
            yplot = [error_accuracy for i in range(int(n_repeat * 1.2))]
            plt.plot(all_mse, "+-")
            plt.plot(yplot, "r--")
            plt.xlabel("Learning Times")
            plt.ylabel("All_mse")
            plt.grid(True, alpha=0.5)
            plt.show()

        print("------------------Training Complished---------------------")
        print((" - - Training epoch: ", rp, f" - - Mse: {mse:.6f}"))
        if draw_e:
            draw_error()
        return mse

    def predict(self, datas_test):
        """Run the forward pass for every test image; returns rounded outputs."""
        produce_out = []
        print("-------------------Start Testing-------------------------")
        print((" - - Shape: Test_Data ", np.shape(datas_test)))
        for p in range(len(datas_test)):
            data_test = np.asmatrix(datas_test[p])
            data_focus1, data_conved1 = self.convolute(
                data_test,
                self.conv1,
                self.w_conv1,
                self.thre_conv1,
                conv_step=self.step_conv1,
            )
            data_pooled1 = self.pooling(data_conved1, self.size_pooling1)
            data_bp_input = self._expand(data_pooled1)

            bp_out1 = data_bp_input
            bp_net_j = bp_out1 * self.vji.T - self.thre_bp2
            bp_out2 = self.sig(bp_net_j)
            bp_net_k = bp_out2 * self.wkj.T - self.thre_bp3
            bp_out3 = self.sig(bp_net_k)
            produce_out.extend(bp_out3.getA().tolist())
        res = [list(map(self.do_round, each)) for each in produce_out]
        return np.asarray(res)

    def convolution(self, data):
        """Return the conv-layer and pooled outputs for one image (for inspection)."""
        data_test = np.asmatrix(data)
        data_focus1, data_conved1 = self.convolute(
            data_test,
            self.conv1,
            self.w_conv1,
            self.thre_conv1,
            conv_step=self.step_conv1,
        )
        data_pooled1 = self.pooling(data_conved1, self.size_pooling1)
        return data_conved1, data_pooled1


if __name__ == "__main__":
    pass
33
import re
import warnings
from contextlib import contextmanager

from ...processing_utils import ProcessorMixin


class DonutProcessor(ProcessorMixin):
    """Wraps an image processor and a tokenizer into a single processor.

    ``__call__`` routes ``images`` to the image processor and ``text`` to the
    tokenizer; ``tokenajson`` converts a generated token sequence back into a
    (possibly nested) JSON-like dict.
    """

    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "AutoImageProcessor"
    tokenizer_class = "AutoTokenizer"

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        # accept the deprecated argument as a fallback
        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)
        self.current_processor = self.image_processor
        self._in_target_context_manager = False

    def __call__(self, *args, **kwargs):
        """Forward `images` to the image processor and `text` to the tokenizer."""
        # For backward compatibility
        if self._in_target_context_manager:
            return self.current_processor(*args, **kwargs)

        images = kwargs.pop("images", None)
        text = kwargs.pop("text", None)
        if len(args) > 0:
            # positional call: first arg is the images
            images = args[0]
            args = args[1:]

        if images is None and text is None:
            raise ValueError("You need to specify either an `images` or `text` input to process.")

        if images is not None:
            inputs = self.image_processor(images, *args, **kwargs)
        if text is not None:
            encodings = self.tokenizer(text, **kwargs)

        if text is None:
            return inputs
        elif images is None:
            return encodings
        else:
            inputs["labels"] = encodings["input_ids"]
            return inputs

    def batch_decode(self, *args, **kwargs):
        """Delegate to the tokenizer's ``batch_decode``."""
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        """Delegate to the tokenizer's ``decode``."""
        return self.tokenizer.decode(*args, **kwargs)

    @contextmanager
    def as_target_processor(self):
        """Deprecated: temporarily switch ``current_processor`` to the tokenizer."""
        warnings.warn(
            "`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your "
            "labels by using the argument `text` of the regular `__call__` method (either in the same call as "
            "your images inputs, or in a separate call."
        )
        self._in_target_context_manager = True
        self.current_processor = self.tokenizer
        yield
        self.current_processor = self.image_processor
        self._in_target_context_manager = False

    def tokenajson(self, tokens, is_inner_value=False, added_vocab=None):
        """Convert a ``<s_key>...</s_key>`` token sequence into a nested dict.

        Leaf values separated by ``<sep/>`` become lists; sibling groups become
        a list of dicts. Unmatched start tokens are dropped.
        """
        if added_vocab is None:
            added_vocab = self.tokenizer.get_added_vocab()

        output = {}

        while tokens:
            start_token = re.search(r"<s_(.*?)>", tokens, re.IGNORECASE)
            if start_token is None:
                break
            key = start_token.group(1)
            end_token = re.search(rf"</s_{key}>", tokens, re.IGNORECASE)
            start_token = start_token.group()
            if end_token is None:
                # no closing tag: discard the dangling start token
                tokens = tokens.replace(start_token, "")
            else:
                end_token = end_token.group()
                start_token_escaped = re.escape(start_token)
                end_token_escaped = re.escape(end_token)
                content = re.search(
                    f"{start_token_escaped}(.*?){end_token_escaped}", tokens, re.IGNORECASE
                )
                if content is not None:
                    content = content.group(1).strip()
                    if r"<s_" in content and r"</s_" in content:  # non-leaf node
                        value = self.tokenajson(content, is_inner_value=True, added_vocab=added_vocab)
                        if value:
                            if len(value) == 1:
                                value = value[0]
                            output[key] = value
                    else:  # leaf nodes
                        output[key] = []
                        for leaf in content.split(r"<sep/>"):
                            leaf = leaf.strip()
                            if leaf in added_vocab and leaf[0] == "<" and leaf[-2:] == "/>":
                                leaf = leaf[1:-2]  # for categorical special tokens
                            output[key].append(leaf)
                        if len(output[key]) == 1:
                            output[key] = output[key][0]

                tokens = tokens[tokens.find(end_token) + len(end_token) :].strip()
                if tokens[:6] == r"<sep/>":  # non-leaf nodes
                    return [output] + self.tokenajson(
                        tokens[6:], is_inner_value=True, added_vocab=added_vocab
                    )

        if len(output):
            return [output] if is_inner_value else output
        else:
            return [] if is_inner_value else {"text_sequence": tokens}

    @property
    def feature_extractor_class(self):
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
            FutureWarning,
        )
        return self.image_processor_class

    @property
    def feature_extractor(self):
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.",
            FutureWarning,
        )
        return self.image_processor
33
1
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict

from ..features import ClassLabel, Features, Image
from .base import TaskTemplate


@dataclass(frozen=True)
class ImageClassification(TaskTemplate):
    """Task template mapping dataset columns to the image-classification schema."""

    # `task` must survive asdict() even at its default so the task is identifiable
    task: str = field(default="image-classification", metadata={"include_in_asdict_even_if_is_default": True})
    input_schema: ClassVar[Features] = Features({"image": Image()})
    label_schema: ClassVar[Features] = Features({"labels": ClassLabel})
    image_column: str = "image"
    label_column: str = "labels"

    def align_with_features(self, features):
        """Return a copy of this template whose label schema uses the dataset's
        actual ClassLabel feature.

        Raises ValueError when the label column is missing or not a ClassLabel.
        """
        if self.label_column not in features:
            raise ValueError(f"Column {self.label_column} is not present in features.")
        if not isinstance(features[self.label_column], ClassLabel):
            raise ValueError(f"Column {self.label_column} is not a ClassLabel.")
        task_template = copy.deepcopy(self)
        label_schema = self.label_schema.copy()
        label_schema["labels"] = features[self.label_column]
        # frozen dataclass: bypass __setattr__ via __dict__
        task_template.__dict__["label_schema"] = label_schema
        return task_template

    @property
    def column_mapping(self) -> Dict[str, str]:
        return {
            self.image_column: "image",
            self.label_column: "labels",
        }
33
import inspect
import unittest

from transformers import BitConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available

from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin


if is_torch_available():
    import torch
    from torch import nn

    from transformers import BitBackbone, BitForImageClassification, BitImageProcessor, BitModel
    from transformers.models.bit.modeling_bit import BIT_PRETRAINED_MODEL_ARCHIVE_LIST

if is_vision_available():
    from PIL import Image


class BitModelTester:
    """Builds small Bit configs/inputs and shared model checks for the tests below."""

    def __init__(
        self,
        parent,
        batch_size=3,
        image_size=32,
        num_channels=3,
        embeddings_size=10,
        hidden_sizes=[8, 16, 32, 64],
        depths=[1, 1, 2, 1],
        is_training=True,
        use_labels=True,
        hidden_act="relu",
        num_labels=3,
        scope=None,
        out_features=["stage2", "stage3", "stage4"],
        out_indices=[2, 3, 4],
        num_groups=1,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.embeddings_size = embeddings_size
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_act = hidden_act
        self.num_labels = num_labels
        self.scope = scope
        self.num_stages = len(depths)
        self.out_features = out_features
        self.out_indices = out_indices
        self.num_groups = num_groups

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)

        config = self.get_config()

        return config, pixel_values, labels

    def get_config(self):
        return BitConfig(
            num_channels=self.num_channels,
            embeddings_size=self.embeddings_size,
            hidden_sizes=self.hidden_sizes,
            depths=self.depths,
            hidden_act=self.hidden_act,
            num_labels=self.num_labels,
            out_features=self.out_features,
            out_indices=self.out_indices,
            num_groups=self.num_groups,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = BitModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.last_hidden_state.shape,
            (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32),
        )

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.num_labels
        model = BitForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_backbone(self, config, pixel_values, labels):
        model = BitBackbone(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)

        # verify feature maps
        self.parent.assertEqual(len(result.feature_maps), len(config.out_features))
        self.parent.assertListEqual(list(result.feature_maps[0].shape), [self.batch_size, self.hidden_sizes[1], 4, 4])

        # verify channels
        self.parent.assertEqual(len(model.channels), len(config.out_features))
        self.parent.assertListEqual(model.channels, config.hidden_sizes[1:])

        # verify backbone works with out_features=None
        config.out_features = None
        model = BitBackbone(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)

        # verify feature maps
        self.parent.assertEqual(len(result.feature_maps), 1)
        self.parent.assertListEqual(list(result.feature_maps[0].shape), [self.batch_size, self.hidden_sizes[-1], 1, 1])

        # verify channels
        self.parent.assertEqual(len(model.channels), 1)
        self.parent.assertListEqual(model.channels, [config.hidden_sizes[-1]])

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict


@require_torch
class BitModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """Standard model-tester suite for Bit (no attention outputs, no embeddings)."""

    all_model_classes = (BitModel, BitForImageClassification, BitBackbone) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": BitModel, "image-classification": BitForImageClassification}
        if is_torch_available()
        else {}
    )

    fx_compatible = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False

    def setUp(self):
        self.model_tester = BitModelTester(self)
        self.config_tester = ConfigTester(self, config_class=BitConfig, has_text_modality=False)

    def test_config(self):
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def create_and_test_config_common_properties(self):
        return

    @unittest.skip(reason="Bit does not output attentions")
    def test_attention_outputs(self):
        pass

    @unittest.skip(reason="Bit does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="Bit does not support input and output embeddings")
    def test_model_common_attributes(self):
        pass

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_backbone(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_backbone(*config_and_inputs)

    def test_initialization(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config=config)
            for name, module in model.named_modules():
                if isinstance(module, (nn.BatchNorm2d, nn.GroupNorm)):
                    self.assertTrue(
                        torch.all(module.weight == 1),
                        msg=f"Parameter {name} of model {model_class} seems not properly initialized",
                    )
                    self.assertTrue(
                        torch.all(module.bias == 0),
                        msg=f"Parameter {name} of model {model_class} seems not properly initialized",
                    )

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states

            expected_num_stages = self.model_tester.num_stages
            self.assertEqual(len(hidden_states), expected_num_stages + 1)

            # Bit's feature maps are of shape (batch_size, num_channels, height, width)
            self.assertListEqual(
                list(hidden_states[0].shape[-2:]),
                [self.model_tester.image_size // 4, self.model_tester.image_size // 4],
            )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        layers_type = ["preactivation", "bottleneck"]
        for model_class in self.all_model_classes:
            for layer_type in layers_type:
                config.layer_type = layer_type
                inputs_dict["output_hidden_states"] = True
                check_hidden_states_output(inputs_dict, config, model_class)

                # check that output_hidden_states also work using config
                del inputs_dict["output_hidden_states"]
                config.output_hidden_states = True

                check_hidden_states_output(inputs_dict, config, model_class)

    @unittest.skip(reason="Bit does not use feedforward chunking")
    def test_feedforward_chunking(self):
        pass

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in BIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = BitModel.from_pretrained(model_name)
            self.assertIsNotNone(model)


def prepare_img():
    """Load the standard COCO test fixture image."""
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image


@require_torch
@require_vision
class BitModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return (
            BitImageProcessor.from_pretrained(BIT_PRETRAINED_MODEL_ARCHIVE_LIST[0]) if is_vision_available() else None
        )

    @slow
    def test_inference_image_classification_head(self):
        model = BitForImageClassification.from_pretrained(BIT_PRETRAINED_MODEL_ARCHIVE_LIST[0]).to(torch_device)

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([[-0.6526, -0.5263, -1.4398]]).to(torch_device)

        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))


@require_torch
class BitBackboneTest(BackboneTesterMixin, unittest.TestCase):
    all_model_classes = (BitBackbone,) if is_torch_available() else ()
    config_class = BitConfig

    has_attentions = False

    def setUp(self):
        self.model_tester = BitModelTester(self)
33
1
import numpy as np
import skfuzzy as fuzz

if __name__ == "__main__":
    # Create universe of discourse in Python using linspace ()
    X = np.linspace(start=0, stop=75, num=75, endpoint=True, retstep=False)

    # Create two fuzzy sets by defining any membership function
    # (trapmf(), gbellmf(), gaussmf(), etc).
    abc1 = [0, 25, 50]
    abc2 = [25, 50, 75]
    young = fuzz.membership.trimf(X, abc1)
    middle_aged = fuzz.membership.trimf(X, abc2)

    # Compute the different operations using inbuilt functions.
    one = np.ones(75)
    zero = np.zeros((75,))
    # 1. Union = max(µA(x), µB(x))
    union = fuzz.fuzzy_or(X, young, X, middle_aged)[1]
    # 2. Intersection = min(µA(x), µB(x))
    intersection = fuzz.fuzzy_and(X, young, X, middle_aged)[1]
    # 3. Complement (A) = (1- min(µA(x))
    complement_a = fuzz.fuzzy_not(young)
    # 4. Difference (A/B) = min(µA(x),(1- µB(x)))
    difference = fuzz.fuzzy_and(X, young, X, fuzz.fuzzy_not(middle_aged)[1])[1]
    # 5. Algebraic Sum = [µA(x) + µB(x) – (µA(x) * µB(x))]
    alg_sum = young + middle_aged - (young * middle_aged)
    # 6. Algebraic Product = (µA(x) * µB(x))
    alg_product = young * middle_aged
    # 7. Bounded Sum = min[1,(µA(x), µB(x))]
    bdd_sum = fuzz.fuzzy_and(X, one, X, young + middle_aged)[1]
    # 8. Bounded difference = min[0,(µA(x), µB(x))]
    bdd_difference = fuzz.fuzzy_or(X, zero, X, young - middle_aged)[1]

    # max-min composition
    # max-product composition

    # Plot each set A, set B and each operation result using plot() and subplot().
    from matplotlib import pyplot as plt

    plt.figure()

    plt.subplot(4, 3, 1)
    plt.plot(X, young)
    plt.title("Young")
    plt.grid(True)

    plt.subplot(4, 3, 2)
    plt.plot(X, middle_aged)
    plt.title("Middle aged")
    plt.grid(True)

    plt.subplot(4, 3, 3)
    plt.plot(X, union)
    plt.title("union")
    plt.grid(True)

    plt.subplot(4, 3, 4)
    plt.plot(X, intersection)
    plt.title("intersection")
    plt.grid(True)

    plt.subplot(4, 3, 5)
    plt.plot(X, complement_a)
    plt.title("complement_a")
    plt.grid(True)

    plt.subplot(4, 3, 6)
    plt.plot(X, difference)
    plt.title("difference a/b")
    plt.grid(True)

    plt.subplot(4, 3, 7)
    plt.plot(X, alg_sum)
    plt.title("alg_sum")
    plt.grid(True)

    plt.subplot(4, 3, 8)
    plt.plot(X, alg_product)
    plt.title("alg_product")
    plt.grid(True)

    plt.subplot(4, 3, 9)
    plt.plot(X, bdd_sum)
    plt.title("bdd_sum")
    plt.grid(True)

    plt.subplot(4, 3, 10)
    plt.plot(X, bdd_difference)
    plt.title("bdd_difference")
    plt.grid(True)

    plt.subplots_adjust(hspace=0.5)
    plt.show()
33
import numpy as np
import torch
from torch.nn import CrossEntropyLoss
from transformers import AutoModelForCausalLM, AutoTokenizer

import datasets
from datasets import logging


_CITATION = """\
"""

_DESCRIPTION = """
Perplexity (PPL) is one of the most common metrics for evaluating language models.
It is defined as the exponentiated average negative log-likelihood of a sequence.

For more information, see https://huggingface.co/docs/transformers/perplexity
"""

_KWARGS_DESCRIPTION = """
Args:
    model_id (str): model used for calculating Perplexity
            NOTE: Perplexity can only be calculated for causal language models.
                    This includes models such as gpt2, causal variations of bert,
                    causal versions of t5, and more (the full list can be found
                    in the AutoModelForCausalLM documentation here:
                    https://huggingface.co/docs/transformers/master/en/model_doc/auto#transformers.AutoModelForCausalLM )
    input_texts (list of str): input text, each separate text snippet
        is one list entry.
    batch_size (int): the batch size to run texts through the model. Defaults to 16.
    add_start_token (bool): whether to add the start token to the texts,
        so the perplexity can include the probability of the first word. Defaults to True.
    device (str): device to run on, defaults to 'cuda' when available
Returns:
    perplexity: dictionary containing the perplexity scores for the texts
        in the input list, as well as the mean perplexity. If one of the input texts is
        longer than the max input length of the model, then it is truncated to the
        max length for the perplexity computation.
Examples:
    Example 1:
        >>> perplexity = datasets.load_metric(\"perplexity\")
        >>> input_texts = [\"lorem ipsum\", \"Happy Birthday!\", \"Bienvenue\"]
        >>> results = perplexity.compute(model_id='gpt2',
        ...                              add_start_token=False,
        ...                              input_texts=input_texts) # doctest:+ELLIPSIS
        >>> print(list(results.keys()))
        ['perplexities', 'mean_perplexity']
        >>> print(round(results[\"mean_perplexity\"], 2))
        78.22
        >>> print(round(results[\"perplexities\"][0], 2))
        11.11

    Example 2:
        >>> perplexity = datasets.load_metric(\"perplexity\")
        >>> input_texts = datasets.load_dataset(\"wikitext\",
        ...                                     \"wikitext-2-raw-v1\",
        ...                                     split=\"test\")[\"text\"][:50] # doctest:+ELLIPSIS
        [...]
        >>> input_texts = [s for s in input_texts if s!='']
        >>> results = perplexity.compute(model_id='gpt2',
        ...                              input_texts=input_texts) # doctest:+ELLIPSIS
        >>> print(list(results.keys()))
        ['perplexities', 'mean_perplexity']
        >>> print(round(results[\"mean_perplexity\"], 2))
        60.35
        >>> print(round(results[\"perplexities\"][0], 2))
        81.12
"""


@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class __magic_name__(datasets.Metric):
    """Perplexity metric: exponentiated mean negative log-likelihood of each
    input text under a causal language model."""

    # BUG FIX: both methods previously shared one mangled name (the second def
    # shadowed the first); datasets.Metric dispatches on `_info` / `_compute`.
    def _info(self):
        """Describe the metric's inputs and reference documentation."""
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "input_texts": datasets.Value("string"),
                }
            ),
            reference_urls=["https://huggingface.co/docs/transformers/perplexity"],
        )

    def _compute(self, input_texts, model_id, batch_size: int = 16, add_start_token: bool = True, device=None):
        """Compute per-text and mean perplexity of `input_texts` under `model_id`.

        Returns a dict with keys "perplexities" (list of float) and
        "mean_perplexity" (float).
        """
        if device is not None:
            assert device in ["gpu", "cpu", "cuda"], "device should be either gpu, cpu or cuda."
            if device == "gpu":
                device = "cuda"
        else:
            device = "cuda" if torch.cuda.is_available() else "cpu"

        model = AutoModelForCausalLM.from_pretrained(model_id)
        model = model.to(device)

        tokenizer = AutoTokenizer.from_pretrained(model_id)

        # if batch_size > 1 (which generally leads to padding being required), and
        # if there is not an already assigned pad_token, assign an existing
        # special token to also be the padding token
        if tokenizer.pad_token is None and batch_size > 1:
            existing_special_tokens = list(tokenizer.special_tokens_map_extended.values())
            # check that the model already has at least one special token defined
            assert (
                len(existing_special_tokens) > 0
            ), "If batch_size > 1, model must have at least one special token to use for padding. Please use a different model or set batch_size=1."
            # assign one of the special tokens to also be the pad token
            tokenizer.add_special_tokens({"pad_token": existing_special_tokens[0]})

        if add_start_token:
            # leave room for <BOS> token to be added:
            assert (
                tokenizer.bos_token is not None
            ), "Input model must already have a BOS token if using add_start_token=True. Please use a different model, or set add_start_token=False"
            max_tokenized_len = model.config.max_length - 1
        else:
            max_tokenized_len = model.config.max_length

        encodings = tokenizer(
            input_texts,
            add_special_tokens=False,
            padding=True,
            truncation=True,
            max_length=max_tokenized_len,
            return_tensors="pt",
            return_attention_mask=True,
        ).to(device)

        encoded_texts = encodings["input_ids"]
        attn_masks = encodings["attention_mask"]

        # check that each input is long enough:
        if add_start_token:
            assert torch.all(torch.ge(attn_masks.sum(1), 1)), "Each input text must be at least one token long."
        else:
            assert torch.all(
                torch.ge(attn_masks.sum(1), 2)
            ), "When add_start_token=False, each input text must be at least two tokens long. Run with add_start_token=True if inputting strings of only one token, and remove all empty input strings."

        ppls = []
        loss_fct = CrossEntropyLoss(reduction="none")

        for start_index in logging.tqdm(range(0, len(encoded_texts), batch_size)):
            end_index = min(start_index + batch_size, len(encoded_texts))
            encoded_batch = encoded_texts[start_index:end_index]
            attn_mask = attn_masks[start_index:end_index]

            if add_start_token:
                bos_tokens_tensor = torch.tensor([[tokenizer.bos_token_id]] * encoded_batch.size(dim=0)).to(device)
                encoded_batch = torch.cat([bos_tokens_tensor, encoded_batch], dim=1)
                # BUG FIX: `torch.intaa` is not a torch dtype; the mask for the
                # prepended BOS positions must be int64 like the tokenizer's mask.
                attn_mask = torch.cat(
                    [torch.ones(bos_tokens_tensor.size(), dtype=torch.int64).to(device), attn_mask], dim=1
                )

            labels = encoded_batch

            with torch.no_grad():
                out_logits = model(encoded_batch, attention_mask=attn_mask).logits

            # Shift so that position t predicts token t+1.
            shift_logits = out_logits[..., :-1, :].contiguous()
            shift_labels = labels[..., 1:].contiguous()
            shift_attention_mask_batch = attn_mask[..., 1:].contiguous()

            # BUG FIX: `torch.expa` does not exist; PPL = exp(mean NLL), with
            # the mean taken over non-padding positions only.
            perplexity_batch = torch.exp(
                (loss_fct(shift_logits.transpose(1, 2), shift_labels) * shift_attention_mask_batch).sum(1)
                / shift_attention_mask_batch.sum(1)
            )

            ppls += perplexity_batch.tolist()

        return {"perplexities": ppls, "mean_perplexity": np.mean(ppls)}
33
1
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_flax_available,
    is_tf_available,
    is_torch_available,
)


# Map of submodule name -> public names, consumed lazily by _LazyModule.
_import_structure = {"configuration_unispeech": ["UNISPEECH_PRETRAINED_CONFIG_ARCHIVE_MAP", "UniSpeechConfig"]}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    # Torch is available: expose the modeling classes as well.
    # BUG FIX: this list was previously bound to a throwaway name instead of
    # being registered under "modeling_unispeech" in the lazy-import map.
    _import_structure["modeling_unispeech"] = [
        "UNISPEECH_PRETRAINED_MODEL_ARCHIVE_LIST",
        "UniSpeechForCTC",
        "UniSpeechForPreTraining",
        "UniSpeechForSequenceClassification",
        "UniSpeechModel",
        "UniSpeechPreTrainedModel",
    ]

if TYPE_CHECKING:
    # Static type checkers see the real imports.
    from .configuration_unispeech import UNISPEECH_PRETRAINED_CONFIG_ARCHIVE_MAP, UniSpeechConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_unispeech import (
            UNISPEECH_PRETRAINED_MODEL_ARCHIVE_LIST,
            UniSpeechForCTC,
            UniSpeechForPreTraining,
            UniSpeechForSequenceClassification,
            UniSpeechModel,
            UniSpeechPreTrainedModel,
        )
else:
    import sys

    # BUG FIX: the lazy proxy must replace this module in sys.modules; it was
    # previously assigned to a throwaway name, so laziness never took effect.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
33
import os
from datetime import datetime as dt

from github import Github

# Issues carrying any of these labels are never auto-staled/closed.
LABELS_TO_EXEMPT = [
    "good first issue",
    "good second issue",
    "good difficult issue",
    "enhancement",
    "new pipeline/model",
    "new scheduler",
    "wip",
]


def main() -> None:
    """Stale-bot for huggingface/diffusers open issues.

    Closes issues 7 days after a stale notification with no activity, un-stales
    issues a human commented on, and posts a stale notice after 23 idle days.
    Requires a GITHUB_TOKEN environment variable with repo access.

    BUG FIX: the entry point was defined under a mangled name while the
    __main__ guard calls ``main()``; the sort key's lambda parameter also did
    not match the name it used.
    """
    g = Github(os.environ["GITHUB_TOKEN"])
    repo = g.get_repo("huggingface/diffusers")
    open_issues = repo.get_issues(state="open")

    for issue in open_issues:
        # Newest comment first.
        comments = sorted(issue.get_comments(), key=lambda i: i.created_at, reverse=True)
        last_comment = comments[0] if len(comments) > 0 else None
        if (
            last_comment is not None
            and last_comment.user.login == "github-actions[bot]"
            and (dt.utcnow() - issue.updated_at).days > 7
            and (dt.utcnow() - issue.created_at).days >= 30
            and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels())
        ):
            # Closes the issue after 7 days of inactivity since the Stalebot notification.
            issue.edit(state="closed")
        elif (
            "stale" in issue.get_labels()
            and last_comment is not None
            and last_comment.user.login != "github-actions[bot]"
        ):
            # Opens the issue if someone other than Stalebot commented.
            issue.edit(state="open")
            issue.remove_from_labels("stale")
        elif (
            (dt.utcnow() - issue.updated_at).days > 23
            and (dt.utcnow() - issue.created_at).days >= 30
            and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels())
        ):
            # Post a Stalebot notification after 23 days of inactivity.
            issue.create_comment(
                "This issue has been automatically marked as stale because it has not had "
                "recent activity. If you think this still needs to be addressed "
                "please comment on this thread.\n\nPlease note that issues that do not follow the "
                "[contributing guidelines](https://github.com/huggingface/diffusers/blob/main/CONTRIBUTING.md) "
                "are likely to be ignored."
            )
            issue.add_to_labels("stale")


if __name__ == "__main__":
    main()
33
1
import os import shutil import sys import tempfile import unittest from pathlib import Path import pytest import transformers from transformers import ( BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP, AutoTokenizer, BertConfig, BertTokenizer, BertTokenizerFast, CTRLTokenizer, GPTaTokenizer, GPTaTokenizerFast, PreTrainedTokenizerFast, RobertaTokenizer, RobertaTokenizerFast, is_tokenizers_available, ) from transformers.models.auto.configuration_auto import CONFIG_MAPPING, AutoConfig from transformers.models.auto.tokenization_auto import ( TOKENIZER_MAPPING, get_tokenizer_config, tokenizer_class_from_name, ) from transformers.models.roberta.configuration_roberta import RobertaConfig from transformers.testing_utils import ( DUMMY_DIFF_TOKENIZER_IDENTIFIER, DUMMY_UNKNOWN_IDENTIFIER, SMALL_MODEL_IDENTIFIER, RequestCounter, require_tokenizers, slow, ) sys.path.append(str(Path(__file__).parent.parent.parent.parent / """utils""")) from test_module.custom_configuration import CustomConfig # noqa E402 from test_module.custom_tokenization import CustomTokenizer # noqa E402 if is_tokenizers_available(): from test_module.custom_tokenization_fast import CustomTokenizerFast class __magic_name__ (unittest.TestCase ): '''simple docstring''' def SCREAMING_SNAKE_CASE__ ( self:Any ): snake_case__ = 0 @slow def SCREAMING_SNAKE_CASE__ ( self:Any ): for model_name in (x for x in BERT_PRETRAINED_CONFIG_ARCHIVE_MAP.keys() if "japanese" not in x): snake_case__ = AutoTokenizer.from_pretrained(_a ) self.assertIsNotNone(_a ) self.assertIsInstance(_a , (BertTokenizer, BertTokenizerFast) ) self.assertGreater(len(_a ) , 0 ) for model_name in GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP.keys(): snake_case__ = AutoTokenizer.from_pretrained(_a ) self.assertIsNotNone(_a ) self.assertIsInstance(_a , (GPTaTokenizer, GPTaTokenizerFast) ) self.assertGreater(len(_a ) , 0 ) def SCREAMING_SNAKE_CASE__ ( self:Optional[int] ): snake_case__ = AutoTokenizer.from_pretrained(_a ) 
self.assertIsInstance(_a , (BertTokenizer, BertTokenizerFast) ) self.assertEqual(tokenizer.vocab_size , 12 ) def SCREAMING_SNAKE_CASE__ ( self:List[Any] ): snake_case__ = AutoTokenizer.from_pretrained(_a ) self.assertIsInstance(_a , (RobertaTokenizer, RobertaTokenizerFast) ) self.assertEqual(tokenizer.vocab_size , 20 ) def SCREAMING_SNAKE_CASE__ ( self:List[str] ): snake_case__ = AutoConfig.from_pretrained(_a ) self.assertIsInstance(_a , _a ) # Check that tokenizer_type ≠ model_type snake_case__ = AutoTokenizer.from_pretrained(_a , config=_a ) self.assertIsInstance(_a , (BertTokenizer, BertTokenizerFast) ) self.assertEqual(tokenizer.vocab_size , 12 ) def SCREAMING_SNAKE_CASE__ ( self:Dict ): with tempfile.TemporaryDirectory() as tmp_dir: shutil.copy('''./tests/fixtures/vocab.txt''' , os.path.join(_a , '''vocab.txt''' ) ) snake_case__ = AutoTokenizer.from_pretrained(_a , tokenizer_type='''bert''' , use_fast=_a ) self.assertIsInstance(_a , _a ) with tempfile.TemporaryDirectory() as tmp_dir: shutil.copy('''./tests/fixtures/vocab.json''' , os.path.join(_a , '''vocab.json''' ) ) shutil.copy('''./tests/fixtures/merges.txt''' , os.path.join(_a , '''merges.txt''' ) ) snake_case__ = AutoTokenizer.from_pretrained(_a , tokenizer_type='''gpt2''' , use_fast=_a ) self.assertIsInstance(_a , _a ) @require_tokenizers def SCREAMING_SNAKE_CASE__ ( self:Optional[int] ): with tempfile.TemporaryDirectory() as tmp_dir: shutil.copy('''./tests/fixtures/vocab.txt''' , os.path.join(_a , '''vocab.txt''' ) ) snake_case__ = AutoTokenizer.from_pretrained(_a , tokenizer_type='''bert''' ) self.assertIsInstance(_a , _a ) with tempfile.TemporaryDirectory() as tmp_dir: shutil.copy('''./tests/fixtures/vocab.json''' , os.path.join(_a , '''vocab.json''' ) ) shutil.copy('''./tests/fixtures/merges.txt''' , os.path.join(_a , '''merges.txt''' ) ) snake_case__ = AutoTokenizer.from_pretrained(_a , tokenizer_type='''gpt2''' ) self.assertIsInstance(_a , _a ) def SCREAMING_SNAKE_CASE__ ( self:Union[str, Any] ): 
with pytest.raises(_a ): AutoTokenizer.from_pretrained('''./''' , tokenizer_type='''xxx''' ) @require_tokenizers def SCREAMING_SNAKE_CASE__ ( self:Dict ): for tokenizer_class in [BertTokenizer, BertTokenizerFast, AutoTokenizer]: snake_case__ = tokenizer_class.from_pretrained('''wietsedv/bert-base-dutch-cased''' ) self.assertIsInstance(_a , (BertTokenizer, BertTokenizerFast) ) if isinstance(_a , _a ): self.assertEqual(tokenizer.basic_tokenizer.do_lower_case , _a ) else: self.assertEqual(tokenizer.do_lower_case , _a ) self.assertEqual(tokenizer.model_max_length , 5_12 ) @require_tokenizers def SCREAMING_SNAKE_CASE__ ( self:Optional[Any] ): for tokenizer_class in [BertTokenizer, BertTokenizerFast, AutoTokenizer]: with self.assertRaisesRegex( _a , '''julien-c/herlolip-not-exists is not a local folder and is not a valid model identifier''' , ): snake_case__ = tokenizer_class.from_pretrained('''julien-c/herlolip-not-exists''' ) def SCREAMING_SNAKE_CASE__ ( self:Tuple ): # tests: https://github.com/huggingface/transformers/pull/13251 # 1. models with `-`, e.g. xlm-roberta -> xlm_roberta # 2. models that don't remap 1-1 from model-name to model file, e.g., openai-gpt -> openai snake_case__ = TOKENIZER_MAPPING.values() snake_case__ = [] for slow_tok, fast_tok in tokenizers: if slow_tok is not None: tokenizer_names.append(slow_tok.__name__ ) if fast_tok is not None: tokenizer_names.append(fast_tok.__name__ ) for tokenizer_name in tokenizer_names: # must find the right class tokenizer_class_from_name(_a ) @require_tokenizers def SCREAMING_SNAKE_CASE__ ( self:int ): self.assertIsInstance(AutoTokenizer.from_pretrained('''bert-base-cased''' , use_fast=_a ) , _a ) self.assertIsInstance(AutoTokenizer.from_pretrained('''bert-base-cased''' ) , _a ) @require_tokenizers def SCREAMING_SNAKE_CASE__ ( self:str ): snake_case__ = AutoTokenizer.from_pretrained('''distilbert-base-uncased''' , do_lower_case=_a ) snake_case__ = '''Hello, world. 
How are you?''' snake_case__ = tokenizer.tokenize(_a ) self.assertEqual('''[UNK]''' , tokens[0] ) snake_case__ = AutoTokenizer.from_pretrained('''microsoft/mpnet-base''' , do_lower_case=_a ) snake_case__ = tokenizer.tokenize(_a ) self.assertEqual('''[UNK]''' , tokens[0] ) @require_tokenizers def SCREAMING_SNAKE_CASE__ ( self:Union[str, Any] ): snake_case__ = AutoTokenizer.from_pretrained('''robot-test/dummy-tokenizer-fast-with-model-config''' ) self.assertEqual(type(_a ) , _a ) self.assertEqual(tokenizer.model_max_length , 5_12 ) self.assertEqual(tokenizer.vocab_size , 3_00_00 ) self.assertEqual(tokenizer.unk_token , '''[UNK]''' ) self.assertEqual(tokenizer.padding_side , '''right''' ) self.assertEqual(tokenizer.truncation_side , '''right''' ) def SCREAMING_SNAKE_CASE__ ( self:Optional[int] ): snake_case__ = AutoTokenizer.from_pretrained(_a ) self.assertIsInstance(_a , (BertTokenizer, BertTokenizerFast) ) with tempfile.TemporaryDirectory() as tmp_dir: tokenizer.save_pretrained(_a ) snake_case__ = AutoTokenizer.from_pretrained(_a ) self.assertIsInstance(_a , tokenizer.__class__ ) self.assertEqual(tokenizera.vocab_size , 12 ) def SCREAMING_SNAKE_CASE__ ( self:Any ): snake_case__ = AutoTokenizer.from_pretrained('''ctrl''' ) # There is no fast CTRL so this always gives us a slow tokenizer. self.assertIsInstance(_a , _a ) def SCREAMING_SNAKE_CASE__ ( self:Tuple ): # Check we can load the tokenizer config of an online model. snake_case__ = get_tokenizer_config('''bert-base-cased''' ) snake_case__ = config.pop('''_commit_hash''' , _a ) # If we ever update bert-base-cased tokenizer config, this dict here will need to be updated. self.assertEqual(_a , {'''do_lower_case''': False} ) # This model does not have a tokenizer_config so we get back an empty dict. snake_case__ = get_tokenizer_config(_a ) self.assertDictEqual(_a , {} ) # A tokenizer saved with `save_pretrained` always creates a tokenizer config. 
snake_case__ = AutoTokenizer.from_pretrained(_a ) with tempfile.TemporaryDirectory() as tmp_dir: tokenizer.save_pretrained(_a ) snake_case__ = get_tokenizer_config(_a ) # Check the class of the tokenizer was properly saved (note that it always saves the slow class). self.assertEqual(config['''tokenizer_class'''] , '''BertTokenizer''' ) def SCREAMING_SNAKE_CASE__ ( self:List[str] ): try: AutoConfig.register('''custom''' , _a ) AutoTokenizer.register(_a , slow_tokenizer_class=_a ) # Trying to register something existing in the Transformers library will raise an error with self.assertRaises(_a ): AutoTokenizer.register(_a , slow_tokenizer_class=_a ) snake_case__ = CustomTokenizer.from_pretrained(_a ) with tempfile.TemporaryDirectory() as tmp_dir: tokenizer.save_pretrained(_a ) snake_case__ = AutoTokenizer.from_pretrained(_a ) self.assertIsInstance(_a , _a ) finally: if "custom" in CONFIG_MAPPING._extra_content: del CONFIG_MAPPING._extra_content["custom"] if CustomConfig in TOKENIZER_MAPPING._extra_content: del TOKENIZER_MAPPING._extra_content[CustomConfig] @require_tokenizers def SCREAMING_SNAKE_CASE__ ( self:str ): try: AutoConfig.register('''custom''' , _a ) # Can register in two steps AutoTokenizer.register(_a , slow_tokenizer_class=_a ) self.assertEqual(TOKENIZER_MAPPING[CustomConfig] , (CustomTokenizer, None) ) AutoTokenizer.register(_a , fast_tokenizer_class=_a ) self.assertEqual(TOKENIZER_MAPPING[CustomConfig] , (CustomTokenizer, CustomTokenizerFast) ) del TOKENIZER_MAPPING._extra_content[CustomConfig] # Can register in one step AutoTokenizer.register( _a , slow_tokenizer_class=_a , fast_tokenizer_class=_a ) self.assertEqual(TOKENIZER_MAPPING[CustomConfig] , (CustomTokenizer, CustomTokenizerFast) ) # Trying to register something existing in the Transformers library will raise an error with self.assertRaises(_a ): AutoTokenizer.register(_a , fast_tokenizer_class=_a ) # We pass through a bert tokenizer fast cause there is no converter slow to fast for our new 
toknizer # and that model does not have a tokenizer.json with tempfile.TemporaryDirectory() as tmp_dir: snake_case__ = BertTokenizerFast.from_pretrained(_a ) bert_tokenizer.save_pretrained(_a ) snake_case__ = CustomTokenizerFast.from_pretrained(_a ) with tempfile.TemporaryDirectory() as tmp_dir: tokenizer.save_pretrained(_a ) snake_case__ = AutoTokenizer.from_pretrained(_a ) self.assertIsInstance(_a , _a ) snake_case__ = AutoTokenizer.from_pretrained(_a , use_fast=_a ) self.assertIsInstance(_a , _a ) finally: if "custom" in CONFIG_MAPPING._extra_content: del CONFIG_MAPPING._extra_content["custom"] if CustomConfig in TOKENIZER_MAPPING._extra_content: del TOKENIZER_MAPPING._extra_content[CustomConfig] def SCREAMING_SNAKE_CASE__ ( self:Dict ): # If remote code is not set, we will time out when asking whether to load the model. with self.assertRaises(_a ): snake_case__ = AutoTokenizer.from_pretrained('''hf-internal-testing/test_dynamic_tokenizer''' ) # If remote code is disabled, we can't load this config. with self.assertRaises(_a ): snake_case__ = AutoTokenizer.from_pretrained( '''hf-internal-testing/test_dynamic_tokenizer''' , trust_remote_code=_a ) snake_case__ = AutoTokenizer.from_pretrained('''hf-internal-testing/test_dynamic_tokenizer''' , trust_remote_code=_a ) self.assertTrue(tokenizer.special_attribute_present ) # Test tokenizer can be reloaded. 
with tempfile.TemporaryDirectory() as tmp_dir: tokenizer.save_pretrained(_a ) snake_case__ = AutoTokenizer.from_pretrained(_a , trust_remote_code=_a ) self.assertTrue(reloaded_tokenizer.special_attribute_present ) if is_tokenizers_available(): self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizerFast''' ) self.assertEqual(reloaded_tokenizer.__class__.__name__ , '''NewTokenizerFast''' ) # Test we can also load the slow version snake_case__ = AutoTokenizer.from_pretrained( '''hf-internal-testing/test_dynamic_tokenizer''' , trust_remote_code=_a , use_fast=_a ) self.assertTrue(tokenizer.special_attribute_present ) self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizer''' ) # Test tokenizer can be reloaded. with tempfile.TemporaryDirectory() as tmp_dir: tokenizer.save_pretrained(_a ) snake_case__ = AutoTokenizer.from_pretrained(_a , trust_remote_code=_a , use_fast=_a ) self.assertEqual(reloaded_tokenizer.__class__.__name__ , '''NewTokenizer''' ) self.assertTrue(reloaded_tokenizer.special_attribute_present ) else: self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizer''' ) self.assertEqual(reloaded_tokenizer.__class__.__name__ , '''NewTokenizer''' ) @require_tokenizers def SCREAMING_SNAKE_CASE__ ( self:str ): class __magic_name__ (snake_case_ ): '''simple docstring''' __lowercase : Tuple = False class __magic_name__ (snake_case_ ): '''simple docstring''' __lowercase : List[str] = NewTokenizer __lowercase : List[str] = False try: AutoConfig.register('''custom''' , _a ) AutoTokenizer.register(_a , slow_tokenizer_class=_a ) AutoTokenizer.register(_a , fast_tokenizer_class=_a ) # If remote code is not set, the default is to use local snake_case__ = AutoTokenizer.from_pretrained('''hf-internal-testing/test_dynamic_tokenizer''' ) self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizerFast''' ) self.assertFalse(tokenizer.special_attribute_present ) snake_case__ = AutoTokenizer.from_pretrained('''hf-internal-testing/test_dynamic_tokenizer''' , 
use_fast=_a ) self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizer''' ) self.assertFalse(tokenizer.special_attribute_present ) # If remote code is disabled, we load the local one. snake_case__ = AutoTokenizer.from_pretrained( '''hf-internal-testing/test_dynamic_tokenizer''' , trust_remote_code=_a ) self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizerFast''' ) self.assertFalse(tokenizer.special_attribute_present ) snake_case__ = AutoTokenizer.from_pretrained( '''hf-internal-testing/test_dynamic_tokenizer''' , trust_remote_code=_a , use_fast=_a ) self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizer''' ) self.assertFalse(tokenizer.special_attribute_present ) # If remote is enabled, we load from the Hub snake_case__ = AutoTokenizer.from_pretrained( '''hf-internal-testing/test_dynamic_tokenizer''' , trust_remote_code=_a ) self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizerFast''' ) self.assertTrue(tokenizer.special_attribute_present ) snake_case__ = AutoTokenizer.from_pretrained( '''hf-internal-testing/test_dynamic_tokenizer''' , trust_remote_code=_a , use_fast=_a ) self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizer''' ) self.assertTrue(tokenizer.special_attribute_present ) finally: if "custom" in CONFIG_MAPPING._extra_content: del CONFIG_MAPPING._extra_content["custom"] if CustomConfig in TOKENIZER_MAPPING._extra_content: del TOKENIZER_MAPPING._extra_content[CustomConfig] def SCREAMING_SNAKE_CASE__ ( self:int ): snake_case__ = AutoTokenizer.from_pretrained( '''hf-internal-testing/test_dynamic_tokenizer_legacy''' , trust_remote_code=_a ) self.assertTrue(tokenizer.special_attribute_present ) if is_tokenizers_available(): self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizerFast''' ) # Test we can also load the slow version snake_case__ = AutoTokenizer.from_pretrained( '''hf-internal-testing/test_dynamic_tokenizer_legacy''' , trust_remote_code=_a , use_fast=_a ) 
self.assertTrue(tokenizer.special_attribute_present ) self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizer''' ) else: self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizer''' ) def SCREAMING_SNAKE_CASE__ ( self:str ): with self.assertRaisesRegex( _a , '''bert-base is not a local folder and is not a valid model identifier''' ): snake_case__ = AutoTokenizer.from_pretrained('''bert-base''' ) def SCREAMING_SNAKE_CASE__ ( self:Optional[Any] ): with self.assertRaisesRegex( _a , r'''aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)''' ): snake_case__ = AutoTokenizer.from_pretrained(_a , revision='''aaaaaa''' ) def SCREAMING_SNAKE_CASE__ ( self:Dict ): # Make sure we have cached the tokenizer. snake_case__ = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-bert''' ) with RequestCounter() as counter: snake_case__ = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-bert''' ) self.assertEqual(counter.get_request_count , 0 ) self.assertEqual(counter.head_request_count , 1 ) self.assertEqual(counter.other_request_count , 0 )
33
import pytest

from datasets.utils.sharding import _distribute_shards, _number_of_shards_in_gen_kwargs, _split_gen_kwargs


@pytest.mark.parametrize(
    "kwargs, expected",
    [
        ({"num_shards": 0, "max_num_jobs": 1}, []),
        ({"num_shards": 10, "max_num_jobs": 1}, [range(10)]),
        # BUG FIX: the comprehension previously referenced an undefined mangled
        # name; each of the 10 jobs gets the single-shard range [i, i + 1).
        ({"num_shards": 10, "max_num_jobs": 10}, [range(i, i + 1) for i in range(10)]),
        ({"num_shards": 1, "max_num_jobs": 10}, [range(1)]),
        ({"num_shards": 10, "max_num_jobs": 3}, [range(0, 4), range(4, 7), range(7, 10)]),
        ({"num_shards": 3, "max_num_jobs": 10}, [range(0, 1), range(1, 2), range(2, 3)]),
    ],
)
def test_distribute_shards(kwargs, expected):
    """_distribute_shards splits shard ids as evenly as possible across jobs."""
    out = _distribute_shards(**kwargs)
    assert out == expected


@pytest.mark.parametrize(
    "gen_kwargs, max_num_jobs, expected",
    [
        ({"foo": 0}, 10, [{"foo": 0}]),
        ({"shards": [0, 1, 2, 3]}, 1, [{"shards": [0, 1, 2, 3]}]),
        ({"shards": [0, 1, 2, 3]}, 4, [{"shards": [0]}, {"shards": [1]}, {"shards": [2]}, {"shards": [3]}]),
        ({"shards": [0, 1]}, 4, [{"shards": [0]}, {"shards": [1]}]),
        ({"shards": [0, 1, 2, 3]}, 2, [{"shards": [0, 1]}, {"shards": [2, 3]}]),
    ],
)
def test_split_gen_kwargs(gen_kwargs, max_num_jobs, expected):
    """_split_gen_kwargs partitions list-valued gen_kwargs across jobs."""
    out = _split_gen_kwargs(gen_kwargs, max_num_jobs)
    assert out == expected


@pytest.mark.parametrize(
    "gen_kwargs, expected",
    [
        ({"foo": 0}, 1),
        ({"shards": [0]}, 1),
        ({"shards": [0, 1, 2, 3]}, 4),
        ({"shards": [0, 1, 2, 3], "foo": 0}, 4),
        ({"shards": [0, 1, 2, 3], "other": (0, 1)}, 4),
        ({"shards": [0, 1, 2, 3], "shards2": [0, 1]}, RuntimeError),
    ],
)
def test_number_of_shards_in_gen_kwargs(gen_kwargs, expected):
    """List-valued kwargs of differing lengths raise; otherwise their shared
    length (the shard count) is returned.

    BUG FIX: the three test functions previously shared one mangled non-test_
    name — pytest never collected them and each def shadowed the previous one;
    their duplicated parameter names were also a SyntaxError.
    """
    if expected is RuntimeError:
        with pytest.raises(expected):
            _number_of_shards_in_gen_kwargs(gen_kwargs)
    else:
        out = _number_of_shards_in_gen_kwargs(gen_kwargs)
        assert out == expected
33
1
import numpy as np


def SCREAMING_SNAKE_CASE(vector: np.ndarray, alpha: float) -> np.ndarray:
    """Exponential Linear Unit (ELU) activation, applied element-wise.

    Returns ``vector`` where positive and ``alpha * (exp(vector) - 1)``
    elsewhere.

    BUG FIX: both parameters previously shared one mangled name — a
    SyntaxError that also made ``alpha`` unusable in the body.

    Args:
        vector: input values.
        alpha: scale of the negative saturation branch.

    Returns:
        np.ndarray of the same shape as ``vector``.
    """
    return np.where(vector > 0, vector, alpha * (np.exp(vector) - 1))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
33
# Fast pipeline tests for the DeepFloyd IF image-to-image super-resolution
# pipeline, built on the shared diffusers pipeline-test mixins.
import random
import unittest

import torch

from diffusers import IFImgaImgSuperResolutionPipeline
from diffusers.utils import floats_tensor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import skip_mps, torch_device

from ..pipeline_params import TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
from . import IFPipelineTesterMixin


# NOTE(review): the class name and its first two bases were mangled
# (`snake_case_`); presumably PipelineTesterMixin and IFPipelineTesterMixin —
# confirm against the original file.
@skip_mps
class __magic_name__ (snake_case_ ,snake_case_ ,unittest.TestCase ):
    '''simple docstring'''

    # Pipeline under test and its parameter sets; width/height are fixed by
    # the super-resolution pipeline, latents handled by the components helper.
    __lowercase : str = IFImgaImgSuperResolutionPipeline
    __lowercase : Optional[Any] = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {'width', 'height'}
    __lowercase : int = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS.union({'original_image'} )
    __lowercase : List[str] = PipelineTesterMixin.required_optional_params - {'latents'}

    # NOTE(review): all methods below were mangled to one shared name; only the
    # last def survives at runtime — restore the original method names.
    def SCREAMING_SNAKE_CASE__ ( self:Dict ):
        # Tiny randomly-initialised components from the shared IF test mixin.
        return self._get_superresolution_dummy_components()

    def SCREAMING_SNAKE_CASE__ ( self:Dict , _a:int , _a:Optional[Any]=0 ):
        # Build deterministic dummy inputs for one pipeline call.
        if str(_a ).startswith('''mps''' ):
            # MPS does not support device-local generators.
            snake_case__ = torch.manual_seed(_a )
        else:
            snake_case__ = torch.Generator(device=_a ).manual_seed(_a )
        # 32x32 "image" and 16x16 "original_image" dummy tensors.
        snake_case__ = floats_tensor((1, 3, 32, 32) , rng=random.Random(_a ) ).to(_a )
        snake_case__ = floats_tensor((1, 3, 16, 16) , rng=random.Random(_a ) ).to(_a )
        snake_case__ = {
            '''prompt''': '''A painting of a squirrel eating a burger''',
            '''image''': image,
            '''original_image''': original_image,
            '''generator''': generator,
            '''num_inference_steps''': 2,
            '''output_type''': '''numpy''',
        }
        return inputs

    @unittest.skipIf(
        torch_device != '''cuda''' or not is_xformers_available() ,
        reason='''XFormers attention is only available with CUDA and `xformers` installed''' ,
    )
    def SCREAMING_SNAKE_CASE__ ( self:Union[str, Any] ):
        # xformers attention must match the default attention closely.
        self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3 )

    def SCREAMING_SNAKE_CASE__ ( self:Tuple ):
        self._test_save_load_optional_components()

    @unittest.skipIf(torch_device != '''cuda''' , reason='''float16 requires CUDA''' )
    def SCREAMING_SNAKE_CASE__ ( self:Dict ):
        # Due to non-determinism in save load of the hf-internal-testing/tiny-random-t5 text encoder
        super().test_save_load_floataa(expected_max_diff=1e-1 )

    def SCREAMING_SNAKE_CASE__ ( self:Optional[int] ):
        self._test_attention_slicing_forward_pass(expected_max_diff=1e-2 )

    def SCREAMING_SNAKE_CASE__ ( self:str ):
        self._test_save_load_local()

    def SCREAMING_SNAKE_CASE__ ( self:Union[str, Any] ):
        # Batched vs single inference must agree within tolerance.
        self._test_inference_batch_single_identical(
            expected_max_diff=1e-2 ,
        )
33
1
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


# Map of submodule name -> public names, consumed lazily by _LazyModule.
_import_structure = {
    "configuration_jukebox": [
        "JUKEBOX_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "JukeboxConfig",
        "JukeboxPriorConfig",
        "JukeboxVQVAEConfig",
    ],
    "tokenization_jukebox": ["JukeboxTokenizer"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    # BUG FIX: this list was previously bound to a throwaway name instead of
    # being registered under "modeling_jukebox" in the lazy-import map.
    _import_structure["modeling_jukebox"] = [
        "JUKEBOX_PRETRAINED_MODEL_ARCHIVE_LIST",
        "JukeboxModel",
        "JukeboxPreTrainedModel",
        "JukeboxVQVAE",
        "JukeboxPrior",
    ]

if TYPE_CHECKING:
    # Static type checkers see the real imports.
    from .configuration_jukebox import (
        JUKEBOX_PRETRAINED_CONFIG_ARCHIVE_MAP,
        JukeboxConfig,
        JukeboxPriorConfig,
        JukeboxVQVAEConfig,
    )
    from .tokenization_jukebox import JukeboxTokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_jukebox import (
            JUKEBOX_PRETRAINED_MODEL_ARCHIVE_LIST,
            JukeboxModel,
            JukeboxPreTrainedModel,
            JukeboxPrior,
            JukeboxVQVAE,
        )
else:
    import sys

    # BUG FIX: the lazy proxy must replace this module in sys.modules; it was
    # previously assigned to a throwaway name, so laziness never took effect.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
33
import math class __magic_name__ : '''simple docstring''' def SCREAMING_SNAKE_CASE__ ( self:Optional[int] , _a:list[list[float]] , _a:list[int] ): snake_case__ = 0.0 snake_case__ = 0.0 for i in range(len(_a ) ): da += math.pow((sample[i] - weights[0][i]) , 2 ) da += math.pow((sample[i] - weights[1][i]) , 2 ) return 0 if da > da else 1 return 0 def SCREAMING_SNAKE_CASE__ ( self:Tuple , _a:list[list[int | float]] , _a:list[int] , _a:int , _a:float ): for i in range(len(_a ) ): weights[j][i] += alpha * (sample[i] - weights[j][i]) return weights def SCREAMING_SNAKE_CASE ( ) -> None: # Training Examples ( m, n ) snake_case__ = [[1, 1, 0, 0], [0, 0, 0, 1], [1, 0, 0, 0], [0, 0, 1, 1]] # weight initialization ( n, C ) snake_case__ = [[0.2, 0.6, 0.5, 0.9], [0.8, 0.4, 0.7, 0.3]] # training snake_case__ = SelfOrganizingMap() snake_case__ = 3 snake_case__ = 0.5 for _ in range(__lowerCAmelCase ): for j in range(len(__lowerCAmelCase ) ): # training sample snake_case__ = training_samples[j] # Compute the winning vector snake_case__ = self_organizing_map.get_winner(__lowerCAmelCase , __lowerCAmelCase ) # Update the winning vector snake_case__ = self_organizing_map.update(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) # classify test sample snake_case__ = [0, 0, 0, 1] snake_case__ = self_organizing_map.get_winner(__lowerCAmelCase , __lowerCAmelCase ) # results print(F"""Clusters that the test sample belongs to : {winner}""" ) print(F"""Weights that have been trained : {weights}""" ) # running the main() function if __name__ == "__main__": main()
33
1
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available


# Config, retriever and tokenizer are framework-agnostic and always available.
_import_structure = {
    "configuration_rag": ["RagConfig"],
    "retrieval_rag": ["RagRetriever"],
    "tokenization_rag": ["RagTokenizer"],
}

# PyTorch models.
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_rag"] = [
        "RagModel",
        "RagPreTrainedModel",
        "RagSequenceForGeneration",
        "RagTokenForGeneration",
    ]

# TensorFlow models.
try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_rag"] = [
        "TFRagModel",
        "TFRagPreTrainedModel",
        "TFRagSequenceForGeneration",
        "TFRagTokenForGeneration",
    ]

if TYPE_CHECKING:
    # Static imports for type checkers only; the lazy module serves them at runtime.
    from .configuration_rag import RagConfig
    from .retrieval_rag import RagRetriever
    from .tokenization_rag import RagTokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_rag import RagModel, RagPreTrainedModel, RagSequenceForGeneration, RagTokenForGeneration

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_rag import (
            TFRagModel,
            TFRagPreTrainedModel,
            TFRagSequenceForGeneration,
            TFRagTokenForGeneration,
        )

else:
    import sys

    # Replace this module with a lazy proxy so submodules load on first access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
33
from __future__ import annotations from statistics import mean def SCREAMING_SNAKE_CASE ( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) -> list[int]: snake_case__ = [0] * no_of_processes snake_case__ = [0] * no_of_processes # Initialize remaining_time to waiting_time. for i in range(__lowerCAmelCase ): snake_case__ = burst_time[i] snake_case__ = [] snake_case__ = 0 snake_case__ = 0 # When processes are not completed, # A process whose arrival time has passed \ # and has remaining execution time is put into the ready_process. # The shortest process in the ready_process, target_process is executed. while completed != no_of_processes: snake_case__ = [] snake_case__ = -1 for i in range(__lowerCAmelCase ): if (arrival_time[i] <= total_time) and (remaining_time[i] > 0): ready_process.append(__lowerCAmelCase ) if len(__lowerCAmelCase ) > 0: snake_case__ = ready_process[0] for i in ready_process: if remaining_time[i] < remaining_time[target_process]: snake_case__ = i total_time += burst_time[target_process] completed += 1 snake_case__ = 0 snake_case__ = ( total_time - arrival_time[target_process] - burst_time[target_process] ) else: total_time += 1 return waiting_time def SCREAMING_SNAKE_CASE ( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) -> list[int]: snake_case__ = [0] * no_of_processes for i in range(__lowerCAmelCase ): snake_case__ = burst_time[i] + waiting_time[i] return turn_around_time if __name__ == "__main__": print("""[TEST CASE 01]""") lowerCamelCase__ : Tuple = 4 lowerCamelCase__ : Union[str, Any] = [2, 5, 3, 7] lowerCamelCase__ : Optional[Any] = [0, 0, 0, 0] lowerCamelCase__ : Dict = calculate_waitingtime(arrival_time, burst_time, no_of_processes) lowerCamelCase__ : Union[str, Any] = calculate_turnaroundtime( burst_time, no_of_processes, waiting_time ) # Printing the Result print("""PID\tBurst Time\tArrival Time\tWaiting Time\tTurnaround Time""") for i, process_id in enumerate(list(range(1, 5))): print( 
F"""{process_id}\t{burst_time[i]}\t\t\t{arrival_time[i]}\t\t\t\t""" F"""{waiting_time[i]}\t\t\t\t{turn_around_time[i]}""" ) print(F"""\nAverage waiting time = {mean(waiting_time):.5f}""") print(F"""Average turnaround time = {mean(turn_around_time):.5f}""")
33
1
__author__ = "Alexander Joslin"

import operator as op

from .stack import Stack


def dijkstras_two_stack_algorithm(equation: str) -> int:
    """Evaluate a fully parenthesized infix expression with two stacks.

    Operands must be single digits, e.g. "(5 + ((4 * 2) * (2 + 3)))" -> 45.
    The broken version clobbered both popped operands into one name, losing
    operand order for '-' and '/'.
    """
    # Supported binary operators mapped to their stdlib implementations.
    operators = {"*": op.mul, "/": op.truediv, "+": op.add, "-": op.sub}

    operand_stack = Stack()
    operator_stack = Stack()

    for i in equation:
        if i.isdigit():
            # RULE 1: operands go straight onto the operand stack.
            operand_stack.push(int(i))
        elif i in operators:
            # RULE 2: operators go onto the operator stack.
            operator_stack.push(i)
        elif i == ")":
            # RULE 4: on ')', apply the top operator to the top two operands.
            opr = operator_stack.peek()
            operator_stack.pop()
            right = operand_stack.peek()  # popped first -> right operand
            operand_stack.pop()
            left = operand_stack.peek()  # popped second -> left operand
            operand_stack.pop()
            total = operators[opr](left, right)
            operand_stack.push(total)
    # RULE 5: the final result is the only value left on the operand stack.
    return operand_stack.peek()


if __name__ == "__main__":
    equation = "(5 + ((4 * 2) * (2 + 3)))"
    # answer = 45
    print(f"{equation} = {dijkstras_two_stack_algorithm(equation)}")
33
__author__ = "Alexander Joslin"

import operator as op

from .stack import Stack


def dijkstras_two_stack_algorithm(equation: str) -> int:
    """Evaluate a fully parenthesized infix expression with two stacks.

    Operands must be single digits, e.g. "(5 + ((4 * 2) * (2 + 3)))" -> 45.
    The broken version clobbered both popped operands into one name, losing
    operand order for '-' and '/'.
    """
    # Supported binary operators mapped to their stdlib implementations.
    operators = {"*": op.mul, "/": op.truediv, "+": op.add, "-": op.sub}

    operand_stack = Stack()
    operator_stack = Stack()

    for i in equation:
        if i.isdigit():
            # RULE 1: operands go straight onto the operand stack.
            operand_stack.push(int(i))
        elif i in operators:
            # RULE 2: operators go onto the operator stack.
            operator_stack.push(i)
        elif i == ")":
            # RULE 4: on ')', apply the top operator to the top two operands.
            opr = operator_stack.peek()
            operator_stack.pop()
            right = operand_stack.peek()  # popped first -> right operand
            operand_stack.pop()
            left = operand_stack.peek()  # popped second -> left operand
            operand_stack.pop()
            total = operators[opr](left, right)
            operand_stack.push(total)
    # RULE 5: the final result is the only value left on the operand stack.
    return operand_stack.peek()


if __name__ == "__main__":
    equation = "(5 + ((4 * 2) * (2 + 3)))"
    # answer = 45
    print(f"{equation} = {dijkstras_two_stack_algorithm(equation)}")
33
1
from __future__ import annotations from fractions import Fraction def SCREAMING_SNAKE_CASE ( __lowerCAmelCase , __lowerCAmelCase ) -> bool: return ( num != den and num % 10 == den // 10 and (num // 10) / (den % 10) == num / den ) def SCREAMING_SNAKE_CASE ( __lowerCAmelCase ) -> list[str]: snake_case__ = [] snake_case__ = 11 snake_case__ = int('''1''' + '''0''' * digit_len ) for num in range(__lowerCAmelCase , __lowerCAmelCase ): while den <= 99: if (num != den) and (num % 10 == den // 10) and (den % 10 != 0): if is_digit_cancelling(__lowerCAmelCase , __lowerCAmelCase ): solutions.append(F"""{num}/{den}""" ) den += 1 num += 1 snake_case__ = 10 return solutions def SCREAMING_SNAKE_CASE ( __lowerCAmelCase = 2 ) -> int: snake_case__ = 1.0 for fraction in fraction_list(__lowerCAmelCase ): snake_case__ = Fraction(__lowerCAmelCase ) result *= frac.denominator / frac.numerator return int(__lowerCAmelCase ) if __name__ == "__main__": print(solution())
33
import warnings

from ...utils import logging
from .image_processing_perceiver import PerceiverImageProcessor


logger = logging.get_logger(__name__)


class PerceiverFeatureExtractor(PerceiverImageProcessor):
    """Deprecated alias of PerceiverImageProcessor, kept for backward compatibility."""

    def __init__(self, *args, **kwargs) -> None:
        # Warn at construction time, then defer entirely to the new class.
        # The broken version passed the positional-args tuple as the warning
        # category; FutureWarning is the intended category.
        warnings.warn(
            "The class PerceiverFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use PerceiverImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
33
1
import datasets

from .evaluate import evaluate


_CITATION = """\
@article{hendrycks2021cuad,
    title={CUAD: An Expert-Annotated NLP Dataset for Legal Contract Review},
    author={Dan Hendrycks and Collin Burns and Anya Chen and Spencer Ball},
    journal={arXiv preprint arXiv:2103.06268},
    year={2021}
}
"""

_DESCRIPTION = """
This metric wrap the official scoring script for version 1 of the Contract
Understanding Atticus Dataset (CUAD).
Contract Understanding Atticus Dataset (CUAD) v1 is a corpus of more than 13,000 labels in 510
commercial legal contracts that have been manually labeled to identify 41 categories of important
clauses that lawyers look for when reviewing contracts in connection with corporate transactions.
"""

_KWARGS_DESCRIPTION = """
Computes CUAD scores (EM, F1, AUPR, Precision@80%Recall, and Precision@90%Recall).
Args:
    predictions: List of question-answers dictionaries with the following key-values:
        - 'id': id of the question-answer pair as given in the references (see below)
        - 'prediction_text': list of possible texts for the answer, as a list of strings
          depending on a threshold on the confidence probability of each prediction.
    references: List of question-answers dictionaries with the following key-values:
        - 'id': id of the question-answer pair (see above),
        - 'answers': a Dict in the CUAD dataset format
            {
                'text': list of possible texts for the answer, as a list of strings
                'answer_start': list of start positions for the answer, as a list of ints
            }
            Note that answer_start values are not taken into account to compute the metric.
Returns:
    'exact_match': Exact match (the normalized answer exactly match the gold answer)
    'f1': The F-score of predicted tokens versus the gold answer
    'aupr': Area Under the Precision-Recall curve
    'prec_at_80_recall': Precision at 80% recall
    'prec_at_90_recall': Precision at 90% recall
Examples:
    >>> predictions = [{'prediction_text': ['The seller:', 'The buyer/End-User: Shenzhen LOHAS Supply Chain Management Co., Ltd.'], 'id': 'LohaCompanyltd_20191209_F-1_EX-10.16_11917878_EX-10.16_Supply Agreement__Parties'}]
    >>> references = [{'answers': {'answer_start': [143, 49], 'text': ['The seller:', 'The buyer/End-User: Shenzhen LOHAS Supply Chain Management Co., Ltd.']}, 'id': 'LohaCompanyltd_20191209_F-1_EX-10.16_11917878_EX-10.16_Supply Agreement__Parties'}]
    >>> cuad_metric = datasets.load_metric(\"cuad\")
    >>> results = cuad_metric.compute(predictions=predictions, references=references)
    >>> print(results)
    {'exact_match': 100.0, 'f1': 100.0, 'aupr': 0.0, 'prec_at_80_recall': 1.0, 'prec_at_90_recall': 1.0}
"""


@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class CUAD(datasets.Metric):
    """CUAD contract-review QA metric; wraps the official scoring script."""

    def _info(self):
        # Declares the prediction/reference schema expected by `compute`.
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": {
                        "id": datasets.Value("string"),
                        "prediction_text": datasets.features.Sequence(datasets.Value("string")),
                    },
                    "references": {
                        "id": datasets.Value("string"),
                        "answers": datasets.features.Sequence(
                            {
                                "text": datasets.Value("string"),
                                "answer_start": datasets.Value("int32"),
                            }
                        ),
                    },
                }
            ),
            codebase_urls=["https://www.atticusprojectai.org/cuad"],
            reference_urls=["https://www.atticusprojectai.org/cuad"],
        )

    def _compute(self, predictions, references):
        # Map each question id to its list of candidate answer texts.
        pred_dict = {prediction["id"]: prediction["prediction_text"] for prediction in predictions}
        # Re-wrap the references into the raw CUAD dataset layout that the
        # official `evaluate` script expects (answer_start is not needed).
        dataset = [
            {
                "paragraphs": [
                    {
                        "qas": [
                            {
                                "answers": [{"text": answer_text} for answer_text in ref["answers"]["text"]],
                                "id": ref["id"],
                            }
                            for ref in references
                        ]
                    }
                ]
            }
        ]
        score = evaluate(dataset=dataset, predictions=pred_dict)
        return score
33
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_flax_available,
    is_tf_available,
    is_tokenizers_available,
    is_torch_available,
)


# Config and the slow tokenizer are always importable.
_import_structure = {
    "configuration_roberta": ["ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP", "RobertaConfig", "RobertaOnnxConfig"],
    "tokenization_roberta": ["RobertaTokenizer"],
}

# Fast tokenizer requires the `tokenizers` library.
try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_roberta_fast"] = ["RobertaTokenizerFast"]

# PyTorch models.
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_roberta"] = [
        "ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST",
        "RobertaForCausalLM",
        "RobertaForMaskedLM",
        "RobertaForMultipleChoice",
        "RobertaForQuestionAnswering",
        "RobertaForSequenceClassification",
        "RobertaForTokenClassification",
        "RobertaModel",
        "RobertaPreTrainedModel",
    ]

# TensorFlow models.
try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_roberta"] = [
        "TF_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFRobertaForCausalLM",
        "TFRobertaForMaskedLM",
        "TFRobertaForMultipleChoice",
        "TFRobertaForQuestionAnswering",
        "TFRobertaForSequenceClassification",
        "TFRobertaForTokenClassification",
        "TFRobertaMainLayer",
        "TFRobertaModel",
        "TFRobertaPreTrainedModel",
    ]

# Flax models.
try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_roberta"] = [
        "FlaxRobertaForCausalLM",
        "FlaxRobertaForMaskedLM",
        "FlaxRobertaForMultipleChoice",
        "FlaxRobertaForQuestionAnswering",
        "FlaxRobertaForSequenceClassification",
        "FlaxRobertaForTokenClassification",
        "FlaxRobertaModel",
        "FlaxRobertaPreTrainedModel",
    ]

if TYPE_CHECKING:
    # Static imports for type checkers only; the lazy module serves them at runtime.
    from .configuration_roberta import ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP, RobertaConfig, RobertaOnnxConfig
    from .tokenization_roberta import RobertaTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_roberta_fast import RobertaTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_roberta import (
            ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
            RobertaForCausalLM,
            RobertaForMaskedLM,
            RobertaForMultipleChoice,
            RobertaForQuestionAnswering,
            RobertaForSequenceClassification,
            RobertaForTokenClassification,
            RobertaModel,
            RobertaPreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_roberta import (
            TF_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFRobertaForCausalLM,
            TFRobertaForMaskedLM,
            TFRobertaForMultipleChoice,
            TFRobertaForQuestionAnswering,
            TFRobertaForSequenceClassification,
            TFRobertaForTokenClassification,
            TFRobertaMainLayer,
            TFRobertaModel,
            TFRobertaPreTrainedModel,
        )

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_roberta import (
            FlaxRobertaForCausalLM,
            FlaxRobertaForMaskedLM,
            FlaxRobertaForMultipleChoice,
            FlaxRobertaForQuestionAnswering,
            FlaxRobertaForSequenceClassification,
            FlaxRobertaForTokenClassification,
            FlaxRobertaModel,
            FlaxRobertaPreTrainedModel,
        )

else:
    import sys

    # Replace this module with a lazy proxy so submodules load on first access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
33
1
import itertools
import string
from collections.abc import Generator, Iterable


def chunker(seq: Iterable[str], size: int) -> Generator[tuple[str, ...], None, None]:
    """Yield consecutive tuples of `size` items from `seq`."""
    it = iter(seq)
    while True:
        chunk = tuple(itertools.islice(it, size))
        if not chunk:
            return
        yield chunk


def prepare_input(dirty: str) -> str:
    """Uppercase, strip non-letters, split doubled letters with 'X', pad to even length."""
    dirty = "".join([c.upper() for c in dirty if c in string.ascii_letters])
    clean = ""

    if len(dirty) < 2:
        return dirty

    for i in range(len(dirty) - 1):
        clean += dirty[i]
        # Playfair cannot encode a digraph of identical letters; insert 'X'.
        if dirty[i] == dirty[i + 1]:
            clean += "X"

    clean += dirty[-1]

    if len(clean) & 1:
        clean += "X"

    return clean


def generate_table(key: str) -> list[str]:
    """Build the 5x5 Playfair table (as a flat list of 25 letters) for `key`."""
    # I and J are used interchangeably to allow
    # us to use a 5x5 table (25 letters)
    alphabet = "ABCDEFGHIKLMNOPQRSTUVWXYZ"
    # we're using a list instead of a '2d' array because it makes the math
    # for setting up the table and doing the actual encoding/decoding simpler
    table = []

    # copy key chars into the table if they are in `alphabet` ignoring duplicates
    for char in key.upper():
        if char not in table and char in alphabet:
            table.append(char)

    # fill the rest of the table in with the remaining alphabet chars
    for char in alphabet:
        if char not in table:
            table.append(char)

    return table


def encode(plaintext: str, key: str) -> str:
    """Encrypt `plaintext` with the Playfair cipher under `key`."""
    table = generate_table(key)
    plaintext = prepare_input(plaintext)
    ciphertext = ""

    # https://en.wikipedia.org/wiki/Playfair_cipher#Description
    for char1, char2 in chunker(plaintext, 2):
        row1, col1 = divmod(table.index(char1), 5)
        row2, col2 = divmod(table.index(char2), 5)

        if row1 == row2:
            # same row: take the letter to the right (wrapping).
            ciphertext += table[row1 * 5 + (col1 + 1) % 5]
            ciphertext += table[row2 * 5 + (col2 + 1) % 5]
        elif col1 == col2:
            # same column: take the letter below (wrapping).
            ciphertext += table[((row1 + 1) % 5) * 5 + col1]
            ciphertext += table[((row2 + 1) % 5) * 5 + col2]
        else:  # rectangle: swap columns.
            ciphertext += table[row1 * 5 + col2]
            ciphertext += table[row2 * 5 + col1]

    return ciphertext


def decode(ciphertext: str, key: str) -> str:
    """Decrypt Playfair `ciphertext` under `key` (inverse of `encode`)."""
    table = generate_table(key)
    plaintext = ""

    # https://en.wikipedia.org/wiki/Playfair_cipher#Description
    for char1, char2 in chunker(ciphertext, 2):
        row1, col1 = divmod(table.index(char1), 5)
        row2, col2 = divmod(table.index(char2), 5)

        if row1 == row2:
            # same row: take the letter to the left (wrapping).
            plaintext += table[row1 * 5 + (col1 - 1) % 5]
            plaintext += table[row2 * 5 + (col2 - 1) % 5]
        elif col1 == col2:
            # same column: take the letter above (wrapping).
            plaintext += table[((row1 - 1) % 5) * 5 + col1]
            plaintext += table[((row2 - 1) % 5) * 5 + col2]
        else:  # rectangle: swap columns back.
            plaintext += table[row1 * 5 + col2]
            plaintext += table[row2 * 5 + col1]

    return plaintext
33
import gc import random import unittest import numpy as np import torch from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer import diffusers from diffusers import ( AutoencoderKL, EulerDiscreteScheduler, StableDiffusionLatentUpscalePipeline, StableDiffusionPipeline, UNetaDConditionModel, ) from diffusers.schedulers import KarrasDiffusionSchedulers from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu from ..pipeline_params import TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin enable_full_determinism() def SCREAMING_SNAKE_CASE ( __lowerCAmelCase ) -> List[Any]: snake_case__ = [tensor.shape for tensor in tensor_list] return all(shape == shapes[0] for shape in shapes[1:] ) class __magic_name__ (snake_case_ ,snake_case_ ,snake_case_ ,unittest.TestCase ): '''simple docstring''' __lowercase : Dict = StableDiffusionLatentUpscalePipeline __lowercase : List[str] = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - { 'height', 'width', 'cross_attention_kwargs', 'negative_prompt_embeds', 'prompt_embeds', } __lowercase : List[Any] = PipelineTesterMixin.required_optional_params - {'num_images_per_prompt'} __lowercase : Any = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS __lowercase : int = frozenset( [] ) # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess __lowercase : List[Any] = frozenset([] ) __lowercase : Any = True @property def SCREAMING_SNAKE_CASE__ ( self:List[str] ): snake_case__ = 1 snake_case__ = 4 snake_case__ = (16, 16) snake_case__ = floats_tensor((batch_size, num_channels) + sizes , rng=random.Random(0 ) ).to(_a ) return image def SCREAMING_SNAKE_CASE__ ( self:Union[str, Any] ): torch.manual_seed(0 ) snake_case__ = UNetaDConditionModel( act_fn='''gelu''' , 
attention_head_dim=8 , norm_num_groups=_a , block_out_channels=[32, 32, 64, 64] , time_cond_proj_dim=1_60 , conv_in_kernel=1 , conv_out_kernel=1 , cross_attention_dim=32 , down_block_types=( '''KDownBlock2D''', '''KCrossAttnDownBlock2D''', '''KCrossAttnDownBlock2D''', '''KCrossAttnDownBlock2D''', ) , in_channels=8 , mid_block_type=_a , only_cross_attention=_a , out_channels=5 , resnet_time_scale_shift='''scale_shift''' , time_embedding_type='''fourier''' , timestep_post_act='''gelu''' , up_block_types=('''KCrossAttnUpBlock2D''', '''KCrossAttnUpBlock2D''', '''KCrossAttnUpBlock2D''', '''KUpBlock2D''') , ) snake_case__ = AutoencoderKL( block_out_channels=[32, 32, 64, 64] , in_channels=3 , out_channels=3 , down_block_types=[ '''DownEncoderBlock2D''', '''DownEncoderBlock2D''', '''DownEncoderBlock2D''', '''DownEncoderBlock2D''', ] , up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D''', '''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] , latent_channels=4 , ) snake_case__ = EulerDiscreteScheduler(prediction_type='''sample''' ) snake_case__ = CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=10_00 , hidden_act='''quick_gelu''' , projection_dim=5_12 , ) snake_case__ = CLIPTextModel(_a ) snake_case__ = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' ) snake_case__ = { '''unet''': model.eval(), '''vae''': vae.eval(), '''scheduler''': scheduler, '''text_encoder''': text_encoder, '''tokenizer''': tokenizer, } return components def SCREAMING_SNAKE_CASE__ ( self:List[Any] , _a:Optional[Any] , _a:List[str]=0 ): if str(_a ).startswith('''mps''' ): snake_case__ = torch.manual_seed(_a ) else: snake_case__ = torch.Generator(device=_a ).manual_seed(_a ) snake_case__ = { '''prompt''': '''A painting of a squirrel eating a burger''', '''image''': self.dummy_image.cpu(), '''generator''': generator, 
'''num_inference_steps''': 2, '''output_type''': '''numpy''', } return inputs def SCREAMING_SNAKE_CASE__ ( self:str ): snake_case__ = '''cpu''' snake_case__ = self.get_dummy_components() snake_case__ = self.pipeline_class(**_a ) pipe.to(_a ) pipe.set_progress_bar_config(disable=_a ) snake_case__ = self.get_dummy_inputs(_a ) snake_case__ = pipe(**_a ).images snake_case__ = image[0, -3:, -3:, -1] self.assertEqual(image.shape , (1, 2_56, 2_56, 3) ) snake_case__ = np.array( [0.47222412, 0.41921633, 0.44717434, 0.46874192, 0.42588258, 0.46150726, 0.4677534, 0.45583832, 0.48579055] ) snake_case__ = np.abs(image_slice.flatten() - expected_slice ).max() self.assertLessEqual(_a , 1e-3 ) def SCREAMING_SNAKE_CASE__ ( self:Union[str, Any] ): super().test_attention_slicing_forward_pass(expected_max_diff=7e-3 ) def SCREAMING_SNAKE_CASE__ ( self:List[Any] ): super().test_cpu_offload_forward_pass(expected_max_diff=3e-3 ) def SCREAMING_SNAKE_CASE__ ( self:str ): super().test_dict_tuple_outputs_equivalent(expected_max_difference=3e-3 ) def SCREAMING_SNAKE_CASE__ ( self:Any ): super().test_inference_batch_single_identical(expected_max_diff=7e-3 ) def SCREAMING_SNAKE_CASE__ ( self:Tuple ): super().test_pt_np_pil_outputs_equivalent(expected_max_diff=3e-3 ) def SCREAMING_SNAKE_CASE__ ( self:Dict ): super().test_save_load_local(expected_max_difference=3e-3 ) def SCREAMING_SNAKE_CASE__ ( self:str ): super().test_save_load_optional_components(expected_max_difference=3e-3 ) def SCREAMING_SNAKE_CASE__ ( self:Any ): snake_case__ = [ '''DDIMScheduler''', '''DDPMScheduler''', '''PNDMScheduler''', '''HeunDiscreteScheduler''', '''EulerAncestralDiscreteScheduler''', '''KDPM2DiscreteScheduler''', '''KDPM2AncestralDiscreteScheduler''', '''DPMSolverSDEScheduler''', ] snake_case__ = self.get_dummy_components() snake_case__ = self.pipeline_class(**_a ) # make sure that PNDM does not need warm-up pipe.scheduler.register_to_config(skip_prk_steps=_a ) pipe.to(_a ) pipe.set_progress_bar_config(disable=_a ) 
snake_case__ = self.get_dummy_inputs(_a ) snake_case__ = 2 snake_case__ = [] for scheduler_enum in KarrasDiffusionSchedulers: if scheduler_enum.name in skip_schedulers: # no sigma schedulers are not supported # no schedulers continue snake_case__ = getattr(_a , scheduler_enum.name ) snake_case__ = scheduler_cls.from_config(pipe.scheduler.config ) snake_case__ = pipe(**_a )[0] outputs.append(_a ) assert check_same_shape(_a ) @require_torch_gpu @slow class __magic_name__ (unittest.TestCase ): '''simple docstring''' def SCREAMING_SNAKE_CASE__ ( self:Optional[Any] ): super().tearDown() gc.collect() torch.cuda.empty_cache() def SCREAMING_SNAKE_CASE__ ( self:str ): snake_case__ = torch.manual_seed(33 ) snake_case__ = StableDiffusionPipeline.from_pretrained('''CompVis/stable-diffusion-v1-4''' , torch_dtype=torch.floataa ) pipe.to('''cuda''' ) snake_case__ = StableDiffusionLatentUpscalePipeline.from_pretrained( '''stabilityai/sd-x2-latent-upscaler''' , torch_dtype=torch.floataa ) upscaler.to('''cuda''' ) snake_case__ = '''a photo of an astronaut high resolution, unreal engine, ultra realistic''' snake_case__ = pipe(_a , generator=_a , output_type='''latent''' ).images snake_case__ = upscaler( prompt=_a , image=_a , num_inference_steps=20 , guidance_scale=0 , generator=_a , output_type='''np''' , ).images[0] snake_case__ = load_numpy( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/latent-upscaler/astronaut_1024.npy''' ) assert np.abs((expected_image - image).mean() ) < 5e-2 def SCREAMING_SNAKE_CASE__ ( self:Optional[int] ): snake_case__ = torch.manual_seed(33 ) snake_case__ = StableDiffusionLatentUpscalePipeline.from_pretrained( '''stabilityai/sd-x2-latent-upscaler''' , torch_dtype=torch.floataa ) upscaler.to('''cuda''' ) snake_case__ = '''the temple of fire by Ross Tran and Gerardo Dottori, oil on canvas''' snake_case__ = load_image( 
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/latent-upscaler/fire_temple_512.png''' ) snake_case__ = upscaler( prompt=_a , image=_a , num_inference_steps=20 , guidance_scale=0 , generator=_a , output_type='''np''' , ).images[0] snake_case__ = load_numpy( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/latent-upscaler/fire_temple_1024.npy''' ) assert np.abs((expected_image - image).max() ) < 5e-2
33
1
import copy

from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)


class EncoderDecoderConfig(PretrainedConfig):
    """Configuration for a composite encoder-decoder model.

    Holds the two sub-model configurations under ``encoder`` and ``decoder``.
    """

    model_type = "encoder-decoder"
    is_composition = True

    def __init__(self, **kwargs) -> None:
        super().__init__(**kwargs)
        assert (
            "encoder" in kwargs and "decoder" in kwargs
        ), "Config has to be initialized with encoder and decoder config"
        encoder_config = kwargs.pop("encoder")
        encoder_model_type = encoder_config.pop("model_type")
        decoder_config = kwargs.pop("decoder")
        decoder_model_type = decoder_config.pop("model_type")

        # Imported lazily to avoid a circular import with the auto-config registry.
        from ..auto.configuration_auto import AutoConfig

        self.encoder = AutoConfig.for_model(encoder_model_type, **encoder_config)
        self.decoder = AutoConfig.for_model(decoder_model_type, **decoder_config)
        self.is_encoder_decoder = True

    @classmethod
    def from_encoder_decoder_configs(
        cls, encoder_config: PretrainedConfig, decoder_config: PretrainedConfig, **kwargs
    ) -> "EncoderDecoderConfig":
        """Build a composite config from two sub-configs, flagging the decoder
        as a cross-attending decoder."""
        logger.info("Set `config.is_decoder=True` and `config.add_cross_attention=True` for decoder_config")
        decoder_config.is_decoder = True
        decoder_config.add_cross_attention = True

        return cls(encoder=encoder_config.to_dict(), decoder=decoder_config.to_dict(), **kwargs)

    def to_dict(self):
        """Serialize to a plain dict, expanding the nested encoder/decoder configs."""
        output = copy.deepcopy(self.__dict__)
        output["encoder"] = self.encoder.to_dict()
        output["decoder"] = self.decoder.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
33
# Unit tests for TvltProcessor: verifies that the composite processor
# (image processor + audio feature extractor) round-trips through
# save_pretrained/from_pretrained and forwards each modality to the right
# sub-processor.
# NOTE(review): identifiers in this file look machine-mangled — every local
# is assigned to `snake_case__` and then read back under its original name,
# and zero-argument methods reference an undefined `_a` — so the file cannot
# run as written.  Code left byte-identical; only comments were added.
import shutil
import tempfile
import unittest

import numpy as np
import pytest

from transformers import is_speech_available, is_vision_available
from transformers.testing_utils import require_torch


if is_vision_available():
    from transformers import TvltImageProcessor
if is_speech_available():
    from transformers import TvltFeatureExtractor

from transformers import TvltProcessor


@require_torch
class __magic_name__ (unittest.TestCase ):
    '''simple docstring'''

    def SCREAMING_SNAKE_CASE__ ( self:str ):
        # setUp equivalent: checkpoint name and a scratch dir for save/load.
        snake_case__ = '''ZinengTang/tvlt-base'''
        snake_case__ = tempfile.mkdtemp()

    def SCREAMING_SNAKE_CASE__ ( self:Dict , **_a:List[Any] ):
        # Helper: image processor loaded from the checkpoint.
        return TvltImageProcessor.from_pretrained(self.checkpoint , **_a )

    def SCREAMING_SNAKE_CASE__ ( self:Optional[Any] , **_a:Tuple ):
        # Helper: audio feature extractor loaded from the checkpoint.
        return TvltFeatureExtractor.from_pretrained(self.checkpoint , **_a )

    def SCREAMING_SNAKE_CASE__ ( self:Dict ):
        # tearDown equivalent: remove the scratch dir.
        shutil.rmtree(self.tmpdirname )

    def SCREAMING_SNAKE_CASE__ ( self:List[str] ):
        # save_pretrained / from_pretrained round-trip keeps sub-processor types.
        snake_case__ = self.get_image_processor()
        snake_case__ = self.get_feature_extractor()

        snake_case__ = TvltProcessor(image_processor=_a , feature_extractor=_a )

        processor.save_pretrained(self.tmpdirname )
        snake_case__ = TvltProcessor.from_pretrained(self.tmpdirname )

        self.assertIsInstance(processor.feature_extractor , _a )
        self.assertIsInstance(processor.image_processor , _a )

    def SCREAMING_SNAKE_CASE__ ( self:Dict ):
        # Audio-only call must match the bare feature extractor's output.
        snake_case__ = self.get_image_processor()
        snake_case__ = self.get_feature_extractor()

        snake_case__ = TvltProcessor(image_processor=_a , feature_extractor=_a )

        snake_case__ = np.ones([1_20_00] )

        snake_case__ = feature_extractor(_a , return_tensors='''np''' )
        snake_case__ = processor(audio=_a , return_tensors='''np''' )

        for key in audio_dict.keys():
            self.assertAlmostEqual(audio_dict[key].sum() , input_processor[key].sum() , delta=1e-2 )

    def SCREAMING_SNAKE_CASE__ ( self:List[Any] ):
        # Image-only call must match the bare image processor's output.
        snake_case__ = self.get_image_processor()
        snake_case__ = self.get_feature_extractor()

        snake_case__ = TvltProcessor(image_processor=_a , feature_extractor=_a )

        snake_case__ = np.ones([3, 2_24, 2_24] )

        snake_case__ = image_processor(_a , return_tensors='''np''' )
        snake_case__ = processor(images=_a , return_tensors='''np''' )

        for key in image_dict.keys():
            self.assertAlmostEqual(image_dict[key].sum() , input_processor[key].sum() , delta=1e-2 )

    def SCREAMING_SNAKE_CASE__ ( self:Any ):
        # Joint audio+image call returns all four model inputs; calling with
        # no input at all must raise.
        snake_case__ = self.get_image_processor()
        snake_case__ = self.get_feature_extractor()

        snake_case__ = TvltProcessor(image_processor=_a , feature_extractor=_a )

        snake_case__ = np.ones([1_20_00] )
        snake_case__ = np.ones([3, 2_24, 2_24] )

        snake_case__ = processor(audio=_a , images=_a )

        self.assertListEqual(list(inputs.keys() ) , ['''audio_values''', '''audio_mask''', '''pixel_values''', '''pixel_mask'''] )

        # test if it raises when no input is passed
        with pytest.raises(_a ):
            processor()

    def SCREAMING_SNAKE_CASE__ ( self:List[Any] ):
        # model_input_names is the concatenation of the sub-processors' names.
        snake_case__ = self.get_image_processor()
        snake_case__ = self.get_feature_extractor()

        snake_case__ = TvltProcessor(image_processor=_a , feature_extractor=_a )

        self.assertListEqual(
            processor.model_input_names , image_processor.model_input_names + feature_extractor.model_input_names , msg='''`processor` and `image_processor`+`feature_extractor` model input names do not match''' , )
33
1
# Processor for OWL-ViT: bundles a CLIP tokenizer with the OwlViT image
# processor, handling text queries, query images, and target images.
# NOTE(review): identifiers look machine-mangled — every parameter is named
# `_a` (duplicate parameter names are a SyntaxError as written) and each
# local is assigned to `snake_case__` but read back under its original name
# (`text`, `encodings`, `input_ids`, ...).  Code left byte-identical; only
# comments were added.
import warnings
from typing import List

import numpy as np

from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
from ...utils import is_flax_available, is_tf_available, is_torch_available


class __magic_name__ (snake_case_ ):
    '''simple docstring'''

    __lowercase : Union[str, Any] = ['image_processor', 'tokenizer']
    __lowercase : Any = 'OwlViTImageProcessor'
    __lowercase : Optional[Any] = ('CLIPTokenizer', 'CLIPTokenizerFast')

    def __init__( self:Optional[int] , _a:List[Any]=None , _a:str=None , **_a:Any ):
        # Accepts the deprecated `feature_extractor` kwarg as an alias for
        # `image_processor`; both sub-processors are mandatory.
        snake_case__ = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                '''The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'''
                ''' instead.''' , _a , )
            snake_case__ = kwargs.pop('''feature_extractor''' )

        snake_case__ = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError('''You need to specify an `image_processor`.''' )
        if tokenizer is None:
            raise ValueError('''You need to specify a `tokenizer`.''' )

        super().__init__(_a , _a )

    def __call__( self:Dict , _a:Any=None , _a:Optional[Any]=None , _a:Tuple=None , _a:List[str]="max_length" , _a:Dict="np" , **_a:Any ):
        # Tokenizes text queries (padding ragged per-image query lists),
        # preprocesses query images and/or target images, and assembles a
        # single BatchEncoding in the requested tensor framework.
        if text is None and query_images is None and images is None:
            raise ValueError(
                '''You have to specify at least one text or query image or image. All three cannot be none.''' )

        if text is not None:
            if isinstance(_a , _a ) or (isinstance(_a , _a ) and not isinstance(text[0] , _a )):
                snake_case__ = [self.tokenizer(_a , padding=_a , return_tensors=_a , **_a )]

            elif isinstance(_a , _a ) and isinstance(text[0] , _a ):
                snake_case__ = []

                # Maximum number of queries across batch
                snake_case__ = max([len(_a ) for t in text] )

                # Pad all batch samples to max number of text queries
                for t in text:
                    if len(_a ) != max_num_queries:
                        snake_case__ = t + [''' '''] * (max_num_queries - len(_a ))

                    snake_case__ = self.tokenizer(_a , padding=_a , return_tensors=_a , **_a )
                    encodings.append(_a )
            else:
                raise TypeError('''Input text should be a string, a list of strings or a nested list of strings''' )

            # Concatenate per-sample encodings into batch tensors for the
            # requested framework; each branch is guarded by availability.
            if return_tensors == "np":
                snake_case__ = np.concatenate([encoding['''input_ids'''] for encoding in encodings] , axis=0 )
                snake_case__ = np.concatenate([encoding['''attention_mask'''] for encoding in encodings] , axis=0 )

            elif return_tensors == "jax" and is_flax_available():
                import jax.numpy as jnp

                snake_case__ = jnp.concatenate([encoding['''input_ids'''] for encoding in encodings] , axis=0 )
                snake_case__ = jnp.concatenate([encoding['''attention_mask'''] for encoding in encodings] , axis=0 )

            elif return_tensors == "pt" and is_torch_available():
                import torch

                snake_case__ = torch.cat([encoding['''input_ids'''] for encoding in encodings] , dim=0 )
                snake_case__ = torch.cat([encoding['''attention_mask'''] for encoding in encodings] , dim=0 )

            elif return_tensors == "tf" and is_tf_available():
                import tensorflow as tf

                snake_case__ = tf.stack([encoding['''input_ids'''] for encoding in encodings] , axis=0 )
                snake_case__ = tf.stack([encoding['''attention_mask'''] for encoding in encodings] , axis=0 )

            else:
                raise ValueError('''Target return tensor type could not be returned''' )

            snake_case__ = BatchEncoding()
            snake_case__ = input_ids
            snake_case__ = attention_mask

        if query_images is not None:
            # Image-guided detection: query images get their own pixel values.
            snake_case__ = BatchEncoding()
            snake_case__ = self.image_processor(
                _a , return_tensors=_a , **_a ).pixel_values
            snake_case__ = query_pixel_values

        if images is not None:
            snake_case__ = self.image_processor(_a , return_tensors=_a , **_a )

        # Merge pixel values into the text/query encoding when both given.
        if text is not None and images is not None:
            snake_case__ = image_features.pixel_values
            return encoding
        elif query_images is not None and images is not None:
            snake_case__ = image_features.pixel_values
            return encoding
        elif text is not None or query_images is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**_a ) , tensor_type=_a )

    def SCREAMING_SNAKE_CASE__ ( self:Optional[Any] , *_a:Tuple , **_a:Optional[Any] ):
        # Forwarded to OwlViTImageProcessor.post_process.
        return self.image_processor.post_process(*_a , **_a )

    def SCREAMING_SNAKE_CASE__ ( self:List[Any] , *_a:int , **_a:Optional[Any] ):
        # Forwarded to OwlViTImageProcessor.post_process_object_detection.
        return self.image_processor.post_process_object_detection(*_a , **_a )

    def SCREAMING_SNAKE_CASE__ ( self:Any , *_a:Any , **_a:List[Any] ):
        # Forwarded to OwlViTImageProcessor.post_process_image_guided_detection.
        return self.image_processor.post_process_image_guided_detection(*_a , **_a )

    def SCREAMING_SNAKE_CASE__ ( self:Optional[int] , *_a:Optional[Any] , **_a:List[str] ):
        # Forwarded to the tokenizer's batch_decode.
        return self.tokenizer.batch_decode(*_a , **_a )

    def SCREAMING_SNAKE_CASE__ ( self:Optional[Any] , *_a:int , **_a:str ):
        # Forwarded to the tokenizer's decode.
        return self.tokenizer.decode(*_a , **_a )

    @property
    def SCREAMING_SNAKE_CASE__ ( self:List[Any] ):
        # Deprecated alias kept for backward compatibility.
        warnings.warn(
            '''`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.''' , _a , )
        return self.image_processor_class

    @property
    def SCREAMING_SNAKE_CASE__ ( self:Union[str, Any] ):
        # Deprecated alias kept for backward compatibility.
        warnings.warn(
            '''`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.''' , _a , )
        return self.image_processor
33
# Configuration for Data2Vec-Vision (BEiT-style) models, plus its ONNX
# export configuration.
# NOTE(review): identifiers look machine-mangled — every `__init__` parameter
# is named `_a` (duplicate parameter names are a SyntaxError as written) and
# each local is assigned to `snake_case__` while the body reads the original
# names (`hidden_size`, ...).  Code left byte-identical; only comments added.
from collections import OrderedDict
from typing import Mapping

from packaging import version

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging


lowerCamelCase__ : List[Any] = logging.get_logger(__name__)

# Map of canonical checkpoints to their hosted config files.
lowerCamelCase__ : Optional[int] = {
    """facebook/data2vec-vision-base-ft""": (
        """https://huggingface.co/facebook/data2vec-vision-base-ft/resolve/main/config.json"""
    ),
}


class __magic_name__ (snake_case_ ):
    '''simple docstring'''

    __lowercase : Optional[int] = 'data2vec-vision'

    def __init__( self:int , _a:Tuple=7_68 , _a:int=12 , _a:Any=12 , _a:Optional[int]=30_72 , _a:Optional[int]="gelu" , _a:Any=0.0 , _a:Any=0.0 , _a:List[str]=0.02 , _a:Dict=1e-12 , _a:Tuple=2_24 , _a:Any=16 , _a:str=3 , _a:str=False , _a:Union[str, Any]=False , _a:Optional[int]=False , _a:Any=False , _a:Dict=0.1 , _a:Dict=0.1 , _a:str=True , _a:str=[3, 5, 7, 11] , _a:List[str]=[1, 2, 3, 6] , _a:List[str]=True , _a:Any=0.4 , _a:str=2_56 , _a:Union[str, Any]=1 , _a:int=False , _a:Optional[int]=2_55 , **_a:Dict , ):
        super().__init__(**_a )

        # Transformer encoder hyper-parameters.
        snake_case__ = hidden_size
        snake_case__ = num_hidden_layers
        snake_case__ = num_attention_heads
        snake_case__ = intermediate_size
        snake_case__ = hidden_act
        snake_case__ = hidden_dropout_prob
        snake_case__ = attention_probs_dropout_prob
        snake_case__ = initializer_range
        snake_case__ = layer_norm_eps
        # Vision / patch-embedding hyper-parameters.
        snake_case__ = image_size
        snake_case__ = patch_size
        snake_case__ = num_channels
        snake_case__ = use_mask_token
        snake_case__ = use_absolute_position_embeddings
        snake_case__ = use_relative_position_bias
        snake_case__ = use_shared_relative_position_bias
        snake_case__ = layer_scale_init_value
        snake_case__ = drop_path_rate
        snake_case__ = use_mean_pooling
        # decode head attributes (semantic segmentation)
        snake_case__ = out_indices
        snake_case__ = pool_scales
        # auxiliary head attributes (semantic segmentation)
        snake_case__ = use_auxiliary_head
        snake_case__ = auxiliary_loss_weight
        snake_case__ = auxiliary_channels
        snake_case__ = auxiliary_num_convs
        snake_case__ = auxiliary_concat_input
        snake_case__ = semantic_loss_ignore_index


class __magic_name__ (snake_case_ ):
    '''simple docstring'''

    # Minimum torch/opset version this ONNX export config supports.
    __lowercase : Any = version.parse('1.11' )

    @property
    def SCREAMING_SNAKE_CASE__ ( self:List[str] ):
        # Declares the single `pixel_values` input and its dynamic axes.
        return OrderedDict(
            [
                ('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}),
            ] )

    @property
    def SCREAMING_SNAKE_CASE__ ( self:Tuple ):
        # Numerical tolerance used when validating the ONNX export.
        return 1e-4
33
1
# Placeholder ("dummy") classes that raise a helpful ImportError, via
# requires_backends, when Flax is not installed; the import machinery
# substitutes the real implementations when it is.
# NOTE(review): all classes carry the same mangled name `__magic_name__`
# (only the last definition survives at module level), the metaclass
# reference `snake_case_` is undefined in this chunk (presumably
# DummyObject), and the `*_a, **_a` parameter pairs use a duplicated name —
# a SyntaxError as written.  Code left byte-identical; only comments added.
from ..utils import DummyObject, requires_backends


class __magic_name__ (metaclass=snake_case_ ):
    '''simple docstring'''

    # Backend(s) required to instantiate the real class.
    __lowercase : Union[str, Any] = ['flax']

    def __init__( self:str , *_a:Tuple , **_a:str ):
        requires_backends(self , ['''flax'''] )

    @classmethod
    def SCREAMING_SNAKE_CASE__ ( cls:str , *_a:Optional[int] , **_a:Optional[Any] ):
        requires_backends(cls , ['''flax'''] )

    @classmethod
    def SCREAMING_SNAKE_CASE__ ( cls:str , *_a:Union[str, Any] , **_a:List[str] ):
        requires_backends(cls , ['''flax'''] )


class __magic_name__ (metaclass=snake_case_ ):
    '''simple docstring'''

    __lowercase : List[Any] = ['flax']

    def __init__( self:Dict , *_a:int , **_a:List[str] ):
        requires_backends(self , ['''flax'''] )

    @classmethod
    def SCREAMING_SNAKE_CASE__ ( cls:List[Any] , *_a:List[str] , **_a:List[Any] ):
        requires_backends(cls , ['''flax'''] )

    @classmethod
    def SCREAMING_SNAKE_CASE__ ( cls:Any , *_a:Tuple , **_a:List[str] ):
        requires_backends(cls , ['''flax'''] )


class __magic_name__ (metaclass=snake_case_ ):
    '''simple docstring'''

    __lowercase : Optional[int] = ['flax']

    def __init__( self:List[Any] , *_a:Any , **_a:int ):
        requires_backends(self , ['''flax'''] )

    @classmethod
    def SCREAMING_SNAKE_CASE__ ( cls:Any , *_a:List[str] , **_a:Tuple ):
        requires_backends(cls , ['''flax'''] )

    @classmethod
    def SCREAMING_SNAKE_CASE__ ( cls:int , *_a:str , **_a:int ):
        requires_backends(cls , ['''flax'''] )


class __magic_name__ (metaclass=snake_case_ ):
    '''simple docstring'''

    __lowercase : Optional[Any] = ['flax']

    def __init__( self:Tuple , *_a:List[Any] , **_a:Tuple ):
        requires_backends(self , ['''flax'''] )

    @classmethod
    def SCREAMING_SNAKE_CASE__ ( cls:Union[str, Any] , *_a:Any , **_a:int ):
        requires_backends(cls , ['''flax'''] )

    @classmethod
    def SCREAMING_SNAKE_CASE__ ( cls:List[Any] , *_a:Optional[int] , **_a:List[str] ):
        requires_backends(cls , ['''flax'''] )


class __magic_name__ (metaclass=snake_case_ ):
    '''simple docstring'''

    __lowercase : Optional[Any] = ['flax']

    def __init__( self:List[str] , *_a:Any , **_a:int ):
        requires_backends(self , ['''flax'''] )

    @classmethod
    def SCREAMING_SNAKE_CASE__ ( cls:Any , *_a:Optional[Any] , **_a:Optional[int] ):
        requires_backends(cls , ['''flax'''] )

    @classmethod
    def SCREAMING_SNAKE_CASE__ ( cls:Optional[int] , *_a:Any , **_a:Tuple ):
        requires_backends(cls , ['''flax'''] )


class __magic_name__ (metaclass=snake_case_ ):
    '''simple docstring'''

    __lowercase : List[Any] = ['flax']

    def __init__( self:Optional[Any] , *_a:int , **_a:List[str] ):
        requires_backends(self , ['''flax'''] )

    @classmethod
    def SCREAMING_SNAKE_CASE__ ( cls:Union[str, Any] , *_a:Tuple , **_a:Any ):
        requires_backends(cls , ['''flax'''] )

    @classmethod
    def SCREAMING_SNAKE_CASE__ ( cls:List[Any] , *_a:Any , **_a:Any ):
        requires_backends(cls , ['''flax'''] )


class __magic_name__ (metaclass=snake_case_ ):
    '''simple docstring'''

    __lowercase : Optional[Any] = ['flax']

    def __init__( self:Any , *_a:Any , **_a:Tuple ):
        requires_backends(self , ['''flax'''] )

    @classmethod
    def SCREAMING_SNAKE_CASE__ ( cls:int , *_a:Dict , **_a:Union[str, Any] ):
        requires_backends(cls , ['''flax'''] )

    @classmethod
    def SCREAMING_SNAKE_CASE__ ( cls:int , *_a:int , **_a:List[str] ):
        requires_backends(cls , ['''flax'''] )


class __magic_name__ (metaclass=snake_case_ ):
    '''simple docstring'''

    __lowercase : Optional[Any] = ['flax']

    def __init__( self:Any , *_a:Optional[int] , **_a:Any ):
        requires_backends(self , ['''flax'''] )

    @classmethod
    def SCREAMING_SNAKE_CASE__ ( cls:Optional[Any] , *_a:List[Any] , **_a:Tuple ):
        requires_backends(cls , ['''flax'''] )

    @classmethod
    def SCREAMING_SNAKE_CASE__ ( cls:List[str] , *_a:Any , **_a:List[Any] ):
        requires_backends(cls , ['''flax'''] )


class __magic_name__ (metaclass=snake_case_ ):
    '''simple docstring'''

    __lowercase : str = ['flax']

    def __init__( self:Optional[Any] , *_a:Any , **_a:List[Any] ):
        requires_backends(self , ['''flax'''] )

    @classmethod
    def SCREAMING_SNAKE_CASE__ ( cls:Any , *_a:List[Any] , **_a:List[str] ):
        requires_backends(cls , ['''flax'''] )

    @classmethod
    def SCREAMING_SNAKE_CASE__ ( cls:str , *_a:Dict , **_a:List[str] ):
        requires_backends(cls , ['''flax'''] )


class __magic_name__ (metaclass=snake_case_ ):
    '''simple docstring'''

    __lowercase : List[str] = ['flax']

    def __init__( self:int , *_a:Union[str, Any] , **_a:Tuple ):
        requires_backends(self , ['''flax'''] )

    @classmethod
    def SCREAMING_SNAKE_CASE__ ( cls:List[Any] , *_a:Optional[int] , **_a:int ):
        requires_backends(cls , ['''flax'''] )

    @classmethod
    def SCREAMING_SNAKE_CASE__ ( cls:Union[str, Any] , *_a:List[str] , **_a:Optional[Any] ):
        requires_backends(cls , ['''flax'''] )


class __magic_name__ (metaclass=snake_case_ ):
    '''simple docstring'''

    __lowercase : List[str] = ['flax']

    def __init__( self:str , *_a:List[str] , **_a:List[Any] ):
        requires_backends(self , ['''flax'''] )

    @classmethod
    def SCREAMING_SNAKE_CASE__ ( cls:List[Any] , *_a:int , **_a:Union[str, Any] ):
        requires_backends(cls , ['''flax'''] )

    @classmethod
    def SCREAMING_SNAKE_CASE__ ( cls:Tuple , *_a:Optional[int] , **_a:List[str] ):
        requires_backends(cls , ['''flax'''] )


class __magic_name__ (metaclass=snake_case_ ):
    '''simple docstring'''

    __lowercase : Tuple = ['flax']

    def __init__( self:Optional[Any] , *_a:Optional[int] , **_a:Optional[Any] ):
        requires_backends(self , ['''flax'''] )

    @classmethod
    def SCREAMING_SNAKE_CASE__ ( cls:Any , *_a:Any , **_a:Any ):
        requires_backends(cls , ['''flax'''] )

    @classmethod
    def SCREAMING_SNAKE_CASE__ ( cls:Any , *_a:Optional[Any] , **_a:Union[str, Any] ):
        requires_backends(cls , ['''flax'''] )


class __magic_name__ (metaclass=snake_case_ ):
    '''simple docstring'''

    __lowercase : List[Any] = ['flax']

    def __init__( self:Optional[Any] , *_a:Optional[Any] , **_a:Any ):
        requires_backends(self , ['''flax'''] )

    @classmethod
    def SCREAMING_SNAKE_CASE__ ( cls:List[Any] , *_a:Union[str, Any] , **_a:Dict ):
        requires_backends(cls , ['''flax'''] )

    @classmethod
    def SCREAMING_SNAKE_CASE__ ( cls:Optional[int] , *_a:Any , **_a:str ):
        requires_backends(cls , ['''flax'''] )
33
"""torch.hub entry points for the Transformers auto classes.

Each public function delegates to the corresponding ``Auto*`` class's
``from_pretrained`` and inherits its docstring via ``add_start_docstrings``.
NOTE(review): the obfuscated original bound the src path and the dependency
list to throwaway names, gave all seven entry points the same name, and
duplicated the varargs parameter name (a SyntaxError); the conventional
hubconf names are restored here — confirm against the upstream hubconf.py.
"""
import os
import sys


SRC_DIR = os.path.join(os.path.dirname(__file__), """src""")
sys.path.append(SRC_DIR)


from transformers import (
    AutoConfig,
    AutoModel,
    AutoModelForCausalLM,
    AutoModelForMaskedLM,
    AutoModelForQuestionAnswering,
    AutoModelForSequenceClassification,
    AutoTokenizer,
    add_start_docstrings,
)


# Read by torch.hub to know which packages must be installed before loading.
dependencies = [
    """torch""",
    """numpy""",
    """tokenizers""",
    """filelock""",
    """requests""",
    """tqdm""",
    """regex""",
    """sentencepiece""",
    """sacremoses""",
    """importlib_metadata""",
    """huggingface_hub""",
]


@add_start_docstrings(AutoConfig.__doc__ )
def config(*args, **kwargs):
    # Entry point: load a model configuration.
    return AutoConfig.from_pretrained(*args, **kwargs)


@add_start_docstrings(AutoTokenizer.__doc__ )
def tokenizer(*args, **kwargs):
    # Entry point: load a tokenizer.
    return AutoTokenizer.from_pretrained(*args, **kwargs)


@add_start_docstrings(AutoModel.__doc__ )
def model(*args, **kwargs):
    # Entry point: load a bare base model.
    return AutoModel.from_pretrained(*args, **kwargs)


@add_start_docstrings(AutoModelForCausalLM.__doc__ )
def modelForCausalLM(*args, **kwargs):
    # Entry point: load a causal language-modeling head model.
    return AutoModelForCausalLM.from_pretrained(*args, **kwargs)


@add_start_docstrings(AutoModelForMaskedLM.__doc__ )
def modelForMaskedLM(*args, **kwargs):
    # Entry point: load a masked language-modeling head model.
    return AutoModelForMaskedLM.from_pretrained(*args, **kwargs)


@add_start_docstrings(AutoModelForSequenceClassification.__doc__ )
def modelForSequenceClassification(*args, **kwargs):
    # Entry point: load a sequence-classification head model.
    return AutoModelForSequenceClassification.from_pretrained(*args, **kwargs)


@add_start_docstrings(AutoModelForQuestionAnswering.__doc__ )
def modelForQuestionAnswering(*args, **kwargs):
    # Entry point: load a question-answering head model.
    return AutoModelForQuestionAnswering.from_pretrained(*args, **kwargs)
33
1
# DeeRoBERTa: RoBERTa with early-exit ("highway") classification heads on
# intermediate layers, built on the DeeBERT infrastructure.
# NOTE(review): identifiers look machine-mangled — locals are assigned to
# `snake_case__` and read back under their original names, and both
# `@add_start_docstrings` decorators reference an undefined `snake_case_` —
# so the file cannot run as written.  Code left byte-identical; only
# comments were added.
from __future__ import absolute_import, division, print_function, unicode_literals

from torch import nn
from torch.nn import CrossEntropyLoss, MSELoss

from transformers import RobertaConfig
from transformers.file_utils import add_start_docstrings, add_start_docstrings_to_model_forward
from transformers.models.roberta.modeling_roberta import (
    ROBERTA_INPUTS_DOCSTRING,
    ROBERTA_START_DOCSTRING,
    RobertaEmbeddings,
)

from .modeling_highway_bert import BertPreTrainedModel, DeeBertModel, HighwayException, entropy


@add_start_docstrings(
    'The RoBERTa Model transformer with early exiting (DeeRoBERTa). ' ,snake_case_ ,)
class __magic_name__ (snake_case_ ):
    '''simple docstring'''

    __lowercase : Tuple = RobertaConfig
    __lowercase : List[str] = 'roberta'

    def __init__( self:List[str] , _a:Union[str, Any] ):
        # Reuse DeeBERT's architecture, swapping in RoBERTa embeddings.
        super().__init__(_a )

        snake_case__ = RobertaEmbeddings(_a )
        self.init_weights()


@add_start_docstrings(
    'RoBERTa Model (with early exiting - DeeRoBERTa) with a classifier on top,\n    also takes care of multi-layer training. ' ,snake_case_ ,)
class __magic_name__ (snake_case_ ):
    '''simple docstring'''

    __lowercase : List[str] = RobertaConfig
    __lowercase : int = 'roberta'

    def __init__( self:str , _a:Optional[Any] ):
        super().__init__(_a )
        snake_case__ = config.num_labels
        snake_case__ = config.num_hidden_layers

        snake_case__ = DeeRobertaModel(_a )
        snake_case__ = nn.Dropout(config.hidden_dropout_prob )
        snake_case__ = nn.Linear(config.hidden_size , self.config.num_labels )

    @add_start_docstrings_to_model_forward(_a )
    def SCREAMING_SNAKE_CASE__ ( self:str , _a:Any=None , _a:Any=None , _a:str=None , _a:Optional[Any]=None , _a:Union[str, Any]=None , _a:Optional[Any]=None , _a:Dict=None , _a:str=-1 , _a:Optional[int]=False , ):
        # forward(): run the backbone; a HighwayException carries an early
        # exit's outputs when an intermediate layer exits first.
        snake_case__ = self.num_layers
        try:
            snake_case__ = self.roberta(
                _a , attention_mask=_a , token_type_ids=_a , position_ids=_a , head_mask=_a , inputs_embeds=_a , )

            snake_case__ = outputs[1]

            snake_case__ = self.dropout(_a )
            snake_case__ = self.classifier(_a )
            snake_case__ = (logits,) + outputs[2:]  # add hidden states and attention if they are here
        except HighwayException as e:
            snake_case__ = e.message
            snake_case__ = e.exit_layer
            snake_case__ = outputs[0]

        if not self.training:
            snake_case__ = entropy(_a )
            snake_case__ = []
            snake_case__ = []

        if labels is not None:
            # Main-head loss: regression (MSE) for a single label, otherwise
            # cross-entropy classification.
            if self.num_labels == 1:
                #  We are doing regression
                snake_case__ = MSELoss()
                snake_case__ = loss_fct(logits.view(-1 ) , labels.view(-1 ) )
            else:
                snake_case__ = CrossEntropyLoss()
                snake_case__ = loss_fct(logits.view(-1 , self.num_labels ) , labels.view(-1 ) )

            # work with highway exits
            snake_case__ = []
            for highway_exit in outputs[-1]:
                snake_case__ = highway_exit[0]
                if not self.training:
                    highway_logits_all.append(_a )
                    highway_entropy.append(highway_exit[2] )
                if self.num_labels == 1:
                    #  We are doing regression
                    snake_case__ = MSELoss()
                    snake_case__ = loss_fct(highway_logits.view(-1 ) , labels.view(-1 ) )
                else:
                    snake_case__ = CrossEntropyLoss()
                    snake_case__ = loss_fct(highway_logits.view(-1 , self.num_labels ) , labels.view(-1 ) )
                highway_losses.append(_a )

            if train_highway:
                # Train only the intermediate exits in this mode.
                snake_case__ = (sum(highway_losses[:-1] ),) + outputs
                # exclude the final highway, of course
            else:
                snake_case__ = (loss,) + outputs
        if not self.training:
            snake_case__ = outputs + ((original_entropy, highway_entropy), exit_layer)
            if output_layer >= 0:
                snake_case__ = (
                    (outputs[0],) + (highway_logits_all[output_layer],) + outputs[2:]
                )  # use the highway of the last layer

        return outputs  # (loss), logits, (hidden_states), (attentions), entropy
33
# Unit tests for CMStochasticIterativeScheduler (consistency models).
# NOTE(review): identifiers look machine-mangled — each local is assigned to
# `snake_case__` and then read back under its original name (`config`,
# `scheduler`, ...), so the file cannot run as written.  Code left
# byte-identical; only comments were added.
import torch

from diffusers import CMStochasticIterativeScheduler

from .test_schedulers import SchedulerCommonTest


class __magic_name__ (snake_case_ ):
    '''simple docstring'''

    __lowercase : str = (CMStochasticIterativeScheduler,)
    __lowercase : List[str] = 10

    def SCREAMING_SNAKE_CASE__ ( self:int , **_a:Optional[int] ):
        # Base scheduler config for the tests; kwargs override the defaults.
        snake_case__ = {
            '''num_train_timesteps''': 2_01,
            '''sigma_min''': 0.002,
            '''sigma_max''': 80.0,
        }

        config.update(**_a )
        return config

    def SCREAMING_SNAKE_CASE__ ( self:Optional[int] ):
        # Two consecutive steps must preserve the sample shape.
        snake_case__ = 10
        snake_case__ = self.get_scheduler_config()
        snake_case__ = self.scheduler_classes[0](**_a )

        scheduler.set_timesteps(_a )

        snake_case__ = scheduler.timesteps[0]
        snake_case__ = scheduler.timesteps[1]

        snake_case__ = self.dummy_sample
        snake_case__ = 0.1 * sample

        snake_case__ = scheduler.step(_a , _a , _a ).prev_sample
        snake_case__ = scheduler.step(_a , _a , _a ).prev_sample

        self.assertEqual(output_a.shape , sample.shape )
        self.assertEqual(output_a.shape , output_a.shape )

    def SCREAMING_SNAKE_CASE__ ( self:Any ):
        # Scheduler must accept a range of training-timestep counts.
        for timesteps in [10, 50, 1_00, 10_00]:
            self.check_over_configs(num_train_timesteps=_a )

    def SCREAMING_SNAKE_CASE__ ( self:List[str] ):
        # Both clip_denoised settings must be valid configurations.
        for clip_denoised in [True, False]:
            self.check_over_configs(clip_denoised=_a )

    def SCREAMING_SNAKE_CASE__ ( self:int ):
        # Full denoising loop; final statistics checked against references.
        snake_case__ = self.scheduler_classes[0]
        snake_case__ = self.get_scheduler_config()
        snake_case__ = scheduler_class(**_a )

        snake_case__ = 1
        scheduler.set_timesteps(_a )

        snake_case__ = scheduler.timesteps

        snake_case__ = torch.manual_seed(0 )

        snake_case__ = self.dummy_model()
        snake_case__ = self.dummy_sample_deter * scheduler.init_noise_sigma

        for i, t in enumerate(_a ):
            # 1. scale model input
            snake_case__ = scheduler.scale_model_input(_a , _a )

            # 2. predict noise residual
            snake_case__ = model(_a , _a )

            # 3. predict previous sample x_t-1
            snake_case__ = scheduler.step(_a , _a , _a , generator=_a ).prev_sample

            snake_case__ = pred_prev_sample

        snake_case__ = torch.sum(torch.abs(_a ) )
        snake_case__ = torch.mean(torch.abs(_a ) )

        assert abs(result_sum.item() - 192.7614 ) < 1e-2
        assert abs(result_mean.item() - 0.2510 ) < 1e-3

    def SCREAMING_SNAKE_CASE__ ( self:Tuple ):
        # Same loop with an explicit custom timestep schedule.
        snake_case__ = self.scheduler_classes[0]
        snake_case__ = self.get_scheduler_config()
        snake_case__ = scheduler_class(**_a )

        snake_case__ = [1_06, 0]
        scheduler.set_timesteps(timesteps=_a )

        snake_case__ = scheduler.timesteps

        snake_case__ = torch.manual_seed(0 )

        snake_case__ = self.dummy_model()
        snake_case__ = self.dummy_sample_deter * scheduler.init_noise_sigma

        for t in timesteps:
            # 1. scale model input
            snake_case__ = scheduler.scale_model_input(_a , _a )

            # 2. predict noise residual
            snake_case__ = model(_a , _a )

            # 3. predict previous sample x_t-1
            snake_case__ = scheduler.step(_a , _a , _a , generator=_a ).prev_sample

            snake_case__ = pred_prev_sample

        snake_case__ = torch.sum(torch.abs(_a ) )
        snake_case__ = torch.mean(torch.abs(_a ) )

        assert abs(result_sum.item() - 347.6357 ) < 1e-2
        assert abs(result_mean.item() - 0.4527 ) < 1e-3

    def SCREAMING_SNAKE_CASE__ ( self:Any ):
        # Non-descending custom timesteps must be rejected.
        snake_case__ = self.scheduler_classes[0]
        snake_case__ = self.get_scheduler_config()
        snake_case__ = scheduler_class(**_a )

        snake_case__ = [39, 30, 12, 15, 0]

        with self.assertRaises(_a , msg='''`timesteps` must be in descending order.''' ):
            scheduler.set_timesteps(timesteps=_a )

    def SCREAMING_SNAKE_CASE__ ( self:Any ):
        # Passing both num_inference_steps and timesteps must raise.
        snake_case__ = self.scheduler_classes[0]
        snake_case__ = self.get_scheduler_config()
        snake_case__ = scheduler_class(**_a )

        snake_case__ = [39, 30, 12, 1, 0]
        snake_case__ = len(_a )

        with self.assertRaises(_a , msg='''Can only pass one of `num_inference_steps` or `timesteps`.''' ):
            scheduler.set_timesteps(num_inference_steps=_a , timesteps=_a )

    def SCREAMING_SNAKE_CASE__ ( self:Tuple ):
        # Timesteps at/after the training range must be rejected.
        snake_case__ = self.scheduler_classes[0]
        snake_case__ = self.get_scheduler_config()
        snake_case__ = scheduler_class(**_a )

        snake_case__ = [scheduler.config.num_train_timesteps]

        with self.assertRaises(
            _a , msg='''`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}}''' , ):
            scheduler.set_timesteps(timesteps=_a )
33
1
# `datasets` Metric wrapper around the official MAUVE implementation
# (https://github.com/krishnap25/mauve).
# NOTE(review): identifiers look machine-mangled — the docstring constants
# are all assigned to `lowerCamelCase__` while the decorator reads
# `_DESCRIPTION`/`_KWARGS_DESCRIPTION`, and the `_compute` parameters are
# all named `_a` (duplicate parameter names are a SyntaxError as written).
# Code left byte-identical; only comments were added.  The internal line
# wrapping of the long string constants below is reconstructed.
import faiss  # noqa: F401 # Here to have a nice missing dependency error message early on
import numpy  # noqa: F401 # Here to have a nice missing dependency error message early on
import requests  # noqa: F401 # Here to have a nice missing dependency error message early on
import sklearn  # noqa: F401 # Here to have a nice missing dependency error message early on
import tqdm  # noqa: F401 # Here to have a nice missing dependency error message early on
from mauve import compute_mauve  # From: mauve-text

import datasets


lowerCamelCase__ : Optional[Any] = """\
@inproceedings{pillutla-etal:mauve:neurips2021,
  title={MAUVE: Measuring the Gap Between Neural Text and Human Text using Divergence Frontiers},
  author={Pillutla, Krishna and Swayamdipta, Swabha and Zellers, Rowan and Thickstun, John and Welleck, Sean and Choi, Yejin and Harchaoui, Zaid},
  booktitle = {NeurIPS},
  year = {2021}
}
"""

lowerCamelCase__ : int = """\
MAUVE is a library built on PyTorch and HuggingFace Transformers to measure the gap between neural text and human text
with the eponymous MAUVE measure.

MAUVE summarizes both Type I and Type II errors measured softly using Kullback–Leibler (KL) divergences.

For details, see the MAUVE paper: https://arxiv.org/abs/2102.01454 (Neurips, 2021).

This metrics is a wrapper around the official implementation of MAUVE:
https://github.com/krishnap25/mauve
"""

lowerCamelCase__ : Any = """
Calculates MAUVE scores between two lists of generated text and reference text.
Args:
    predictions: list of generated text to score. Each predictions
        should be a string with tokens separated by spaces.
    references: list of reference for each prediction. Each
        reference should be a string with tokens separated by spaces.
Optional Args:
    num_buckets: the size of the histogram to quantize P and Q. Options: 'auto' (default) or an integer
    pca_max_data: the number data points to use for PCA dimensionality reduction prior to clustering. If -1, use all the data. Default -1
    kmeans_explained_var: amount of variance of the data to keep in dimensionality reduction by PCA. Default 0.9
    kmeans_num_redo: number of times to redo k-means clustering (the best objective is kept). Default 5
    kmeans_max_iter: maximum number of k-means iterations. Default 500
    featurize_model_name: name of the model from which features are obtained. Default 'gpt2-large' Use one of ['gpt2', 'gpt2-medium', 'gpt2-large', 'gpt2-xl'].
    device_id: Device for featurization. Supply a GPU id (e.g. 0 or 3) to use GPU. If no GPU with this id is found, use CPU
    max_text_length: maximum number of tokens to consider. Default 1024
    divergence_curve_discretization_size: Number of points to consider on the divergence curve. Default 25
    mauve_scaling_factor: \"c\" from the paper. Default 5.
    verbose: If True (default), print running time updates
    seed: random seed to initialize k-means cluster assignments.
Returns:
    mauve: MAUVE score, a number between 0 and 1. Larger values indicate that P and Q are closer,
    frontier_integral: Frontier Integral, a number between 0 and 1. Smaller values indicate that P and Q are closer,
    divergence_curve: a numpy.ndarray of shape (m, 2); plot it with matplotlib to view the divergence curve,
    p_hist: a discrete distribution, which is a quantized version of the text distribution p_text,
    q_hist: same as above, but with q_text.
Examples:

    >>> # faiss segfaults in doctest for some reason, so the .compute call is not tested with doctest
    >>> import datasets
    >>> mauve = datasets.load_metric('mauve')
    >>> predictions = [\"hello there\", \"general kenobi\"]
    >>> references = [\"hello there\", \"general kenobi\"]
    >>> out = mauve.compute(predictions=predictions, references=references) # doctest: +SKIP
    >>> print(out.mauve) # doctest: +SKIP
    1.0
"""


@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION ,_KWARGS_DESCRIPTION )
class __magic_name__ (datasets.Metric ):
    '''simple docstring'''

    def SCREAMING_SNAKE_CASE__ ( self:int ):
        # Metric metadata: both inputs are plain strings.
        return datasets.MetricInfo(
            description=_DESCRIPTION , citation=_CITATION , homepage='''https://github.com/krishnap25/mauve''' , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
                {
                    '''predictions''': datasets.Value('''string''' , id='''sequence''' ),
                    '''references''': datasets.Value('''string''' , id='''sequence''' ),
                } ) , codebase_urls=['''https://github.com/krishnap25/mauve'''] , reference_urls=[
                '''https://arxiv.org/abs/2102.01454''',
                '''https://github.com/krishnap25/mauve''',
            ] , )

    def SCREAMING_SNAKE_CASE__ ( self:List[str] , _a:Any , _a:Tuple , _a:str=None , _a:str=None , _a:List[Any]=None , _a:Dict=None , _a:List[Any]="auto" , _a:Optional[int]=-1 , _a:int=0.9 , _a:str=5 , _a:List[str]=5_00 , _a:Tuple="gpt2-large" , _a:Union[str, Any]=-1 , _a:Optional[int]=10_24 , _a:Optional[Any]=25 , _a:Optional[Any]=5 , _a:Optional[Any]=True , _a:List[str]=25 , ):
        # _compute: delegates directly to the reference implementation.
        snake_case__ = compute_mauve(
            p_text=_a , q_text=_a , p_features=_a , q_features=_a , p_tokens=_a , q_tokens=_a , num_buckets=_a , pca_max_data=_a , kmeans_explained_var=_a , kmeans_num_redo=_a , kmeans_max_iter=_a , featurize_model_name=_a , device_id=_a , max_text_length=_a , divergence_curve_discretization_size=_a , mauve_scaling_factor=_a , verbose=_a , seed=_a , )
        return out
"""NumPy implementations of the sigmoid and SiLU (swish) activations.

NOTE(review): in the obfuscated original both functions were named
``SCREAMING_SNAKE_CASE`` (the second definition shadowed the first) and
their bodies referenced the undefined names ``vector``/``sigmoid``; the
names are restored here so the module runs.
"""
import numpy as np


def sigmoid(vector: np.ndarray) -> np.ndarray:
    """Return the element-wise logistic sigmoid, 1 / (1 + e^-x)."""
    return 1 / (1 + np.exp(-vector))


def sigmoid_linear_unit(vector: np.ndarray) -> np.ndarray:
    """Return the element-wise SiLU (swish) activation, x * sigmoid(x)."""
    return vector * sigmoid(vector)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
33
1
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices

# Fix: the original annotated this with `List[Any]`, a name that is never
# imported, which raises NameError when the module-level annotation is
# evaluated.
lowerCamelCase__ = logging.get_logger(__name__)


class __magic_name__(snake_case_, snake_case_):
    """Configuration for the MaskFormer Swin backbone.

    Stores hyper-parameters for a Swin Transformer used as a MaskFormer
    backbone (patching, per-stage depths/heads, dropout rates, etc.) and
    derives the post-stage ``hidden_size`` plus the stage/feature alignment
    used by backbone utilities.

    NOTE(review): the base classes are the mangled placeholder
    ``snake_case_``; from the imports above they are presumably
    ``BackboneConfigMixin`` and ``PretrainedConfig`` — confirm against the
    unmangled original.
    """

    # Fix: both class attributes were assigned to the same mangled name
    # ``__lowercase``, so the model-type string was silently overwritten by
    # the attribute map. Canonical HF names restored.
    model_type = "maskformer-swin"

    attribute_map = {
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }

    def __init__(
        self,
        image_size=224,
        patch_size=4,
        num_channels=3,
        embed_dim=96,
        depths=[2, 2, 6, 2],
        num_heads=[3, 6, 12, 24],
        window_size=7,
        mlp_ratio=4.0,
        qkv_bias=True,
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        drop_path_rate=0.1,
        hidden_act="gelu",
        use_absolute_embeddings=False,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        out_features=None,
        out_indices=None,
        **kwargs,
    ):
        """Create the config.

        Bug fixes relative to the mangled original:
        - every parameter was named ``_a`` (duplicate argument names are a
          SyntaxError); names are restored from the values the body reads;
        - every value was assigned to the throwaway local ``snake_case__``
          instead of an attribute on ``self``, so the config never stored
          any of its arguments; they are now stored as attributes.
        """
        super().__init__(**kwargs)
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_layers = len(depths)
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        # we set the hidden_size attribute in order to make Swin work with
        # VisionEncoderDecoderModel; this indicates the channel dimension
        # after the last stage of the model
        self.hidden_size = int(embed_dim * 2 ** (len(depths) - 1))
        self.stage_names = ["stem"] + [f"stage{idx}" for idx in range(1, len(depths) + 1)]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names
        )
33
def SCREAMING_SNAKE_CASE(__lowerCAmelCase: int = 100) -> int:
    """Project Euler 29: count distinct terms a**b for 2 <= a, b <= n.

    Bug fixes: the original body read the undefined names ``n`` and
    ``collect_powers`` (mangled leftovers), added the *input* rather than
    the computed power to the set, and returned ``len`` of the input int
    instead of the set's size. The ``__main__`` guard also called an
    undefined ``solution``.

    >>> SCREAMING_SNAKE_CASE(5)
    15
    """
    collect_powers = set()
    upper_limit = __lowerCAmelCase + 1  # make the bound inclusive for range()
    for a in range(2, upper_limit):
        for b in range(2, upper_limit):
            collect_powers.add(a**b)  # record the current power; set dedupes
    return len(collect_powers)


if __name__ == "__main__":
    print("""Number of terms """, SCREAMING_SNAKE_CASE(int(str(input()).strip())))
33
1
lowerCamelCase__ : List[Any] = """0.18.2""" from .configuration_utils import ConfigMixin from .utils import ( OptionalDependencyNotAvailable, is_flax_available, is_inflect_available, is_invisible_watermark_available, is_k_diffusion_available, is_k_diffusion_version, is_librosa_available, is_note_seq_available, is_onnx_available, is_scipy_available, is_torch_available, is_torchsde_available, is_transformers_available, is_transformers_version, is_unidecode_available, logging, ) try: if not is_onnx_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from .utils.dummy_onnx_objects import * # noqa F403 else: from .pipelines import OnnxRuntimeModel try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from .utils.dummy_pt_objects import * # noqa F403 else: from .models import ( AutoencoderKL, ControlNetModel, ModelMixin, PriorTransformer, TaFilmDecoder, TransformeraDModel, UNetaDModel, UNetaDConditionModel, UNetaDModel, UNetaDConditionModel, VQModel, ) from .optimization import ( get_constant_schedule, get_constant_schedule_with_warmup, get_cosine_schedule_with_warmup, get_cosine_with_hard_restarts_schedule_with_warmup, get_linear_schedule_with_warmup, get_polynomial_decay_schedule_with_warmup, get_scheduler, ) from .pipelines import ( AudioPipelineOutput, ConsistencyModelPipeline, DanceDiffusionPipeline, DDIMPipeline, DDPMPipeline, DiffusionPipeline, DiTPipeline, ImagePipelineOutput, KarrasVePipeline, LDMPipeline, LDMSuperResolutionPipeline, PNDMPipeline, RePaintPipeline, ScoreSdeVePipeline, ) from .schedulers import ( CMStochasticIterativeScheduler, DDIMInverseScheduler, DDIMParallelScheduler, DDIMScheduler, DDPMParallelScheduler, DDPMScheduler, DEISMultistepScheduler, DPMSolverMultistepInverseScheduler, DPMSolverMultistepScheduler, DPMSolverSinglestepScheduler, EulerAncestralDiscreteScheduler, EulerDiscreteScheduler, HeunDiscreteScheduler, IPNDMScheduler, 
KarrasVeScheduler, KDPMaAncestralDiscreteScheduler, KDPMaDiscreteScheduler, PNDMScheduler, RePaintScheduler, SchedulerMixin, ScoreSdeVeScheduler, UnCLIPScheduler, UniPCMultistepScheduler, VQDiffusionScheduler, ) from .training_utils import EMAModel try: if not (is_torch_available() and is_scipy_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from .utils.dummy_torch_and_scipy_objects import * # noqa F403 else: from .schedulers import LMSDiscreteScheduler try: if not (is_torch_available() and is_torchsde_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from .utils.dummy_torch_and_torchsde_objects import * # noqa F403 else: from .schedulers import DPMSolverSDEScheduler try: if not (is_torch_available() and is_transformers_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from .utils.dummy_torch_and_transformers_objects import * # noqa F403 else: from .pipelines import ( AltDiffusionImgaImgPipeline, AltDiffusionPipeline, AudioLDMPipeline, CycleDiffusionPipeline, IFImgaImgPipeline, IFImgaImgSuperResolutionPipeline, IFInpaintingPipeline, IFInpaintingSuperResolutionPipeline, IFPipeline, IFSuperResolutionPipeline, ImageTextPipelineOutput, KandinskyImgaImgPipeline, KandinskyInpaintPipeline, KandinskyPipeline, KandinskyPriorPipeline, KandinskyVaaControlnetImgaImgPipeline, KandinskyVaaControlnetPipeline, KandinskyVaaImgaImgPipeline, KandinskyVaaInpaintPipeline, KandinskyVaaPipeline, KandinskyVaaPriorEmbaEmbPipeline, KandinskyVaaPriorPipeline, LDMTextToImagePipeline, PaintByExamplePipeline, SemanticStableDiffusionPipeline, ShapEImgaImgPipeline, ShapEPipeline, StableDiffusionAttendAndExcitePipeline, StableDiffusionControlNetImgaImgPipeline, StableDiffusionControlNetInpaintPipeline, StableDiffusionControlNetPipeline, StableDiffusionDepthaImgPipeline, StableDiffusionDiffEditPipeline, StableDiffusionImageVariationPipeline, StableDiffusionImgaImgPipeline, 
StableDiffusionInpaintPipeline, StableDiffusionInpaintPipelineLegacy, StableDiffusionInstructPixaPixPipeline, StableDiffusionLatentUpscalePipeline, StableDiffusionLDMaDPipeline, StableDiffusionModelEditingPipeline, StableDiffusionPanoramaPipeline, StableDiffusionParadigmsPipeline, StableDiffusionPipeline, StableDiffusionPipelineSafe, StableDiffusionPixaPixZeroPipeline, StableDiffusionSAGPipeline, StableDiffusionUpscalePipeline, StableUnCLIPImgaImgPipeline, StableUnCLIPPipeline, TextToVideoSDPipeline, TextToVideoZeroPipeline, UnCLIPImageVariationPipeline, UnCLIPPipeline, UniDiffuserModel, UniDiffuserPipeline, UniDiffuserTextDecoder, VersatileDiffusionDualGuidedPipeline, VersatileDiffusionImageVariationPipeline, VersatileDiffusionPipeline, VersatileDiffusionTextToImagePipeline, VideoToVideoSDPipeline, VQDiffusionPipeline, ) try: if not (is_torch_available() and is_transformers_available() and is_invisible_watermark_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from .utils.dummy_torch_and_transformers_and_invisible_watermark_objects import * # noqa F403 else: from .pipelines import StableDiffusionXLImgaImgPipeline, StableDiffusionXLPipeline try: if not (is_torch_available() and is_transformers_available() and is_k_diffusion_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from .utils.dummy_torch_and_transformers_and_k_diffusion_objects import * # noqa F403 else: from .pipelines import StableDiffusionKDiffusionPipeline try: if not (is_torch_available() and is_transformers_available() and is_onnx_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from .utils.dummy_torch_and_transformers_and_onnx_objects import * # noqa F403 else: from .pipelines import ( OnnxStableDiffusionImgaImgPipeline, OnnxStableDiffusionInpaintPipeline, OnnxStableDiffusionInpaintPipelineLegacy, OnnxStableDiffusionPipeline, OnnxStableDiffusionUpscalePipeline, 
StableDiffusionOnnxPipeline, ) try: if not (is_torch_available() and is_librosa_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from .utils.dummy_torch_and_librosa_objects import * # noqa F403 else: from .pipelines import AudioDiffusionPipeline, Mel try: if not (is_transformers_available() and is_torch_available() and is_note_seq_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from .utils.dummy_transformers_and_torch_and_note_seq_objects import * # noqa F403 else: from .pipelines import SpectrogramDiffusionPipeline try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from .utils.dummy_flax_objects import * # noqa F403 else: from .models.controlnet_flax import FlaxControlNetModel from .models.modeling_flax_utils import FlaxModelMixin from .models.unet_ad_condition_flax import FlaxUNetaDConditionModel from .models.vae_flax import FlaxAutoencoderKL from .pipelines import FlaxDiffusionPipeline from .schedulers import ( FlaxDDIMScheduler, FlaxDDPMScheduler, FlaxDPMSolverMultistepScheduler, FlaxKarrasVeScheduler, FlaxLMSDiscreteScheduler, FlaxPNDMScheduler, FlaxSchedulerMixin, FlaxScoreSdeVeScheduler, ) try: if not (is_flax_available() and is_transformers_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from .utils.dummy_flax_and_transformers_objects import * # noqa F403 else: from .pipelines import ( FlaxStableDiffusionControlNetPipeline, FlaxStableDiffusionImgaImgPipeline, FlaxStableDiffusionInpaintPipeline, FlaxStableDiffusionPipeline, ) try: if not (is_note_seq_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from .utils.dummy_note_seq_objects import * # noqa F403 else: from .pipelines import MidiProcessor
33
from copy import deepcopy

__all__ = ["__magic_name__"]


class __magic_name__:
    """Fenwick (binary indexed) tree with point updates and prefix sums.

    Indices are 0-based; ``tree[0]`` stores element 0 directly and the
    usual Fenwick chains cover indices >= 1.

    Bug fixes relative to the mangled original: every method shared the
    single name ``SCREAMING_SNAKE_CASE__`` (so only the last definition
    survived on the class), several signatures repeated the parameter name
    ``_a`` (a SyntaxError), and assignment targets had been renamed away
    from the locals the code reads (``j``, ``i``, ``arr``, ``result``,
    ``index``). Method and local names are restored from the bodies.
    """

    def __init__(self, arr=None, size=None):
        """Build from an explicit array, or as an all-zero tree of *size*."""
        if arr is None and size is not None:
            self.size = size
            self.tree = [0] * size
        elif arr is not None:
            self.init(arr)
        else:
            raise ValueError('Either arr or size must be specified')

    def init(self, arr):
        """Initialise the tree from *arr* in O(n)."""
        self.size = len(arr)
        self.tree = deepcopy(arr)
        # Push each partial sum up to the next node in its update chain.
        for i in range(1, self.size):
            j = self.next_(i)
            if j < self.size:
                self.tree[j] += self.tree[i]

    def get_array(self):
        """Return the original array (exact inverse of :meth:`init`)."""
        arr = self.tree[:]
        for i in range(self.size - 1, 0, -1):
            j = self.next_(i)
            if j < self.size:
                arr[j] -= arr[i]
        return arr

    @staticmethod
    def next_(index):
        # Next index in the update chain (add lowest set bit).
        return index + (index & (-index))

    @staticmethod
    def prev(index):
        # Previous index in the prefix-sum chain (drop lowest set bit).
        return index - (index & (-index))

    def add(self, index, value):
        """Add *value* to the element at *index* in O(log n)."""
        if index == 0:
            self.tree[0] += value
            return
        while index < self.size:
            self.tree[index] += value
            index = self.next_(index)

    def update(self, index, value):
        """Set the element at *index* to *value*."""
        self.add(index, value - self.get(index))

    def prefix(self, right):
        """Return the sum of elements in the half-open range [0, right)."""
        if right == 0:
            return 0
        result = self.tree[0]
        right -= 1  # make right inclusive
        while right > 0:
            result += self.tree[right]
            right = self.prev(right)
        return result

    def query(self, left, right):
        """Return the sum over the half-open range [left, right)."""
        return self.prefix(right) - self.prefix(left)

    def get(self, index):
        """Return the single element at *index*."""
        return self.query(index, index + 1)

    def rank_query(self, value):
        """Binary search on prefix sums; -1 when *value* < first element."""
        value -= self.tree[0]
        if value < 0:
            return -1
        j = 1  # Largest power of 2 <= size
        while j * 2 < self.size:
            j *= 2
        i = 0
        while j > 0:
            if i + j < self.size and self.tree[i + j] <= value:
                value -= self.tree[i + j]
                i += j
            j //= 2
        return i


if __name__ == "__main__":
    import doctest

    doctest.testmod()
33
1
# Conversion script: port a fairseq/audiocraft MusicGen checkpoint to the
# Hugging Face Transformers format (decoder + T5 text encoder + EnCodec).
#
# NOTE(review): identifiers in this file have been machine-mangled
# (``snake_case__`` / ``__lowerCAmelCase`` / ``lowerCamelCase__``). Many
# assignment targets no longer match the names the following code reads
# (``name``, ``keys``, ``val``, ``checkpoint``, ...), several signatures
# repeat a parameter name (a SyntaxError as written), and some referenced
# names (``Union``, ``rename_keys``, ``convert_musicgen_checkpoint``) are
# never defined/imported. Code is left byte-identical; comments describe
# the apparent intent — confirm against the unmangled original.
import argparse
from pathlib import Path
from typing import Dict, OrderedDict, Tuple

import torch
from audiocraft.models import MusicGen
from transformers import (
    AutoFeatureExtractor,
    AutoTokenizer,
    EncodecModel,
    MusicgenDecoderConfig,
    MusicgenForConditionalGeneration,
    MusicgenProcessor,
    TaEncoderModel,
)
from transformers.models.musicgen.modeling_musicgen import MusicgenForCausalLM
from transformers.utils import logging

logging.set_verbosity_info()

# NOTE(review): both module constants share the mangled name
# ``lowerCamelCase__`` — the logger binding is overwritten by the list.
lowerCamelCase__ : Dict = logging.get_logger(__name__)
lowerCamelCase__ : str = ["""model.decoder.embed_positions.weights"""]


def SCREAMING_SNAKE_CASE ( __lowerCAmelCase ) -> Union[str, Any]:
    # Map one fairseq weight name onto the Transformers naming scheme.
    # NOTE(review): the body reads ``name`` although the parameter is
    # ``__lowerCAmelCase`` — TODO confirm intended name.
    if "emb" in name:
        snake_case__ = name.replace('''emb''' , '''model.decoder.embed_tokens''' )
    if "transformer" in name:
        snake_case__ = name.replace('''transformer''' , '''model.decoder''' )
    if "cross_attention" in name:
        snake_case__ = name.replace('''cross_attention''' , '''encoder_attn''' )
    if "linear1" in name:
        snake_case__ = name.replace('''linear1''' , '''fc1''' )
    if "linear2" in name:
        snake_case__ = name.replace('''linear2''' , '''fc2''' )
    if "norm1" in name:
        snake_case__ = name.replace('''norm1''' , '''self_attn_layer_norm''' )
    if "norm_cross" in name:
        snake_case__ = name.replace('''norm_cross''' , '''encoder_attn_layer_norm''' )
    if "norm2" in name:
        snake_case__ = name.replace('''norm2''' , '''final_layer_norm''' )
    if "out_norm" in name:
        snake_case__ = name.replace('''out_norm''' , '''model.decoder.layer_norm''' )
    if "linears" in name:
        snake_case__ = name.replace('''linears''' , '''lm_heads''' )
    if "condition_provider.conditioners.description.output_proj" in name:
        snake_case__ = name.replace('''condition_provider.conditioners.description.output_proj''' , '''enc_to_dec_proj''' )
    return name


def SCREAMING_SNAKE_CASE ( __lowerCAmelCase , __lowerCAmelCase ) -> Tuple[Dict, Dict]:
    # Split the fairseq state dict into decoder weights and the enc->dec
    # projection, renaming keys and un-fusing the fused qkv projection.
    # NOTE(review): both parameters share one name (SyntaxError as-is);
    # the body reads ``state_dict``/``hidden_size`` — presumably the
    # original parameter names.
    snake_case__ = list(state_dict.keys() )
    snake_case__ = {}
    for key in keys:
        snake_case__ = state_dict.pop(__lowerCAmelCase )
        snake_case__ = rename_keys(__lowerCAmelCase )
        if "in_proj_weight" in key:
            # split fused qkv proj
            snake_case__ = val[:hidden_size, :]
            snake_case__ = val[hidden_size : 2 * hidden_size, :]
            snake_case__ = val[-hidden_size:, :]
        elif "enc_to_dec_proj" in key:
            snake_case__ = val
        else:
            snake_case__ = val
    return state_dict, enc_dec_proj_state_dict


def SCREAMING_SNAKE_CASE ( __lowerCAmelCase ) -> MusicgenDecoderConfig:
    # Build a MusicgenDecoderConfig for the "small"/"medium"/"large"
    # checkpoint variants.
    if checkpoint == "small":
        # default config values
        snake_case__ = 1024
        snake_case__ = 24
        snake_case__ = 16
    elif checkpoint == "medium":
        snake_case__ = 1536
        snake_case__ = 48
        snake_case__ = 24
    elif checkpoint == "large":
        snake_case__ = 2048
        snake_case__ = 48
        snake_case__ = 32
    else:
        raise ValueError(F"""Checkpoint should be one of `['small', 'medium', 'large']`, got {checkpoint}.""" )
    snake_case__ = MusicgenDecoderConfig(
        hidden_size=__lowerCAmelCase , ffn_dim=hidden_size * 4 , num_hidden_layers=__lowerCAmelCase , num_attention_heads=__lowerCAmelCase , )
    return config


@torch.no_grad()
def SCREAMING_SNAKE_CASE ( __lowerCAmelCase , __lowerCAmelCase=None , __lowerCAmelCase=None , __lowerCAmelCase="cpu" ) -> Union[str, Any]:
    # End-to-end conversion: load the audiocraft checkpoint, rename/split
    # the weights, assemble the composite HF model, sanity-check one
    # forward pass, then optionally save locally and/or push to the Hub.
    snake_case__ = MusicGen.get_pretrained(__lowerCAmelCase , device=__lowerCAmelCase )
    snake_case__ = decoder_config_from_checkpoint(__lowerCAmelCase )
    snake_case__ = fairseq_model.lm.state_dict()
    snake_case__ , snake_case__ = rename_state_dict(
        __lowerCAmelCase , hidden_size=decoder_config.hidden_size )
    snake_case__ = TaEncoderModel.from_pretrained('''t5-base''' )
    snake_case__ = EncodecModel.from_pretrained('''facebook/encodec_32khz''' )
    snake_case__ = MusicgenForCausalLM(__lowerCAmelCase ).eval()
    # load all decoder weights - expect that we'll be missing embeddings and enc-dec projection
    snake_case__ , snake_case__ = decoder.load_state_dict(__lowerCAmelCase , strict=__lowerCAmelCase )
    for key in missing_keys.copy():
        if key.startswith(('''text_encoder''', '''audio_encoder''') ) or key in EXPECTED_MISSING_KEYS:
            missing_keys.remove(__lowerCAmelCase )
    if len(__lowerCAmelCase ) > 0:
        raise ValueError(F"""Missing key(s) in state_dict: {missing_keys}""" )
    if len(__lowerCAmelCase ) > 0:
        raise ValueError(F"""Unexpected key(s) in state_dict: {unexpected_keys}""" )
    # init the composite model
    snake_case__ = MusicgenForConditionalGeneration(text_encoder=__lowerCAmelCase , audio_encoder=__lowerCAmelCase , decoder=__lowerCAmelCase )
    # load the pre-trained enc-dec projection (from the decoder state dict)
    model.enc_to_dec_proj.load_state_dict(__lowerCAmelCase )
    # check we can do a forward pass
    snake_case__ = torch.arange(0 , 8 , dtype=torch.long ).reshape(2 , -1 )
    snake_case__ = input_ids.reshape(2 * 4 , -1 )
    with torch.no_grad():
        snake_case__ = model(input_ids=__lowerCAmelCase , decoder_input_ids=__lowerCAmelCase ).logits
    if logits.shape != (8, 1, 2048):
        raise ValueError('''Incorrect shape for logits''' )
    # now construct the processor
    snake_case__ = AutoTokenizer.from_pretrained('''t5-base''' )
    snake_case__ = AutoFeatureExtractor.from_pretrained('''facebook/encodec_32khz''' , padding_side='''left''' )
    snake_case__ = MusicgenProcessor(feature_extractor=__lowerCAmelCase , tokenizer=__lowerCAmelCase )
    # set the appropriate bos/pad token ids
    snake_case__ = 2048
    snake_case__ = 2048
    # set other default generation config params
    snake_case__ = int(30 * audio_encoder.config.frame_rate )
    snake_case__ = True
    snake_case__ = 3.0
    if pytorch_dump_folder is not None:
        Path(__lowerCAmelCase ).mkdir(exist_ok=__lowerCAmelCase )
        logger.info(F"""Saving model {checkpoint} to {pytorch_dump_folder}""" )
        model.save_pretrained(__lowerCAmelCase )
        processor.save_pretrained(__lowerCAmelCase )
    if repo_id:
        logger.info(F"""Pushing model {checkpoint} to {repo_id}""" )
        model.push_to_hub(__lowerCAmelCase )
        processor.push_to_hub(__lowerCAmelCase )


if __name__ == "__main__":
    lowerCamelCase__ : Any = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        """--checkpoint""",
        default="""small""",
        type=str,
        help="""Checkpoint size of the MusicGen model you'd like to convert. Can be one of: `['small', 'medium', 'large']`.""",
    )
    parser.add_argument(
        """--pytorch_dump_folder""",
        required=True,
        default=None,
        type=str,
        help="""Path to the output PyTorch model directory.""",
    )
    parser.add_argument(
        """--push_to_hub""", default=None, type=str, help="""Where to upload the converted model on the 🤗 hub."""
    )
    parser.add_argument(
        """--device""", default="""cpu""", type=str, help="""Torch device to run the conversion, either cpu or cuda."""
    )
    lowerCamelCase__ : Optional[int] = parser.parse_args()
    convert_musicgen_checkpoint(args.checkpoint, args.pytorch_dump_folder, args.push_to_hub)
33
# Test-suite file for the TensorFlow Blenderbot model: a model tester that
# builds tiny configs/inputs, an input-dict helper, the common-test class,
# and a slow integration test against the 400M-distill checkpoint.
#
# NOTE(review): identifiers are machine-mangled (``snake_case__``/``_a``).
# Several ``__init__``/method signatures repeat the parameter name ``_a``
# (a SyntaxError as written), assignment targets no longer match the names
# read afterwards (``parent``, ``input_ids``, ``config`` ...), and class
# attributes share the name ``__lowercase`` so earlier values are
# shadowed. Code left byte-identical; comments describe apparent intent.
from __future__ import annotations

import unittest

from transformers import BlenderbotConfig, BlenderbotTokenizer, is_tf_available
from transformers.testing_utils import require_tf, require_tokenizers, slow
from transformers.utils import cached_property

from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin

if is_tf_available():
    import tensorflow as tf

    from transformers import TFAutoModelForSeqaSeqLM, TFBlenderbotForConditionalGeneration, TFBlenderbotModel


@require_tf
class __magic_name__ :
    '''simple docstring'''

    # NOTE(review): all three attributes share the mangled name
    # ``__lowercase`` — only the last ('gelu') survives on the class.
    __lowercase : int = BlenderbotConfig
    __lowercase : Any = {}
    __lowercase : Optional[Any] = 'gelu'

    def __init__( self:Tuple , _a:Optional[Any] , _a:Optional[Any]=13 , _a:Tuple=7 , _a:Union[str, Any]=True , _a:int=False , _a:int=99 , _a:Optional[int]=32 , _a:List[str]=2 , _a:List[str]=4 , _a:List[Any]=37 , _a:Any=0.1 , _a:int=0.1 , _a:List[Any]=20 , _a:List[str]=2 , _a:int=1 , _a:Dict=0 , ):
        # Store the tiny-model hyper-parameters used by the tests.
        snake_case__ = parent
        snake_case__ = batch_size
        snake_case__ = seq_length
        snake_case__ = is_training
        snake_case__ = use_labels
        snake_case__ = vocab_size
        snake_case__ = hidden_size
        snake_case__ = num_hidden_layers
        snake_case__ = num_attention_heads
        snake_case__ = intermediate_size
        snake_case__ = hidden_dropout_prob
        snake_case__ = attention_probs_dropout_prob
        snake_case__ = max_position_embeddings
        snake_case__ = eos_token_id
        snake_case__ = pad_token_id
        snake_case__ = bos_token_id

    def SCREAMING_SNAKE_CASE__ ( self:int ):
        # Build a random config + input dict (prepare_config_and_inputs).
        snake_case__ = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size )
        snake_case__ = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ) , 1 )
        snake_case__ = tf.concat([input_ids, eos_tensor] , axis=1 )
        snake_case__ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        snake_case__ = self.config_cls(
            vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , **self.config_updates , )
        snake_case__ = prepare_blenderbot_inputs_dict(_a , _a , _a )
        return config, inputs_dict

    def SCREAMING_SNAKE_CASE__ ( self:int , _a:Optional[Any] , _a:int ):
        # Verify decoder outputs match with and without past_key_values.
        snake_case__ = TFBlenderbotModel(config=_a ).get_decoder()
        snake_case__ = inputs_dict['''input_ids''']
        snake_case__ = input_ids[:1, :]
        snake_case__ = inputs_dict['''attention_mask'''][:1, :]
        snake_case__ = inputs_dict['''head_mask''']
        snake_case__ = 1
        # first forward pass
        snake_case__ = model(_a , attention_mask=_a , head_mask=_a , use_cache=_a )
        snake_case__ , snake_case__ = outputs.to_tuple()
        # create hypothetical next token and extent to next_input_ids
        snake_case__ = ids_tensor((self.batch_size, 3) , config.vocab_size )
        snake_case__ = tf.cast(ids_tensor((self.batch_size, 3) , 2 ) , tf.inta )
        # append to next input_ids and
        snake_case__ = tf.concat([input_ids, next_tokens] , axis=-1 )
        snake_case__ = tf.concat([attention_mask, next_attn_mask] , axis=-1 )
        snake_case__ = model(_a , attention_mask=_a )[0]
        snake_case__ = model(_a , attention_mask=_a , past_key_values=_a )[0]
        self.parent.assertEqual(next_tokens.shape[1] , output_from_past.shape[1] )
        # select random slice
        snake_case__ = int(ids_tensor((1,) , output_from_past.shape[-1] ) )
        snake_case__ = output_from_no_past[:, -3:, random_slice_idx]
        snake_case__ = output_from_past[:, :, random_slice_idx]
        # test that outputs are equal for slice
        tf.debugging.assert_near(_a , _a , rtol=1e-3 )


def SCREAMING_SNAKE_CASE ( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase=None , __lowerCAmelCase=None , __lowerCAmelCase=None , __lowerCAmelCase=None , __lowerCAmelCase=None , ) -> Tuple:
    # Fill in default attention/head masks for a Blenderbot input dict.
    # NOTE(review): all eight parameters share one mangled name
    # (SyntaxError as written); the body reads config, input_ids,
    # decoder_input_ids and the mask names — presumably the originals.
    if attention_mask is None:
        snake_case__ = tf.cast(tf.math.not_equal(__lowerCAmelCase , config.pad_token_id ) , tf.inta )
    if decoder_attention_mask is None:
        snake_case__ = tf.concat(
            [
                tf.ones(decoder_input_ids[:, :1].shape , dtype=tf.inta ),
                tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:] , config.pad_token_id ) , tf.inta ),
            ] , axis=-1 , )
    if head_mask is None:
        snake_case__ = tf.ones((config.encoder_layers, config.encoder_attention_heads) )
    if decoder_head_mask is None:
        snake_case__ = tf.ones((config.decoder_layers, config.decoder_attention_heads) )
    if cross_attn_head_mask is None:
        snake_case__ = tf.ones((config.decoder_layers, config.decoder_attention_heads) )
    return {
        "input_ids": input_ids,
        "decoder_input_ids": decoder_input_ids,
        "attention_mask": attention_mask,
        "decoder_attention_mask": decoder_attention_mask,
        "head_mask": head_mask,
        "decoder_head_mask": decoder_head_mask,
        "cross_attn_head_mask": cross_attn_head_mask,
    }


@require_tf
class __magic_name__ (snake_case_ ,snake_case_ ,unittest.TestCase ):
    '''simple docstring'''

    # NOTE(review): mangled attribute names — in the original these are
    # all_model_classes / all_generative_model_classes / pipeline mapping
    # and the is_encoder_decoder-style flags; here each ``__lowercase``/
    # ``__lowercase`` assignment shadows the previous one.
    __lowercase : List[str] = (TFBlenderbotForConditionalGeneration, TFBlenderbotModel) if is_tf_available() else ()
    __lowercase : Any = (TFBlenderbotForConditionalGeneration,) if is_tf_available() else ()
    __lowercase : Tuple = (
        {
            'conversational': TFBlenderbotForConditionalGeneration,
            'feature-extraction': TFBlenderbotModel,
            'summarization': TFBlenderbotForConditionalGeneration,
            'text2text-generation': TFBlenderbotForConditionalGeneration,
            'translation': TFBlenderbotForConditionalGeneration,
        }
        if is_tf_available()
        else {}
    )
    __lowercase : Any = True
    __lowercase : int = False
    __lowercase : int = False

    def SCREAMING_SNAKE_CASE__ ( self:Union[str, Any] ):
        # setUp: build the model tester and config tester.
        snake_case__ = TFBlenderbotModelTester(self )
        snake_case__ = ConfigTester(self , config_class=_a )

    def SCREAMING_SNAKE_CASE__ ( self:Optional[Any] ):
        self.config_tester.run_common_tests()

    def SCREAMING_SNAKE_CASE__ ( self:Optional[Any] ):
        snake_case__ = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_decoder_model_past_large_inputs(*_a )


@require_tokenizers
@require_tf
class __magic_name__ (unittest.TestCase ):
    '''simple docstring'''

    # Integration test against facebook/blenderbot-400M-distill.
    __lowercase : Optional[int] = ['My friends are cool but they eat too many carbs.']
    __lowercase : Optional[int] = 'facebook/blenderbot-400M-distill'

    @cached_property
    def SCREAMING_SNAKE_CASE__ ( self:Tuple ):
        return BlenderbotTokenizer.from_pretrained(self.model_name )

    @cached_property
    def SCREAMING_SNAKE_CASE__ ( self:Union[str, Any] ):
        snake_case__ = TFAutoModelForSeqaSeqLM.from_pretrained(self.model_name )
        return model

    @slow
    def SCREAMING_SNAKE_CASE__ ( self:List[Any] ):
        # Generate from the checkpoint and compare the decoded reply.
        snake_case__ = self.tokenizer(self.src_text , return_tensors='''tf''' )
        snake_case__ = self.model.generate(
            model_inputs.input_ids , )
        snake_case__ = self.tokenizer.batch_decode(generated_ids.numpy() , skip_special_tokens=_a )[0]
        assert (
            generated_words
            == " That's unfortunate. Are they trying to lose weight or are they just trying to be healthier?"
        )
33
1
# BlenderbotSmall (90M) slow tokenizer: whitespace/punctuation pre-split
# followed by lowercased BPE with "@@" continuation markers.
#
# NOTE(review): identifiers are machine-mangled (``snake_case__``/``_a``);
# several signatures repeat the parameter name ``_a`` (a SyntaxError as
# written) and assignment targets no longer match the names read later
# (``pairs``, ``word``, ``tokens``, ``index`` ...). Code left
# byte-identical; comments describe the apparent intent.
import json
import os
from typing import Dict, List, Optional, Tuple

import regex as re

from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging

lowerCamelCase__ : Tuple = logging.get_logger(__name__)

# Expected on-disk file names for the vocabulary artifacts.
lowerCamelCase__ : str = {
    """vocab_file""": """vocab.json""",
    """merges_file""": """merges.txt""",
    """tokenizer_config_file""": """tokenizer_config.json""",
}

# Hub download locations for the released 90M checkpoint.
lowerCamelCase__ : List[str] = {
    """vocab_file""": {
        """facebook/blenderbot_small-90M""": """https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/vocab.json"""
    },
    """merges_file""": {
        """facebook/blenderbot_small-90M""": """https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/merges.txt"""
    },
    """tokenizer_config_file""": {
        """facebook/blenderbot_small-90M""": (
            """https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/tokenizer_config.json"""
        )
    },
}

# Maximum positional-embedding length per checkpoint.
lowerCamelCase__ : str = {"""facebook/blenderbot_small-90M""": 5_1_2}


def SCREAMING_SNAKE_CASE ( __lowerCAmelCase ) -> Optional[int]:
    # Return the set of adjacent symbol pairs in a word (BPE candidates).
    # NOTE(review): the body reads ``word``/``pairs``/``prev_char`` though
    # the parameter is ``__lowerCAmelCase`` — mangled assignment targets.
    snake_case__ = set()
    snake_case__ = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char) )
        snake_case__ = char
    snake_case__ = set(__lowerCAmelCase )
    return pairs


class __magic_name__ (snake_case_ ):
    '''simple docstring'''

    # NOTE(review): all four class attributes share the mangled name
    # ``__lowercase``; only the last (model input names) survives.
    __lowercase : Tuple = VOCAB_FILES_NAMES
    __lowercase : Optional[int] = PRETRAINED_VOCAB_FILES_MAP
    __lowercase : List[str] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    __lowercase : Union[str, Any] = ['input_ids', 'attention_mask']

    def __init__( self:Dict , _a:List[Any] , _a:List[Any] , _a:Dict="__start__" , _a:Optional[Any]="__end__" , _a:Tuple="__unk__" , _a:int="__null__" , **_a:Optional[Any] , ):
        # Load vocab.json into the encoder and merges.txt into bpe_ranks.
        super().__init__(unk_token=_a , bos_token=_a , eos_token=_a , pad_token=_a , **_a )
        with open(_a , encoding='''utf-8''' ) as vocab_handle:
            snake_case__ = json.load(_a )
        snake_case__ = {v: k for k, v in self.encoder.items()}
        with open(_a , encoding='''utf-8''' ) as merges_handle:
            snake_case__ = merges_handle.read().split('''\n''' )[1:-1]
        snake_case__ = [tuple(merge.split() ) for merge in merges]
        snake_case__ = dict(zip(_a , range(len(_a ) ) ) )
        # Per-token BPE cache.
        snake_case__ = {}

    @property
    def SCREAMING_SNAKE_CASE__ ( self:Union[str, Any] ):
        # vocab_size
        return len(self.encoder )

    def SCREAMING_SNAKE_CASE__ ( self:List[Any] ):
        # get_vocab: base vocab merged with added tokens.
        return dict(self.encoder , **self.added_tokens_encoder )

    def SCREAMING_SNAKE_CASE__ ( self:Any , _a:str ):
        # bpe: normalise punctuation/whitespace, lowercase, then greedily
        # merge the lowest-ranked adjacent pair until none remain; joined
        # sub-words carry an "@@" continuation marker.
        if token in self.cache:
            return self.cache[token]
        snake_case__ = re.sub('''([.,!?()])''' , r''' \1''' , _a )
        snake_case__ = re.sub('''(\')''' , r''' \1 ''' , _a )
        snake_case__ = re.sub(r'''\s{2,}''' , ''' ''' , _a )
        if "\n" in token:
            snake_case__ = token.replace('''\n''' , ''' __newln__''' )
        snake_case__ = token.split(''' ''' )
        snake_case__ = []
        for token in tokens:
            if not len(_a ):
                continue
            snake_case__ = token.lower()
            snake_case__ = tuple(_a )
            # Mark the final symbol with the end-of-word suffix.
            snake_case__ = tuple(list(word[:-1] ) + [word[-1] + '''</w>'''] )
            snake_case__ = get_pairs(_a )
            if not pairs:
                words.append(_a )
                continue
            while True:
                # Lowest-ranked (earliest-learned) pair merges first.
                snake_case__ = min(_a , key=lambda _a : self.bpe_ranks.get(_a , float('''inf''' ) ) )
                if bigram not in self.bpe_ranks:
                    break
                snake_case__ , snake_case__ = bigram
                snake_case__ = []
                snake_case__ = 0
                while i < len(_a ):
                    try:
                        snake_case__ = word.index(_a , _a )
                        new_word.extend(word[i:j] )
                        snake_case__ = j
                    except ValueError:
                        new_word.extend(word[i:] )
                        break
                    if word[i] == first and i < len(_a ) - 1 and word[i + 1] == second:
                        new_word.append(first + second )
                        i += 2
                    else:
                        new_word.append(word[i] )
                        i += 1
                snake_case__ = tuple(_a )
                snake_case__ = new_word
                if len(_a ) == 1:
                    break
                else:
                    snake_case__ = get_pairs(_a )
            snake_case__ = '''@@ '''.join(_a )
            # Strip the trailing "</w>" end-of-word marker.
            snake_case__ = word[:-4]
            snake_case__ = word
            words.append(_a )
        return " ".join(_a )

    def SCREAMING_SNAKE_CASE__ ( self:Union[str, Any] , _a:str ):
        # _tokenize: split on whitespace then BPE each chunk.
        snake_case__ = []
        snake_case__ = re.findall(r'''\S+\n?''' , _a )
        for token in words:
            split_tokens.extend(list(self.bpe(_a ).split(''' ''' ) ) )
        return split_tokens

    def SCREAMING_SNAKE_CASE__ ( self:Union[str, Any] , _a:str ):
        # _convert_token_to_id: lowercase lookup with unk fallback.
        snake_case__ = token.lower()
        return self.encoder.get(_a , self.encoder.get(self.unk_token ) )

    def SCREAMING_SNAKE_CASE__ ( self:List[Any] , _a:int ):
        # _convert_id_to_token.
        return self.decoder.get(_a , self.unk_token )

    def SCREAMING_SNAKE_CASE__ ( self:str , _a:List[str] ):
        # convert_tokens_to_string: drop "@@ " continuation markers.
        snake_case__ = ''' '''.join(_a ).replace('''@@ ''' , '''''' ).strip()
        return out_string

    def SCREAMING_SNAKE_CASE__ ( self:Union[str, Any] , _a:str , _a:Optional[str] = None ):
        # save_vocabulary: write vocab.json and merges.txt into the
        # directory, warning if merge ranks are not consecutive.
        if not os.path.isdir(_a ):
            logger.error(F"""Vocabulary path ({save_directory}) should be a directory""" )
            return
        snake_case__ = os.path.join(
            _a , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
        snake_case__ = os.path.join(
            _a , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''merges_file'''] )
        with open(_a , '''w''' , encoding='''utf-8''' ) as f:
            f.write(json.dumps(self.encoder , indent=2 , sort_keys=_a , ensure_ascii=_a ) + '''\n''' )
        snake_case__ = 0
        with open(_a , '''w''' , encoding='''utf-8''' ) as writer:
            writer.write('''#version: 0.2\n''' )
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda _a : kv[1] ):
                if index != token_index:
                    logger.warning(
                        F"""Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."""
                        ''' Please check that the tokenizer is not corrupted!''' )
                    snake_case__ = token_index
                writer.write(''' '''.join(_a ) + '''\n''' )
                index += 1
        return vocab_file, merge_file
33
import json
import sys
import tempfile
import unittest
from pathlib import Path

import transformers
from transformers import (
    CONFIG_MAPPING,
    IMAGE_PROCESSOR_MAPPING,
    AutoConfig,
    AutoImageProcessor,
    CLIPConfig,
    CLIPImageProcessor,
)
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER

# Make the shared test helpers (custom config / custom image processor) importable.
sys.path.append(str(Path(__file__).parent.parent.parent.parent / """utils"""))

from test_module.custom_configuration import CustomConfig  # noqa E402
from test_module.custom_image_processing import CustomImageProcessor  # noqa E402


class __magic_name__ (unittest.TestCase ):
    """Tests for ``AutoImageProcessor``: Hub/local loading, error messages,
    custom-class registration, and ``trust_remote_code`` resolution.

    NOTE(review): local variable names were lost in generation (``snake_case__``),
    but later references (``config_dict``, ``image_processor``, ...) show the
    intended bindings — confirm against the upstream test file.
    """

    def SCREAMING_SNAKE_CASE__ ( self:List[Any] ):
        # Per-test setup; the counter value itself is not inspected by any assertion.
        snake_case__ = 0

    def SCREAMING_SNAKE_CASE__ ( self:Optional[Any] ):
        # Load an image processor directly from a Hub repo id.
        snake_case__ = AutoImageProcessor.from_pretrained('''openai/clip-vit-base-patch32''' )
        self.assertIsInstance(_a , _a )

    def SCREAMING_SNAKE_CASE__ ( self:str ):
        # Load from a local directory holding preprocessor_config.json + config.json.
        with tempfile.TemporaryDirectory() as tmpdirname:
            snake_case__ = Path(_a ) / '''preprocessor_config.json'''
            snake_case__ = Path(_a ) / '''config.json'''
            json.dump(
                {'''image_processor_type''': '''CLIPImageProcessor''', '''processor_class''': '''CLIPProcessor'''} ,
                open(_a , '''w''' ) ,
            )
            json.dump({'''model_type''': '''clip'''} , open(_a , '''w''' ) )

            snake_case__ = AutoImageProcessor.from_pretrained(_a )
            self.assertIsInstance(_a , _a )

    def SCREAMING_SNAKE_CASE__ ( self:Dict ):
        # Ensure we can load the image processor from the feature extractor config
        with tempfile.TemporaryDirectory() as tmpdirname:
            snake_case__ = Path(_a ) / '''preprocessor_config.json'''
            snake_case__ = Path(_a ) / '''config.json'''
            json.dump(
                {'''feature_extractor_type''': '''CLIPFeatureExtractor''', '''processor_class''': '''CLIPProcessor'''} ,
                open(_a , '''w''' ) ,
            )
            json.dump({'''model_type''': '''clip'''} , open(_a , '''w''' ) )

            snake_case__ = AutoImageProcessor.from_pretrained(_a )
            self.assertIsInstance(_a , _a )

    def SCREAMING_SNAKE_CASE__ ( self:Optional[int] ):
        with tempfile.TemporaryDirectory() as tmpdirname:
            snake_case__ = CLIPConfig()

            # Create a dummy config file with image_processor_type
            snake_case__ = Path(_a ) / '''preprocessor_config.json'''
            snake_case__ = Path(_a ) / '''config.json'''
            json.dump(
                {'''image_processor_type''': '''CLIPImageProcessor''', '''processor_class''': '''CLIPProcessor'''} ,
                open(_a , '''w''' ) ,
            )
            json.dump({'''model_type''': '''clip'''} , open(_a , '''w''' ) )

            # remove image_processor_type to make sure config.json alone is enough to load image processor locally
            snake_case__ = AutoImageProcessor.from_pretrained(_a ).to_dict()
            config_dict.pop('''image_processor_type''' )
            snake_case__ = CLIPImageProcessor(**_a )

            # save in new folder
            model_config.save_pretrained(_a )
            config.save_pretrained(_a )

            snake_case__ = AutoImageProcessor.from_pretrained(_a )

            # make sure private variable is not incorrectly saved
            snake_case__ = json.loads(config.to_json_string() )
            self.assertTrue('''_processor_class''' not in dict_as_saved )

            self.assertIsInstance(_a , _a )

    def SCREAMING_SNAKE_CASE__ ( self:List[str] ):
        # preprocessor_config.json alone (no config.json) is sufficient.
        with tempfile.TemporaryDirectory() as tmpdirname:
            snake_case__ = Path(_a ) / '''preprocessor_config.json'''
            json.dump(
                {'''image_processor_type''': '''CLIPImageProcessor''', '''processor_class''': '''CLIPProcessor'''} ,
                open(_a , '''w''' ) ,
            )

            snake_case__ = AutoImageProcessor.from_pretrained(_a )
            self.assertIsInstance(_a , _a )

    def SCREAMING_SNAKE_CASE__ ( self:Dict ):
        # A non-existent repo id raises with an explanatory message.
        with self.assertRaisesRegex(
            _a , '''clip-base is not a local folder and is not a valid model identifier''' ):
            snake_case__ = AutoImageProcessor.from_pretrained('''clip-base''' )

    def SCREAMING_SNAKE_CASE__ ( self:Union[str, Any] ):
        # An invalid revision raises.
        with self.assertRaisesRegex(
            _a , r'''aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)''' ):
            snake_case__ = AutoImageProcessor.from_pretrained(_a , revision='''aaaaaa''' )

    def SCREAMING_SNAKE_CASE__ ( self:List[Any] ):
        # A repo with no preprocessor_config.json raises.
        with self.assertRaisesRegex(
            _a ,
            '''hf-internal-testing/config-no-model does not appear to have a file named preprocessor_config.json.''' ,
        ):
            snake_case__ = AutoImageProcessor.from_pretrained('''hf-internal-testing/config-no-model''' )

    def SCREAMING_SNAKE_CASE__ ( self:Dict ):
        # If remote code is not set, we will time out when asking whether to load the model.
        with self.assertRaises(_a ):
            snake_case__ = AutoImageProcessor.from_pretrained('''hf-internal-testing/test_dynamic_image_processor''' )
        # If remote code is disabled, we can't load this config.
        with self.assertRaises(_a ):
            snake_case__ = AutoImageProcessor.from_pretrained(
                '''hf-internal-testing/test_dynamic_image_processor''' , trust_remote_code=_a )

        snake_case__ = AutoImageProcessor.from_pretrained(
            '''hf-internal-testing/test_dynamic_image_processor''' , trust_remote_code=_a )
        self.assertEqual(image_processor.__class__.__name__ , '''NewImageProcessor''' )

        # Test image processor can be reloaded.
        with tempfile.TemporaryDirectory() as tmp_dir:
            image_processor.save_pretrained(_a )
            snake_case__ = AutoImageProcessor.from_pretrained(_a , trust_remote_code=_a )
        self.assertEqual(reloaded_image_processor.__class__.__name__ , '''NewImageProcessor''' )

    def SCREAMING_SNAKE_CASE__ ( self:Dict ):
        # Register a custom config + image processor, then clean the registries up.
        try:
            AutoConfig.register('''custom''' , _a )
            AutoImageProcessor.register(_a , _a )
            # Trying to register something existing in the Transformers library will raise an error
            with self.assertRaises(_a ):
                AutoImageProcessor.register(_a , _a )

            with tempfile.TemporaryDirectory() as tmpdirname:
                snake_case__ = Path(_a ) / '''preprocessor_config.json'''
                snake_case__ = Path(_a ) / '''config.json'''
                json.dump(
                    {'''feature_extractor_type''': '''CLIPFeatureExtractor''', '''processor_class''': '''CLIPProcessor'''} ,
                    open(_a , '''w''' ) ,
                )
                json.dump({'''model_type''': '''clip'''} , open(_a , '''w''' ) )

                snake_case__ = CustomImageProcessor.from_pretrained(_a )

            # Now that the config is registered, it can be used as any other config with the auto-API
            with tempfile.TemporaryDirectory() as tmp_dir:
                image_processor.save_pretrained(_a )
                snake_case__ = AutoImageProcessor.from_pretrained(_a )
                self.assertIsInstance(_a , _a )

        finally:
            # Always restore the global registries so other tests are unaffected.
            if "custom" in CONFIG_MAPPING._extra_content:
                del CONFIG_MAPPING._extra_content["custom"]
            if CustomConfig in IMAGE_PROCESSOR_MAPPING._extra_content:
                del IMAGE_PROCESSOR_MAPPING._extra_content[CustomConfig]

    def SCREAMING_SNAKE_CASE__ ( self:Optional[int] ):
        class __magic_name__ (snake_case_ ):
            """Local stand-in processor used to test local-vs-remote resolution."""

            # Marker attribute present on the local class only; the Hub version
            # of NewImageProcessor does not define it (checked via hasattr below).
            __lowercase : List[str] = True

        try:
            AutoConfig.register('''custom''' , _a )
            AutoImageProcessor.register(_a , _a )
            # If remote code is not set, the default is to use local
            snake_case__ = AutoImageProcessor.from_pretrained('''hf-internal-testing/test_dynamic_image_processor''' )
            self.assertEqual(image_processor.__class__.__name__ , '''NewImageProcessor''' )
            self.assertTrue(image_processor.is_local )

            # If remote code is disabled, we load the local one.
            snake_case__ = AutoImageProcessor.from_pretrained(
                '''hf-internal-testing/test_dynamic_image_processor''' , trust_remote_code=_a )
            self.assertEqual(image_processor.__class__.__name__ , '''NewImageProcessor''' )
            self.assertTrue(image_processor.is_local )

            # If remote is enabled, we load from the Hub
            snake_case__ = AutoImageProcessor.from_pretrained(
                '''hf-internal-testing/test_dynamic_image_processor''' , trust_remote_code=_a )
            self.assertEqual(image_processor.__class__.__name__ , '''NewImageProcessor''' )
            self.assertTrue(not hasattr(_a , '''is_local''' ) )

        finally:
            if "custom" in CONFIG_MAPPING._extra_content:
                del CONFIG_MAPPING._extra_content["custom"]
            if CustomConfig in IMAGE_PROCESSOR_MAPPING._extra_content:
                del IMAGE_PROCESSOR_MAPPING._extra_content[CustomConfig]
33
1
import io
import itertools
import json
from dataclasses import dataclass
from typing import Optional

import pyarrow as pa
import pyarrow.json as paj

import datasets
from datasets.table import table_cast
from datasets.utils.file_utils import readline


lowerCamelCase__ : int = datasets.utils.logging.get_logger(__name__)


@dataclass
class __magic_name__ (datasets.BuilderConfig ):
    """BuilderConfig for the JSON loader.

    NOTE(review): the field names were lost in generation (all render as
    ``__lowercase``); judging from how ``self.config`` is read below they are,
    in order: features, encoding, encoding_errors, field, use_threads
    (deprecated), block_size (deprecated), chunksize, newlines_in_values —
    confirm against the upstream module.
    """

    __lowercase : Optional[datasets.Features] = None
    __lowercase : str = "utf-8"
    __lowercase : Optional[str] = None
    __lowercase : Optional[str] = None
    __lowercase : bool = True  # deprecated
    __lowercase : Optional[int] = None  # deprecated
    __lowercase : int = 10 << 20  # 10MB
    __lowercase : Optional[bool] = None


class __magic_name__ (datasets.ArrowBasedBuilder ):
    """Arrow-based dataset builder that reads JSON and JSON-Lines files."""

    __lowercase : int = JsonConfig

    def SCREAMING_SNAKE_CASE__ ( self:Optional[Any] ):
        # Handle deprecated/removed config options, then expose the features.
        if self.config.block_size is not None:
            logger.warning('''The JSON loader parameter `block_size` is deprecated. Please use `chunksize` instead''' )
            snake_case__ = self.config.block_size
        if self.config.use_threads is not True:
            logger.warning(
                '''The JSON loader parameter `use_threads` is deprecated and doesn\'t have any effect anymore.''' )
        if self.config.newlines_in_values is not None:
            raise ValueError('''The JSON loader parameter `newlines_in_values` is no longer supported''' )
        return datasets.DatasetInfo(features=self.config.features )

    def SCREAMING_SNAKE_CASE__ ( self:Tuple , _a:int ):
        """Build one SplitGenerator per split from ``config.data_files``."""
        if not self.config.data_files:
            raise ValueError(F"""At least one data file must be specified, but got data_files={self.config.data_files}""" )
        snake_case__ = dl_manager.download_and_extract(self.config.data_files )
        # A bare str/list/tuple means "no named splits": everything goes to TRAIN.
        if isinstance(_a , (str, list, tuple) ):
            snake_case__ = data_files
            if isinstance(_a , _a ):
                snake_case__ = [files]
            snake_case__ = [dl_manager.iter_files(_a ) for file in files]
            return [datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={'''files''': files} )]
        snake_case__ = []
        for split_name, files in data_files.items():
            if isinstance(_a , _a ):
                snake_case__ = [files]
            snake_case__ = [dl_manager.iter_files(_a ) for file in files]
            splits.append(datasets.SplitGenerator(name=_a , gen_kwargs={'''files''': files} ) )
        return splits

    def SCREAMING_SNAKE_CASE__ ( self:Tuple , _a:pa.Table ):
        """Cast the table to the configured features, adding missing columns as nulls."""
        if self.config.features is not None:
            # adding missing columns
            for column_name in set(self.config.features ) - set(pa_table.column_names ):
                snake_case__ = self.config.features.arrow_schema.field(_a ).type
                snake_case__ = pa_table.append_column(_a , pa.array([None] * len(_a ) , type=_a ) )
            # more expensive cast to support nested structures with keys in a different order
            # allows str <-> int/float or str to Audio for example
            snake_case__ = table_cast(_a , self.config.features.arrow_schema )
        return pa_table

    def SCREAMING_SNAKE_CASE__ ( self:Optional[Any] , _a:List[Any] ):
        """Yield ``(key, pa.Table)`` pairs for every input file."""
        for file_idx, file in enumerate(itertools.chain.from_iterable(_a ) ):
            # If the file is one json object and if we need to look at the list of items in one specific field
            if self.config.field is not None:
                with open(_a , encoding=self.config.encoding , errors=self.config.encoding_errors ) as f:
                    snake_case__ = json.load(_a )
                # We keep only the field we are interested in
                snake_case__ = dataset[self.config.field]
                # We accept two format: a list of dicts or a dict of lists
                if isinstance(_a , (list, tuple) ):
                    snake_case__ = set().union(*[row.keys() for row in dataset] )
                    snake_case__ = {col: [row.get(_a ) for row in dataset] for col in keys}
                else:
                    snake_case__ = dataset
                snake_case__ = pa.Table.from_pydict(_a )
                yield file_idx, self._cast_table(_a )
            # If the file has one json object per line
            else:
                with open(_a , '''rb''' ) as f:
                    snake_case__ = 0
                    # Use block_size equal to the chunk size divided by 32 to leverage multithreading
                    # Set a default minimum value of 16kB if the chunk size is really small
                    snake_case__ = max(self.config.chunksize // 32 , 16 << 10 )
                    snake_case__ = (
                        self.config.encoding_errors if self.config.encoding_errors is not None else '''strict'''
                    )
                    while True:
                        snake_case__ = f.read(self.config.chunksize )
                        if not batch:
                            break
                        # Finish current line
                        try:
                            batch += f.readline()
                        except (AttributeError, io.UnsupportedOperation):
                            batch += readline(_a )
                        # PyArrow only accepts utf-8 encoded bytes
                        if self.config.encoding != "utf-8":
                            snake_case__ = batch.decode(self.config.encoding , errors=_a ).encode('''utf-8''' )
                        try:
                            # Retry pyarrow's parser with a doubled block size until a
                            # record no longer straddles a block boundary.
                            while True:
                                try:
                                    snake_case__ = paj.read_json(
                                        io.BytesIO(_a ) , read_options=paj.ReadOptions(block_size=_a ) )
                                    break
                                except (pa.ArrowInvalid, pa.ArrowNotImplementedError) as e:
                                    if (
                                        isinstance(_a , pa.ArrowInvalid )
                                        and "straddling" not in str(_a )
                                        or block_size > len(_a )
                                    ):
                                        raise
                                    else:
                                        # Increase the block size in case it was too small.
                                        # The block size will be reset for the next file.
                                        logger.debug(
                                            F"""Batch of {len(_a )} bytes couldn't be parsed with block_size={block_size}. Retrying with block_size={block_size * 2}.""" )
                                        block_size *= 2
                        except pa.ArrowInvalid as e:
                            # Not valid JSON-Lines: fall back to parsing the whole file as one document.
                            try:
                                with open(
                                    _a , encoding=self.config.encoding , errors=self.config.encoding_errors ) as f:
                                    snake_case__ = json.load(_a )
                            except json.JSONDecodeError:
                                logger.error(F"""Failed to read file '{file}' with error {type(_a )}: {e}""" )
                                raise e
                            # If possible, parse the file as a list of json objects and exit the loop
                            if isinstance(_a , _a ):  # list is the only sequence type supported in JSON
                                try:
                                    snake_case__ = set().union(*[row.keys() for row in dataset] )
                                    snake_case__ = {col: [row.get(_a ) for row in dataset] for col in keys}
                                    snake_case__ = pa.Table.from_pydict(_a )
                                except (pa.ArrowInvalid, AttributeError) as e:
                                    logger.error(F"""Failed to read file '{file}' with error {type(_a )}: {e}""" )
                                    raise ValueError(F"""Not able to read records in the JSON file at {file}.""" ) from None
                                yield file_idx, self._cast_table(_a )
                                break
                            else:
                                logger.error(F"""Failed to read file '{file}' with error {type(_a )}: {e}""" )
                                raise ValueError(
                                    F"""Not able to read records in the JSON file at {file}. """
                                    F"""You should probably indicate the field of the JSON file containing your records. """
                                    F"""This JSON file contain the following fields: {str(list(dataset.keys() ) )}. """
                                    F"""Select the correct one and provide it as `field='XXX'` to the dataset loading method. """ ) from None
                        # Uncomment for debugging (will print the Arrow table size and elements)
                        # logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}")
                        # logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows)))
                        yield (file_idx, batch_idx), self._cast_table(_a )
                        batch_idx += 1
33
import argparse
import json
from pathlib import Path

import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image

from transformers import DeiTImageProcessor, ViTConfig, ViTForImageClassification, ViTImageProcessor, ViTModel
from transformers.utils import logging


logging.set_verbosity_info()
lowerCamelCase__ : int = logging.get_logger(__name__)


def SCREAMING_SNAKE_CASE ( __lowerCAmelCase , __lowerCAmelCase=False ) -> int:
    """Build (timm_name, hf_name) rename pairs for every ViT weight tensor.

    NOTE(review): the body reads ``config``/``base_model`` and builds
    ``rename_keys`` although the parameters were mangled to ``__lowerCAmelCase``
    — confirm the intended signature ``(config, base_model=False)``.
    """
    snake_case__ = []
    for i in range(config.num_hidden_layers ):
        # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
        rename_keys.append((F"""blocks.{i}.norm1.weight""", F"""vit.encoder.layer.{i}.layernorm_before.weight""") )
        rename_keys.append((F"""blocks.{i}.norm1.bias""", F"""vit.encoder.layer.{i}.layernorm_before.bias""") )
        rename_keys.append((F"""blocks.{i}.attn.proj.weight""", F"""vit.encoder.layer.{i}.attention.output.dense.weight""") )
        rename_keys.append((F"""blocks.{i}.attn.proj.bias""", F"""vit.encoder.layer.{i}.attention.output.dense.bias""") )
        rename_keys.append((F"""blocks.{i}.norm2.weight""", F"""vit.encoder.layer.{i}.layernorm_after.weight""") )
        rename_keys.append((F"""blocks.{i}.norm2.bias""", F"""vit.encoder.layer.{i}.layernorm_after.bias""") )
        rename_keys.append((F"""blocks.{i}.mlp.fc1.weight""", F"""vit.encoder.layer.{i}.intermediate.dense.weight""") )
        rename_keys.append((F"""blocks.{i}.mlp.fc1.bias""", F"""vit.encoder.layer.{i}.intermediate.dense.bias""") )
        rename_keys.append((F"""blocks.{i}.mlp.fc2.weight""", F"""vit.encoder.layer.{i}.output.dense.weight""") )
        rename_keys.append((F"""blocks.{i}.mlp.fc2.bias""", F"""vit.encoder.layer.{i}.output.dense.bias""") )

    # projection layer + position embeddings
    rename_keys.extend(
        [
            ('''cls_token''', '''vit.embeddings.cls_token'''),
            ('''patch_embed.proj.weight''', '''vit.embeddings.patch_embeddings.projection.weight'''),
            ('''patch_embed.proj.bias''', '''vit.embeddings.patch_embeddings.projection.bias'''),
            ('''pos_embed''', '''vit.embeddings.position_embeddings'''),
        ] )

    if base_model:
        # layernorm + pooler
        rename_keys.extend(
            [
                ('''norm.weight''', '''layernorm.weight'''),
                ('''norm.bias''', '''layernorm.bias'''),
                ('''pre_logits.fc.weight''', '''pooler.dense.weight'''),
                ('''pre_logits.fc.bias''', '''pooler.dense.bias'''),
            ] )
        # if just the base model, we should remove "vit" from all keys that start with "vit"
        snake_case__ = [(pair[0], pair[1][4:]) if pair[1].startswith('''vit''' ) else pair for pair in rename_keys]
    else:
        # layernorm + classification head
        rename_keys.extend(
            [
                ('''norm.weight''', '''vit.layernorm.weight'''),
                ('''norm.bias''', '''vit.layernorm.bias'''),
                ('''head.weight''', '''classifier.weight'''),
                ('''head.bias''', '''classifier.bias'''),
            ] )

    return rename_keys


def SCREAMING_SNAKE_CASE ( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase=False ) -> Dict:
    """Split each fused timm ``qkv`` projection into separate query/key/value tensors."""
    for i in range(config.num_hidden_layers ):
        if base_model:
            snake_case__ = ''''''
        else:
            snake_case__ = '''vit.'''
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        snake_case__ = state_dict.pop(F"""blocks.{i}.attn.qkv.weight""" )
        snake_case__ = state_dict.pop(F"""blocks.{i}.attn.qkv.bias""" )
        # next, add query, keys and values (in that order) to the state dict
        snake_case__ = in_proj_weight[
            : config.hidden_size, :
        ]
        snake_case__ = in_proj_bias[: config.hidden_size]
        snake_case__ = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        snake_case__ = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        snake_case__ = in_proj_weight[
            -config.hidden_size :, :
        ]
        snake_case__ = in_proj_bias[-config.hidden_size :]


def SCREAMING_SNAKE_CASE ( __lowerCAmelCase ) -> Optional[Any]:
    """Drop the classification-head tensors from the state dict (base model only)."""
    snake_case__ = ['''head.weight''', '''head.bias''']
    for k in ignore_keys:
        state_dict.pop(__lowerCAmelCase , __lowerCAmelCase )


def SCREAMING_SNAKE_CASE ( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) -> Union[str, Any]:
    """Move ``dct[old]`` to ``dct[new]`` (pop then re-assign)."""
    snake_case__ = dct.pop(__lowerCAmelCase )
    snake_case__ = val


def SCREAMING_SNAKE_CASE ( ) -> str:
    """Download the COCO val2017 image used as a conversion sanity check."""
    snake_case__ = '''http://images.cocodataset.org/val2017/000000039769.jpg'''
    snake_case__ = Image.open(requests.get(__lowerCAmelCase , stream=__lowerCAmelCase ).raw )
    return im


@torch.no_grad()
def SCREAMING_SNAKE_CASE ( __lowerCAmelCase , __lowerCAmelCase ) -> Dict:
    """Convert the timm checkpoint named ``vit_name`` to HF ViT format, verify the
    outputs against the timm model on one image, and save model + processor to
    ``pytorch_dump_folder_path``.
    """
    # define default ViT configuration; overridden per model-name below
    snake_case__ = ViTConfig()
    snake_case__ = False
    # dataset (ImageNet-21k only or also fine-tuned on ImageNet 2012), patch_size and image_size
    if vit_name[-5:] == "in21k":
        snake_case__ = True
        # patch_size and image_size parsed out of the timm model name
        snake_case__ = int(vit_name[-12:-10] )
        snake_case__ = int(vit_name[-9:-6] )
    else:
        snake_case__ = 1000
        snake_case__ = '''huggingface/label-files'''
        snake_case__ = '''imagenet-1k-id2label.json'''
        snake_case__ = json.load(open(hf_hub_download(__lowerCAmelCase , __lowerCAmelCase , repo_type='''dataset''' ) , '''r''' ) )
        snake_case__ = {int(__lowerCAmelCase ): v for k, v in idalabel.items()}
        snake_case__ = idalabel
        snake_case__ = {v: k for k, v in idalabel.items()}
        snake_case__ = int(vit_name[-6:-4] )
        snake_case__ = int(vit_name[-3:] )
    # size of the architecture
    if "deit" in vit_name:
        if vit_name[9:].startswith('''tiny''' ):
            snake_case__ = 192
            snake_case__ = 768
            snake_case__ = 12
            snake_case__ = 3
        elif vit_name[9:].startswith('''small''' ):
            snake_case__ = 384
            snake_case__ = 1536
            snake_case__ = 12
            snake_case__ = 6
        else:
            # deit-base keeps the ViTConfig defaults
            pass
    else:
        if vit_name[4:].startswith('''small''' ):
            snake_case__ = 768
            snake_case__ = 2304
            snake_case__ = 8
            snake_case__ = 8
        elif vit_name[4:].startswith('''base''' ):
            pass
        elif vit_name[4:].startswith('''large''' ):
            snake_case__ = 1024
            snake_case__ = 4096
            snake_case__ = 24
            snake_case__ = 16
        elif vit_name[4:].startswith('''huge''' ):
            snake_case__ = 1280
            snake_case__ = 5120
            snake_case__ = 32
            snake_case__ = 16

    # load original model from timm
    snake_case__ = timm.create_model(__lowerCAmelCase , pretrained=__lowerCAmelCase )
    timm_model.eval()

    # load state_dict of original model, remove and rename some keys
    snake_case__ = timm_model.state_dict()
    if base_model:
        remove_classification_head_(__lowerCAmelCase )
    snake_case__ = create_rename_keys(__lowerCAmelCase , __lowerCAmelCase )
    for src, dest in rename_keys:
        rename_key(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
    read_in_q_k_v(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )

    # load HuggingFace model
    if vit_name[-5:] == "in21k":
        snake_case__ = ViTModel(__lowerCAmelCase ).eval()
    else:
        snake_case__ = ViTForImageClassification(__lowerCAmelCase ).eval()
    model.load_state_dict(__lowerCAmelCase )

    # Check outputs on an image, prepared by ViTImageProcessor/DeiTImageProcessor
    if "deit" in vit_name:
        snake_case__ = DeiTImageProcessor(size=config.image_size )
    else:
        snake_case__ = ViTImageProcessor(size=config.image_size )
    snake_case__ = image_processor(images=prepare_img() , return_tensors='''pt''' )
    snake_case__ = encoding['''pixel_values''']
    snake_case__ = model(__lowerCAmelCase )

    if base_model:
        # Base model: compare pooled features against timm's forward_features.
        snake_case__ = timm_model.forward_features(__lowerCAmelCase )
        assert timm_pooled_output.shape == outputs.pooler_output.shape
        assert torch.allclose(__lowerCAmelCase , outputs.pooler_output , atol=1e-3 )
    else:
        # Classifier: compare logits directly.
        snake_case__ = timm_model(__lowerCAmelCase )
        assert timm_logits.shape == outputs.logits.shape
        assert torch.allclose(__lowerCAmelCase , outputs.logits , atol=1e-3 )

    Path(__lowerCAmelCase ).mkdir(exist_ok=__lowerCAmelCase )
    print(F"""Saving model {vit_name} to {pytorch_dump_folder_path}""" )
    model.save_pretrained(__lowerCAmelCase )
    print(F"""Saving image processor to {pytorch_dump_folder_path}""" )
    image_processor.save_pretrained(__lowerCAmelCase )


if __name__ == "__main__":
    lowerCamelCase__ : Optional[Any] = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        """--vit_name""",
        default="""vit_base_patch16_224""",
        type=str,
        help="""Name of the ViT timm model you'd like to convert.""",
    )
    parser.add_argument(
        """--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
    )

    lowerCamelCase__ : str = parser.parse_args()
    convert_vit_checkpoint(args.vit_name, args.pytorch_dump_folder_path)
33
1
"""Produce the photographic negative of an image (script reads image_data/lena.jpg)."""


def convert_to_negative(img):
    """Return the negative of ``img``: every channel value ``v`` becomes ``255 - v``.

    The array is modified in place and also returned, matching the original
    per-pixel loop's contract. Works for any uint8 image shape (grayscale or
    multi-channel), a generalization of the original 3-channel-only loop.
    """
    # One vectorized numpy broadcast replaces the original O(H*W) Python loop
    # (and fixes the def-name / call-site mismatch that made the script crash).
    img[:] = 255 - img
    return img


# Backward-compatible alias for the previous (generated) name of this function.
SCREAMING_SNAKE_CASE = convert_to_negative


if __name__ == "__main__":
    # Script-only third-party import, kept under the guard so the module is
    # importable (e.g. for convert_to_negative) without OpenCV installed.
    from cva import destroyAllWindows, imread, imshow, waitKey

    # read original image
    img = imread("image_data/lena.jpg", 1)

    # convert to its negative (in place)
    img = convert_to_negative(img)

    # show result image
    imshow("negative of original image", img)
    waitKey(0)
    destroyAllWindows()
33
import re
import warnings
from contextlib import contextmanager

from ...processing_utils import ProcessorMixin


class __magic_name__ (snake_case_ ):
    """Processor pairing an auto image processor with an auto tokenizer.

    ``__call__`` routes ``images`` to the image processor and ``text`` to the
    tokenizer; ``tokenajson`` converts a generated ``<s_key>...</s_key>`` token
    string into a JSON-like dict.
    """

    __lowercase : List[str] = ['image_processor', 'tokenizer']
    __lowercase : str = 'AutoImageProcessor'
    __lowercase : Dict = 'AutoTokenizer'

    def __init__( self:int , _a:List[str]=None , _a:Optional[Any]=None , **_a:List[str] ):
        # Accept the deprecated `feature_extractor` kwarg as a fallback for image_processor.
        snake_case__ = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                '''The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'''
                ''' instead.''' ,
                _a ,
            )
            snake_case__ = kwargs.pop('''feature_extractor''' )

        snake_case__ = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError('''You need to specify an `image_processor`.''' )
        if tokenizer is None:
            raise ValueError('''You need to specify a `tokenizer`.''' )

        super().__init__(_a , _a )
        snake_case__ = self.image_processor
        snake_case__ = False

    def __call__( self:Optional[int] , *_a:str , **_a:int ):
        # For backward compatibility
        if self._in_target_context_manager:
            # Inside `as_target_processor`, everything goes to the current processor.
            return self.current_processor(*_a , **_a )

        snake_case__ = kwargs.pop('''images''' , _a )
        snake_case__ = kwargs.pop('''text''' , _a )
        if len(_a ) > 0:
            # First positional argument is treated as `images`.
            snake_case__ = args[0]
            snake_case__ = args[1:]

        if images is None and text is None:
            raise ValueError('''You need to specify either an `images` or `text` input to process.''' )

        if images is not None:
            snake_case__ = self.image_processor(_a , *_a , **_a )
        if text is not None:
            snake_case__ = self.tokenizer(_a , **_a )

        if text is None:
            return inputs
        elif images is None:
            return encodings
        else:
            # Both given: attach the tokenized labels to the image inputs.
            snake_case__ = encodings['''input_ids''']
            return inputs

    def SCREAMING_SNAKE_CASE__ ( self:Union[str, Any] , *_a:Union[str, Any] , **_a:Any ):
        # Forwarded to the tokenizer's batch_decode.
        return self.tokenizer.batch_decode(*_a , **_a )

    def SCREAMING_SNAKE_CASE__ ( self:Tuple , *_a:Union[str, Any] , **_a:Optional[int] ):
        # Forwarded to the tokenizer's decode.
        return self.tokenizer.decode(*_a , **_a )

    @contextmanager
    def SCREAMING_SNAKE_CASE__ ( self:Tuple ):
        # Deprecated target-processing context: temporarily makes the tokenizer
        # the "current processor", then restores the image processor on exit.
        warnings.warn(
            '''`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your '''
            '''labels by using the argument `text` of the regular `__call__` method (either in the same call as '''
            '''your images inputs, or in a separate call.''' )
        snake_case__ = True
        snake_case__ = self.tokenizer
        yield
        snake_case__ = self.image_processor
        snake_case__ = False

    def SCREAMING_SNAKE_CASE__ ( self:List[str] , _a:Dict , _a:Dict=False , _a:Optional[int]=None ):
        """Convert a generated token sequence into a JSON-like dict.

        NOTE(review): the recursive calls below reference ``self.tokenajson``
        while this method's name was mangled to ``SCREAMING_SNAKE_CASE__`` —
        confirm the intended method name is ``tokenajson``.
        """
        if added_vocab is None:
            snake_case__ = self.tokenizer.get_added_vocab()

        snake_case__ = {}

        while tokens:
            # Find the next opening tag <s_key>.
            snake_case__ = re.search(r'''<s_(.*?)>''' , _a , re.IGNORECASE )
            if start_token is None:
                break
            snake_case__ = start_token.group(1 )
            snake_case__ = re.search(rF"""</s_{key}>""" , _a , re.IGNORECASE )
            snake_case__ = start_token.group()
            if end_token is None:
                # Unclosed tag: drop it and keep scanning.
                snake_case__ = tokens.replace(_a , '''''' )
            else:
                snake_case__ = end_token.group()
                snake_case__ = re.escape(_a )
                snake_case__ = re.escape(_a )
                snake_case__ = re.search(F"""{start_token_escaped}(.*?){end_token_escaped}""" , _a , re.IGNORECASE )
                if content is not None:
                    snake_case__ = content.group(1 ).strip()
                    if r"<s_" in content and r"</s_" in content:  # non-leaf node
                        # Nested tags: recurse to build the child value.
                        snake_case__ = self.tokenajson(_a , is_inner_value=_a , added_vocab=_a )
                        if value:
                            if len(_a ) == 1:
                                snake_case__ = value[0]
                            snake_case__ = value
                    else:  # leaf nodes
                        snake_case__ = []
                        for leaf in content.split(r'''<sep/>''' ):
                            snake_case__ = leaf.strip()
                            if leaf in added_vocab and leaf[0] == "<" and leaf[-2:] == "/>":
                                snake_case__ = leaf[1:-2]  # for categorical special tokens
                            output[key].append(_a )
                        if len(output[key] ) == 1:
                            snake_case__ = output[key][0]

                # Continue parsing after the closing tag.
                snake_case__ = tokens[tokens.find(_a ) + len(_a ) :].strip()
                if tokens[:6] == r"<sep/>":  # non-leaf nodes
                    return [output] + self.tokenajson(tokens[6:] , is_inner_value=_a , added_vocab=_a )

        if len(_a ):
            return [output] if is_inner_value else output
        else:
            return [] if is_inner_value else {"text_sequence": tokens}

    @property
    def SCREAMING_SNAKE_CASE__ ( self:Optional[Any] ):
        # Deprecated accessor kept for backward compatibility.
        warnings.warn(
            '''`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.''' ,
            _a ,
        )
        return self.image_processor_class

    @property
    def SCREAMING_SNAKE_CASE__ ( self:Optional[int] ):
        # Deprecated accessor kept for backward compatibility.
        warnings.warn(
            '''`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.''' ,
            _a ,
        )
        return self.image_processor
33
1
def mf_knapsack(i, wt, val, j):
    """Memoized (top-down) 0/1 knapsack.

    Returns the best value using the first ``i`` items with capacity ``j``,
    caching results in the module-level table ``f`` (``-1`` marks "unsolved";
    ``f`` must be initialized by the caller, as done in the main guard below).
    """
    global f  # a global dp table for knapsack
    if f[i][j] < 0:
        if j < wt[i - 1]:
            # Item i cannot fit: value is the best without it.
            best = mf_knapsack(i - 1, wt, val, j)
        else:
            # Either skip item i, or take it and solve the reduced capacity.
            best = max(
                mf_knapsack(i - 1, wt, val, j),
                mf_knapsack(i - 1, wt, val, j - wt[i - 1]) + val[i - 1],
            )
        f[i][j] = best
    return f[i][j]


def knapsack(w, wt, val, n):
    """Bottom-up 0/1 knapsack.

    Returns ``(optimal_value, dp)`` where ``dp[i][j]`` is the best value using
    the first ``i`` of the ``n`` items within capacity ``j <= w``.
    """
    dp = [[0] * (w + 1) for _ in range(n + 1)]

    for i in range(1, n + 1):
        for cap in range(1, w + 1):
            if wt[i - 1] <= cap:
                dp[i][cap] = max(val[i - 1] + dp[i - 1][cap - wt[i - 1]], dp[i - 1][cap])
            else:
                dp[i][cap] = dp[i - 1][cap]

    # Bug fix: previously returned dp[n][w_] using the leaked loop variable,
    # which raised NameError for w == 0 and obscured the intent.
    return dp[n][w], dp


def knapsack_with_example_solution(w, wt, val):
    """Solve 0/1 knapsack and also reconstruct one optimal subset of items.

    Items are 1-indexed in the returned subset. Raises ValueError on
    non-sequence inputs or mismatched lengths, TypeError on non-integer weights.
    """
    if not (isinstance(wt, (list, tuple)) and isinstance(val, (list, tuple))):
        raise ValueError(
            "Both the weights and values vectors must be either lists or tuples"
        )

    num_items = len(wt)
    if num_items != len(val):
        msg = (
            "The number of weights must be the same as the number of values.\n"
            f"But got {num_items} weights and {len(val)} values"
        )
        raise ValueError(msg)
    for i in range(num_items):
        if not isinstance(wt[i], int):
            msg = (
                "All weights must be integers but got weight of "
                f"type {type(wt[i])} at index {i}"
            )
            raise TypeError(msg)

    optimal_val, dp_table = knapsack(w, wt, val, num_items)
    example_optional_set = set()
    _construct_solution(dp_table, wt, num_items, w, example_optional_set)
    return optimal_val, example_optional_set


def _construct_solution(dp, wt, i, j, optimal_set):
    """Backtrack through ``dp`` adding chosen item indices to ``optimal_set``."""
    # for the current item i at a maximum weight j to be part of an optimal subset,
    # the optimal value at (i, j) must be greater than the optimal value at (i-1, j).
    # where i - 1 means considering only the previous items at the given maximum weight
    if i > 0 and j > 0:
        if dp[i - 1][j] == dp[i][j]:
            _construct_solution(dp, wt, i - 1, j, optimal_set)
        else:
            optimal_set.add(i)
            _construct_solution(dp, wt, i - 1, j - wt[i - 1], optimal_set)


if __name__ == "__main__":
    val = [3, 2, 4, 4]
    wt = [4, 3, 2, 3]
    n = 4
    w = 6
    # Memo table for mf_knapsack: -1 marks unsolved entries.
    f = [[0] * (w + 1)] + [[0] + [-1] * (w + 1) for _ in range(n + 1)]

    optimal_solution, _ = knapsack(w, wt, val, n)
    print(optimal_solution)
    print(mf_knapsack(n, wt, val, w))  # switched the n and w

    # testing the dynamic programming problem with example
    # the optimal subset for the above example are items 3 and 4
    optimal_solution, optimal_subset = knapsack_with_example_solution(w, wt, val)
    assert optimal_solution == 8
    assert optimal_subset == {3, 4}
    print("optimal_value = ", optimal_solution)
    print("An optimal subset corresponding to the optimal value", optimal_subset)
33
import inspect
import unittest

from transformers import BitConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available

from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin


if is_torch_available():
    import torch
    from torch import nn

    from transformers import BitBackbone, BitForImageClassification, BitImageProcessor, BitModel
    from transformers.models.bit.modeling_bit import BIT_PRETRAINED_MODEL_ARCHIVE_LIST

if is_vision_available():
    from PIL import Image


class BitModelTester:
    """Builds tiny Bit configurations and dummy inputs for the fast tests."""

    def __init__(
        self,
        parent,
        batch_size=3,
        image_size=32,
        num_channels=3,
        embeddings_size=10,
        hidden_sizes=[8, 16, 32, 64],
        depths=[1, 1, 2, 1],
        is_training=True,
        use_labels=True,
        hidden_act="relu",
        num_labels=3,
        scope=None,
        out_features=["stage2", "stage3", "stage4"],
        out_indices=[2, 3, 4],
        num_groups=1,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.embeddings_size = embeddings_size
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_act = hidden_act
        self.num_labels = num_labels
        self.scope = scope
        # one hidden-state entry is produced per stage (plus the stem)
        self.num_stages = len(depths)
        self.out_features = out_features
        self.out_indices = out_indices
        self.num_groups = num_groups

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)

        config = self.get_config()

        return config, pixel_values, labels

    def get_config(self):
        return BitConfig(
            num_channels=self.num_channels,
            embeddings_size=self.embeddings_size,
            hidden_sizes=self.hidden_sizes,
            depths=self.depths,
            hidden_act=self.hidden_act,
            num_labels=self.num_labels,
            out_features=self.out_features,
            out_indices=self.out_indices,
            num_groups=self.num_groups,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = BitModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        # overall downsampling factor of the backbone is 32
        self.parent.assertEqual(
            result.last_hidden_state.shape,
            (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32),
        )

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.num_labels
        model = BitForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_backbone(self, config, pixel_values, labels):
        model = BitBackbone(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)

        # verify feature maps
        self.parent.assertEqual(len(result.feature_maps), len(config.out_features))
        self.parent.assertListEqual(list(result.feature_maps[0].shape), [self.batch_size, self.hidden_sizes[1], 4, 4])

        # verify channels
        self.parent.assertEqual(len(model.channels), len(config.out_features))
        self.parent.assertListEqual(model.channels, config.hidden_sizes[1:])

        # verify backbone works with out_features=None
        config.out_features = None
        model = BitBackbone(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)

        # verify feature maps
        self.parent.assertEqual(len(result.feature_maps), 1)
        self.parent.assertListEqual(list(result.feature_maps[0].shape), [self.batch_size, self.hidden_sizes[-1], 1, 1])

        # verify channels
        self.parent.assertEqual(len(model.channels), 1)
        self.parent.assertListEqual(model.channels, [config.hidden_sizes[-1]])

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict


@require_torch
class BitModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """
    Here we also overwrite some of the tests of test_modeling_common.py, as Bit
    does not use input_ids, inputs_embeds, attention_mask and seq_length.
    """

    all_model_classes = (BitModel, BitForImageClassification, BitBackbone) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": BitModel, "image-classification": BitForImageClassification}
        if is_torch_available()
        else {}
    )

    fx_compatible = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False

    def setUp(self):
        self.model_tester = BitModelTester(self)
        self.config_tester = ConfigTester(self, config_class=BitConfig, has_text_modality=False)

    def test_config(self):
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def create_and_test_config_common_properties(self):
        # Bit has no text-modality properties to check
        return

    @unittest.skip(reason="Bit does not output attentions")
    def test_attention_outputs(self):
        pass

    @unittest.skip(reason="Bit does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="Bit does not support input and output embeddings")
    def test_model_common_attributes(self):
        pass

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_backbone(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_backbone(*config_and_inputs)

    def test_initialization(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config=config)
            for name, module in model.named_modules():
                if isinstance(module, (nn.BatchNorm2d, nn.GroupNorm)):
                    self.assertTrue(
                        torch.all(module.weight == 1),
                        msg=f"Parameter {name} of model {model_class} seems not properly initialized",
                    )
                    self.assertTrue(
                        torch.all(module.bias == 0),
                        msg=f"Parameter {name} of model {model_class} seems not properly initialized",
                    )

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states

            expected_num_stages = self.model_tester.num_stages
            self.assertEqual(len(hidden_states), expected_num_stages + 1)

            # Bit's feature maps are of shape (batch_size, num_channels, height, width)
            self.assertListEqual(
                list(hidden_states[0].shape[-2:]),
                [self.model_tester.image_size // 4, self.model_tester.image_size // 4],
            )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        layers_type = ["preactivation", "bottleneck"]
        for model_class in self.all_model_classes:
            for layer_type in layers_type:
                config.layer_type = layer_type
                inputs_dict["output_hidden_states"] = True
                check_hidden_states_output(inputs_dict, config, model_class)

                # check that output_hidden_states also work using config
                del inputs_dict["output_hidden_states"]
                config.output_hidden_states = True

                check_hidden_states_output(inputs_dict, config, model_class)

    @unittest.skip(reason="Bit does not use feedforward chunking")
    def test_feed_forward_chunking(self):
        pass

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in BIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = BitModel.from_pretrained(model_name)
            self.assertIsNotNone(model)


def prepare_img():
    """Load the standard COCO cats test fixture."""
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image


@require_torch
@require_vision
class BitModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return (
            BitImageProcessor.from_pretrained(BIT_PRETRAINED_MODEL_ARCHIVE_LIST[0]) if is_vision_available() else None
        )

    @slow
    def test_inference_image_classification_head(self):
        model = BitForImageClassification.from_pretrained(BIT_PRETRAINED_MODEL_ARCHIVE_LIST[0]).to(torch_device)

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([[-0.6526, -0.5263, -1.4398]]).to(torch_device)

        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))


@require_torch
class BitBackboneTest(BackboneTesterMixin, unittest.TestCase):
    all_model_classes = (BitBackbone,) if is_torch_available() else ()
    config_class = BitConfig

    has_attentions = False

    def setUp(self):
        self.model_tester = BitModelTester(self)
33
1
import argparse

import torch

from transformers import OpenAIGPTConfig, OpenAIGPTModel, load_tf_weights_in_openai_gpt
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging


logging.set_verbosity_info()


def convert_openai_checkpoint_to_pytorch(openai_checkpoint_folder_path, openai_config_file, pytorch_dump_folder_path):
    """Convert an original OpenAI GPT TF checkpoint into a PyTorch model + config.

    Args:
        openai_checkpoint_folder_path: folder holding the TensorFlow checkpoint.
        openai_config_file: optional JSON config; an empty string means use defaults.
        pytorch_dump_folder_path: output folder for WEIGHTS_NAME / CONFIG_NAME.
    """
    # Construct model
    if openai_config_file == "":
        config = OpenAIGPTConfig()
    else:
        config = OpenAIGPTConfig.from_json_file(openai_config_file)
    model = OpenAIGPTModel(config)

    # Load weights from numpy
    load_tf_weights_in_openai_gpt(model, config, openai_checkpoint_folder_path)

    # Save pytorch-model
    pytorch_weights_dump_path = pytorch_dump_folder_path + "/" + WEIGHTS_NAME
    pytorch_config_dump_path = pytorch_dump_folder_path + "/" + CONFIG_NAME
    print(f"Save PyTorch model to {pytorch_weights_dump_path}")
    torch.save(model.state_dict(), pytorch_weights_dump_path)
    print(f"Save configuration file to {pytorch_config_dump_path}")
    with open(pytorch_config_dump_path, "w", encoding="utf-8") as f:
        f.write(config.to_json_string())


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--openai_checkpoint_folder_path",
        default=None,
        type=str,
        required=True,
        help="Path to the TensorFlow checkpoint path.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    parser.add_argument(
        "--openai_config_file",
        default="",
        type=str,
        help=(
            "An optional config json file corresponding to the pre-trained OpenAI model. \n"
            "This specifies the model architecture."
        ),
    )
    args = parser.parse_args()
    convert_openai_checkpoint_to_pytorch(
        args.openai_checkpoint_folder_path, args.openai_config_file, args.pytorch_dump_folder_path
    )
33
import numpy as np
import torch
from torch.nn import CrossEntropyLoss
from transformers import AutoModelForCausalLM, AutoTokenizer

import datasets
from datasets import logging


_CITATION = """\
"""

_DESCRIPTION = """
Perplexity (PPL) is one of the most common metrics for evaluating language models.
It is defined as the exponentiated average negative log-likelihood of a sequence.

For more information, see https://huggingface.co/docs/transformers/perplexity
"""

_KWARGS_DESCRIPTION = """
Args:
    model_id (str): model used for calculating Perplexity
            NOTE: Perplexity can only be calculated for causal language models.
                    This includes models such as gpt2, causal variations of bert,
                    causal versions of t5, and more (the full list can be found
                    in the AutoModelForCausalLM documentation here:
                    https://huggingface.co/docs/transformers/master/en/model_doc/auto#transformers.AutoModelForCausalLM )

    input_texts (list of str): input text, each separate text snippet
        is one list entry.
    batch_size (int): the batch size to run texts through the model. Defaults to 16.
    add_start_token (bool): whether to add the start token to the texts,
        so the perplexity can include the probability of the first word. Defaults to True.
    device (str): device to run on, defaults to 'cuda' when available
Returns:
    perplexity: dictionary containing the perplexity scores for the texts
        in the input list, as well as the mean perplexity. If one of the input texts is
        longer than the max input length of the model, then it is truncated to the
        max length for the perplexity computation.
Examples:
    Example 1:
        >>> perplexity = datasets.load_metric("perplexity")
        >>> input_texts = ["lorem ipsum", "Happy Birthday!", "Bienvenue"]
        >>> results = perplexity.compute(model_id='gpt2',
        ...                              add_start_token=False,
        ...                              input_texts=input_texts) # doctest:+ELLIPSIS
        >>> print(list(results.keys()))
        ['perplexities', 'mean_perplexity']
        >>> print(round(results["mean_perplexity"], 2))
        78.22
        >>> print(round(results["perplexities"][0], 2))
        11.11

    Example 2:
        >>> perplexity = datasets.load_metric("perplexity")
        >>> input_texts = datasets.load_dataset("wikitext",
        ...                                     "wikitext-2-raw-v1",
        ...                                     split="test")["text"][:50] # doctest:+ELLIPSIS
        [...]
        >>> input_texts = [s for s in input_texts if s!='']
        >>> results = perplexity.compute(model_id='gpt2',
        ...                              input_texts=input_texts) # doctest:+ELLIPSIS
        >>> print(list(results.keys()))
        ['perplexities', 'mean_perplexity']
        >>> print(round(results["mean_perplexity"], 2))
        60.35
        >>> print(round(results["perplexities"][0], 2))
        81.12
"""


@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class Perplexity(datasets.Metric):
    """Causal-LM perplexity metric (see module docstrings for usage)."""

    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "input_texts": datasets.Value("string"),
                }
            ),
            reference_urls=["https://huggingface.co/docs/transformers/perplexity"],
        )

    def _compute(self, input_texts, model_id, batch_size: int = 16, add_start_token: bool = True, device=None):
        # resolve the device: honour an explicit request, otherwise prefer CUDA
        if device is not None:
            assert device in ["gpu", "cpu", "cuda"], "device should be either gpu or cpu."
            if device == "gpu":
                device = "cuda"
        else:
            device = "cuda" if torch.cuda.is_available() else "cpu"

        model = AutoModelForCausalLM.from_pretrained(model_id)
        model = model.to(device)

        tokenizer = AutoTokenizer.from_pretrained(model_id)

        # if batch_size > 1 (which generally leads to padding being required), and
        # if there is not an already assigned pad_token, assign an existing
        # special token to also be the padding token
        if tokenizer.pad_token is None and batch_size > 1:
            existing_special_tokens = list(tokenizer.special_tokens_map_extended.values())
            # check that the model already has at least one special token defined
            assert (
                len(existing_special_tokens) > 0
            ), "If batch_size > 1, model must have at least one special token to use for padding. Please use a different model or set batch_size=1."
            # assign one of the special tokens to also be the pad token
            tokenizer.add_special_tokens({"pad_token": existing_special_tokens[0]})

        if add_start_token:
            # leave room for <BOS> token to be added:
            assert (
                tokenizer.bos_token is not None
            ), "Input model must already have a BOS token if using add_start_token=True. Please use a different model, or set add_start_token=False"
            max_tokenized_len = model.config.max_length - 1
        else:
            max_tokenized_len = model.config.max_length

        encodings = tokenizer(
            input_texts,
            add_special_tokens=False,
            padding=True,
            truncation=True,
            max_length=max_tokenized_len,
            return_tensors="pt",
            return_attention_mask=True,
        ).to(device)

        encoded_texts = encodings["input_ids"]
        attn_masks = encodings["attention_mask"]

        # check that each input is long enough:
        if add_start_token:
            assert torch.all(torch.ge(attn_masks.sum(1), 1)), "Each input text must be at least one token long."
        else:
            assert torch.all(
                torch.ge(attn_masks.sum(1), 2)
            ), "When add_start_token=False, each input text must be at least two tokens long. Run with add_start_token=True if inputting strings of only one token, and remove all empty input strings."

        ppls = []
        loss_fct = CrossEntropyLoss(reduction="none")

        for start_index in logging.tqdm(range(0, len(encoded_texts), batch_size)):
            end_index = min(start_index + batch_size, len(encoded_texts))
            encoded_batch = encoded_texts[start_index:end_index]
            attn_mask = attn_masks[start_index:end_index]

            if add_start_token:
                # prepend BOS and extend the attention mask accordingly
                bos_tokens_tensor = torch.tensor([[tokenizer.bos_token_id]] * encoded_batch.size(dim=0)).to(device)
                encoded_batch = torch.cat([bos_tokens_tensor, encoded_batch], dim=1)
                attn_mask = torch.cat(
                    [torch.ones(bos_tokens_tensor.size(), dtype=torch.int64).to(device), attn_mask], dim=1
                )

            labels = encoded_batch

            with torch.no_grad():
                out_logits = model(encoded_batch, attention_mask=attn_mask).logits

            # shift so that tokens < n predict token n; mask out padding positions
            shift_logits = out_logits[..., :-1, :].contiguous()
            shift_labels = labels[..., 1:].contiguous()
            shift_attention_mask_batch = attn_mask[..., 1:].contiguous()

            # CrossEntropyLoss is a natural-log NLL, so PPL = exp(mean NLL)
            perplexity_batch = torch.exp(
                (loss_fct(shift_logits.transpose(1, 2), shift_labels) * shift_attention_mask_batch).sum(1)
                / shift_attention_mask_batch.sum(1)
            )

            ppls += perplexity_batch.tolist()

        return {"perplexities": ppls, "mean_perplexity": np.mean(ppls)}
33
1
from __future__ import annotations import csv import requests from bsa import BeautifulSoup def SCREAMING_SNAKE_CASE ( __lowerCAmelCase = "" ) -> dict[str, float]: snake_case__ = url or '''https://www.imdb.com/chart/top/?ref_=nv_mv_250''' snake_case__ = BeautifulSoup(requests.get(__lowerCAmelCase ).text , '''html.parser''' ) snake_case__ = soup.find_all('''td''' , attrs='''titleColumn''' ) snake_case__ = soup.find_all('''td''' , class_='''ratingColumn imdbRating''' ) return { title.a.text: float(rating.strong.text ) for title, rating in zip(__lowerCAmelCase , __lowerCAmelCase ) } def SCREAMING_SNAKE_CASE ( __lowerCAmelCase = "IMDb_Top_250_Movies.csv" ) -> None: snake_case__ = get_imdb_top_aaa_movies() with open(__lowerCAmelCase , '''w''' , newline='''''' ) as out_file: snake_case__ = csv.writer(__lowerCAmelCase ) writer.writerow(['''Movie title''', '''IMDb rating'''] ) for title, rating in movies.items(): writer.writerow([title, rating] ) if __name__ == "__main__": write_movies()
33
import os
from datetime import datetime as dt

from github import Github


# issues carrying any of these labels are never auto-staled/closed
LABELS_TO_EXEMPT = [
    "good first issue",
    "good second issue",
    "good difficult issue",
    "enhancement",
    "new pipeline/model",
    "new scheduler",
    "wip",
]


def main():
    """Stalebot for huggingface/diffusers: warn after 23 idle days, close 7 days later."""
    g = Github(os.environ["GITHUB_TOKEN"])
    repo = g.get_repo("huggingface/diffusers")
    open_issues = repo.get_issues(state="open")

    for issue in open_issues:
        # newest comment first
        comments = sorted(issue.get_comments(), key=lambda i: i.created_at, reverse=True)
        last_comment = comments[0] if len(comments) > 0 else None
        if (
            last_comment is not None
            and last_comment.user.login == "github-actions[bot]"
            and (dt.utcnow() - issue.updated_at).days > 7
            and (dt.utcnow() - issue.created_at).days >= 30
            and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels())
        ):
            # Closes the issue after 7 days of inactivity since the Stalebot notification.
            issue.edit(state="closed")
        elif (
            "stale" in issue.get_labels()
            and last_comment is not None
            and last_comment.user.login != "github-actions[bot]"
        ):
            # Opens the issue if someone other than Stalebot commented.
            issue.edit(state="open")
            issue.remove_from_labels("stale")
        elif (
            (dt.utcnow() - issue.updated_at).days > 23
            and (dt.utcnow() - issue.created_at).days >= 30
            and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels())
        ):
            # Post a Stalebot notification after 23 days of inactivity.
            issue.create_comment(
                "This issue has been automatically marked as stale because it has not had "
                "recent activity. If you think this still needs to be addressed "
                "please comment on this thread.\n\nPlease note that issues that do not follow the "
                "[contributing guidelines](https://github.com/huggingface/diffusers/blob/main/CONTRIBUTING.md) "
                "are likely to be ignored."
            )
            issue.add_to_labels("stale")


if __name__ == "__main__":
    main()
33
1
import functools
import operator

from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

UNISPEECH_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/unispeech-large-1500h-cv": (
        "https://huggingface.co/microsoft/unispeech-large-1500h-cv/resolve/main/config.json"
    ),
    # See all UniSpeech models at https://huggingface.co/models?filter=unispeech
}


class UniSpeechConfig(PretrainedConfig):
    """Configuration class for UniSpeech models.

    Mirrors the wav2vec2-style configuration: feature-extractor convolution
    stack, transformer encoder, SpecAugment masking, quantizer/pretraining
    settings, and CTC loss options.
    """

    model_type = "unispeech"

    def __init__(
        self,
        vocab_size=32,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout=0.1,
        activation_dropout=0.1,
        attention_dropout=0.1,
        feat_proj_dropout=0.0,
        feat_quantizer_dropout=0.0,
        final_dropout=0.1,
        layerdrop=0.1,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        feat_extract_norm="group",
        feat_extract_activation="gelu",
        conv_dim=(512, 512, 512, 512, 512, 512, 512),
        conv_stride=(5, 2, 2, 2, 2, 2, 2),
        conv_kernel=(10, 3, 3, 3, 3, 2, 2),
        conv_bias=False,
        num_conv_pos_embeddings=128,
        num_conv_pos_embedding_groups=16,
        do_stable_layer_norm=False,
        apply_spec_augment=True,
        mask_time_prob=0.05,
        mask_time_length=10,
        mask_time_min_masks=2,
        mask_feature_prob=0.0,
        mask_feature_length=10,
        mask_feature_min_masks=0,
        num_codevectors_per_group=320,
        num_codevector_groups=2,
        contrastive_logits_temperature=0.1,
        num_negatives=100,
        codevector_dim=256,
        proj_codevector_dim=256,
        diversity_loss_weight=0.1,
        ctc_loss_reduction="mean",
        ctc_zero_infinity=False,
        use_weighted_layer_sum=False,
        classifier_proj_size=256,
        num_ctc_classes=80,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        replace_prob=0.5,
        **kwargs,
    ):
        super().__init__(**kwargs, pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id)
        self.hidden_size = hidden_size
        self.feat_extract_norm = feat_extract_norm
        self.feat_extract_activation = feat_extract_activation
        self.conv_dim = list(conv_dim)
        self.conv_stride = list(conv_stride)
        self.conv_kernel = list(conv_kernel)
        self.conv_bias = conv_bias
        self.num_conv_pos_embeddings = num_conv_pos_embeddings
        self.num_conv_pos_embedding_groups = num_conv_pos_embedding_groups
        self.num_feat_extract_layers = len(self.conv_dim)
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.feat_proj_dropout = feat_proj_dropout
        self.final_dropout = final_dropout
        self.layerdrop = layerdrop
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.num_ctc_classes = num_ctc_classes
        self.vocab_size = vocab_size
        self.do_stable_layer_norm = do_stable_layer_norm
        self.use_weighted_layer_sum = use_weighted_layer_sum
        self.classifier_proj_size = classifier_proj_size

        # all three conv specs must describe the same number of layers
        if (
            (len(self.conv_stride) != self.num_feat_extract_layers)
            or (len(self.conv_kernel) != self.num_feat_extract_layers)
            or (len(self.conv_dim) != self.num_feat_extract_layers)
        ):
            raise ValueError(
                "Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =="
                " `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ="
                f" {len(self.conv_dim)}`, `len(config.conv_stride) = {len(self.conv_stride)}`,"
                f" `len(config.conv_kernel) = {len(self.conv_kernel)}`."
            )

        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.apply_spec_augment = apply_spec_augment
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length
        self.mask_feature_min_masks = mask_feature_min_masks

        # parameters for pretraining with codevector quantized representations
        self.num_codevectors_per_group = num_codevectors_per_group
        self.num_codevector_groups = num_codevector_groups
        self.contrastive_logits_temperature = contrastive_logits_temperature
        self.feat_quantizer_dropout = feat_quantizer_dropout
        self.num_negatives = num_negatives
        self.codevector_dim = codevector_dim
        self.proj_codevector_dim = proj_codevector_dim
        self.diversity_loss_weight = diversity_loss_weight

        # ctc loss
        self.ctc_loss_reduction = ctc_loss_reduction
        self.ctc_zero_infinity = ctc_zero_infinity

        # pretraining loss
        self.replace_prob = replace_prob

    @property
    def inputs_to_logits_ratio(self):
        # overall downsampling factor of the feature-extractor conv stack
        return functools.reduce(operator.mul, self.conv_stride, 1)
33
import pytest

from datasets.utils.sharding import _distribute_shards, _number_of_shards_in_gen_kwargs, _split_gen_kwargs


@pytest.mark.parametrize(
    "kwargs, expected",
    [
        ({"num_shards": 0, "max_num_jobs": 1}, []),
        ({"num_shards": 10, "max_num_jobs": 1}, [range(10)]),
        ({"num_shards": 10, "max_num_jobs": 10}, [range(i, i + 1) for i in range(10)]),
        ({"num_shards": 1, "max_num_jobs": 10}, [range(1)]),
        ({"num_shards": 10, "max_num_jobs": 3}, [range(0, 4), range(4, 7), range(7, 10)]),
        ({"num_shards": 3, "max_num_jobs": 10}, [range(0, 1), range(1, 2), range(2, 3)]),
    ],
)
def test_distribute_shards(kwargs, expected):
    # shards must be spread as evenly as possible over at most max_num_jobs jobs
    out = _distribute_shards(**kwargs)
    assert out == expected


@pytest.mark.parametrize(
    "gen_kwargs, max_num_jobs, expected",
    [
        ({"foo": 0}, 10, [{"foo": 0}]),
        ({"shards": [0, 1, 2, 3]}, 1, [{"shards": [0, 1, 2, 3]}]),
        ({"shards": [0, 1, 2, 3]}, 4, [{"shards": [0]}, {"shards": [1]}, {"shards": [2]}, {"shards": [3]}]),
        ({"shards": [0, 1]}, 4, [{"shards": [0]}, {"shards": [1]}]),
        ({"shards": [0, 1, 2, 3]}, 2, [{"shards": [0, 1]}, {"shards": [2, 3]}]),
    ],
)
def test_split_gen_kwargs(gen_kwargs, max_num_jobs, expected):
    # list-valued kwargs are sharded, scalar kwargs are replicated per job
    out = _split_gen_kwargs(gen_kwargs, max_num_jobs)
    assert out == expected


@pytest.mark.parametrize(
    "gen_kwargs, expected",
    [
        ({"foo": 0}, 1),
        ({"shards": [0]}, 1),
        ({"shards": [0, 1, 2, 3]}, 4),
        ({"shards": [0, 1, 2, 3], "foo": 0}, 4),
        ({"shards": [0, 1, 2, 3], "other": (0, 1)}, 4),
        # two list kwargs of different lengths are ambiguous -> error
        ({"shards": [0, 1, 2, 3], "shards2": [0, 1]}, RuntimeError),
    ],
)
def test_number_of_shards_in_gen_kwargs(gen_kwargs, expected):
    if expected is RuntimeError:
        with pytest.raises(expected):
            _number_of_shards_in_gen_kwargs(gen_kwargs)
    else:
        out = _number_of_shards_in_gen_kwargs(gen_kwargs)
        assert out == expected
33
1
from collections import Counter
from pathlib import Path
from typing import Optional, Tuple

import yaml


class _NoDuplicateSafeLoader(yaml.SafeLoader):
    """A yaml.SafeLoader that raises on duplicate mapping keys instead of silently keeping the last one."""

    def _check_no_duplicates_on_constructed_node(self, node):
        keys = [self.constructed_objects[key_node] for key_node, _ in node.value]
        # lists are unhashable -> convert to tuples before counting
        keys = [tuple(key) if isinstance(key, list) else key for key in keys]
        counter = Counter(keys)
        duplicate_keys = [key for key in counter if counter[key] > 1]
        if duplicate_keys:
            raise TypeError(f"Got duplicate yaml keys: {duplicate_keys}")

    def construct_mapping(self, node, deep=False):
        mapping = super().construct_mapping(node, deep=deep)
        self._check_no_duplicates_on_constructed_node(node)
        return mapping


def _split_yaml_from_readme(readme_content: str) -> Tuple[Optional[str], str]:
    """Split a README into its leading ``---``-delimited YAML block and the remaining body.

    Returns (yaml_block or None, rest_of_content).
    """
    full_content = list(readme_content.splitlines())
    if full_content and full_content[0] == "---" and "---" in full_content[1:]:
        sep_idx = full_content[1:].index("---") + 1
        yamlblock = "\n".join(full_content[1:sep_idx])
        return yamlblock, "\n".join(full_content[sep_idx + 1 :])

    return None, "\n".join(full_content)


class DatasetMetadata(dict):
    """Dataset-card metadata stored as a dict, (de)serialisable from/to the README YAML block."""

    # class attributes
    _FIELDS_WITH_DASHES = {"train_eval_index"}  # train-eval-index in the YAML metadata

    @classmethod
    def from_readme(cls, path: Path) -> "DatasetMetadata":
        """Load the metadata from a dataset card (README.md); empty metadata if no YAML block."""
        with open(path, encoding="utf-8") as readme_file:
            yaml_string, _ = _split_yaml_from_readme(readme_file.read())
        if yaml_string is not None:
            return cls.from_yaml_string(yaml_string)
        else:
            return cls()

    def to_readme(self, path: Path):
        """Write this metadata back into the README at *path*, preserving its body."""
        if path.exists():
            with open(path, encoding="utf-8") as readme_file:
                readme_content = readme_file.read()
        else:
            readme_content = None
        updated_readme_content = self._to_readme(readme_content)
        with open(path, "w", encoding="utf-8") as readme_file:
            readme_file.write(updated_readme_content)

    def _to_readme(self, readme_content: Optional[str] = None) -> str:
        # replace (or prepend) the YAML block, keeping any existing body text
        if readme_content is not None:
            _, content = _split_yaml_from_readme(readme_content)
            full_content = "---\n" + self.to_yaml_string() + "---\n" + content
        else:
            full_content = "---\n" + self.to_yaml_string() + "---\n"
        return full_content

    @classmethod
    def from_yaml_string(cls, string: str) -> "DatasetMetadata":
        """Load the metadata from a YAML string; duplicate keys raise TypeError."""
        metadata_dict = yaml.load(string, Loader=_NoDuplicateSafeLoader) or {}

        # Convert the YAML keys to DatasetMetadata fields
        metadata_dict = {
            (key.replace("-", "_") if key.replace("-", "_") in cls._FIELDS_WITH_DASHES else key): value
            for key, value in metadata_dict.items()
        }
        return cls(**metadata_dict)

    def to_yaml_string(self) -> str:
        return yaml.safe_dump(
            {
                (key.replace("_", "-") if key in self._FIELDS_WITH_DASHES else key): value
                for key, value in self.items()
            },
            sort_keys=False,
            allow_unicode=True,
            encoding="utf-8",
        ).decode("utf-8")


# known task categories; values would list the accepted task ids per category
known_task_ids = {
    "image-classification": [],
    "translation": [],
    "image-segmentation": [],
    "fill-mask": [],
    "automatic-speech-recognition": [],
    "token-classification": [],
    "sentence-similarity": [],
    "audio-classification": [],
    "question-answering": [],
    "summarization": [],
    "zero-shot-classification": [],
    "table-to-text": [],
    "feature-extraction": [],
    "other": [],
    "multiple-choice": [],
    "text-classification": [],
    "text-to-image": [],
    "text2text-generation": [],
    "zero-shot-image-classification": [],
    "tabular-classification": [],
    "tabular-regression": [],
    "image-to-image": [],
    "tabular-to-text": [],
    "unconditional-image-generation": [],
    "text-retrieval": [],
    "text-to-speech": [],
    "object-detection": [],
    "audio-to-audio": [],
    "text-generation": [],
    "conversational": [],
    "table-question-answering": [],
    "visual-question-answering": [],
    "image-to-text": [],
    "reinforcement-learning": [],
    "voice-activity-detection": [],
    "time-series-forecasting": [],
    "document-question-answering": [],
}


if __name__ == "__main__":
    from argparse import ArgumentParser

    ap = ArgumentParser(usage="Validate the yaml metadata block of a README.md file.")
    ap.add_argument("readme_filepath")
    args = ap.parse_args()

    readme_filepath = Path(args.readme_filepath)
    dataset_metadata = DatasetMetadata.from_readme(readme_filepath)
    print(dataset_metadata)
    dataset_metadata.to_readme(readme_filepath)
33
import random
import unittest

import torch

from diffusers import IFImg2ImgSuperResolutionPipeline
from diffusers.utils import floats_tensor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import skip_mps, torch_device

from ..pipeline_params import TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
from . import IFPipelineTesterMixin


@skip_mps
class IFImg2ImgSuperResolutionPipelineFastTests(PipelineTesterMixin, IFPipelineTesterMixin, unittest.TestCase):
    """Fast tests for the IF img2img super-resolution pipeline."""

    pipeline_class = IFImg2ImgSuperResolutionPipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"width", "height"}
    batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS.union({"original_image"})
    required_optional_params = PipelineTesterMixin.required_optional_params - {"latents"}

    def get_dummy_components(self):
        return self._get_superresolution_dummy_components()

    def get_dummy_inputs(self, device, seed=0):
        # mps does not support device-bound generators
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)

        image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        original_image = floats_tensor((1, 3, 16, 16), rng=random.Random(seed)).to(device)

        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "image": image,
            "original_image": original_image,
            "generator": generator,
            "num_inference_steps": 2,
            "output_type": "numpy",
        }

        return inputs

    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available(),
        reason="XFormers attention is only available with CUDA and `xformers` installed",
    )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3)

    def test_save_load_optional_components(self):
        self._test_save_load_optional_components()

    @unittest.skipIf(torch_device != "cuda", reason="float16 requires CUDA")
    def test_save_load_float16(self):
        # Due to non-determinism in save load of the hf-internal-testing/tiny-random-t5 text encoder
        super().test_save_load_float16(expected_max_diff=1e-1)

    def test_attention_slicing_forward_pass(self):
        self._test_attention_slicing_forward_pass(expected_max_diff=1e-2)

    def test_save_load_local(self):
        self._test_save_load_local()

    def test_inference_batch_single_identical(self):
        self._test_inference_batch_single_identical(
            expected_max_diff=1e-2,
        )
33
1
from __future__ import annotations


def SCREAMING_SNAKE_CASE(
    stress: float,
    tangential_force: float,
    area: float,
) -> tuple[str, float]:
    """Solve the shear-stress relation ``stress = tangential_force / area``.

    Exactly one of the three arguments must be 0; that unknown is computed
    from the other two.

    Bug fix: all three parameters were previously named ``__lowerCAmelCase``
    (a SyntaxError: duplicate argument) while the body already referenced
    ``stress``, ``tangential_force`` and ``area`` — the names are restored
    to match the body.

    Returns:
        A ``(name, value)`` tuple naming the computed quantity.

    Raises:
        ValueError: if the number of zero arguments is not exactly one,
            or any argument is negative.
    """
    if (stress, tangential_force, area).count(0) != 1:
        raise ValueError('''You cannot supply more or less than 2 values''')
    elif stress < 0:
        raise ValueError('''Stress cannot be negative''')
    elif tangential_force < 0:
        raise ValueError('''Tangential Force cannot be negative''')
    elif area < 0:
        raise ValueError('''Area cannot be negative''')
    elif stress == 0:
        # stress = F / A
        return (
            "stress",
            tangential_force / area,
        )
    elif tangential_force == 0:
        # F = stress * A
        return (
            "tangential_force",
            stress * area,
        )
    else:
        # A = F / stress
        return (
            "area",
            tangential_force / stress,
        )


if __name__ == "__main__":
    import doctest

    doctest.testmod()
33
import math class __magic_name__ : '''simple docstring''' def SCREAMING_SNAKE_CASE__ ( self:Optional[int] , _a:list[list[float]] , _a:list[int] ): snake_case__ = 0.0 snake_case__ = 0.0 for i in range(len(_a ) ): da += math.pow((sample[i] - weights[0][i]) , 2 ) da += math.pow((sample[i] - weights[1][i]) , 2 ) return 0 if da > da else 1 return 0 def SCREAMING_SNAKE_CASE__ ( self:Tuple , _a:list[list[int | float]] , _a:list[int] , _a:int , _a:float ): for i in range(len(_a ) ): weights[j][i] += alpha * (sample[i] - weights[j][i]) return weights def SCREAMING_SNAKE_CASE ( ) -> None: # Training Examples ( m, n ) snake_case__ = [[1, 1, 0, 0], [0, 0, 0, 1], [1, 0, 0, 0], [0, 0, 1, 1]] # weight initialization ( n, C ) snake_case__ = [[0.2, 0.6, 0.5, 0.9], [0.8, 0.4, 0.7, 0.3]] # training snake_case__ = SelfOrganizingMap() snake_case__ = 3 snake_case__ = 0.5 for _ in range(__lowerCAmelCase ): for j in range(len(__lowerCAmelCase ) ): # training sample snake_case__ = training_samples[j] # Compute the winning vector snake_case__ = self_organizing_map.get_winner(__lowerCAmelCase , __lowerCAmelCase ) # Update the winning vector snake_case__ = self_organizing_map.update(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) # classify test sample snake_case__ = [0, 0, 0, 1] snake_case__ = self_organizing_map.get_winner(__lowerCAmelCase , __lowerCAmelCase ) # results print(F"""Clusters that the test sample belongs to : {winner}""" ) print(F"""Weights that have been trained : {weights}""" ) # running the main() function if __name__ == "__main__": main()
33
1
import functools


def SCREAMING_SNAKE_CASE(days: list[int], costs: list[int]) -> int:
    """Return the minimum cost to cover every travel day in `days`.

    ``costs`` holds the prices of a 1-day, 7-day and 30-day pass, in that
    order (LeetCode 983 "Minimum Cost For Tickets").

    Bug fix: both parameters were previously named ``__lowerCAmelCase``
    (a SyntaxError: duplicate argument); they are restored to the names
    the validation messages describe.

    Raises:
        ValueError: if `days` is not a list of ints in [1, 365], or `costs`
            is not a list of exactly three ints.
    """
    # Validation
    if not isinstance(days, list) or not all(isinstance(day, int) for day in days):
        raise ValueError('''The parameter days should be a list of integers''')

    if len(costs) != 3 or not all(isinstance(cost, int) for cost in costs):
        raise ValueError('''The parameter costs should be a list of three integers''')

    if len(days) == 0:
        return 0

    if min(days) <= 0:
        raise ValueError('''All days elements should be greater than 0''')

    if max(days) >= 366:
        raise ValueError('''All days elements should be less than 366''')

    # O(1) membership tests inside the DP recursion.
    days_set = set(days)

    @functools.cache
    def dynamic_programming(index: int) -> int:
        # Cheapest cost to cover all travel days from `index` to day 365.
        if index > 365:
            return 0

        if index not in days_set:
            return dynamic_programming(index + 1)

        # Buy a 1-, 7- or 30-day pass starting today and take the cheapest.
        return min(
            costs[0] + dynamic_programming(index + 1),
            costs[1] + dynamic_programming(index + 7),
            costs[2] + dynamic_programming(index + 30),
        )

    return dynamic_programming(1)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
33
from __future__ import annotations from statistics import mean def SCREAMING_SNAKE_CASE ( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) -> list[int]: snake_case__ = [0] * no_of_processes snake_case__ = [0] * no_of_processes # Initialize remaining_time to waiting_time. for i in range(__lowerCAmelCase ): snake_case__ = burst_time[i] snake_case__ = [] snake_case__ = 0 snake_case__ = 0 # When processes are not completed, # A process whose arrival time has passed \ # and has remaining execution time is put into the ready_process. # The shortest process in the ready_process, target_process is executed. while completed != no_of_processes: snake_case__ = [] snake_case__ = -1 for i in range(__lowerCAmelCase ): if (arrival_time[i] <= total_time) and (remaining_time[i] > 0): ready_process.append(__lowerCAmelCase ) if len(__lowerCAmelCase ) > 0: snake_case__ = ready_process[0] for i in ready_process: if remaining_time[i] < remaining_time[target_process]: snake_case__ = i total_time += burst_time[target_process] completed += 1 snake_case__ = 0 snake_case__ = ( total_time - arrival_time[target_process] - burst_time[target_process] ) else: total_time += 1 return waiting_time def SCREAMING_SNAKE_CASE ( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) -> list[int]: snake_case__ = [0] * no_of_processes for i in range(__lowerCAmelCase ): snake_case__ = burst_time[i] + waiting_time[i] return turn_around_time if __name__ == "__main__": print("""[TEST CASE 01]""") lowerCamelCase__ : Tuple = 4 lowerCamelCase__ : Union[str, Any] = [2, 5, 3, 7] lowerCamelCase__ : Optional[Any] = [0, 0, 0, 0] lowerCamelCase__ : Dict = calculate_waitingtime(arrival_time, burst_time, no_of_processes) lowerCamelCase__ : Union[str, Any] = calculate_turnaroundtime( burst_time, no_of_processes, waiting_time ) # Printing the Result print("""PID\tBurst Time\tArrival Time\tWaiting Time\tTurnaround Time""") for i, process_id in enumerate(list(range(1, 5))): print( 
F"""{process_id}\t{burst_time[i]}\t\t\t{arrival_time[i]}\t\t\t\t""" F"""{waiting_time[i]}\t\t\t\t{turn_around_time[i]}""" ) print(F"""\nAverage waiting time = {mean(waiting_time):.5f}""") print(F"""Average turnaround time = {mean(turn_around_time):.5f}""")
33
1
from dataclasses import dataclass, field
from typing import ClassVar, Dict

from ..features import Features, Value
from .base import TaskTemplate


@dataclass(frozen=True)
class __magic_name__(TaskTemplate):
    """Task template for language-modeling datasets.

    Bug fixes: ``frozen=snake_case_`` and the base class ``snake_case_``
    referenced an undefined name; the four fields all shared the name
    ``__lowercase`` (so only the last survived); and the property read
    ``self.text_column`` which no field defined.  Field names are restored
    from that reference and the metadata strings — NOTE(review): confirm
    the attribute names against the ``TaskTemplate`` base class.
    """

    # Template identifier, always serialized even at its default value.
    task: str = field(default="language-modeling", metadata={"include_in_asdict_even_if_is_default": True})
    input_schema: ClassVar[Features] = Features({"text": Value("string")})
    label_schema: ClassVar[Features] = Features({})
    # Name of the dataset column holding the raw text.
    text_column: str = "text"

    @property
    def column_mapping(self) -> Dict[str, str]:
        # Map the dataset's text column onto the canonical "text" feature.
        return {self.text_column: "text"}
33
lowerCamelCase__ : List[str] = """Alexander Joslin"""

import operator as op

from .stack import Stack


def dijkstras_two_stack_algorithm(equation: str) -> int:
    """Evaluate a fully parenthesized infix expression of single digits.

    Implements Dijkstra's two-stack algorithm: one stack for operands,
    one for operators; each ``)`` applies the top operator to the top two
    operands.

    Bug fix: the function was defined under an obfuscated name while the
    ``__main__`` block called ``dijkstras_two_stack_algorithm`` (NameError);
    the definition is restored to the name the call site uses.  The two
    popped operands — previously collapsed into one variable — are applied
    second-popped-first so ``-`` and ``/`` associate correctly.
    """
    operators = {'''*''': op.mul, '''/''': op.truediv, '''+''': op.add, '''-''': op.sub}

    operand_stack = Stack()
    operator_stack = Stack()

    for i in equation:
        if i.isdigit():
            # RULE 1: push operands
            operand_stack.push(int(i))
        elif i in operators:
            # RULE 2: push operators
            operator_stack.push(i)
        elif i == ")":
            # RULE 4: pop one operator and two operands, apply, push result
            opr = operator_stack.peek()
            operator_stack.pop()
            num_last = operand_stack.peek()
            operand_stack.pop()
            num_first = operand_stack.peek()
            operand_stack.pop()

            total = operators[opr](num_first, num_last)
            operand_stack.push(total)

    # RULE 5: the remaining operand is the value of the expression
    return operand_stack.peek()


if __name__ == "__main__":
    equation = """(5 + ((4 * 2) * (2 + 3)))"""
    # answer = 45
    print(F"""{equation} = {dijkstras_two_stack_algorithm(equation)}""")
33
1