code
stringlengths
82
54.1k
code_codestyle
int64
0
699
style_context
stringlengths
111
35.6k
style_context_codestyle
int64
0
699
label
int64
0
1
class a : """simple docstring""" def __init__( self : Tuple , lowerCamelCase : list ) -> None: __snake_case : str = set_counts __snake_case : Union[str, Any] = max(lowerCamelCase ) __snake_case : List[Any] = len(lowerCamelCase ) __snake_case : Tuple = [1] * num_sets __snake_case : Dict = list(range(lowerCamelCase ) ) def __snake_case ( self : str , lowerCamelCase : int , lowerCamelCase : int ) -> bool: __snake_case : List[Any] = self.get_parent(lowerCamelCase ) __snake_case : Tuple = self.get_parent(lowerCamelCase ) if src_parent == dst_parent: return False if self.ranks[dst_parent] >= self.ranks[src_parent]: self.set_counts[dst_parent] += self.set_counts[src_parent] __snake_case : List[str] = 0 __snake_case : List[Any] = dst_parent if self.ranks[dst_parent] == self.ranks[src_parent]: self.ranks[dst_parent] += 1 __snake_case : Dict = self.set_counts[dst_parent] else: self.set_counts[src_parent] += self.set_counts[dst_parent] __snake_case : Union[str, Any] = 0 __snake_case : Optional[int] = src_parent __snake_case : Tuple = self.set_counts[src_parent] __snake_case : str = max(self.max_set , lowerCamelCase ) return True def __snake_case ( self : int , lowerCamelCase : int ) -> int: if self.parents[disj_set] == disj_set: return disj_set __snake_case : Optional[int] = self.get_parent(self.parents[disj_set] ) return self.parents[disj_set]
81
from __future__ import annotations def A ( lowercase__ : int ) -> list[int]: UpperCamelCase__ :Union[str, Any] = [True] * limit UpperCamelCase__ :int = False UpperCamelCase__ :Optional[Any] = False UpperCamelCase__ :str = True for i in range(3 , int(limit**0.5 + 1 ) , 2 ): UpperCamelCase__ :List[Any] = i * 2 while index < limit: UpperCamelCase__ :Tuple = False UpperCamelCase__ :Tuple = index + i UpperCamelCase__ :str = [2] for i in range(3 , lowercase__ , 2 ): if is_prime[i]: primes.append(lowercase__ ) return primes def A ( lowercase__ : int = 100_0000 ) -> int: UpperCamelCase__ :Any = prime_sieve(lowercase__ ) UpperCamelCase__ :Optional[int] = 0 UpperCamelCase__ :Optional[Any] = 0 for i in range(len(lowercase__ ) ): for j in range(i + length , len(lowercase__ ) ): UpperCamelCase__ :Any = sum(primes[i:j] ) if sol >= ceiling: break if sol in primes: UpperCamelCase__ :Union[str, Any] = j - i UpperCamelCase__ :Any = sol return largest if __name__ == "__main__": print(f'''{solution() = }''')
45
0
"""simple docstring""" def a__ ( lowerCAmelCase__ ): if not grid or not grid[0]: raise TypeError("The grid does not contain the appropriate information" ) for cell_n in range(1 , len(grid[0] ) ): grid[0][cell_n] += grid[0][cell_n - 1] UpperCAmelCase_ = grid[0] for row_n in range(1 , len(lowerCAmelCase__ ) ): UpperCAmelCase_ = grid[row_n] UpperCAmelCase_ = fill_row(lowerCAmelCase__ , lowerCAmelCase__ ) UpperCAmelCase_ = grid[row_n] return grid[-1][-1] def a__ ( lowerCAmelCase__ , lowerCAmelCase__ ): current_row[0] += row_above[0] for cell_n in range(1 , len(lowerCAmelCase__ ) ): current_row[cell_n] += min(current_row[cell_n - 1] , row_above[cell_n] ) return current_row if __name__ == "__main__": import doctest doctest.testmod()
82
import unittest from transformers import GPTNeoXJapaneseConfig, is_torch_available from transformers.models.gpt_neox_japanese.tokenization_gpt_neox_japanese import GPTNeoXJapaneseTokenizer from transformers.testing_utils import require_torch, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import GPTNeoXJapaneseForCausalLM, GPTNeoXJapaneseModel class lowerCAmelCase_ : """simple docstring""" def __init__( self :Optional[Any] , lowerCamelCase__ :Optional[int] , lowerCamelCase__ :Tuple=13 , lowerCamelCase__ :Tuple=7 , lowerCamelCase__ :Optional[Any]=True , lowerCamelCase__ :Union[str, Any]=True , lowerCamelCase__ :Optional[int]=True , lowerCamelCase__ :List[Any]=True , lowerCamelCase__ :List[str]=99 , lowerCamelCase__ :int=32 , lowerCamelCase__ :List[Any]=5 , lowerCamelCase__ :Tuple=4 , lowerCamelCase__ :List[Any]=4 , lowerCamelCase__ :str="gelu" , lowerCamelCase__ :Optional[Any]=0.0 , lowerCamelCase__ :Optional[int]=0.1 , lowerCamelCase__ :str=True , lowerCamelCase__ :Dict=5_12 , lowerCamelCase__ :Optional[Any]=16 , lowerCamelCase__ :Optional[Any]=2 , lowerCamelCase__ :Union[str, Any]=0.02 , lowerCamelCase__ :Union[str, Any]=3 , lowerCamelCase__ :int=4 , lowerCamelCase__ :str=None , ): UpperCamelCase__ :Optional[Any] = parent UpperCamelCase__ :Dict = batch_size UpperCamelCase__ :Tuple = seq_length UpperCamelCase__ :Dict = is_training UpperCamelCase__ :List[str] = use_input_mask UpperCamelCase__ :Optional[Any] = use_token_type_ids UpperCamelCase__ :Tuple = use_labels UpperCamelCase__ :int = vocab_size UpperCamelCase__ :Tuple = hidden_size UpperCamelCase__ :Optional[Any] = num_hidden_layers UpperCamelCase__ :int = num_attention_heads UpperCamelCase__ :Optional[int] = intermediate_multiple_size UpperCamelCase__ :Optional[Any] = hidden_act UpperCamelCase__ :Optional[int] = hidden_dropout UpperCamelCase__ :List[Any] = attention_dropout UpperCamelCase__ :List[str] = weight_tying UpperCamelCase__ :List[str] = max_position_embeddings UpperCamelCase__ :Dict = type_vocab_size UpperCamelCase__ :List[Any] = type_sequence_label_size UpperCamelCase__ :List[str] = initializer_range UpperCamelCase__ :int = num_labels UpperCamelCase__ :Dict = num_choices UpperCamelCase__ :Any = scope def __a ( self :Any ): UpperCamelCase__ :Tuple = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) UpperCamelCase__ :str = None if self.use_input_mask: UpperCamelCase__ :Optional[int] = random_attention_mask([self.batch_size, self.seq_length] ) UpperCamelCase__ :Union[str, Any] = None if self.use_labels: UpperCamelCase__ :Tuple = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) UpperCamelCase__ :Optional[Any] = self.get_config() return config, input_ids, input_mask, token_labels def __a ( self :Union[str, Any] ): return GPTNeoXJapaneseConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_multiple_size=self.intermediate_multiple_size , hidden_act=self.hidden_act , hidden_dropout=self.hidden_dropout , attention_dropout=self.attention_dropout , weight_tying=self.weight_tying , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=lowerCamelCase__ , initializer_range=self.initializer_range , ) def __a ( self 
:Union[str, Any] ): UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ :Optional[Any] = self.prepare_config_and_inputs() UpperCamelCase__ :Optional[int] = True return config, input_ids, input_mask, token_labels def __a ( self :List[str] , lowerCamelCase__ :List[Any] , lowerCamelCase__ :Any , lowerCamelCase__ :Any ): UpperCamelCase__ :Union[str, Any] = GPTNeoXJapaneseModel(config=lowerCamelCase__ ) model.to(lowerCamelCase__ ) model.eval() UpperCamelCase__ :Tuple = model(lowerCamelCase__ , attention_mask=lowerCamelCase__ ) UpperCamelCase__ :Optional[Any] = model(lowerCamelCase__ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def __a ( self :Dict , lowerCamelCase__ :Dict , lowerCamelCase__ :Optional[Any] , lowerCamelCase__ :List[Any] ): UpperCamelCase__ :List[str] = True UpperCamelCase__ :int = GPTNeoXJapaneseModel(lowerCamelCase__ ) model.to(lowerCamelCase__ ) model.eval() UpperCamelCase__ :Union[str, Any] = model(lowerCamelCase__ , attention_mask=lowerCamelCase__ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def __a ( self :List[Any] , lowerCamelCase__ :Tuple , lowerCamelCase__ :Optional[Any] , lowerCamelCase__ :Any , lowerCamelCase__ :Optional[Any] ): UpperCamelCase__ :Any = GPTNeoXJapaneseForCausalLM(config=lowerCamelCase__ ) model.to(lowerCamelCase__ ) model.eval() UpperCamelCase__ :Tuple = model(lowerCamelCase__ , attention_mask=lowerCamelCase__ , labels=lowerCamelCase__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def __a ( self :Any , lowerCamelCase__ :Optional[int] , lowerCamelCase__ :Optional[Any] , lowerCamelCase__ :List[str] ): UpperCamelCase__ :Union[str, Any] = True UpperCamelCase__ :List[str] = GPTNeoXJapaneseForCausalLM(config=lowerCamelCase__ ) model.to(lowerCamelCase__ ) model.eval() # first forward pass UpperCamelCase__ :Optional[Any] = model(lowerCamelCase__ , attention_mask=lowerCamelCase__ , use_cache=lowerCamelCase__ ) UpperCamelCase__ :List[Any] = outputs.past_key_values # create hypothetical multiple next token and extent to next_input_ids UpperCamelCase__ :List[Any] = ids_tensor((self.batch_size, 3) , config.vocab_size ) UpperCamelCase__ :Optional[Any] = ids_tensor((self.batch_size, 3) , vocab_size=2 ) # append to next input_ids and UpperCamelCase__ :Union[str, Any] = torch.cat([input_ids, next_tokens] , dim=-1 ) UpperCamelCase__ :Optional[int] = torch.cat([input_mask, next_mask] , dim=-1 ) UpperCamelCase__ :Union[str, Any] = model(lowerCamelCase__ , attention_mask=lowerCamelCase__ , output_hidden_states=lowerCamelCase__ ) UpperCamelCase__ :Optional[int] = output_from_no_past["""hidden_states"""][0] UpperCamelCase__ :Union[str, Any] = model( lowerCamelCase__ , attention_mask=lowerCamelCase__ , past_key_values=lowerCamelCase__ , output_hidden_states=lowerCamelCase__ , )["""hidden_states"""][0] # select random slice UpperCamelCase__ :int = ids_tensor((1,) , output_from_past.shape[-1] ).item() UpperCamelCase__ :str = output_from_no_past[:, -3:, random_slice_idx].detach() UpperCamelCase__ :Any = output_from_past[:, :, random_slice_idx].detach() self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] ) # test that outputs are equal for slice self.parent.assertTrue(torch.allclose(lowerCamelCase__ , lowerCamelCase__ , atol=1e-3 ) ) def __a ( self :Tuple ): UpperCamelCase__ :int = self.prepare_config_and_inputs() UpperCamelCase__ , 
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ :List[Any] = config_and_inputs UpperCamelCase__ :Any = {"""input_ids""": input_ids, """attention_mask""": input_mask} return config, inputs_dict @require_torch class lowerCAmelCase_ ( lowercase , lowercase , unittest.TestCase ): """simple docstring""" _snake_case : Dict = (GPTNeoXJapaneseModel, GPTNeoXJapaneseForCausalLM) if is_torch_available() else () _snake_case : int = (GPTNeoXJapaneseForCausalLM,) if is_torch_available() else () _snake_case : str = ( {"""feature-extraction""": GPTNeoXJapaneseModel, """text-generation""": GPTNeoXJapaneseForCausalLM} if is_torch_available() else {} ) _snake_case : Union[str, Any] = False _snake_case : Dict = False _snake_case : List[str] = False _snake_case : Optional[int] = False def __a ( self :List[Any] ): UpperCamelCase__ :Tuple = GPTNeoXJapaneseModelTester(self ) UpperCamelCase__ :Optional[Any] = ConfigTester(self , config_class=lowerCamelCase__ , hidden_size=37 ) def __a ( self :Dict ): self.config_tester.run_common_tests() def __a ( self :Any ): UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ :Optional[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) def __a ( self :Any ): UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ :Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_decoder() self.model_tester.create_and_check_model_as_decoder(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) def __a ( self :Union[str, Any] ): # This regression test was failing with PyTorch < 1.3 UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ :List[str] = self.model_tester.prepare_config_and_inputs_for_decoder() UpperCamelCase__ :Dict = None self.model_tester.create_and_check_model_as_decoder(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) def __a ( self :List[str] ): UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ :int = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_decoder_model_past_large_inputs(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) def __a ( self :Union[str, Any] ): UpperCamelCase__ :int = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_causal_lm(*lowerCamelCase__ ) @slow def __a ( self :int ): UpperCamelCase__ :int = """abeja/gpt-neox-japanese-2.7b""" UpperCamelCase__ :List[Any] = ["""データサイエンティストとは、""", """100年後に必要とされる会社は、""", """フルリモートの環境で働くために必要なことは、""", """国境の長いトンネルを抜けると""", """美味しい日本食といえば、"""] UpperCamelCase__ :Union[str, Any] = [ """データサイエンティストとは、データを分析し、ビジネスに役立つ知見を導き出す専門家のことです。""", """100年後に必要とされる会社は、「人」が中心の会社です。""", """フルリモートの環境で働くために必要なことは、「自分の時間をコントロールする」ことです。""", """国境の長いトンネルを抜けると、そこは雪国だった。""", """美味しい日本食といえば、やっぱりお寿司ですよね。""", ] UpperCamelCase__ :Any = GPTNeoXJapaneseTokenizer.from_pretrained(lowerCamelCase__ ) UpperCamelCase__ :List[Any] = GPTNeoXJapaneseForCausalLM.from_pretrained(lowerCamelCase__ ) UpperCamelCase__ :Optional[Any] = [] for prompt in prompts: UpperCamelCase__ :str = tokenizer(lowerCamelCase__ , return_tensors="""pt""" ).input_ids UpperCamelCase__ :Union[str, Any] = model.generate(lowerCamelCase__ , max_length=50 ) UpperCamelCase__ :Dict = tokenizer.batch_decode(lowerCamelCase__ , skip_special_tokens=lowerCamelCase__ ) predicted_outputs += generated_string self.assertListEqual(lowerCamelCase__ , lowerCamelCase__ )
45
0
"""simple docstring""" from ...configuration_utils import PretrainedConfig from ...utils import logging lowerCAmelCase__ = logging.get_logger(__name__) lowerCAmelCase__ = { '''facebook/s2t-wav2vec2-large-en-de''': ( '''https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/config.json''' ), # See all Speech2Text models at https://huggingface.co/models?filter=speech2text2 } class __snake_case ( _lowercase): snake_case__ : List[str] = "speech_to_text_2" snake_case__ : Dict = ["past_key_values"] snake_case__ : Optional[Any] = {"num_attention_heads": "decoder_attention_heads", "hidden_size": "d_model"} def __init__( self : List[str] , __lowerCAmelCase : Optional[Any]=1_0_0_0_0 , __lowerCAmelCase : str=6 , __lowerCAmelCase : Union[str, Any]=2_0_4_8 , __lowerCAmelCase : List[str]=4 , __lowerCAmelCase : List[str]=0.0 , __lowerCAmelCase : Any=True , __lowerCAmelCase : str="relu" , __lowerCAmelCase : Optional[Any]=2_5_6 , __lowerCAmelCase : Tuple=0.1 , __lowerCAmelCase : List[Any]=0.0 , __lowerCAmelCase : Any=0.0 , __lowerCAmelCase : int=0.02 , __lowerCAmelCase : int=2 , __lowerCAmelCase : Union[str, Any]=True , __lowerCAmelCase : int=1 , __lowerCAmelCase : List[str]=0 , __lowerCAmelCase : int=2 , __lowerCAmelCase : int=1_0_2_4 , **__lowerCAmelCase : List[Any] , ): """simple docstring""" _lowerCamelCase : int = vocab_size _lowerCamelCase : Dict = d_model _lowerCamelCase : Optional[Any] = decoder_ffn_dim _lowerCamelCase : Any = decoder_layers _lowerCamelCase : int = decoder_attention_heads _lowerCamelCase : Union[str, Any] = dropout _lowerCamelCase : int = attention_dropout _lowerCamelCase : Any = activation_dropout _lowerCamelCase : List[Any] = activation_function _lowerCamelCase : Optional[Any] = init_std _lowerCamelCase : List[Any] = decoder_layerdrop _lowerCamelCase : Any = use_cache _lowerCamelCase : Tuple = decoder_layers _lowerCamelCase : int = scale_embedding # scale factor will be sqrt(d_model) if True _lowerCamelCase : Dict = max_target_positions super().__init__( pad_token_id=__lowerCAmelCase , bos_token_id=__lowerCAmelCase , eos_token_id=__lowerCAmelCase , decoder_start_token_id=__lowerCAmelCase , **__lowerCAmelCase , )
83
import numpy as np from matplotlib import pyplot as plt from sklearn.datasets import load_iris from sklearn.metrics import ConfusionMatrixDisplay from sklearn.model_selection import train_test_split from xgboost import XGBClassifier def A ( lowercase__ : dict ) -> tuple: return (data["data"], data["target"]) def A ( lowercase__ : np.ndarray , lowercase__ : np.ndarray ) -> XGBClassifier: UpperCamelCase__ :Tuple = XGBClassifier() classifier.fit(lowercase__ , lowercase__ ) return classifier def A ( ) -> None: UpperCamelCase__ :str = load_iris() UpperCamelCase__ , UpperCamelCase__ :int = data_handling(lowercase__ ) UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ :int = train_test_split( lowercase__ , lowercase__ , test_size=0.25 ) UpperCamelCase__ :Optional[int] = iris["""target_names"""] # Create an XGBoost Classifier from the training data UpperCamelCase__ :Optional[Any] = xgboost(lowercase__ , lowercase__ ) # Display the confusion matrix of the classifier with both training and test sets ConfusionMatrixDisplay.from_estimator( lowercase__ , lowercase__ , lowercase__ , display_labels=lowercase__ , cmap="""Blues""" , normalize="""true""" , ) plt.title("""Normalized Confusion Matrix - IRIS Dataset""" ) plt.show() if __name__ == "__main__": import doctest doctest.testmod(verbose=True) main()
45
0
import warnings from ...processing_utils import ProcessorMixin from ...tokenization_utils_base import BatchEncoding class A_ ( __lowerCamelCase ): '''simple docstring''' _UpperCamelCase : Optional[Any] = ["""image_processor""", """tokenizer"""] _UpperCamelCase : Union[str, Any] = """CLIPImageProcessor""" _UpperCamelCase : List[Any] = ("""CLIPTokenizer""", """CLIPTokenizerFast""") def __init__( self , snake_case=None , snake_case=None , **snake_case ): lowercase = None if "feature_extractor" in kwargs: warnings.warn( 'The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`' ' instead.' , snake_case , ) lowercase = kwargs.pop('feature_extractor' ) lowercase = image_processor if image_processor is not None else feature_extractor if image_processor is None: raise ValueError('You need to specify an `image_processor`.' ) if tokenizer is None: raise ValueError('You need to specify a `tokenizer`.' ) super().__init__(snake_case , snake_case ) def __call__( self , snake_case=None , snake_case=None , snake_case=None , **snake_case ): if text is None and images is None: raise ValueError('You have to specify either text or images. Both cannot be none.' ) if text is not None: lowercase = self.tokenizer(snake_case , return_tensors=snake_case , **snake_case ) if images is not None: lowercase = self.image_processor(snake_case , return_tensors=snake_case , **snake_case ) if text is not None and images is not None: lowercase = image_features.pixel_values return encoding elif text is not None: return encoding else: return BatchEncoding(data=dict(**snake_case ) , tensor_type=snake_case ) def SCREAMING_SNAKE_CASE__ ( self , *snake_case , **snake_case ): return self.tokenizer.batch_decode(*snake_case , **snake_case ) def SCREAMING_SNAKE_CASE__ ( self , *snake_case , **snake_case ): return self.tokenizer.decode(*snake_case , **snake_case ) @property def SCREAMING_SNAKE_CASE__ ( self ): lowercase = self.tokenizer.model_input_names lowercase = self.image_processor.model_input_names return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) ) @property def SCREAMING_SNAKE_CASE__ ( self ): warnings.warn( '`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.' , snake_case , ) return self.image_processor_class @property def SCREAMING_SNAKE_CASE__ ( self ): warnings.warn( '`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.' , snake_case , ) return self.image_processor
84
import multiprocessing import time from arguments import PretokenizationArguments from datasets import load_dataset from transformers import AutoTokenizer, HfArgumentParser def A ( lowercase__ : Optional[int] ) -> Optional[Any]: UpperCamelCase__ :Union[str, Any] = {} UpperCamelCase__ :Optional[int] = tokenizer(example["""content"""] , truncation=lowercase__ )["""input_ids"""] UpperCamelCase__ :int = len(example["""content"""] ) / len(output["""input_ids"""] ) return output UpperCamelCase = HfArgumentParser(PretokenizationArguments) UpperCamelCase = parser.parse_args() if args.num_workers is None: UpperCamelCase = multiprocessing.cpu_count() UpperCamelCase = AutoTokenizer.from_pretrained(args.tokenizer_dir) UpperCamelCase = time.time() UpperCamelCase = load_dataset(args.dataset_name, split="train") print(f'''Dataset loaded in {time.time()-t_start:.2f}s''') UpperCamelCase = time.time() UpperCamelCase = ds.map( tokenize, num_proc=args.num_workers, remove_columns=[ "repo_name", "path", "copies", "size", "content", "license", "hash", "line_mean", "line_max", "alpha_frac", "autogenerated", ], ) print(f'''Dataset tokenized in {time.time()-t_start:.2f}s''') UpperCamelCase = time.time() ds.push_to_hub(args.tokenized_data_repo) print(f'''Data pushed to the hub in {time.time()-t_start:.2f}s''')
45
0
import datetime import platform import subprocess from typing import Optional, Tuple, Union import numpy as np def _a ( lowercase__ : bytes , lowercase__ : int ): '''simple docstring''' SCREAMING_SNAKE_CASE__ : str = f'''{sampling_rate}''' SCREAMING_SNAKE_CASE__ : Any = '1' SCREAMING_SNAKE_CASE__ : int = 'f32le' SCREAMING_SNAKE_CASE__ : Dict = [ 'ffmpeg', '-i', 'pipe:0', '-ac', ac, '-ar', ar, '-f', format_for_conversion, '-hide_banner', '-loglevel', 'quiet', 'pipe:1', ] try: with subprocess.Popen(lowercase__ , stdin=subprocess.PIPE , stdout=subprocess.PIPE ) as ffmpeg_process: SCREAMING_SNAKE_CASE__ : Union[str, Any] = ffmpeg_process.communicate(lowercase__ ) except FileNotFoundError as error: raise ValueError('ffmpeg was not found but is required to load audio files from filename' ) from error SCREAMING_SNAKE_CASE__ : Any = output_stream[0] SCREAMING_SNAKE_CASE__ : Union[str, Any] = np.frombuffer(lowercase__ , np.floataa ) if audio.shape[0] == 0: raise ValueError('Malformed soundfile' ) return audio def _a ( lowercase__ : int , lowercase__ : float , lowercase__ : str = "f32le" , ): '''simple docstring''' SCREAMING_SNAKE_CASE__ : List[str] = f'''{sampling_rate}''' SCREAMING_SNAKE_CASE__ : Optional[int] = '1' if format_for_conversion == "s16le": SCREAMING_SNAKE_CASE__ : Optional[int] = 2 elif format_for_conversion == "f32le": SCREAMING_SNAKE_CASE__ : Union[str, Any] = 4 else: raise ValueError(f'''Unhandled format `{format_for_conversion}`. Please use `s16le` or `f32le`''' ) SCREAMING_SNAKE_CASE__ : Dict = platform.system() if system == "Linux": SCREAMING_SNAKE_CASE__ : Union[str, Any] = 'alsa' SCREAMING_SNAKE_CASE__ : Any = 'default' elif system == "Darwin": SCREAMING_SNAKE_CASE__ : Optional[Any] = 'avfoundation' SCREAMING_SNAKE_CASE__ : Dict = ':0' elif system == "Windows": SCREAMING_SNAKE_CASE__ : int = 'dshow' SCREAMING_SNAKE_CASE__ : Dict = 'default' SCREAMING_SNAKE_CASE__ : int = [ 'ffmpeg', '-f', format_, '-i', input_, '-ac', ac, '-ar', ar, '-f', format_for_conversion, '-fflags', 'nobuffer', '-hide_banner', '-loglevel', 'quiet', 'pipe:1', ] SCREAMING_SNAKE_CASE__ : List[str] = int(round(sampling_rate * chunk_length_s ) ) * size_of_sample SCREAMING_SNAKE_CASE__ : int = _ffmpeg_stream(lowercase__ , lowercase__ ) for item in iterator: yield item def _a ( lowercase__ : int , lowercase__ : float , lowercase__ : Optional[int] = None , lowercase__ : Optional[Union[Tuple[float, float], float]] = None , lowercase__ : str = "f32le" , ): '''simple docstring''' if stream_chunk_s is not None: SCREAMING_SNAKE_CASE__ : Optional[int] = stream_chunk_s else: SCREAMING_SNAKE_CASE__ : Optional[int] = chunk_length_s SCREAMING_SNAKE_CASE__ : Any = ffmpeg_microphone(lowercase__ , lowercase__ , format_for_conversion=lowercase__ ) if format_for_conversion == "s16le": SCREAMING_SNAKE_CASE__ : int = np.intaa SCREAMING_SNAKE_CASE__ : Any = 2 elif format_for_conversion == "f32le": SCREAMING_SNAKE_CASE__ : int = np.floataa SCREAMING_SNAKE_CASE__ : Optional[int] = 4 else: raise ValueError(f'''Unhandled format `{format_for_conversion}`. 
Please use `s16le` or `f32le`''' ) if stride_length_s is None: SCREAMING_SNAKE_CASE__ : str = chunk_length_s / 6 SCREAMING_SNAKE_CASE__ : List[Any] = int(round(sampling_rate * chunk_length_s ) ) * size_of_sample if isinstance(lowercase__ , (int, float) ): SCREAMING_SNAKE_CASE__ : List[str] = [stride_length_s, stride_length_s] SCREAMING_SNAKE_CASE__ : Any = int(round(sampling_rate * stride_length_s[0] ) ) * size_of_sample SCREAMING_SNAKE_CASE__ : Tuple = int(round(sampling_rate * stride_length_s[1] ) ) * size_of_sample SCREAMING_SNAKE_CASE__ : List[Any] = datetime.datetime.now() SCREAMING_SNAKE_CASE__ : Union[str, Any] = datetime.timedelta(seconds=lowercase__ ) for item in chunk_bytes_iter(lowercase__ , lowercase__ , stride=(stride_left, stride_right) , stream=lowercase__ ): # Put everything back in numpy scale SCREAMING_SNAKE_CASE__ : Tuple = np.frombuffer(item['raw'] , dtype=lowercase__ ) SCREAMING_SNAKE_CASE__ : Optional[int] = ( item['stride'][0] // size_of_sample, item['stride'][1] // size_of_sample, ) SCREAMING_SNAKE_CASE__ : Union[str, Any] = sampling_rate audio_time += delta if datetime.datetime.now() > audio_time + 10 * delta: # We're late !! SKIP continue yield item def _a ( lowercase__ : Optional[int] , lowercase__ : int , lowercase__ : Tuple[int, int] , lowercase__ : bool = False ): '''simple docstring''' SCREAMING_SNAKE_CASE__ : Union[str, Any] = b'' SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Any = stride if stride_left + stride_right >= chunk_len: raise ValueError( f'''Stride needs to be strictly smaller than chunk_len: ({stride_left}, {stride_right}) vs {chunk_len}''' ) SCREAMING_SNAKE_CASE__ : int = 0 for raw in iterator: acc += raw if stream and len(lowercase__ ) < chunk_len: SCREAMING_SNAKE_CASE__ : int = (_stride_left, 0) yield {"raw": acc[:chunk_len], "stride": stride, "partial": True} else: while len(lowercase__ ) >= chunk_len: # We are flushing the accumulator SCREAMING_SNAKE_CASE__ : Union[str, Any] = (_stride_left, stride_right) SCREAMING_SNAKE_CASE__ : Union[str, Any] = {'raw': acc[:chunk_len], 'stride': stride} if stream: SCREAMING_SNAKE_CASE__ : Optional[int] = False yield item SCREAMING_SNAKE_CASE__ : Optional[Any] = stride_left SCREAMING_SNAKE_CASE__ : Tuple = acc[chunk_len - stride_left - stride_right :] # Last chunk if len(lowercase__ ) > stride_left: SCREAMING_SNAKE_CASE__ : Union[str, Any] = {'raw': acc, 'stride': (_stride_left, 0)} if stream: SCREAMING_SNAKE_CASE__ : Optional[int] = False yield item def _a ( lowercase__ : List[Any] , lowercase__ : int ): '''simple docstring''' SCREAMING_SNAKE_CASE__ : int = 2**24 # 16Mo try: with subprocess.Popen(lowercase__ , stdout=subprocess.PIPE , bufsize=lowercase__ ) as ffmpeg_process: while True: SCREAMING_SNAKE_CASE__ : Optional[Any] = ffmpeg_process.stdout.read(lowercase__ ) if raw == b"": break yield raw except FileNotFoundError as error: raise ValueError('ffmpeg was not found but is required to stream audio files from filename' ) from error
85
def A ( lowercase__ : int ) -> Optional[Any]: stooge(lowercase__ , 0 , len(lowercase__ ) - 1 ) return arr def A ( lowercase__ : Union[str, Any] , lowercase__ : Dict , lowercase__ : str ) -> List[str]: if i >= h: return # If first element is smaller than the last then swap them if arr[i] > arr[h]: UpperCamelCase__ , UpperCamelCase__ :List[str] = arr[h], arr[i] # If there are more than 2 elements in the array if h - i + 1 > 2: UpperCamelCase__ :Optional[int] = (int)((h - i + 1) / 3 ) # Recursively sort first 2/3 elements stooge(lowercase__ , lowercase__ , (h - t) ) # Recursively sort last 2/3 elements stooge(lowercase__ , i + t , (lowercase__) ) # Recursively sort first 2/3 elements stooge(lowercase__ , lowercase__ , (h - t) ) if __name__ == "__main__": UpperCamelCase = input("Enter numbers separated by a comma:\n").strip() UpperCamelCase = [int(item) for item in user_input.split(",")] print(stooge_sort(unsorted))
45
0
import unittest from transformers import BigBirdConfig, is_flax_available from transformers.testing_utils import require_flax, slow from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask if is_flax_available(): import jax from transformers.models.big_bird.modeling_flax_big_bird import ( FlaxBigBirdForCausalLM, FlaxBigBirdForMaskedLM, FlaxBigBirdForMultipleChoice, FlaxBigBirdForPreTraining, FlaxBigBirdForQuestionAnswering, FlaxBigBirdForSequenceClassification, FlaxBigBirdForTokenClassification, FlaxBigBirdModel, ) class _a ( unittest.TestCase ): """simple docstring""" def __init__( self : Optional[int] , UpperCAmelCase : List[Any] , UpperCAmelCase : Tuple=2 , UpperCAmelCase : Any=56 , UpperCAmelCase : Dict=True , UpperCAmelCase : Any=True , UpperCAmelCase : str=True , UpperCAmelCase : Any=True , UpperCAmelCase : Optional[Any]=99 , UpperCAmelCase : Optional[int]=32 , UpperCAmelCase : List[str]=2 , UpperCAmelCase : Tuple=2 , UpperCAmelCase : Dict=7 , UpperCAmelCase : Any="gelu_new" , UpperCAmelCase : Optional[Any]=0.1 , UpperCAmelCase : int=0.1 , UpperCAmelCase : Optional[Any]=512 , UpperCAmelCase : Any=16 , UpperCAmelCase : Optional[int]=2 , UpperCAmelCase : str=0.02 , UpperCAmelCase : str=4 , UpperCAmelCase : List[str]="block_sparse" , UpperCAmelCase : List[str]=True , UpperCAmelCase : str=False , UpperCAmelCase : int=2 , UpperCAmelCase : Any=3 , ): A_ = parent A_ = batch_size A_ = seq_length A_ = is_training A_ = use_attention_mask A_ = use_token_type_ids A_ = use_labels A_ = vocab_size A_ = hidden_size A_ = num_hidden_layers A_ = num_attention_heads A_ = intermediate_size A_ = hidden_act A_ = hidden_dropout_prob A_ = attention_probs_dropout_prob A_ = max_position_embeddings A_ = type_vocab_size A_ = type_sequence_label_size A_ = initializer_range A_ = num_choices A_ = rescale_embeddings A_ = attention_type A_ = use_bias A_ = block_size A_ = num_random_blocks def __A ( self : Any ): A_ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) A_ = None if self.use_attention_mask: A_ = random_attention_mask([self.batch_size, self.seq_length] ) A_ = None if self.use_token_type_ids: A_ = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) A_ = BigBirdConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=UpperCAmelCase , initializer_range=self.initializer_range , attention_type=self.attention_type , block_size=self.block_size , num_random_blocks=self.num_random_blocks , use_bias=self.use_bias , rescale_embeddings=self.rescale_embeddings , ) return config, input_ids, token_type_ids, attention_mask def __A ( self : Tuple ): A_ = self.prepare_config_and_inputs() A_ , A_ , A_ , A_ = config_and_inputs A_ = { "input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": attention_mask, } return config, inputs_dict @require_flax class _a ( snake_case_ , unittest.TestCase ): """simple docstring""" _lowerCamelCase : List[str] = ( ( FlaxBigBirdForCausalLM, FlaxBigBirdModel, FlaxBigBirdForPreTraining, FlaxBigBirdForMaskedLM, FlaxBigBirdForMultipleChoice, FlaxBigBirdForQuestionAnswering, FlaxBigBirdForSequenceClassification, 
FlaxBigBirdForTokenClassification, ) if is_flax_available() else () ) _lowerCamelCase : str = False _lowerCamelCase : List[str] = False def __A ( self : Any ): A_ = FlaxBigBirdModelTester(self ) @slow # copied from `test_modeling_flax_common` because it takes much longer than other models def __A ( self : str ): super().test_from_pretrained_save_pretrained() @slow # copied from `test_modeling_flax_common` because it takes much longer than other models def __A ( self : List[Any] ): super().test_from_pretrained_with_no_automatic_init() @slow # copied from `test_modeling_flax_common` because it takes much longer than other models def __A ( self : Optional[Any] ): super().test_no_automatic_init() @slow # copied from `test_modeling_flax_common` because it takes much longer than other models def __A ( self : Dict ): super().test_hidden_states_output() @slow def __A ( self : str ): for model_class_name in self.all_model_classes: A_ = model_class_name.from_pretrained("google/bigbird-roberta-base" ) self.assertIsNotNone(UpperCAmelCase ) def __A ( self : Dict ): if self.test_attn_probs: super().test_attention_outputs() @slow # copied from `test_modeling_flax_common` because it takes much longer than other models def __A ( self : Any ): A_ , A_ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: with self.subTest(model_class.__name__ ): A_ = self._prepare_for_class(UpperCAmelCase , UpperCAmelCase ) A_ = model_class(UpperCAmelCase ) @jax.jit def model_jitted(UpperCAmelCase : List[str] , UpperCAmelCase : Union[str, Any]=None , **UpperCAmelCase : Tuple ): return model(input_ids=UpperCAmelCase , attention_mask=UpperCAmelCase , **UpperCAmelCase ) with self.subTest("JIT Enabled" ): A_ = model_jitted(**UpperCAmelCase ).to_tuple() with self.subTest("JIT Disabled" ): with jax.disable_jit(): A_ = model_jitted(**UpperCAmelCase ).to_tuple() self.assertEqual(len(UpperCAmelCase ) , len(UpperCAmelCase ) ) for jitted_output, output in zip(UpperCAmelCase , UpperCAmelCase ): self.assertEqual(jitted_output.shape , output.shape ) def __A ( self : Union[str, Any] , UpperCAmelCase : str , UpperCAmelCase : str , UpperCAmelCase : Any , UpperCAmelCase : List[str]=1E-5 , UpperCAmelCase : Optional[int]="outputs" , UpperCAmelCase : List[Any]=None ): # `bigbird_block_sparse_attention` in `FlaxBigBird` returns `attention_probs = None`, while in PyTorch version, # an effort was done to return `attention_probs` (yet to be verified). if name.startswith("outputs.attentions" ): return else: super().check_pt_flax_outputs(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase )
86
import argparse import json import os from tensorflow.core.protobuf.saved_model_pba import SavedModel # All paths are set with the intent you should run this script from the root of the repo with the command # python utils/check_copies.py UpperCamelCase = "." # Internal TensorFlow ops that can be safely ignored (mostly specific to a saved model) UpperCamelCase = [ "Assert", "AssignVariableOp", "EmptyTensorList", "MergeV2Checkpoints", "ReadVariableOp", "ResourceGather", "RestoreV2", "SaveV2", "ShardedFilename", "StatefulPartitionedCall", "StaticRegexFullMatch", "VarHandleOp", ] def A ( lowercase__ : Tuple , lowercase__ : Optional[Any] , lowercase__ : Dict ) -> List[Any]: UpperCamelCase__ :str = SavedModel() UpperCamelCase__ :List[str] = [] with open(os.path.join(lowercase__ , """utils""" , """tf_ops""" , """onnx.json""" ) ) as f: UpperCamelCase__ :str = json.load(lowercase__ )["""opsets"""] for i in range(1 , opset + 1 ): onnx_ops.extend(onnx_opsets[str(lowercase__ )] ) with open(lowercase__ , """rb""" ) as f: saved_model.ParseFromString(f.read() ) UpperCamelCase__ :Tuple = set() # Iterate over every metagraph in case there is more than one (a saved model can contain multiple graphs) for meta_graph in saved_model.meta_graphs: # Add operations in the graph definition model_op_names.update(node.op for node in meta_graph.graph_def.node ) # Go through the functions in the graph definition for func in meta_graph.graph_def.library.function: # Add operations in each function model_op_names.update(node.op for node in func.node_def ) # Convert to list, sorted if you want UpperCamelCase__ :Union[str, Any] = sorted(lowercase__ ) UpperCamelCase__ :List[Any] = [] for op in model_op_names: if op not in onnx_ops and op not in INTERNAL_OPS: incompatible_ops.append(lowercase__ ) if strict and len(lowercase__ ) > 0: raise Exception(f"""Found the following incompatible ops for the opset {opset}:\n""" + incompatible_ops ) elif len(lowercase__ ) > 0: print(f"""Found the following incompatible ops for the opset {opset}:""" ) print(*lowercase__ , sep="""\n""" ) else: print(f"""The saved model {saved_model_path} can properly be converted with ONNX.""" ) if __name__ == "__main__": UpperCamelCase = argparse.ArgumentParser() parser.add_argument("--saved_model_path", help="Path of the saved model to check (the .pb file).") parser.add_argument( "--opset", default=12, type=int, help="The ONNX opset against which the model has to be tested." ) parser.add_argument( "--framework", choices=["onnx"], default="onnx", help="Frameworks against which to test the saved model." ) parser.add_argument( "--strict", action="store_true", help="Whether make the checking strict (raise errors) or not (raise warnings)" ) UpperCamelCase = parser.parse_args() if args.framework == "onnx": onnx_compliancy(args.saved_model_path, args.strict, args.opset)
45
0
import warnings from typing import Dict import numpy as np from ..utils import ExplicitEnum, add_end_docstrings, is_tf_available, is_torch_available from .base import PIPELINE_INIT_ARGS, GenericTensor, Pipeline if is_tf_available(): from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING if is_torch_available(): from ..models.auto.modeling_auto import MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING def SCREAMING_SNAKE_CASE ( lowercase_ ) -> int: """simple docstring""" return 1.0 / (1.0 + np.exp(-_outputs )) def SCREAMING_SNAKE_CASE ( lowercase_ ) -> Union[str, Any]: """simple docstring""" A__ = np.max(_outputs , axis=-1 , keepdims=lowercase_ ) A__ = np.exp(_outputs - maxes ) return shifted_exp / shifted_exp.sum(axis=-1 , keepdims=lowercase_ ) class UpperCamelCase_ ( UpperCAmelCase__ ): '''simple docstring''' UpperCAmelCase__ = '''sigmoid''' UpperCAmelCase__ = '''softmax''' UpperCAmelCase__ = '''none''' @add_end_docstrings( UpperCAmelCase__ , R''' return_all_scores (`bool`, *optional*, defaults to `False`): Whether to return all prediction scores or just the one of the predicted class. function_to_apply (`str`, *optional*, defaults to `"default"`): The function to apply to the model outputs in order to retrieve the scores. Accepts four different values: - `"default"`: if the model has a single label, will apply the sigmoid function on the output. If the model has several labels, will apply the softmax function on the output. - `"sigmoid"`: Applies the sigmoid function on the output. - `"softmax"`: Applies the softmax function on the output. - `"none"`: Does not apply any function on the output. ''' , ) class UpperCamelCase_ ( UpperCAmelCase__ ): '''simple docstring''' UpperCAmelCase__ = False UpperCAmelCase__ = ClassificationFunction.NONE def __init__( self : Any , **UpperCAmelCase__ : Optional[Any]) ->str: '''simple docstring''' super().__init__(**UpperCAmelCase__) self.check_model_type( TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING if self.framework == '''tf''' else MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING) def SCREAMING_SNAKE_CASE ( self : List[str] , UpperCAmelCase__ : Tuple=None , UpperCAmelCase__ : Any=None , UpperCAmelCase__ : int="" , **UpperCAmelCase__ : Any) ->int: '''simple docstring''' A__ = tokenizer_kwargs A__ = {} if hasattr(self.model.config , '''return_all_scores''') and return_all_scores is None: A__ = self.model.config.return_all_scores if isinstance(UpperCAmelCase__ , UpperCAmelCase__) or top_k is None: A__ = top_k A__ = False elif return_all_scores is not None: warnings.warn( '''`return_all_scores` is now deprecated, if want a similar functionality use `top_k=None` instead of''' ''' `return_all_scores=True` or `top_k=1` instead of `return_all_scores=False`.''' , UpperCAmelCase__ , ) if return_all_scores: A__ = None else: A__ = 1 if isinstance(UpperCAmelCase__ , UpperCAmelCase__): A__ = ClassificationFunction[function_to_apply.upper()] if function_to_apply is not None: A__ = function_to_apply return preprocess_params, {}, postprocess_params def __call__( self : str , *UpperCAmelCase__ : List[Any] , **UpperCAmelCase__ : Optional[int]) ->Union[str, Any]: '''simple docstring''' A__ = super().__call__(*UpperCAmelCase__ , **UpperCAmelCase__) # TODO try and retrieve it in a nicer way from _sanitize_parameters. 
A__ = '''top_k''' not in kwargs if isinstance(args[0] , UpperCAmelCase__) and _legacy: # This pipeline is odd, and return a list when single item is run return [result] else: return result def SCREAMING_SNAKE_CASE ( self : Tuple , UpperCAmelCase__ : Any , **UpperCAmelCase__ : str) ->Dict[str, GenericTensor]: '''simple docstring''' A__ = self.framework if isinstance(UpperCAmelCase__ , UpperCAmelCase__): return self.tokenizer(**UpperCAmelCase__ , return_tensors=UpperCAmelCase__ , **UpperCAmelCase__) elif isinstance(UpperCAmelCase__ , UpperCAmelCase__) and len(UpperCAmelCase__) == 1 and isinstance(inputs[0] , UpperCAmelCase__) and len(inputs[0]) == 2: # It used to be valid to use a list of list of list for text pairs, keeping this path for BC return self.tokenizer( text=inputs[0][0] , text_pair=inputs[0][1] , return_tensors=UpperCAmelCase__ , **UpperCAmelCase__) elif isinstance(UpperCAmelCase__ , UpperCAmelCase__): # This is likely an invalid usage of the pipeline attempting to pass text pairs. raise ValueError( '''The pipeline received invalid inputs, if you are trying to send text pairs, you can try to send a''' ''' dictionary `{"text": "My text", "text_pair": "My pair"}` in order to send a text pair.''') return self.tokenizer(UpperCAmelCase__ , return_tensors=UpperCAmelCase__ , **UpperCAmelCase__) def SCREAMING_SNAKE_CASE ( self : List[str] , UpperCAmelCase__ : Tuple) ->Tuple: '''simple docstring''' return self.model(**UpperCAmelCase__) def SCREAMING_SNAKE_CASE ( self : Optional[Any] , UpperCAmelCase__ : int , UpperCAmelCase__ : Optional[int]=None , UpperCAmelCase__ : List[Any]=1 , UpperCAmelCase__ : str=True) ->Dict: '''simple docstring''' if function_to_apply is None: if self.model.config.problem_type == "multi_label_classification" or self.model.config.num_labels == 1: A__ = ClassificationFunction.SIGMOID elif self.model.config.problem_type == "single_label_classification" or self.model.config.num_labels > 1: A__ = ClassificationFunction.SOFTMAX elif hasattr(self.model.config , '''function_to_apply''') and function_to_apply is None: A__ = self.model.config.function_to_apply else: A__ = ClassificationFunction.NONE A__ = model_outputs['''logits'''][0] A__ = outputs.numpy() if function_to_apply == ClassificationFunction.SIGMOID: A__ = sigmoid(UpperCAmelCase__) elif function_to_apply == ClassificationFunction.SOFTMAX: A__ = softmax(UpperCAmelCase__) elif function_to_apply == ClassificationFunction.NONE: A__ = outputs else: raise ValueError(f"""Unrecognized `function_to_apply` argument: {function_to_apply}""") if top_k == 1 and _legacy: return {"label": self.model.config.idalabel[scores.argmax().item()], "score": scores.max().item()} A__ = [ {'''label''': self.model.config.idalabel[i], '''score''': score.item()} for i, score in enumerate(UpperCAmelCase__) ] if not _legacy: dict_scores.sort(key=lambda UpperCAmelCase__: x["score"] , reverse=UpperCAmelCase__) if top_k is not None: A__ = dict_scores[:top_k] return dict_scores
87
from __future__ import annotations def A ( lowercase__ : str , lowercase__ : list[str] | None = None , lowercase__ : dict[str, float] | None = None , lowercase__ : bool = False , ) -> tuple[int, float, str]: UpperCamelCase__ :Dict = cipher_alphabet or [chr(lowercase__ ) for i in range(97 , 123 )] # If the argument is None or the user provided an empty dictionary if not frequencies_dict: # Frequencies of letters in the english language (how much they show up) UpperCamelCase__ :Optional[Any] = { """a""": 0.08497, """b""": 0.01492, """c""": 0.02202, """d""": 0.04253, """e""": 0.11162, """f""": 0.02228, """g""": 0.02015, """h""": 0.06094, """i""": 0.07546, """j""": 0.00153, """k""": 0.01292, """l""": 0.04025, """m""": 0.02406, """n""": 0.06749, """o""": 0.07507, """p""": 0.01929, """q""": 0.00095, """r""": 0.07587, """s""": 0.06327, """t""": 0.09356, """u""": 0.02758, """v""": 0.00978, """w""": 0.02560, """x""": 0.00150, """y""": 0.01994, """z""": 0.00077, } else: # Custom frequencies dictionary UpperCamelCase__ :Optional[int] = frequencies_dict if not case_sensitive: UpperCamelCase__ :int = ciphertext.lower() # Chi squared statistic values UpperCamelCase__ :dict[int, tuple[float, str]] = {} # cycle through all of the shifts for shift in range(len(lowercase__ ) ): UpperCamelCase__ :int = """""" # decrypt the message with the shift for letter in ciphertext: try: # Try to index the letter in the alphabet UpperCamelCase__ :int = (alphabet_letters.index(letter.lower() ) - shift) % len( lowercase__ ) decrypted_with_shift += ( alphabet_letters[new_key].upper() if case_sensitive and letter.isupper() else alphabet_letters[new_key] ) except ValueError: # Append the character if it isn't in the alphabet decrypted_with_shift += letter UpperCamelCase__ :Optional[int] = 0.0 # Loop through each letter in the decoded message with the shift for letter in decrypted_with_shift: if case_sensitive: UpperCamelCase__ :Optional[int] = letter.lower() if letter in frequencies: # Get the amount of times the letter occurs in the message UpperCamelCase__ :Optional[int] = decrypted_with_shift.lower().count(lowercase__ ) # Get the excepcted amount of times the letter should appear based # on letter frequencies UpperCamelCase__ :Optional[int] = frequencies[letter] * occurrences # Complete the chi squared statistic formula UpperCamelCase__ :Dict = ((occurrences - expected) ** 2) / expected # Add the margin of error to the total chi squared statistic chi_squared_statistic += chi_letter_value else: if letter.lower() in frequencies: # Get the amount of times the letter occurs in the message UpperCamelCase__ :List[str] = decrypted_with_shift.count(lowercase__ ) # Get the excepcted amount of times the letter should appear based # on letter frequencies UpperCamelCase__ :Union[str, Any] = frequencies[letter] * occurrences # Complete the chi squared statistic formula UpperCamelCase__ :List[str] = ((occurrences - expected) ** 2) / expected # Add the margin of error to the total chi squared statistic chi_squared_statistic += chi_letter_value # Add the data to the chi_squared_statistic_values dictionary UpperCamelCase__ :Union[str, Any] = ( chi_squared_statistic, decrypted_with_shift, ) # Get the most likely cipher by finding the cipher with the smallest chi squared # statistic def chi_squared_statistic_values_sorting_key(lowercase__ : int ) -> tuple[float, str]: return chi_squared_statistic_values[key] UpperCamelCase__ :int = min( lowercase__ , key=lowercase__ , ) # Get all the data from the most likely cipher (key, decoded message) ( 
( UpperCamelCase__ ) , ( UpperCamelCase__ ) , ) :Tuple = chi_squared_statistic_values[most_likely_cipher] # Return the data on the most likely shift return ( most_likely_cipher, most_likely_cipher_chi_squared_value, decoded_most_likely_cipher, )
45
0
"""simple docstring""" import os from collections import namedtuple import pytest from datasets import ClassLabel, Features, Sequence, Value from datasets.commands.test import TestCommand from datasets.info import DatasetInfo, DatasetInfosDict UpperCAmelCase = namedtuple( """_TestCommandArgs""", [ """dataset""", """name""", """cache_dir""", """data_dir""", """all_configs""", """save_infos""", """ignore_verifications""", """force_redownload""", """clear_cache""", ], defaults=[None, None, None, False, False, False, False, False], ) def _snake_case ( __snake_case : int , __snake_case : str ): """simple docstring""" return (abs(source - target ) / target) < 0.01 @pytest.mark.integration def _snake_case ( __snake_case : Tuple ): """simple docstring""" _lowerCamelCase : Tuple = _TestCommandArgs(dataset=__snake_case , all_configs=__snake_case , save_infos=__snake_case ) _lowerCamelCase : Union[str, Any] = TestCommand(*__snake_case ) test_command.run() _lowerCamelCase : List[str] = os.path.join(__snake_case , """README.md""" ) assert os.path.exists(__snake_case ) _lowerCamelCase : Dict = DatasetInfosDict.from_directory(__snake_case ) _lowerCamelCase : int = DatasetInfosDict( { """default""": DatasetInfo( features=Features( { """tokens""": Sequence(Value("""string""" ) ), """ner_tags""": Sequence( ClassLabel(names=["""O""", """B-PER""", """I-PER""", """B-ORG""", """I-ORG""", """B-LOC""", """I-LOC"""] ) ), """langs""": Sequence(Value("""string""" ) ), """spans""": Sequence(Value("""string""" ) ), } ) , splits=[ { """name""": """train""", """num_bytes""": 2351563, """num_examples""": 10000, }, { """name""": """validation""", """num_bytes""": 238418, """num_examples""": 1000, }, ] , download_size=3940680 , dataset_size=2589981 , ) } ) assert dataset_infos.keys() == expected_dataset_infos.keys() for key in DatasetInfo._INCLUDED_INFO_IN_YAML: _lowerCamelCase , _lowerCamelCase : Any = getattr(dataset_infos["""default"""] , __snake_case ), getattr(expected_dataset_infos["""default"""] , __snake_case ) if key == "num_bytes": assert is_apercent_close(__snake_case , __snake_case ) elif key == "splits": assert list(__snake_case ) == list(__snake_case ) for split in result: assert result[split].name == expected[split].name assert result[split].num_examples == expected[split].num_examples assert is_apercent_close(result[split].num_bytes , expected[split].num_bytes ) else: result == expected
88
import warnings from ...utils import logging from .image_processing_mobilevit import MobileViTImageProcessor UpperCamelCase = logging.get_logger(__name__) class lowerCAmelCase_ ( lowercase ): """simple docstring""" def __init__( self :Union[str, Any] , *lowerCamelCase__ :Optional[int] , **lowerCamelCase__ :Dict ): warnings.warn( """The class MobileViTFeatureExtractor is deprecated and will be removed in version 5 of Transformers.""" """ Please use MobileViTImageProcessor instead.""" , lowerCamelCase__ , ) super().__init__(*lowerCamelCase__ , **lowerCamelCase__ )
45
0
import math import time from typing import Dict, List, Optional from torch.utils.data import Dataset from transformers import SeqaSeqTrainer, is_torch_tpu_available from transformers.trainer_utils import PredictionOutput, speed_metrics if is_torch_tpu_available(check_device=False): import torch_xla.core.xla_model as xm import torch_xla.debug.metrics as met class _lowerCamelCase( _a ): def __init__( self, *lowerCamelCase, lowerCamelCase=None, lowerCamelCase=None, **lowerCamelCase) -> List[Any]: """simple docstring""" super().__init__(*lowerCamelCase, **lowerCamelCase) _lowercase : Any = eval_examples _lowercase : List[Any] = post_process_function def UpperCamelCase ( self, lowerCamelCase = None, lowerCamelCase=None, lowerCamelCase = None, lowerCamelCase = "eval", **lowerCamelCase, ) -> Dict[str, float]: """simple docstring""" _lowercase : Optional[Any] = gen_kwargs.copy() _lowercase : List[Any] = ( gen_kwargs['max_length'] if gen_kwargs.get('max_length') is not None else self.args.generation_max_length ) _lowercase : Any = ( gen_kwargs['num_beams'] if gen_kwargs.get('num_beams') is not None else self.args.generation_num_beams ) _lowercase : Optional[Any] = gen_kwargs _lowercase : Optional[int] = self.eval_dataset if eval_dataset is None else eval_dataset _lowercase : Optional[int] = self.get_eval_dataloader(lowerCamelCase) _lowercase : Optional[Any] = self.eval_examples if eval_examples is None else eval_examples # Temporarily disable metric computation, we will do it in the loop here. _lowercase : List[Any] = self.compute_metrics _lowercase : int = None _lowercase : Optional[Any] = time.time() _lowercase : Optional[Any] = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop try: _lowercase : int = eval_loop( lowerCamelCase, description='Evaluation', prediction_loss_only=True if compute_metrics is None else None, ignore_keys=lowerCamelCase, metric_key_prefix=lowerCamelCase, ) finally: _lowercase : Union[str, Any] = compute_metrics _lowercase : Optional[int] = self.args.eval_batch_size * self.args.world_size if F'''{metric_key_prefix}_jit_compilation_time''' in output.metrics: start_time += output.metrics[F'''{metric_key_prefix}_jit_compilation_time'''] output.metrics.update( speed_metrics( lowerCamelCase, lowerCamelCase, num_samples=output.num_samples, num_steps=math.ceil(output.num_samples / total_batch_size), )) if self.post_process_function is not None and self.compute_metrics is not None and self.args.should_save: # Only the main node write the results by default _lowercase : Dict = self.post_process_function(lowerCamelCase, lowerCamelCase, lowerCamelCase) _lowercase : List[Any] = self.compute_metrics(lowerCamelCase) # Prefix all keys with metric_key_prefix + '_' for key in list(metrics.keys()): if not key.startswith(F'''{metric_key_prefix}_'''): _lowercase : Optional[int] = metrics.pop(lowerCamelCase) metrics.update(output.metrics) else: _lowercase : Dict = output.metrics if self.args.should_log: # Only the main node log the results by default self.log(lowerCamelCase) if self.args.tpu_metrics_debug or self.args.debug: # tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.) 
xm.master_print(met.metrics_report()) _lowercase : List[str] = self.callback_handler.on_evaluate(self.args, self.state, self.control, lowerCamelCase) return metrics def UpperCamelCase ( self, lowerCamelCase, lowerCamelCase, lowerCamelCase=None, lowerCamelCase = "test", **lowerCamelCase) -> List[str]: """simple docstring""" _lowercase : str = gen_kwargs.copy() _lowercase : str = self.get_test_dataloader(lowerCamelCase) # Temporarily disable metric computation, we will do it in the loop here. _lowercase : List[str] = self.compute_metrics _lowercase : Any = None _lowercase : str = time.time() _lowercase : Optional[int] = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop try: _lowercase : str = eval_loop( lowerCamelCase, description='Prediction', prediction_loss_only=True if compute_metrics is None else None, ignore_keys=lowerCamelCase, metric_key_prefix=lowerCamelCase, ) finally: _lowercase : int = compute_metrics _lowercase : int = self.args.eval_batch_size * self.args.world_size if F'''{metric_key_prefix}_jit_compilation_time''' in output.metrics: start_time += output.metrics[F'''{metric_key_prefix}_jit_compilation_time'''] output.metrics.update( speed_metrics( lowerCamelCase, lowerCamelCase, num_samples=output.num_samples, num_steps=math.ceil(output.num_samples / total_batch_size), )) if self.post_process_function is None or self.compute_metrics is None: return output _lowercase : Dict = self.post_process_function(lowerCamelCase, lowerCamelCase, lowerCamelCase, 'predict') _lowercase : Optional[Any] = self.compute_metrics(lowerCamelCase) # Prefix all keys with metric_key_prefix + '_' for key in list(metrics.keys()): if not key.startswith(F'''{metric_key_prefix}_'''): _lowercase : Optional[Any] = metrics.pop(lowerCamelCase) metrics.update(output.metrics) return PredictionOutput(predictions=predictions.predictions, label_ids=predictions.label_ids, metrics=lowerCamelCase)
89
import json import sys import tempfile import unittest from pathlib import Path import transformers from transformers import ( CONFIG_MAPPING, FEATURE_EXTRACTOR_MAPPING, AutoConfig, AutoFeatureExtractor, WavaVecaConfig, WavaVecaFeatureExtractor, ) from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, get_tests_dir sys.path.append(str(Path(__file__).parent.parent.parent.parent / "utils")) from test_module.custom_configuration import CustomConfig # noqa E402 from test_module.custom_feature_extraction import CustomFeatureExtractor # noqa E402 UpperCamelCase = get_tests_dir("fixtures") UpperCamelCase = get_tests_dir("fixtures/dummy_feature_extractor_config.json") UpperCamelCase = get_tests_dir("fixtures/dummy-config.json") class lowerCAmelCase_ ( unittest.TestCase ): """simple docstring""" def __a ( self :Optional[int] ): UpperCamelCase__ :Optional[int] = 0 def __a ( self :str ): UpperCamelCase__ :Optional[Any] = AutoFeatureExtractor.from_pretrained("""facebook/wav2vec2-base-960h""" ) self.assertIsInstance(lowerCamelCase__ , lowerCamelCase__ ) def __a ( self :Dict ): UpperCamelCase__ :Dict = AutoFeatureExtractor.from_pretrained(lowerCamelCase__ ) self.assertIsInstance(lowerCamelCase__ , lowerCamelCase__ ) def __a ( self :Optional[int] ): with tempfile.TemporaryDirectory() as tmpdirname: UpperCamelCase__ :List[str] = WavaVecaConfig() # remove feature_extractor_type to make sure config.json alone is enough to load feature processor locally UpperCamelCase__ :Tuple = AutoFeatureExtractor.from_pretrained(lowerCamelCase__ ).to_dict() config_dict.pop("""feature_extractor_type""" ) UpperCamelCase__ :Union[str, Any] = WavaVecaFeatureExtractor(**lowerCamelCase__ ) # save in new folder model_config.save_pretrained(lowerCamelCase__ ) config.save_pretrained(lowerCamelCase__ ) UpperCamelCase__ :Optional[Any] = AutoFeatureExtractor.from_pretrained(lowerCamelCase__ ) # make sure private variable is not incorrectly saved UpperCamelCase__ :Tuple = json.loads(config.to_json_string() ) self.assertTrue("""_processor_class""" not in dict_as_saved ) self.assertIsInstance(lowerCamelCase__ , lowerCamelCase__ ) def __a ( self :Union[str, Any] ): UpperCamelCase__ :Dict = AutoFeatureExtractor.from_pretrained(lowerCamelCase__ ) self.assertIsInstance(lowerCamelCase__ , lowerCamelCase__ ) def __a ( self :Dict ): with self.assertRaisesRegex( lowerCamelCase__ , """bert-base is not a local folder and is not a valid model identifier""" ): UpperCamelCase__ :Dict = AutoFeatureExtractor.from_pretrained("""bert-base""" ) def __a ( self :List[Any] ): with self.assertRaisesRegex( lowerCamelCase__ , r"""aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)""" ): UpperCamelCase__ :Optional[int] = AutoFeatureExtractor.from_pretrained(lowerCamelCase__ , revision="""aaaaaa""" ) def __a ( self :int ): with self.assertRaisesRegex( lowerCamelCase__ , """hf-internal-testing/config-no-model does not appear to have a file named preprocessor_config.json.""" , ): UpperCamelCase__ :List[Any] = AutoFeatureExtractor.from_pretrained("""hf-internal-testing/config-no-model""" ) def __a ( self :Optional[int] ): # If remote code is not set, we will time out when asking whether to load the model. with self.assertRaises(lowerCamelCase__ ): UpperCamelCase__ :List[Any] = AutoFeatureExtractor.from_pretrained( """hf-internal-testing/test_dynamic_feature_extractor""" ) # If remote code is disabled, we can't load this config. 
with self.assertRaises(lowerCamelCase__ ): UpperCamelCase__ :Optional[Any] = AutoFeatureExtractor.from_pretrained( """hf-internal-testing/test_dynamic_feature_extractor""" , trust_remote_code=lowerCamelCase__ ) UpperCamelCase__ :str = AutoFeatureExtractor.from_pretrained( """hf-internal-testing/test_dynamic_feature_extractor""" , trust_remote_code=lowerCamelCase__ ) self.assertEqual(feature_extractor.__class__.__name__ , """NewFeatureExtractor""" ) # Test feature extractor can be reloaded. with tempfile.TemporaryDirectory() as tmp_dir: feature_extractor.save_pretrained(lowerCamelCase__ ) UpperCamelCase__ :Any = AutoFeatureExtractor.from_pretrained(lowerCamelCase__ , trust_remote_code=lowerCamelCase__ ) self.assertEqual(reloaded_feature_extractor.__class__.__name__ , """NewFeatureExtractor""" ) def __a ( self :Dict ): try: AutoConfig.register("""custom""" , lowerCamelCase__ ) AutoFeatureExtractor.register(lowerCamelCase__ , lowerCamelCase__ ) # Trying to register something existing in the Transformers library will raise an error with self.assertRaises(lowerCamelCase__ ): AutoFeatureExtractor.register(lowerCamelCase__ , lowerCamelCase__ ) # Now that the config is registered, it can be used as any other config with the auto-API UpperCamelCase__ :Any = CustomFeatureExtractor.from_pretrained(lowerCamelCase__ ) with tempfile.TemporaryDirectory() as tmp_dir: feature_extractor.save_pretrained(lowerCamelCase__ ) UpperCamelCase__ :List[Any] = AutoFeatureExtractor.from_pretrained(lowerCamelCase__ ) self.assertIsInstance(lowerCamelCase__ , lowerCamelCase__ ) finally: if "custom" in CONFIG_MAPPING._extra_content: del CONFIG_MAPPING._extra_content["custom"] if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content: del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig] def __a ( self :Optional[int] ): class lowerCAmelCase_ ( lowercase ): """simple docstring""" _snake_case : Optional[int] = True try: AutoConfig.register("""custom""" , lowerCamelCase__ ) AutoFeatureExtractor.register(lowerCamelCase__ , lowerCamelCase__ ) # If remote code is not set, the default is to use local UpperCamelCase__ :Optional[Any] = AutoFeatureExtractor.from_pretrained( """hf-internal-testing/test_dynamic_feature_extractor""" ) self.assertEqual(feature_extractor.__class__.__name__ , """NewFeatureExtractor""" ) self.assertTrue(feature_extractor.is_local ) # If remote code is disabled, we load the local one. UpperCamelCase__ :str = AutoFeatureExtractor.from_pretrained( """hf-internal-testing/test_dynamic_feature_extractor""" , trust_remote_code=lowerCamelCase__ ) self.assertEqual(feature_extractor.__class__.__name__ , """NewFeatureExtractor""" ) self.assertTrue(feature_extractor.is_local ) # If remote is enabled, we load from the Hub UpperCamelCase__ :Optional[int] = AutoFeatureExtractor.from_pretrained( """hf-internal-testing/test_dynamic_feature_extractor""" , trust_remote_code=lowerCamelCase__ ) self.assertEqual(feature_extractor.__class__.__name__ , """NewFeatureExtractor""" ) self.assertTrue(not hasattr(lowerCamelCase__ , """is_local""" ) ) finally: if "custom" in CONFIG_MAPPING._extra_content: del CONFIG_MAPPING._extra_content["custom"] if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content: del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]
45
0
from __future__ import annotations


def depth_first_search(
    possible_board: list[int],
    diagonal_right_collisions: list[int],
    diagonal_left_collisions: list[int],
    boards: list[list[str]],
    n: int,
) -> None:
    # Get the next row in the current board (possible_board) to fill with a queen
    row = len(possible_board)

    # If row equals the size of the board, there is a queen in every row of the
    # current board (possible_board), so we have a complete solution
    if row == n:
        # We convert the variable possible_board that looks like this: [1, 3, 0, 2] to
        # this: ['. Q . . ', '. . . Q ', 'Q . . . ', '. . Q . ']
        boards.append([". " * i + "Q " + ". " * (n - 1 - i) for i in possible_board])
        return

    # We iterate over each column in the row to find all possible results
    for col in range(n):
        # First we check that the current board (possible_board) does not already
        # contain this column value, because a repeated value means a vertical
        # collision. Then we apply the two diagonal formulas:
        #
        # 45º:  y - x = b   or   row - col = b
        # 135º: y + x = b   or   row + col = b
        #
        # and verify that neither result is already recorded in
        # diagonal_right_collisions or diagonal_left_collisions respectively.
        #
        # If any of these checks is True there is a collision, so we continue
        # to the next value in the for loop.
        if (
            col in possible_board
            or row - col in diagonal_right_collisions
            or row + col in diagonal_left_collisions
        ):
            continue

        # Otherwise we call the dfs function again with updated inputs
        depth_first_search(
            [*possible_board, col],
            [*diagonal_right_collisions, row - col],
            [*diagonal_left_collisions, row + col],
            boards,
            n,
        )


def n_queens_solution(n: int) -> None:
    boards: list[list[str]] = []
    depth_first_search([], [], [], boards, n)

    # Print all the boards
    for board in boards:
        for column in board:
            print(column)
        print("")

    print(len(boards), "solutions were found.")


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    n_queens_solution(4)
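# A minimal sketch (added for illustration, not part of the dataset row above)
# of why row - col and row + col act as diagonal keys in the solver: every
# square on the same "\" diagonal shares row - col, and every square on the
# same "/" diagonal shares row + col.
assert {r - c for r, c in [(0, 1), (1, 2), (2, 3)]} == {-1}  # one "\" diagonal
assert {r + c for r, c in [(0, 3), (1, 2), (2, 1)]} == {3}  # one "/" diagonal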
90
import numpy as np import torch import tqdm from ...models.unet_ad import UNetaDModel from ...pipelines import DiffusionPipeline from ...utils import randn_tensor from ...utils.dummy_pt_objects import DDPMScheduler class lowerCAmelCase_ ( lowercase ): """simple docstring""" def __init__( self :int , lowerCamelCase__ :UNetaDModel , lowerCamelCase__ :UNetaDModel , lowerCamelCase__ :DDPMScheduler , lowerCamelCase__ :List[Any] , ): super().__init__() UpperCamelCase__ :Tuple = value_function UpperCamelCase__ :Optional[int] = unet UpperCamelCase__ :List[str] = scheduler UpperCamelCase__ :Dict = env UpperCamelCase__ :Dict = env.get_dataset() UpperCamelCase__ :Union[str, Any] = {} for key in self.data.keys(): try: UpperCamelCase__ :int = self.data[key].mean() except: # noqa: E722 pass UpperCamelCase__ :Any = {} for key in self.data.keys(): try: UpperCamelCase__ :int = self.data[key].std() except: # noqa: E722 pass UpperCamelCase__ :List[Any] = env.observation_space.shape[0] UpperCamelCase__ :List[str] = env.action_space.shape[0] def __a ( self :Union[str, Any] , lowerCamelCase__ :List[str] , lowerCamelCase__ :str ): return (x_in - self.means[key]) / self.stds[key] def __a ( self :int , lowerCamelCase__ :Optional[Any] , lowerCamelCase__ :Tuple ): return x_in * self.stds[key] + self.means[key] def __a ( self :Any , lowerCamelCase__ :int ): if type(lowerCamelCase__ ) is dict: return {k: self.to_torch(lowerCamelCase__ ) for k, v in x_in.items()} elif torch.is_tensor(lowerCamelCase__ ): return x_in.to(self.unet.device ) return torch.tensor(lowerCamelCase__ , device=self.unet.device ) def __a ( self :Union[str, Any] , lowerCamelCase__ :List[Any] , lowerCamelCase__ :Optional[Any] , lowerCamelCase__ :Tuple ): for key, val in cond.items(): UpperCamelCase__ :str = val.clone() return x_in def __a ( self :Union[str, Any] , lowerCamelCase__ :List[Any] , lowerCamelCase__ :int , lowerCamelCase__ :Optional[int] , lowerCamelCase__ :Optional[int] ): UpperCamelCase__ :Any = x.shape[0] UpperCamelCase__ :List[Any] = None for i in tqdm.tqdm(self.scheduler.timesteps ): # create batch of timesteps to pass into model UpperCamelCase__ :Optional[Any] = torch.full((batch_size,) , lowerCamelCase__ , device=self.unet.device , dtype=torch.long ) for _ in range(lowerCamelCase__ ): with torch.enable_grad(): x.requires_grad_() # permute to match dimension for pre-trained models UpperCamelCase__ :Dict = self.value_function(x.permute(0 , 2 , 1 ) , lowerCamelCase__ ).sample UpperCamelCase__ :List[Any] = torch.autograd.grad([y.sum()] , [x] )[0] UpperCamelCase__ :Union[str, Any] = self.scheduler._get_variance(lowerCamelCase__ ) UpperCamelCase__ :Any = torch.exp(0.5 * posterior_variance ) UpperCamelCase__ :Dict = model_std * grad UpperCamelCase__ :Optional[Any] = 0 UpperCamelCase__ :Dict = x.detach() UpperCamelCase__ :int = x + scale * grad UpperCamelCase__ :int = self.reset_xa(lowerCamelCase__ , lowerCamelCase__ , self.action_dim ) UpperCamelCase__ :List[str] = self.unet(x.permute(0 , 2 , 1 ) , lowerCamelCase__ ).sample.permute(0 , 2 , 1 ) # TODO: verify deprecation of this kwarg UpperCamelCase__ :List[str] = self.scheduler.step(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , predict_epsilon=lowerCamelCase__ )["""prev_sample"""] # apply conditions to the trajectory (set the initial state) UpperCamelCase__ :Optional[Any] = self.reset_xa(lowerCamelCase__ , lowerCamelCase__ , self.action_dim ) UpperCamelCase__ :Optional[int] = self.to_torch(lowerCamelCase__ ) return x, y def __call__( self :Optional[Any] , lowerCamelCase__ 
:Optional[int] , lowerCamelCase__ :str=64 , lowerCamelCase__ :Tuple=32 , lowerCamelCase__ :Dict=2 , lowerCamelCase__ :str=0.1 ): # normalize the observations and create batch dimension UpperCamelCase__ :List[str] = self.normalize(lowerCamelCase__ , """observations""" ) UpperCamelCase__ :List[str] = obs[None].repeat(lowerCamelCase__ , axis=0 ) UpperCamelCase__ :int = {0: self.to_torch(lowerCamelCase__ )} UpperCamelCase__ :Dict = (batch_size, planning_horizon, self.state_dim + self.action_dim) # generate initial noise and apply our conditions (to make the trajectories start at current state) UpperCamelCase__ :Any = randn_tensor(lowerCamelCase__ , device=self.unet.device ) UpperCamelCase__ :Optional[int] = self.reset_xa(lowerCamelCase__ , lowerCamelCase__ , self.action_dim ) UpperCamelCase__ :List[Any] = self.to_torch(lowerCamelCase__ ) # run the diffusion process UpperCamelCase__ , UpperCamelCase__ :Union[str, Any] = self.run_diffusion(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) # sort output trajectories by value UpperCamelCase__ :List[Any] = y.argsort(0 , descending=lowerCamelCase__ ).squeeze() UpperCamelCase__ :Dict = x[sorted_idx] UpperCamelCase__ :Tuple = sorted_values[:, :, : self.action_dim] UpperCamelCase__ :Optional[Any] = actions.detach().cpu().numpy() UpperCamelCase__ :Optional[int] = self.de_normalize(lowerCamelCase__ , key="""actions""" ) # select the action with the highest value if y is not None: UpperCamelCase__ :List[str] = 0 else: # if we didn't run value guiding, select a random action UpperCamelCase__ :Dict = np.random.randint(0 , lowerCamelCase__ ) UpperCamelCase__ :Tuple = denorm_actions[selected_index, 0] return denorm_actions
45
0
"""simple docstring""" import argparse import re import requests import torch # git clone https://github.com/salesforce/BLIP.git from models.blip import blip_decoder from models.blip_itm import blip_itm from models.blip_vqa import blip_vqa from PIL import Image from torchvision import transforms from torchvision.transforms.functional import InterpolationMode from transformers import ( BertTokenizer, BlipConfig, BlipForConditionalGeneration, BlipForImageTextRetrieval, BlipForQuestionAnswering, ) def _snake_case ( snake_case__ : Tuple , snake_case__ : int ): A = 'https://storage.googleapis.com/sfr-vision-language-research/BLIP/demo.jpg' A = Image.open(requests.get(snake_case__ , stream=snake_case__ ).raw ).convert('RGB' ) A = transforms.Compose( [ transforms.Resize((image_size, image_size) , interpolation=InterpolationMode.BICUBIC ), transforms.ToTensor(), transforms.Normalize((0.48145466, 0.4578275, 0.40821073) , (0.26862954, 0.26130258, 0.27577711) ), ] ) A = transform(snake_case__ ).unsqueeze(0 ).to(snake_case__ ) return image def _snake_case ( snake_case__ : Optional[Any] ): if "visual_encoder" in key: A = re.sub('visual_encoder*' , 'vision_model.encoder' , snake_case__ ) if "blocks" in key: A = re.sub(r'blocks' , 'layers' , snake_case__ ) if "attn" in key: A = re.sub(r'attn' , 'self_attn' , snake_case__ ) if "norm1" in key: A = re.sub(r'norm1' , 'layer_norm1' , snake_case__ ) if "norm2" in key: A = re.sub(r'norm2' , 'layer_norm2' , snake_case__ ) if "encoder.norm" in key: A = re.sub(r'encoder.norm' , 'post_layernorm' , snake_case__ ) if "encoder.patch_embed.proj" in key: A = re.sub(r'encoder.patch_embed.proj' , 'embeddings.patch_embedding' , snake_case__ ) if "encoder.pos_embed" in key: A = re.sub(r'encoder.pos_embed' , 'embeddings.position_embedding' , snake_case__ ) if "encoder.cls_token" in key: A = re.sub(r'encoder.cls_token' , 'embeddings.class_embedding' , snake_case__ ) if "self_attn" in key: A = re.sub(r'self_attn.proj' , 'self_attn.projection' , snake_case__ ) return key @torch.no_grad() def _snake_case ( snake_case__ : Any , snake_case__ : str=None ): if config_path is not None: A = BlipConfig.from_pretrained(snake_case__ ) else: A = BlipConfig(projection_dim=512 , text_config={} , vision_config={} ) A = BlipForConditionalGeneration(snake_case__ ).eval() A = 'https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_base_capfilt_large.pth' A = blip_decoder(pretrained=snake_case__ , image_size=384 , vit='base' ) A = pt_model.eval() A = pt_model.state_dict() for key in modified_state_dict.copy(): A = modified_state_dict.pop(snake_case__ ) A = rename_key(snake_case__ ) A = value hf_model.load_state_dict(snake_case__ ) A = 384 A = load_demo_image(image_size=snake_case__ , device='cpu' ) A = BertTokenizer.from_pretrained('bert-base-uncased' ) A = tokenizer(['a picture of'] ).input_ids A = hf_model.generate(snake_case__ , snake_case__ ) assert out[0].tolist() == [3_0522, 1037, 3861, 1997, 1037, 2450, 3564, 2006, 1996, 3509, 2007, 2014, 3899, 102] A = hf_model.generate(snake_case__ ) assert out[0].tolist() == [3_0522, 1037, 2450, 3564, 2006, 1996, 3509, 2007, 2014, 3899, 102] if pytorch_dump_folder_path is not None: hf_model.save_pretrained(snake_case__ ) # model_url = 'https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_vqa.pth' A = ( 'https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_base_vqa_capfilt_large.pth' ) A = blip_vqa(pretrained=snake_case__ , image_size=snake_case__ , vit='base' ) vqa_model.eval() A 
= vqa_model.state_dict() for key in modified_state_dict.copy(): A = modified_state_dict.pop(snake_case__ ) A = rename_key(snake_case__ ) A = value A = BlipForQuestionAnswering(snake_case__ ) hf_vqa_model.load_state_dict(snake_case__ ) A = ['How many dogs are in this image?'] A = tokenizer(snake_case__ , return_tensors='pt' ).input_ids A = hf_vqa_model.generate(snake_case__ , snake_case__ ) print(tokenizer.decode(answer[0] ) ) assert tokenizer.decode(answer[0] ) == "[UNK] 1 [SEP]" if pytorch_dump_folder_path is not None: hf_vqa_model.save_pretrained(pytorch_dump_folder_path + '_vqa' ) A = 'https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_base_retrieval_coco.pth' A = blip_itm(pretrained=snake_case__ , image_size=snake_case__ , vit='base' ) itm_model.eval() A = itm_model.state_dict() for key in modified_state_dict.copy(): A = modified_state_dict.pop(snake_case__ ) A = rename_key(snake_case__ ) A = value A = BlipForImageTextRetrieval(snake_case__ ) A = ['A picture of a woman with a dog sitting in a beach'] A = tokenizer( snake_case__ , return_tensors='pt' , padding='max_length' , truncation=snake_case__ , max_length=35 , ).input_ids hf_itm_model.load_state_dict(snake_case__ ) hf_itm_model.eval() A = hf_itm_model(snake_case__ , snake_case__ , use_itm_head=snake_case__ ) A = hf_itm_model(snake_case__ , snake_case__ , use_itm_head=snake_case__ ) assert out[0].item() == 0.2110687494277954 assert torch.nn.functional.softmax(out_itm[0] , dim=1 )[:, 1].item() == 0.45698845386505127 if pytorch_dump_folder_path is not None: hf_itm_model.save_pretrained(pytorch_dump_folder_path + '_itm' ) if __name__ == "__main__": _lowercase = argparse.ArgumentParser() parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''') parser.add_argument('''--config_path''', default=None, type=str, help='''Path to hf config.json of model to convert''') _lowercase = parser.parse_args() convert_blip_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path)
91
def is_palindrome(num: int) -> bool:
    if num < 0:
        return False

    num_copy = num
    rev_num = 0
    while num > 0:
        rev_num = rev_num * 10 + (num % 10)
        num //= 10

    return num_copy == rev_num


if __name__ == "__main__":
    import doctest

    doctest.testmod()
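# A quick usage sketch (added for illustration) for is_palindrome above.
assert is_palindrome(121)
assert not is_palindrome(-121)  # negatives are rejected up front
assert not is_palindrome(10)  # 10 reverses to 1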
45
0
'''simple docstring''' from typing import List, Union import numpy as np from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging, requires_backends from .base import PIPELINE_INIT_ARGS, Pipeline if is_vision_available(): from PIL import Image from ..image_utils import load_image if is_torch_available(): import torch from ..models.auto.modeling_auto import MODEL_FOR_DEPTH_ESTIMATION_MAPPING UpperCamelCase_ = logging.get_logger(__name__) @add_end_docstrings(lowercase__ ) class __SCREAMING_SNAKE_CASE ( lowercase__ ): def __init__( self : Optional[int] , *UpperCAmelCase__ : int , **UpperCAmelCase__ : Optional[int] ): '''simple docstring''' super().__init__(*UpperCAmelCase__ , **UpperCAmelCase__ ) requires_backends(self , '''vision''' ) self.check_model_type(UpperCAmelCase__ ) def __call__( self : Union[str, Any] , UpperCAmelCase__ : Union[str, List[str], "Image.Image", List["Image.Image"]] , **UpperCAmelCase__ : List[Any] ): '''simple docstring''' return super().__call__(UpperCAmelCase__ , **UpperCAmelCase__ ) def lowerCamelCase_ ( self : List[str] , **UpperCAmelCase__ : str ): '''simple docstring''' return {}, {}, {} def lowerCamelCase_ ( self : Dict , UpperCAmelCase__ : Tuple ): '''simple docstring''' lowercase : Union[str, Any] =load_image(UpperCAmelCase__ ) lowercase : Optional[int] =image.size lowercase : Optional[int] =self.image_processor(images=UpperCAmelCase__ , return_tensors=self.framework ) return model_inputs def lowerCamelCase_ ( self : Any , UpperCAmelCase__ : Tuple ): '''simple docstring''' lowercase : Dict =self.model(**UpperCAmelCase__ ) return model_outputs def lowerCamelCase_ ( self : List[Any] , UpperCAmelCase__ : str ): '''simple docstring''' lowercase : int =model_outputs.predicted_depth lowercase : Any =torch.nn.functional.interpolate( predicted_depth.unsqueeze(1 ) , size=self.image_size[::-1] , mode='''bicubic''' , align_corners=UpperCAmelCase__ ) lowercase : int =prediction.squeeze().cpu().numpy() lowercase : Tuple =(output * 255 / np.max(UpperCAmelCase__ )).astype('''uint8''' ) lowercase : Any =Image.fromarray(UpperCAmelCase__ ) lowercase : str ={} lowercase : Dict =predicted_depth lowercase : Tuple =depth return output_dict
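# A usage sketch (added for illustration; the checkpoint name and image URL are
# assumptions, not taken from this row). The depth-estimation pipeline above is
# normally reached through the high-level pipeline() factory; per its
# postprocess step, the result is a dict with a "depth" PIL image and the raw
# "predicted_depth" tensor.
from transformers import pipeline

depth_estimator = pipeline("depth-estimation", model="Intel/dpt-large")
outputs = depth_estimator("http://images.cocodataset.org/val2017/000000039769.jpg")
print(outputs["depth"].size, outputs["predicted_depth"].shape)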
92
from __future__ import annotations


def all_unique(values: list[int]) -> bool:
    return len(set(values)) == len(values)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
45
0
"""simple docstring""" import random class _lowerCAmelCase : """simple docstring""" @staticmethod def snake_case ( __UpperCAmelCase ): '''simple docstring''' lowerCAmelCase__ :Any = [ord(__UpperCAmelCase ) for i in text] lowerCAmelCase__ :List[Any] = [] lowerCAmelCase__ :Optional[Any] = [] for i in plain: lowerCAmelCase__ :List[Any] = random.randint(1 , 3_0_0 ) lowerCAmelCase__ :List[Any] = (i + k) * k cipher.append(__UpperCAmelCase ) key.append(__UpperCAmelCase ) return cipher, key @staticmethod def snake_case ( __UpperCAmelCase , __UpperCAmelCase ): '''simple docstring''' lowerCAmelCase__ :Tuple = [] for i in range(len(__UpperCAmelCase ) ): lowerCAmelCase__ :Optional[Any] = int((cipher[i] - (key[i]) ** 2) / key[i] ) plain.append(chr(__UpperCAmelCase ) ) return "".join(__UpperCAmelCase ) if __name__ == "__main__": __A , __A = Onepad().encrypt("""Hello""") print(c, k) print(Onepad().decrypt(c, k))
93
from __future__ import annotations


class XORCipher:
    def __init__(self, key: int = 0):
        self.__key = key

    def encrypt(self, content: str, key: int) -> list[str]:
        assert isinstance(key, int) and isinstance(content, str)
        key = key or self.__key or 1
        # make sure key is an appropriate size
        key %= 255
        return [chr(ord(ch) ^ key) for ch in content]

    def decrypt(self, content: list[str], key: int) -> list[str]:
        assert isinstance(key, int) and isinstance(content, list)
        key = key or self.__key or 1
        # make sure key is an appropriate size
        key %= 255
        return [chr(ord(ch) ^ key) for ch in content]

    def encrypt_string(self, content: str, key: int = 0) -> str:
        assert isinstance(key, int) and isinstance(content, str)
        key = key or self.__key or 1
        # make sure key can be any size
        while key > 255:
            key -= 255
        # This will be returned
        ans = ""
        for ch in content:
            ans += chr(ord(ch) ^ key)
        return ans

    def decrypt_string(self, content: str, key: int = 0) -> str:
        assert isinstance(key, int) and isinstance(content, str)
        key = key or self.__key or 1
        # make sure key can be any size
        while key > 255:
            key -= 255
        # This will be returned
        ans = ""
        for ch in content:
            ans += chr(ord(ch) ^ key)
        return ans

    def encrypt_file(self, file: str, key: int = 0) -> bool:
        assert isinstance(file, str) and isinstance(key, int)
        try:
            with open(file) as fin, open("encrypt.out", "w+") as fout:
                # actual encrypt-process
                for line in fin:
                    fout.write(self.encrypt_string(line, key))
        except OSError:
            return False
        return True

    def decrypt_file(self, file: str, key: int) -> bool:
        assert isinstance(file, str) and isinstance(key, int)
        try:
            with open(file) as fin, open("decrypt.out", "w+") as fout:
                # actual decrypt-process
                for line in fin:
                    fout.write(self.decrypt_string(line, key))
        except OSError:
            return False
        return True


# Tests
# crypt = XORCipher()
# key = 67
# # test encrypt
# print(crypt.encrypt("hallo welt", key))
# # test decrypt
# print(crypt.decrypt(crypt.encrypt("hallo welt", key), key))
# # test encrypt_string
# print(crypt.encrypt_string("hallo welt", key))
# # test decrypt_string
# print(crypt.decrypt_string(crypt.encrypt_string("hallo welt", key), key))
# if crypt.encrypt_file("test.txt", key):
#     print("encrypt successful")
# else:
#     print("encrypt unsuccessful")
# if crypt.decrypt_file("encrypt.out", key):
#     print("decrypt successful")
# else:
#     print("decrypt unsuccessful")
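# A minimal sketch (added for illustration) of the XOR involution the cipher
# relies on: applying the same key twice returns the original text, which is
# why encrypt and decrypt above share the same chr(ord(ch) ^ key) core.
key = 67
encrypted = [chr(ord(ch) ^ key) for ch in "hallo welt"]
assert "".join(chr(ord(ch) ^ key) for ch in encrypted) == "hallo welt"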
45
0
'''simple docstring''' from sklearn.metrics import fa_score import datasets SCREAMING_SNAKE_CASE = '\nThe F1 score is the harmonic mean of the precision and recall. It can be computed with the equation:\nF1 = 2 * (precision * recall) / (precision + recall)\n' SCREAMING_SNAKE_CASE = '\nArgs:\n predictions (`list` of `int`): Predicted labels.\n references (`list` of `int`): Ground truth labels.\n labels (`list` of `int`): The set of labels to include when `average` is not set to `\'binary\'`, and the order of the labels if `average` is `None`. Labels present in the data can be excluded, for example to calculate a multiclass average ignoring a majority negative class. Labels not present in the data will result in 0 components in a macro average. For multilabel targets, labels are column indices. By default, all labels in `predictions` and `references` are used in sorted order. Defaults to None.\n pos_label (`int`): The class to be considered the positive class, in the case where `average` is set to `binary`. Defaults to 1.\n average (`string`): This parameter is required for multiclass/multilabel targets. If set to `None`, the scores for each class are returned. Otherwise, this determines the type of averaging performed on the data. Defaults to `\'binary\'`.\n\n - \'binary\': Only report results for the class specified by `pos_label`. This is applicable only if the classes found in `predictions` and `references` are binary.\n - \'micro\': Calculate metrics globally by counting the total true positives, false negatives and false positives.\n - \'macro\': Calculate metrics for each label, and find their unweighted mean. This does not take label imbalance into account.\n - \'weighted\': Calculate metrics for each label, and find their average weighted by support (the number of true instances for each label). This alters `\'macro\'` to account for label imbalance. This option can result in an F-score that is not between precision and recall.\n - \'samples\': Calculate metrics for each instance, and find their average (only meaningful for multilabel classification).\n sample_weight (`list` of `float`): Sample weights Defaults to None.\n\nReturns:\n f1 (`float` or `array` of `float`): F1 score or list of f1 scores, depending on the value passed to `average`. Minimum possible value is 0. Maximum possible value is 1. 
Higher f1 scores are better.\n\nExamples:\n\n Example 1-A simple binary example\n >>> f1_metric = datasets.load_metric("f1")\n >>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0])\n >>> print(results)\n {\'f1\': 0.5}\n\n Example 2-The same simple binary example as in Example 1, but with `pos_label` set to `0`.\n >>> f1_metric = datasets.load_metric("f1")\n >>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0], pos_label=0)\n >>> print(round(results[\'f1\'], 2))\n 0.67\n\n Example 3-The same simple binary example as in Example 1, but with `sample_weight` included.\n >>> f1_metric = datasets.load_metric("f1")\n >>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0], sample_weight=[0.9, 0.5, 3.9, 1.2, 0.3])\n >>> print(round(results[\'f1\'], 2))\n 0.35\n\n Example 4-A multiclass example, with different values for the `average` input.\n >>> predictions = [0, 2, 1, 0, 0, 1]\n >>> references = [0, 1, 2, 0, 1, 2]\n >>> results = f1_metric.compute(predictions=predictions, references=references, average="macro")\n >>> print(round(results[\'f1\'], 2))\n 0.27\n >>> results = f1_metric.compute(predictions=predictions, references=references, average="micro")\n >>> print(round(results[\'f1\'], 2))\n 0.33\n >>> results = f1_metric.compute(predictions=predictions, references=references, average="weighted")\n >>> print(round(results[\'f1\'], 2))\n 0.27\n >>> results = f1_metric.compute(predictions=predictions, references=references, average=None)\n >>> print(results)\n {\'f1\': array([0.8, 0. , 0. ])}\n' SCREAMING_SNAKE_CASE = '\n@article{scikit-learn,\n title={Scikit-learn: Machine Learning in {P}ython},\n author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V.\n and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P.\n and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and\n Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.},\n journal={Journal of Machine Learning Research},\n volume={12},\n pages={2825--2830},\n year={2011}\n}\n' @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class UpperCAmelCase_ ( datasets.Metric ): """simple docstring""" def A__ ( self : int ) -> int: '''simple docstring''' return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { '''predictions''': datasets.Sequence(datasets.Value('''int32''' ) ), '''references''': datasets.Sequence(datasets.Value('''int32''' ) ), } if self.config_name == '''multilabel''' else { '''predictions''': datasets.Value('''int32''' ), '''references''': datasets.Value('''int32''' ), } ) , reference_urls=['''https://scikit-learn.org/stable/modules/generated/sklearn.metrics.f1_score.html'''] , ) def A__ ( self : Union[str, Any] , UpperCAmelCase : Any , UpperCAmelCase : Union[str, Any] , UpperCAmelCase : Optional[Any]=None , UpperCAmelCase : str=1 , UpperCAmelCase : List[Any]="binary" , UpperCAmelCase : str=None ) -> Optional[int]: '''simple docstring''' lowercase : Union[str, Any] =fa_score( UpperCAmelCase , UpperCAmelCase , labels=UpperCAmelCase , pos_label=UpperCAmelCase , average=UpperCAmelCase , sample_weight=UpperCAmelCase ) return {"f1": float(UpperCAmelCase ) if score.size == 1 else score}
94
import random


def partition(a: list, left_index: int, right_index: int) -> int:
    pivot = a[left_index]
    i = left_index + 1
    for j in range(left_index + 1, right_index):
        if a[j] < pivot:
            a[i], a[j] = a[j], a[i]
            i += 1
    a[i - 1], a[left_index] = a[left_index], a[i - 1]
    return i - 1


def quick_sort_random(a: list, left: int, right: int) -> None:
    if left < right:
        pivot = random.randint(left, right - 1)
        # switches the pivot with the leftmost bound
        a[left], a[pivot] = a[pivot], a[left]
        pivot_index = partition(a, left, right)
        # recursive quicksort to the left of the pivot point
        quick_sort_random(a, left, pivot_index)
        # recursive quicksort to the right of the pivot point
        quick_sort_random(a, pivot_index + 1, right)


def main() -> None:
    user_input = input("Enter numbers separated by a comma:\n").strip()
    arr = [int(item) for item in user_input.split(",")]
    quick_sort_random(arr, 0, len(arr))
    print(arr)


if __name__ == "__main__":
    main()
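# A usage sketch (added for illustration): the randomized pivot keeps the
# expected O(n log n) behavior even on already-sorted input, where a fixed
# leftmost pivot would degrade to O(n^2) comparisons.
data = [5, 3, 8, 1, 9, 2]
quick_sort_random(data, 0, len(data))
assert data == [1, 2, 3, 5, 8, 9]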
45
0
"""simple docstring""" import os from typing import Optional import fsspec from fsspec.archive import AbstractArchiveFileSystem from fsspec.utils import DEFAULT_BLOCK_SIZE class UpperCamelCase_ (__A ): __magic_name__ = '''''' __magic_name__ = ( None # protocol passed in prefix to the url. ex: "gzip", for gzip://file.txt::http://foo.bar/file.txt.gz ) __magic_name__ = None # compression type in fsspec. ex: "gzip" __magic_name__ = None # extension of the filename to strip. ex: "".gz" to get file.txt from file.txt.gz def __init__( self : List[Any] , lowerCAmelCase_ : str = "" , lowerCAmelCase_ : Optional[str] = None , lowerCAmelCase_ : Optional[dict] = None , **lowerCAmelCase_ : List[str] ) -> List[Any]: super().__init__(self , **lowerCAmelCase_ ) # always open as "rb" since fsspec can then use the TextIOWrapper to make it work for "r" mode UpperCAmelCase_ : Dict = fsspec.open( lowerCAmelCase_ , mode="rb" , protocol=lowerCAmelCase_ , compression=self.compression , client_kwargs={ "requote_redirect_url": False, # see https://github.com/huggingface/datasets/pull/5459 "trust_env": True, # Enable reading proxy env variables. **(target_options or {}).pop("client_kwargs" , {} ), # To avoid issues if it was already passed. } , **(target_options or {}) , ) UpperCAmelCase_ : Optional[int] = os.path.basename(self.file.path.split("::" )[0] ) UpperCAmelCase_ : Any = ( self.compressed_name[: self.compressed_name.rindex("." )] if "." in self.compressed_name else self.compressed_name ) UpperCAmelCase_ : Any = None @classmethod def _SCREAMING_SNAKE_CASE ( cls : Optional[Any] , lowerCAmelCase_ : List[str] ) -> Any: # compressed file paths are always relative to the archive root return super()._strip_protocol(lowerCAmelCase_ ).lstrip("/" ) def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> List[str]: if self.dir_cache is None: UpperCAmelCase_ : Optional[Any] = {**self.file.fs.info(self.file.path ), "name": self.uncompressed_name} UpperCAmelCase_ : str = {f["name"]: f} def _SCREAMING_SNAKE_CASE ( self : int , lowerCAmelCase_ : str ) -> Any: return self.file.open().read() def _SCREAMING_SNAKE_CASE ( self : Optional[int] , lowerCAmelCase_ : str , lowerCAmelCase_ : str = "rb" , lowerCAmelCase_ : List[str]=None , lowerCAmelCase_ : List[str]=True , lowerCAmelCase_ : int=None , **lowerCAmelCase_ : Tuple , ) -> Tuple: UpperCAmelCase_ : List[str] = self._strip_protocol(lowerCAmelCase_ ) if mode != "rb": raise ValueError(f"""Tried to read with mode {mode} on file {self.file.path} opened with mode 'rb'""" ) return self.file.open() class UpperCamelCase_ (__A ): __magic_name__ = '''bz2''' __magic_name__ = '''bz2''' __magic_name__ = '''.bz2''' class UpperCamelCase_ (__A ): __magic_name__ = '''gzip''' __magic_name__ = '''gzip''' __magic_name__ = '''.gz''' class UpperCamelCase_ (__A ): __magic_name__ = '''lz4''' __magic_name__ = '''lz4''' __magic_name__ = '''.lz4''' class UpperCamelCase_ (__A ): __magic_name__ = '''xz''' __magic_name__ = '''xz''' __magic_name__ = '''.xz''' class UpperCamelCase_ (__A ): __magic_name__ = '''zstd''' __magic_name__ = '''zstd''' __magic_name__ = '''.zst''' def __init__( self : Tuple , lowerCAmelCase_ : str , lowerCAmelCase_ : str = "rb" , lowerCAmelCase_ : Optional[str] = None , lowerCAmelCase_ : Optional[dict] = None , lowerCAmelCase_ : int = DEFAULT_BLOCK_SIZE , **lowerCAmelCase_ : List[Any] , ) -> Dict: super().__init__( fo=lowerCAmelCase_ , mode=lowerCAmelCase_ , target_protocol=lowerCAmelCase_ , target_options=lowerCAmelCase_ , block_size=lowerCAmelCase_ , **lowerCAmelCase_ , ) # We need 
to wrap the zstd decompressor to avoid this error in fsspec==2021.7.0 and zstandard==0.15.2: # # File "/Users/user/.virtualenvs/hf-datasets/lib/python3.7/site-packages/fsspec/core.py", line 145, in open # out.close = close # AttributeError: 'zstd.ZstdDecompressionReader' object attribute 'close' is read-only # # see https://github.com/intake/filesystem_spec/issues/725 UpperCAmelCase_ : Optional[Any] = self.file.__enter__ class UpperCamelCase_ : def __init__( self : Tuple , lowerCAmelCase_ : List[Any] ) -> List[Any]: UpperCAmelCase_ : Optional[int] = file_ def __enter__( self : Tuple ) -> List[Any]: self._file.__enter__() return self def __exit__( self : Union[str, Any] , *lowerCAmelCase_ : int , **lowerCAmelCase_ : int ) -> Optional[int]: self._file.__exit__(*lowerCAmelCase_ , **lowerCAmelCase_ ) def __iter__( self : Optional[int] ) -> int: return iter(self._file ) def _SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> int: return next(self._file ) def __getattr__( self : Optional[int] , lowerCAmelCase_ : Optional[Any] ) -> List[Any]: return getattr(self._file , lowerCAmelCase_ ) def fixed_enter(*lowerCAmelCase_ : int , **lowerCAmelCase_ : Optional[Any] ): return WrappedFile(_enter(*lowerCAmelCase_ , **lowerCAmelCase_ ) ) UpperCAmelCase_ : List[Any] = fixed_enter
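# A minimal sketch (added for illustration; the local path example.txt.gz is
# an assumption) of the behavior the filesystem classes above wrap behind
# protocols such as gzip:// and bz2://: fsspec can open a compressed file
# transparently via its compression argument.
import fsspec

with fsspec.open("example.txt.gz", mode="rt", compression="gzip") as f:
    print(f.read())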
95
from ...configuration_utils import PretrainedConfig from ...utils import logging from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices UpperCamelCase = logging.get_logger(__name__) UpperCamelCase = { "shi-labs/dinat-mini-in1k-224": "https://huggingface.co/shi-labs/dinat-mini-in1k-224/resolve/main/config.json", # See all Dinat models at https://huggingface.co/models?filter=dinat } class lowerCAmelCase_ ( lowercase , lowercase ): """simple docstring""" _snake_case : Tuple = """dinat""" _snake_case : List[Any] = { """num_attention_heads""": """num_heads""", """num_hidden_layers""": """num_layers""", } def __init__( self :Optional[int] , lowerCamelCase__ :int=4 , lowerCamelCase__ :Union[str, Any]=3 , lowerCamelCase__ :List[Any]=64 , lowerCamelCase__ :Any=[3, 4, 6, 5] , lowerCamelCase__ :Tuple=[2, 4, 8, 16] , lowerCamelCase__ :Optional[int]=7 , lowerCamelCase__ :Tuple=[[1, 8, 1], [1, 4, 1, 4], [1, 2, 1, 2, 1, 2], [1, 1, 1, 1, 1]] , lowerCamelCase__ :Tuple=3.0 , lowerCamelCase__ :str=True , lowerCamelCase__ :Optional[int]=0.0 , lowerCamelCase__ :Optional[Any]=0.0 , lowerCamelCase__ :int=0.1 , lowerCamelCase__ :Optional[Any]="gelu" , lowerCamelCase__ :Optional[Any]=0.02 , lowerCamelCase__ :Union[str, Any]=1e-5 , lowerCamelCase__ :Optional[int]=0.0 , lowerCamelCase__ :List[str]=None , lowerCamelCase__ :str=None , **lowerCamelCase__ :List[Any] , ): super().__init__(**lowerCamelCase__ ) UpperCamelCase__ :Any = patch_size UpperCamelCase__ :Any = num_channels UpperCamelCase__ :int = embed_dim UpperCamelCase__ :Optional[Any] = depths UpperCamelCase__ :Any = len(lowerCamelCase__ ) UpperCamelCase__ :str = num_heads UpperCamelCase__ :Optional[int] = kernel_size UpperCamelCase__ :Optional[int] = dilations UpperCamelCase__ :Tuple = mlp_ratio UpperCamelCase__ :Dict = qkv_bias UpperCamelCase__ :List[str] = hidden_dropout_prob UpperCamelCase__ :List[str] = attention_probs_dropout_prob UpperCamelCase__ :Union[str, Any] = drop_path_rate UpperCamelCase__ :Tuple = hidden_act UpperCamelCase__ :List[Any] = layer_norm_eps UpperCamelCase__ :Optional[Any] = initializer_range # we set the hidden_size attribute in order to make Dinat work with VisionEncoderDecoderModel # this indicates the channel dimension after the last stage of the model UpperCamelCase__ :Tuple = int(embed_dim * 2 ** (len(lowerCamelCase__ ) - 1) ) UpperCamelCase__ :Tuple = layer_scale_init_value UpperCamelCase__ :Optional[int] = ["""stem"""] + [f"""stage{idx}""" for idx in range(1 , len(lowerCamelCase__ ) + 1 )] UpperCamelCase__ , UpperCamelCase__ :List[str] = get_aligned_output_features_output_indices( out_features=lowerCamelCase__ , out_indices=lowerCamelCase__ , stage_names=self.stage_names )
45
0
"""simple docstring""" from ...configuration_utils import PretrainedConfig from ...utils import logging __lowerCamelCase = logging.get_logger(__name__) __lowerCamelCase = { 'sayakpaul/vit-msn-base': 'https://huggingface.co/sayakpaul/vit-msn-base/resolve/main/config.json', # See all ViT MSN models at https://huggingface.co/models?filter=vit_msn } class __A ( SCREAMING_SNAKE_CASE_ ): UpperCAmelCase__ = "vit_msn" def __init__( self : Optional[int] , __snake_case : Optional[Any]=7_6_8 , __snake_case : Dict=1_2 , __snake_case : int=1_2 , __snake_case : Optional[int]=3_0_7_2 , __snake_case : Any="gelu" , __snake_case : str=0.0 , __snake_case : List[Any]=0.0 , __snake_case : str=0.02 , __snake_case : Optional[int]=1E-06 , __snake_case : List[Any]=2_2_4 , __snake_case : int=1_6 , __snake_case : List[Any]=3 , __snake_case : List[Any]=True , **__snake_case : Optional[int] , ) -> List[Any]: super().__init__(**__snake_case ) __magic_name__: int = hidden_size __magic_name__: int = num_hidden_layers __magic_name__: Tuple = num_attention_heads __magic_name__: List[str] = intermediate_size __magic_name__: List[Any] = hidden_act __magic_name__: Optional[int] = hidden_dropout_prob __magic_name__: List[Any] = attention_probs_dropout_prob __magic_name__: Optional[int] = initializer_range __magic_name__: Tuple = layer_norm_eps __magic_name__: Dict = image_size __magic_name__: Union[str, Any] = patch_size __magic_name__: Optional[Any] = num_channels __magic_name__: str = qkv_bias
96
def nor_gate(input_1: int, input_2: int) -> int:
    return int(input_1 == input_2 == 0)


def main() -> None:
    print("Truth Table of NOR Gate:")
    print("| Input 1 | Input 2 | Output |")
    print(f"|    0    |    0    |   {nor_gate(0, 0)}    |")
    print(f"|    0    |    1    |   {nor_gate(0, 1)}    |")
    print(f"|    1    |    0    |   {nor_gate(1, 0)}    |")
    print(f"|    1    |    1    |   {nor_gate(1, 1)}    |")


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    main()
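# A small check (added for illustration): NOR is functionally complete, e.g.
# NOT(x) can be built as NOR(x, x).
assert all(nor_gate(x, x) == int(not x) for x in (0, 1))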
45
0
import copy import os from typing import Union from ...configuration_utils import PretrainedConfig from ...utils import logging __a = logging.get_logger(__name__) __a = { 'microsoft/git-base': 'https://huggingface.co/microsoft/git-base/resolve/main/config.json', } class lowercase__( UpperCAmelCase ): """simple docstring""" a :List[Any] = 'git_vision_model' def __init__( self : List[str] , SCREAMING_SNAKE_CASE_ : Optional[Any]=7_6_8 , SCREAMING_SNAKE_CASE_ : Optional[Any]=3_0_7_2 , SCREAMING_SNAKE_CASE_ : List[Any]=1_2 , SCREAMING_SNAKE_CASE_ : Tuple=1_2 , SCREAMING_SNAKE_CASE_ : Optional[Any]=3 , SCREAMING_SNAKE_CASE_ : Dict=2_2_4 , SCREAMING_SNAKE_CASE_ : Union[str, Any]=1_6 , SCREAMING_SNAKE_CASE_ : List[Any]="quick_gelu" , SCREAMING_SNAKE_CASE_ : Optional[int]=1e-5 , SCREAMING_SNAKE_CASE_ : Union[str, Any]=0.0 , SCREAMING_SNAKE_CASE_ : Any=0.02 , **SCREAMING_SNAKE_CASE_ : Optional[int] , ) -> int: super().__init__(**SCREAMING_SNAKE_CASE_ ) lowercase_ = hidden_size lowercase_ = intermediate_size lowercase_ = num_hidden_layers lowercase_ = num_attention_heads lowercase_ = num_channels lowercase_ = patch_size lowercase_ = image_size lowercase_ = initializer_range lowercase_ = attention_dropout lowercase_ = layer_norm_eps lowercase_ = hidden_act @classmethod def _lowercase ( cls : Any , SCREAMING_SNAKE_CASE_ : Union[str, os.PathLike] , **SCREAMING_SNAKE_CASE_ : str ) -> "PretrainedConfig": cls._set_token_in_kwargs(SCREAMING_SNAKE_CASE_ ) lowercase_ , lowercase_ = cls.get_config_dict(SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ) # get the vision config dict if we are loading from GITConfig if config_dict.get('''model_type''' ) == "git": lowercase_ = config_dict['''vision_config'''] if "model_type" in config_dict and hasattr(cls , '''model_type''' ) and config_dict["model_type"] != cls.model_type: logger.warning( f'''You are using a model of type {config_dict['model_type']} to instantiate a model of type ''' f'''{cls.model_type}. This is not supported for all configurations of models and can yield errors.''' ) return cls.from_dict(SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ) class lowercase__( UpperCAmelCase ): """simple docstring""" a :int = 'git' def __init__( self : Union[str, Any] , SCREAMING_SNAKE_CASE_ : Optional[int]=None , SCREAMING_SNAKE_CASE_ : Union[str, Any]=3_0_5_2_2 , SCREAMING_SNAKE_CASE_ : Union[str, Any]=7_6_8 , SCREAMING_SNAKE_CASE_ : Tuple=6 , SCREAMING_SNAKE_CASE_ : Union[str, Any]=1_2 , SCREAMING_SNAKE_CASE_ : List[Any]=3_0_7_2 , SCREAMING_SNAKE_CASE_ : Optional[Any]="gelu" , SCREAMING_SNAKE_CASE_ : str=0.1 , SCREAMING_SNAKE_CASE_ : int=0.1 , SCREAMING_SNAKE_CASE_ : str=1_0_2_4 , SCREAMING_SNAKE_CASE_ : List[str]=0.02 , SCREAMING_SNAKE_CASE_ : List[Any]=1e-12 , SCREAMING_SNAKE_CASE_ : Dict=0 , SCREAMING_SNAKE_CASE_ : Optional[Any]="absolute" , SCREAMING_SNAKE_CASE_ : Any=True , SCREAMING_SNAKE_CASE_ : Tuple=False , SCREAMING_SNAKE_CASE_ : Union[str, Any]=1_0_1 , SCREAMING_SNAKE_CASE_ : Optional[int]=1_0_2 , SCREAMING_SNAKE_CASE_ : Any=None , **SCREAMING_SNAKE_CASE_ : Union[str, Any] , ) -> str: super().__init__(bos_token_id=SCREAMING_SNAKE_CASE_ , eos_token_id=SCREAMING_SNAKE_CASE_ , pad_token_id=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ) if vision_config is None: lowercase_ = {} logger.info('''vision_config is None. 
initializing the GitVisionConfig with default values.''' ) lowercase_ = GitVisionConfig(**SCREAMING_SNAKE_CASE_ ) lowercase_ = vocab_size lowercase_ = hidden_size lowercase_ = num_hidden_layers lowercase_ = num_attention_heads lowercase_ = hidden_act lowercase_ = intermediate_size lowercase_ = hidden_dropout_prob lowercase_ = attention_probs_dropout_prob lowercase_ = max_position_embeddings lowercase_ = initializer_range lowercase_ = layer_norm_eps lowercase_ = position_embedding_type lowercase_ = use_cache lowercase_ = tie_word_embeddings lowercase_ = num_image_with_embedding lowercase_ = bos_token_id lowercase_ = eos_token_id def _lowercase ( self : Dict ) -> List[Any]: lowercase_ = copy.deepcopy(self.__dict__ ) lowercase_ = self.vision_config.to_dict() lowercase_ = self.__class__.model_type return output
97
import unittest import numpy as np from transformers.testing_utils import require_torch, require_vision from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import GLPNImageProcessor class lowerCAmelCase_ ( unittest.TestCase ): """simple docstring""" def __init__( self :Any , lowerCamelCase__ :Dict , lowerCamelCase__ :Optional[Any]=7 , lowerCamelCase__ :str=3 , lowerCamelCase__ :Optional[Any]=18 , lowerCamelCase__ :List[str]=30 , lowerCamelCase__ :str=4_00 , lowerCamelCase__ :Optional[int]=True , lowerCamelCase__ :Union[str, Any]=32 , lowerCamelCase__ :int=True , ): UpperCamelCase__ :List[Any] = parent UpperCamelCase__ :List[Any] = batch_size UpperCamelCase__ :Any = num_channels UpperCamelCase__ :List[str] = image_size UpperCamelCase__ :Dict = min_resolution UpperCamelCase__ :List[str] = max_resolution UpperCamelCase__ :str = do_resize UpperCamelCase__ :int = size_divisor UpperCamelCase__ :Optional[int] = do_rescale def __a ( self :str ): return { "do_resize": self.do_resize, "size_divisor": self.size_divisor, "do_rescale": self.do_rescale, } @require_torch @require_vision class lowerCAmelCase_ ( lowercase , unittest.TestCase ): """simple docstring""" _snake_case : Optional[int] = GLPNImageProcessor if is_vision_available() else None def __a ( self :Dict ): UpperCamelCase__ :Dict = GLPNImageProcessingTester(self ) @property def __a ( self :List[str] ): return self.image_processor_tester.prepare_image_processor_dict() def __a ( self :Optional[int] ): UpperCamelCase__ :Optional[Any] = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(lowerCamelCase__ , """do_resize""" ) ) self.assertTrue(hasattr(lowerCamelCase__ , """size_divisor""" ) ) self.assertTrue(hasattr(lowerCamelCase__ , """resample""" ) ) self.assertTrue(hasattr(lowerCamelCase__ , """do_rescale""" ) ) def __a ( self :Optional[int] ): pass def __a ( self :Tuple ): # Initialize image_processing UpperCamelCase__ :int = self.image_processing_class(**self.image_processor_dict ) # create random PIL images UpperCamelCase__ :str = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCamelCase__ ) for image in image_inputs: self.assertIsInstance(lowerCamelCase__ , Image.Image ) # Test not batched input (GLPNImageProcessor doesn't support batching) UpperCamelCase__ :Tuple = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values self.assertTrue(encoded_images.shape[-1] % self.image_processor_tester.size_divisor == 0 ) self.assertTrue(encoded_images.shape[-2] % self.image_processor_tester.size_divisor == 0 ) def __a ( self :str ): # Initialize image_processing UpperCamelCase__ :str = self.image_processing_class(**self.image_processor_dict ) # create random numpy tensors UpperCamelCase__ :Optional[Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCamelCase__ , numpify=lowerCamelCase__ ) for image in image_inputs: self.assertIsInstance(lowerCamelCase__ , np.ndarray ) # Test not batched input (GLPNImageProcessor doesn't support batching) UpperCamelCase__ :List[str] = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values self.assertTrue(encoded_images.shape[-1] % self.image_processor_tester.size_divisor == 0 ) self.assertTrue(encoded_images.shape[-2] % self.image_processor_tester.size_divisor == 0 ) def __a ( self 
:Any ): # Initialize image_processing UpperCamelCase__ :List[Any] = self.image_processing_class(**self.image_processor_dict ) # create random PyTorch tensors UpperCamelCase__ :Tuple = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCamelCase__ , torchify=lowerCamelCase__ ) for image in image_inputs: self.assertIsInstance(lowerCamelCase__ , torch.Tensor ) # Test not batched input (GLPNImageProcessor doesn't support batching) UpperCamelCase__ :List[str] = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values self.assertTrue(encoded_images.shape[-1] % self.image_processor_tester.size_divisor == 0 ) self.assertTrue(encoded_images.shape[-2] % self.image_processor_tester.size_divisor == 0 )
45
0
'''simple docstring''' import inspect from typing import Callable, List, Optional, Union import torch from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer from diffusers import DiffusionPipeline from diffusers.models import AutoencoderKL, UNetaDConditionModel from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker from diffusers.schedulers import DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler from diffusers.utils import logging lowercase__ : List[str] = logging.get_logger(__name__) # pylint: disable=invalid-name class __lowerCAmelCase ( __magic_name__ ): """simple docstring""" def __init__( self : Optional[Any] , lowerCAmelCase__ : AutoencoderKL , lowerCAmelCase__ : CLIPTextModel , lowerCAmelCase__ : CLIPTokenizer , lowerCAmelCase__ : UNetaDConditionModel , lowerCAmelCase__ : Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler] , lowerCAmelCase__ : StableDiffusionSafetyChecker , lowerCAmelCase__ : CLIPImageProcessor , ) -> Optional[Any]: '''simple docstring''' super().__init__() self.register_modules( vae=lowerCAmelCase__ , text_encoder=lowerCAmelCase__ , tokenizer=lowerCAmelCase__ , unet=lowerCAmelCase__ , scheduler=lowerCAmelCase__ , safety_checker=lowerCAmelCase__ , feature_extractor=lowerCAmelCase__ , ) def snake_case__ ( self : Union[str, Any] , lowerCAmelCase__ : Optional[Union[str, int]] = "auto" ) -> Union[str, Any]: '''simple docstring''' if slice_size == "auto": # half the attention head size is usually a good trade-off between # speed and memory _UpperCamelCase = self.unet.config.attention_head_dim // 2 self.unet.set_attention_slice(lowerCAmelCase__ ) def snake_case__ ( self : int ) -> Optional[int]: '''simple docstring''' self.enable_attention_slicing(lowerCAmelCase__ ) @torch.no_grad() def __call__( self : Optional[Any] , lowerCAmelCase__ : Union[str, List[str]] , lowerCAmelCase__ : int = 512 , lowerCAmelCase__ : int = 512 , lowerCAmelCase__ : int = 50 , lowerCAmelCase__ : float = 7.5 , lowerCAmelCase__ : Optional[Union[str, List[str]]] = None , lowerCAmelCase__ : Optional[int] = 1 , lowerCAmelCase__ : float = 0.0 , lowerCAmelCase__ : Optional[torch.Generator] = None , lowerCAmelCase__ : Optional[torch.FloatTensor] = None , lowerCAmelCase__ : Optional[str] = "pil" , lowerCAmelCase__ : bool = True , lowerCAmelCase__ : Optional[Callable[[int, int, torch.FloatTensor], None]] = None , lowerCAmelCase__ : int = 1 , lowerCAmelCase__ : Optional[torch.FloatTensor] = None , **lowerCAmelCase__ : Optional[int] , ) -> Any: '''simple docstring''' if isinstance(lowerCAmelCase__ , lowerCAmelCase__ ): _UpperCamelCase = 1 elif isinstance(lowerCAmelCase__ , lowerCAmelCase__ ): _UpperCamelCase = len(lowerCAmelCase__ ) else: raise ValueError(f"""`prompt` has to be of type `str` or `list` but is {type(lowerCAmelCase__ )}""" ) if height % 8 != 0 or width % 8 != 0: raise ValueError(f"""`height` and `width` have to be divisible by 8 but are {height} and {width}.""" ) if (callback_steps is None) or ( callback_steps is not None and (not isinstance(lowerCAmelCase__ , lowerCAmelCase__ ) or callback_steps <= 0) ): raise ValueError( f"""`callback_steps` has to be a positive integer but is {callback_steps} of type""" f""" {type(lowerCAmelCase__ )}.""" ) # get prompt text embeddings _UpperCamelCase = self.tokenizer( lowerCAmelCase__ , padding='''max_length''' , max_length=self.tokenizer.model_max_length , return_tensors='''pt''' , ) _UpperCamelCase = 
text_inputs.input_ids if text_input_ids.shape[-1] > self.tokenizer.model_max_length: _UpperCamelCase = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :] ) logger.warning( '''The following part of your input was truncated because CLIP can only handle sequences up to''' f""" {self.tokenizer.model_max_length} tokens: {removed_text}""" ) _UpperCamelCase = text_input_ids[:, : self.tokenizer.model_max_length] if text_embeddings is None: _UpperCamelCase = self.text_encoder(text_input_ids.to(self.device ) )[0] # duplicate text embeddings for each generation per prompt, using mps friendly method _UpperCamelCase , _UpperCamelCase , _UpperCamelCase = text_embeddings.shape _UpperCamelCase = text_embeddings.repeat(1 , lowerCAmelCase__ , 1 ) _UpperCamelCase = text_embeddings.view(bs_embed * num_images_per_prompt , lowerCAmelCase__ , -1 ) # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2) # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1` # corresponds to doing no classifier free guidance. _UpperCamelCase = guidance_scale > 1.0 # get unconditional embeddings for classifier free guidance if do_classifier_free_guidance: _UpperCamelCase = 42 if negative_prompt is None: _UpperCamelCase = [''''''] elif type(lowerCAmelCase__ ) is not type(lowerCAmelCase__ ): raise TypeError( f"""`negative_prompt` should be the same type to `prompt`, but got {type(lowerCAmelCase__ )} !=""" f""" {type(lowerCAmelCase__ )}.""" ) elif isinstance(lowerCAmelCase__ , lowerCAmelCase__ ): _UpperCamelCase = [negative_prompt] elif batch_size != len(lowerCAmelCase__ ): raise ValueError( f"""`negative_prompt`: {negative_prompt} has batch size {len(lowerCAmelCase__ )}, but `prompt`:""" f""" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches""" ''' the batch size of `prompt`.''' ) else: _UpperCamelCase = negative_prompt _UpperCamelCase = text_input_ids.shape[-1] _UpperCamelCase = self.tokenizer( lowerCAmelCase__ , padding='''max_length''' , max_length=lowerCAmelCase__ , truncation=lowerCAmelCase__ , return_tensors='''pt''' , ) _UpperCamelCase = self.text_encoder(uncond_input.input_ids.to(self.device ) )[0] # duplicate unconditional embeddings for each generation per prompt, using mps friendly method _UpperCamelCase = uncond_embeddings.shape[1] _UpperCamelCase = uncond_embeddings.repeat(lowerCAmelCase__ , lowerCAmelCase__ , 1 ) _UpperCamelCase = uncond_embeddings.view(batch_size * num_images_per_prompt , lowerCAmelCase__ , -1 ) # For classifier free guidance, we need to do two forward passes. # Here we concatenate the unconditional and text embeddings into a single batch # to avoid doing two forward passes _UpperCamelCase = torch.cat([uncond_embeddings, text_embeddings] ) # get the initial random noise unless the user supplied it # Unlike in other pipelines, latents need to be generated in the target device # for 1-to-1 results reproducibility with the CompVis implementation. # However this currently doesn't work in `mps`. 
_UpperCamelCase = (batch_size * num_images_per_prompt, self.unet.config.in_channels, height // 8, width // 8) _UpperCamelCase = (batch_size * num_images_per_prompt, self.unet.config.in_channels, 64, 64) _UpperCamelCase = text_embeddings.dtype if latents is None: if self.device.type == "mps": # randn does not exist on mps _UpperCamelCase = torch.randn( lowerCAmelCase__ , generator=lowerCAmelCase__ , device='''cpu''' , dtype=lowerCAmelCase__ ).to(self.device ) _UpperCamelCase = torch.randn(lowerCAmelCase__ , generator=lowerCAmelCase__ , device='''cpu''' , dtype=lowerCAmelCase__ ).to( self.device ) else: _UpperCamelCase = torch.randn( lowerCAmelCase__ , generator=lowerCAmelCase__ , device=self.device , dtype=lowerCAmelCase__ ) _UpperCamelCase = torch.randn(lowerCAmelCase__ , generator=lowerCAmelCase__ , device=self.device , dtype=lowerCAmelCase__ ) else: if latents_reference.shape != latents_shape: raise ValueError(f"""Unexpected latents shape, got {latents.shape}, expected {latents_shape}""" ) _UpperCamelCase = latents_reference.to(self.device ) _UpperCamelCase = latents.to(self.device ) # This is the key part of the pipeline where we # try to ensure that the generated images w/ the same seed # but different sizes actually result in similar images _UpperCamelCase = (latents_shape[3] - latents_shape_reference[3]) // 2 _UpperCamelCase = (latents_shape[2] - latents_shape_reference[2]) // 2 _UpperCamelCase = latents_shape_reference[3] if dx >= 0 else latents_shape_reference[3] + 2 * dx _UpperCamelCase = latents_shape_reference[2] if dy >= 0 else latents_shape_reference[2] + 2 * dy _UpperCamelCase = 0 if dx < 0 else dx _UpperCamelCase = 0 if dy < 0 else dy _UpperCamelCase = max(-dx , 0 ) _UpperCamelCase = max(-dy , 0 ) # import pdb # pdb.set_trace() _UpperCamelCase = latents_reference[:, :, dy : dy + h, dx : dx + w] # set timesteps self.scheduler.set_timesteps(lowerCAmelCase__ ) # Some schedulers like PNDM have timesteps as arrays # It's more optimized to move all timesteps to correct device beforehand _UpperCamelCase = self.scheduler.timesteps.to(self.device ) # scale the initial noise by the standard deviation required by the scheduler _UpperCamelCase = latents * self.scheduler.init_noise_sigma # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers. 
# eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502 # and should be between [0, 1] _UpperCamelCase = '''eta''' in set(inspect.signature(self.scheduler.step ).parameters.keys() ) _UpperCamelCase = {} if accepts_eta: _UpperCamelCase = eta for i, t in enumerate(self.progress_bar(lowerCAmelCase__ ) ): # expand the latents if we are doing classifier free guidance _UpperCamelCase = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents _UpperCamelCase = self.scheduler.scale_model_input(lowerCAmelCase__ , lowerCAmelCase__ ) # predict the noise residual _UpperCamelCase = self.unet(lowerCAmelCase__ , lowerCAmelCase__ , encoder_hidden_states=lowerCAmelCase__ ).sample # perform guidance if do_classifier_free_guidance: _UpperCamelCase , _UpperCamelCase = noise_pred.chunk(2 ) _UpperCamelCase = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond) # compute the previous noisy sample x_t -> x_t-1 _UpperCamelCase = self.scheduler.step(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , **lowerCAmelCase__ ).prev_sample # call the callback, if provided if callback is not None and i % callback_steps == 0: callback(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) _UpperCamelCase = 1 / 0.18215 * latents _UpperCamelCase = self.vae.decode(lowerCAmelCase__ ).sample _UpperCamelCase = (image / 2 + 0.5).clamp(0 , 1 ) # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16 _UpperCamelCase = image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy() if self.safety_checker is not None: _UpperCamelCase = self.feature_extractor(self.numpy_to_pil(lowerCAmelCase__ ) , return_tensors='''pt''' ).to( self.device ) _UpperCamelCase , _UpperCamelCase = self.safety_checker( images=lowerCAmelCase__ , clip_input=safety_checker_input.pixel_values.to(text_embeddings.dtype ) ) else: _UpperCamelCase = None if output_type == "pil": _UpperCamelCase = self.numpy_to_pil(lowerCAmelCase__ ) if not return_dict: return (image, has_nsfw_concept) return StableDiffusionPipelineOutput(images=lowerCAmelCase__ , nsfw_content_detected=lowerCAmelCase__ )
98
import math def res ( x : float , y : float ) -> float: if 0 not in (x, y): # We use the relation log10(x^y) = y*log10(x), where 10 is the base. return y * math.log10(x) else: if x == 0: # 0 raised to any number is 0 return 0 elif y == 0: return 1 # any number raised to 0 is 1 raise AssertionError("""This should never happen""" ) if __name__ == "__main__": # Main function # Read two numbers from input and typecast them to int using map function. # Here x is the base and y is the power. prompt = "Enter the base and the power separated by a comma: " xa , ya = map(int, input(prompt).split(",")) xb , yb = map(int, input(prompt).split(",")) # We find the log of each number, using the function res(), which takes two # arguments. resa = res(xa, ya) resb = res(xb, yb) # We check for the largest number if resa > resb: print("Largest number is", xa, "^", ya) elif resb > resa: print("Largest number is", xb, "^", yb) else: print("Both are equal")
45
0
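The denoising loop in the pipeline record above hinges on one line of classifier-free guidance arithmetic. The sketch below isolates it with plain torch tensors; the shapes and the guidance_scale value are illustrative stand-ins, not values taken from the record.

import torch

# Classifier-free guidance: push the prediction away from the unconditional
# output, in the direction of the text-conditioned output.
guidance_scale = 7.5                           # illustrative value
noise_pred_uncond = torch.randn(1, 4, 64, 64)  # stand-in UNet output (no prompt)
noise_pred_text = torch.randn(1, 4, 64, 64)    # stand-in UNet output (with prompt)

noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
print(noise_pred.shape)  # torch.Size([1, 4, 64, 64])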
import argparse import os import torch from transformers import FlavaImageCodebook, FlavaImageCodebookConfig def a (lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ): __a = s.rsplit(lowerCAmelCase__ , lowerCAmelCase__ ) return new.join(lowerCAmelCase__ ) def a (lowerCAmelCase__ ): # encoder.embeddings are double copied in original FLAVA return sum(param.float().sum() if """encoder.embeddings""" not in key else 0 for key, param in state_dict.items() ) def a (lowerCAmelCase__ ): __a = {} __a = ["""group_1""", """group_2""", """group_3""", """group_4"""] for key, value in state_dict.items(): for group_key in group_keys: if group_key in key: __a = key.replace(f'''{group_key}.''' , f'''{group_key}.group.''' ) if "res_path" in key: __a = key.replace("""res_path.""" , """res_path.path.""" ) if key.endswith(""".w""" ): __a = rreplace(lowerCAmelCase__ , """.w""" , """.weight""" , 1 ) if key.endswith(""".b""" ): __a = rreplace(lowerCAmelCase__ , """.b""" , """.bias""" , 1 ) __a = value.float() return upgrade @torch.no_grad() def a (lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__=None , lowerCAmelCase__=True ): from dall_e import Encoder __a = Encoder() if os.path.exists(lowerCAmelCase__ ): __a = torch.load(lowerCAmelCase__ ) else: __a = torch.hub.load_state_dict_from_url(lowerCAmelCase__ ) if isinstance(lowerCAmelCase__ , lowerCAmelCase__ ): __a = ckpt.state_dict() encoder.load_state_dict(lowerCAmelCase__ ) if config_path is not None: __a = FlavaImageCodebookConfig.from_pretrained(lowerCAmelCase__ ) else: __a = FlavaImageCodebookConfig() __a = FlavaImageCodebook(lowerCAmelCase__ ).eval() __a = encoder.state_dict() __a = upgrade_state_dict(lowerCAmelCase__ ) hf_model.load_state_dict(lowerCAmelCase__ ) __a = hf_model.state_dict() __a = count_parameters(lowerCAmelCase__ ) __a = count_parameters(lowerCAmelCase__ ) assert torch.allclose(lowerCAmelCase__ , lowerCAmelCase__ , atol=1E-3 ) if save_checkpoint: hf_model.save_pretrained(lowerCAmelCase__ ) else: return hf_state_dict if __name__ == "__main__": SCREAMING_SNAKE_CASE = argparse.ArgumentParser() parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.') parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to flava checkpoint') parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert') SCREAMING_SNAKE_CASE = parser.parse_args() convert_dalle_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path)
99
from __future__ import annotations import unittest from transformers import is_tf_available from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import numpy as np import tensorflow as tf from transformers import ( TF_FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST, FlaubertConfig, TFFlaubertForMultipleChoice, TFFlaubertForQuestionAnsweringSimple, TFFlaubertForSequenceClassification, TFFlaubertForTokenClassification, TFFlaubertModel, TFFlaubertWithLMHeadModel, ) class lowerCAmelCase_ : """simple docstring""" def __init__( self :Dict , lowerCamelCase__ :List[str] , ): UpperCamelCase__ :Optional[int] = parent UpperCamelCase__ :int = 13 UpperCamelCase__ :Optional[int] = 7 UpperCamelCase__ :Dict = True UpperCamelCase__ :Dict = True UpperCamelCase__ :str = True UpperCamelCase__ :List[Any] = True UpperCamelCase__ :Any = True UpperCamelCase__ :Optional[int] = False UpperCamelCase__ :Optional[int] = False UpperCamelCase__ :Tuple = False UpperCamelCase__ :Optional[int] = 2 UpperCamelCase__ :List[str] = 99 UpperCamelCase__ :Optional[Any] = 0 UpperCamelCase__ :Any = 32 UpperCamelCase__ :List[str] = 2 UpperCamelCase__ :int = 4 UpperCamelCase__ :List[str] = 0.1 UpperCamelCase__ :Union[str, Any] = 0.1 UpperCamelCase__ :Union[str, Any] = 5_12 UpperCamelCase__ :List[str] = 16 UpperCamelCase__ :str = 2 UpperCamelCase__ :Optional[int] = 0.02 UpperCamelCase__ :Optional[int] = 3 UpperCamelCase__ :Optional[int] = 4 UpperCamelCase__ :Optional[int] = """last""" UpperCamelCase__ :Tuple = True UpperCamelCase__ :int = None UpperCamelCase__ :Dict = 0 def __a ( self :int ): UpperCamelCase__ :Optional[Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) UpperCamelCase__ :Any = random_attention_mask([self.batch_size, self.seq_length] , dtype=tf.floataa ) UpperCamelCase__ :Union[str, Any] = None if self.use_input_lengths: UpperCamelCase__ :Union[str, Any] = ( ids_tensor([self.batch_size] , vocab_size=2 ) + self.seq_length - 2 ) # small variation of seq_length UpperCamelCase__ :List[str] = None if self.use_token_type_ids: UpperCamelCase__ :List[str] = ids_tensor([self.batch_size, self.seq_length] , self.n_langs ) UpperCamelCase__ :int = None UpperCamelCase__ :List[str] = None UpperCamelCase__ :List[str] = None if self.use_labels: UpperCamelCase__ :List[Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size ) UpperCamelCase__ :Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) UpperCamelCase__ :str = ids_tensor([self.batch_size] , 2 , dtype=tf.floataa ) UpperCamelCase__ :int = ids_tensor([self.batch_size] , self.num_choices ) UpperCamelCase__ :List[Any] = FlaubertConfig( vocab_size=self.vocab_size , n_special=self.n_special , emb_dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , gelu_activation=self.gelu_activation , sinusoidal_embeddings=self.sinusoidal_embeddings , asm=self.asm , causal=self.causal , n_langs=self.n_langs , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , summary_type=self.summary_type , use_proj=self.use_proj , bos_token_id=self.bos_token_id , ) return ( config, input_ids, token_type_ids, input_lengths, 
sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask, ) def __a ( self :Union[str, Any] , lowerCamelCase__ :Optional[int] , lowerCamelCase__ :Union[str, Any] , lowerCamelCase__ :int , lowerCamelCase__ :List[str] , lowerCamelCase__ :Optional[int] , lowerCamelCase__ :int , lowerCamelCase__ :List[Any] , lowerCamelCase__ :List[Any] , lowerCamelCase__ :int , ): UpperCamelCase__ :int = TFFlaubertModel(config=lowerCamelCase__ ) UpperCamelCase__ :Union[str, Any] = {"""input_ids""": input_ids, """lengths""": input_lengths, """langs""": token_type_ids} UpperCamelCase__ :Optional[Any] = model(lowerCamelCase__ ) UpperCamelCase__ :Union[str, Any] = [input_ids, input_mask] UpperCamelCase__ :Optional[int] = model(lowerCamelCase__ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def __a ( self :Tuple , lowerCamelCase__ :Union[str, Any] , lowerCamelCase__ :Optional[Any] , lowerCamelCase__ :Union[str, Any] , lowerCamelCase__ :Any , lowerCamelCase__ :int , lowerCamelCase__ :int , lowerCamelCase__ :List[str] , lowerCamelCase__ :Any , lowerCamelCase__ :Optional[Any] , ): UpperCamelCase__ :List[str] = TFFlaubertWithLMHeadModel(lowerCamelCase__ ) UpperCamelCase__ :Optional[Any] = {"""input_ids""": input_ids, """lengths""": input_lengths, """langs""": token_type_ids} UpperCamelCase__ :Any = model(lowerCamelCase__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def __a ( self :Dict , lowerCamelCase__ :List[str] , lowerCamelCase__ :Dict , lowerCamelCase__ :Tuple , lowerCamelCase__ :str , lowerCamelCase__ :Any , lowerCamelCase__ :Tuple , lowerCamelCase__ :Any , lowerCamelCase__ :int , lowerCamelCase__ :Tuple , ): UpperCamelCase__ :int = TFFlaubertForQuestionAnsweringSimple(lowerCamelCase__ ) UpperCamelCase__ :int = {"""input_ids""": input_ids, """lengths""": input_lengths} UpperCamelCase__ :Optional[int] = model(lowerCamelCase__ ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def __a ( self :List[Any] , lowerCamelCase__ :List[str] , lowerCamelCase__ :List[str] , lowerCamelCase__ :str , lowerCamelCase__ :Optional[Any] , lowerCamelCase__ :Tuple , lowerCamelCase__ :Tuple , lowerCamelCase__ :Optional[Any] , lowerCamelCase__ :int , lowerCamelCase__ :Optional[int] , ): UpperCamelCase__ :List[Any] = TFFlaubertForSequenceClassification(lowerCamelCase__ ) UpperCamelCase__ :List[str] = {"""input_ids""": input_ids, """lengths""": input_lengths} UpperCamelCase__ :List[str] = model(lowerCamelCase__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) def __a ( self :Tuple , lowerCamelCase__ :str , lowerCamelCase__ :Any , lowerCamelCase__ :Tuple , lowerCamelCase__ :str , lowerCamelCase__ :Any , lowerCamelCase__ :Union[str, Any] , lowerCamelCase__ :List[str] , lowerCamelCase__ :str , lowerCamelCase__ :Any , ): UpperCamelCase__ :Any = self.num_labels UpperCamelCase__ :Tuple = TFFlaubertForTokenClassification(config=lowerCamelCase__ ) UpperCamelCase__ :Any = {"""input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids} UpperCamelCase__ :List[Any] = model(lowerCamelCase__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def __a ( self :Tuple , lowerCamelCase__ :Optional[int] , lowerCamelCase__ :Tuple , lowerCamelCase__ :Any , 
lowerCamelCase__ :Union[str, Any] , lowerCamelCase__ :str , lowerCamelCase__ :Any , lowerCamelCase__ :Optional[Any] , lowerCamelCase__ :Dict , lowerCamelCase__ :List[str] , ): UpperCamelCase__ :Optional[int] = self.num_choices UpperCamelCase__ :Dict = TFFlaubertForMultipleChoice(config=lowerCamelCase__ ) UpperCamelCase__ :Any = tf.tile(tf.expand_dims(lowerCamelCase__ , 1 ) , (1, self.num_choices, 1) ) UpperCamelCase__ :str = tf.tile(tf.expand_dims(lowerCamelCase__ , 1 ) , (1, self.num_choices, 1) ) UpperCamelCase__ :Any = tf.tile(tf.expand_dims(lowerCamelCase__ , 1 ) , (1, self.num_choices, 1) ) UpperCamelCase__ :int = { """input_ids""": multiple_choice_inputs_ids, """attention_mask""": multiple_choice_input_mask, """token_type_ids""": multiple_choice_token_type_ids, } UpperCamelCase__ :List[str] = model(lowerCamelCase__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) ) def __a ( self :Tuple ): UpperCamelCase__ :str = self.prepare_config_and_inputs() ( ( UpperCamelCase__ ) , ( UpperCamelCase__ ) , ( UpperCamelCase__ ) , ( UpperCamelCase__ ) , ( UpperCamelCase__ ) , ( UpperCamelCase__ ) , ( UpperCamelCase__ ) , ( UpperCamelCase__ ) , ( UpperCamelCase__ ) , ) :str = config_and_inputs UpperCamelCase__ :Optional[Any] = { """input_ids""": input_ids, """token_type_ids""": token_type_ids, """langs""": token_type_ids, """lengths""": input_lengths, } return config, inputs_dict @require_tf class lowerCAmelCase_ ( lowercase , lowercase , unittest.TestCase ): """simple docstring""" _snake_case : List[str] = ( ( TFFlaubertModel, TFFlaubertWithLMHeadModel, TFFlaubertForSequenceClassification, TFFlaubertForQuestionAnsweringSimple, TFFlaubertForTokenClassification, TFFlaubertForMultipleChoice, ) if is_tf_available() else () ) _snake_case : List[Any] = ( (TFFlaubertWithLMHeadModel,) if is_tf_available() else () ) # TODO (PVP): Check other models whether language generation is also applicable _snake_case : Optional[int] = ( { """feature-extraction""": TFFlaubertModel, """fill-mask""": TFFlaubertWithLMHeadModel, """question-answering""": TFFlaubertForQuestionAnsweringSimple, """text-classification""": TFFlaubertForSequenceClassification, """token-classification""": TFFlaubertForTokenClassification, """zero-shot""": TFFlaubertForSequenceClassification, } if is_tf_available() else {} ) _snake_case : List[Any] = False _snake_case : Tuple = False def __a ( self :Optional[int] , lowerCamelCase__ :List[Any] , lowerCamelCase__ :Dict , lowerCamelCase__ :int , lowerCamelCase__ :str , lowerCamelCase__ :List[Any] ): if ( pipeline_test_casse_name == "QAPipelineTests" and tokenizer_name is not None and not tokenizer_name.endswith("""Fast""" ) ): # `QAPipelineTests` fails for a few models when the slower tokenizer are used. 
# (The slower tokenizers were never used for pipeline tests before the pipeline testing rework) # TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer return True return False def __a ( self :List[str] ): UpperCamelCase__ :List[str] = TFFlaubertModelTester(self ) UpperCamelCase__ :Tuple = ConfigTester(self , config_class=lowerCamelCase__ , emb_dim=37 ) def __a ( self :int ): self.config_tester.run_common_tests() def __a ( self :List[str] ): UpperCamelCase__ :List[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_flaubert_model(*lowerCamelCase__ ) def __a ( self :Tuple ): UpperCamelCase__ :Tuple = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_flaubert_lm_head(*lowerCamelCase__ ) def __a ( self :Union[str, Any] ): UpperCamelCase__ :Optional[int] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_flaubert_qa(*lowerCamelCase__ ) def __a ( self :List[Any] ): UpperCamelCase__ :Optional[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_flaubert_sequence_classif(*lowerCamelCase__ ) def __a ( self :Any ): UpperCamelCase__ :Any = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_flaubert_for_token_classification(*lowerCamelCase__ ) def __a ( self :List[Any] ): UpperCamelCase__ :Union[str, Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_flaubert_for_multiple_choice(*lowerCamelCase__ ) @slow def __a ( self :str ): for model_name in TF_FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: UpperCamelCase__ :Dict = TFFlaubertModel.from_pretrained(lowerCamelCase__ ) self.assertIsNotNone(lowerCamelCase__ ) @require_tf @require_sentencepiece @require_tokenizers class lowerCAmelCase_ ( unittest.TestCase ): """simple docstring""" @slow def __a ( self :str ): UpperCamelCase__ :Tuple = TFFlaubertModel.from_pretrained("""jplu/tf-flaubert-small-cased""" ) UpperCamelCase__ :Optional[int] = tf.convert_to_tensor( [[0, 1_58, 7_35, 25_92, 14_24, 67_27, 82, 1]] , dtype=tf.intaa , ) # "J'aime flaubert !" UpperCamelCase__ :Optional[Any] = model(lowerCamelCase__ )[0] UpperCamelCase__ :Optional[int] = tf.TensorShape((1, 8, 5_12) ) self.assertEqual(output.shape , lowerCamelCase__ ) # compare the actual values for a slice. UpperCamelCase__ :str = tf.convert_to_tensor( [ [ [-1.876_8773, -1.56_6555, 0.2707_2418], [-1.692_0038, -0.587_3505, 1.932_9599], [-2.956_3985, -1.699_3835, 1.797_2052], ] ] , dtype=tf.floataa , ) self.assertTrue(np.allclose(output[:, :3, :3].numpy() , expected_slice.numpy() , atol=1e-4 ) )
45
0
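The FLAVA conversion record above renames checkpoint keys with a right-most string replacement built from rsplit and join. A self-contained restatement, with an illustrative key:

def rreplace(s: str, old: str, new: str, count: int) -> str:
    # Split from the right at most `count` times, then stitch the pieces
    # back together with the replacement token.
    return new.join(s.rsplit(old, count))

# Only the trailing ".w" is rewritten, so inner occurrences survive,
# mirroring how DALL-E style keys are mapped to ".weight" / ".bias".
print(rreplace("group_1.conv.w", ".w", ".weight", 1))  # group_1.conv.weight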
class __snake_case : '''simple docstring''' def __init__( self , A_ , A_=None , A_=None ): '''simple docstring''' SCREAMING_SNAKE_CASE__ = data SCREAMING_SNAKE_CASE__ = previous SCREAMING_SNAKE_CASE__ = next_node def __str__( self ): '''simple docstring''' return f'''{self.data}''' def lowercase_ ( self ): '''simple docstring''' return self.data def lowercase_ ( self ): '''simple docstring''' return self.next def lowercase_ ( self ): '''simple docstring''' return self.previous class __snake_case : '''simple docstring''' def __init__( self , A_ ): '''simple docstring''' SCREAMING_SNAKE_CASE__ = head def __iter__( self ): '''simple docstring''' return self def lowercase_ ( self ): '''simple docstring''' if not self.current: raise StopIteration else: SCREAMING_SNAKE_CASE__ = self.current.get_data() SCREAMING_SNAKE_CASE__ = self.current.get_next() return value class __snake_case : '''simple docstring''' def __init__( self ): '''simple docstring''' SCREAMING_SNAKE_CASE__ = None # First node in list SCREAMING_SNAKE_CASE__ = None # Last node in list def __str__( self ): '''simple docstring''' SCREAMING_SNAKE_CASE__ = self.head SCREAMING_SNAKE_CASE__ = [] while current is not None: nodes.append(current.get_data() ) SCREAMING_SNAKE_CASE__ = current.get_next() return " ".join(str(A_ ) for node in nodes ) def __contains__( self , A_ ): '''simple docstring''' SCREAMING_SNAKE_CASE__ = self.head while current: if current.get_data() == value: return True SCREAMING_SNAKE_CASE__ = current.get_next() return False def __iter__( self ): '''simple docstring''' return LinkedListIterator(self.head ) def lowercase_ ( self ): '''simple docstring''' if self.head: return self.head.get_data() return None def lowercase_ ( self ): '''simple docstring''' if self.tail: return self.tail.get_data() return None def lowercase_ ( self , A_ ): '''simple docstring''' if self.head is None: SCREAMING_SNAKE_CASE__ = node SCREAMING_SNAKE_CASE__ = node else: self.insert_before_node(self.head , A_ ) def lowercase_ ( self , A_ ): '''simple docstring''' if self.head is None: self.set_head(A_ ) else: self.insert_after_node(self.tail , A_ ) def lowercase_ ( self , A_ ): '''simple docstring''' SCREAMING_SNAKE_CASE__ = Node(A_ ) if self.head is None: self.set_head(A_ ) else: self.set_tail(A_ ) def lowercase_ ( self , A_ , A_ ): '''simple docstring''' SCREAMING_SNAKE_CASE__ = node SCREAMING_SNAKE_CASE__ = node.previous if node.get_previous() is None: SCREAMING_SNAKE_CASE__ = node_to_insert else: SCREAMING_SNAKE_CASE__ = node_to_insert SCREAMING_SNAKE_CASE__ = node_to_insert def lowercase_ ( self , A_ , A_ ): '''simple docstring''' SCREAMING_SNAKE_CASE__ = node SCREAMING_SNAKE_CASE__ = node.next if node.get_next() is None: SCREAMING_SNAKE_CASE__ = node_to_insert else: SCREAMING_SNAKE_CASE__ = node_to_insert SCREAMING_SNAKE_CASE__ = node_to_insert def lowercase_ ( self , A_ , A_ ): '''simple docstring''' SCREAMING_SNAKE_CASE__ = 1 SCREAMING_SNAKE_CASE__ = Node(A_ ) SCREAMING_SNAKE_CASE__ = self.head while node: if current_position == position: self.insert_before_node(A_ , A_ ) return current_position += 1 SCREAMING_SNAKE_CASE__ = node.next self.insert_after_node(self.tail , A_ ) def lowercase_ ( self , A_ ): '''simple docstring''' SCREAMING_SNAKE_CASE__ = self.head while node: if node.get_data() == item: return node SCREAMING_SNAKE_CASE__ = node.get_next() raise Exception('''Node not found''' ) def lowercase_ ( self , A_ ): '''simple docstring''' if (node := self.get_node(A_ )) is not None: if node == self.head: SCREAMING_SNAKE_CASE__ = 
self.head.get_next() if node == self.tail: SCREAMING_SNAKE_CASE__ = self.tail.get_previous() self.remove_node_pointers(A_ ) @staticmethod def lowercase_ ( A_ ): '''simple docstring''' if node.get_next(): SCREAMING_SNAKE_CASE__ = node.previous if node.get_previous(): SCREAMING_SNAKE_CASE__ = node.next SCREAMING_SNAKE_CASE__ = None SCREAMING_SNAKE_CASE__ = None def lowercase_ ( self ): '''simple docstring''' return self.head is None def __snake_case ( ) -> None: pass if __name__ == "__main__": import doctest doctest.testmod()
100
import gc import tempfile import unittest import numpy as np import torch from diffusers import VersatileDiffusionPipeline from diffusers.utils.testing_utils import load_image, nightly, require_torch_gpu, torch_device UpperCamelCase = False class lowerCAmelCase_ ( unittest.TestCase ): """simple docstring""" pass @nightly @require_torch_gpu class lowerCAmelCase_ ( unittest.TestCase ): """simple docstring""" def __a ( self :Union[str, Any] ): # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() def __a ( self :List[Any] ): UpperCamelCase__ :List[str] = VersatileDiffusionPipeline.from_pretrained("""shi-labs/versatile-diffusion""" , torch_dtype=torch.floataa ) pipe.to(lowerCamelCase__ ) pipe.set_progress_bar_config(disable=lowerCamelCase__ ) UpperCamelCase__ :Dict = load_image( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg""" ) UpperCamelCase__ :Any = torch.manual_seed(0 ) UpperCamelCase__ :Optional[int] = pipe.dual_guided( prompt="""first prompt""" , image=lowerCamelCase__ , text_to_image_strength=0.75 , generator=lowerCamelCase__ , guidance_scale=7.5 , num_inference_steps=2 , output_type="""numpy""" , ).images with tempfile.TemporaryDirectory() as tmpdirname: pipe.save_pretrained(lowerCamelCase__ ) UpperCamelCase__ :List[str] = VersatileDiffusionPipeline.from_pretrained(lowerCamelCase__ , torch_dtype=torch.floataa ) pipe.to(lowerCamelCase__ ) pipe.set_progress_bar_config(disable=lowerCamelCase__ ) UpperCamelCase__ :str = generator.manual_seed(0 ) UpperCamelCase__ :str = pipe.dual_guided( prompt="""first prompt""" , image=lowerCamelCase__ , text_to_image_strength=0.75 , generator=lowerCamelCase__ , guidance_scale=7.5 , num_inference_steps=2 , output_type="""numpy""" , ).images assert np.abs(image - new_image ).sum() < 1e-5, "Models don't have the same forward pass" def __a ( self :Dict ): UpperCamelCase__ :List[Any] = VersatileDiffusionPipeline.from_pretrained("""shi-labs/versatile-diffusion""" , torch_dtype=torch.floataa ) pipe.to(lowerCamelCase__ ) pipe.set_progress_bar_config(disable=lowerCamelCase__ ) UpperCamelCase__ :Optional[Any] = """cyberpunk 2077""" UpperCamelCase__ :str = load_image( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg""" ) UpperCamelCase__ :str = torch.manual_seed(0 ) UpperCamelCase__ :Dict = pipe.dual_guided( prompt=lowerCamelCase__ , image=lowerCamelCase__ , text_to_image_strength=0.75 , generator=lowerCamelCase__ , guidance_scale=7.5 , num_inference_steps=50 , output_type="""numpy""" , ).images UpperCamelCase__ :Tuple = image[0, 2_53:2_56, 2_53:2_56, -1] assert image.shape == (1, 5_12, 5_12, 3) UpperCamelCase__ :Any = np.array([0.1448, 0.1619, 0.1741, 0.1086, 0.1147, 0.1128, 0.1199, 0.1165, 0.1001] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1 UpperCamelCase__ :List[Any] = """A painting of a squirrel eating a burger """ UpperCamelCase__ :List[str] = torch.manual_seed(0 ) UpperCamelCase__ :Optional[int] = pipe.text_to_image( prompt=lowerCamelCase__ , generator=lowerCamelCase__ , guidance_scale=7.5 , num_inference_steps=50 , output_type="""numpy""" ).images UpperCamelCase__ :str = image[0, 2_53:2_56, 2_53:2_56, -1] assert image.shape == (1, 5_12, 5_12, 3) UpperCamelCase__ :Union[str, Any] = np.array([0.3367, 0.3169, 0.2656, 0.3870, 0.4790, 0.3796, 0.4009, 0.4878, 0.4778] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1 UpperCamelCase__ :Optional[int] = 
pipe.image_variation(lowerCamelCase__ , generator=lowerCamelCase__ , output_type="""numpy""" ).images UpperCamelCase__ :int = image[0, 2_53:2_56, 2_53:2_56, -1] assert image.shape == (1, 5_12, 5_12, 3) UpperCamelCase__ :List[Any] = np.array([0.3076, 0.3123, 0.3284, 0.3782, 0.3770, 0.3894, 0.4297, 0.4331, 0.4456] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1
45
0
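Removing an element from the doubly linked list record above comes down to bridging the neighbours of the doomed node and then clearing its own pointers. A minimal standalone sketch; the Node class here is a simplified stand-in for the record's:

class Node:
    def __init__(self, data):
        self.data = data
        self.previous = None
        self.next = None

def unlink(node: Node) -> None:
    if node.next:                      # bridge forward neighbour back
        node.next.previous = node.previous
    if node.previous:                  # bridge backward neighbour forward
        node.previous.next = node.next
    node.previous = None               # detach the node itself
    node.next = None

a, b, c = Node(1), Node(2), Node(3)
a.next, b.previous, b.next, c.previous = b, a, c, b
unlink(b)
print(a.next.data, c.previous.data)  # 3 1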
from collections.abc import Callable import numpy as np def euler_modified ( ode_func : Callable , ya : float , xa : float , step_size : float , x_end : float ) -> np.ndarray: n = int(np.ceil((x_end - xa) / step_size ) ) y = np.zeros((n + 1,) ) y[0] = ya x = xa for k in range(n ): y_est = y[k] + step_size * ode_func(x , y[k] ) y[k + 1] = y[k] + ( (step_size / 2) * (ode_func(x , y[k] ) + ode_func(x + step_size , y_est )) ) x += step_size return y if __name__ == "__main__": import doctest doctest.testmod()
101
from __future__ import annotations import copy import inspect import unittest import numpy as np from transformers import is_tf_available, is_vision_available from transformers.models.auto import get_values from transformers.testing_utils import require_tf, slow from transformers.utils import cached_property from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import ( TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST, TF_MODEL_FOR_MULTIPLE_CHOICE_MAPPING, TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING, TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING, TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING, LayoutLMvaConfig, TFLayoutLMvaForQuestionAnswering, TFLayoutLMvaForSequenceClassification, TFLayoutLMvaForTokenClassification, TFLayoutLMvaModel, ) if is_vision_available(): from PIL import Image from transformers import LayoutLMvaImageProcessor class lowerCAmelCase_ : """simple docstring""" def __init__( self :Union[str, Any] , lowerCamelCase__ :Tuple , lowerCamelCase__ :List[str]=2 , lowerCamelCase__ :List[str]=3 , lowerCamelCase__ :List[str]=4 , lowerCamelCase__ :str=2 , lowerCamelCase__ :Optional[int]=7 , lowerCamelCase__ :List[Any]=True , lowerCamelCase__ :Optional[Any]=True , lowerCamelCase__ :Union[str, Any]=True , lowerCamelCase__ :Any=True , lowerCamelCase__ :Dict=99 , lowerCamelCase__ :Optional[Any]=36 , lowerCamelCase__ :str=2 , lowerCamelCase__ :List[Any]=4 , lowerCamelCase__ :Optional[Any]=37 , lowerCamelCase__ :Optional[int]="gelu" , lowerCamelCase__ :Any=0.1 , lowerCamelCase__ :List[Any]=0.1 , lowerCamelCase__ :List[Any]=5_12 , lowerCamelCase__ :str=16 , lowerCamelCase__ :Tuple=2 , lowerCamelCase__ :int=0.02 , lowerCamelCase__ :List[Any]=6 , lowerCamelCase__ :List[str]=6 , lowerCamelCase__ :Optional[int]=3 , lowerCamelCase__ :Optional[int]=4 , lowerCamelCase__ :int=None , lowerCamelCase__ :Optional[Any]=10_00 , ): UpperCamelCase__ :Any = parent UpperCamelCase__ :Union[str, Any] = batch_size UpperCamelCase__ :Dict = num_channels UpperCamelCase__ :Optional[Any] = image_size UpperCamelCase__ :Union[str, Any] = patch_size UpperCamelCase__ :Union[str, Any] = is_training UpperCamelCase__ :str = use_input_mask UpperCamelCase__ :int = use_token_type_ids UpperCamelCase__ :int = use_labels UpperCamelCase__ :List[Any] = vocab_size UpperCamelCase__ :List[str] = hidden_size UpperCamelCase__ :List[Any] = num_hidden_layers UpperCamelCase__ :List[str] = num_attention_heads UpperCamelCase__ :Tuple = intermediate_size UpperCamelCase__ :Any = hidden_act UpperCamelCase__ :Optional[int] = hidden_dropout_prob UpperCamelCase__ :Tuple = attention_probs_dropout_prob UpperCamelCase__ :Dict = max_position_embeddings UpperCamelCase__ :Tuple = type_vocab_size UpperCamelCase__ :Union[str, Any] = type_sequence_label_size UpperCamelCase__ :int = initializer_range UpperCamelCase__ :List[Any] = coordinate_size UpperCamelCase__ :Tuple = shape_size UpperCamelCase__ :Dict = num_labels UpperCamelCase__ :str = num_choices UpperCamelCase__ :Tuple = scope UpperCamelCase__ :str = range_bbox # LayoutLMv3's sequence length equals the number of text tokens + number of patches + 1 (we add 1 for the CLS token) UpperCamelCase__ :List[str] = text_seq_length UpperCamelCase__ :List[str] = (image_size // patch_size) ** 2 + 1 UpperCamelCase__ :Dict = self.text_seq_length + self.image_seq_length def __a ( self :Tuple ): 
UpperCamelCase__ :Optional[int] = ids_tensor([self.batch_size, self.text_seq_length] , self.vocab_size ) UpperCamelCase__ :int = ids_tensor([self.batch_size, self.text_seq_length, 4] , self.range_bbox ) UpperCamelCase__ :str = bbox.numpy() # Ensure that bbox is legal for i in range(bbox.shape[0] ): for j in range(bbox.shape[1] ): if bbox[i, j, 3] < bbox[i, j, 1]: UpperCamelCase__ :List[str] = bbox[i, j, 3] UpperCamelCase__ :Optional[int] = bbox[i, j, 1] UpperCamelCase__ :Optional[Any] = tmp_coordinate if bbox[i, j, 2] < bbox[i, j, 0]: UpperCamelCase__ :Tuple = bbox[i, j, 2] UpperCamelCase__ :Optional[Any] = bbox[i, j, 0] UpperCamelCase__ :List[str] = tmp_coordinate UpperCamelCase__ :Dict = tf.constant(lowerCamelCase__ ) UpperCamelCase__ :Union[str, Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) UpperCamelCase__ :Any = None if self.use_input_mask: UpperCamelCase__ :int = random_attention_mask([self.batch_size, self.text_seq_length] ) UpperCamelCase__ :Optional[Any] = None if self.use_token_type_ids: UpperCamelCase__ :Optional[int] = ids_tensor([self.batch_size, self.text_seq_length] , self.type_vocab_size ) UpperCamelCase__ :List[str] = None UpperCamelCase__ :Union[str, Any] = None if self.use_labels: UpperCamelCase__ :Union[str, Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size ) UpperCamelCase__ :Union[str, Any] = ids_tensor([self.batch_size, self.text_seq_length] , self.num_labels ) UpperCamelCase__ :Optional[int] = LayoutLMvaConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , coordinate_size=self.coordinate_size , shape_size=self.shape_size , input_size=self.image_size , patch_size=self.patch_size , ) return config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels def __a ( self :List[Any] , lowerCamelCase__ :str , lowerCamelCase__ :Optional[int] , lowerCamelCase__ :Dict , lowerCamelCase__ :str , lowerCamelCase__ :int , lowerCamelCase__ :Any ): UpperCamelCase__ :Dict = TFLayoutLMvaModel(config=lowerCamelCase__ ) # text + image UpperCamelCase__ :Tuple = model(lowerCamelCase__ , pixel_values=lowerCamelCase__ , training=lowerCamelCase__ ) UpperCamelCase__ :Tuple = model( lowerCamelCase__ , bbox=lowerCamelCase__ , pixel_values=lowerCamelCase__ , attention_mask=lowerCamelCase__ , token_type_ids=lowerCamelCase__ , training=lowerCamelCase__ , ) UpperCamelCase__ :str = model(lowerCamelCase__ , bbox=lowerCamelCase__ , pixel_values=lowerCamelCase__ , training=lowerCamelCase__ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) # text only UpperCamelCase__ :Optional[int] = model(lowerCamelCase__ , training=lowerCamelCase__ ) self.parent.assertEqual( result.last_hidden_state.shape , (self.batch_size, self.text_seq_length, self.hidden_size) ) # image only UpperCamelCase__ :Tuple = model({"""pixel_values""": pixel_values} , training=lowerCamelCase__ ) self.parent.assertEqual( result.last_hidden_state.shape , (self.batch_size, self.image_seq_length, self.hidden_size) ) def __a ( self :Dict , lowerCamelCase__ :str , 
lowerCamelCase__ :Union[str, Any] , lowerCamelCase__ :Dict , lowerCamelCase__ :Union[str, Any] , lowerCamelCase__ :str , lowerCamelCase__ :Optional[Any] , lowerCamelCase__ :str ): UpperCamelCase__ :Optional[Any] = self.num_labels UpperCamelCase__ :List[Any] = TFLayoutLMvaForSequenceClassification(config=lowerCamelCase__ ) UpperCamelCase__ :List[str] = model( lowerCamelCase__ , bbox=lowerCamelCase__ , pixel_values=lowerCamelCase__ , attention_mask=lowerCamelCase__ , token_type_ids=lowerCamelCase__ , labels=lowerCamelCase__ , training=lowerCamelCase__ , ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def __a ( self :List[str] , lowerCamelCase__ :List[str] , lowerCamelCase__ :Union[str, Any] , lowerCamelCase__ :Dict , lowerCamelCase__ :List[Any] , lowerCamelCase__ :Optional[int] , lowerCamelCase__ :Tuple , lowerCamelCase__ :List[str] ): UpperCamelCase__ :Union[str, Any] = self.num_labels UpperCamelCase__ :Dict = TFLayoutLMvaForTokenClassification(config=lowerCamelCase__ ) UpperCamelCase__ :Optional[Any] = model( lowerCamelCase__ , bbox=lowerCamelCase__ , pixel_values=lowerCamelCase__ , attention_mask=lowerCamelCase__ , token_type_ids=lowerCamelCase__ , labels=lowerCamelCase__ , training=lowerCamelCase__ , ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.text_seq_length, self.num_labels) ) def __a ( self :int , lowerCamelCase__ :Dict , lowerCamelCase__ :Optional[Any] , lowerCamelCase__ :Optional[Any] , lowerCamelCase__ :Any , lowerCamelCase__ :Dict , lowerCamelCase__ :Tuple , lowerCamelCase__ :Tuple ): UpperCamelCase__ :Dict = 2 UpperCamelCase__ :Tuple = TFLayoutLMvaForQuestionAnswering(config=lowerCamelCase__ ) UpperCamelCase__ :int = model( lowerCamelCase__ , bbox=lowerCamelCase__ , pixel_values=lowerCamelCase__ , attention_mask=lowerCamelCase__ , token_type_ids=lowerCamelCase__ , start_positions=lowerCamelCase__ , end_positions=lowerCamelCase__ , training=lowerCamelCase__ , ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def __a ( self :List[Any] ): UpperCamelCase__ :Union[str, Any] = self.prepare_config_and_inputs() ((UpperCamelCase__) , (UpperCamelCase__) , (UpperCamelCase__) , (UpperCamelCase__) , (UpperCamelCase__) , (UpperCamelCase__) , (UpperCamelCase__) , (UpperCamelCase__)) :Any = config_and_inputs UpperCamelCase__ :List[str] = { """input_ids""": input_ids, """bbox""": bbox, """pixel_values""": pixel_values, """token_type_ids""": token_type_ids, """attention_mask""": input_mask, } return config, inputs_dict @require_tf class lowerCAmelCase_ ( lowercase , lowercase , unittest.TestCase ): """simple docstring""" _snake_case : Dict = ( ( TFLayoutLMvaModel, TFLayoutLMvaForQuestionAnswering, TFLayoutLMvaForSequenceClassification, TFLayoutLMvaForTokenClassification, ) if is_tf_available() else () ) _snake_case : Dict = ( {"""document-question-answering""": TFLayoutLMvaForQuestionAnswering, """feature-extraction""": TFLayoutLMvaModel} if is_tf_available() else {} ) _snake_case : Optional[int] = False _snake_case : List[str] = False _snake_case : Tuple = False def __a ( self :str , lowerCamelCase__ :Optional[int] , lowerCamelCase__ :Union[str, Any] , lowerCamelCase__ :Optional[Any] , lowerCamelCase__ :Tuple , lowerCamelCase__ :int ): return True def __a ( self :Optional[int] , lowerCamelCase__ :int , lowerCamelCase__ :List[str] , lowerCamelCase__ :Optional[int]=False ): UpperCamelCase__ :List[str] = 
copy.deepcopy(lowerCamelCase__ ) if model_class in get_values(lowerCamelCase__ ): UpperCamelCase__ :Optional[int] = { k: tf.tile(tf.expand_dims(lowerCamelCase__ , 1 ) , (1, self.model_tester.num_choices) + (1,) * (v.ndim - 1) ) if isinstance(lowerCamelCase__ , tf.Tensor ) and v.ndim > 0 else v for k, v in inputs_dict.items() } if return_labels: if model_class in get_values(lowerCamelCase__ ): UpperCamelCase__ :str = tf.ones(self.model_tester.batch_size , dtype=tf.intaa ) elif model_class in get_values(lowerCamelCase__ ): UpperCamelCase__ :List[str] = tf.zeros(self.model_tester.batch_size , dtype=tf.intaa ) UpperCamelCase__ :Union[str, Any] = tf.zeros(self.model_tester.batch_size , dtype=tf.intaa ) elif model_class in get_values(lowerCamelCase__ ): UpperCamelCase__ :Optional[Any] = tf.zeros(self.model_tester.batch_size , dtype=tf.intaa ) elif model_class in get_values(lowerCamelCase__ ): UpperCamelCase__ :Tuple = tf.zeros( (self.model_tester.batch_size, self.model_tester.text_seq_length) , dtype=tf.intaa ) return inputs_dict def __a ( self :Dict ): UpperCamelCase__ :List[Any] = TFLayoutLMvaModelTester(self ) UpperCamelCase__ :Optional[int] = ConfigTester(self , config_class=lowerCamelCase__ , hidden_size=37 ) def __a ( self :Any ): self.config_tester.run_common_tests() def __a ( self :Optional[int] ): UpperCamelCase__ , UpperCamelCase__ :Any = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: UpperCamelCase__ :Optional[int] = model_class(lowerCamelCase__ ) if getattr(lowerCamelCase__ , """hf_compute_loss""" , lowerCamelCase__ ): # The number of elements in the loss should be the same as the number of elements in the label UpperCamelCase__ :Union[str, Any] = self._prepare_for_class(inputs_dict.copy() , lowerCamelCase__ , return_labels=lowerCamelCase__ ) UpperCamelCase__ :int = prepared_for_class[ sorted(prepared_for_class.keys() - inputs_dict.keys() , reverse=lowerCamelCase__ )[0] ] UpperCamelCase__ :Union[str, Any] = added_label.shape.as_list()[:1] # Test that model correctly compute the loss with kwargs UpperCamelCase__ :List[Any] = self._prepare_for_class(inputs_dict.copy() , lowerCamelCase__ , return_labels=lowerCamelCase__ ) UpperCamelCase__ :Optional[Any] = prepared_for_class.pop("""input_ids""" ) UpperCamelCase__ :List[str] = model(lowerCamelCase__ , **lowerCamelCase__ )[0] self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1] ) # Test that model correctly compute the loss when we mask some positions UpperCamelCase__ :Union[str, Any] = self._prepare_for_class(inputs_dict.copy() , lowerCamelCase__ , return_labels=lowerCamelCase__ ) UpperCamelCase__ :Optional[Any] = prepared_for_class.pop("""input_ids""" ) if "labels" in prepared_for_class: UpperCamelCase__ :List[str] = prepared_for_class["""labels"""].numpy() if len(labels.shape ) > 1 and labels.shape[1] != 1: UpperCamelCase__ :Optional[Any] = -1_00 UpperCamelCase__ :Union[str, Any] = tf.convert_to_tensor(lowerCamelCase__ ) UpperCamelCase__ :Tuple = model(lowerCamelCase__ , **lowerCamelCase__ )[0] self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1] ) self.assertTrue(not np.any(np.isnan(loss.numpy() ) ) ) # Test that model correctly compute the loss with a dict UpperCamelCase__ :Optional[Any] = self._prepare_for_class(inputs_dict.copy() , lowerCamelCase__ , return_labels=lowerCamelCase__ ) UpperCamelCase__ :Union[str, Any] = model(lowerCamelCase__ )[0] self.assertTrue(loss.shape.as_list() == expected_loss_size or 
loss.shape.as_list() == [1] ) # Test that model correctly compute the loss with a tuple UpperCamelCase__ :Dict = self._prepare_for_class(inputs_dict.copy() , lowerCamelCase__ , return_labels=lowerCamelCase__ ) # Get keys that were added with the _prepare_for_class function UpperCamelCase__ :str = prepared_for_class.keys() - inputs_dict.keys() UpperCamelCase__ :Tuple = inspect.signature(model.call ).parameters UpperCamelCase__ :str = list(signature.keys() ) # Create a dictionary holding the location of the tensors in the tuple UpperCamelCase__ :Any = {0: """input_ids"""} for label_key in label_keys: UpperCamelCase__ :Dict = signature_names.index(lowerCamelCase__ ) UpperCamelCase__ :Optional[int] = label_key UpperCamelCase__ :Optional[Any] = sorted(tuple_index_mapping.items() ) # Initialize a list with their default values, update the values and convert to a tuple UpperCamelCase__ :Any = [] for name in signature_names: if name != "kwargs": list_input.append(signature[name].default ) for index, value in sorted_tuple_index_mapping: UpperCamelCase__ :List[str] = prepared_for_class[value] UpperCamelCase__ :Union[str, Any] = tuple(lowerCamelCase__ ) # Send to model UpperCamelCase__ :str = model(tuple_input[:-1] )[0] self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1] ) def __a ( self :Optional[int] ): ( ( UpperCamelCase__ ) , ( UpperCamelCase__ ) , ( UpperCamelCase__ ) , ( UpperCamelCase__ ) , ( UpperCamelCase__ ) , ( UpperCamelCase__ ) , ( UpperCamelCase__ ) , ( UpperCamelCase__ ) , ) :Any = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) def __a ( self :Any ): ( ( UpperCamelCase__ ) , ( UpperCamelCase__ ) , ( UpperCamelCase__ ) , ( UpperCamelCase__ ) , ( UpperCamelCase__ ) , ( UpperCamelCase__ ) , ( UpperCamelCase__ ) , ( UpperCamelCase__ ) , ) :List[Any] = self.model_tester.prepare_config_and_inputs() for type in ["absolute", "relative_key", "relative_key_query"]: UpperCamelCase__ :Dict = type self.model_tester.create_and_check_model(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) def __a ( self :Tuple ): ( ( UpperCamelCase__ ) , ( UpperCamelCase__ ) , ( UpperCamelCase__ ) , ( UpperCamelCase__ ) , ( UpperCamelCase__ ) , ( UpperCamelCase__ ) , ( UpperCamelCase__ ) , ( UpperCamelCase__ ) , ) :int = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_sequence_classification( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) def __a ( self :Optional[int] ): ( ( UpperCamelCase__ ) , ( UpperCamelCase__ ) , ( UpperCamelCase__ ) , ( UpperCamelCase__ ) , ( UpperCamelCase__ ) , ( UpperCamelCase__ ) , ( UpperCamelCase__ ) , ( UpperCamelCase__ ) , ) :Union[str, Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_token_classification( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) def __a ( self :List[str] ): ( ( UpperCamelCase__ ) , ( UpperCamelCase__ ) , ( UpperCamelCase__ ) , ( UpperCamelCase__ ) , ( UpperCamelCase__ ) , ( UpperCamelCase__ ) , ( UpperCamelCase__ ) , ( UpperCamelCase__ ) , ) :Tuple = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_question_answering( 
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) @slow def __a ( self :Optional[int] ): for model_name in TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: UpperCamelCase__ :Dict = TFLayoutLMvaModel.from_pretrained(lowerCamelCase__ ) self.assertIsNotNone(lowerCamelCase__ ) def A ( ) -> List[str]: UpperCamelCase__ :List[Any] = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" ) return image @require_tf class lowerCAmelCase_ ( unittest.TestCase ): """simple docstring""" @cached_property def __a ( self :Optional[Any] ): return LayoutLMvaImageProcessor(apply_ocr=lowerCamelCase__ ) if is_vision_available() else None @slow def __a ( self :Dict ): UpperCamelCase__ :List[str] = TFLayoutLMvaModel.from_pretrained("""microsoft/layoutlmv3-base""" ) UpperCamelCase__ :List[Any] = self.default_image_processor UpperCamelCase__ :str = prepare_img() UpperCamelCase__ :Any = image_processor(images=lowerCamelCase__ , return_tensors="""tf""" ).pixel_values UpperCamelCase__ :str = tf.constant([[1, 2]] ) UpperCamelCase__ :Any = tf.expand_dims(tf.constant([[1, 2, 3, 4], [5, 6, 7, 8]] ) , axis=0 ) # forward pass UpperCamelCase__ :Dict = model(input_ids=lowerCamelCase__ , bbox=lowerCamelCase__ , pixel_values=lowerCamelCase__ , training=lowerCamelCase__ ) # verify the logits UpperCamelCase__ :int = (1, 1_99, 7_68) self.assertEqual(outputs.last_hidden_state.shape , lowerCamelCase__ ) UpperCamelCase__ :List[Any] = tf.constant( [[-0.0529, 0.3618, 0.1632], [-0.1587, -0.1667, -0.0400], [-0.1557, -0.1671, -0.0505]] ) self.assertTrue(np.allclose(outputs.last_hidden_state[0, :3, :3] , lowerCamelCase__ , atol=1e-4 ) )
45
0
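The ODE record above implements the modified Euler (Heun) scheme: an explicit Euler predictor followed by a trapezoidal corrector. The usage sketch below restates the stepper so the example runs standalone and checks it on y' = y, y(0) = 1, whose exact solution is e^x:

import math
import numpy as np

def heun(ode_func, y0, x0, step_size, x_end):
    n = int(np.ceil((x_end - x0) / step_size))
    y = np.zeros(n + 1)
    y[0], x = y0, x0
    for k in range(n):
        y_pred = y[k] + step_size * ode_func(x, y[k])            # Euler predictor
        y[k + 1] = y[k] + (step_size / 2) * (
            ode_func(x, y[k]) + ode_func(x + step_size, y_pred)  # trapezoidal corrector
        )
        x += step_size
    return y

ys = heun(lambda x, y: y, 1.0, 0.0, 0.1, 1.0)
print(ys[-1], math.e)  # ~2.7141 vs 2.71828 -- second-order accurate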
"""simple docstring""" import torch from torch import nn from transformers import CLIPPreTrainedModel, CLIPVisionModel from ...models.attention import BasicTransformerBlock from ...utils import logging __magic_name__ : int = logging.get_logger(__name__) # pylint: disable=invalid-name class lowercase__ ( __SCREAMING_SNAKE_CASE ): """simple docstring""" def __init__( self , _A , _A=7_6_8 ): '''simple docstring''' super().__init__(_A ) UpperCamelCase : Tuple = proj_size UpperCamelCase : Optional[int] = CLIPVisionModel(_A ) UpperCamelCase : Union[str, Any] = PaintByExampleMapper(_A ) UpperCamelCase : int = nn.LayerNorm(config.hidden_size ) UpperCamelCase : str = nn.Linear(config.hidden_size , self.proj_size ) # uncondition for scaling UpperCamelCase : Optional[int] = nn.Parameter(torch.randn((1, 1, self.proj_size) ) ) def _a ( self , _A , _A=False ): '''simple docstring''' UpperCamelCase : List[str] = self.model(pixel_values=_A ) UpperCamelCase : Tuple = clip_output.pooler_output UpperCamelCase : List[Any] = self.mapper(latent_states[:, None] ) UpperCamelCase : List[Any] = self.final_layer_norm(_A ) UpperCamelCase : Union[str, Any] = self.proj_out(_A ) if return_uncond_vector: return latent_states, self.uncond_vector return latent_states class lowercase__ ( nn.Module ): """simple docstring""" def __init__( self , _A ): '''simple docstring''' super().__init__() UpperCamelCase : Any = (config.num_hidden_layers + 1) // 5 UpperCamelCase : Optional[Any] = config.hidden_size UpperCamelCase : int = 1 UpperCamelCase : Optional[int] = nn.ModuleList( [ BasicTransformerBlock(_A , _A , _A , activation_fn="""gelu""" , attention_bias=_A ) for _ in range(_A ) ] ) def _a ( self , _A ): '''simple docstring''' for block in self.blocks: UpperCamelCase : Union[str, Any] = block(_A ) return hidden_states
102
import logging import os import sys from dataclasses import dataclass, field from typing import Optional import torch from datasets import load_dataset from torchvision.transforms import Compose, Lambda, Normalize, RandomHorizontalFlip, RandomResizedCrop, ToTensor from torchvision.transforms.functional import InterpolationMode import transformers from transformers import ( HfArgumentParser, Trainer, TrainingArguments, ViTImageProcessor, ViTMAEConfig, ViTMAEForPreTraining, ) from transformers.trainer_utils import get_last_checkpoint from transformers.utils import check_min_version, send_example_telemetry from transformers.utils.versions import require_version UpperCamelCase = logging.getLogger(__name__) # Will error if the minimal version of Transformers is not installed. Remove at your own risks. check_min_version("4.31.0") require_version("datasets>=1.8.0", "To fix: pip install -r examples/pytorch/image-pretraining/requirements.txt") @dataclass class lowerCAmelCase_ : """simple docstring""" _snake_case : Optional[str] = field( default="""cifar10""" , metadata={"""help""": """Name of a dataset from the datasets package"""} ) _snake_case : Optional[str] = field( default=lowercase , metadata={"""help""": """The configuration name of the dataset to use (via the datasets library)."""} ) _snake_case : Optional[str] = field( default=lowercase , metadata={"""help""": """The column name of the images in the files."""} ) _snake_case : Optional[str] = field(default=lowercase , metadata={"""help""": """A folder containing the training data."""} ) _snake_case : Optional[str] = field(default=lowercase , metadata={"""help""": """A folder containing the validation data."""} ) _snake_case : Optional[float] = field( default=0.15 , metadata={"""help""": """Percent to split off of train for validation."""} ) _snake_case : Optional[int] = field( default=lowercase , metadata={ """help""": ( """For debugging purposes or quicker training, truncate the number of training examples to this """ """value if set.""" ) } , ) _snake_case : Optional[int] = field( default=lowercase , metadata={ """help""": ( """For debugging purposes or quicker training, truncate the number of evaluation examples to this """ """value if set.""" ) } , ) def __a ( self :List[str] ): UpperCamelCase__ :Optional[Any] = {} if self.train_dir is not None: UpperCamelCase__ :int = self.train_dir if self.validation_dir is not None: UpperCamelCase__ :List[str] = self.validation_dir UpperCamelCase__ :Optional[int] = data_files if data_files else None @dataclass class lowerCAmelCase_ : """simple docstring""" _snake_case : str = field( default=lowercase , metadata={ """help""": ( """The model checkpoint for weights initialization.Don't set if you want to train a model from scratch.""" ) } , ) _snake_case : Optional[str] = field( default=lowercase , metadata={"""help""": """Pretrained config name or path if not the same as model_name_or_path"""} ) _snake_case : Optional[str] = field( default=lowercase , metadata={ """help""": ( """Override some existing default config settings when a model is trained from scratch. 
Example: """ """n_embd=10,resid_pdrop=0.2,scale_attn_weights=false,summary_type=cls_index""" ) } , ) _snake_case : Optional[str] = field( default=lowercase , metadata={"""help""": """Where do you want to store the pretrained models downloaded from s3"""} ) _snake_case : str = field( default="""main""" , metadata={"""help""": """The specific model version to use (can be a branch name, tag name or commit id)."""} , ) _snake_case : str = field(default=lowercase , metadata={"""help""": """Name or path of preprocessor config."""} ) _snake_case : bool = field( default=lowercase , metadata={ """help""": ( """Will use the token generated when running `huggingface-cli login` (necessary to use this script """ """with private models).""" ) } , ) _snake_case : float = field( default=0.75 , metadata={"""help""": """The ratio of the number of masked tokens in the input sequence."""} ) _snake_case : bool = field( default=lowercase , metadata={"""help""": """Whether or not to train with normalized pixel values as target."""} ) @dataclass class lowerCAmelCase_ ( lowercase ): """simple docstring""" _snake_case : float = field( default=1e-3 , metadata={"""help""": """Base learning rate: absolute_lr = base_lr * total_batch_size / 256."""} ) def A ( lowercase__ : Union[str, Any] ) -> Dict: UpperCamelCase__ :Union[str, Any] = torch.stack([example["""pixel_values"""] for example in examples] ) return {"pixel_values": pixel_values} def A ( ) -> Optional[int]: # See all possible arguments in src/transformers/training_args.py # or by passing the --help flag to this script. # We now keep distinct sets of args, for a cleaner separation of concerns. UpperCamelCase__ :Optional[int] = HfArgumentParser((ModelArguments, DataTrainingArguments, CustomTrainingArguments) ) if len(sys.argv ) == 2 and sys.argv[1].endswith(""".json""" ): # If we pass only one argument to the script and it's the path to a json file, # let's parse it to get our arguments. UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ :str = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) ) else: UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ :Union[str, Any] = parser.parse_args_into_dataclasses() # Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The # information sent is the one passed as arguments along with your Python/PyTorch versions. send_example_telemetry("""run_mae""" , lowercase__ , lowercase__ ) # Setup logging logging.basicConfig( format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""" , datefmt="""%m/%d/%Y %H:%M:%S""" , handlers=[logging.StreamHandler(sys.stdout )] , ) if training_args.should_log: # The default of training_args.log_level is passive, so we set log level at info here to have that default. transformers.utils.logging.set_verbosity_info() UpperCamelCase__ :List[str] = training_args.get_process_log_level() logger.setLevel(lowercase__ ) transformers.utils.logging.set_verbosity(lowercase__ ) transformers.utils.logging.enable_default_handler() transformers.utils.logging.enable_explicit_format() # Log on each process the small summary: logger.warning( f"""Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}""" + f"""distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}""" ) logger.info(f"""Training/evaluation parameters {training_args}""" ) # Detecting last checkpoint. 
UpperCamelCase__ :Union[str, Any] = None if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir: UpperCamelCase__ :List[str] = get_last_checkpoint(training_args.output_dir ) if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0: raise ValueError( f"""Output directory ({training_args.output_dir}) already exists and is not empty. """ """Use --overwrite_output_dir to overcome.""" ) elif last_checkpoint is not None and training_args.resume_from_checkpoint is None: logger.info( f"""Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change """ """the `--output_dir` or add `--overwrite_output_dir` to train from scratch.""" ) # Initialize our dataset. UpperCamelCase__ :Tuple = load_dataset( data_args.dataset_name , data_args.dataset_config_name , data_files=data_args.data_files , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , ) # If we don't have a validation split, split off a percentage of train as validation. UpperCamelCase__ :int = None if """validation""" in ds.keys() else data_args.train_val_split if isinstance(data_args.train_val_split , lowercase__ ) and data_args.train_val_split > 0.0: UpperCamelCase__ :Optional[Any] = ds["""train"""].train_test_split(data_args.train_val_split ) UpperCamelCase__ :Union[str, Any] = split["""train"""] UpperCamelCase__ :Any = split["""test"""] # Load pretrained model and image processor # # Distributed training: # The .from_pretrained methods guarantee that only one local process can concurrently # download model & vocab. UpperCamelCase__ :Optional[int] = { """cache_dir""": model_args.cache_dir, """revision""": model_args.model_revision, """use_auth_token""": True if model_args.use_auth_token else None, } if model_args.config_name: UpperCamelCase__ :Any = ViTMAEConfig.from_pretrained(model_args.config_name , **lowercase__ ) elif model_args.model_name_or_path: UpperCamelCase__ :Union[str, Any] = ViTMAEConfig.from_pretrained(model_args.model_name_or_path , **lowercase__ ) else: UpperCamelCase__ :Optional[Any] = ViTMAEConfig() logger.warning("""You are instantiating a new config instance from scratch.""" ) if model_args.config_overrides is not None: logger.info(f"""Overriding config: {model_args.config_overrides}""" ) config.update_from_string(model_args.config_overrides ) logger.info(f"""New config: {config}""" ) # adapt config config.update( { """mask_ratio""": model_args.mask_ratio, """norm_pix_loss""": model_args.norm_pix_loss, } ) # create image processor if model_args.image_processor_name: UpperCamelCase__ :str = ViTImageProcessor.from_pretrained(model_args.image_processor_name , **lowercase__ ) elif model_args.model_name_or_path: UpperCamelCase__ :Dict = ViTImageProcessor.from_pretrained(model_args.model_name_or_path , **lowercase__ ) else: UpperCamelCase__ :Tuple = ViTImageProcessor() # create model if model_args.model_name_or_path: UpperCamelCase__ :Any = ViTMAEForPreTraining.from_pretrained( model_args.model_name_or_path , from_tf=bool(""".ckpt""" in model_args.model_name_or_path ) , config=lowercase__ , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , ) else: logger.info("""Training new model from scratch""" ) UpperCamelCase__ :Optional[int] = ViTMAEForPreTraining(lowercase__ ) if training_args.do_train: UpperCamelCase__ :Optional[Any] = ds["""train"""].column_names else: UpperCamelCase__ :Union[str, Any] = 
ds["""validation"""].column_names if data_args.image_column_name is not None: UpperCamelCase__ :Union[str, Any] = data_args.image_column_name elif "image" in column_names: UpperCamelCase__ :Optional[Any] = """image""" elif "img" in column_names: UpperCamelCase__ :List[str] = """img""" else: UpperCamelCase__ :List[Any] = column_names[0] # transformations as done in original MAE paper # source: https://github.com/facebookresearch/mae/blob/main/main_pretrain.py if "shortest_edge" in image_processor.size: UpperCamelCase__ :List[str] = image_processor.size["""shortest_edge"""] else: UpperCamelCase__ :int = (image_processor.size["""height"""], image_processor.size["""width"""]) UpperCamelCase__ :Any = Compose( [ Lambda(lambda lowercase__ : img.convert("""RGB""" ) if img.mode != "RGB" else img ), RandomResizedCrop(lowercase__ , scale=(0.2, 1.0) , interpolation=InterpolationMode.BICUBIC ), RandomHorizontalFlip(), ToTensor(), Normalize(mean=image_processor.image_mean , std=image_processor.image_std ), ] ) def preprocess_images(lowercase__ : Tuple ): UpperCamelCase__ :List[Any] = [transforms(lowercase__ ) for image in examples[image_column_name]] return examples if training_args.do_train: if "train" not in ds: raise ValueError("""--do_train requires a train dataset""" ) if data_args.max_train_samples is not None: UpperCamelCase__ :Optional[int] = ds["""train"""].shuffle(seed=training_args.seed ).select(range(data_args.max_train_samples ) ) # Set the training transforms ds["train"].set_transform(lowercase__ ) if training_args.do_eval: if "validation" not in ds: raise ValueError("""--do_eval requires a validation dataset""" ) if data_args.max_eval_samples is not None: UpperCamelCase__ :Optional[Any] = ( ds["""validation"""].shuffle(seed=training_args.seed ).select(range(data_args.max_eval_samples ) ) ) # Set the validation transforms ds["validation"].set_transform(lowercase__ ) # Compute absolute learning rate UpperCamelCase__ :Tuple = ( training_args.train_batch_size * training_args.gradient_accumulation_steps * training_args.world_size ) if training_args.base_learning_rate is not None: UpperCamelCase__ :Any = training_args.base_learning_rate * total_train_batch_size / 256 # Initialize our trainer UpperCamelCase__ :Union[str, Any] = Trainer( model=lowercase__ , args=lowercase__ , train_dataset=ds["""train"""] if training_args.do_train else None , eval_dataset=ds["""validation"""] if training_args.do_eval else None , tokenizer=lowercase__ , data_collator=lowercase__ , ) # Training if training_args.do_train: UpperCamelCase__ :Any = None if training_args.resume_from_checkpoint is not None: UpperCamelCase__ :int = training_args.resume_from_checkpoint elif last_checkpoint is not None: UpperCamelCase__ :Dict = last_checkpoint UpperCamelCase__ :Union[str, Any] = trainer.train(resume_from_checkpoint=lowercase__ ) trainer.save_model() trainer.log_metrics("""train""" , train_result.metrics ) trainer.save_metrics("""train""" , train_result.metrics ) trainer.save_state() # Evaluation if training_args.do_eval: UpperCamelCase__ :int = trainer.evaluate() trainer.log_metrics("""eval""" , lowercase__ ) trainer.save_metrics("""eval""" , lowercase__ ) # Write model card and (optionally) push to hub UpperCamelCase__ :Optional[int] = { """tasks""": """masked-auto-encoding""", """dataset""": data_args.dataset_name, """tags""": ["""masked-auto-encoding"""], } if training_args.push_to_hub: trainer.push_to_hub(**lowercase__ ) else: trainer.create_model_card(**lowercase__ ) def A ( lowercase__ : Union[str, Any] ) -> Dict: # 
For xla_spawn (TPUs) main() if __name__ == "__main__": main()
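The absolute learning rate above follows the standard linear scaling rule; a minimal sketch with hypothetical values (the batch size, accumulation steps, and world size are made up for illustration):

# Sketch of the base_lr * total_batch / 256 scaling applied above; every value here is hypothetical.
base_learning_rate = 1.5e-4  # reference rate per 256 samples
train_batch_size = 64
gradient_accumulation_steps = 2
world_size = 4  # number of distributed processes

total_train_batch_size = train_batch_size * gradient_accumulation_steps * world_size  # 512
learning_rate = base_learning_rate * total_train_batch_size / 256
print(learning_rate)  # 0.0003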
45
0
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available snake_case = { '''configuration_rag''': ['''RagConfig'''], '''retrieval_rag''': ['''RagRetriever'''], '''tokenization_rag''': ['''RagTokenizer'''], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: snake_case = [ '''RagModel''', '''RagPreTrainedModel''', '''RagSequenceForGeneration''', '''RagTokenForGeneration''', ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: snake_case = [ '''TFRagModel''', '''TFRagPreTrainedModel''', '''TFRagSequenceForGeneration''', '''TFRagTokenForGeneration''', ] if TYPE_CHECKING: from .configuration_rag import RagConfig from .retrieval_rag import RagRetriever from .tokenization_rag import RagTokenizer try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_rag import RagModel, RagPreTrainedModel, RagSequenceForGeneration, RagTokenForGeneration try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_rag import ( TFRagModel, TFRagPreTrainedModel, TFRagSequenceForGeneration, TFRagTokenForGeneration, ) else: import sys snake_case = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
103
from __future__ import annotations def A ( lowercase__ : int ) -> list[int]: UpperCamelCase__ :Union[str, Any] = [True] * limit UpperCamelCase__ :int = False UpperCamelCase__ :Optional[Any] = False UpperCamelCase__ :str = True for i in range(3 , int(limit**0.5 + 1 ) , 2 ): UpperCamelCase__ :List[Any] = i * 2 while index < limit: UpperCamelCase__ :Tuple = False UpperCamelCase__ :Tuple = index + i UpperCamelCase__ :str = [2] for i in range(3 , lowercase__ , 2 ): if is_prime[i]: primes.append(lowercase__ ) return primes def A ( lowercase__ : int = 100_0000 ) -> int: UpperCamelCase__ :Any = prime_sieve(lowercase__ ) UpperCamelCase__ :Optional[int] = 0 UpperCamelCase__ :Optional[Any] = 0 for i in range(len(lowercase__ ) ): for j in range(i + length , len(lowercase__ ) ): UpperCamelCase__ :Any = sum(primes[i:j] ) if sol >= ceiling: break if sol in primes: UpperCamelCase__ :Union[str, Any] = j - i UpperCamelCase__ :Any = sol return largest if __name__ == "__main__": print(f'''{solution() = }''')
45
0
"""simple docstring""" import unicodedata from dataclasses import dataclass from typing import Optional, Union import numpy as np from transformers.data.data_collator import DataCollatorMixin from transformers.file_utils import PaddingStrategy from transformers.tokenization_utils_base import PreTrainedTokenizerBase def _lowerCamelCase ( UpperCAmelCase_ : Union[str, Any], UpperCAmelCase_ : str, UpperCAmelCase_ : List[Any], UpperCAmelCase_ : Any ) -> Any: """simple docstring""" if isinstance(UpperCAmelCase_, UpperCAmelCase_ ): A__ = np.full((len(UpperCAmelCase_ ), sequence_length, 2), UpperCAmelCase_ ) else: A__ = np.full((len(UpperCAmelCase_ ), sequence_length), UpperCAmelCase_ ) for i, tensor in enumerate(UpperCAmelCase_ ): if padding_side == "right": if isinstance(UpperCAmelCase_, UpperCAmelCase_ ): A__ = tensor[:sequence_length] else: A__ = tensor[:sequence_length] else: if isinstance(UpperCAmelCase_, UpperCAmelCase_ ): A__ = tensor[:sequence_length] else: A__ = tensor[:sequence_length] return out_tensor.tolist() def _lowerCamelCase ( UpperCAmelCase_ : int ) -> List[Any]: """simple docstring""" A__ = ord(UpperCAmelCase_ ) if (cp >= 33 and cp <= 47) or (cp >= 58 and cp <= 64) or (cp >= 91 and cp <= 96) or (cp >= 123 and cp <= 126): return True A__ = unicodedata.category(UpperCAmelCase_ ) if cat.startswith("P" ): return True return False @dataclass class UpperCamelCase__ ( _lowerCAmelCase ): """simple docstring""" A__ : PreTrainedTokenizerBase A__ : Union[bool, str, PaddingStrategy] = True A__ : Optional[int] = None A__ : Optional[int] = None A__ : int = -1_0_0 A__ : str = "pt" def snake_case__ ( self , SCREAMING_SNAKE_CASE__ ) -> Optional[Any]: import torch A__ = "label" if "label" in features[0].keys() else "labels" A__ = [feature[label_name] for feature in features] if label_name in features[0].keys() else None A__ = self.tokenizer.pad( SCREAMING_SNAKE_CASE__ , padding=self.padding , max_length=self.max_length , pad_to_multiple_of=self.pad_to_multiple_of , return_tensors="pt" if labels is None else None , ) if labels is None: return batch A__ = torch.tensor(batch["entity_ids"] ).shape[1] A__ = self.tokenizer.padding_side if padding_side == "right": A__ = [ list(SCREAMING_SNAKE_CASE__ ) + [self.label_pad_token_id] * (sequence_length - len(SCREAMING_SNAKE_CASE__ )) for label in labels ] else: A__ = [ [self.label_pad_token_id] * (sequence_length - len(SCREAMING_SNAKE_CASE__ )) + list(SCREAMING_SNAKE_CASE__ ) for label in labels ] A__ = [feature["ner_tags"] for feature in features] A__ = padding_tensor(SCREAMING_SNAKE_CASE__ , -1 , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) A__ = [feature["original_entity_spans"] for feature in features] A__ = padding_tensor(SCREAMING_SNAKE_CASE__ , (-1, -1) , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) A__ = {k: torch.tensor(SCREAMING_SNAKE_CASE__ , dtype=torch.intaa ) for k, v in batch.items()} return batch
104
import unittest from transformers import GPTNeoXJapaneseConfig, is_torch_available from transformers.models.gpt_neox_japanese.tokenization_gpt_neox_japanese import GPTNeoXJapaneseTokenizer from transformers.testing_utils import require_torch, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import GPTNeoXJapaneseForCausalLM, GPTNeoXJapaneseModel class lowerCAmelCase_ : """simple docstring""" def __init__( self :Optional[Any] , lowerCamelCase__ :Optional[int] , lowerCamelCase__ :Tuple=13 , lowerCamelCase__ :Tuple=7 , lowerCamelCase__ :Optional[Any]=True , lowerCamelCase__ :Union[str, Any]=True , lowerCamelCase__ :Optional[int]=True , lowerCamelCase__ :List[Any]=True , lowerCamelCase__ :List[str]=99 , lowerCamelCase__ :int=32 , lowerCamelCase__ :List[Any]=5 , lowerCamelCase__ :Tuple=4 , lowerCamelCase__ :List[Any]=4 , lowerCamelCase__ :str="gelu" , lowerCamelCase__ :Optional[Any]=0.0 , lowerCamelCase__ :Optional[int]=0.1 , lowerCamelCase__ :str=True , lowerCamelCase__ :Dict=5_12 , lowerCamelCase__ :Optional[Any]=16 , lowerCamelCase__ :Optional[Any]=2 , lowerCamelCase__ :Union[str, Any]=0.02 , lowerCamelCase__ :Union[str, Any]=3 , lowerCamelCase__ :int=4 , lowerCamelCase__ :str=None , ): UpperCamelCase__ :Optional[Any] = parent UpperCamelCase__ :Dict = batch_size UpperCamelCase__ :Tuple = seq_length UpperCamelCase__ :Dict = is_training UpperCamelCase__ :List[str] = use_input_mask UpperCamelCase__ :Optional[Any] = use_token_type_ids UpperCamelCase__ :Tuple = use_labels UpperCamelCase__ :int = vocab_size UpperCamelCase__ :Tuple = hidden_size UpperCamelCase__ :Optional[Any] = num_hidden_layers UpperCamelCase__ :int = num_attention_heads UpperCamelCase__ :Optional[int] = intermediate_multiple_size UpperCamelCase__ :Optional[Any] = hidden_act UpperCamelCase__ :Optional[int] = hidden_dropout UpperCamelCase__ :List[Any] = attention_dropout UpperCamelCase__ :List[str] = weight_tying UpperCamelCase__ :List[str] = max_position_embeddings UpperCamelCase__ :Dict = type_vocab_size UpperCamelCase__ :List[Any] = type_sequence_label_size UpperCamelCase__ :List[str] = initializer_range UpperCamelCase__ :int = num_labels UpperCamelCase__ :Dict = num_choices UpperCamelCase__ :Any = scope def __a ( self :Any ): UpperCamelCase__ :Tuple = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) UpperCamelCase__ :str = None if self.use_input_mask: UpperCamelCase__ :Optional[int] = random_attention_mask([self.batch_size, self.seq_length] ) UpperCamelCase__ :Union[str, Any] = None if self.use_labels: UpperCamelCase__ :Tuple = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) UpperCamelCase__ :Optional[Any] = self.get_config() return config, input_ids, input_mask, token_labels def __a ( self :Union[str, Any] ): return GPTNeoXJapaneseConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_multiple_size=self.intermediate_multiple_size , hidden_act=self.hidden_act , hidden_dropout=self.hidden_dropout , attention_dropout=self.attention_dropout , weight_tying=self.weight_tying , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=lowerCamelCase__ , initializer_range=self.initializer_range , ) def __a ( self 
:Union[str, Any] ): UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ :Optional[Any] = self.prepare_config_and_inputs() UpperCamelCase__ :Optional[int] = True return config, input_ids, input_mask, token_labels def __a ( self :List[str] , lowerCamelCase__ :List[Any] , lowerCamelCase__ :Any , lowerCamelCase__ :Any ): UpperCamelCase__ :Union[str, Any] = GPTNeoXJapaneseModel(config=lowerCamelCase__ ) model.to(lowerCamelCase__ ) model.eval() UpperCamelCase__ :Tuple = model(lowerCamelCase__ , attention_mask=lowerCamelCase__ ) UpperCamelCase__ :Optional[Any] = model(lowerCamelCase__ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def __a ( self :Dict , lowerCamelCase__ :Dict , lowerCamelCase__ :Optional[Any] , lowerCamelCase__ :List[Any] ): UpperCamelCase__ :List[str] = True UpperCamelCase__ :int = GPTNeoXJapaneseModel(lowerCamelCase__ ) model.to(lowerCamelCase__ ) model.eval() UpperCamelCase__ :Union[str, Any] = model(lowerCamelCase__ , attention_mask=lowerCamelCase__ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def __a ( self :List[Any] , lowerCamelCase__ :Tuple , lowerCamelCase__ :Optional[Any] , lowerCamelCase__ :Any , lowerCamelCase__ :Optional[Any] ): UpperCamelCase__ :Any = GPTNeoXJapaneseForCausalLM(config=lowerCamelCase__ ) model.to(lowerCamelCase__ ) model.eval() UpperCamelCase__ :Tuple = model(lowerCamelCase__ , attention_mask=lowerCamelCase__ , labels=lowerCamelCase__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def __a ( self :Any , lowerCamelCase__ :Optional[int] , lowerCamelCase__ :Optional[Any] , lowerCamelCase__ :List[str] ): UpperCamelCase__ :Union[str, Any] = True UpperCamelCase__ :List[str] = GPTNeoXJapaneseForCausalLM(config=lowerCamelCase__ ) model.to(lowerCamelCase__ ) model.eval() # first forward pass UpperCamelCase__ :Optional[Any] = model(lowerCamelCase__ , attention_mask=lowerCamelCase__ , use_cache=lowerCamelCase__ ) UpperCamelCase__ :List[Any] = outputs.past_key_values # create hypothetical multiple next token and extent to next_input_ids UpperCamelCase__ :List[Any] = ids_tensor((self.batch_size, 3) , config.vocab_size ) UpperCamelCase__ :Optional[Any] = ids_tensor((self.batch_size, 3) , vocab_size=2 ) # append to next input_ids and UpperCamelCase__ :Union[str, Any] = torch.cat([input_ids, next_tokens] , dim=-1 ) UpperCamelCase__ :Optional[int] = torch.cat([input_mask, next_mask] , dim=-1 ) UpperCamelCase__ :Union[str, Any] = model(lowerCamelCase__ , attention_mask=lowerCamelCase__ , output_hidden_states=lowerCamelCase__ ) UpperCamelCase__ :Optional[int] = output_from_no_past["""hidden_states"""][0] UpperCamelCase__ :Union[str, Any] = model( lowerCamelCase__ , attention_mask=lowerCamelCase__ , past_key_values=lowerCamelCase__ , output_hidden_states=lowerCamelCase__ , )["""hidden_states"""][0] # select random slice UpperCamelCase__ :int = ids_tensor((1,) , output_from_past.shape[-1] ).item() UpperCamelCase__ :str = output_from_no_past[:, -3:, random_slice_idx].detach() UpperCamelCase__ :Any = output_from_past[:, :, random_slice_idx].detach() self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] ) # test that outputs are equal for slice self.parent.assertTrue(torch.allclose(lowerCamelCase__ , lowerCamelCase__ , atol=1e-3 ) ) def __a ( self :Tuple ): UpperCamelCase__ :int = self.prepare_config_and_inputs() UpperCamelCase__ , 
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ :List[Any] = config_and_inputs UpperCamelCase__ :Any = {"""input_ids""": input_ids, """attention_mask""": input_mask} return config, inputs_dict @require_torch class lowerCAmelCase_ ( lowercase , lowercase , unittest.TestCase ): """simple docstring""" _snake_case : Dict = (GPTNeoXJapaneseModel, GPTNeoXJapaneseForCausalLM) if is_torch_available() else () _snake_case : int = (GPTNeoXJapaneseForCausalLM,) if is_torch_available() else () _snake_case : str = ( {"""feature-extraction""": GPTNeoXJapaneseModel, """text-generation""": GPTNeoXJapaneseForCausalLM} if is_torch_available() else {} ) _snake_case : Union[str, Any] = False _snake_case : Dict = False _snake_case : List[str] = False _snake_case : Optional[int] = False def __a ( self :List[Any] ): UpperCamelCase__ :Tuple = GPTNeoXJapaneseModelTester(self ) UpperCamelCase__ :Optional[Any] = ConfigTester(self , config_class=lowerCamelCase__ , hidden_size=37 ) def __a ( self :Dict ): self.config_tester.run_common_tests() def __a ( self :Any ): UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ :Optional[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) def __a ( self :Any ): UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ :Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_decoder() self.model_tester.create_and_check_model_as_decoder(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) def __a ( self :Union[str, Any] ): # This regression test was failing with PyTorch < 1.3 UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ :List[str] = self.model_tester.prepare_config_and_inputs_for_decoder() UpperCamelCase__ :Dict = None self.model_tester.create_and_check_model_as_decoder(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) def __a ( self :List[str] ): UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ :int = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_decoder_model_past_large_inputs(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) def __a ( self :Union[str, Any] ): UpperCamelCase__ :int = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_causal_lm(*lowerCamelCase__ ) @slow def __a ( self :int ): UpperCamelCase__ :int = """abeja/gpt-neox-japanese-2.7b""" UpperCamelCase__ :List[Any] = ["""データサイエンティストとは、""", """100年後に必要とされる会社は、""", """フルリモートの環境で働くために必要なことは、""", """国境の長いトンネルを抜けると""", """美味しい日本食といえば、"""] UpperCamelCase__ :Union[str, Any] = [ """データサイエンティストとは、データを分析し、ビジネスに役立つ知見を導き出す専門家のことです。""", """100年後に必要とされる会社は、「人」が中心の会社です。""", """フルリモートの環境で働くために必要なことは、「自分の時間をコントロールする」ことです。""", """国境の長いトンネルを抜けると、そこは雪国だった。""", """美味しい日本食といえば、やっぱりお寿司ですよね。""", ] UpperCamelCase__ :Any = GPTNeoXJapaneseTokenizer.from_pretrained(lowerCamelCase__ ) UpperCamelCase__ :List[Any] = GPTNeoXJapaneseForCausalLM.from_pretrained(lowerCamelCase__ ) UpperCamelCase__ :Optional[Any] = [] for prompt in prompts: UpperCamelCase__ :str = tokenizer(lowerCamelCase__ , return_tensors="""pt""" ).input_ids UpperCamelCase__ :Union[str, Any] = model.generate(lowerCamelCase__ , max_length=50 ) UpperCamelCase__ :Dict = tokenizer.batch_decode(lowerCamelCase__ , skip_special_tokens=lowerCamelCase__ ) predicted_outputs += generated_string self.assertListEqual(lowerCamelCase__ , lowerCamelCase__ )
45
0
import os import numpy import onnx def __UpperCAmelCase ( lowerCamelCase_ : str , lowerCamelCase_ : Optional[Any] ) -> Optional[int]: """simple docstring""" SCREAMING_SNAKE_CASE_ : int = a.name SCREAMING_SNAKE_CASE_ : Dict = b.name SCREAMING_SNAKE_CASE_ : str = '' SCREAMING_SNAKE_CASE_ : Optional[Any] = '' SCREAMING_SNAKE_CASE_ : Optional[Any] = a == b SCREAMING_SNAKE_CASE_ : str = name_a SCREAMING_SNAKE_CASE_ : str = name_b return res def __UpperCAmelCase ( lowerCamelCase_ : int , lowerCamelCase_ : str , lowerCamelCase_ : Dict ) -> Optional[int]: """simple docstring""" for i, input_name in enumerate(node_proto.input ): if input_name == name: node_proto.input.insert(lowerCamelCase_ , lowerCamelCase_ ) node_proto.input.pop(i + 1 ) if node_proto.op_type == "If": _graph_replace_input_with(node_proto.attribute[0].g , lowerCamelCase_ , lowerCamelCase_ ) _graph_replace_input_with(node_proto.attribute[1].g , lowerCamelCase_ , lowerCamelCase_ ) if node_proto.op_type == "Loop": _graph_replace_input_with(node_proto.attribute[0].g , lowerCamelCase_ , lowerCamelCase_ ) def __UpperCAmelCase ( lowerCamelCase_ : Tuple , lowerCamelCase_ : Optional[int] , lowerCamelCase_ : Dict ) -> List[Any]: """simple docstring""" for n in graph_proto.node: _node_replace_input_with(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ) def __UpperCAmelCase ( lowerCamelCase_ : Tuple , lowerCamelCase_ : Tuple , lowerCamelCase_ : Any ) -> str: """simple docstring""" SCREAMING_SNAKE_CASE_ : str = list(model.graph.initializer ) SCREAMING_SNAKE_CASE_ : List[str] = list(model_without_ext.graph.initializer ) for i, ref_i in ind_to_replace: assert inits_with_data[i].name == inits[i].name assert inits_with_data[ref_i].name == inits[ref_i].name assert i > ref_i SCREAMING_SNAKE_CASE_ : List[str] = inits[i].name SCREAMING_SNAKE_CASE_ : str = inits[ref_i].name model_without_ext.graph.initializer.remove(inits[i] ) # for n in model.graph.node: _graph_replace_input_with(model_without_ext.graph , lowerCamelCase_ , lowerCamelCase_ ) def __UpperCAmelCase ( lowerCamelCase_ : Dict ) -> Dict: """simple docstring""" SCREAMING_SNAKE_CASE_ : Tuple = os.path.dirname(lowerCamelCase_ ) SCREAMING_SNAKE_CASE_ : Tuple = os.path.basename(lowerCamelCase_ ) SCREAMING_SNAKE_CASE_ : int = onnx.load(os.path.join(lowerCamelCase_ , lowerCamelCase_ ) ) SCREAMING_SNAKE_CASE_ : Any = list(model.graph.initializer ) SCREAMING_SNAKE_CASE_ : int = set() SCREAMING_SNAKE_CASE_ : Optional[Any] = {} SCREAMING_SNAKE_CASE_ : Dict = [] SCREAMING_SNAKE_CASE_ : int = 0 for i in range(len(lowerCamelCase_ ) ): if i in dup_set: continue for j in range(i + 1 , len(lowerCamelCase_ ) ): if j in dup_set: continue if _is_equal_tensor_proto(inits[i] , inits[j] ): dup_set.add(lowerCamelCase_ ) dup_set.add(lowerCamelCase_ ) SCREAMING_SNAKE_CASE_ : int = inits[j].data_type SCREAMING_SNAKE_CASE_ : str = numpy.prod(inits[j].dims ) if dtype == 1: mem_size *= 4 elif dtype == 6: mem_size *= 4 elif dtype == 7 or dtype == 11: mem_size *= 8 else: print('unexpected data type: ' , lowerCamelCase_ ) total_reduced_size += mem_size SCREAMING_SNAKE_CASE_ : int = inits[i].name SCREAMING_SNAKE_CASE_ : List[Any] = inits[j].name if name_i in dup_map: dup_map[name_i].append(lowerCamelCase_ ) else: SCREAMING_SNAKE_CASE_ : Optional[int] = [name_j] ind_to_replace.append((j, i) ) print('total reduced size: ' , total_reduced_size / 10_24 / 10_24 / 10_24 , 'GB' ) SCREAMING_SNAKE_CASE_ : int = sorted(lowerCamelCase_ ) _remove_dup_initializers_from_model(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ) 
SCREAMING_SNAKE_CASE_ : Union[str, Any] = 'optimized_' + model_file_name SCREAMING_SNAKE_CASE_ : Optional[int] = os.path.join(lowerCamelCase_ , lowerCamelCase_ ) onnx.save(lowerCamelCase_ , lowerCamelCase_ ) return new_model
105
import numpy as np from matplotlib import pyplot as plt from sklearn.datasets import load_iris from sklearn.metrics import ConfusionMatrixDisplay from sklearn.model_selection import train_test_split from xgboost import XGBClassifier def A ( lowercase__ : dict ) -> tuple: return (data["data"], data["target"]) def A ( lowercase__ : np.ndarray , lowercase__ : np.ndarray ) -> XGBClassifier: UpperCamelCase__ :Tuple = XGBClassifier() classifier.fit(lowercase__ , lowercase__ ) return classifier def A ( ) -> None: UpperCamelCase__ :str = load_iris() UpperCamelCase__ , UpperCamelCase__ :int = data_handling(lowercase__ ) UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ :int = train_test_split( lowercase__ , lowercase__ , test_size=0.25 ) UpperCamelCase__ :Optional[int] = iris["""target_names"""] # Create an XGBoost Classifier from the training data UpperCamelCase__ :Optional[Any] = xgboost(lowercase__ , lowercase__ ) # Display the confusion matrix of the classifier with both training and test sets ConfusionMatrixDisplay.from_estimator( lowercase__ , lowercase__ , lowercase__ , display_labels=lowercase__ , cmap="""Blues""" , normalize="""true""" , ) plt.title("""Normalized Confusion Matrix - IRIS Dataset""" ) plt.show() if __name__ == "__main__": import doctest doctest.testmod(verbose=True) main()
45
0
import tempfile import torch from diffusers import ( DEISMultistepScheduler, DPMSolverMultistepScheduler, DPMSolverSinglestepScheduler, UniPCMultistepScheduler, ) from .test_schedulers import SchedulerCommonTest class lowerCAmelCase__ ( _lowerCamelCase ): A_ : Optional[Any] = (DEISMultistepScheduler,) A_ : int = (('num_inference_steps', 2_5),) def __UpperCamelCase ( self : Union[str, Any] , **__UpperCamelCase : List[str] ) -> Dict: A = { 'num_train_timesteps': 1_000, 'beta_start': 0.0_0_0_1, 'beta_end': 0.0_2, 'beta_schedule': 'linear', 'solver_order': 2, } config.update(**__UpperCamelCase ) return config def __UpperCamelCase ( self : Union[str, Any] , __UpperCamelCase : str=0 , **__UpperCamelCase : Optional[int] ) -> List[Any]: A = dict(self.forward_default_kwargs ) A = kwargs.pop('num_inference_steps' , __UpperCamelCase ) A = self.dummy_sample A = 0.1 * sample A = [residual + 0.2, residual + 0.1_5, residual + 0.1_0] for scheduler_class in self.scheduler_classes: A = self.get_scheduler_config(**__UpperCamelCase ) A = scheduler_class(**__UpperCamelCase ) scheduler.set_timesteps(__UpperCamelCase ) # copy over dummy past residuals A = dummy_past_residuals[: scheduler.config.solver_order] with tempfile.TemporaryDirectory() as tmpdirname: scheduler.save_config(__UpperCamelCase ) A = scheduler_class.from_pretrained(__UpperCamelCase ) new_scheduler.set_timesteps(__UpperCamelCase ) # copy over dummy past residuals A = dummy_past_residuals[: new_scheduler.config.solver_order] A , A = sample, sample for t in range(__UpperCamelCase , time_step + scheduler.config.solver_order + 1 ): A = scheduler.step(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , **__UpperCamelCase ).prev_sample A = new_scheduler.step(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , **__UpperCamelCase ).prev_sample assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical" def __UpperCamelCase ( self : str ) -> Dict: pass def __UpperCamelCase ( self : List[Any] , __UpperCamelCase : int=0 , **__UpperCamelCase : Dict ) -> List[str]: A = dict(self.forward_default_kwargs ) A = kwargs.pop('num_inference_steps' , __UpperCamelCase ) A = self.dummy_sample A = 0.1 * sample A = [residual + 0.2, residual + 0.1_5, residual + 0.1_0] for scheduler_class in self.scheduler_classes: A = self.get_scheduler_config() A = scheduler_class(**__UpperCamelCase ) scheduler.set_timesteps(__UpperCamelCase ) # copy over dummy past residuals (must be after setting timesteps) A = dummy_past_residuals[: scheduler.config.solver_order] with tempfile.TemporaryDirectory() as tmpdirname: scheduler.save_config(__UpperCamelCase ) A = scheduler_class.from_pretrained(__UpperCamelCase ) # copy over dummy past residuals new_scheduler.set_timesteps(__UpperCamelCase ) # copy over dummy past residual (must be after setting timesteps) A = dummy_past_residuals[: new_scheduler.config.solver_order] A = scheduler.step(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , **__UpperCamelCase ).prev_sample A = new_scheduler.step(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , **__UpperCamelCase ).prev_sample assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical" def __UpperCamelCase ( self : Tuple , __UpperCamelCase : Tuple=None , **__UpperCamelCase : Dict ) -> Tuple: if scheduler is None: A = self.scheduler_classes[0] A = self.get_scheduler_config(**__UpperCamelCase ) A = scheduler_class(**__UpperCamelCase ) A = self.scheduler_classes[0] A = 
self.get_scheduler_config(**__UpperCamelCase ) A = scheduler_class(**__UpperCamelCase ) A = 10 A = self.dummy_model() A = self.dummy_sample_deter scheduler.set_timesteps(__UpperCamelCase ) for i, t in enumerate(scheduler.timesteps ): A = model(__UpperCamelCase , __UpperCamelCase ) A = scheduler.step(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase ).prev_sample return sample def __UpperCamelCase ( self : Union[str, Any] ) -> List[Any]: A = dict(self.forward_default_kwargs ) A = kwargs.pop('num_inference_steps' , __UpperCamelCase ) for scheduler_class in self.scheduler_classes: A = self.get_scheduler_config() A = scheduler_class(**__UpperCamelCase ) A = self.dummy_sample A = 0.1 * sample if num_inference_steps is not None and hasattr(__UpperCamelCase , 'set_timesteps' ): scheduler.set_timesteps(__UpperCamelCase ) elif num_inference_steps is not None and not hasattr(__UpperCamelCase , 'set_timesteps' ): A = num_inference_steps # copy over dummy past residuals (must be done after set_timesteps) A = [residual + 0.2, residual + 0.1_5, residual + 0.1_0] A = dummy_past_residuals[: scheduler.config.solver_order] A = scheduler.timesteps[5] A = scheduler.timesteps[6] A = scheduler.step(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , **__UpperCamelCase ).prev_sample A = scheduler.step(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , **__UpperCamelCase ).prev_sample self.assertEqual(output_a.shape , sample.shape ) self.assertEqual(output_a.shape , output_a.shape ) def __UpperCamelCase ( self : Any ) -> int: # make sure that iterating over schedulers with same config names gives same results # for defaults A = DEISMultistepScheduler(**self.get_scheduler_config() ) A = self.full_loop(scheduler=__UpperCamelCase ) A = torch.mean(torch.abs(__UpperCamelCase ) ) assert abs(result_mean.item() - 0.2_3_9_1_6 ) < 1e-3 A = DPMSolverSinglestepScheduler.from_config(scheduler.config ) A = DPMSolverMultistepScheduler.from_config(scheduler.config ) A = UniPCMultistepScheduler.from_config(scheduler.config ) A = DEISMultistepScheduler.from_config(scheduler.config ) A = self.full_loop(scheduler=__UpperCamelCase ) A = torch.mean(torch.abs(__UpperCamelCase ) ) assert abs(result_mean.item() - 0.2_3_9_1_6 ) < 1e-3 def __UpperCamelCase ( self : List[Any] ) -> Dict: for timesteps in [25, 50, 100, 999, 1_000]: self.check_over_configs(num_train_timesteps=__UpperCamelCase ) def __UpperCamelCase ( self : str ) -> List[Any]: self.check_over_configs(thresholding=__UpperCamelCase ) for order in [1, 2, 3]: for solver_type in ["logrho"]: for threshold in [0.5, 1.0, 2.0]: for prediction_type in ["epsilon", "sample"]: self.check_over_configs( thresholding=__UpperCamelCase , prediction_type=__UpperCamelCase , sample_max_value=__UpperCamelCase , algorithm_type='deis' , solver_order=__UpperCamelCase , solver_type=__UpperCamelCase , ) def __UpperCamelCase ( self : Dict ) -> int: for prediction_type in ["epsilon", "v_prediction"]: self.check_over_configs(prediction_type=__UpperCamelCase ) def __UpperCamelCase ( self : Optional[int] ) -> Any: for algorithm_type in ["deis"]: for solver_type in ["logrho"]: for order in [1, 2, 3]: for prediction_type in ["epsilon", "sample"]: self.check_over_configs( solver_order=__UpperCamelCase , solver_type=__UpperCamelCase , prediction_type=__UpperCamelCase , algorithm_type=__UpperCamelCase , ) A = self.full_loop( solver_order=__UpperCamelCase , solver_type=__UpperCamelCase , prediction_type=__UpperCamelCase , algorithm_type=__UpperCamelCase , ) assert not torch.isnan(__UpperCamelCase 
).any(), "Samples have nan numbers" def __UpperCamelCase ( self : Optional[int] ) -> Optional[Any]: self.check_over_configs(lower_order_final=__UpperCamelCase ) self.check_over_configs(lower_order_final=__UpperCamelCase ) def __UpperCamelCase ( self : int ) -> Tuple: for num_inference_steps in [1, 2, 3, 5, 10, 50, 100, 999, 1_000]: self.check_over_forward(num_inference_steps=__UpperCamelCase , time_step=0 ) def __UpperCamelCase ( self : Union[str, Any] ) -> str: A = self.full_loop() A = torch.mean(torch.abs(__UpperCamelCase ) ) assert abs(result_mean.item() - 0.2_3_9_1_6 ) < 1e-3 def __UpperCamelCase ( self : int ) -> Optional[Any]: A = self.full_loop(prediction_type='v_prediction' ) A = torch.mean(torch.abs(__UpperCamelCase ) ) assert abs(result_mean.item() - 0.0_9_1 ) < 1e-3 def __UpperCamelCase ( self : List[Any] ) -> List[str]: A = self.scheduler_classes[0] A = self.get_scheduler_config(thresholding=__UpperCamelCase , dynamic_thresholding_ratio=0 ) A = scheduler_class(**__UpperCamelCase ) A = 10 A = self.dummy_model() A = self.dummy_sample_deter.half() scheduler.set_timesteps(__UpperCamelCase ) for i, t in enumerate(scheduler.timesteps ): A = model(__UpperCamelCase , __UpperCamelCase ) A = scheduler.step(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase ).prev_sample assert sample.dtype == torch.floataa
106
import multiprocessing import time from arguments import PretokenizationArguments from datasets import load_dataset from transformers import AutoTokenizer, HfArgumentParser def A ( lowercase__ : Optional[int] ) -> Optional[Any]: UpperCamelCase__ :Union[str, Any] = {} UpperCamelCase__ :Optional[int] = tokenizer(example["""content"""] , truncation=lowercase__ )["""input_ids"""] UpperCamelCase__ :int = len(example["""content"""] ) / len(output["""input_ids"""] ) return output UpperCamelCase = HfArgumentParser(PretokenizationArguments) UpperCamelCase = parser.parse_args() if args.num_workers is None: UpperCamelCase = multiprocessing.cpu_count() UpperCamelCase = AutoTokenizer.from_pretrained(args.tokenizer_dir) UpperCamelCase = time.time() UpperCamelCase = load_dataset(args.dataset_name, split="train") print(f'''Dataset loaded in {time.time()-t_start:.2f}s''') UpperCamelCase = time.time() UpperCamelCase = ds.map( tokenize, num_proc=args.num_workers, remove_columns=[ "repo_name", "path", "copies", "size", "content", "license", "hash", "line_mean", "line_max", "alpha_frac", "autogenerated", ], ) print(f'''Dataset tokenized in {time.time()-t_start:.2f}s''') UpperCamelCase = time.time() ds.push_to_hub(args.tokenized_data_repo) print(f'''Data pushed to the hub in {time.time()-t_start:.2f}s''')
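The map above logs a characters-per-token ratio for each example; a tiny self-contained illustration (the token count below is invented):

# Chars-per-token ratio as computed above; the token count is invented.
content = "def add(a, b):\n    return a + b\n"
num_input_ids = 12  # pretend length of tokenizer(...)["input_ids"]
ratio = len(content) / num_input_ids  # 32 / 12 ~ 2.67 characters per token
print(round(ratio, 2))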
45
0
'''simple docstring''' from ...processing_utils import ProcessorMixin class lowercase_ ( _UpperCamelCase ): """simple docstring""" __lowerCAmelCase = "WhisperFeatureExtractor" __lowerCAmelCase = "WhisperTokenizer" def __init__( self : Any, UpperCamelCase__ : Any, UpperCamelCase__ : int ) -> List[Any]: super().__init__(UpperCamelCase__, UpperCamelCase__ ) _A = self.feature_extractor _A = False def __UpperCAmelCase ( self : Tuple, UpperCamelCase__ : Optional[Any]=None, UpperCamelCase__ : int=None, UpperCamelCase__ : List[str]=True ) -> Any: return self.tokenizer.get_decoder_prompt_ids(task=UpperCamelCase__, language=UpperCamelCase__, no_timestamps=UpperCamelCase__ ) def __call__( self : str, *UpperCamelCase__ : Any, **UpperCamelCase__ : Tuple ) -> Optional[Any]: # For backward compatibility if self._in_target_context_manager: return self.current_processor(*UpperCamelCase__, **UpperCamelCase__ ) _A = kwargs.pop('audio', UpperCamelCase__ ) _A = kwargs.pop('sampling_rate', UpperCamelCase__ ) _A = kwargs.pop('text', UpperCamelCase__ ) if len(UpperCamelCase__ ) > 0: _A = args[0] _A = args[1:] if audio is None and text is None: raise ValueError('You need to specify either an `audio` or `text` input to process.' ) if audio is not None: _A = self.feature_extractor(UpperCamelCase__, *UpperCamelCase__, sampling_rate=UpperCamelCase__, **UpperCamelCase__ ) if text is not None: _A = self.tokenizer(UpperCamelCase__, **UpperCamelCase__ ) if text is None: return inputs elif audio is None: return encodings else: _A = encodings['input_ids'] return inputs def __UpperCAmelCase ( self : List[Any], *UpperCamelCase__ : Dict, **UpperCamelCase__ : Optional[Any] ) -> Optional[int]: return self.tokenizer.batch_decode(*UpperCamelCase__, **UpperCamelCase__ ) def __UpperCAmelCase ( self : List[str], *UpperCamelCase__ : Tuple, **UpperCamelCase__ : Dict ) -> List[str]: return self.tokenizer.decode(*UpperCamelCase__, **UpperCamelCase__ ) def __UpperCAmelCase ( self : List[str], UpperCamelCase__ : str, UpperCamelCase__ : Optional[Any]="np" ) -> Dict: return self.tokenizer.get_prompt_ids(UpperCamelCase__, return_tensors=UpperCamelCase__ )
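A usage sketch for the processor above, assuming the public transformers WhisperProcessor API; the checkpoint name and the silent waveform are illustrative:

import numpy as np
from transformers import WhisperProcessor

# Usage sketch; assumes the standard WhisperProcessor API from transformers.
processor = WhisperProcessor.from_pretrained("openai/whisper-tiny")
waveform = np.zeros(16_000, dtype=np.float32)  # one second of silence at 16 kHz
inputs = processor(audio=waveform, sampling_rate=16_000, return_tensors="pt")
print(inputs.input_features.shape)  # torch.Size([1, 80, 3000]) for this checkpoint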
107
def A ( lowercase__ : int ) -> Optional[Any]:
    stooge(lowercase__ , 0 , len(lowercase__ ) - 1 )
    return arr


def A ( lowercase__ : Union[str, Any] , lowercase__ : Dict , lowercase__ : str ) -> List[str]:
    if i >= h:
        return
    # If the first element is larger than the last, swap them
    if arr[i] > arr[h]:
        UpperCamelCase__ , UpperCamelCase__ :List[str] = arr[h], arr[i]
    # If there are more than 2 elements in the array
    if h - i + 1 > 2:
        UpperCamelCase__ :Optional[int] = (int)((h - i + 1) / 3 )
        # Recursively sort the first 2/3 of the elements
        stooge(lowercase__ , lowercase__ , (h - t) )
        # Recursively sort the last 2/3 of the elements
        stooge(lowercase__ , i + t , (h) )
        # Recursively sort the first 2/3 of the elements again
        stooge(lowercase__ , lowercase__ , (h - t) )


if __name__ == "__main__":
    UpperCamelCase = input("Enter numbers separated by a comma:\n").strip()
    UpperCamelCase = [int(item) for item in user_input.split(",")]
    print(stooge_sort(unsorted))
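For reference, a readable version of the same algorithm with conventional names of my own choosing, plus a quick sanity check:

# Readable restatement of the stooge sort above; all names here are my own.
def stooge_sort(arr: list[int]) -> list[int]:
    def stooge(a: list[int], i: int, h: int) -> None:
        if i >= h:
            return
        if a[i] > a[h]:  # swap when the first element is larger than the last
            a[i], a[h] = a[h], a[i]
        if h - i + 1 > 2:
            t = (h - i + 1) // 3
            stooge(a, i, h - t)  # first 2/3
            stooge(a, i + t, h)  # last 2/3
            stooge(a, i, h - t)  # first 2/3 again

    stooge(arr, 0, len(arr) - 1)
    return arr


assert stooge_sort([8, 3, 5, 1, 9, 2]) == [1, 2, 3, 5, 8, 9]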
45
0
from __future__ import annotations import inspect import unittest from math import floor import numpy as np from transformers import CvtConfig from transformers.testing_utils import require_tf, require_vision, slow from transformers.utils import cached_property, is_tf_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import TFCvtForImageClassification, TFCvtModel from transformers.models.cvt.modeling_tf_cvt import TF_CVT_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import AutoImageProcessor class SCREAMING_SNAKE_CASE__ ( UpperCAmelCase ): '''simple docstring''' def lowerCamelCase ( self : List[Any] ) -> Optional[int]: """simple docstring""" _UpperCAmelCase = self.config_class(**self.inputs_dict ) self.parent.assertTrue(hasattr(lowerCamelCase , """embed_dim""" ) ) self.parent.assertTrue(hasattr(lowerCamelCase , """num_heads""" ) ) class SCREAMING_SNAKE_CASE__ : '''simple docstring''' def __init__( self : Union[str, Any] , lowerCamelCase : List[str] , lowerCamelCase : Dict=13 , lowerCamelCase : List[Any]=64 , lowerCamelCase : str=3 , lowerCamelCase : List[str]=[16, 48, 96] , lowerCamelCase : List[Any]=[1, 3, 6] , lowerCamelCase : Tuple=[1, 2, 10] , lowerCamelCase : Optional[int]=[7, 3, 3] , lowerCamelCase : int=[4, 2, 2] , lowerCamelCase : Dict=[2, 1, 1] , lowerCamelCase : List[Any]=[2, 2, 2] , lowerCamelCase : Optional[int]=[False, False, True] , lowerCamelCase : Tuple=[0.0, 0.0, 0.0] , lowerCamelCase : Optional[Any]=0.02 , lowerCamelCase : Optional[int]=1E-12 , lowerCamelCase : Optional[Any]=True , lowerCamelCase : Union[str, Any]=True , lowerCamelCase : Optional[int]=2 , ) -> Tuple: """simple docstring""" _UpperCAmelCase = parent _UpperCAmelCase = batch_size _UpperCAmelCase = image_size _UpperCAmelCase = patch_sizes _UpperCAmelCase = patch_stride _UpperCAmelCase = patch_padding _UpperCAmelCase = is_training _UpperCAmelCase = use_labels _UpperCAmelCase = num_labels _UpperCAmelCase = num_channels _UpperCAmelCase = embed_dim _UpperCAmelCase = num_heads _UpperCAmelCase = stride_kv _UpperCAmelCase = depth _UpperCAmelCase = cls_token _UpperCAmelCase = attention_drop_rate _UpperCAmelCase = initializer_range _UpperCAmelCase = layer_norm_eps def lowerCamelCase ( self : Tuple ) -> Tuple: """simple docstring""" _UpperCAmelCase = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) _UpperCAmelCase = None if self.use_labels: # create a random int32 tensor of given shape _UpperCAmelCase = ids_tensor([self.batch_size] , self.num_labels ) _UpperCAmelCase = self.get_config() return config, pixel_values, labels def lowerCamelCase ( self : Any ) -> Tuple: """simple docstring""" return CvtConfig( image_size=self.image_size , num_labels=self.num_labels , num_channels=self.num_channels , embed_dim=self.embed_dim , num_heads=self.num_heads , patch_sizes=self.patch_sizes , patch_padding=self.patch_padding , patch_stride=self.patch_stride , stride_kv=self.stride_kv , depth=self.depth , cls_token=self.cls_token , attention_drop_rate=self.attention_drop_rate , initializer_range=self.initializer_range , ) def lowerCamelCase ( self : Union[str, Any] , lowerCamelCase : int , lowerCamelCase : List[Any] , lowerCamelCase : List[str] ) -> Union[str, Any]: """simple docstring""" _UpperCAmelCase = 
TFCvtModel(config=lowerCamelCase ) _UpperCAmelCase = model(lowerCamelCase , training=lowerCamelCase ) _UpperCAmelCase = (self.image_size, self.image_size) _UpperCAmelCase , _UpperCAmelCase = image_size[0], image_size[1] for i in range(len(self.depth ) ): _UpperCAmelCase = floor(((height + 2 * self.patch_padding[i] - self.patch_sizes[i]) / self.patch_stride[i]) + 1 ) _UpperCAmelCase = floor(((width + 2 * self.patch_padding[i] - self.patch_sizes[i]) / self.patch_stride[i]) + 1 ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.embed_dim[-1], height, width) ) def lowerCamelCase ( self : Optional[Any] , lowerCamelCase : Any , lowerCamelCase : int , lowerCamelCase : Optional[Any] ) -> Any: """simple docstring""" _UpperCAmelCase = self.num_labels _UpperCAmelCase = TFCvtForImageClassification(lowerCamelCase ) _UpperCAmelCase = model(lowerCamelCase , labels=lowerCamelCase , training=lowerCamelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def lowerCamelCase ( self : List[str] ) -> Dict: """simple docstring""" _UpperCAmelCase = self.prepare_config_and_inputs() _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = config_and_inputs _UpperCAmelCase = {"""pixel_values""": pixel_values} return config, inputs_dict @require_tf class SCREAMING_SNAKE_CASE__ ( UpperCAmelCase , UpperCAmelCase , unittest.TestCase ): '''simple docstring''' _lowerCamelCase = (TFCvtModel, TFCvtForImageClassification) if is_tf_available() else () _lowerCamelCase = ( {'''feature-extraction''': TFCvtModel, '''image-classification''': TFCvtForImageClassification} if is_tf_available() else {} ) _lowerCamelCase = False _lowerCamelCase = False _lowerCamelCase = False _lowerCamelCase = False _lowerCamelCase = False def lowerCamelCase ( self : Dict ) -> Any: """simple docstring""" _UpperCAmelCase = TFCvtModelTester(self ) _UpperCAmelCase = TFCvtConfigTester(self , config_class=lowerCamelCase , has_text_modality=lowerCamelCase , hidden_size=37 ) def lowerCamelCase ( self : Tuple ) -> Dict: """simple docstring""" self.config_tester.create_and_test_config_common_properties() self.config_tester.create_and_test_config_to_json_string() self.config_tester.create_and_test_config_to_json_file() self.config_tester.create_and_test_config_from_and_save_pretrained() self.config_tester.create_and_test_config_with_num_labels() self.config_tester.check_config_can_be_init_without_params() self.config_tester.check_config_arguments_init() @unittest.skip(reason="""Cvt does not output attentions""" ) def lowerCamelCase ( self : List[str] ) -> List[str]: """simple docstring""" pass @unittest.skip(reason="""Cvt does not use inputs_embeds""" ) def lowerCamelCase ( self : Any ) -> Dict: """simple docstring""" pass @unittest.skip(reason="""Cvt does not support input and output embeddings""" ) def lowerCamelCase ( self : Optional[Any] ) -> List[Any]: """simple docstring""" pass @unittest.skipIf( not is_tf_available() or len(tf.config.list_physical_devices("""GPU""" ) ) == 0 , reason="""TF does not support backprop for grouped convolutions on CPU.""" , ) def lowerCamelCase ( self : Tuple ) -> Optional[int]: """simple docstring""" super().test_dataset_conversion() @unittest.skipIf( not is_tf_available() or len(tf.config.list_physical_devices("""GPU""" ) ) == 0 , reason="""TF does not support backprop for grouped convolutions on CPU.""" , ) @slow def lowerCamelCase ( self : Dict ) -> Dict: """simple docstring""" super().test_keras_fit() @unittest.skip(reason="""Get `Failed to determine best 
cudnn convolution algo.` error after using TF 2.12+cuda 11.8""" ) def lowerCamelCase ( self : Dict ) -> Optional[int]: """simple docstring""" _UpperCAmelCase = tf.keras.mixed_precision.Policy("""mixed_float16""" ) tf.keras.mixed_precision.set_global_policy(lowerCamelCase ) super().test_keras_fit() tf.keras.mixed_precision.set_global_policy("""float32""" ) def lowerCamelCase ( self : int ) -> List[str]: """simple docstring""" _UpperCAmelCase , _UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: _UpperCAmelCase = model_class(lowerCamelCase ) _UpperCAmelCase = inspect.signature(model.call ) # signature.parameters is an OrderedDict => so arg_names order is deterministic _UpperCAmelCase = [*signature.parameters.keys()] _UpperCAmelCase = ["""pixel_values"""] self.assertListEqual(arg_names[:1] , lowerCamelCase ) def lowerCamelCase ( self : int ) -> str: """simple docstring""" def check_hidden_states_output(lowerCamelCase : str , lowerCamelCase : Tuple , lowerCamelCase : Dict ): _UpperCAmelCase = model_class(lowerCamelCase ) _UpperCAmelCase = model(**self._prepare_for_class(lowerCamelCase , lowerCamelCase ) ) _UpperCAmelCase = outputs.hidden_states _UpperCAmelCase = len(self.model_tester.depth ) self.assertEqual(len(lowerCamelCase ) , lowerCamelCase ) # verify the first hidden states (first block) self.assertListEqual( list(hidden_states[0].shape[-3:] ) , [ self.model_tester.embed_dim[0], self.model_tester.image_size // 4, self.model_tester.image_size // 4, ] , ) _UpperCAmelCase , _UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: _UpperCAmelCase = True check_hidden_states_output(lowerCamelCase , lowerCamelCase , lowerCamelCase ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] _UpperCAmelCase = True check_hidden_states_output(lowerCamelCase , lowerCamelCase , lowerCamelCase ) def lowerCamelCase ( self : Dict ) -> Dict: """simple docstring""" _UpperCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*lowerCamelCase ) def lowerCamelCase ( self : List[str] ) -> Union[str, Any]: """simple docstring""" _UpperCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*lowerCamelCase ) @slow def lowerCamelCase ( self : Optional[Any] ) -> Union[str, Any]: """simple docstring""" for model_name in TF_CVT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: _UpperCAmelCase = TFCvtModel.from_pretrained(lowerCamelCase ) self.assertIsNotNone(lowerCamelCase ) def _SCREAMING_SNAKE_CASE ( ) -> List[str]: _UpperCAmelCase = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" ) return image @require_tf @require_vision class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ): '''simple docstring''' @cached_property def lowerCamelCase ( self : Dict ) -> Optional[Any]: """simple docstring""" return AutoImageProcessor.from_pretrained(TF_CVT_PRETRAINED_MODEL_ARCHIVE_LIST[0] ) @slow def lowerCamelCase ( self : Optional[int] ) -> List[str]: """simple docstring""" _UpperCAmelCase = TFCvtForImageClassification.from_pretrained(TF_CVT_PRETRAINED_MODEL_ARCHIVE_LIST[0] ) _UpperCAmelCase = self.default_image_processor _UpperCAmelCase = prepare_img() _UpperCAmelCase = image_processor(images=lowerCamelCase , return_tensors="""tf""" ) # forward pass _UpperCAmelCase = model(**lowerCamelCase ) # verify the logits _UpperCAmelCase = tf.TensorShape((1, 1000) ) 
self.assertEqual(outputs.logits.shape , lowerCamelCase ) _UpperCAmelCase = tf.constant([0.9285, 0.9015, -0.3150] ) self.assertTrue(np.allclose(outputs.logits[0, :3].numpy() , lowerCamelCase , atol=1E-4 ) )
108
import argparse
import json
import os

from tensorflow.core.protobuf.saved_model_pb2 import SavedModel


# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_copies.py
UpperCamelCase = "."

# Internal TensorFlow ops that can be safely ignored (mostly specific to a saved model)
UpperCamelCase = [
    "Assert",
    "AssignVariableOp",
    "EmptyTensorList",
    "MergeV2Checkpoints",
    "ReadVariableOp",
    "ResourceGather",
    "RestoreV2",
    "SaveV2",
    "ShardedFilename",
    "StatefulPartitionedCall",
    "StaticRegexFullMatch",
    "VarHandleOp",
]


def A ( lowercase__ : Tuple , lowercase__ : Optional[Any] , lowercase__ : Dict ) -> List[Any]:
    UpperCamelCase__ :str = SavedModel()
    UpperCamelCase__ :List[str] = []
    with open(os.path.join(lowercase__ , """utils""" , """tf_ops""" , """onnx.json""" ) ) as f:
        UpperCamelCase__ :str = json.load(lowercase__ )["""opsets"""]
    for i in range(1 , opset + 1 ):
        onnx_ops.extend(onnx_opsets[str(lowercase__ )] )
    with open(lowercase__ , """rb""" ) as f:
        saved_model.ParseFromString(f.read() )
    UpperCamelCase__ :Tuple = set()
    # Iterate over every metagraph in case there is more than one (a saved model can contain multiple graphs)
    for meta_graph in saved_model.meta_graphs:
        # Add operations in the graph definition
        model_op_names.update(node.op for node in meta_graph.graph_def.node )
        # Go through the functions in the graph definition
        for func in meta_graph.graph_def.library.function:
            # Add operations in each function
            model_op_names.update(node.op for node in func.node_def )
    # Convert to list, sorted if you want
    UpperCamelCase__ :Union[str, Any] = sorted(lowercase__ )
    UpperCamelCase__ :List[Any] = []
    for op in model_op_names:
        if op not in onnx_ops and op not in INTERNAL_OPS:
            incompatible_ops.append(lowercase__ )
    if strict and len(lowercase__ ) > 0:
        raise Exception(f"""Found the following incompatible ops for the opset {opset}:\n""" + "\n".join(incompatible_ops ) )
    elif len(lowercase__ ) > 0:
        print(f"""Found the following incompatible ops for the opset {opset}:""" )
        print(*lowercase__ , sep="""\n""" )
    else:
        print(f"""The saved model {saved_model_path} can properly be converted with ONNX.""" )


if __name__ == "__main__":
    UpperCamelCase = argparse.ArgumentParser()
    parser.add_argument("--saved_model_path", help="Path of the saved model to check (the .pb file).")
    parser.add_argument(
        "--opset", default=12, type=int, help="The ONNX opset against which the model has to be tested."
    )
    parser.add_argument(
        "--framework", choices=["onnx"], default="onnx", help="Frameworks against which to test the saved model."
    )
    parser.add_argument(
        "--strict", action="store_true", help="Whether make the checking strict (raise errors) or not (raise warnings)"
    )
    UpperCamelCase = parser.parse_args()

    if args.framework == "onnx":
        onnx_compliancy(args.saved_model_path, args.strict, args.opset)
45
0
'''simple docstring''' from collections.abc import Callable import numpy as np def __magic_name__ ( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) -> np.ndarray: '''simple docstring''' __SCREAMING_SNAKE_CASE = int(np.ceil((x_end - xa) / step_size ) ) __SCREAMING_SNAKE_CASE = np.zeros((n + 1,) ) __SCREAMING_SNAKE_CASE = ya __SCREAMING_SNAKE_CASE = xa for k in range(__UpperCAmelCase ): __SCREAMING_SNAKE_CASE = y[k] + step_size * ode_func(__UpperCAmelCase , y[k] ) x += step_size return y if __name__ == "__main__": import doctest doctest.testmod()
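A readable reconstruction of the explicit Euler integrator above (the parameter names are guesses from context), with a quick convergence check:

import numpy as np

# Readable reconstruction of the explicit Euler step above; names are guesses.
def explicit_euler(ode_func, y0: float, x0: float, step_size: float, x_end: float) -> np.ndarray:
    n = int(np.ceil((x_end - x0) / step_size))
    y = np.zeros(n + 1)
    y[0] = y0
    x = x0
    for k in range(n):
        y[k + 1] = y[k] + step_size * ode_func(x, y[k])
        x += step_size
    return y


# y' = y with y(0) = 1: the endpoint should approach e ~ 2.718 as step_size shrinks.
print(explicit_euler(lambda x, y: y, 1.0, 0.0, 0.01, 1.0)[-1])  # ~2.7048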
109
from __future__ import annotations def A ( lowercase__ : str , lowercase__ : list[str] | None = None , lowercase__ : dict[str, float] | None = None , lowercase__ : bool = False , ) -> tuple[int, float, str]: UpperCamelCase__ :Dict = cipher_alphabet or [chr(lowercase__ ) for i in range(97 , 123 )] # If the argument is None or the user provided an empty dictionary if not frequencies_dict: # Frequencies of letters in the english language (how much they show up) UpperCamelCase__ :Optional[Any] = { """a""": 0.08497, """b""": 0.01492, """c""": 0.02202, """d""": 0.04253, """e""": 0.11162, """f""": 0.02228, """g""": 0.02015, """h""": 0.06094, """i""": 0.07546, """j""": 0.00153, """k""": 0.01292, """l""": 0.04025, """m""": 0.02406, """n""": 0.06749, """o""": 0.07507, """p""": 0.01929, """q""": 0.00095, """r""": 0.07587, """s""": 0.06327, """t""": 0.09356, """u""": 0.02758, """v""": 0.00978, """w""": 0.02560, """x""": 0.00150, """y""": 0.01994, """z""": 0.00077, } else: # Custom frequencies dictionary UpperCamelCase__ :Optional[int] = frequencies_dict if not case_sensitive: UpperCamelCase__ :int = ciphertext.lower() # Chi squared statistic values UpperCamelCase__ :dict[int, tuple[float, str]] = {} # cycle through all of the shifts for shift in range(len(lowercase__ ) ): UpperCamelCase__ :int = """""" # decrypt the message with the shift for letter in ciphertext: try: # Try to index the letter in the alphabet UpperCamelCase__ :int = (alphabet_letters.index(letter.lower() ) - shift) % len( lowercase__ ) decrypted_with_shift += ( alphabet_letters[new_key].upper() if case_sensitive and letter.isupper() else alphabet_letters[new_key] ) except ValueError: # Append the character if it isn't in the alphabet decrypted_with_shift += letter UpperCamelCase__ :Optional[int] = 0.0 # Loop through each letter in the decoded message with the shift for letter in decrypted_with_shift: if case_sensitive: UpperCamelCase__ :Optional[int] = letter.lower() if letter in frequencies: # Get the amount of times the letter occurs in the message UpperCamelCase__ :Optional[int] = decrypted_with_shift.lower().count(lowercase__ ) # Get the excepcted amount of times the letter should appear based # on letter frequencies UpperCamelCase__ :Optional[int] = frequencies[letter] * occurrences # Complete the chi squared statistic formula UpperCamelCase__ :Dict = ((occurrences - expected) ** 2) / expected # Add the margin of error to the total chi squared statistic chi_squared_statistic += chi_letter_value else: if letter.lower() in frequencies: # Get the amount of times the letter occurs in the message UpperCamelCase__ :List[str] = decrypted_with_shift.count(lowercase__ ) # Get the excepcted amount of times the letter should appear based # on letter frequencies UpperCamelCase__ :Union[str, Any] = frequencies[letter] * occurrences # Complete the chi squared statistic formula UpperCamelCase__ :List[str] = ((occurrences - expected) ** 2) / expected # Add the margin of error to the total chi squared statistic chi_squared_statistic += chi_letter_value # Add the data to the chi_squared_statistic_values dictionary UpperCamelCase__ :Union[str, Any] = ( chi_squared_statistic, decrypted_with_shift, ) # Get the most likely cipher by finding the cipher with the smallest chi squared # statistic def chi_squared_statistic_values_sorting_key(lowercase__ : int ) -> tuple[float, str]: return chi_squared_statistic_values[key] UpperCamelCase__ :int = min( lowercase__ , key=lowercase__ , ) # Get all the data from the most likely cipher (key, decoded message) ( 
        most_likely_cipher_chi_squared_value,
        decoded_most_likely_cipher,
    ) = chi_squared_statistic_values[most_likely_cipher]

    # Return the data on the most likely shift
    return (
        most_likely_cipher,
        most_likely_cipher_chi_squared_value,
        decoded_most_likely_cipher,
    )
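The scoring idea above, distilled into a self-contained sketch; the frequency table is truncated to three letters purely for illustration:

from collections import Counter

# Distilled chi-squared fit used above; the frequency table is deliberately truncated.
ENGLISH_FREQ = {"e": 0.11162, "t": 0.09356, "a": 0.08497}


def chi_squared(text: str) -> float:
    counts = Counter(c for c in text.lower() if c.isalpha())
    total = sum(counts.values()) or 1
    score = 0.0
    for letter, freq in ENGLISH_FREQ.items():
        expected = freq * total  # how often the letter should appear in English text
        score += (counts[letter] - expected) ** 2 / expected
    return score


# Lower scores indicate a letter distribution closer to English.
print(chi_squared("the gentle breeze"))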
45
0
"""simple docstring""" from typing import Dict, Iterable, Optional, Union import numpy as np from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from ...image_transforms import normalize, rescale, resize, to_channel_dimension_format, to_pil_image from ...image_utils import ( IMAGENET_STANDARD_MEAN, IMAGENET_STANDARD_STD, ChannelDimension, ImageInput, PILImageResampling, make_list_of_images, to_numpy_array, valid_images, ) from ...utils import TensorType, is_pytesseract_available, is_vision_available, logging, requires_backends if is_vision_available(): import PIL # soft dependency if is_pytesseract_available(): import pytesseract UpperCamelCase__ = logging.get_logger(__name__) def lowerCamelCase ( _snake_case ,_snake_case ,_snake_case ): return [ int(1000 * (box[0] / width) ), int(1000 * (box[1] / height) ), int(1000 * (box[2] / width) ), int(1000 * (box[3] / height) ), ] def lowerCamelCase ( _snake_case ,_snake_case ,_snake_case ): UpperCAmelCase__ : List[Any] = to_pil_image(_snake_case ) UpperCAmelCase__ , UpperCAmelCase__ : Optional[int] = pil_image.size UpperCAmelCase__ : List[str] = pytesseract.image_to_data(_snake_case ,lang=_snake_case ,output_type='dict' ,config=_snake_case ) UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ : List[Any] = data['text'], data['left'], data['top'], data['width'], data['height'] # filter empty words and corresponding coordinates UpperCAmelCase__ : Optional[Any] = [idx for idx, word in enumerate(_snake_case ) if not word.strip()] UpperCAmelCase__ : Dict = [word for idx, word in enumerate(_snake_case ) if idx not in irrelevant_indices] UpperCAmelCase__ : str = [coord for idx, coord in enumerate(_snake_case ) if idx not in irrelevant_indices] UpperCAmelCase__ : Union[str, Any] = [coord for idx, coord in enumerate(_snake_case ) if idx not in irrelevant_indices] UpperCAmelCase__ : int = [coord for idx, coord in enumerate(_snake_case ) if idx not in irrelevant_indices] UpperCAmelCase__ : str = [coord for idx, coord in enumerate(_snake_case ) if idx not in irrelevant_indices] # turn coordinates into (left, top, left+width, top+height) format UpperCAmelCase__ : Any = [] for x, y, w, h in zip(_snake_case ,_snake_case ,_snake_case ,_snake_case ): UpperCAmelCase__ : List[str] = [x, y, x + w, y + h] actual_boxes.append(_snake_case ) # finally, normalize the bounding boxes UpperCAmelCase__ : List[str] = [] for box in actual_boxes: normalized_boxes.append(normalize_box(_snake_case ,_snake_case ,_snake_case ) ) assert len(_snake_case ) == len(_snake_case ), "Not as many words as there are bounding boxes" return words, normalized_boxes class a ( lowercase ): UpperCamelCase : List[Any] = ["""pixel_values"""] def __init__( self , UpperCamelCase_ = True , UpperCamelCase_ = None , UpperCamelCase_ = PILImageResampling.BILINEAR , UpperCamelCase_ = True , UpperCamelCase_ = 1 / 255 , UpperCamelCase_ = True , UpperCamelCase_ = None , UpperCamelCase_ = None , UpperCamelCase_ = True , UpperCamelCase_ = None , UpperCamelCase_ = "" , **UpperCamelCase_ , ): super().__init__(**UpperCamelCase_ ) UpperCAmelCase__ : Tuple = size if size is not None else {'height': 224, 'width': 224} UpperCAmelCase__ : List[Any] = get_size_dict(UpperCamelCase_ ) UpperCAmelCase__ : Any = do_resize UpperCAmelCase__ : Optional[Any] = size UpperCAmelCase__ : Optional[int] = resample UpperCAmelCase__ : List[str] = do_rescale UpperCAmelCase__ : Union[str, Any] = rescale_value UpperCAmelCase__ : List[str] = do_normalize 
UpperCAmelCase__ : Optional[Any] = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN UpperCAmelCase__ : Any = image_std if image_std is not None else IMAGENET_STANDARD_STD UpperCAmelCase__ : Optional[Any] = apply_ocr UpperCAmelCase__ : Optional[Any] = ocr_lang UpperCAmelCase__ : Tuple = tesseract_config def __snake_case ( self , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ = PILImageResampling.BILINEAR , UpperCamelCase_ = None , **UpperCamelCase_ , ): UpperCAmelCase__ : Optional[Any] = get_size_dict(UpperCamelCase_ ) if "height" not in size or "width" not in size: raise ValueError(F'''The size dictionary must contain the keys \'height\' and \'width\'. Got {size.keys()}''' ) UpperCAmelCase__ : List[Any] = (size['height'], size['width']) return resize(UpperCamelCase_ , size=UpperCamelCase_ , resample=UpperCamelCase_ , data_format=UpperCamelCase_ , **UpperCamelCase_ ) def __snake_case ( self , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ = None , **UpperCamelCase_ , ): return rescale(UpperCamelCase_ , scale=UpperCamelCase_ , data_format=UpperCamelCase_ , **UpperCamelCase_ ) def __snake_case ( self , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ = None , **UpperCamelCase_ , ): return normalize(UpperCamelCase_ , mean=UpperCamelCase_ , std=UpperCamelCase_ , data_format=UpperCamelCase_ , **UpperCamelCase_ ) def __snake_case ( self , UpperCamelCase_ , UpperCamelCase_ = None , UpperCamelCase_ = None , UpperCamelCase_=None , UpperCamelCase_ = None , UpperCamelCase_ = None , UpperCamelCase_ = None , UpperCamelCase_ = None , UpperCamelCase_ = None , UpperCamelCase_ = None , UpperCamelCase_ = None , UpperCamelCase_ = None , UpperCamelCase_ = None , UpperCamelCase_ = ChannelDimension.FIRST , **UpperCamelCase_ , ): UpperCAmelCase__ : List[Any] = do_resize if do_resize is not None else self.do_resize UpperCAmelCase__ : List[str] = size if size is not None else self.size UpperCAmelCase__ : Optional[Any] = get_size_dict(UpperCamelCase_ ) UpperCAmelCase__ : Any = resample if resample is not None else self.resample UpperCAmelCase__ : Tuple = do_rescale if do_rescale is not None else self.do_rescale UpperCAmelCase__ : Optional[Any] = rescale_factor if rescale_factor is not None else self.rescale_factor UpperCAmelCase__ : Optional[Any] = do_normalize if do_normalize is not None else self.do_normalize UpperCAmelCase__ : Optional[Any] = image_mean if image_mean is not None else self.image_mean UpperCAmelCase__ : List[Any] = image_std if image_std is not None else self.image_std UpperCAmelCase__ : str = apply_ocr if apply_ocr is not None else self.apply_ocr UpperCAmelCase__ : Dict = ocr_lang if ocr_lang is not None else self.ocr_lang UpperCAmelCase__ : List[str] = tesseract_config if tesseract_config is not None else self.tesseract_config UpperCAmelCase__ : Any = make_list_of_images(UpperCamelCase_ ) if not valid_images(UpperCamelCase_ ): raise ValueError( 'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, ' 'torch.Tensor, tf.Tensor or jax.ndarray.' ) if do_resize and size is None: raise ValueError('Size must be specified if do_resize is True.' ) if do_rescale and rescale_factor is None: raise ValueError('Rescale factor must be specified if do_rescale is True.' ) if do_normalize and (image_mean is None or image_std is None): raise ValueError('If do_normalize is True, image_mean and image_std must be specified.' ) # All transformations expect numpy arrays. 
UpperCAmelCase__ : Optional[Any] = [to_numpy_array(UpperCamelCase_ ) for image in images] # Tesseract OCR to get words + normalized bounding boxes if apply_ocr: requires_backends(self , 'pytesseract' ) UpperCAmelCase__ : Any = [] UpperCAmelCase__ : str = [] for image in images: UpperCAmelCase__ , UpperCAmelCase__ : Tuple = apply_tesseract(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ) words_batch.append(UpperCamelCase_ ) boxes_batch.append(UpperCamelCase_ ) if do_resize: UpperCAmelCase__ : Optional[int] = [self.resize(image=UpperCamelCase_ , size=UpperCamelCase_ , resample=UpperCamelCase_ ) for image in images] if do_rescale: UpperCAmelCase__ : int = [self.rescale(image=UpperCamelCase_ , scale=UpperCamelCase_ ) for image in images] if do_normalize: UpperCAmelCase__ : Optional[Any] = [self.normalize(image=UpperCamelCase_ , mean=UpperCamelCase_ , std=UpperCamelCase_ ) for image in images] UpperCAmelCase__ : List[str] = [to_channel_dimension_format(UpperCamelCase_ , UpperCamelCase_ ) for image in images] UpperCAmelCase__ : int = BatchFeature(data={'pixel_values': images} , tensor_type=UpperCamelCase_ ) if apply_ocr: UpperCAmelCase__ : List[Any] = words_batch UpperCAmelCase__ : Any = boxes_batch return data
110
import warnings

from ...utils import logging
from .image_processing_mobilevit import MobileViTImageProcessor


logger = logging.get_logger(__name__)


class MobileViTFeatureExtractor(MobileViTImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class MobileViTFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use MobileViTImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
45
0
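The processor in the record above rescales OCR word boxes onto a resolution-independent 0-1000 grid before handing them to the model. A minimal sketch of that normalization step, with the function and variable names assumed for illustration:

# Sketch of the 0-1000 box normalization used by LayoutLM-style processors.
def normalize_box(box, width, height):
    # Scale pixel coordinates (left, top, right, bottom) so boxes are
    # comparable across images of different resolutions.
    return [
        int(1000 * (box[0] / width)),
        int(1000 * (box[1] / height)),
        int(1000 * (box[2] / width)),
        int(1000 * (box[3] / height)),
    ]

print(normalize_box([10, 20, 110, 220], width=1000, height=2000))
# -> [10, 10, 110, 110]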
'''simple docstring''' import dataclasses import re import string from typing import Any, Dict, Iterator, List, Mapping, Optional, Sequence, Tuple import numpy as np from . import residue_constants lowerCAmelCase_ = Mapping[str, np.ndarray] lowerCAmelCase_ = Mapping[str, Any] # Is a nested dict. lowerCAmelCase_ = 0.01 @dataclasses.dataclass(frozen=UpperCAmelCase ) class _snake_case: __snake_case: np.ndarray # [num_res, num_atom_type, 3] # Amino-acid type for each residue represented as an integer between 0 and # 20, where 20 is 'X'. __snake_case: np.ndarray # [num_res] # Binary float mask to indicate presence of a particular atom. 1.0 if an atom # is present and 0.0 if not. This should be used for loss masking. __snake_case: np.ndarray # [num_res, num_atom_type] # Residue index as used in PDB. It is not necessarily continuous or 0-indexed. __snake_case: np.ndarray # [num_res] # B-factors, or temperature factors, of each residue (in sq. angstroms units), # representing the displacement of the residue from its ground truth mean # value. __snake_case: np.ndarray # [num_res, num_atom_type] # Chain indices for multi-chain predictions __snake_case: Optional[np.ndarray] = None # Optional remark about the protein. Included as a comment in output PDB # files __snake_case: Optional[str] = None # Templates used to generate this protein (prediction-only) __snake_case: Optional[Sequence[str]] = None # Chain corresponding to each parent __snake_case: Optional[Sequence[int]] = None def _A ( UpperCAmelCase ): '''simple docstring''' A__ = r"""(\[[A-Z]+\]\n)""" A__ = [tag.strip() for tag in re.split(lowercase__ ,lowercase__ ) if len(lowercase__ ) > 0] A__ = zip(tags[0::2] ,[l.split('\n' ) for l in tags[1::2]] ) A__ = ["N", "CA", "C"] A__ = None A__ = None A__ = None for g in groups: if "[PRIMARY]" == g[0]: A__ = g[1][0].strip() for i in range(len(lowercase__ ) ): if seq[i] not in residue_constants.restypes: A__ = """X""" # FIXME: strings are immutable A__ = np.array( [residue_constants.restype_order.get(lowercase__ ,residue_constants.restype_num ) for res_symbol in seq] ) elif "[TERTIARY]" == g[0]: A__ = [] for axis in range(3 ): tertiary.append(list(map(lowercase__ ,g[1][axis].split() ) ) ) A__ = np.array(lowercase__ ) A__ = np.zeros((len(tertiary[0] ) // 3, residue_constants.atom_type_num, 3) ).astype(np.floataa ) for i, atom in enumerate(lowercase__ ): A__ = np.transpose(tertiary_np[:, i::3] ) atom_positions *= PICO_TO_ANGSTROM elif "[MASK]" == g[0]: A__ = np.array(list(map({'-': 0, '+': 1}.get ,g[1][0].strip() ) ) ) A__ = np.zeros( ( len(lowercase__ ), residue_constants.atom_type_num, ) ).astype(np.floataa ) for i, atom in enumerate(lowercase__ ): A__ = 1 atom_mask *= mask[..., None] assert aatype is not None return Protein( atom_positions=lowercase__ ,atom_mask=lowercase__ ,aatype=lowercase__ ,residue_index=np.arange(len(lowercase__ ) ) ,b_factors=lowercase__ ,) def _A ( UpperCAmelCase ,UpperCAmelCase = 0 ): '''simple docstring''' A__ = [] A__ = prot.remark if remark is not None: pdb_headers.append(F"""REMARK {remark}""" ) A__ = prot.parents A__ = prot.parents_chain_index if parents is not None and parents_chain_index is not None: A__ = [p for i, p in zip(lowercase__ ,lowercase__ ) if i == chain_id] if parents is None or len(lowercase__ ) == 0: A__ = ["""N/A"""] pdb_headers.append(F"""PARENT {" ".join(lowercase__ )}""" ) return pdb_headers def _A ( UpperCAmelCase ,UpperCAmelCase ): '''simple docstring''' A__ = [] A__ = pdb_str.split('\n' ) A__ = prot.remark if remark is not None: 
out_pdb_lines.append(F"""REMARK {remark}""" ) A__ = 42 if prot.parents is not None and len(prot.parents ) > 0: A__ = [] if prot.parents_chain_index is not None: A__ = {} for p, i in zip(prot.parents ,prot.parents_chain_index ): parent_dict.setdefault(str(lowercase__ ) ,[] ) parent_dict[str(lowercase__ )].append(lowercase__ ) A__ = max([int(lowercase__ ) for chain_idx in parent_dict] ) for i in range(max_idx + 1 ): A__ = parent_dict.get(str(lowercase__ ) ,['N/A'] ) parents_per_chain.append(lowercase__ ) else: parents_per_chain.append(list(prot.parents ) ) else: A__ = [["""N/A"""]] def make_parent_line(UpperCAmelCase ) -> str: return F"""PARENT {" ".join(lowercase__ )}""" out_pdb_lines.append(make_parent_line(parents_per_chain[0] ) ) A__ = 0 for i, l in enumerate(lowercase__ ): if "PARENT" not in l and "REMARK" not in l: out_pdb_lines.append(lowercase__ ) if "TER" in l and "END" not in lines[i + 1]: chain_counter += 1 if not chain_counter >= len(lowercase__ ): A__ = parents_per_chain[chain_counter] else: A__ = ["""N/A"""] out_pdb_lines.append(make_parent_line(lowercase__ ) ) return "\n".join(lowercase__ ) def _A ( UpperCAmelCase ): '''simple docstring''' A__ = residue_constants.restypes + ["""X"""] def res_atoa(UpperCAmelCase ) -> str: return residue_constants.restype_atoa.get(restypes[r] ,'UNK' ) A__ = residue_constants.atom_types A__ = [] A__ = prot.atom_mask A__ = prot.aatype A__ = prot.atom_positions A__ = prot.residue_index.astype(np.intaa ) A__ = prot.b_factors A__ = prot.chain_index if np.any(aatype > residue_constants.restype_num ): raise ValueError('Invalid aatypes.' ) A__ = get_pdb_headers(lowercase__ ) if len(lowercase__ ) > 0: pdb_lines.extend(lowercase__ ) A__ = aatype.shape[0] A__ = 1 A__ = 0 A__ = string.ascii_uppercase A__ = None # Add all atom sites. for i in range(lowercase__ ): A__ = res_atoa(aatype[i] ) for atom_name, pos, mask, b_factor in zip(lowercase__ ,atom_positions[i] ,atom_mask[i] ,b_factors[i] ): if mask < 0.5: continue A__ = """ATOM""" A__ = atom_name if len(lowercase__ ) == 4 else F""" {atom_name}""" A__ = """""" A__ = """""" A__ = 1.00 A__ = atom_name[0] # Protein supports only C, N, O, S, this works. A__ = """""" A__ = """A""" if chain_index is not None: A__ = chain_tags[chain_index[i]] # PDB is a columnar format, every space matters here! A__ = ( F"""{record_type:<6}{atom_index:>5} {name:<4}{alt_loc:>1}""" F"""{res_name_a:>3} {chain_tag:>1}""" F"""{residue_index[i]:>4}{insertion_code:>1} """ F"""{pos[0]:>8.3f}{pos[1]:>8.3f}{pos[2]:>8.3f}""" F"""{occupancy:>6.2f}{b_factor:>6.2f} """ F"""{element:>2}{charge:>2}""" ) pdb_lines.append(lowercase__ ) atom_index += 1 A__ = i == n - 1 if chain_index is not None: if i != n - 1 and chain_index[i + 1] != prev_chain_index: A__ = True A__ = chain_index[i + 1] if should_terminate: # Close the chain. A__ = """TER""" A__ = ( F"""{chain_end:<6}{atom_index:>5} {res_atoa(aatype[i] ):>3} {chain_tag:>1}{residue_index[i]:>4}""" ) pdb_lines.append(lowercase__ ) atom_index += 1 if i != n - 1: # "prev" is a misnomer here. This happens at the beginning of # each new chain. 
pdb_lines.extend(get_pdb_headers(lowercase__ ,lowercase__ ) ) pdb_lines.append('END' ) pdb_lines.append('' ) return "\n".join(lowercase__ ) def _A ( UpperCAmelCase ): '''simple docstring''' return residue_constants.STANDARD_ATOM_MASK[prot.aatype] def _A ( UpperCAmelCase ,UpperCAmelCase ,UpperCAmelCase = None ,UpperCAmelCase = None ,UpperCAmelCase = None ,UpperCAmelCase = None ,UpperCAmelCase = None ,): '''simple docstring''' return Protein( aatype=features['aatype'] ,atom_positions=result['final_atom_positions'] ,atom_mask=result['final_atom_mask'] ,residue_index=features['residue_index'] + 1 ,b_factors=b_factors if b_factors is not None else np.zeros_like(result['final_atom_mask'] ) ,chain_index=lowercase__ ,remark=lowercase__ ,parents=lowercase__ ,parents_chain_index=lowercase__ ,)
531
import json import sys import tempfile import unittest from pathlib import Path import transformers from transformers import ( CONFIG_MAPPING, FEATURE_EXTRACTOR_MAPPING, AutoConfig, AutoFeatureExtractor, WavaVecaConfig, WavaVecaFeatureExtractor, ) from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, get_tests_dir sys.path.append(str(Path(__file__).parent.parent.parent.parent / "utils")) from test_module.custom_configuration import CustomConfig # noqa E402 from test_module.custom_feature_extraction import CustomFeatureExtractor # noqa E402 UpperCamelCase = get_tests_dir("fixtures") UpperCamelCase = get_tests_dir("fixtures/dummy_feature_extractor_config.json") UpperCamelCase = get_tests_dir("fixtures/dummy-config.json") class lowerCAmelCase_ ( unittest.TestCase ): """simple docstring""" def __a ( self :Optional[int] ): UpperCamelCase__ :Optional[int] = 0 def __a ( self :str ): UpperCamelCase__ :Optional[Any] = AutoFeatureExtractor.from_pretrained("""facebook/wav2vec2-base-960h""" ) self.assertIsInstance(lowerCamelCase__ , lowerCamelCase__ ) def __a ( self :Dict ): UpperCamelCase__ :Dict = AutoFeatureExtractor.from_pretrained(lowerCamelCase__ ) self.assertIsInstance(lowerCamelCase__ , lowerCamelCase__ ) def __a ( self :Optional[int] ): with tempfile.TemporaryDirectory() as tmpdirname: UpperCamelCase__ :List[str] = WavaVecaConfig() # remove feature_extractor_type to make sure config.json alone is enough to load feature processor locally UpperCamelCase__ :Tuple = AutoFeatureExtractor.from_pretrained(lowerCamelCase__ ).to_dict() config_dict.pop("""feature_extractor_type""" ) UpperCamelCase__ :Union[str, Any] = WavaVecaFeatureExtractor(**lowerCamelCase__ ) # save in new folder model_config.save_pretrained(lowerCamelCase__ ) config.save_pretrained(lowerCamelCase__ ) UpperCamelCase__ :Optional[Any] = AutoFeatureExtractor.from_pretrained(lowerCamelCase__ ) # make sure private variable is not incorrectly saved UpperCamelCase__ :Tuple = json.loads(config.to_json_string() ) self.assertTrue("""_processor_class""" not in dict_as_saved ) self.assertIsInstance(lowerCamelCase__ , lowerCamelCase__ ) def __a ( self :Union[str, Any] ): UpperCamelCase__ :Dict = AutoFeatureExtractor.from_pretrained(lowerCamelCase__ ) self.assertIsInstance(lowerCamelCase__ , lowerCamelCase__ ) def __a ( self :Dict ): with self.assertRaisesRegex( lowerCamelCase__ , """bert-base is not a local folder and is not a valid model identifier""" ): UpperCamelCase__ :Dict = AutoFeatureExtractor.from_pretrained("""bert-base""" ) def __a ( self :List[Any] ): with self.assertRaisesRegex( lowerCamelCase__ , r"""aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)""" ): UpperCamelCase__ :Optional[int] = AutoFeatureExtractor.from_pretrained(lowerCamelCase__ , revision="""aaaaaa""" ) def __a ( self :int ): with self.assertRaisesRegex( lowerCamelCase__ , """hf-internal-testing/config-no-model does not appear to have a file named preprocessor_config.json.""" , ): UpperCamelCase__ :List[Any] = AutoFeatureExtractor.from_pretrained("""hf-internal-testing/config-no-model""" ) def __a ( self :Optional[int] ): # If remote code is not set, we will time out when asking whether to load the model. with self.assertRaises(lowerCamelCase__ ): UpperCamelCase__ :List[Any] = AutoFeatureExtractor.from_pretrained( """hf-internal-testing/test_dynamic_feature_extractor""" ) # If remote code is disabled, we can't load this config. 
with self.assertRaises(lowerCamelCase__ ): UpperCamelCase__ :Optional[Any] = AutoFeatureExtractor.from_pretrained( """hf-internal-testing/test_dynamic_feature_extractor""" , trust_remote_code=lowerCamelCase__ ) UpperCamelCase__ :str = AutoFeatureExtractor.from_pretrained( """hf-internal-testing/test_dynamic_feature_extractor""" , trust_remote_code=lowerCamelCase__ ) self.assertEqual(feature_extractor.__class__.__name__ , """NewFeatureExtractor""" ) # Test feature extractor can be reloaded. with tempfile.TemporaryDirectory() as tmp_dir: feature_extractor.save_pretrained(lowerCamelCase__ ) UpperCamelCase__ :Any = AutoFeatureExtractor.from_pretrained(lowerCamelCase__ , trust_remote_code=lowerCamelCase__ ) self.assertEqual(reloaded_feature_extractor.__class__.__name__ , """NewFeatureExtractor""" ) def __a ( self :Dict ): try: AutoConfig.register("""custom""" , lowerCamelCase__ ) AutoFeatureExtractor.register(lowerCamelCase__ , lowerCamelCase__ ) # Trying to register something existing in the Transformers library will raise an error with self.assertRaises(lowerCamelCase__ ): AutoFeatureExtractor.register(lowerCamelCase__ , lowerCamelCase__ ) # Now that the config is registered, it can be used as any other config with the auto-API UpperCamelCase__ :Any = CustomFeatureExtractor.from_pretrained(lowerCamelCase__ ) with tempfile.TemporaryDirectory() as tmp_dir: feature_extractor.save_pretrained(lowerCamelCase__ ) UpperCamelCase__ :List[Any] = AutoFeatureExtractor.from_pretrained(lowerCamelCase__ ) self.assertIsInstance(lowerCamelCase__ , lowerCamelCase__ ) finally: if "custom" in CONFIG_MAPPING._extra_content: del CONFIG_MAPPING._extra_content["custom"] if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content: del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig] def __a ( self :Optional[int] ): class lowerCAmelCase_ ( lowercase ): """simple docstring""" _snake_case : Optional[int] = True try: AutoConfig.register("""custom""" , lowerCamelCase__ ) AutoFeatureExtractor.register(lowerCamelCase__ , lowerCamelCase__ ) # If remote code is not set, the default is to use local UpperCamelCase__ :Optional[Any] = AutoFeatureExtractor.from_pretrained( """hf-internal-testing/test_dynamic_feature_extractor""" ) self.assertEqual(feature_extractor.__class__.__name__ , """NewFeatureExtractor""" ) self.assertTrue(feature_extractor.is_local ) # If remote code is disabled, we load the local one. UpperCamelCase__ :str = AutoFeatureExtractor.from_pretrained( """hf-internal-testing/test_dynamic_feature_extractor""" , trust_remote_code=lowerCamelCase__ ) self.assertEqual(feature_extractor.__class__.__name__ , """NewFeatureExtractor""" ) self.assertTrue(feature_extractor.is_local ) # If remote is enabled, we load from the Hub UpperCamelCase__ :Optional[int] = AutoFeatureExtractor.from_pretrained( """hf-internal-testing/test_dynamic_feature_extractor""" , trust_remote_code=lowerCamelCase__ ) self.assertEqual(feature_extractor.__class__.__name__ , """NewFeatureExtractor""" ) self.assertTrue(not hasattr(lowerCamelCase__ , """is_local""" ) ) finally: if "custom" in CONFIG_MAPPING._extra_content: del CONFIG_MAPPING._extra_content["custom"] if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content: del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]
45
0
import math


def res(x: int, y: int) -> float:
    """Return a comparable stand-in for x**y, using y * log10(x)."""
    if 0 not in (x, y):
        # We use the relation log10(x^y) = y * log10(x), where 10 is the base.
        return y * math.log10(x)
    if x == 0:  # 0 raised to any positive power is 0
        return 0
    if y == 0:
        return 1  # any number raised to 0 is 1
    raise AssertionError("This should never happen")


if __name__ == "__main__":
    # Read two base/power pairs and compare x1**y1 with x2**y2 via their logs.
    prompt = "Enter the base and the power separated by a comma: "
    x1, y1 = map(int, input(prompt).split(","))
    x2, y2 = map(int, input(prompt).split(","))
    res1 = res(x1, y1)
    res2 = res(x2, y2)
    if res1 > res2:
        print("Largest number is", x1, "^", y1)
    elif res2 > res1:
        print("Largest number is", x2, "^", y2)
    else:
        print("Both are equal")
561
import numpy as np import torch import tqdm from ...models.unet_ad import UNetaDModel from ...pipelines import DiffusionPipeline from ...utils import randn_tensor from ...utils.dummy_pt_objects import DDPMScheduler class lowerCAmelCase_ ( lowercase ): """simple docstring""" def __init__( self :int , lowerCamelCase__ :UNetaDModel , lowerCamelCase__ :UNetaDModel , lowerCamelCase__ :DDPMScheduler , lowerCamelCase__ :List[Any] , ): super().__init__() UpperCamelCase__ :Tuple = value_function UpperCamelCase__ :Optional[int] = unet UpperCamelCase__ :List[str] = scheduler UpperCamelCase__ :Dict = env UpperCamelCase__ :Dict = env.get_dataset() UpperCamelCase__ :Union[str, Any] = {} for key in self.data.keys(): try: UpperCamelCase__ :int = self.data[key].mean() except: # noqa: E722 pass UpperCamelCase__ :Any = {} for key in self.data.keys(): try: UpperCamelCase__ :int = self.data[key].std() except: # noqa: E722 pass UpperCamelCase__ :List[Any] = env.observation_space.shape[0] UpperCamelCase__ :List[str] = env.action_space.shape[0] def __a ( self :Union[str, Any] , lowerCamelCase__ :List[str] , lowerCamelCase__ :str ): return (x_in - self.means[key]) / self.stds[key] def __a ( self :int , lowerCamelCase__ :Optional[Any] , lowerCamelCase__ :Tuple ): return x_in * self.stds[key] + self.means[key] def __a ( self :Any , lowerCamelCase__ :int ): if type(lowerCamelCase__ ) is dict: return {k: self.to_torch(lowerCamelCase__ ) for k, v in x_in.items()} elif torch.is_tensor(lowerCamelCase__ ): return x_in.to(self.unet.device ) return torch.tensor(lowerCamelCase__ , device=self.unet.device ) def __a ( self :Union[str, Any] , lowerCamelCase__ :List[Any] , lowerCamelCase__ :Optional[Any] , lowerCamelCase__ :Tuple ): for key, val in cond.items(): UpperCamelCase__ :str = val.clone() return x_in def __a ( self :Union[str, Any] , lowerCamelCase__ :List[Any] , lowerCamelCase__ :int , lowerCamelCase__ :Optional[int] , lowerCamelCase__ :Optional[int] ): UpperCamelCase__ :Any = x.shape[0] UpperCamelCase__ :List[Any] = None for i in tqdm.tqdm(self.scheduler.timesteps ): # create batch of timesteps to pass into model UpperCamelCase__ :Optional[Any] = torch.full((batch_size,) , lowerCamelCase__ , device=self.unet.device , dtype=torch.long ) for _ in range(lowerCamelCase__ ): with torch.enable_grad(): x.requires_grad_() # permute to match dimension for pre-trained models UpperCamelCase__ :Dict = self.value_function(x.permute(0 , 2 , 1 ) , lowerCamelCase__ ).sample UpperCamelCase__ :List[Any] = torch.autograd.grad([y.sum()] , [x] )[0] UpperCamelCase__ :Union[str, Any] = self.scheduler._get_variance(lowerCamelCase__ ) UpperCamelCase__ :Any = torch.exp(0.5 * posterior_variance ) UpperCamelCase__ :Dict = model_std * grad UpperCamelCase__ :Optional[Any] = 0 UpperCamelCase__ :Dict = x.detach() UpperCamelCase__ :int = x + scale * grad UpperCamelCase__ :int = self.reset_xa(lowerCamelCase__ , lowerCamelCase__ , self.action_dim ) UpperCamelCase__ :List[str] = self.unet(x.permute(0 , 2 , 1 ) , lowerCamelCase__ ).sample.permute(0 , 2 , 1 ) # TODO: verify deprecation of this kwarg UpperCamelCase__ :List[str] = self.scheduler.step(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , predict_epsilon=lowerCamelCase__ )["""prev_sample"""] # apply conditions to the trajectory (set the initial state) UpperCamelCase__ :Optional[Any] = self.reset_xa(lowerCamelCase__ , lowerCamelCase__ , self.action_dim ) UpperCamelCase__ :Optional[int] = self.to_torch(lowerCamelCase__ ) return x, y def __call__( self :Optional[Any] , lowerCamelCase__ 
:Optional[int] , lowerCamelCase__ :str=64 , lowerCamelCase__ :Tuple=32 , lowerCamelCase__ :Dict=2 , lowerCamelCase__ :str=0.1 ): # normalize the observations and create batch dimension UpperCamelCase__ :List[str] = self.normalize(lowerCamelCase__ , """observations""" ) UpperCamelCase__ :List[str] = obs[None].repeat(lowerCamelCase__ , axis=0 ) UpperCamelCase__ :int = {0: self.to_torch(lowerCamelCase__ )} UpperCamelCase__ :Dict = (batch_size, planning_horizon, self.state_dim + self.action_dim) # generate initial noise and apply our conditions (to make the trajectories start at current state) UpperCamelCase__ :Any = randn_tensor(lowerCamelCase__ , device=self.unet.device ) UpperCamelCase__ :Optional[int] = self.reset_xa(lowerCamelCase__ , lowerCamelCase__ , self.action_dim ) UpperCamelCase__ :List[Any] = self.to_torch(lowerCamelCase__ ) # run the diffusion process UpperCamelCase__ , UpperCamelCase__ :Union[str, Any] = self.run_diffusion(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) # sort output trajectories by value UpperCamelCase__ :List[Any] = y.argsort(0 , descending=lowerCamelCase__ ).squeeze() UpperCamelCase__ :Dict = x[sorted_idx] UpperCamelCase__ :Tuple = sorted_values[:, :, : self.action_dim] UpperCamelCase__ :Optional[Any] = actions.detach().cpu().numpy() UpperCamelCase__ :Optional[int] = self.de_normalize(lowerCamelCase__ , key="""actions""" ) # select the action with the highest value if y is not None: UpperCamelCase__ :List[str] = 0 else: # if we didn't run value guiding, select a random action UpperCamelCase__ :Dict = np.random.randint(0 , lowerCamelCase__ ) UpperCamelCase__ :Tuple = denorm_actions[selected_index, 0] return denorm_actions
45
0
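The first snippet in the record above compares x**y values through y * log10(x) rather than materializing the (potentially enormous) powers. A small self-contained sketch of the idea, with assumed names:

import math

def log_power(x: int, y: int) -> float:
    # log10 of x**y, computed without ever forming x**y itself.
    return y * math.log10(x)

# 2**1000 vs 10**300: 1000 * log10(2) ~ 301.03 > 300, so 2**1000 is larger.
print(log_power(2, 1000) > log_power(10, 300))  # True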
from sympy import diff, lambdify, symbols
from sympy.functions import *  # noqa: F403


def newton_raphson(
    function: str,
    starting_point: complex,
    variable: str = "x",
    precision: float = 10**-10,
    multiplicity: int = 1,
) -> complex:
    """Find a root of `function` in `variable` using the Newton-Raphson method."""
    x = symbols(variable)
    func = lambdify(x, function)
    diff_function = lambdify(x, diff(function, x))
    prev_guess = starting_point
    while True:
        if diff_function(prev_guess) != 0:
            next_guess = prev_guess - multiplicity * func(prev_guess) / diff_function(prev_guess)
        else:
            raise ZeroDivisionError("Could not find root") from None
        # Precision is checked by comparing the difference of consecutive guesses
        if abs(next_guess - prev_guess) < precision:
            return next_guess
        prev_guess = next_guess


if __name__ == "__main__":
    # Find root of trigonometric function (value of pi)
    print(f"The root of sin(x) = 0 is {newton_raphson('sin(x)', 2)}")
    # Find fourth root of 5
    print(f"The root of x**4 - 5 = 0 is {newton_raphson('x**4 - 5', 0.4 + 5j)}")
    # Find value of e
    print("The root of log(y) - 1 = 0 is", f"{newton_raphson('log(y) - 1', 2, variable='y')}")
    # Exponential roots
    print("The root of exp(x) - 1 = 0 is", f"{newton_raphson('exp(x) - 1', 10, precision=0.005)}")
    # Find root of cos(x)
    print(f"The root of cos(x) = 0 is {newton_raphson('cos(x)', 0)}")
71
def is_palindrome_number(num: int) -> bool:
    """Return True if the integer reads the same forwards and backwards."""
    if num < 0:
        return False
    num_copy = num
    rev_num = 0
    while num > 0:
        rev_num = rev_num * 10 + (num % 10)
        num //= 10
    return num_copy == rev_num


if __name__ == "__main__":
    import doctest

    doctest.testmod()
45
0
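The Newton-Raphson routine above leans on sympy only to obtain the derivative; the core update is x_next = x - f(x) / f'(x). A dependency-free sketch for a hand-written f and f', with assumed names:

def newton_sqrt(a: float, guess: float = 1.0, precision: float = 1e-10) -> float:
    # Root of f(x) = x**2 - a, with f'(x) = 2*x, found by the same update rule.
    while True:
        next_guess = guess - (guess * guess - a) / (2 * guess)
        if abs(next_guess - guess) < precision:
            return next_guess
        guess = next_guess

print(newton_sqrt(2))  # ~1.4142135623730951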
MORSE_CODE_DICT = {
    "A": ".-", "B": "-...", "C": "-.-.", "D": "-..", "E": ".", "F": "..-.",
    "G": "--.", "H": "....", "I": "..", "J": ".---", "K": "-.-", "L": ".-..",
    "M": "--", "N": "-.", "O": "---", "P": ".--.", "Q": "--.-", "R": ".-.",
    "S": "...", "T": "-", "U": "..-", "V": "...-", "W": ".--", "X": "-..-",
    "Y": "-.--", "Z": "--..",
    "1": ".----", "2": "..---", "3": "...--", "4": "....-", "5": ".....",
    "6": "-....", "7": "--...", "8": "---..", "9": "----.", "0": "-----",
    "&": ".-...", "@": ".--.-.", ":": "---...", ",": "--..--", ".": ".-.-.-",
    "'": ".----.", '"': ".-..-.", "?": "..--..", "/": "-..-.", "=": "-...-",
    "+": ".-.-.", "-": "-....-", "(": "-.--.", ")": "-.--.-",
    "!": "-.-.--",  # Exclamation mark is not in ITU-R recommendation
    " ": "/",
}

REVERSE_DICT = {value: key for key, value in MORSE_CODE_DICT.items()}


def encrypt(message: str) -> str:
    return " ".join(MORSE_CODE_DICT[char] for char in message.upper())


def decrypt(message: str) -> str:
    return "".join(REVERSE_DICT[char] for char in message.split())


def main() -> None:
    message = "Morse code here!"
    print(message)
    message = encrypt(message)
    print(message)
    message = decrypt(message)
    print(message)


if __name__ == "__main__":
    main()
556
from __future__ import annotations


def all_unique(nums: list[int]) -> bool:
    """Return True if no value appears more than once in the list."""
    return len(set(nums)) == len(nums)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
45
0
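Because the Morse table above maps characters to codes one-to-one (with "/" standing in for spaces), decoding exactly inverts encoding. A tiny self-contained round-trip check over a reduced table; the names and the reduced alphabet are assumptions for illustration:

MORSE = {"S": "...", "O": "---", " ": "/"}
REVERSE = {v: k for k, v in MORSE.items()}

def encode(message: str) -> str:
    return " ".join(MORSE[ch] for ch in message.upper())

def decode(code: str) -> str:
    return "".join(REVERSE[sym] for sym in code.split())

msg = "SOS SOS"
assert decode(encode(msg)) == msg
print(encode(msg))  # ... --- ... / ... --- ...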
import os
import zipfile

import requests
from get_ci_error_statistics import download_artifact, get_artifacts_links


def get_daily_ci_runs(token, num_runs=7):
    """Get the workflow runs of the scheduled (daily) CI."""
    headers = None
    if token is not None:
        headers = {"Accept": "application/vnd.github+json", "Authorization": f"Bearer {token}"}
    # The id of a workflow (not of a workflow run)
    workflow_id = "636036"
    url = f"https://api.github.com/repos/huggingface/transformers/actions/workflows/{workflow_id}/runs"
    # On `main` branch + event being `schedule` + not returning PRs + only `num_runs` results
    url += f"?branch=main&event=schedule&exclude_pull_requests=true&per_page={num_runs}"
    result = requests.get(url, headers=headers).json()
    return result["workflow_runs"]


def get_last_daily_ci_runs(token):
    """Get the id of the last completed run of the scheduled (daily) CI."""
    workflow_runs = get_daily_ci_runs(token)
    workflow_run_id = None
    for workflow_run in workflow_runs:
        if workflow_run["status"] == "completed":
            workflow_run_id = workflow_run["id"]
            break
    return workflow_run_id


def get_last_daily_ci_artifacts(artifact_names, output_dir, token):
    """Download the requested artifacts of the last completed daily CI run."""
    workflow_run_id = get_last_daily_ci_runs(token)
    if workflow_run_id is not None:
        artifacts_links = get_artifacts_links(worflow_run_id=workflow_run_id, token=token)
        for artifact_name in artifact_names:
            if artifact_name in artifacts_links:
                artifact_url = artifacts_links[artifact_name]
                download_artifact(
                    artifact_name=artifact_name, artifact_url=artifact_url, output_dir=output_dir, token=token
                )


def get_last_daily_ci_reports(artifact_names, output_dir, token):
    """Get the artifacts' content of the last completed daily CI run."""
    get_last_daily_ci_artifacts(artifact_names, output_dir, token)
    results = {}
    for artifact_name in artifact_names:
        artifact_zip_path = os.path.join(output_dir, f"{artifact_name}.zip")
        if os.path.isfile(artifact_zip_path):
            results[artifact_name] = {}
            with zipfile.ZipFile(artifact_zip_path) as z:
                for filename in z.namelist():
                    if not os.path.isdir(filename):
                        # read the file
                        with z.open(filename) as f:
                            results[artifact_name][filename] = f.read().decode("UTF-8")
    return results
16
from __future__ import annotations


class XORCipher:
    def __init__(self, key: int = 0):
        self.__key = key

    def encrypt(self, content: str, key: int) -> list[str]:
        assert isinstance(key, int) and isinstance(content, str)
        key = key or self.__key or 1
        # make sure key is an appropriate size
        key %= 255
        return [chr(ord(ch) ^ key) for ch in content]

    def decrypt(self, content: str, key: int) -> list[str]:
        assert isinstance(key, int) and isinstance(content, str)
        key = key or self.__key or 1
        # make sure key is an appropriate size
        key %= 255
        return [chr(ord(ch) ^ key) for ch in content]

    def encrypt_string(self, content: str, key: int = 0) -> str:
        assert isinstance(key, int) and isinstance(content, str)
        key = key or self.__key or 1
        # make sure key can be any size
        while key > 255:
            key -= 255
        # This will be returned
        ans = ""
        for ch in content:
            ans += chr(ord(ch) ^ key)
        return ans

    def decrypt_string(self, content: str, key: int = 0) -> str:
        assert isinstance(key, int) and isinstance(content, str)
        key = key or self.__key or 1
        # make sure key can be any size
        while key > 255:
            key -= 255
        # This will be returned
        ans = ""
        for ch in content:
            ans += chr(ord(ch) ^ key)
        return ans

    def encrypt_file(self, file: str, key: int = 0) -> bool:
        assert isinstance(file, str) and isinstance(key, int)
        try:
            with open(file) as fin, open("encrypt.out", "w+") as fout:
                # actual encrypt-process
                for line in fin:
                    fout.write(self.encrypt_string(line, key))
        except OSError:
            return False
        return True

    def decrypt_file(self, file: str, key: int) -> bool:
        assert isinstance(file, str) and isinstance(key, int)
        try:
            with open(file) as fin, open("decrypt.out", "w+") as fout:
                # actual decrypt-process
                for line in fin:
                    fout.write(self.decrypt_string(line, key))
        except OSError:
            return False
        return True


# Tests
# crypt = XORCipher()
# key = 67
# print(crypt.encrypt("hallo welt", key))
# print(crypt.decrypt(crypt.encrypt("hallo welt", key), key))
# print(crypt.encrypt_string("hallo welt", key))
# print(crypt.decrypt_string(crypt.encrypt_string("hallo welt", key), key))
# if crypt.encrypt_file("test.txt", key):
#     print("encrypt successful")
# else:
#     print("encrypt unsuccessful")
# if crypt.decrypt_file("encrypt.out", key):
#     print("decrypt successful")
# else:
#     print("decrypt unsuccessful")
45
0
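The cipher class above works because XOR with a fixed key is an involution: applying it twice restores the input (a ^ k ^ k == a), which is why one routine serves both directions. A minimal sketch of that symmetry, with the function name assumed:

def xor_string(content: str, key: int) -> str:
    key %= 255  # keep the key in byte range, as the class above does
    return "".join(chr(ord(ch) ^ key) for ch in content)

plain = "hallo welt"
cipher = xor_string(plain, 67)
assert xor_string(cipher, 67) == plain  # encrypting twice decrypts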
def twos_complement(number: int) -> str:
    """Return the two's complement binary representation of a negative integer."""
    if number > 0:
        raise ValueError("input must be a negative integer")
    binary_number_length = len(bin(number)[3:])
    twos_complement_number = bin(abs(number) - (1 << binary_number_length))[3:]
    twos_complement_number = (
        "1" + "0" * (binary_number_length - len(twos_complement_number)) + twos_complement_number
        if number < 0
        else "0"
    )
    return "0b" + twos_complement_number


if __name__ == "__main__":
    import doctest

    doctest.testmod()
291
import random


def partition(a, left_index, right_index):
    pivot = a[left_index]
    i = left_index + 1
    for j in range(left_index + 1, right_index):
        if a[j] < pivot:
            a[i], a[j] = a[j], a[i]
            i += 1
    a[left_index], a[i - 1] = a[i - 1], a[left_index]
    return i - 1


def quick_sort_random(a, left, right):
    if left < right:
        pivot = random.randint(left, right - 1)
        # switch the pivot with the left-most bound
        a[left], a[pivot] = a[pivot], a[left]
        pivot_index = partition(a, left, right)
        # recursive quicksort to the left of the pivot point
        quick_sort_random(a, left, pivot_index)
        # recursive quicksort to the right of the pivot point
        quick_sort_random(a, pivot_index + 1, right)


def main():
    user_input = input("Enter numbers separated by a comma:\n").strip()
    arr = [int(item) for item in user_input.split(",")]
    quick_sort_random(arr, 0, len(arr))
    print(arr)


if __name__ == "__main__":
    main()
45
0
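A worked example for the two's complement routine above, tracing -5 through the same bit arithmetic:

# bin(-5) == '-0b101', so the magnitude needs 3 bits; abs(-5) - (1 << 3) == -3,
# and bin(-3)[3:] == '11', which is left-padded under a leading '1' to '1011'.
n = -5
length = len(bin(n)[3:])                # 3 bits for the magnitude 101
raw = bin(abs(n) - (1 << length))[3:]   # '11'
print("0b" + "1" + "0" * (length - len(raw)) + raw)  # 0b1011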
"""simple docstring""" from __future__ import annotations def a ( __snake_case : list[int], __snake_case : int ): '''simple docstring''' UpperCAmelCase_ :Tuple = 0 UpperCAmelCase_ :Any = len(lowercase__ ) - 1 while i < j: if nums[i] + nums[j] == target: return [i, j] elif nums[i] + nums[j] < target: UpperCAmelCase_ :Union[str, Any] = i + 1 else: UpperCAmelCase_ :Optional[int] = j - 1 return [] if __name__ == "__main__": import doctest doctest.testmod() print(f'''{two_pointer([2, 7, 11, 15], 9) = }''')
608
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices


logger = logging.get_logger(__name__)

DINAT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "shi-labs/dinat-mini-in1k-224": "https://huggingface.co/shi-labs/dinat-mini-in1k-224/resolve/main/config.json",
    # See all Dinat models at https://huggingface.co/models?filter=dinat
}


class DinatConfig(BackboneConfigMixin, PretrainedConfig):
    model_type = "dinat"

    attribute_map = {
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }

    def __init__(
        self,
        patch_size=4,
        num_channels=3,
        embed_dim=64,
        depths=[3, 4, 6, 5],
        num_heads=[2, 4, 8, 16],
        kernel_size=7,
        dilations=[[1, 8, 1], [1, 4, 1, 4], [1, 2, 1, 2, 1, 2], [1, 1, 1, 1, 1]],
        mlp_ratio=3.0,
        qkv_bias=True,
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        drop_path_rate=0.1,
        hidden_act="gelu",
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        layer_scale_init_value=0.0,
        out_features=None,
        out_indices=None,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_layers = len(depths)
        self.num_heads = num_heads
        self.kernel_size = kernel_size
        self.dilations = dilations
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        # we set the hidden_size attribute in order to make Dinat work with VisionEncoderDecoderModel
        # this indicates the channel dimension after the last stage of the model
        self.hidden_size = int(embed_dim * 2 ** (len(depths) - 1))
        self.layer_scale_init_value = layer_scale_init_value
        self.stage_names = ["stem"] + [f"stage{idx}" for idx in range(1, len(depths) + 1)]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names
        )
45
0
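The two-pointer search in the record above silently assumes its input is sorted ascending; on unsorted data the inward-moving endpoints can skip the answer. A compact sketch with assumed names:

def two_sum_sorted(nums, target):
    # Endpoints move inward: a sum that is too small advances the left
    # pointer, one that is too large retreats the right pointer.
    i, j = 0, len(nums) - 1
    while i < j:
        s = nums[i] + nums[j]
        if s == target:
            return [i, j]
        i, j = (i + 1, j) if s < target else (i, j - 1)
    return []

print(two_sum_sorted([2, 7, 11, 15], 9))   # [0, 1]
print(two_sum_sorted([2, 7, 11, 15], 10))  # []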
import math import os import re import sys import unittest from pathlib import Path from typing import Tuple from unittest.mock import patch from parameterized import parameterized from transformers.testing_utils import ( CaptureStderr, ExtendSysPath, TestCasePlus, execute_subprocess_async, get_gpu_count, get_torch_dist_unique_port, require_apex, require_bitsandbytes, require_fairscale, require_torch, require_torch_gpu, require_torch_multi_gpu, require_torch_non_multi_gpu, slow, ) from transformers.trainer_callback import TrainerState from transformers.trainer_utils import set_seed __lowerCamelCase : Dict = os.path.abspath(os.path.dirname(__file__)) with ExtendSysPath(F"{bindir}/../../examples/pytorch/translation"): from run_translation import main # noqa set_seed(42) __lowerCamelCase : Dict = """sshleifer/student_marian_en_ro_6_1""" __lowerCamelCase : Union[str, Any] = """sshleifer/tiny-mbart""" @require_torch class _lowercase ( _A ): def lowercase__ ( self , a=False , a=None , a=True , a=True , a=True , a=True , ): snake_case__ : Tuple =self.run_trainer( eval_steps=1 , max_len=1_2 , model_name=lowerCamelCase__ , num_train_epochs=1 , distributed=lowerCamelCase__ , extra_args_str=lowerCamelCase__ , predict_with_generate=lowerCamelCase__ , do_train=lowerCamelCase__ , do_eval=lowerCamelCase__ , do_predict=lowerCamelCase__ , ) snake_case__ : str =TrainerState.load_from_json(os.path.join(lowerCamelCase__ , """trainer_state.json""" ) ).log_history if not do_eval: return snake_case__ : int =[log for log in logs if """eval_loss""" in log.keys()] snake_case__ : List[str] =eval_metrics[0] if predict_with_generate: assert "eval_bleu" in first_step_stats snake_case__ : List[Any] =eval_metrics[-1] assert isinstance(last_step_stats["""eval_bleu"""] , lowerCamelCase__ ) assert not math.isnan(float(last_step_stats["""eval_loss"""] ) ), "eval_loss must not be `nan`" @require_torch_non_multi_gpu def lowercase__ ( self ): self.run_seqaseq_quick() @require_torch_multi_gpu def lowercase__ ( self ): self.run_seqaseq_quick(distributed=lowerCamelCase__ ) @require_torch_multi_gpu def lowercase__ ( self ): self.run_seqaseq_quick(distributed=lowerCamelCase__ ) @unittest.skip("""Requires an update of the env running those tests""" ) @require_torch_multi_gpu @require_fairscale def lowercase__ ( self ): self.run_seqaseq_quick(distributed=lowerCamelCase__ , extra_args_str="""--sharded_ddp simple""" ) @unittest.skip("""Requires an update of the env running those tests""" ) @require_torch_multi_gpu @require_fairscale def lowercase__ ( self ): self.run_seqaseq_quick(distributed=lowerCamelCase__ , extra_args_str="""--sharded_ddp simple --fp16""" ) @unittest.skip("""Requires an update of the env running those tests""" ) @require_torch_multi_gpu @require_fairscale def lowercase__ ( self ): self.run_seqaseq_quick(distributed=lowerCamelCase__ , extra_args_str="""--sharded_ddp zero_dp_2""" , predict_with_generate=lowerCamelCase__ ) @unittest.skip("""Requires an update of the env running those tests""" ) @require_torch_multi_gpu @require_fairscale def lowercase__ ( self ): self.run_seqaseq_quick( distributed=lowerCamelCase__ , extra_args_str="""--sharded_ddp zero_dp_2 --fp16""" , predict_with_generate=lowerCamelCase__ ) @require_apex @require_torch_gpu def lowercase__ ( self ): # XXX: apex breaks the trainer if it's run twice e.g. 
run_seq2seq.main() from the same # program and it breaks other tests that run from the same pytest worker, therefore until this is # sorted out it must be run only in an external program, that is distributed=True in this # test and only under one or more gpus - if we want cpu will need to make a special test # # specifically to the problem traced it to self.optimizer.step() - if it's run 2nd time via # 2nd main() call it botches the future eval. # self.run_seqaseq_quick(distributed=lowerCamelCase__ , extra_args_str="""--fp16 --fp16_backend=apex""" ) # test 2nd time - was getting eval_loss': nan' # to reproduce the problem set distributed=False self.run_seqaseq_quick(distributed=lowerCamelCase__ , extra_args_str="""--fp16 --fp16_backend=apex""" ) @parameterized.expand(["""base""", """low""", """high""", """mixed"""] ) @require_torch_multi_gpu def lowercase__ ( self , a ): # as each sub-test is slow-ish split into multiple sub-tests to avoid CI timeout snake_case__ : Optional[int] ={ # test with the default log_level - should be info and thus log info once """base""": {"""extra_args_str""": """""", """n_matches""": 1}, # test with low log_level and log_level_replica - should be noisy on all processes # now the info string should appear twice on 2 processes """low""": {"""extra_args_str""": """--log_level debug --log_level_replica debug""", """n_matches""": 2}, # test with high log_level and low log_level_replica # now the info string should appear once only on the replica """high""": {"""extra_args_str""": """--log_level error --log_level_replica debug""", """n_matches""": 1}, # test with high log_level and log_level_replica - should be quiet on all processes """mixed""": {"""extra_args_str""": """--log_level error --log_level_replica error""", """n_matches""": 0}, } snake_case__ : Union[str, Any] =experiments[experiment_id] snake_case__ : int ={"""distributed""": True, """predict_with_generate""": False, """do_eval""": False, """do_predict""": False} snake_case__ : Any ="""Running training""" with CaptureStderr() as cl: self.run_seqaseq_quick(**lowerCamelCase__ , extra_args_str=data["""extra_args_str"""] ) snake_case__ : Any =len(re.findall(lowerCamelCase__ , cl.err ) ) self.assertEqual(lowerCamelCase__ , data["""n_matches"""] ) @slow def lowercase__ ( self ): snake_case__ : List[str] =self.run_trainer( eval_steps=2 , max_len=1_2_8 , model_name=lowerCamelCase__ , learning_rate=3e-4 , num_train_epochs=1_0 , distributed=lowerCamelCase__ , ) # Check metrics snake_case__ : Tuple =TrainerState.load_from_json(os.path.join(lowerCamelCase__ , """trainer_state.json""" ) ).log_history snake_case__ : int =[log for log in logs if """eval_loss""" in log.keys()] snake_case__ : Tuple =eval_metrics[0] snake_case__ : List[Any] =eval_metrics[-1] assert first_step_stats["eval_loss"] > last_step_stats["eval_loss"], "model learned nothing" assert isinstance(last_step_stats["""eval_bleu"""] , lowerCamelCase__ ) # test if do_predict saves generations and metrics snake_case__ : int =os.listdir(lowerCamelCase__ ) snake_case__ : Dict ={os.path.basename(lowerCamelCase__ ) for p in contents} assert "generated_predictions.txt" in contents assert "predict_results.json" in contents @slow @require_bitsandbytes def lowercase__ ( self ): from transformers.training_args import OptimizerNames def train_and_return_metrics(a ) -> Tuple[int, float]: snake_case__ : List[str] ="""--skip_memory_metrics 0""" snake_case__ : Tuple =self.run_trainer( max_len=1_2_8 , model_name=lowerCamelCase__ , learning_rate=3e-4 , num_train_epochs=1 
, optim=lowerCamelCase__ , distributed=lowerCamelCase__ , extra_args_str=lowerCamelCase__ , do_eval=lowerCamelCase__ , do_predict=lowerCamelCase__ , n_gpus_to_use=1 , ) # Check metrics snake_case__ : List[Any] =TrainerState.load_from_json(Path(lowerCamelCase__ , """trainer_state.json""" ) ).log_history snake_case__ : List[Any] =int(logs[0]["""train_mem_gpu_peaked_delta"""] / 2**2_0 ) snake_case__ : Tuple =int(logs[0]["""train_mem_gpu_alloc_delta"""] / 2**2_0 ) snake_case__ : Tuple =logs[0]["""train_loss"""] return gpu_peak_mem_mb, gpu_alloc_mem_mb, loss snake_case__ : Tuple =train_and_return_metrics(OptimizerNames.ADAMW_TORCH.value ) snake_case__ : List[Any] =train_and_return_metrics(OptimizerNames.ADAMW_BNB.value ) snake_case__ : Optional[Any] =gpu_alloc_mem_orig - gpu_alloc_mem_bnb snake_case__ : List[str] =gpu_peak_mem_orig + gpu_alloc_mem_orig snake_case__ : str =gpu_peak_mem_bnb + gpu_alloc_mem_bnb snake_case__ : Optional[Any] =gpu_total_mem_orig - gpu_total_mem_bnb # sshleifer/student_marian_en_ro_6_1 has 54M parameter, 29M of which is `nn.Embedding` which # doesn't get quantized and remains in fp32. Therefore we only have 25M parameters quantized # in 2 bytes and the diff in optim memory usage is derived as so: # # - normal 25*8=~200MB (8 bytes per param) # - bnb 25*2= ~50MB (2 bytes per param) # # Thus we should expect ~150MB total memory saved. # # Peak memory should be the same - the total should be different by about that same margin # # After leaving a small margin to accommodate for differences between gpus let's check # that we have at least 120MB in savings snake_case__ : Union[str, Any] =1_2_0 # uncomment the following if this test starts failing - requires py38 for a new print feature # gpu_peak_mem_diff = gpu_peak_mem_orig - gpu_peak_mem_bnb # print(f"{gpu_alloc_mem_orig=}MB {gpu_peak_mem_orig=}MB {gpu_alloc_mem_orig+gpu_peak_mem_orig=}MB") # print(f" {gpu_alloc_mem_bnb=}MB {gpu_peak_mem_bnb=}MB {gpu_alloc_mem_bnb+gpu_peak_mem_bnb=}MB") # print(f"{gpu_alloc_mem_diff=}MB") # print(f"{gpu_peak_mem_diff=}MB") # print(f"{gpu_total_mem_orig=}MB, {gpu_total_mem_bnb=}MB") # print(f"{gpu_total_mem_diff=}MB, {gpu_total_mem_diff=}MB") self.assertGreater( lowerCamelCase__ , lowerCamelCase__ , """should use ~150MB less alloc gpu memory with BNB, compared to without it for this model but got""" F" a difference of {gpu_alloc_mem_diff}MB, with gpu_alloc_mem_orig={gpu_alloc_mem_orig}MB and" F" gpu_alloc_mem_bnb={gpu_alloc_mem_bnb}MB" , ) self.assertGreater( lowerCamelCase__ , lowerCamelCase__ , """should use ~150MB less total gpu memory with BNB, compared to without it for this model but got""" F" a difference of {gpu_total_mem_diff}MB, with gpu_total_mem_orig={gpu_total_mem_orig}MB and" F" gpu_total_mem_bnb={gpu_total_mem_bnb}MB" , ) self.assertEqual( lowerCamelCase__ , lowerCamelCase__ , F"loss should be the same, but got loss_orig={loss_orig}, loss_bnb={loss_bnb}" ) def lowercase__ ( self , a , a , a , a = 3e-3 , a = "adafactor" , a = False , a = None , a = 0 , a = True , a = True , a = True , a = True , a = None , ): snake_case__ : List[Any] =self.test_file_dir / """../fixtures/tests_samples/wmt_en_ro""" snake_case__ : int =self.get_auto_remove_tmp_dir() snake_case__ : List[Any] =F"\n --model_name_or_path {model_name}\n --train_file {data_dir}/train.json\n --validation_file {data_dir}/val.json\n --test_file {data_dir}/test.json\n --output_dir {output_dir}\n --overwrite_output_dir\n --max_train_samples 8\n --max_source_length {max_len}\n --max_target_length {max_len}\n --do_train\n 
--num_train_epochs {str(lowerCamelCase__ )}\n --per_device_train_batch_size 4\n --learning_rate {learning_rate}\n --warmup_steps 8\n --logging_steps 0\n --logging_strategy no\n --save_steps {str(lowerCamelCase__ )}\n --group_by_length\n --label_smoothing_factor 0.1\n --target_lang ro_RO\n --source_lang en_XX\n ".split() snake_case__ : Optional[Any] =F"\n --do_eval\n --per_device_eval_batch_size 4\n --max_eval_samples 8\n --val_max_target_length {max_len}\n --evaluation_strategy steps\n --eval_steps {str(lowerCamelCase__ )}\n ".split() snake_case__ : Optional[Any] =""" --do_predict """.split() snake_case__ : Union[str, Any] =[] if do_train: args += args_train if do_eval: args += args_eval if do_predict: args += args_predict if predict_with_generate: args += "--predict_with_generate".split() if do_train: if optim == "adafactor": args += "--adafactor".split() else: args += F"--optim {optim}".split() if extra_args_str is not None: args += extra_args_str.split() if distributed: if n_gpus_to_use is None: snake_case__ : List[Any] =get_gpu_count() snake_case__ : int =get_torch_dist_unique_port() snake_case__ : Tuple =F"\n -m torch.distributed.run\n --nproc_per_node={n_gpus_to_use}\n --master_port={master_port}\n {self.examples_dir_str}/pytorch/translation/run_translation.py\n ".split() snake_case__ : Dict =[sys.executable] + distributed_args + args # keep for quick debug # print(" ".join([f"\nPYTHONPATH={self.src_dir_str}"] +cmd)); die execute_subprocess_async(lowerCamelCase__ , env=self.get_env() ) else: snake_case__ : Dict =["""run_translation.py"""] + args with patch.object(lowerCamelCase__ , """argv""" , lowerCamelCase__ ): main() return output_dir
385
def nor_gate(input_1: int, input_2: int) -> int:
    """Output 1 only when both inputs are 0."""
    return int(input_1 == input_2 == 0)


def main() -> None:
    print("Truth Table of NOR Gate:")
    print("| Input 1 | Input 2 | Output |")
    print(f"| 0 | 0 | {nor_gate(0, 0)} |")
    print(f"| 0 | 1 | {nor_gate(0, 1)} |")
    print(f"| 1 | 0 | {nor_gate(1, 0)} |")
    print(f"| 1 | 1 | {nor_gate(1, 1)} |")


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    main()
45
0
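The NOR gate above is simply the negation of OR, so its truth table can be checked mechanically against `not (a or b)`. A short sketch, names assumed:

from itertools import product

def nor_gate(a: int, b: int) -> int:
    return int(a == b == 0)

# Verify NOR(a, b) == NOT (a OR b) over the full truth table.
for a, b in product((0, 1), repeat=2):
    assert nor_gate(a, b) == int(not (a or b))
print("truth table verified")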
import inspect import unittest from transformers import ConvNextConfig from transformers.testing_utils import require_torch, require_vision, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_backbone_common import BackboneTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ConvNextBackbone, ConvNextForImageClassification, ConvNextModel from transformers.models.convnext.modeling_convnext import CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import AutoImageProcessor class lowerCamelCase : """simple docstring""" def __init__( self : Dict, _UpperCAmelCase : Optional[int], _UpperCAmelCase : Optional[int]=1_3, _UpperCAmelCase : str=3_2, _UpperCAmelCase : Optional[Any]=3, _UpperCAmelCase : Union[str, Any]=4, _UpperCAmelCase : str=[1_0, 2_0, 3_0, 4_0], _UpperCAmelCase : Any=[2, 2, 3, 2], _UpperCAmelCase : List[str]=True, _UpperCAmelCase : List[Any]=True, _UpperCAmelCase : Tuple=3_7, _UpperCAmelCase : int="gelu", _UpperCAmelCase : Optional[Any]=1_0, _UpperCAmelCase : Union[str, Any]=0.02, _UpperCAmelCase : int=["stage2", "stage3", "stage4"], _UpperCAmelCase : Optional[int]=[2, 3, 4], _UpperCAmelCase : Optional[int]=None, ) -> List[str]: """simple docstring""" SCREAMING_SNAKE_CASE__ : Union[str, Any] = parent SCREAMING_SNAKE_CASE__ : List[str] = batch_size SCREAMING_SNAKE_CASE__ : Dict = image_size SCREAMING_SNAKE_CASE__ : Tuple = num_channels SCREAMING_SNAKE_CASE__ : Tuple = num_stages SCREAMING_SNAKE_CASE__ : Optional[Any] = hidden_sizes SCREAMING_SNAKE_CASE__ : Optional[int] = depths SCREAMING_SNAKE_CASE__ : Dict = is_training SCREAMING_SNAKE_CASE__ : Union[str, Any] = use_labels SCREAMING_SNAKE_CASE__ : Tuple = intermediate_size SCREAMING_SNAKE_CASE__ : Tuple = hidden_act SCREAMING_SNAKE_CASE__ : Optional[Any] = num_labels SCREAMING_SNAKE_CASE__ : Optional[Any] = initializer_range SCREAMING_SNAKE_CASE__ : Union[str, Any] = out_features SCREAMING_SNAKE_CASE__ : Any = out_indices SCREAMING_SNAKE_CASE__ : int = scope def A_ ( self : Optional[Any] ) -> List[str]: """simple docstring""" SCREAMING_SNAKE_CASE__ : Any = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) SCREAMING_SNAKE_CASE__ : Union[str, Any] = None if self.use_labels: SCREAMING_SNAKE_CASE__ : List[str] = ids_tensor([self.batch_size], self.num_labels ) SCREAMING_SNAKE_CASE__ : Union[str, Any] = self.get_config() return config, pixel_values, labels def A_ ( self : List[str] ) -> Dict: """simple docstring""" return ConvNextConfig( num_channels=self.num_channels, hidden_sizes=self.hidden_sizes, depths=self.depths, num_stages=self.num_stages, hidden_act=self.hidden_act, is_decoder=lowerCamelCase__, initializer_range=self.initializer_range, out_features=self.out_features, out_indices=self.out_indices, num_labels=self.num_labels, ) def A_ ( self : Union[str, Any], _UpperCAmelCase : Optional[int], _UpperCAmelCase : List[Any], _UpperCAmelCase : Union[str, Any] ) -> List[str]: """simple docstring""" SCREAMING_SNAKE_CASE__ : List[Any] = ConvNextModel(config=lowerCamelCase__ ) model.to(lowerCamelCase__ ) model.eval() SCREAMING_SNAKE_CASE__ : Union[str, Any] = model(lowerCamelCase__ ) # expected last hidden states: B, C, H // 32, W // 32 self.parent.assertEqual( 
result.last_hidden_state.shape, (self.batch_size, self.hidden_sizes[-1], self.image_size // 3_2, self.image_size // 3_2), ) def A_ ( self : Tuple, _UpperCAmelCase : Tuple, _UpperCAmelCase : Union[str, Any], _UpperCAmelCase : Optional[Any] ) -> Any: """simple docstring""" SCREAMING_SNAKE_CASE__ : Union[str, Any] = ConvNextForImageClassification(lowerCamelCase__ ) model.to(lowerCamelCase__ ) model.eval() SCREAMING_SNAKE_CASE__ : str = model(lowerCamelCase__, labels=lowerCamelCase__ ) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels) ) def A_ ( self : Optional[int], _UpperCAmelCase : Union[str, Any], _UpperCAmelCase : Optional[Any], _UpperCAmelCase : List[str] ) -> Optional[Any]: """simple docstring""" SCREAMING_SNAKE_CASE__ : List[str] = ConvNextBackbone(config=lowerCamelCase__ ) model.to(lowerCamelCase__ ) model.eval() SCREAMING_SNAKE_CASE__ : Optional[Any] = model(lowerCamelCase__ ) # verify hidden states self.parent.assertEqual(len(result.feature_maps ), len(config.out_features ) ) self.parent.assertListEqual(list(result.feature_maps[0].shape ), [self.batch_size, self.hidden_sizes[1], 4, 4] ) # verify channels self.parent.assertEqual(len(model.channels ), len(config.out_features ) ) self.parent.assertListEqual(model.channels, config.hidden_sizes[1:] ) # verify backbone works with out_features=None SCREAMING_SNAKE_CASE__ : List[Any] = None SCREAMING_SNAKE_CASE__ : Optional[Any] = ConvNextBackbone(config=lowerCamelCase__ ) model.to(lowerCamelCase__ ) model.eval() SCREAMING_SNAKE_CASE__ : int = model(lowerCamelCase__ ) # verify feature maps self.parent.assertEqual(len(result.feature_maps ), 1 ) self.parent.assertListEqual(list(result.feature_maps[0].shape ), [self.batch_size, self.hidden_sizes[-1], 1, 1] ) # verify channels self.parent.assertEqual(len(model.channels ), 1 ) self.parent.assertListEqual(model.channels, [config.hidden_sizes[-1]] ) def A_ ( self : List[str] ) -> List[str]: """simple docstring""" SCREAMING_SNAKE_CASE__ : Optional[int] = self.prepare_config_and_inputs() SCREAMING_SNAKE_CASE__ : List[str] = config_and_inputs SCREAMING_SNAKE_CASE__ : str = {"""pixel_values""": pixel_values} return config, inputs_dict @require_torch class lowerCamelCase (__lowerCamelCase , __lowerCamelCase , unittest.TestCase ): """simple docstring""" UpperCAmelCase_ = ( ( ConvNextModel, ConvNextForImageClassification, ConvNextBackbone, ) if is_torch_available() else () ) UpperCAmelCase_ = ( {"""feature-extraction""": ConvNextModel, """image-classification""": ConvNextForImageClassification} if is_torch_available() else {} ) UpperCAmelCase_ = True UpperCAmelCase_ = False UpperCAmelCase_ = False UpperCAmelCase_ = False UpperCAmelCase_ = False def A_ ( self : str ) -> Optional[Any]: """simple docstring""" SCREAMING_SNAKE_CASE__ : List[str] = ConvNextModelTester(self ) SCREAMING_SNAKE_CASE__ : Any = ConfigTester(self, config_class=lowerCamelCase__, has_text_modality=lowerCamelCase__, hidden_size=3_7 ) def A_ ( self : int ) -> str: """simple docstring""" self.create_and_test_config_common_properties() self.config_tester.create_and_test_config_to_json_string() self.config_tester.create_and_test_config_to_json_file() self.config_tester.create_and_test_config_from_and_save_pretrained() self.config_tester.create_and_test_config_with_num_labels() self.config_tester.check_config_can_be_init_without_params() self.config_tester.check_config_arguments_init() def A_ ( self : List[str] ) -> Optional[int]: """simple docstring""" return @unittest.skip(reason="ConvNext does not use 
inputs_embeds" ) def A_ ( self : Dict ) -> int: """simple docstring""" pass @unittest.skip(reason="ConvNext does not support input and output embeddings" ) def A_ ( self : List[str] ) -> Optional[int]: """simple docstring""" pass @unittest.skip(reason="ConvNext does not use feedforward chunking" ) def A_ ( self : Dict ) -> Optional[int]: """simple docstring""" pass def A_ ( self : Any ) -> Any: """simple docstring""" SCREAMING_SNAKE_CASE__ : List[Any] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: SCREAMING_SNAKE_CASE__ : Dict = model_class(lowerCamelCase__ ) SCREAMING_SNAKE_CASE__ : Optional[Any] = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic SCREAMING_SNAKE_CASE__ : Tuple = [*signature.parameters.keys()] SCREAMING_SNAKE_CASE__ : Tuple = ["""pixel_values"""] self.assertListEqual(arg_names[:1], lowerCamelCase__ ) def A_ ( self : Union[str, Any] ) -> Optional[Any]: """simple docstring""" SCREAMING_SNAKE_CASE__ : int = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*lowerCamelCase__ ) def A_ ( self : Any ) -> str: """simple docstring""" SCREAMING_SNAKE_CASE__ : Dict = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_backbone(*lowerCamelCase__ ) def A_ ( self : int ) -> Any: """simple docstring""" def check_hidden_states_output(_UpperCAmelCase : Dict, _UpperCAmelCase : List[str], _UpperCAmelCase : Dict ): SCREAMING_SNAKE_CASE__ : List[Any] = model_class(lowerCamelCase__ ) model.to(lowerCamelCase__ ) model.eval() with torch.no_grad(): SCREAMING_SNAKE_CASE__ : int = model(**self._prepare_for_class(lowerCamelCase__, lowerCamelCase__ ) ) SCREAMING_SNAKE_CASE__ : Optional[Any] = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states SCREAMING_SNAKE_CASE__ : Union[str, Any] = self.model_tester.num_stages self.assertEqual(len(lowerCamelCase__ ), expected_num_stages + 1 ) # ConvNext's feature maps are of shape (batch_size, num_channels, height, width) self.assertListEqual( list(hidden_states[0].shape[-2:] ), [self.model_tester.image_size // 4, self.model_tester.image_size // 4], ) SCREAMING_SNAKE_CASE__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: SCREAMING_SNAKE_CASE__ : int = True check_hidden_states_output(lowerCamelCase__, lowerCamelCase__, lowerCamelCase__ ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] SCREAMING_SNAKE_CASE__ : Optional[Any] = True check_hidden_states_output(lowerCamelCase__, lowerCamelCase__, lowerCamelCase__ ) def A_ ( self : List[Any] ) -> Dict: """simple docstring""" SCREAMING_SNAKE_CASE__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*lowerCamelCase__ ) @slow def A_ ( self : Any ) -> List[Any]: """simple docstring""" for model_name in CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: SCREAMING_SNAKE_CASE__ : str = ConvNextModel.from_pretrained(lowerCamelCase__ ) self.assertIsNotNone(lowerCamelCase__ ) def _a ( ) -> str: '''simple docstring''' SCREAMING_SNAKE_CASE__ : Any = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" ) return image @require_torch @require_vision class lowerCamelCase (unittest.TestCase ): """simple docstring""" @cached_property def A_ ( self : Optional[int] ) -> Union[str, Any]: """simple docstring""" return 
AutoImageProcessor.from_pretrained("facebook/convnext-tiny-224" ) if is_vision_available() else None @slow def A_ ( self : Tuple ) -> str: """simple docstring""" SCREAMING_SNAKE_CASE__ : Tuple = ConvNextForImageClassification.from_pretrained("facebook/convnext-tiny-224" ).to(lowerCamelCase__ ) SCREAMING_SNAKE_CASE__ : Optional[Any] = self.default_image_processor SCREAMING_SNAKE_CASE__ : str = prepare_img() SCREAMING_SNAKE_CASE__ : Dict = image_processor(images=lowerCamelCase__, return_tensors="pt" ).to(lowerCamelCase__ ) # forward pass with torch.no_grad(): SCREAMING_SNAKE_CASE__ : int = model(**lowerCamelCase__ ) # verify the logits SCREAMING_SNAKE_CASE__ : Any = torch.Size((1, 1_0_0_0) ) self.assertEqual(outputs.logits.shape, lowerCamelCase__ ) SCREAMING_SNAKE_CASE__ : List[Any] = torch.tensor([-0.0260, -0.4739, 0.1911] ).to(lowerCamelCase__ ) self.assertTrue(torch.allclose(outputs.logits[0, :3], lowerCamelCase__, atol=1E-4 ) ) @require_torch class lowerCamelCase (unittest.TestCase , __lowerCamelCase ): """simple docstring""" UpperCAmelCase_ = (ConvNextBackbone,) if is_torch_available() else () UpperCAmelCase_ = ConvNextConfig UpperCAmelCase_ = False def A_ ( self : Dict ) -> Dict: """simple docstring""" SCREAMING_SNAKE_CASE__ : Union[str, Any] = ConvNextModelTester(self )
663
import unittest import numpy as np from transformers.testing_utils import require_torch, require_vision from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import GLPNImageProcessor class lowerCAmelCase_ ( unittest.TestCase ): """simple docstring""" def __init__( self :Any , lowerCamelCase__ :Dict , lowerCamelCase__ :Optional[Any]=7 , lowerCamelCase__ :str=3 , lowerCamelCase__ :Optional[Any]=18 , lowerCamelCase__ :List[str]=30 , lowerCamelCase__ :str=4_00 , lowerCamelCase__ :Optional[int]=True , lowerCamelCase__ :Union[str, Any]=32 , lowerCamelCase__ :int=True , ): UpperCamelCase__ :List[Any] = parent UpperCamelCase__ :List[Any] = batch_size UpperCamelCase__ :Any = num_channels UpperCamelCase__ :List[str] = image_size UpperCamelCase__ :Dict = min_resolution UpperCamelCase__ :List[str] = max_resolution UpperCamelCase__ :str = do_resize UpperCamelCase__ :int = size_divisor UpperCamelCase__ :Optional[int] = do_rescale def __a ( self :str ): return { "do_resize": self.do_resize, "size_divisor": self.size_divisor, "do_rescale": self.do_rescale, } @require_torch @require_vision class lowerCAmelCase_ ( lowercase , unittest.TestCase ): """simple docstring""" _snake_case : Optional[int] = GLPNImageProcessor if is_vision_available() else None def __a ( self :Dict ): UpperCamelCase__ :Dict = GLPNImageProcessingTester(self ) @property def __a ( self :List[str] ): return self.image_processor_tester.prepare_image_processor_dict() def __a ( self :Optional[int] ): UpperCamelCase__ :Optional[Any] = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(lowerCamelCase__ , """do_resize""" ) ) self.assertTrue(hasattr(lowerCamelCase__ , """size_divisor""" ) ) self.assertTrue(hasattr(lowerCamelCase__ , """resample""" ) ) self.assertTrue(hasattr(lowerCamelCase__ , """do_rescale""" ) ) def __a ( self :Optional[int] ): pass def __a ( self :Tuple ): # Initialize image_processing UpperCamelCase__ :int = self.image_processing_class(**self.image_processor_dict ) # create random PIL images UpperCamelCase__ :str = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCamelCase__ ) for image in image_inputs: self.assertIsInstance(lowerCamelCase__ , Image.Image ) # Test not batched input (GLPNImageProcessor doesn't support batching) UpperCamelCase__ :Tuple = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values self.assertTrue(encoded_images.shape[-1] % self.image_processor_tester.size_divisor == 0 ) self.assertTrue(encoded_images.shape[-2] % self.image_processor_tester.size_divisor == 0 ) def __a ( self :str ): # Initialize image_processing UpperCamelCase__ :str = self.image_processing_class(**self.image_processor_dict ) # create random numpy tensors UpperCamelCase__ :Optional[Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCamelCase__ , numpify=lowerCamelCase__ ) for image in image_inputs: self.assertIsInstance(lowerCamelCase__ , np.ndarray ) # Test not batched input (GLPNImageProcessor doesn't support batching) UpperCamelCase__ :List[str] = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values self.assertTrue(encoded_images.shape[-1] % self.image_processor_tester.size_divisor == 0 ) self.assertTrue(encoded_images.shape[-2] % self.image_processor_tester.size_divisor == 0 ) def __a ( self 
:Any ): # Initialize image_processing UpperCamelCase__ :List[Any] = self.image_processing_class(**self.image_processor_dict ) # create random PyTorch tensors UpperCamelCase__ :Tuple = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCamelCase__ , torchify=lowerCamelCase__ ) for image in image_inputs: self.assertIsInstance(lowerCamelCase__ , torch.Tensor ) # Test not batched input (GLPNImageProcessor doesn't support batching) UpperCamelCase__ :List[str] = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values self.assertTrue(encoded_images.shape[-1] % self.image_processor_tester.size_divisor == 0 ) self.assertTrue(encoded_images.shape[-2] % self.image_processor_tester.size_divisor == 0 )
45
0
"""simple docstring""" from __future__ import annotations import numpy as np def _A (__a ) -> tuple[np.ndarray, np.ndarray]: """simple docstring""" SCREAMING_SNAKE_CASE_ : str = np.shape(lowercase__ ) if rows != columns: SCREAMING_SNAKE_CASE_ : str = ( """'table' has to be of square shaped array but got a """ f'{rows}x{columns} array:\n{table}' ) raise ValueError(lowercase__ ) SCREAMING_SNAKE_CASE_ : Union[str, Any] = np.zeros((rows, columns) ) SCREAMING_SNAKE_CASE_ : int = np.zeros((rows, columns) ) for i in range(lowercase__ ): for j in range(lowercase__ ): SCREAMING_SNAKE_CASE_ : List[str] = sum(lower[i][k] * upper[k][j] for k in range(lowercase__ ) ) if upper[j][j] == 0: raise ArithmeticError('''No LU decomposition exists''' ) SCREAMING_SNAKE_CASE_ : int = (table[i][j] - total) / upper[j][j] SCREAMING_SNAKE_CASE_ : Optional[Any] = 1 for j in range(lowercase__ , lowercase__ ): SCREAMING_SNAKE_CASE_ : int = sum(lower[i][k] * upper[k][j] for k in range(lowercase__ ) ) SCREAMING_SNAKE_CASE_ : Optional[int] = table[i][j] - total return lower, upper if __name__ == "__main__": import doctest doctest.testmod()
512
import math def A ( lowercase__ : Tuple , lowercase__ : Union[str, Any] ) -> Optional[Any]: if 0 not in (x, y): # We use the relation x^y = y*log10(x), where 10 is the base. return y * math.logaa(lowercase__ ) else: if x == 0: # 0 raised to any number is 0 return 0 elif y == 0: return 1 # any number raised to 0 is 1 raise AssertionError("""This should never happen""" ) if __name__ == "__main__": # Main function # Read two numbers from input and typecast them to int using map function. # Here x is the base and y is the power. UpperCamelCase = "Enter the base and the power separated by a comma: " UpperCamelCase , UpperCamelCase = map(int, input(prompt).split(",")) UpperCamelCase , UpperCamelCase = map(int, input(prompt).split(",")) # We find the log of each number, using the function res(), which takes two # arguments. UpperCamelCase = res(xa, ya) UpperCamelCase = res(xa, ya) # We check for the largest number if resa > resa: print("Largest number is", xa, "^", ya) elif resa > resa: print("Largest number is", xa, "^", ya) else: print("Both are equal")
45
0
import warnings from typing import List import numpy as np from ...processing_utils import ProcessorMixin from ...tokenization_utils_base import BatchEncoding from ...utils import is_flax_available, is_tf_available, is_torch_available class UpperCamelCase_ ( __UpperCamelCase ): """simple docstring""" A = ["""image_processor""", """tokenizer"""] A = """OwlViTImageProcessor""" A = ("""CLIPTokenizer""", """CLIPTokenizerFast""") def __init__( self , UpperCAmelCase=None , UpperCAmelCase=None , **UpperCAmelCase ): __lowerCamelCase = None if "feature_extractor" in kwargs: warnings.warn( """The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`""" """ instead.""" , lowerCamelCase__ , ) __lowerCamelCase = kwargs.pop("""feature_extractor""" ) __lowerCamelCase = image_processor if image_processor is not None else feature_extractor if image_processor is None: raise ValueError("""You need to specify an `image_processor`.""" ) if tokenizer is None: raise ValueError("""You need to specify a `tokenizer`.""" ) super().__init__(lowerCamelCase__ , lowerCamelCase__ ) def __call__( self , UpperCAmelCase=None , UpperCAmelCase=None , UpperCAmelCase=None , UpperCAmelCase="max_length" , UpperCAmelCase="np" , **UpperCAmelCase ): if text is None and query_images is None and images is None: raise ValueError( """You have to specify at least one text or query image or image. All three cannot be none.""" ) if text is not None: if isinstance(lowerCamelCase__ , lowerCamelCase__ ) or (isinstance(lowerCamelCase__ , lowerCamelCase__ ) and not isinstance(text[0] , lowerCamelCase__ )): __lowerCamelCase = [self.tokenizer(lowerCamelCase__ , padding=lowerCamelCase__ , return_tensors=lowerCamelCase__ , **lowerCamelCase__ )] elif isinstance(lowerCamelCase__ , lowerCamelCase__ ) and isinstance(text[0] , lowerCamelCase__ ): __lowerCamelCase = [] # Maximum number of queries across batch __lowerCamelCase = max([len(lowerCamelCase__ ) for t in text] ) # Pad all batch samples to max number of text queries for t in text: if len(lowerCamelCase__ ) != max_num_queries: __lowerCamelCase = t + [""" """] * (max_num_queries - len(lowerCamelCase__ )) __lowerCamelCase = self.tokenizer(lowerCamelCase__ , padding=lowerCamelCase__ , return_tensors=lowerCamelCase__ , **lowerCamelCase__ ) encodings.append(lowerCamelCase__ ) else: raise TypeError("""Input text should be a string, a list of strings or a nested list of strings""" ) if return_tensors == "np": __lowerCamelCase = np.concatenate([encoding["""input_ids"""] for encoding in encodings] , axis=0 ) __lowerCamelCase = np.concatenate([encoding["""attention_mask"""] for encoding in encodings] , axis=0 ) elif return_tensors == "jax" and is_flax_available(): import jax.numpy as jnp __lowerCamelCase = jnp.concatenate([encoding["""input_ids"""] for encoding in encodings] , axis=0 ) __lowerCamelCase = jnp.concatenate([encoding["""attention_mask"""] for encoding in encodings] , axis=0 ) elif return_tensors == "pt" and is_torch_available(): import torch __lowerCamelCase = torch.cat([encoding["""input_ids"""] for encoding in encodings] , dim=0 ) __lowerCamelCase = torch.cat([encoding["""attention_mask"""] for encoding in encodings] , dim=0 ) elif return_tensors == "tf" and is_tf_available(): import tensorflow as tf __lowerCamelCase = tf.stack([encoding["""input_ids"""] for encoding in encodings] , axis=0 ) __lowerCamelCase = tf.stack([encoding["""attention_mask"""] for encoding in encodings] , axis=0 ) else: raise ValueError("""Target return tensor type could not be 
returned""" ) __lowerCamelCase = BatchEncoding() __lowerCamelCase = input_ids __lowerCamelCase = attention_mask if query_images is not None: __lowerCamelCase = BatchEncoding() __lowerCamelCase = self.image_processor( lowerCamelCase__ , return_tensors=lowerCamelCase__ , **lowerCamelCase__ ).pixel_values __lowerCamelCase = query_pixel_values if images is not None: __lowerCamelCase = self.image_processor(lowerCamelCase__ , return_tensors=lowerCamelCase__ , **lowerCamelCase__ ) if text is not None and images is not None: __lowerCamelCase = image_features.pixel_values return encoding elif query_images is not None and images is not None: __lowerCamelCase = image_features.pixel_values return encoding elif text is not None or query_images is not None: return encoding else: return BatchEncoding(data=dict(**lowerCamelCase__ ) , tensor_type=lowerCamelCase__ ) def lowerCamelCase_ ( self , *UpperCAmelCase , **UpperCAmelCase ): return self.image_processor.post_process(*lowerCamelCase__ , **lowerCamelCase__ ) def lowerCamelCase_ ( self , *UpperCAmelCase , **UpperCAmelCase ): return self.image_processor.post_process_object_detection(*lowerCamelCase__ , **lowerCamelCase__ ) def lowerCamelCase_ ( self , *UpperCAmelCase , **UpperCAmelCase ): return self.image_processor.post_process_image_guided_detection(*lowerCamelCase__ , **lowerCamelCase__ ) def lowerCamelCase_ ( self , *UpperCAmelCase , **UpperCAmelCase ): return self.tokenizer.batch_decode(*lowerCamelCase__ , **lowerCamelCase__ ) def lowerCamelCase_ ( self , *UpperCAmelCase , **UpperCAmelCase ): return self.tokenizer.decode(*lowerCamelCase__ , **lowerCamelCase__ ) @property def lowerCamelCase_ ( self ): warnings.warn( """`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.""" , lowerCamelCase__ , ) return self.image_processor_class @property def lowerCamelCase_ ( self ): warnings.warn( """`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.""" , lowerCamelCase__ , ) return self.image_processor
479
from __future__ import annotations import unittest from transformers import is_tf_available from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import numpy as np import tensorflow as tf from transformers import ( TF_FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST, FlaubertConfig, TFFlaubertForMultipleChoice, TFFlaubertForQuestionAnsweringSimple, TFFlaubertForSequenceClassification, TFFlaubertForTokenClassification, TFFlaubertModel, TFFlaubertWithLMHeadModel, ) class lowerCAmelCase_ : """simple docstring""" def __init__( self :Dict , lowerCamelCase__ :List[str] , ): UpperCamelCase__ :Optional[int] = parent UpperCamelCase__ :int = 13 UpperCamelCase__ :Optional[int] = 7 UpperCamelCase__ :Dict = True UpperCamelCase__ :Dict = True UpperCamelCase__ :str = True UpperCamelCase__ :List[Any] = True UpperCamelCase__ :Any = True UpperCamelCase__ :Optional[int] = False UpperCamelCase__ :Optional[int] = False UpperCamelCase__ :Tuple = False UpperCamelCase__ :Optional[int] = 2 UpperCamelCase__ :List[str] = 99 UpperCamelCase__ :Optional[Any] = 0 UpperCamelCase__ :Any = 32 UpperCamelCase__ :List[str] = 2 UpperCamelCase__ :int = 4 UpperCamelCase__ :List[str] = 0.1 UpperCamelCase__ :Union[str, Any] = 0.1 UpperCamelCase__ :Union[str, Any] = 5_12 UpperCamelCase__ :List[str] = 16 UpperCamelCase__ :str = 2 UpperCamelCase__ :Optional[int] = 0.02 UpperCamelCase__ :Optional[int] = 3 UpperCamelCase__ :Optional[int] = 4 UpperCamelCase__ :Optional[int] = """last""" UpperCamelCase__ :Tuple = True UpperCamelCase__ :int = None UpperCamelCase__ :Dict = 0 def __a ( self :int ): UpperCamelCase__ :Optional[Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) UpperCamelCase__ :Any = random_attention_mask([self.batch_size, self.seq_length] , dtype=tf.floataa ) UpperCamelCase__ :Union[str, Any] = None if self.use_input_lengths: UpperCamelCase__ :Union[str, Any] = ( ids_tensor([self.batch_size] , vocab_size=2 ) + self.seq_length - 2 ) # small variation of seq_length UpperCamelCase__ :List[str] = None if self.use_token_type_ids: UpperCamelCase__ :List[str] = ids_tensor([self.batch_size, self.seq_length] , self.n_langs ) UpperCamelCase__ :int = None UpperCamelCase__ :List[str] = None UpperCamelCase__ :List[str] = None if self.use_labels: UpperCamelCase__ :List[Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size ) UpperCamelCase__ :Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) UpperCamelCase__ :str = ids_tensor([self.batch_size] , 2 , dtype=tf.floataa ) UpperCamelCase__ :int = ids_tensor([self.batch_size] , self.num_choices ) UpperCamelCase__ :List[Any] = FlaubertConfig( vocab_size=self.vocab_size , n_special=self.n_special , emb_dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , gelu_activation=self.gelu_activation , sinusoidal_embeddings=self.sinusoidal_embeddings , asm=self.asm , causal=self.causal , n_langs=self.n_langs , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , summary_type=self.summary_type , use_proj=self.use_proj , bos_token_id=self.bos_token_id , ) return ( config, input_ids, token_type_ids, input_lengths, 
sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask, ) def __a ( self :Union[str, Any] , lowerCamelCase__ :Optional[int] , lowerCamelCase__ :Union[str, Any] , lowerCamelCase__ :int , lowerCamelCase__ :List[str] , lowerCamelCase__ :Optional[int] , lowerCamelCase__ :int , lowerCamelCase__ :List[Any] , lowerCamelCase__ :List[Any] , lowerCamelCase__ :int , ): UpperCamelCase__ :int = TFFlaubertModel(config=lowerCamelCase__ ) UpperCamelCase__ :Union[str, Any] = {"""input_ids""": input_ids, """lengths""": input_lengths, """langs""": token_type_ids} UpperCamelCase__ :Optional[Any] = model(lowerCamelCase__ ) UpperCamelCase__ :Union[str, Any] = [input_ids, input_mask] UpperCamelCase__ :Optional[int] = model(lowerCamelCase__ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def __a ( self :Tuple , lowerCamelCase__ :Union[str, Any] , lowerCamelCase__ :Optional[Any] , lowerCamelCase__ :Union[str, Any] , lowerCamelCase__ :Any , lowerCamelCase__ :int , lowerCamelCase__ :int , lowerCamelCase__ :List[str] , lowerCamelCase__ :Any , lowerCamelCase__ :Optional[Any] , ): UpperCamelCase__ :List[str] = TFFlaubertWithLMHeadModel(lowerCamelCase__ ) UpperCamelCase__ :Optional[Any] = {"""input_ids""": input_ids, """lengths""": input_lengths, """langs""": token_type_ids} UpperCamelCase__ :Any = model(lowerCamelCase__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def __a ( self :Dict , lowerCamelCase__ :List[str] , lowerCamelCase__ :Dict , lowerCamelCase__ :Tuple , lowerCamelCase__ :str , lowerCamelCase__ :Any , lowerCamelCase__ :Tuple , lowerCamelCase__ :Any , lowerCamelCase__ :int , lowerCamelCase__ :Tuple , ): UpperCamelCase__ :int = TFFlaubertForQuestionAnsweringSimple(lowerCamelCase__ ) UpperCamelCase__ :int = {"""input_ids""": input_ids, """lengths""": input_lengths} UpperCamelCase__ :Optional[int] = model(lowerCamelCase__ ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def __a ( self :List[Any] , lowerCamelCase__ :List[str] , lowerCamelCase__ :List[str] , lowerCamelCase__ :str , lowerCamelCase__ :Optional[Any] , lowerCamelCase__ :Tuple , lowerCamelCase__ :Tuple , lowerCamelCase__ :Optional[Any] , lowerCamelCase__ :int , lowerCamelCase__ :Optional[int] , ): UpperCamelCase__ :List[Any] = TFFlaubertForSequenceClassification(lowerCamelCase__ ) UpperCamelCase__ :List[str] = {"""input_ids""": input_ids, """lengths""": input_lengths} UpperCamelCase__ :List[str] = model(lowerCamelCase__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) def __a ( self :Tuple , lowerCamelCase__ :str , lowerCamelCase__ :Any , lowerCamelCase__ :Tuple , lowerCamelCase__ :str , lowerCamelCase__ :Any , lowerCamelCase__ :Union[str, Any] , lowerCamelCase__ :List[str] , lowerCamelCase__ :str , lowerCamelCase__ :Any , ): UpperCamelCase__ :Any = self.num_labels UpperCamelCase__ :Tuple = TFFlaubertForTokenClassification(config=lowerCamelCase__ ) UpperCamelCase__ :Any = {"""input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids} UpperCamelCase__ :List[Any] = model(lowerCamelCase__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def __a ( self :Tuple , lowerCamelCase__ :Optional[int] , lowerCamelCase__ :Tuple , lowerCamelCase__ :Any , 
lowerCamelCase__ :Union[str, Any] , lowerCamelCase__ :str , lowerCamelCase__ :Any , lowerCamelCase__ :Optional[Any] , lowerCamelCase__ :Dict , lowerCamelCase__ :List[str] , ): UpperCamelCase__ :Optional[int] = self.num_choices UpperCamelCase__ :Dict = TFFlaubertForMultipleChoice(config=lowerCamelCase__ ) UpperCamelCase__ :Any = tf.tile(tf.expand_dims(lowerCamelCase__ , 1 ) , (1, self.num_choices, 1) ) UpperCamelCase__ :str = tf.tile(tf.expand_dims(lowerCamelCase__ , 1 ) , (1, self.num_choices, 1) ) UpperCamelCase__ :Any = tf.tile(tf.expand_dims(lowerCamelCase__ , 1 ) , (1, self.num_choices, 1) ) UpperCamelCase__ :int = { """input_ids""": multiple_choice_inputs_ids, """attention_mask""": multiple_choice_input_mask, """token_type_ids""": multiple_choice_token_type_ids, } UpperCamelCase__ :List[str] = model(lowerCamelCase__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) ) def __a ( self :Tuple ): UpperCamelCase__ :str = self.prepare_config_and_inputs() ( ( UpperCamelCase__ ) , ( UpperCamelCase__ ) , ( UpperCamelCase__ ) , ( UpperCamelCase__ ) , ( UpperCamelCase__ ) , ( UpperCamelCase__ ) , ( UpperCamelCase__ ) , ( UpperCamelCase__ ) , ( UpperCamelCase__ ) , ) :str = config_and_inputs UpperCamelCase__ :Optional[Any] = { """input_ids""": input_ids, """token_type_ids""": token_type_ids, """langs""": token_type_ids, """lengths""": input_lengths, } return config, inputs_dict @require_tf class lowerCAmelCase_ ( lowercase , lowercase , unittest.TestCase ): """simple docstring""" _snake_case : List[str] = ( ( TFFlaubertModel, TFFlaubertWithLMHeadModel, TFFlaubertForSequenceClassification, TFFlaubertForQuestionAnsweringSimple, TFFlaubertForTokenClassification, TFFlaubertForMultipleChoice, ) if is_tf_available() else () ) _snake_case : List[Any] = ( (TFFlaubertWithLMHeadModel,) if is_tf_available() else () ) # TODO (PVP): Check other models whether language generation is also applicable _snake_case : Optional[int] = ( { """feature-extraction""": TFFlaubertModel, """fill-mask""": TFFlaubertWithLMHeadModel, """question-answering""": TFFlaubertForQuestionAnsweringSimple, """text-classification""": TFFlaubertForSequenceClassification, """token-classification""": TFFlaubertForTokenClassification, """zero-shot""": TFFlaubertForSequenceClassification, } if is_tf_available() else {} ) _snake_case : List[Any] = False _snake_case : Tuple = False def __a ( self :Optional[int] , lowerCamelCase__ :List[Any] , lowerCamelCase__ :Dict , lowerCamelCase__ :int , lowerCamelCase__ :str , lowerCamelCase__ :List[Any] ): if ( pipeline_test_casse_name == "QAPipelineTests" and tokenizer_name is not None and not tokenizer_name.endswith("""Fast""" ) ): # `QAPipelineTests` fails for a few models when the slower tokenizer are used. 
# (The slower tokenizers were never used for pipeline tests before the pipeline testing rework) # TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer return True return False def __a ( self :List[str] ): UpperCamelCase__ :List[str] = TFFlaubertModelTester(self ) UpperCamelCase__ :Tuple = ConfigTester(self , config_class=lowerCamelCase__ , emb_dim=37 ) def __a ( self :int ): self.config_tester.run_common_tests() def __a ( self :List[str] ): UpperCamelCase__ :List[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_flaubert_model(*lowerCamelCase__ ) def __a ( self :Tuple ): UpperCamelCase__ :Tuple = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_flaubert_lm_head(*lowerCamelCase__ ) def __a ( self :Union[str, Any] ): UpperCamelCase__ :Optional[int] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_flaubert_qa(*lowerCamelCase__ ) def __a ( self :List[Any] ): UpperCamelCase__ :Optional[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_flaubert_sequence_classif(*lowerCamelCase__ ) def __a ( self :Any ): UpperCamelCase__ :Any = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_flaubert_for_token_classification(*lowerCamelCase__ ) def __a ( self :List[Any] ): UpperCamelCase__ :Union[str, Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_flaubert_for_multiple_choice(*lowerCamelCase__ ) @slow def __a ( self :str ): for model_name in TF_FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: UpperCamelCase__ :Dict = TFFlaubertModel.from_pretrained(lowerCamelCase__ ) self.assertIsNotNone(lowerCamelCase__ ) @require_tf @require_sentencepiece @require_tokenizers class lowerCAmelCase_ ( unittest.TestCase ): """simple docstring""" @slow def __a ( self :str ): UpperCamelCase__ :Tuple = TFFlaubertModel.from_pretrained("""jplu/tf-flaubert-small-cased""" ) UpperCamelCase__ :Optional[int] = tf.convert_to_tensor( [[0, 1_58, 7_35, 25_92, 14_24, 67_27, 82, 1]] , dtype=tf.intaa , ) # "J'aime flaubert !" UpperCamelCase__ :Optional[Any] = model(lowerCamelCase__ )[0] UpperCamelCase__ :Optional[int] = tf.TensorShape((1, 8, 5_12) ) self.assertEqual(output.shape , lowerCamelCase__ ) # compare the actual values for a slice. UpperCamelCase__ :str = tf.convert_to_tensor( [ [ [-1.876_8773, -1.56_6555, 0.2707_2418], [-1.692_0038, -0.587_3505, 1.932_9599], [-2.956_3985, -1.699_3835, 1.797_2052], ] ] , dtype=tf.floataa , ) self.assertTrue(np.allclose(output[:, :3, :3].numpy() , expected_slice.numpy() , atol=1e-4 ) )
45
0
'''simple docstring''' from __future__ import annotations from typing import Any class _snake_case: def __init__(self : Tuple , a : int , a : int , a : float = 0 ) -> Union[str, Any]: """simple docstring""" A__ = row, column A__ = [[default_value for c in range(lowerCamelCase__ )] for r in range(lowerCamelCase__ )] def __str__(self : Union[str, Any] ) -> Optional[int]: """simple docstring""" A__ = f"""Matrix consist of {self.row} rows and {self.column} columns\n""" # Make string identifier A__ = 0 for row_vector in self.array: for obj in row_vector: A__ = max(lowerCamelCase__ , len(str(lowerCamelCase__ ) ) ) A__ = f"""%{max_element_length}s""" # Make string and return def single_line(a : list[float] ) -> str: nonlocal string_format_identifier A__ = """[""" line += ", ".join(string_format_identifier % (obj,) for obj in row_vector ) line += "]" return line s += "\n".join(single_line(lowerCamelCase__ ) for row_vector in self.array ) return s def __repr__(self : Optional[Any] ) -> List[Any]: """simple docstring""" return str(self ) def _UpperCamelCase (self : str , a : tuple[int, int] ) -> List[str]: """simple docstring""" if not (isinstance(lowerCamelCase__ , (list, tuple) ) and len(lowerCamelCase__ ) == 2): return False elif not (0 <= loc[0] < self.row and 0 <= loc[1] < self.column): return False else: return True def __getitem__(self : Optional[int] , a : tuple[int, int] ) -> Dict: """simple docstring""" assert self.validate_indicies(lowerCamelCase__ ) return self.array[loc[0]][loc[1]] def __setitem__(self : int , a : tuple[int, int] , a : float ) -> List[Any]: """simple docstring""" assert self.validate_indicies(lowerCamelCase__ ) A__ = value def __add__(self : int , a : Matrix ) -> Any: """simple docstring""" assert isinstance(lowerCamelCase__ , lowerCamelCase__ ) assert self.row == another.row and self.column == another.column # Add A__ = Matrix(self.row , self.column ) for r in range(self.row ): for c in range(self.column ): A__ = self[r, c] + another[r, c] return result def __neg__(self : Dict ) -> List[str]: """simple docstring""" A__ = Matrix(self.row , self.column ) for r in range(self.row ): for c in range(self.column ): A__ = -self[r, c] return result def __sub__(self : Optional[int] , a : Matrix ) -> Any: """simple docstring""" return self + (-another) def __mul__(self : Union[str, Any] , a : int | float | Matrix ) -> List[str]: """simple docstring""" if isinstance(lowerCamelCase__ , (int, float) ): # Scalar multiplication A__ = Matrix(self.row , self.column ) for r in range(self.row ): for c in range(self.column ): A__ = self[r, c] * another return result elif isinstance(lowerCamelCase__ , lowerCamelCase__ ): # Matrix multiplication assert self.column == another.row A__ = Matrix(self.row , another.column ) for r in range(self.row ): for c in range(another.column ): for i in range(self.column ): result[r, c] += self[r, i] * another[i, c] return result else: A__ = f"""Unsupported type given for another ({type(lowerCamelCase__ )})""" raise TypeError(lowerCamelCase__ ) def _UpperCamelCase (self : Any ) -> Optional[Any]: """simple docstring""" A__ = Matrix(self.column , self.row ) for r in range(self.row ): for c in range(self.column ): A__ = self[r, c] return result def _UpperCamelCase (self : Tuple , a : Matrix , a : Matrix ) -> Tuple: """simple docstring""" assert isinstance(lowerCamelCase__ , lowerCamelCase__ ) and isinstance(lowerCamelCase__ , lowerCamelCase__ ) assert self.row == self.column == u.row == v.row # u, v should be column vector assert u.column == v.column == 1 # u, 
v should be column vector # Calculate A__ = v.transpose() A__ = (v_t * self * u)[0, 0] + 1 if numerator_factor == 0: return None # It's not invertable return self - ((self * u) * (v_t * self) * (1.0 / numerator_factor)) # Testing if __name__ == "__main__": def _A ( ): '''simple docstring''' A__ = Matrix(3 ,3 ,0 ) for i in range(3 ): A__ = 1 print(F"""a^(-1) is {ainv}""" ) # u, v A__ = Matrix(3 ,1 ,0 ) A__ = 1, 2, -3 A__ = Matrix(3 ,1 ,0 ) A__ = 4, -2, 5 print(F"""u is {u}""" ) print(F"""v is {v}""" ) print(F"""uv^T is {u * v.transpose()}""" ) # Sherman Morrison print(F"""(a + uv^T)^(-1) is {ainv.sherman_morrison(lowercase__ ,lowercase__ )}""" ) def _A ( ): '''simple docstring''' import doctest doctest.testmod() testa()
531
import gc import tempfile import unittest import numpy as np import torch from diffusers import VersatileDiffusionPipeline from diffusers.utils.testing_utils import load_image, nightly, require_torch_gpu, torch_device UpperCamelCase = False class lowerCAmelCase_ ( unittest.TestCase ): """simple docstring""" pass @nightly @require_torch_gpu class lowerCAmelCase_ ( unittest.TestCase ): """simple docstring""" def __a ( self :Union[str, Any] ): # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() def __a ( self :List[Any] ): UpperCamelCase__ :List[str] = VersatileDiffusionPipeline.from_pretrained("""shi-labs/versatile-diffusion""" , torch_dtype=torch.floataa ) pipe.to(lowerCamelCase__ ) pipe.set_progress_bar_config(disable=lowerCamelCase__ ) UpperCamelCase__ :Dict = load_image( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg""" ) UpperCamelCase__ :Any = torch.manual_seed(0 ) UpperCamelCase__ :Optional[int] = pipe.dual_guided( prompt="""first prompt""" , image=lowerCamelCase__ , text_to_image_strength=0.75 , generator=lowerCamelCase__ , guidance_scale=7.5 , num_inference_steps=2 , output_type="""numpy""" , ).images with tempfile.TemporaryDirectory() as tmpdirname: pipe.save_pretrained(lowerCamelCase__ ) UpperCamelCase__ :List[str] = VersatileDiffusionPipeline.from_pretrained(lowerCamelCase__ , torch_dtype=torch.floataa ) pipe.to(lowerCamelCase__ ) pipe.set_progress_bar_config(disable=lowerCamelCase__ ) UpperCamelCase__ :str = generator.manual_seed(0 ) UpperCamelCase__ :str = pipe.dual_guided( prompt="""first prompt""" , image=lowerCamelCase__ , text_to_image_strength=0.75 , generator=lowerCamelCase__ , guidance_scale=7.5 , num_inference_steps=2 , output_type="""numpy""" , ).images assert np.abs(image - new_image ).sum() < 1e-5, "Models don't have the same forward pass" def __a ( self :Dict ): UpperCamelCase__ :List[Any] = VersatileDiffusionPipeline.from_pretrained("""shi-labs/versatile-diffusion""" , torch_dtype=torch.floataa ) pipe.to(lowerCamelCase__ ) pipe.set_progress_bar_config(disable=lowerCamelCase__ ) UpperCamelCase__ :Optional[Any] = """cyberpunk 2077""" UpperCamelCase__ :str = load_image( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg""" ) UpperCamelCase__ :str = torch.manual_seed(0 ) UpperCamelCase__ :Dict = pipe.dual_guided( prompt=lowerCamelCase__ , image=lowerCamelCase__ , text_to_image_strength=0.75 , generator=lowerCamelCase__ , guidance_scale=7.5 , num_inference_steps=50 , output_type="""numpy""" , ).images UpperCamelCase__ :Tuple = image[0, 2_53:2_56, 2_53:2_56, -1] assert image.shape == (1, 5_12, 5_12, 3) UpperCamelCase__ :Any = np.array([0.1448, 0.1619, 0.1741, 0.1086, 0.1147, 0.1128, 0.1199, 0.1165, 0.1001] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1 UpperCamelCase__ :List[Any] = """A painting of a squirrel eating a burger """ UpperCamelCase__ :List[str] = torch.manual_seed(0 ) UpperCamelCase__ :Optional[int] = pipe.text_to_image( prompt=lowerCamelCase__ , generator=lowerCamelCase__ , guidance_scale=7.5 , num_inference_steps=50 , output_type="""numpy""" ).images UpperCamelCase__ :str = image[0, 2_53:2_56, 2_53:2_56, -1] assert image.shape == (1, 5_12, 5_12, 3) UpperCamelCase__ :Union[str, Any] = np.array([0.3367, 0.3169, 0.2656, 0.3870, 0.4790, 0.3796, 0.4009, 0.4878, 0.4778] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1 UpperCamelCase__ :Optional[int] = 
pipe.image_variation(lowerCamelCase__ , generator=lowerCamelCase__ , output_type="""numpy""" ).images UpperCamelCase__ :int = image[0, 2_53:2_56, 2_53:2_56, -1] assert image.shape == (1, 5_12, 5_12, 3) UpperCamelCase__ :List[Any] = np.array([0.3076, 0.3123, 0.3284, 0.3782, 0.3770, 0.3894, 0.4297, 0.4331, 0.4456] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1
45
0
'''simple docstring''' import os from dataclasses import dataclass, field from io import BytesIO from typing import TYPE_CHECKING, Any, ClassVar, Dict, Optional, Union import numpy as np import pyarrow as pa from .. import config from ..download.streaming_download_manager import xopen, xsplitext from ..table import array_cast from ..utils.py_utils import no_op_if_value_is_null, string_to_dict if TYPE_CHECKING: from .features import FeatureType lowerCAmelCase , lowerCAmelCase , lowerCAmelCase :int = False, False, False @dataclass class _lowerCamelCase : '''simple docstring''' A_ : Optional[int] = None A_ : bool = True A_ : bool = True A_ : Optional[str] = None # Automatically constructed A_ : ClassVar[str] = "dict" A_ : ClassVar[Any] = pa.struct({"""bytes""": pa.binary(), """path""": pa.string()} ) A_ : str = field(default="""Audio""" , init=lowercase__ , repr=lowercase__ ) def __call__( self : List[str] ) -> Any: return self.pa_type def __lowerCAmelCase ( self : List[Any] , _A : Union[str, bytes, dict] ) -> Tuple: try: import soundfile as sf # soundfile is a dependency of librosa, needed to decode audio files. except ImportError as err: raise ImportError('To support encoding audio data, please install \'soundfile\'.' ) from err if isinstance(lowerCamelCase__ , lowerCamelCase__ ): return {"bytes": None, "path": value} elif isinstance(lowerCamelCase__ , lowerCamelCase__ ): return {"bytes": value, "path": None} elif "array" in value: # convert the audio array to wav bytes __magic_name__ : str = BytesIO() sf.write(lowerCamelCase__ , value['array'] , value['sampling_rate'] , format='wav' ) return {"bytes": buffer.getvalue(), "path": None} elif value.get('path' ) is not None and os.path.isfile(value['path'] ): # we set "bytes": None to not duplicate the data if they're already available locally if value["path"].endswith('pcm' ): # "PCM" only has raw audio bytes if value.get('sampling_rate' ) is None: # At least, If you want to convert "PCM-byte" to "WAV-byte", you have to know sampling rate raise KeyError('To use PCM files, please specify a \'sampling_rate\' in Audio object' ) if value.get('bytes' ): # If we already had PCM-byte, we don`t have to make "read file, make bytes" (just use it!) __magic_name__ : List[Any] = np.frombuffer(value['bytes'] , dtype=np.intaa ).astype(np.floataa ) / 32767 else: __magic_name__ : int = np.memmap(value['path'] , dtype='h' , mode='r' ).astype(np.floataa ) / 32767 __magic_name__ : Optional[Any] = BytesIO(bytes() ) sf.write(lowerCamelCase__ , lowerCamelCase__ , value['sampling_rate'] , format='wav' ) return {"bytes": buffer.getvalue(), "path": None} else: return {"bytes": None, "path": value.get('path' )} elif value.get('bytes' ) is not None or value.get('path' ) is not None: # store the audio bytes, and path is used to infer the audio format using the file extension return {"bytes": value.get('bytes' ), "path": value.get('path' )} else: raise ValueError( F'An audio sample should have one of \'path\' or \'bytes\' but they are missing or None in {value}.' ) def __lowerCAmelCase ( self : Any , _A : dict , _A : Optional[Dict[str, Union[str, bool, None]]] = None ) -> Optional[Any]: if not self.decode: raise RuntimeError('Decoding is disabled for this feature. Please use Audio(decode=True) instead.' 
) __magic_name__ : int = (value["""path"""], BytesIO(value['bytes'] )) if value["""bytes"""] is not None else (value["""path"""], None) if path is None and file is None: raise ValueError(F'An audio sample should have one of \'path\' or \'bytes\' but both are None in {value}.' ) try: import librosa import soundfile as sf except ImportError as err: raise ImportError('To support decoding audio files, please install \'librosa\' and \'soundfile\'.' ) from err __magic_name__ : int = xsplitext(lowerCamelCase__ )[1][1:].lower() if path is not None else None if not config.IS_OPUS_SUPPORTED and audio_format == "opus": raise RuntimeError( 'Decoding \'opus\' files requires system library \'libsndfile\'>=1.0.31, ' 'You can try to update `soundfile` python library: `pip install \"soundfile>=0.12.1\"`. ' ) elif not config.IS_MP3_SUPPORTED and audio_format == "mp3": raise RuntimeError( 'Decoding \'mp3\' files requires system library \'libsndfile\'>=1.1.0, ' 'You can try to update `soundfile` python library: `pip install \"soundfile>=0.12.1\"`. ' ) if file is None: __magic_name__ : Dict = token_per_repo_id or {} __magic_name__ : Tuple = path.split('::' )[-1] try: __magic_name__ : List[str] = string_to_dict(lowerCamelCase__ , config.HUB_DATASETS_URL )["""repo_id"""] __magic_name__ : Dict = token_per_repo_id[repo_id] except (ValueError, KeyError): __magic_name__ : List[str] = None with xopen(lowerCamelCase__ , 'rb' , use_auth_token=lowerCamelCase__ ) as f: __magic_name__ : Dict = sf.read(lowerCamelCase__ ) else: __magic_name__ : Union[str, Any] = sf.read(lowerCamelCase__ ) __magic_name__ : Optional[Any] = array.T if self.mono: __magic_name__ : str = librosa.to_mono(lowerCamelCase__ ) if self.sampling_rate and self.sampling_rate != sampling_rate: __magic_name__ : Optional[int] = librosa.resample(lowerCamelCase__ , orig_sr=lowerCamelCase__ , target_sr=self.sampling_rate ) __magic_name__ : List[str] = self.sampling_rate return {"path": path, "array": array, "sampling_rate": sampling_rate} def __lowerCAmelCase ( self : Dict ) -> str: from .features import Value if self.decode: raise ValueError('Cannot flatten a decoded Audio feature.' 
) return { "bytes": Value('binary' ), "path": Value('string' ), } def __lowerCAmelCase ( self : List[str] , _A : Union[pa.StringArray, pa.StructArray] ) -> int: if pa.types.is_string(storage.type ): __magic_name__ : List[Any] = pa.array([None] * len(lowerCamelCase__ ) , type=pa.binary() ) __magic_name__ : int = pa.StructArray.from_arrays([bytes_array, storage] , ['bytes', 'path'] , mask=storage.is_null() ) elif pa.types.is_binary(storage.type ): __magic_name__ : Dict = pa.array([None] * len(lowerCamelCase__ ) , type=pa.string() ) __magic_name__ : Union[str, Any] = pa.StructArray.from_arrays([storage, path_array] , ['bytes', 'path'] , mask=storage.is_null() ) elif pa.types.is_struct(storage.type ) and storage.type.get_all_field_indices('array' ): __magic_name__ : int = pa.array([Audio().encode_example(lowerCamelCase__ ) if x is not None else None for x in storage.to_pylist()] ) elif pa.types.is_struct(storage.type ): if storage.type.get_field_index('bytes' ) >= 0: __magic_name__ : int = storage.field('bytes' ) else: __magic_name__ : Union[str, Any] = pa.array([None] * len(lowerCamelCase__ ) , type=pa.binary() ) if storage.type.get_field_index('path' ) >= 0: __magic_name__ : Tuple = storage.field('path' ) else: __magic_name__ : Any = pa.array([None] * len(lowerCamelCase__ ) , type=pa.string() ) __magic_name__ : Union[str, Any] = pa.StructArray.from_arrays([bytes_array, path_array] , ['bytes', 'path'] , mask=storage.is_null() ) return array_cast(lowerCamelCase__ , self.pa_type ) def __lowerCAmelCase ( self : Dict , _A : pa.StructArray ) -> Optional[Any]: @no_op_if_value_is_null def path_to_bytes(_A : int ): with xopen(lowerCamelCase__ , 'rb' ) as f: __magic_name__ : List[Any] = f.read() return bytes_ __magic_name__ : Any = pa.array( [ (path_to_bytes(x['path'] ) if x['bytes'] is None else x['bytes']) if x is not None else None for x in storage.to_pylist() ] , type=pa.binary() , ) __magic_name__ : int = pa.array( [os.path.basename(lowerCamelCase__ ) if path is not None else None for path in storage.field('path' ).to_pylist()] , type=pa.string() , ) __magic_name__ : Dict = pa.StructArray.from_arrays([bytes_array, path_array] , ['bytes', 'path'] , mask=bytes_array.is_null() ) return array_cast(lowerCamelCase__ , self.pa_type )
561
from __future__ import annotations import copy import inspect import unittest import numpy as np from transformers import is_tf_available, is_vision_available from transformers.models.auto import get_values from transformers.testing_utils import require_tf, slow from transformers.utils import cached_property from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import ( TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST, TF_MODEL_FOR_MULTIPLE_CHOICE_MAPPING, TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING, TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING, TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING, LayoutLMvaConfig, TFLayoutLMvaForQuestionAnswering, TFLayoutLMvaForSequenceClassification, TFLayoutLMvaForTokenClassification, TFLayoutLMvaModel, ) if is_vision_available(): from PIL import Image from transformers import LayoutLMvaImageProcessor class lowerCAmelCase_ : """simple docstring""" def __init__( self :Union[str, Any] , lowerCamelCase__ :Tuple , lowerCamelCase__ :List[str]=2 , lowerCamelCase__ :List[str]=3 , lowerCamelCase__ :List[str]=4 , lowerCamelCase__ :str=2 , lowerCamelCase__ :Optional[int]=7 , lowerCamelCase__ :List[Any]=True , lowerCamelCase__ :Optional[Any]=True , lowerCamelCase__ :Union[str, Any]=True , lowerCamelCase__ :Any=True , lowerCamelCase__ :Dict=99 , lowerCamelCase__ :Optional[Any]=36 , lowerCamelCase__ :str=2 , lowerCamelCase__ :List[Any]=4 , lowerCamelCase__ :Optional[Any]=37 , lowerCamelCase__ :Optional[int]="gelu" , lowerCamelCase__ :Any=0.1 , lowerCamelCase__ :List[Any]=0.1 , lowerCamelCase__ :List[Any]=5_12 , lowerCamelCase__ :str=16 , lowerCamelCase__ :Tuple=2 , lowerCamelCase__ :int=0.02 , lowerCamelCase__ :List[Any]=6 , lowerCamelCase__ :List[str]=6 , lowerCamelCase__ :Optional[int]=3 , lowerCamelCase__ :Optional[int]=4 , lowerCamelCase__ :int=None , lowerCamelCase__ :Optional[Any]=10_00 , ): UpperCamelCase__ :Any = parent UpperCamelCase__ :Union[str, Any] = batch_size UpperCamelCase__ :Dict = num_channels UpperCamelCase__ :Optional[Any] = image_size UpperCamelCase__ :Union[str, Any] = patch_size UpperCamelCase__ :Union[str, Any] = is_training UpperCamelCase__ :str = use_input_mask UpperCamelCase__ :int = use_token_type_ids UpperCamelCase__ :int = use_labels UpperCamelCase__ :List[Any] = vocab_size UpperCamelCase__ :List[str] = hidden_size UpperCamelCase__ :List[Any] = num_hidden_layers UpperCamelCase__ :List[str] = num_attention_heads UpperCamelCase__ :Tuple = intermediate_size UpperCamelCase__ :Any = hidden_act UpperCamelCase__ :Optional[int] = hidden_dropout_prob UpperCamelCase__ :Tuple = attention_probs_dropout_prob UpperCamelCase__ :Dict = max_position_embeddings UpperCamelCase__ :Tuple = type_vocab_size UpperCamelCase__ :Union[str, Any] = type_sequence_label_size UpperCamelCase__ :int = initializer_range UpperCamelCase__ :List[Any] = coordinate_size UpperCamelCase__ :Tuple = shape_size UpperCamelCase__ :Dict = num_labels UpperCamelCase__ :str = num_choices UpperCamelCase__ :Tuple = scope UpperCamelCase__ :str = range_bbox # LayoutLMv3's sequence length equals the number of text tokens + number of patches + 1 (we add 1 for the CLS token) UpperCamelCase__ :List[str] = text_seq_length UpperCamelCase__ :List[str] = (image_size // patch_size) ** 2 + 1 UpperCamelCase__ :Dict = self.text_seq_length + self.image_seq_length def __a ( self :Tuple ): 
UpperCamelCase__ :Optional[int] = ids_tensor([self.batch_size, self.text_seq_length] , self.vocab_size ) UpperCamelCase__ :int = ids_tensor([self.batch_size, self.text_seq_length, 4] , self.range_bbox ) UpperCamelCase__ :str = bbox.numpy() # Ensure that bbox is legal for i in range(bbox.shape[0] ): for j in range(bbox.shape[1] ): if bbox[i, j, 3] < bbox[i, j, 1]: UpperCamelCase__ :List[str] = bbox[i, j, 3] UpperCamelCase__ :Optional[int] = bbox[i, j, 1] UpperCamelCase__ :Optional[Any] = tmp_coordinate if bbox[i, j, 2] < bbox[i, j, 0]: UpperCamelCase__ :Tuple = bbox[i, j, 2] UpperCamelCase__ :Optional[Any] = bbox[i, j, 0] UpperCamelCase__ :List[str] = tmp_coordinate UpperCamelCase__ :Dict = tf.constant(lowerCamelCase__ ) UpperCamelCase__ :Union[str, Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) UpperCamelCase__ :Any = None if self.use_input_mask: UpperCamelCase__ :int = random_attention_mask([self.batch_size, self.text_seq_length] ) UpperCamelCase__ :Optional[Any] = None if self.use_token_type_ids: UpperCamelCase__ :Optional[int] = ids_tensor([self.batch_size, self.text_seq_length] , self.type_vocab_size ) UpperCamelCase__ :List[str] = None UpperCamelCase__ :Union[str, Any] = None if self.use_labels: UpperCamelCase__ :Union[str, Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size ) UpperCamelCase__ :Union[str, Any] = ids_tensor([self.batch_size, self.text_seq_length] , self.num_labels ) UpperCamelCase__ :Optional[int] = LayoutLMvaConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , coordinate_size=self.coordinate_size , shape_size=self.shape_size , input_size=self.image_size , patch_size=self.patch_size , ) return config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels def __a ( self :List[Any] , lowerCamelCase__ :str , lowerCamelCase__ :Optional[int] , lowerCamelCase__ :Dict , lowerCamelCase__ :str , lowerCamelCase__ :int , lowerCamelCase__ :Any ): UpperCamelCase__ :Dict = TFLayoutLMvaModel(config=lowerCamelCase__ ) # text + image UpperCamelCase__ :Tuple = model(lowerCamelCase__ , pixel_values=lowerCamelCase__ , training=lowerCamelCase__ ) UpperCamelCase__ :Tuple = model( lowerCamelCase__ , bbox=lowerCamelCase__ , pixel_values=lowerCamelCase__ , attention_mask=lowerCamelCase__ , token_type_ids=lowerCamelCase__ , training=lowerCamelCase__ , ) UpperCamelCase__ :str = model(lowerCamelCase__ , bbox=lowerCamelCase__ , pixel_values=lowerCamelCase__ , training=lowerCamelCase__ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) # text only UpperCamelCase__ :Optional[int] = model(lowerCamelCase__ , training=lowerCamelCase__ ) self.parent.assertEqual( result.last_hidden_state.shape , (self.batch_size, self.text_seq_length, self.hidden_size) ) # image only UpperCamelCase__ :Tuple = model({"""pixel_values""": pixel_values} , training=lowerCamelCase__ ) self.parent.assertEqual( result.last_hidden_state.shape , (self.batch_size, self.image_seq_length, self.hidden_size) ) def __a ( self :Dict , lowerCamelCase__ :str , 
lowerCamelCase__ :Union[str, Any] , lowerCamelCase__ :Dict , lowerCamelCase__ :Union[str, Any] , lowerCamelCase__ :str , lowerCamelCase__ :Optional[Any] , lowerCamelCase__ :str ): UpperCamelCase__ :Optional[Any] = self.num_labels UpperCamelCase__ :List[Any] = TFLayoutLMvaForSequenceClassification(config=lowerCamelCase__ ) UpperCamelCase__ :List[str] = model( lowerCamelCase__ , bbox=lowerCamelCase__ , pixel_values=lowerCamelCase__ , attention_mask=lowerCamelCase__ , token_type_ids=lowerCamelCase__ , labels=lowerCamelCase__ , training=lowerCamelCase__ , ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def __a ( self :List[str] , lowerCamelCase__ :List[str] , lowerCamelCase__ :Union[str, Any] , lowerCamelCase__ :Dict , lowerCamelCase__ :List[Any] , lowerCamelCase__ :Optional[int] , lowerCamelCase__ :Tuple , lowerCamelCase__ :List[str] ): UpperCamelCase__ :Union[str, Any] = self.num_labels UpperCamelCase__ :Dict = TFLayoutLMvaForTokenClassification(config=lowerCamelCase__ ) UpperCamelCase__ :Optional[Any] = model( lowerCamelCase__ , bbox=lowerCamelCase__ , pixel_values=lowerCamelCase__ , attention_mask=lowerCamelCase__ , token_type_ids=lowerCamelCase__ , labels=lowerCamelCase__ , training=lowerCamelCase__ , ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.text_seq_length, self.num_labels) ) def __a ( self :int , lowerCamelCase__ :Dict , lowerCamelCase__ :Optional[Any] , lowerCamelCase__ :Optional[Any] , lowerCamelCase__ :Any , lowerCamelCase__ :Dict , lowerCamelCase__ :Tuple , lowerCamelCase__ :Tuple ): UpperCamelCase__ :Dict = 2 UpperCamelCase__ :Tuple = TFLayoutLMvaForQuestionAnswering(config=lowerCamelCase__ ) UpperCamelCase__ :int = model( lowerCamelCase__ , bbox=lowerCamelCase__ , pixel_values=lowerCamelCase__ , attention_mask=lowerCamelCase__ , token_type_ids=lowerCamelCase__ , start_positions=lowerCamelCase__ , end_positions=lowerCamelCase__ , training=lowerCamelCase__ , ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def __a ( self :List[Any] ): UpperCamelCase__ :Union[str, Any] = self.prepare_config_and_inputs() ((UpperCamelCase__) , (UpperCamelCase__) , (UpperCamelCase__) , (UpperCamelCase__) , (UpperCamelCase__) , (UpperCamelCase__) , (UpperCamelCase__) , (UpperCamelCase__)) :Any = config_and_inputs UpperCamelCase__ :List[str] = { """input_ids""": input_ids, """bbox""": bbox, """pixel_values""": pixel_values, """token_type_ids""": token_type_ids, """attention_mask""": input_mask, } return config, inputs_dict @require_tf class lowerCAmelCase_ ( lowercase , lowercase , unittest.TestCase ): """simple docstring""" _snake_case : Dict = ( ( TFLayoutLMvaModel, TFLayoutLMvaForQuestionAnswering, TFLayoutLMvaForSequenceClassification, TFLayoutLMvaForTokenClassification, ) if is_tf_available() else () ) _snake_case : Dict = ( {"""document-question-answering""": TFLayoutLMvaForQuestionAnswering, """feature-extraction""": TFLayoutLMvaModel} if is_tf_available() else {} ) _snake_case : Optional[int] = False _snake_case : List[str] = False _snake_case : Tuple = False def __a ( self :str , lowerCamelCase__ :Optional[int] , lowerCamelCase__ :Union[str, Any] , lowerCamelCase__ :Optional[Any] , lowerCamelCase__ :Tuple , lowerCamelCase__ :int ): return True def __a ( self :Optional[int] , lowerCamelCase__ :int , lowerCamelCase__ :List[str] , lowerCamelCase__ :Optional[int]=False ): UpperCamelCase__ :List[str] = 
copy.deepcopy(lowerCamelCase__ ) if model_class in get_values(lowerCamelCase__ ): UpperCamelCase__ :Optional[int] = { k: tf.tile(tf.expand_dims(lowerCamelCase__ , 1 ) , (1, self.model_tester.num_choices) + (1,) * (v.ndim - 1) ) if isinstance(lowerCamelCase__ , tf.Tensor ) and v.ndim > 0 else v for k, v in inputs_dict.items() } if return_labels: if model_class in get_values(lowerCamelCase__ ): UpperCamelCase__ :str = tf.ones(self.model_tester.batch_size , dtype=tf.intaa ) elif model_class in get_values(lowerCamelCase__ ): UpperCamelCase__ :List[str] = tf.zeros(self.model_tester.batch_size , dtype=tf.intaa ) UpperCamelCase__ :Union[str, Any] = tf.zeros(self.model_tester.batch_size , dtype=tf.intaa ) elif model_class in get_values(lowerCamelCase__ ): UpperCamelCase__ :Optional[Any] = tf.zeros(self.model_tester.batch_size , dtype=tf.intaa ) elif model_class in get_values(lowerCamelCase__ ): UpperCamelCase__ :Tuple = tf.zeros( (self.model_tester.batch_size, self.model_tester.text_seq_length) , dtype=tf.intaa ) return inputs_dict def __a ( self :Dict ): UpperCamelCase__ :List[Any] = TFLayoutLMvaModelTester(self ) UpperCamelCase__ :Optional[int] = ConfigTester(self , config_class=lowerCamelCase__ , hidden_size=37 ) def __a ( self :Any ): self.config_tester.run_common_tests() def __a ( self :Optional[int] ): UpperCamelCase__ , UpperCamelCase__ :Any = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: UpperCamelCase__ :Optional[int] = model_class(lowerCamelCase__ ) if getattr(lowerCamelCase__ , """hf_compute_loss""" , lowerCamelCase__ ): # The number of elements in the loss should be the same as the number of elements in the label UpperCamelCase__ :Union[str, Any] = self._prepare_for_class(inputs_dict.copy() , lowerCamelCase__ , return_labels=lowerCamelCase__ ) UpperCamelCase__ :int = prepared_for_class[ sorted(prepared_for_class.keys() - inputs_dict.keys() , reverse=lowerCamelCase__ )[0] ] UpperCamelCase__ :Union[str, Any] = added_label.shape.as_list()[:1] # Test that model correctly compute the loss with kwargs UpperCamelCase__ :List[Any] = self._prepare_for_class(inputs_dict.copy() , lowerCamelCase__ , return_labels=lowerCamelCase__ ) UpperCamelCase__ :Optional[Any] = prepared_for_class.pop("""input_ids""" ) UpperCamelCase__ :List[str] = model(lowerCamelCase__ , **lowerCamelCase__ )[0] self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1] ) # Test that model correctly compute the loss when we mask some positions UpperCamelCase__ :Union[str, Any] = self._prepare_for_class(inputs_dict.copy() , lowerCamelCase__ , return_labels=lowerCamelCase__ ) UpperCamelCase__ :Optional[Any] = prepared_for_class.pop("""input_ids""" ) if "labels" in prepared_for_class: UpperCamelCase__ :List[str] = prepared_for_class["""labels"""].numpy() if len(labels.shape ) > 1 and labels.shape[1] != 1: UpperCamelCase__ :Optional[Any] = -1_00 UpperCamelCase__ :Union[str, Any] = tf.convert_to_tensor(lowerCamelCase__ ) UpperCamelCase__ :Tuple = model(lowerCamelCase__ , **lowerCamelCase__ )[0] self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1] ) self.assertTrue(not np.any(np.isnan(loss.numpy() ) ) ) # Test that model correctly compute the loss with a dict UpperCamelCase__ :Optional[Any] = self._prepare_for_class(inputs_dict.copy() , lowerCamelCase__ , return_labels=lowerCamelCase__ ) UpperCamelCase__ :Union[str, Any] = model(lowerCamelCase__ )[0] self.assertTrue(loss.shape.as_list() == expected_loss_size or 
loss.shape.as_list() == [1] ) # Test that model correctly compute the loss with a tuple UpperCamelCase__ :Dict = self._prepare_for_class(inputs_dict.copy() , lowerCamelCase__ , return_labels=lowerCamelCase__ ) # Get keys that were added with the _prepare_for_class function UpperCamelCase__ :str = prepared_for_class.keys() - inputs_dict.keys() UpperCamelCase__ :Tuple = inspect.signature(model.call ).parameters UpperCamelCase__ :str = list(signature.keys() ) # Create a dictionary holding the location of the tensors in the tuple UpperCamelCase__ :Any = {0: """input_ids"""} for label_key in label_keys: UpperCamelCase__ :Dict = signature_names.index(lowerCamelCase__ ) UpperCamelCase__ :Optional[int] = label_key UpperCamelCase__ :Optional[Any] = sorted(tuple_index_mapping.items() ) # Initialize a list with their default values, update the values and convert to a tuple UpperCamelCase__ :Any = [] for name in signature_names: if name != "kwargs": list_input.append(signature[name].default ) for index, value in sorted_tuple_index_mapping: UpperCamelCase__ :List[str] = prepared_for_class[value] UpperCamelCase__ :Union[str, Any] = tuple(lowerCamelCase__ ) # Send to model UpperCamelCase__ :str = model(tuple_input[:-1] )[0] self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1] ) def __a ( self :Optional[int] ): ( ( UpperCamelCase__ ) , ( UpperCamelCase__ ) , ( UpperCamelCase__ ) , ( UpperCamelCase__ ) , ( UpperCamelCase__ ) , ( UpperCamelCase__ ) , ( UpperCamelCase__ ) , ( UpperCamelCase__ ) , ) :Any = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) def __a ( self :Any ): ( ( UpperCamelCase__ ) , ( UpperCamelCase__ ) , ( UpperCamelCase__ ) , ( UpperCamelCase__ ) , ( UpperCamelCase__ ) , ( UpperCamelCase__ ) , ( UpperCamelCase__ ) , ( UpperCamelCase__ ) , ) :List[Any] = self.model_tester.prepare_config_and_inputs() for type in ["absolute", "relative_key", "relative_key_query"]: UpperCamelCase__ :Dict = type self.model_tester.create_and_check_model(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) def __a ( self :Tuple ): ( ( UpperCamelCase__ ) , ( UpperCamelCase__ ) , ( UpperCamelCase__ ) , ( UpperCamelCase__ ) , ( UpperCamelCase__ ) , ( UpperCamelCase__ ) , ( UpperCamelCase__ ) , ( UpperCamelCase__ ) , ) :int = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_sequence_classification( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) def __a ( self :Optional[int] ): ( ( UpperCamelCase__ ) , ( UpperCamelCase__ ) , ( UpperCamelCase__ ) , ( UpperCamelCase__ ) , ( UpperCamelCase__ ) , ( UpperCamelCase__ ) , ( UpperCamelCase__ ) , ( UpperCamelCase__ ) , ) :Union[str, Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_token_classification( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) def __a ( self :List[str] ): ( ( UpperCamelCase__ ) , ( UpperCamelCase__ ) , ( UpperCamelCase__ ) , ( UpperCamelCase__ ) , ( UpperCamelCase__ ) , ( UpperCamelCase__ ) , ( UpperCamelCase__ ) , ( UpperCamelCase__ ) , ) :Tuple = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_question_answering( 
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) @slow def __a ( self :Optional[int] ): for model_name in TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: UpperCamelCase__ :Dict = TFLayoutLMvaModel.from_pretrained(lowerCamelCase__ ) self.assertIsNotNone(lowerCamelCase__ ) def A ( ) -> List[str]: UpperCamelCase__ :List[Any] = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" ) return image @require_tf class lowerCAmelCase_ ( unittest.TestCase ): """simple docstring""" @cached_property def __a ( self :Optional[Any] ): return LayoutLMvaImageProcessor(apply_ocr=lowerCamelCase__ ) if is_vision_available() else None @slow def __a ( self :Dict ): UpperCamelCase__ :List[str] = TFLayoutLMvaModel.from_pretrained("""microsoft/layoutlmv3-base""" ) UpperCamelCase__ :List[Any] = self.default_image_processor UpperCamelCase__ :str = prepare_img() UpperCamelCase__ :Any = image_processor(images=lowerCamelCase__ , return_tensors="""tf""" ).pixel_values UpperCamelCase__ :str = tf.constant([[1, 2]] ) UpperCamelCase__ :Any = tf.expand_dims(tf.constant([[1, 2, 3, 4], [5, 6, 7, 8]] ) , axis=0 ) # forward pass UpperCamelCase__ :Dict = model(input_ids=lowerCamelCase__ , bbox=lowerCamelCase__ , pixel_values=lowerCamelCase__ , training=lowerCamelCase__ ) # verify the logits UpperCamelCase__ :int = (1, 1_99, 7_68) self.assertEqual(outputs.last_hidden_state.shape , lowerCamelCase__ ) UpperCamelCase__ :List[Any] = tf.constant( [[-0.0529, 0.3618, 0.1632], [-0.1587, -0.1667, -0.0400], [-0.1557, -0.1671, -0.0505]] ) self.assertTrue(np.allclose(outputs.last_hidden_state[0, :3, :3] , lowerCamelCase__ , atol=1e-4 ) )
45
0
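The loss tests in the row above rely on the Hugging Face convention that label positions set to -100 are excluded from the loss. A minimal sketch of that convention in plain TensorFlow (shapes and values are illustrative only, not taken from the test):

```python
import tensorflow as tf

labels = tf.constant([[5, 2, -100, 7]])   # -100 marks positions to ignore
logits = tf.random.normal((1, 4, 10))     # (batch, seq_len, num_classes)

# Keep only the active positions before computing the loss
active = tf.not_equal(labels, -100)
loss_fn = tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True)
loss = loss_fn(tf.boolean_mask(labels, active), tf.boolean_mask(logits, active))
```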
'''simple docstring''' import inspect import unittest from transformers import DecisionTransformerConfig, is_torch_available from transformers.testing_utils import require_torch, slow, torch_device from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import DecisionTransformerModel from transformers.models.decision_transformer.modeling_decision_transformer import ( DECISION_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, ) class _snake_case : def __init__( self ,_snake_case ,_snake_case=13 ,_snake_case=7 ,_snake_case=6 ,_snake_case=17 ,_snake_case=23 ,_snake_case=11 ,_snake_case=True ,): UpperCAmelCase_ : Optional[int] = parent UpperCAmelCase_ : List[Any] = batch_size UpperCAmelCase_ : List[Any] = seq_length UpperCAmelCase_ : Any = act_dim UpperCAmelCase_ : str = state_dim UpperCAmelCase_ : Tuple = hidden_size UpperCAmelCase_ : List[str] = max_length UpperCAmelCase_ : List[str] = is_training def UpperCamelCase__ ( self ): UpperCAmelCase_ : Tuple = floats_tensor((self.batch_size, self.seq_length, self.state_dim) ) UpperCAmelCase_ : Optional[Any] = floats_tensor((self.batch_size, self.seq_length, self.act_dim) ) UpperCAmelCase_ : str = floats_tensor((self.batch_size, self.seq_length, 1) ) UpperCAmelCase_ : int = floats_tensor((self.batch_size, self.seq_length, 1) ) UpperCAmelCase_ : Any = ids_tensor((self.batch_size, self.seq_length) ,vocab_size=10_00 ) UpperCAmelCase_ : List[Any] = random_attention_mask((self.batch_size, self.seq_length) ) UpperCAmelCase_ : Union[str, Any] = self.get_config() return ( config, states, actions, rewards, returns_to_go, timesteps, attention_mask, ) def UpperCamelCase__ ( self ): return DecisionTransformerConfig( batch_size=self.batch_size ,seq_length=self.seq_length ,act_dim=self.act_dim ,state_dim=self.state_dim ,hidden_size=self.hidden_size ,max_length=self.max_length ,) def UpperCamelCase__ ( self ,_snake_case ,_snake_case ,_snake_case ,_snake_case ,_snake_case ,_snake_case ,_snake_case ,): UpperCAmelCase_ : List[str] = DecisionTransformerModel(config=lowerCamelCase__ ) model.to(lowerCamelCase__ ) model.eval() UpperCAmelCase_ : Any = model(lowerCamelCase__ ,lowerCamelCase__ ,lowerCamelCase__ ,lowerCamelCase__ ,lowerCamelCase__ ,lowerCamelCase__ ) self.parent.assertEqual(result.state_preds.shape ,states.shape ) self.parent.assertEqual(result.action_preds.shape ,actions.shape ) self.parent.assertEqual(result.return_preds.shape ,returns_to_go.shape ) self.parent.assertEqual( result.last_hidden_state.shape ,(self.batch_size, self.seq_length * 3, self.hidden_size) ) # seq length *3 as there are 3 modelities: states, returns and actions def UpperCamelCase__ ( self ): UpperCAmelCase_ : Optional[Any] = self.prepare_config_and_inputs() ( UpperCAmelCase_ ) : Optional[Any] = config_and_inputs UpperCAmelCase_ : Optional[Any] = { """states""": states, """actions""": actions, """rewards""": rewards, """returns_to_go""": returns_to_go, """timesteps""": timesteps, """attention_mask""": attention_mask, } return config, inputs_dict @require_torch class _snake_case (__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , unittest.TestCase): __A : Any =(DecisionTransformerModel,) if is_torch_available() else () __A : List[str] =() __A : Any ={"""feature-extraction""": DecisionTransformerModel} 
if is_torch_available() else {} # Ignoring of a failing test from GenerationTesterMixin, as the model does not use inputs_ids __A : int =False # Ignoring of a failing tests from ModelTesterMixin, as the model does not implement these features __A : List[Any] =False __A : Any =False __A : List[Any] =False __A : List[Any] =False __A : List[str] =False __A : Tuple =False __A : List[Any] =False __A : Union[str, Any] =False __A : int =False def UpperCamelCase__ ( self ): UpperCAmelCase_ : Tuple = DecisionTransformerModelTester(self ) UpperCAmelCase_ : Union[str, Any] = ConfigTester(self ,config_class=lowerCamelCase__ ,hidden_size=37 ) def UpperCamelCase__ ( self ): self.config_tester.run_common_tests() def UpperCamelCase__ ( self ): UpperCAmelCase_ : List[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*lowerCamelCase__ ) @slow def UpperCamelCase__ ( self ): for model_name in DECISION_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: UpperCAmelCase_ : Any = DecisionTransformerModel.from_pretrained(lowerCamelCase__ ) self.assertIsNotNone(lowerCamelCase__ ) def UpperCamelCase__ ( self ): UpperCAmelCase_ : str = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: UpperCAmelCase_ : int = model_class(lowerCamelCase__ ) UpperCAmelCase_ : Tuple = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic UpperCAmelCase_ : Optional[int] = [*signature.parameters.keys()] UpperCAmelCase_ : Optional[Any] = [ """states""", """actions""", """rewards""", """returns_to_go""", """timesteps""", """attention_mask""", ] self.assertListEqual(arg_names[: len(lowerCamelCase__ )] ,lowerCamelCase__ ) @require_torch class _snake_case (unittest.TestCase): @slow def UpperCamelCase__ ( self ): UpperCAmelCase_ : Tuple = 2 # number of steps of autoregressive prediction we will perform UpperCAmelCase_ : Tuple = 10 # defined by the RL environment, may be normalized UpperCAmelCase_ : Union[str, Any] = DecisionTransformerModel.from_pretrained("edbeeching/decision-transformer-gym-hopper-expert" ) UpperCAmelCase_ : List[Any] = model.to(lowerCamelCase__ ) UpperCAmelCase_ : int = model.config torch.manual_seed(0 ) UpperCAmelCase_ : List[str] = torch.randn(1 ,1 ,config.state_dim ).to(device=lowerCamelCase__ ,dtype=torch.floataa ) # env.reset() UpperCAmelCase_ : Optional[int] = torch.tensor( [[0.242793, -0.28693074, 0.8742613], [0.67815274, -0.08101085, -0.12952147]] ,device=lowerCamelCase__ ) UpperCAmelCase_ : Optional[int] = torch.tensor(lowerCamelCase__ ,device=lowerCamelCase__ ,dtype=torch.floataa ).reshape(1 ,1 ,1 ) UpperCAmelCase_ : List[str] = state UpperCAmelCase_ : str = torch.zeros(1 ,0 ,config.act_dim ,device=lowerCamelCase__ ,dtype=torch.floataa ) UpperCAmelCase_ : Dict = torch.zeros(1 ,0 ,device=lowerCamelCase__ ,dtype=torch.floataa ) UpperCAmelCase_ : List[Any] = torch.tensor(0 ,device=lowerCamelCase__ ,dtype=torch.long ).reshape(1 ,1 ) for step in range(lowerCamelCase__ ): UpperCAmelCase_ : int = torch.cat([actions, torch.zeros(1 ,1 ,config.act_dim ,device=lowerCamelCase__ )] ,dim=1 ) UpperCAmelCase_ : Any = torch.cat([rewards, torch.zeros(1 ,1 ,device=lowerCamelCase__ )] ,dim=1 ) UpperCAmelCase_ : List[Any] = torch.ones(1 ,states.shape[1] ).to(dtype=torch.long ,device=states.device ) with torch.no_grad(): UpperCAmelCase_ : Optional[Any] = model( states=lowerCamelCase__ ,actions=lowerCamelCase__ ,rewards=lowerCamelCase__ ,returns_to_go=lowerCamelCase__ 
,timesteps=lowerCamelCase__ ,attention_mask=lowerCamelCase__ ,return_dict=lowerCamelCase__ ,) self.assertEqual(action_pred.shape ,actions.shape ) self.assertTrue(torch.allclose(action_pred[0, -1] ,expected_outputs[step] ,atol=1E-4 ) ) UpperCAmelCase_ : Optional[Any] = ( # env.step(action) torch.randn(1 ,1 ,config.state_dim ).to(device=lowerCamelCase__ ,dtype=torch.floataa ), 1.0, False, {}, ) UpperCAmelCase_ : List[str] = action_pred[0, -1] UpperCAmelCase_ : List[Any] = torch.cat([states, state] ,dim=1 ) UpperCAmelCase_ : Any = returns_to_go[0, -1] - reward UpperCAmelCase_ : Any = torch.cat([returns_to_go, pred_return.reshape(1 ,1 ,1 )] ,dim=1 ) UpperCAmelCase_ : Dict = torch.cat( [timesteps, torch.ones((1, 1) ,device=lowerCamelCase__ ,dtype=torch.long ) * (step + 1)] ,dim=1 )
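The integration test above follows the standard Decision Transformer rollout: pad the action and reward buffers with a zero slot, run the model on the full history, and execute only the last predicted action. A condensed, hedged sketch of one step of that loop (environment interaction stubbed out with random data; the checkpoint name and target return of 10 are taken from the test):

```python
import torch
from transformers import DecisionTransformerModel

model = DecisionTransformerModel.from_pretrained(
    "edbeeching/decision-transformer-gym-hopper-expert"
).eval()
cfg = model.config

states = torch.randn(1, 1, cfg.state_dim)            # stand-in for env.reset()
actions = torch.zeros(1, 1, cfg.act_dim)             # zero slot for the action to predict
rewards = torch.zeros(1, 1)
returns_to_go = torch.tensor(10.0).reshape(1, 1, 1)  # task-specific target return
timesteps = torch.zeros(1, 1, dtype=torch.long)
mask = torch.ones(1, states.shape[1], dtype=torch.long)

with torch.no_grad():
    out = model(
        states=states, actions=actions, rewards=rewards,
        returns_to_go=returns_to_go, timesteps=timesteps, attention_mask=mask,
    )
next_action = out.action_preds[0, -1]  # the action to execute in the environment
```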
71
import logging import os import sys from dataclasses import dataclass, field from typing import Optional import torch from datasets import load_dataset from torchvision.transforms import Compose, Lambda, Normalize, RandomHorizontalFlip, RandomResizedCrop, ToTensor from torchvision.transforms.functional import InterpolationMode import transformers from transformers import ( HfArgumentParser, Trainer, TrainingArguments, ViTImageProcessor, ViTMAEConfig, ViTMAEForPreTraining, ) from transformers.trainer_utils import get_last_checkpoint from transformers.utils import check_min_version, send_example_telemetry from transformers.utils.versions import require_version UpperCamelCase = logging.getLogger(__name__) # Will error if the minimal version of Transformers is not installed. Remove at your own risks. check_min_version("4.31.0") require_version("datasets>=1.8.0", "To fix: pip install -r examples/pytorch/image-pretraining/requirements.txt") @dataclass class lowerCAmelCase_ : """simple docstring""" _snake_case : Optional[str] = field( default="""cifar10""" , metadata={"""help""": """Name of a dataset from the datasets package"""} ) _snake_case : Optional[str] = field( default=lowercase , metadata={"""help""": """The configuration name of the dataset to use (via the datasets library)."""} ) _snake_case : Optional[str] = field( default=lowercase , metadata={"""help""": """The column name of the images in the files."""} ) _snake_case : Optional[str] = field(default=lowercase , metadata={"""help""": """A folder containing the training data."""} ) _snake_case : Optional[str] = field(default=lowercase , metadata={"""help""": """A folder containing the validation data."""} ) _snake_case : Optional[float] = field( default=0.15 , metadata={"""help""": """Percent to split off of train for validation."""} ) _snake_case : Optional[int] = field( default=lowercase , metadata={ """help""": ( """For debugging purposes or quicker training, truncate the number of training examples to this """ """value if set.""" ) } , ) _snake_case : Optional[int] = field( default=lowercase , metadata={ """help""": ( """For debugging purposes or quicker training, truncate the number of evaluation examples to this """ """value if set.""" ) } , ) def __a ( self :List[str] ): UpperCamelCase__ :Optional[Any] = {} if self.train_dir is not None: UpperCamelCase__ :int = self.train_dir if self.validation_dir is not None: UpperCamelCase__ :List[str] = self.validation_dir UpperCamelCase__ :Optional[int] = data_files if data_files else None @dataclass class lowerCAmelCase_ : """simple docstring""" _snake_case : str = field( default=lowercase , metadata={ """help""": ( """The model checkpoint for weights initialization.Don't set if you want to train a model from scratch.""" ) } , ) _snake_case : Optional[str] = field( default=lowercase , metadata={"""help""": """Pretrained config name or path if not the same as model_name_or_path"""} ) _snake_case : Optional[str] = field( default=lowercase , metadata={ """help""": ( """Override some existing default config settings when a model is trained from scratch. 
Example: """ """n_embd=10,resid_pdrop=0.2,scale_attn_weights=false,summary_type=cls_index""" ) } , ) _snake_case : Optional[str] = field( default=lowercase , metadata={"""help""": """Where do you want to store the pretrained models downloaded from s3"""} ) _snake_case : str = field( default="""main""" , metadata={"""help""": """The specific model version to use (can be a branch name, tag name or commit id)."""} , ) _snake_case : str = field(default=lowercase , metadata={"""help""": """Name or path of preprocessor config."""} ) _snake_case : bool = field( default=lowercase , metadata={ """help""": ( """Will use the token generated when running `huggingface-cli login` (necessary to use this script """ """with private models).""" ) } , ) _snake_case : float = field( default=0.75 , metadata={"""help""": """The ratio of the number of masked tokens in the input sequence."""} ) _snake_case : bool = field( default=lowercase , metadata={"""help""": """Whether or not to train with normalized pixel values as target."""} ) @dataclass class lowerCAmelCase_ ( lowercase ): """simple docstring""" _snake_case : float = field( default=1e-3 , metadata={"""help""": """Base learning rate: absolute_lr = base_lr * total_batch_size / 256."""} ) def A ( lowercase__ : Union[str, Any] ) -> Dict: UpperCamelCase__ :Union[str, Any] = torch.stack([example["""pixel_values"""] for example in examples] ) return {"pixel_values": pixel_values} def A ( ) -> Optional[int]: # See all possible arguments in src/transformers/training_args.py # or by passing the --help flag to this script. # We now keep distinct sets of args, for a cleaner separation of concerns. UpperCamelCase__ :Optional[int] = HfArgumentParser((ModelArguments, DataTrainingArguments, CustomTrainingArguments) ) if len(sys.argv ) == 2 and sys.argv[1].endswith(""".json""" ): # If we pass only one argument to the script and it's the path to a json file, # let's parse it to get our arguments. UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ :str = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) ) else: UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ :Union[str, Any] = parser.parse_args_into_dataclasses() # Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The # information sent is the one passed as arguments along with your Python/PyTorch versions. send_example_telemetry("""run_mae""" , lowercase__ , lowercase__ ) # Setup logging logging.basicConfig( format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""" , datefmt="""%m/%d/%Y %H:%M:%S""" , handlers=[logging.StreamHandler(sys.stdout )] , ) if training_args.should_log: # The default of training_args.log_level is passive, so we set log level at info here to have that default. transformers.utils.logging.set_verbosity_info() UpperCamelCase__ :List[str] = training_args.get_process_log_level() logger.setLevel(lowercase__ ) transformers.utils.logging.set_verbosity(lowercase__ ) transformers.utils.logging.enable_default_handler() transformers.utils.logging.enable_explicit_format() # Log on each process the small summary: logger.warning( f"""Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}""" + f"""distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}""" ) logger.info(f"""Training/evaluation parameters {training_args}""" ) # Detecting last checkpoint. 
UpperCamelCase__ :Union[str, Any] = None if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir: UpperCamelCase__ :List[str] = get_last_checkpoint(training_args.output_dir ) if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0: raise ValueError( f"""Output directory ({training_args.output_dir}) already exists and is not empty. """ """Use --overwrite_output_dir to overcome.""" ) elif last_checkpoint is not None and training_args.resume_from_checkpoint is None: logger.info( f"""Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change """ """the `--output_dir` or add `--overwrite_output_dir` to train from scratch.""" ) # Initialize our dataset. UpperCamelCase__ :Tuple = load_dataset( data_args.dataset_name , data_args.dataset_config_name , data_files=data_args.data_files , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , ) # If we don't have a validation split, split off a percentage of train as validation. UpperCamelCase__ :int = None if """validation""" in ds.keys() else data_args.train_val_split if isinstance(data_args.train_val_split , lowercase__ ) and data_args.train_val_split > 0.0: UpperCamelCase__ :Optional[Any] = ds["""train"""].train_test_split(data_args.train_val_split ) UpperCamelCase__ :Union[str, Any] = split["""train"""] UpperCamelCase__ :Any = split["""test"""] # Load pretrained model and image processor # # Distributed training: # The .from_pretrained methods guarantee that only one local process can concurrently # download model & vocab. UpperCamelCase__ :Optional[int] = { """cache_dir""": model_args.cache_dir, """revision""": model_args.model_revision, """use_auth_token""": True if model_args.use_auth_token else None, } if model_args.config_name: UpperCamelCase__ :Any = ViTMAEConfig.from_pretrained(model_args.config_name , **lowercase__ ) elif model_args.model_name_or_path: UpperCamelCase__ :Union[str, Any] = ViTMAEConfig.from_pretrained(model_args.model_name_or_path , **lowercase__ ) else: UpperCamelCase__ :Optional[Any] = ViTMAEConfig() logger.warning("""You are instantiating a new config instance from scratch.""" ) if model_args.config_overrides is not None: logger.info(f"""Overriding config: {model_args.config_overrides}""" ) config.update_from_string(model_args.config_overrides ) logger.info(f"""New config: {config}""" ) # adapt config config.update( { """mask_ratio""": model_args.mask_ratio, """norm_pix_loss""": model_args.norm_pix_loss, } ) # create image processor if model_args.image_processor_name: UpperCamelCase__ :str = ViTImageProcessor.from_pretrained(model_args.image_processor_name , **lowercase__ ) elif model_args.model_name_or_path: UpperCamelCase__ :Dict = ViTImageProcessor.from_pretrained(model_args.model_name_or_path , **lowercase__ ) else: UpperCamelCase__ :Tuple = ViTImageProcessor() # create model if model_args.model_name_or_path: UpperCamelCase__ :Any = ViTMAEForPreTraining.from_pretrained( model_args.model_name_or_path , from_tf=bool(""".ckpt""" in model_args.model_name_or_path ) , config=lowercase__ , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , ) else: logger.info("""Training new model from scratch""" ) UpperCamelCase__ :Optional[int] = ViTMAEForPreTraining(lowercase__ ) if training_args.do_train: UpperCamelCase__ :Optional[Any] = ds["""train"""].column_names else: UpperCamelCase__ :Union[str, Any] = 
ds["""validation"""].column_names if data_args.image_column_name is not None: UpperCamelCase__ :Union[str, Any] = data_args.image_column_name elif "image" in column_names: UpperCamelCase__ :Optional[Any] = """image""" elif "img" in column_names: UpperCamelCase__ :List[str] = """img""" else: UpperCamelCase__ :List[Any] = column_names[0] # transformations as done in original MAE paper # source: https://github.com/facebookresearch/mae/blob/main/main_pretrain.py if "shortest_edge" in image_processor.size: UpperCamelCase__ :List[str] = image_processor.size["""shortest_edge"""] else: UpperCamelCase__ :int = (image_processor.size["""height"""], image_processor.size["""width"""]) UpperCamelCase__ :Any = Compose( [ Lambda(lambda lowercase__ : img.convert("""RGB""" ) if img.mode != "RGB" else img ), RandomResizedCrop(lowercase__ , scale=(0.2, 1.0) , interpolation=InterpolationMode.BICUBIC ), RandomHorizontalFlip(), ToTensor(), Normalize(mean=image_processor.image_mean , std=image_processor.image_std ), ] ) def preprocess_images(lowercase__ : Tuple ): UpperCamelCase__ :List[Any] = [transforms(lowercase__ ) for image in examples[image_column_name]] return examples if training_args.do_train: if "train" not in ds: raise ValueError("""--do_train requires a train dataset""" ) if data_args.max_train_samples is not None: UpperCamelCase__ :Optional[int] = ds["""train"""].shuffle(seed=training_args.seed ).select(range(data_args.max_train_samples ) ) # Set the training transforms ds["train"].set_transform(lowercase__ ) if training_args.do_eval: if "validation" not in ds: raise ValueError("""--do_eval requires a validation dataset""" ) if data_args.max_eval_samples is not None: UpperCamelCase__ :Optional[Any] = ( ds["""validation"""].shuffle(seed=training_args.seed ).select(range(data_args.max_eval_samples ) ) ) # Set the validation transforms ds["validation"].set_transform(lowercase__ ) # Compute absolute learning rate UpperCamelCase__ :Tuple = ( training_args.train_batch_size * training_args.gradient_accumulation_steps * training_args.world_size ) if training_args.base_learning_rate is not None: UpperCamelCase__ :Any = training_args.base_learning_rate * total_train_batch_size / 256 # Initialize our trainer UpperCamelCase__ :Union[str, Any] = Trainer( model=lowercase__ , args=lowercase__ , train_dataset=ds["""train"""] if training_args.do_train else None , eval_dataset=ds["""validation"""] if training_args.do_eval else None , tokenizer=lowercase__ , data_collator=lowercase__ , ) # Training if training_args.do_train: UpperCamelCase__ :Any = None if training_args.resume_from_checkpoint is not None: UpperCamelCase__ :int = training_args.resume_from_checkpoint elif last_checkpoint is not None: UpperCamelCase__ :Dict = last_checkpoint UpperCamelCase__ :Union[str, Any] = trainer.train(resume_from_checkpoint=lowercase__ ) trainer.save_model() trainer.log_metrics("""train""" , train_result.metrics ) trainer.save_metrics("""train""" , train_result.metrics ) trainer.save_state() # Evaluation if training_args.do_eval: UpperCamelCase__ :int = trainer.evaluate() trainer.log_metrics("""eval""" , lowercase__ ) trainer.save_metrics("""eval""" , lowercase__ ) # Write model card and (optionally) push to hub UpperCamelCase__ :Optional[int] = { """tasks""": """masked-auto-encoding""", """dataset""": data_args.dataset_name, """tags""": ["""masked-auto-encoding"""], } if training_args.push_to_hub: trainer.push_to_hub(**lowercase__ ) else: trainer.create_model_card(**lowercase__ ) def A ( lowercase__ : Union[str, Any] ) -> Dict: # 
For xla_spawn (TPUs) main() if __name__ == "__main__": main()
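The argument plumbing in the script above is the standard `HfArgumentParser` pattern: dataclasses define the flags, and a single JSON file can stand in for the command line. The same pattern in isolation (a toy dataclass, not the script's real argument set):

```python
import sys
from dataclasses import dataclass, field

from transformers import HfArgumentParser, TrainingArguments


@dataclass
class ToyArguments:
    mask_ratio: float = field(default=0.75, metadata={"help": "Fraction of patches to mask."})


parser = HfArgumentParser((ToyArguments, TrainingArguments))
if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
    # A single JSON file can supply every flag
    toy_args, training_args = parser.parse_json_file(json_file=sys.argv[1])
else:
    toy_args, training_args = parser.parse_args_into_dataclasses()
```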
45
0
import os import time import warnings from dataclasses import dataclass, field from enum import Enum from typing import List, Optional, Union import torch from filelock import FileLock from torch.utils.data import Dataset from ...tokenization_utils_base import PreTrainedTokenizerBase from ...utils import logging from ..processors.glue import glue_convert_examples_to_features, glue_output_modes, glue_processors from ..processors.utils import InputFeatures a : List[Any] = logging.get_logger(__name__) @dataclass class _a : A = field(metadata={'''help''': '''The name of the task to train on: ''' + ''', '''.join(glue_processors.keys() )} ) A = field( metadata={'''help''': '''The input data dir. Should contain the .tsv files (or other data files) for the task.'''} ) A = field( default=128 , metadata={ '''help''': ( '''The maximum total input sequence length after tokenization. Sequences longer ''' '''than this will be truncated, sequences shorter will be padded.''' ) } , ) A = field( default=_lowerCAmelCase , metadata={'''help''': '''Overwrite the cached training and evaluation sets'''} ) def __snake_case (self ) -> Optional[int]: UpperCAmelCase_: List[Any] = self.task_name.lower() class _a ( _lowerCAmelCase ): A = """train""" A = """dev""" A = """test""" class _a ( _lowerCAmelCase ): A = 42 A = 42 A = 42 def __init__(self, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ = None, SCREAMING_SNAKE_CASE_ = Split.train, SCREAMING_SNAKE_CASE_ = None, ) -> Dict: warnings.warn( """This dataset will be removed from the library soon, preprocessing should be handled with the 🤗 Datasets """ """library. You can have a look at this example script for pointers: """ """https://github.com/huggingface/transformers/blob/main/examples/pytorch/text-classification/run_glue.py""", lowerCamelCase__, ) UpperCAmelCase_: List[Any] = args UpperCAmelCase_: Optional[int] = glue_processors[args.task_name]() UpperCAmelCase_: List[str] = glue_output_modes[args.task_name] if isinstance(lowerCamelCase__, lowerCamelCase__ ): try: UpperCAmelCase_: Tuple = Split[mode] except KeyError: raise KeyError("""mode is not a valid split name""" ) # Load data features from cache or dataset file UpperCAmelCase_: str = os.path.join( cache_dir if cache_dir is not None else args.data_dir, f'cached_{mode.value}_{tokenizer.__class__.__name__}_{args.max_seq_length}_{args.task_name}', ) UpperCAmelCase_: str = self.processor.get_labels() if args.task_name in ["mnli", "mnli-mm"] and tokenizer.__class__.__name__ in ( "RobertaTokenizer", "RobertaTokenizerFast", "XLMRobertaTokenizer", "BartTokenizer", "BartTokenizerFast", ): # HACK(label indices are swapped in RoBERTa pretrained model) UpperCAmelCase_: str = label_list[2], label_list[1] UpperCAmelCase_: Optional[Any] = label_list # Make sure only the first process in distributed training processes the dataset, # and the others will use the cache. 
UpperCAmelCase_: Optional[Any] = cached_features_file + """.lock""" with FileLock(lowerCamelCase__ ): if os.path.exists(lowerCamelCase__ ) and not args.overwrite_cache: UpperCAmelCase_: Dict = time.time() UpperCAmelCase_: List[str] = torch.load(lowerCamelCase__ ) logger.info( f'Loading features from cached file {cached_features_file} [took %.3f s]', time.time() - start ) else: logger.info(f'Creating features from dataset file at {args.data_dir}' ) if mode == Split.dev: UpperCAmelCase_: Any = self.processor.get_dev_examples(args.data_dir ) elif mode == Split.test: UpperCAmelCase_: List[str] = self.processor.get_test_examples(args.data_dir ) else: UpperCAmelCase_: int = self.processor.get_train_examples(args.data_dir ) if limit_length is not None: UpperCAmelCase_: str = examples[:limit_length] UpperCAmelCase_: Tuple = glue_convert_examples_to_features( lowerCamelCase__, lowerCamelCase__, max_length=args.max_seq_length, label_list=lowerCamelCase__, output_mode=self.output_mode, ) UpperCAmelCase_: Union[str, Any] = time.time() torch.save(self.features, lowerCamelCase__ ) # ^ This seems to take a lot of time so I want to investigate why and how we can improve. logger.info( f'Saving features into cached file {cached_features_file} [took {time.time() - start:.3f} s]' ) def __len__(self ) -> Optional[int]: return len(self.features ) def __getitem__(self, SCREAMING_SNAKE_CASE_ ) -> List[Any]: return self.features[i] def __snake_case (self ) -> Union[str, Any]: return self.label_list
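The dataset class above guards its feature cache with a file lock so that, under distributed training, only one process builds the cache while the others wait and then load it. The pattern in isolation (illustrative path; `build_features` is a hypothetical stand-in for the expensive preprocessing):

```python
import os

import torch
from filelock import FileLock

cache_file = "cached_features.pt"

with FileLock(cache_file + ".lock"):
    if os.path.exists(cache_file):
        # Every process after the first takes this branch
        features = torch.load(cache_file)
    else:
        features = build_features()  # hypothetical expensive preprocessing
        torch.save(features, cache_file)
```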
556
from __future__ import annotations


def prime_sieve(limit: int) -> list[int]:
    """Return all primes below ``limit`` using a sieve of Eratosthenes."""
    is_prime = [True] * limit
    is_prime[0] = False
    is_prime[1] = False
    is_prime[2] = True

    # Mark multiples of each odd candidate as composite
    for i in range(3, int(limit**0.5 + 1), 2):
        index = i * 2
        while index < limit:
            is_prime[index] = False
            index = index + i

    primes = [2]
    for i in range(3, limit, 2):
        if is_prime[i]:
            primes.append(i)
    return primes


def solution(ceiling: int = 1_000_000) -> int:
    """Return the prime below ``ceiling`` that is the sum of the most
    consecutive primes (Project Euler problem 50)."""
    primes = prime_sieve(ceiling)
    length = 0
    largest = 0

    for i in range(len(primes)):
        for j in range(i + length, len(primes)):
            sol = sum(primes[i:j])
            if sol >= ceiling:
                break
            if sol in primes:
                length = j - i
                largest = sol
    return largest


if __name__ == "__main__":
    print(f"{solution() = }")
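Project Euler problem 50 states that, below one thousand, the prime 953 is the sum of 21 consecutive primes, which makes a cheap sanity check for the cleaned-up functions above:

```python
assert prime_sieve(10) == [2, 3, 5, 7]
assert solution(1_000) == 953  # 21 consecutive primes starting at 7
```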
45
0
def solution(length: int = 50) -> int:
    """Count the tilings of a row of ``length`` unit squares with coloured
    tiles of length two, three or four, using one tile length at a time."""
    different_colour_ways_number = [[0] * 3 for _ in range(length + 1)]

    for row_length in range(length + 1):
        for tile_length in range(2, 5):
            for tile_start in range(row_length - tile_length + 1):
                different_colour_ways_number[row_length][tile_length - 2] += (
                    different_colour_ways_number[row_length - tile_start - tile_length][
                        tile_length - 2
                    ]
                    + 1
                )

    return sum(different_colour_ways_number[length])


if __name__ == "__main__":
    print(f"{solution() = }")
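This appears to be the dynamic programme for Project Euler problem 116 (replace grey squares with red length-2, green length-3, or blue length-4 tiles, one colour at a time). A memoised recurrence gives an independent cross-check; `solution` refers to the cleaned-up function above, and the `== 12` value is the worked example from the problem statement (a row of five squares admits 7 + 3 + 2 = 12 tilings):

```python
from functools import lru_cache


def ways_single_length(n: int, k: int) -> int:
    """Tilings of an n-square row with black squares and length-k tiles,
    requiring at least one tile."""

    @lru_cache(maxsize=None)
    def total(m: int) -> int:  # includes the all-black row
        return 1 if m < k else total(m - 1) + total(m - k)

    return total(n) - 1


assert solution(5) == 12
assert solution(50) == sum(ways_single_length(50, k) for k in (2, 3, 4))
```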
16
import unittest from transformers import GPTNeoXJapaneseConfig, is_torch_available from transformers.models.gpt_neox_japanese.tokenization_gpt_neox_japanese import GPTNeoXJapaneseTokenizer from transformers.testing_utils import require_torch, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import GPTNeoXJapaneseForCausalLM, GPTNeoXJapaneseModel class lowerCAmelCase_ : """simple docstring""" def __init__( self :Optional[Any] , lowerCamelCase__ :Optional[int] , lowerCamelCase__ :Tuple=13 , lowerCamelCase__ :Tuple=7 , lowerCamelCase__ :Optional[Any]=True , lowerCamelCase__ :Union[str, Any]=True , lowerCamelCase__ :Optional[int]=True , lowerCamelCase__ :List[Any]=True , lowerCamelCase__ :List[str]=99 , lowerCamelCase__ :int=32 , lowerCamelCase__ :List[Any]=5 , lowerCamelCase__ :Tuple=4 , lowerCamelCase__ :List[Any]=4 , lowerCamelCase__ :str="gelu" , lowerCamelCase__ :Optional[Any]=0.0 , lowerCamelCase__ :Optional[int]=0.1 , lowerCamelCase__ :str=True , lowerCamelCase__ :Dict=5_12 , lowerCamelCase__ :Optional[Any]=16 , lowerCamelCase__ :Optional[Any]=2 , lowerCamelCase__ :Union[str, Any]=0.02 , lowerCamelCase__ :Union[str, Any]=3 , lowerCamelCase__ :int=4 , lowerCamelCase__ :str=None , ): UpperCamelCase__ :Optional[Any] = parent UpperCamelCase__ :Dict = batch_size UpperCamelCase__ :Tuple = seq_length UpperCamelCase__ :Dict = is_training UpperCamelCase__ :List[str] = use_input_mask UpperCamelCase__ :Optional[Any] = use_token_type_ids UpperCamelCase__ :Tuple = use_labels UpperCamelCase__ :int = vocab_size UpperCamelCase__ :Tuple = hidden_size UpperCamelCase__ :Optional[Any] = num_hidden_layers UpperCamelCase__ :int = num_attention_heads UpperCamelCase__ :Optional[int] = intermediate_multiple_size UpperCamelCase__ :Optional[Any] = hidden_act UpperCamelCase__ :Optional[int] = hidden_dropout UpperCamelCase__ :List[Any] = attention_dropout UpperCamelCase__ :List[str] = weight_tying UpperCamelCase__ :List[str] = max_position_embeddings UpperCamelCase__ :Dict = type_vocab_size UpperCamelCase__ :List[Any] = type_sequence_label_size UpperCamelCase__ :List[str] = initializer_range UpperCamelCase__ :int = num_labels UpperCamelCase__ :Dict = num_choices UpperCamelCase__ :Any = scope def __a ( self :Any ): UpperCamelCase__ :Tuple = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) UpperCamelCase__ :str = None if self.use_input_mask: UpperCamelCase__ :Optional[int] = random_attention_mask([self.batch_size, self.seq_length] ) UpperCamelCase__ :Union[str, Any] = None if self.use_labels: UpperCamelCase__ :Tuple = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) UpperCamelCase__ :Optional[Any] = self.get_config() return config, input_ids, input_mask, token_labels def __a ( self :Union[str, Any] ): return GPTNeoXJapaneseConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_multiple_size=self.intermediate_multiple_size , hidden_act=self.hidden_act , hidden_dropout=self.hidden_dropout , attention_dropout=self.attention_dropout , weight_tying=self.weight_tying , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=lowerCamelCase__ , initializer_range=self.initializer_range , ) def __a ( self 
:Union[str, Any] ): UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ :Optional[Any] = self.prepare_config_and_inputs() UpperCamelCase__ :Optional[int] = True return config, input_ids, input_mask, token_labels def __a ( self :List[str] , lowerCamelCase__ :List[Any] , lowerCamelCase__ :Any , lowerCamelCase__ :Any ): UpperCamelCase__ :Union[str, Any] = GPTNeoXJapaneseModel(config=lowerCamelCase__ ) model.to(lowerCamelCase__ ) model.eval() UpperCamelCase__ :Tuple = model(lowerCamelCase__ , attention_mask=lowerCamelCase__ ) UpperCamelCase__ :Optional[Any] = model(lowerCamelCase__ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def __a ( self :Dict , lowerCamelCase__ :Dict , lowerCamelCase__ :Optional[Any] , lowerCamelCase__ :List[Any] ): UpperCamelCase__ :List[str] = True UpperCamelCase__ :int = GPTNeoXJapaneseModel(lowerCamelCase__ ) model.to(lowerCamelCase__ ) model.eval() UpperCamelCase__ :Union[str, Any] = model(lowerCamelCase__ , attention_mask=lowerCamelCase__ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def __a ( self :List[Any] , lowerCamelCase__ :Tuple , lowerCamelCase__ :Optional[Any] , lowerCamelCase__ :Any , lowerCamelCase__ :Optional[Any] ): UpperCamelCase__ :Any = GPTNeoXJapaneseForCausalLM(config=lowerCamelCase__ ) model.to(lowerCamelCase__ ) model.eval() UpperCamelCase__ :Tuple = model(lowerCamelCase__ , attention_mask=lowerCamelCase__ , labels=lowerCamelCase__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def __a ( self :Any , lowerCamelCase__ :Optional[int] , lowerCamelCase__ :Optional[Any] , lowerCamelCase__ :List[str] ): UpperCamelCase__ :Union[str, Any] = True UpperCamelCase__ :List[str] = GPTNeoXJapaneseForCausalLM(config=lowerCamelCase__ ) model.to(lowerCamelCase__ ) model.eval() # first forward pass UpperCamelCase__ :Optional[Any] = model(lowerCamelCase__ , attention_mask=lowerCamelCase__ , use_cache=lowerCamelCase__ ) UpperCamelCase__ :List[Any] = outputs.past_key_values # create hypothetical multiple next token and extent to next_input_ids UpperCamelCase__ :List[Any] = ids_tensor((self.batch_size, 3) , config.vocab_size ) UpperCamelCase__ :Optional[Any] = ids_tensor((self.batch_size, 3) , vocab_size=2 ) # append to next input_ids and UpperCamelCase__ :Union[str, Any] = torch.cat([input_ids, next_tokens] , dim=-1 ) UpperCamelCase__ :Optional[int] = torch.cat([input_mask, next_mask] , dim=-1 ) UpperCamelCase__ :Union[str, Any] = model(lowerCamelCase__ , attention_mask=lowerCamelCase__ , output_hidden_states=lowerCamelCase__ ) UpperCamelCase__ :Optional[int] = output_from_no_past["""hidden_states"""][0] UpperCamelCase__ :Union[str, Any] = model( lowerCamelCase__ , attention_mask=lowerCamelCase__ , past_key_values=lowerCamelCase__ , output_hidden_states=lowerCamelCase__ , )["""hidden_states"""][0] # select random slice UpperCamelCase__ :int = ids_tensor((1,) , output_from_past.shape[-1] ).item() UpperCamelCase__ :str = output_from_no_past[:, -3:, random_slice_idx].detach() UpperCamelCase__ :Any = output_from_past[:, :, random_slice_idx].detach() self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] ) # test that outputs are equal for slice self.parent.assertTrue(torch.allclose(lowerCamelCase__ , lowerCamelCase__ , atol=1e-3 ) ) def __a ( self :Tuple ): UpperCamelCase__ :int = self.prepare_config_and_inputs() UpperCamelCase__ , 
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ :List[Any] = config_and_inputs UpperCamelCase__ :Any = {"""input_ids""": input_ids, """attention_mask""": input_mask} return config, inputs_dict @require_torch class lowerCAmelCase_ ( lowercase , lowercase , unittest.TestCase ): """simple docstring""" _snake_case : Dict = (GPTNeoXJapaneseModel, GPTNeoXJapaneseForCausalLM) if is_torch_available() else () _snake_case : int = (GPTNeoXJapaneseForCausalLM,) if is_torch_available() else () _snake_case : str = ( {"""feature-extraction""": GPTNeoXJapaneseModel, """text-generation""": GPTNeoXJapaneseForCausalLM} if is_torch_available() else {} ) _snake_case : Union[str, Any] = False _snake_case : Dict = False _snake_case : List[str] = False _snake_case : Optional[int] = False def __a ( self :List[Any] ): UpperCamelCase__ :Tuple = GPTNeoXJapaneseModelTester(self ) UpperCamelCase__ :Optional[Any] = ConfigTester(self , config_class=lowerCamelCase__ , hidden_size=37 ) def __a ( self :Dict ): self.config_tester.run_common_tests() def __a ( self :Any ): UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ :Optional[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) def __a ( self :Any ): UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ :Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_decoder() self.model_tester.create_and_check_model_as_decoder(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) def __a ( self :Union[str, Any] ): # This regression test was failing with PyTorch < 1.3 UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ :List[str] = self.model_tester.prepare_config_and_inputs_for_decoder() UpperCamelCase__ :Dict = None self.model_tester.create_and_check_model_as_decoder(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) def __a ( self :List[str] ): UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ :int = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_decoder_model_past_large_inputs(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) def __a ( self :Union[str, Any] ): UpperCamelCase__ :int = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_causal_lm(*lowerCamelCase__ ) @slow def __a ( self :int ): UpperCamelCase__ :int = """abeja/gpt-neox-japanese-2.7b""" UpperCamelCase__ :List[Any] = ["""データサイエンティストとは、""", """100年後に必要とされる会社は、""", """フルリモートの環境で働くために必要なことは、""", """国境の長いトンネルを抜けると""", """美味しい日本食といえば、"""] UpperCamelCase__ :Union[str, Any] = [ """データサイエンティストとは、データを分析し、ビジネスに役立つ知見を導き出す専門家のことです。""", """100年後に必要とされる会社は、「人」が中心の会社です。""", """フルリモートの環境で働くために必要なことは、「自分の時間をコントロールする」ことです。""", """国境の長いトンネルを抜けると、そこは雪国だった。""", """美味しい日本食といえば、やっぱりお寿司ですよね。""", ] UpperCamelCase__ :Any = GPTNeoXJapaneseTokenizer.from_pretrained(lowerCamelCase__ ) UpperCamelCase__ :List[Any] = GPTNeoXJapaneseForCausalLM.from_pretrained(lowerCamelCase__ ) UpperCamelCase__ :Optional[Any] = [] for prompt in prompts: UpperCamelCase__ :str = tokenizer(lowerCamelCase__ , return_tensors="""pt""" ).input_ids UpperCamelCase__ :Union[str, Any] = model.generate(lowerCamelCase__ , max_length=50 ) UpperCamelCase__ :Dict = tokenizer.batch_decode(lowerCamelCase__ , skip_special_tokens=lowerCamelCase__ ) predicted_outputs += generated_string self.assertListEqual(lowerCamelCase__ , lowerCamelCase__ )
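Outside the test harness, the slow test above amounts to plain greedy generation; the same three calls in isolation (model id and prompt taken from the test):

```python
from transformers import GPTNeoXJapaneseForCausalLM, GPTNeoXJapaneseTokenizer

tokenizer = GPTNeoXJapaneseTokenizer.from_pretrained("abeja/gpt-neox-japanese-2.7b")
model = GPTNeoXJapaneseForCausalLM.from_pretrained("abeja/gpt-neox-japanese-2.7b")

input_ids = tokenizer("データサイエンティストとは、", return_tensors="pt").input_ids
output_ids = model.generate(input_ids, max_length=50)
print(tokenizer.batch_decode(output_ids, skip_special_tokens=True)[0])
```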
45
0
import collections import inspect import unittest from transformers import FocalNetConfig from transformers.testing_utils import require_torch, require_vision, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_backbone_common import BackboneTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import ( FocalNetBackbone, FocalNetForImageClassification, FocalNetForMaskedImageModeling, FocalNetModel, ) from transformers.models.focalnet.modeling_focalnet import FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import AutoImageProcessor class __lowerCAmelCase : def __init__( self , lowerCAmelCase , lowerCAmelCase=13 , lowerCAmelCase=32 , lowerCAmelCase=2 , lowerCAmelCase=3 , lowerCAmelCase=16 , lowerCAmelCase=[32, 64, 128] , lowerCAmelCase=[1, 2, 1] , lowerCAmelCase=[2, 2, 4] , lowerCAmelCase=2 , lowerCAmelCase=2.0 , lowerCAmelCase=True , lowerCAmelCase=0.0 , lowerCAmelCase=0.0 , lowerCAmelCase=0.1 , lowerCAmelCase="gelu" , lowerCAmelCase=False , lowerCAmelCase=True , lowerCAmelCase=0.02 , lowerCAmelCase=1e-5 , lowerCAmelCase=True , lowerCAmelCase=None , lowerCAmelCase=True , lowerCAmelCase=10 , lowerCAmelCase=8 , lowerCAmelCase=["stage1", "stage2"] , lowerCAmelCase=[1, 2] , ) -> Dict: '''simple docstring''' _lowercase =parent _lowercase =batch_size _lowercase =image_size _lowercase =patch_size _lowercase =num_channels _lowercase =embed_dim _lowercase =hidden_sizes _lowercase =depths _lowercase =num_heads _lowercase =window_size _lowercase =mlp_ratio _lowercase =qkv_bias _lowercase =hidden_dropout_prob _lowercase =attention_probs_dropout_prob _lowercase =drop_path_rate _lowercase =hidden_act _lowercase =use_absolute_embeddings _lowercase =patch_norm _lowercase =layer_norm_eps _lowercase =initializer_range _lowercase =is_training _lowercase =scope _lowercase =use_labels _lowercase =type_sequence_label_size _lowercase =encoder_stride _lowercase =out_features _lowercase =out_indices def A__ ( self ) -> Tuple: '''simple docstring''' _lowercase =floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) _lowercase =None if self.use_labels: _lowercase =ids_tensor([self.batch_size] , self.type_sequence_label_size ) _lowercase =self.get_config() return config, pixel_values, labels def A__ ( self ) -> Optional[int]: '''simple docstring''' return FocalNetConfig( image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , embed_dim=self.embed_dim , hidden_sizes=self.hidden_sizes , depths=self.depths , num_heads=self.num_heads , window_size=self.window_size , mlp_ratio=self.mlp_ratio , qkv_bias=self.qkv_bias , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , drop_path_rate=self.drop_path_rate , hidden_act=self.hidden_act , use_absolute_embeddings=self.use_absolute_embeddings , path_norm=self.patch_norm , layer_norm_eps=self.layer_norm_eps , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , out_features=self.out_features , out_indices=self.out_indices , ) def A__ ( self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ) -> Any: '''simple docstring''' _lowercase 
=FocalNetModel(config=lowerCamelCase__ ) model.to(lowerCamelCase__ ) model.eval() _lowercase =model(lowerCamelCase__ ) _lowercase =((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths ) - 1)) _lowercase =int(config.embed_dim * 2 ** (len(config.depths ) - 1) ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, expected_seq_len, expected_dim) ) def A__ ( self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ) -> Tuple: '''simple docstring''' _lowercase =FocalNetBackbone(config=lowerCamelCase__ ) model.to(lowerCamelCase__ ) model.eval() _lowercase =model(lowerCamelCase__ ) # verify feature maps self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) ) self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.image_size, 8, 8] ) # verify channels self.parent.assertEqual(len(model.channels ) , len(config.out_features ) ) self.parent.assertListEqual(model.channels , config.hidden_sizes[:-1] ) # verify backbone works with out_features=None _lowercase =None _lowercase =FocalNetBackbone(config=lowerCamelCase__ ) model.to(lowerCamelCase__ ) model.eval() _lowercase =model(lowerCamelCase__ ) # verify feature maps self.parent.assertEqual(len(result.feature_maps ) , 1 ) self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.image_size * 2, 4, 4] ) # verify channels self.parent.assertEqual(len(model.channels ) , 1 ) self.parent.assertListEqual(model.channels , [config.hidden_sizes[-1]] ) def A__ ( self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ) -> List[Any]: '''simple docstring''' _lowercase =FocalNetForMaskedImageModeling(config=lowerCamelCase__ ) model.to(lowerCamelCase__ ) model.eval() _lowercase =model(lowerCamelCase__ ) self.parent.assertEqual( result.reconstruction.shape , (self.batch_size, self.num_channels, self.image_size, self.image_size) ) # test greyscale images _lowercase =1 _lowercase =FocalNetForMaskedImageModeling(lowerCamelCase__ ) model.to(lowerCamelCase__ ) model.eval() _lowercase =floats_tensor([self.batch_size, 1, self.image_size, self.image_size] ) _lowercase =model(lowerCamelCase__ ) self.parent.assertEqual(result.reconstruction.shape , (self.batch_size, 1, self.image_size, self.image_size) ) def A__ ( self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ) -> Tuple: '''simple docstring''' _lowercase =self.type_sequence_label_size _lowercase =FocalNetForImageClassification(lowerCamelCase__ ) model.to(lowerCamelCase__ ) model.eval() _lowercase =model(lowerCamelCase__ , labels=lowerCamelCase__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) # test greyscale images _lowercase =1 _lowercase =FocalNetForImageClassification(lowerCamelCase__ ) model.to(lowerCamelCase__ ) model.eval() _lowercase =floats_tensor([self.batch_size, 1, self.image_size, self.image_size] ) _lowercase =model(lowerCamelCase__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) def A__ ( self ) -> int: '''simple docstring''' _lowercase =self.prepare_config_and_inputs() _lowercase =config_and_inputs _lowercase ={"""pixel_values""": pixel_values} return config, inputs_dict @require_torch class __lowerCAmelCase ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , unittest.TestCase ): _a = ( ( FocalNetModel, FocalNetForImageClassification, FocalNetForMaskedImageModeling, FocalNetBackbone, ) if is_torch_available() else () ) _a = ( {"""feature-extraction""": FocalNetModel, 
"""image-classification""": FocalNetForImageClassification} if is_torch_available() else {} ) _a = False _a = False _a = False _a = False _a = False def A__ ( self ) -> Optional[int]: '''simple docstring''' _lowercase =FocalNetModelTester(self ) _lowercase =ConfigTester(self , config_class=lowerCamelCase__ , embed_dim=37 , has_text_modality=lowerCamelCase__ ) def A__ ( self ) -> Union[str, Any]: '''simple docstring''' self.create_and_test_config_common_properties() self.config_tester.create_and_test_config_to_json_string() self.config_tester.create_and_test_config_to_json_file() self.config_tester.create_and_test_config_from_and_save_pretrained() self.config_tester.create_and_test_config_with_num_labels() self.config_tester.check_config_can_be_init_without_params() self.config_tester.check_config_arguments_init() def A__ ( self ) -> Optional[Any]: '''simple docstring''' return def A__ ( self ) -> List[Any]: '''simple docstring''' _lowercase =self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*lowerCamelCase__ ) def A__ ( self ) -> Dict: '''simple docstring''' _lowercase =self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_backbone(*lowerCamelCase__ ) def A__ ( self ) -> str: '''simple docstring''' _lowercase =self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_image_modeling(*lowerCamelCase__ ) def A__ ( self ) -> Optional[int]: '''simple docstring''' _lowercase =self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*lowerCamelCase__ ) @unittest.skip(reason='FocalNet does not use inputs_embeds' ) def A__ ( self ) -> Any: '''simple docstring''' pass @unittest.skip(reason='FocalNet does not use feedforward chunking' ) def A__ ( self ) -> Any: '''simple docstring''' pass def A__ ( self ) -> Union[str, Any]: '''simple docstring''' _lowercase =self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes[:-1]: _lowercase =model_class(lowerCamelCase__ ) self.assertIsInstance(model.get_input_embeddings() , (nn.Module) ) _lowercase =model.get_output_embeddings() self.assertTrue(x is None or isinstance(lowerCamelCase__ , nn.Linear ) ) def A__ ( self ) -> int: '''simple docstring''' _lowercase =self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes[:-1]: _lowercase =model_class(lowerCamelCase__ ) _lowercase =inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic _lowercase =[*signature.parameters.keys()] _lowercase =["""pixel_values"""] self.assertListEqual(arg_names[:1] , lowerCamelCase__ ) def A__ ( self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ) -> Any: '''simple docstring''' _lowercase =model_class(lowerCamelCase__ ) model.to(lowerCamelCase__ ) model.eval() with torch.no_grad(): _lowercase =model(**self._prepare_for_class(lowerCamelCase__ , lowerCamelCase__ ) ) _lowercase =outputs.hidden_states _lowercase =getattr( self.model_tester , 'expected_num_hidden_layers' , len(self.model_tester.depths ) + 1 ) self.assertEqual(len(lowerCamelCase__ ) , lowerCamelCase__ ) # FocalNet has a different seq_length _lowercase =( config.patch_size if isinstance(config.patch_size , collections.abc.Iterable ) else (config.patch_size, config.patch_size) ) _lowercase =(image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0]) self.assertListEqual( list(hidden_states[0].shape[-2:] ) , 
[num_patches, self.model_tester.embed_dim] , ) _lowercase =outputs.reshaped_hidden_states self.assertEqual(len(lowerCamelCase__ ) , lowerCamelCase__ ) _lowercase =reshaped_hidden_states[0].shape _lowercase =( reshaped_hidden_states[0].view(lowerCamelCase__ , lowerCamelCase__ , height * width ).permute(0 , 2 , 1 ) ) self.assertListEqual( list(reshaped_hidden_states.shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , ) def A__ ( self ) -> Any: '''simple docstring''' _lowercase =self.model_tester.prepare_config_and_inputs_for_common() _lowercase =( self.model_tester.image_size if isinstance(self.model_tester.image_size , collections.abc.Iterable ) else (self.model_tester.image_size, self.model_tester.image_size) ) for model_class in self.all_model_classes[:-1]: _lowercase =True self.check_hidden_states_output(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] _lowercase =True self.check_hidden_states_output(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) def A__ ( self ) -> int: '''simple docstring''' _lowercase =self.model_tester.prepare_config_and_inputs_for_common() _lowercase =3 _lowercase =( self.model_tester.image_size if isinstance(self.model_tester.image_size , collections.abc.Iterable ) else (self.model_tester.image_size, self.model_tester.image_size) ) _lowercase =( config.patch_size if isinstance(config.patch_size , collections.abc.Iterable ) else (config.patch_size, config.patch_size) ) _lowercase =image_size[0] + patch_size[0] - (image_size[0] % patch_size[0]) _lowercase =image_size[1] + patch_size[1] - (image_size[1] % patch_size[1]) for model_class in self.all_model_classes[:-1]: _lowercase =True self.check_hidden_states_output(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , (padded_height, padded_width) ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] _lowercase =True self.check_hidden_states_output(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , (padded_height, padded_width) ) @slow def A__ ( self ) -> Dict: '''simple docstring''' for model_name in FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: _lowercase =FocalNetModel.from_pretrained(lowerCamelCase__ ) self.assertIsNotNone(lowerCamelCase__ ) def A__ ( self ) -> Tuple: '''simple docstring''' _lowercase =self.model_tester.prepare_config_and_inputs_for_common() _lowercase =_config_zero_init(lowerCamelCase__ ) for model_class in self.all_model_classes: _lowercase =model_class(config=lowerCamelCase__ ) for name, param in model.named_parameters(): if "embeddings" not in name and param.requires_grad: self.assertIn( ((param.data.mean() * 1e9).round() / 1e9).item() , [0.0, 1.0] , msg=F'''Parameter {name} of model {model_class} seems not properly initialized''' , ) @require_vision @require_torch class __lowerCAmelCase ( unittest.TestCase ): @cached_property def A__ ( self ) -> Tuple: '''simple docstring''' return AutoImageProcessor.from_pretrained('microsoft/focalnet-tiny' ) if is_vision_available() else None @slow def A__ ( self ) -> List[str]: '''simple docstring''' _lowercase =FocalNetForImageClassification.from_pretrained('microsoft/focalnet-tiny' ).to(lowerCamelCase__ ) _lowercase =self.default_image_processor _lowercase =Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' ) _lowercase =image_processor(images=lowerCamelCase__ , return_tensors='pt' ).to(lowerCamelCase__ ) # forward pass with 
torch.no_grad(): _lowercase =model(**lowerCamelCase__ ) # verify the logits _lowercase =torch.Size((1, 1_000) ) self.assertEqual(outputs.logits.shape , lowerCamelCase__ ) _lowercase =torch.tensor([0.2166, -0.4368, 0.2191] ).to(lowerCamelCase__ ) self.assertTrue(torch.allclose(outputs.logits[0, :3] , lowerCamelCase__ , atol=1e-4 ) ) self.assertTrue(outputs.logits.argmax(dim=-1 ).item() , 281 ) @require_torch class __lowerCAmelCase ( SCREAMING_SNAKE_CASE , unittest.TestCase ): _a = (FocalNetBackbone,) if is_torch_available() else () _a = FocalNetConfig _a = False def A__ ( self ) -> Union[str, Any]: '''simple docstring''' _lowercase =FocalNetModelTester(self )
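Outside the test harness, the integration check above reduces to a few lines of standard image-classification inference (checkpoint and image path taken from the test; the expected top class, index 281, is a tabby cat in the ImageNet label set):

```python
import torch
from PIL import Image
from transformers import AutoImageProcessor, FocalNetForImageClassification

processor = AutoImageProcessor.from_pretrained("microsoft/focalnet-tiny")
model = FocalNetForImageClassification.from_pretrained("microsoft/focalnet-tiny").eval()

image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
inputs = processor(images=image, return_tensors="pt")
with torch.no_grad():
    logits = model(**inputs).logits
print(model.config.id2label[logits.argmax(dim=-1).item()])
```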
291
import numpy as np
from matplotlib import pyplot as plt
from sklearn.datasets import load_iris
from sklearn.metrics import ConfusionMatrixDisplay
from sklearn.model_selection import train_test_split
from xgboost import XGBClassifier


def data_handling(data: dict) -> tuple:
    # Split the sklearn "bunch" into features and targets
    return (data["data"], data["target"])


def xgboost(features: np.ndarray, target: np.ndarray) -> XGBClassifier:
    classifier = XGBClassifier()
    classifier.fit(features, target)
    return classifier


def main() -> None:
    iris = load_iris()
    features, targets = data_handling(iris)
    x_train, x_test, y_train, y_test = train_test_split(features, targets, test_size=0.25)
    names = iris["target_names"]

    # Create an XGBoost classifier from the training data
    xgboost_classifier = xgboost(x_train, y_train)

    # Display the normalized confusion matrix of the classifier on the test set
    ConfusionMatrixDisplay.from_estimator(
        xgboost_classifier,
        x_test,
        y_test,
        display_labels=names,
        cmap="Blues",
        normalize="true",
    )
    plt.title("Normalized Confusion Matrix - IRIS Dataset")
    plt.show()


if __name__ == "__main__":
    import doctest

    doctest.testmod(verbose=True)
    main()
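A quick numeric check to accompany the confusion matrix, as a sketch only: it assumes the train/test split and helper names from the cleaned-up `main` above are in scope:

```python
from sklearn.metrics import accuracy_score

clf = xgboost(x_train, y_train)
print(f"test accuracy: {accuracy_score(y_test, clf.predict(x_test)):.3f}")
```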
45
0
"""simple docstring""" from math import factorial def a ( __snake_case : int, __snake_case : int ): '''simple docstring''' if n < k or k < 0: raise ValueError('''Please enter positive integers for n and k where n >= k''' ) return factorial(lowercase__ ) // (factorial(lowercase__ ) * factorial(n - k )) if __name__ == "__main__": print( "The number of five-card hands possible from a standard", f'''fifty-two card deck is: {combinations(52, 5)}\n''', ) print( "If a class of 40 students must be arranged into groups of", f'''4 for group projects, there are {combinations(40, 4)} ways''', "to arrange them.\n", ) print( "If 10 teams are competing in a Formula One race, there", f'''are {combinations(10, 3)} ways that first, second and''', "third place can be awarded.", )
608
import multiprocessing
import time

from arguments import PretokenizationArguments
from datasets import load_dataset

from transformers import AutoTokenizer, HfArgumentParser


def tokenize(example):
    output = {}
    output["input_ids"] = tokenizer(example["content"], truncation=False)["input_ids"]
    # Characters per token: a rough proxy for tokenizer fit on this file
    output["ratio_char_token"] = len(example["content"]) / len(output["input_ids"])
    return output


parser = HfArgumentParser(PretokenizationArguments)
args = parser.parse_args()
if args.num_workers is None:
    args.num_workers = multiprocessing.cpu_count()
tokenizer = AutoTokenizer.from_pretrained(args.tokenizer_dir)

t_start = time.time()
ds = load_dataset(args.dataset_name, split="train")
print(f"Dataset loaded in {time.time()-t_start:.2f}s")

t_start = time.time()
ds = ds.map(
    tokenize,
    num_proc=args.num_workers,
    remove_columns=[
        "repo_name",
        "path",
        "copies",
        "size",
        "content",
        "license",
        "hash",
        "line_mean",
        "line_max",
        "alpha_frac",
        "autogenerated",
    ],
)
print(f"Dataset tokenized in {time.time()-t_start:.2f}s")

t_start = time.time()
ds.push_to_hub(args.tokenized_data_repo)
print(f"Data pushed to the hub in {time.time()-t_start:.2f}s")
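The characters-per-token ratio stored above is useful for spotting poorly tokenized files; a sketch of a summary pass (the column name assumes the `tokenize` function as reconstructed above):

```python
import numpy as np

ratios = np.array(ds["ratio_char_token"])
print(f"mean chars/token: {ratios.mean():.2f}, 5th percentile: {np.percentile(ratios, 5):.2f}")
```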
45
0
import functools import operator from ...configuration_utils import PretrainedConfig from ...utils import logging __lowerCamelCase : Tuple = logging.get_logger(__name__) __lowerCamelCase : Any = { """asapp/sew-d-tiny-100k""": """https://huggingface.co/asapp/sew-d-tiny-100k/resolve/main/config.json""", # See all SEW-D models at https://huggingface.co/models?filter=sew-d } class _lowercase ( _A ): _a : Tuple = """sew-d""" def __init__( self , a=3_2 , a=7_6_8 , a=1_2 , a=1_2 , a=3_0_7_2 , a=2 , a=5_1_2 , a=2_5_6 , a=True , a=True , a=("p2c", "c2p") , a="layer_norm" , a="gelu_python" , a=0.1 , a=0.1 , a=0.1 , a=0.0 , a=0.1 , a=0.02 , a=1e-7 , a=1e-5 , a="group" , a="gelu" , a=(6_4, 1_2_8, 1_2_8, 1_2_8, 1_2_8, 2_5_6, 2_5_6, 2_5_6, 2_5_6, 5_1_2, 5_1_2, 5_1_2, 5_1_2) , a=(5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1) , a=(1_0, 3, 1, 3, 1, 3, 1, 3, 1, 2, 1, 2, 1) , a=False , a=1_2_8 , a=1_6 , a=True , a=0.05 , a=1_0 , a=2 , a=0.0 , a=1_0 , a=0 , a="mean" , a=False , a=False , a=2_5_6 , a=0 , a=1 , a=2 , **a , ): super().__init__(**lowerCamelCase__ , pad_token_id=lowerCamelCase__ , bos_token_id=lowerCamelCase__ , eos_token_id=lowerCamelCase__ ) snake_case__ : Optional[int] =hidden_size snake_case__ : Optional[Any] =feat_extract_norm snake_case__ : Any =feat_extract_activation snake_case__ : Any =list(lowerCamelCase__ ) snake_case__ : Optional[Any] =list(lowerCamelCase__ ) snake_case__ : Any =list(lowerCamelCase__ ) snake_case__ : str =conv_bias snake_case__ : Any =num_conv_pos_embeddings snake_case__ : List[str] =num_conv_pos_embedding_groups snake_case__ : List[Any] =len(self.conv_dim ) snake_case__ : Union[str, Any] =num_hidden_layers snake_case__ : Dict =intermediate_size snake_case__ : str =squeeze_factor snake_case__ : Optional[Any] =max_position_embeddings snake_case__ : Tuple =position_buckets snake_case__ : int =share_att_key snake_case__ : str =relative_attention snake_case__ : List[str] =norm_rel_ebd snake_case__ : Any =list(lowerCamelCase__ ) snake_case__ : Optional[Any] =hidden_act snake_case__ : List[Any] =num_attention_heads snake_case__ : Tuple =hidden_dropout snake_case__ : int =attention_dropout snake_case__ : List[str] =activation_dropout snake_case__ : Optional[int] =feat_proj_dropout snake_case__ : int =final_dropout snake_case__ : Optional[int] =layer_norm_eps snake_case__ : str =feature_layer_norm_eps snake_case__ : Union[str, Any] =initializer_range snake_case__ : int =vocab_size if ( (len(self.conv_stride ) != self.num_feat_extract_layers) or (len(self.conv_kernel ) != self.num_feat_extract_layers) or (len(self.conv_dim ) != self.num_feat_extract_layers) ): raise ValueError( """Configuration for convolutional layers is incorrect.""" """It is required that `len(config.conv_dim)` == `len(config.conv_stride)` == `len(config.conv_kernel)`,""" F"but is `len(config.conv_dim) = {len(self.conv_dim )}`, `len(config.conv_stride)" F"= {len(self.conv_stride )}`, `len(config.conv_kernel) = {len(self.conv_kernel )}`." 
) # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779 snake_case__ : int =apply_spec_augment snake_case__ : int =mask_time_prob snake_case__ : List[Any] =mask_time_length snake_case__ : List[str] =mask_time_min_masks snake_case__ : int =mask_feature_prob snake_case__ : Optional[int] =mask_feature_length snake_case__ : str =mask_feature_min_masks # ctc loss snake_case__ : str =ctc_loss_reduction snake_case__ : Optional[int] =ctc_zero_infinity # sequence classification snake_case__ : str =use_weighted_layer_sum snake_case__ : int =classifier_proj_size @property def lowercase__ ( self ): return functools.reduce(operator.mul , self.conv_stride , 1 )
385
def stooge_sort(arr: list) -> list:
    stooge(arr, 0, len(arr) - 1)
    return arr


def stooge(arr: list, i: int, h: int) -> None:
    if i >= h:
        return

    # If the first element is larger than the last, swap them
    if arr[i] > arr[h]:
        arr[i], arr[h] = arr[h], arr[i]

    # If there are more than 2 elements in the array
    if h - i + 1 > 2:
        t = (h - i + 1) // 3

        # Recursively sort first 2/3 elements
        stooge(arr, i, h - t)

        # Recursively sort last 2/3 elements
        stooge(arr, i + t, h)

        # Recursively sort first 2/3 elements
        stooge(arr, i, h - t)


if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
    print(stooge_sort(unsorted))
45
0
import logging import os import sys from dataclasses import dataclass, field from typing import Optional import torch from datasets import load_dataset from torchvision.transforms import Compose, Lambda, Normalize, RandomHorizontalFlip, RandomResizedCrop, ToTensor from torchvision.transforms.functional import InterpolationMode import transformers from transformers import ( HfArgumentParser, Trainer, TrainingArguments, ViTImageProcessor, ViTMAEConfig, ViTMAEForPreTraining, ) from transformers.trainer_utils import get_last_checkpoint from transformers.utils import check_min_version, send_example_telemetry from transformers.utils.versions import require_version _lowerCamelCase : List[str] = logging.getLogger(__name__) # Will error if the minimal version of Transformers is not installed. Remove at your own risks. check_min_version('''4.31.0''') require_version('''datasets>=1.8.0''', '''To fix: pip install -r examples/pytorch/image-pretraining/requirements.txt''') @dataclass class lowerCamelCase : """simple docstring""" UpperCAmelCase_ = field( default="cifar10" , metadata={"help": "Name of a dataset from the datasets package"} ) UpperCAmelCase_ = field( default=__lowerCamelCase , metadata={"help": "The configuration name of the dataset to use (via the datasets library)."} ) UpperCAmelCase_ = field( default=__lowerCamelCase , metadata={"help": "The column name of the images in the files."} ) UpperCAmelCase_ = field(default=__lowerCamelCase , metadata={"help": "A folder containing the training data."} ) UpperCAmelCase_ = field(default=__lowerCamelCase , metadata={"help": "A folder containing the validation data."} ) UpperCAmelCase_ = field( default=0.15 , metadata={"help": "Percent to split off of train for validation."} ) UpperCAmelCase_ = field( default=__lowerCamelCase , metadata={ "help": ( "For debugging purposes or quicker training, truncate the number of training examples to this " "value if set." ) } , ) UpperCAmelCase_ = field( default=__lowerCamelCase , metadata={ "help": ( "For debugging purposes or quicker training, truncate the number of evaluation examples to this " "value if set." ) } , ) def A_ ( self : List[str] ) -> str: """simple docstring""" SCREAMING_SNAKE_CASE__ : Optional[Any] = {} if self.train_dir is not None: SCREAMING_SNAKE_CASE__ : int = self.train_dir if self.validation_dir is not None: SCREAMING_SNAKE_CASE__ : List[str] = self.validation_dir SCREAMING_SNAKE_CASE__ : Optional[int] = data_files if data_files else None @dataclass class lowerCamelCase : """simple docstring""" UpperCAmelCase_ = field( default=__lowerCamelCase , metadata={ "help": ( "The model checkpoint for weights initialization.Don't set if you want to train a model from scratch." ) } , ) UpperCAmelCase_ = field( default=__lowerCamelCase , metadata={"help": "Pretrained config name or path if not the same as model_name_or_path"} ) UpperCAmelCase_ = field( default=__lowerCamelCase , metadata={ "help": ( "Override some existing default config settings when a model is trained from scratch. 
Example: " "n_embd=10,resid_pdrop=0.2,scale_attn_weights=false,summary_type=cls_index" ) } , ) UpperCAmelCase_ = field( default=__lowerCamelCase , metadata={"help": "Where do you want to store the pretrained models downloaded from s3"} ) UpperCAmelCase_ = field( default="main" , metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."} , ) UpperCAmelCase_ = field(default=__lowerCamelCase , metadata={"help": "Name or path of preprocessor config."} ) UpperCAmelCase_ = field( default=__lowerCamelCase , metadata={ "help": ( "Will use the token generated when running `huggingface-cli login` (necessary to use this script " "with private models)." ) } , ) UpperCAmelCase_ = field( default=0.75 , metadata={"help": "The ratio of the number of masked tokens in the input sequence."} ) UpperCAmelCase_ = field( default=__lowerCamelCase , metadata={"help": "Whether or not to train with normalized pixel values as target."} ) @dataclass class lowerCamelCase (__lowerCamelCase ): """simple docstring""" UpperCAmelCase_ = field( default=1E-3 , metadata={"help": "Base learning rate: absolute_lr = base_lr * total_batch_size / 256."} ) def _a ( SCREAMING_SNAKE_CASE__ : Union[str, Any] ) -> Dict: '''simple docstring''' SCREAMING_SNAKE_CASE__ : Union[str, Any] = torch.stack([example["pixel_values"] for example in examples] ) return {"pixel_values": pixel_values} def _a ( ) -> Optional[int]: '''simple docstring''' SCREAMING_SNAKE_CASE__ : Optional[int] = HfArgumentParser((ModelArguments, DataTrainingArguments, CustomTrainingArguments) ) if len(sys.argv ) == 2 and sys.argv[1].endswith(".json" ): # If we pass only one argument to the script and it's the path to a json file, # let's parse it to get our arguments. SCREAMING_SNAKE_CASE__ : str = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) ) else: SCREAMING_SNAKE_CASE__ : Union[str, Any] = parser.parse_args_into_dataclasses() # Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The # information sent is the one passed as arguments along with your Python/PyTorch versions. send_example_telemetry("run_mae" , lowercase__ , lowercase__ ) # Setup logging logging.basicConfig( format="%(asctime)s - %(levelname)s - %(name)s - %(message)s" , datefmt="%m/%d/%Y %H:%M:%S" , handlers=[logging.StreamHandler(sys.stdout )] , ) if training_args.should_log: # The default of training_args.log_level is passive, so we set log level at info here to have that default. transformers.utils.logging.set_verbosity_info() SCREAMING_SNAKE_CASE__ : List[str] = training_args.get_process_log_level() logger.setLevel(lowercase__ ) transformers.utils.logging.set_verbosity(lowercase__ ) transformers.utils.logging.enable_default_handler() transformers.utils.logging.enable_explicit_format() # Log on each process the small summary: logger.warning( f'''Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}''' + f'''distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}''' ) logger.info(f'''Training/evaluation parameters {training_args}''' ) # Detecting last checkpoint. 
SCREAMING_SNAKE_CASE__ : Union[str, Any] = None if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir: SCREAMING_SNAKE_CASE__ : List[str] = get_last_checkpoint(training_args.output_dir ) if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0: raise ValueError( f'''Output directory ({training_args.output_dir}) already exists and is not empty. ''' "Use --overwrite_output_dir to overcome." ) elif last_checkpoint is not None and training_args.resume_from_checkpoint is None: logger.info( f'''Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change ''' "the `--output_dir` or add `--overwrite_output_dir` to train from scratch." ) # Initialize our dataset. SCREAMING_SNAKE_CASE__ : Tuple = load_dataset( data_args.dataset_name , data_args.dataset_config_name , data_files=data_args.data_files , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , ) # If we don't have a validation split, split off a percentage of train as validation. SCREAMING_SNAKE_CASE__ : int = None if """validation""" in ds.keys() else data_args.train_val_split if isinstance(data_args.train_val_split , lowercase__ ) and data_args.train_val_split > 0.0: SCREAMING_SNAKE_CASE__ : Optional[Any] = ds["""train"""].train_test_split(data_args.train_val_split ) SCREAMING_SNAKE_CASE__ : Union[str, Any] = split["""train"""] SCREAMING_SNAKE_CASE__ : Any = split["""test"""] # Load pretrained model and image processor # # Distributed training: # The .from_pretrained methods guarantee that only one local process can concurrently # download model & vocab. SCREAMING_SNAKE_CASE__ : Optional[int] = { """cache_dir""": model_args.cache_dir, """revision""": model_args.model_revision, """use_auth_token""": True if model_args.use_auth_token else None, } if model_args.config_name: SCREAMING_SNAKE_CASE__ : Any = ViTMAEConfig.from_pretrained(model_args.config_name , **lowercase__ ) elif model_args.model_name_or_path: SCREAMING_SNAKE_CASE__ : Union[str, Any] = ViTMAEConfig.from_pretrained(model_args.model_name_or_path , **lowercase__ ) else: SCREAMING_SNAKE_CASE__ : Optional[Any] = ViTMAEConfig() logger.warning("You are instantiating a new config instance from scratch." 
) if model_args.config_overrides is not None: logger.info(f'''Overriding config: {model_args.config_overrides}''' ) config.update_from_string(model_args.config_overrides ) logger.info(f'''New config: {config}''' ) # adapt config config.update( { "mask_ratio": model_args.mask_ratio, "norm_pix_loss": model_args.norm_pix_loss, } ) # create image processor if model_args.image_processor_name: SCREAMING_SNAKE_CASE__ : str = ViTImageProcessor.from_pretrained(model_args.image_processor_name , **lowercase__ ) elif model_args.model_name_or_path: SCREAMING_SNAKE_CASE__ : Dict = ViTImageProcessor.from_pretrained(model_args.model_name_or_path , **lowercase__ ) else: SCREAMING_SNAKE_CASE__ : Tuple = ViTImageProcessor() # create model if model_args.model_name_or_path: SCREAMING_SNAKE_CASE__ : Any = ViTMAEForPreTraining.from_pretrained( model_args.model_name_or_path , from_tf=bool(".ckpt" in model_args.model_name_or_path ) , config=lowercase__ , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , ) else: logger.info("Training new model from scratch" ) SCREAMING_SNAKE_CASE__ : Optional[int] = ViTMAEForPreTraining(lowercase__ ) if training_args.do_train: SCREAMING_SNAKE_CASE__ : Optional[Any] = ds["""train"""].column_names else: SCREAMING_SNAKE_CASE__ : Union[str, Any] = ds["""validation"""].column_names if data_args.image_column_name is not None: SCREAMING_SNAKE_CASE__ : Union[str, Any] = data_args.image_column_name elif "image" in column_names: SCREAMING_SNAKE_CASE__ : Optional[Any] = """image""" elif "img" in column_names: SCREAMING_SNAKE_CASE__ : List[str] = """img""" else: SCREAMING_SNAKE_CASE__ : List[Any] = column_names[0] # transformations as done in original MAE paper # source: https://github.com/facebookresearch/mae/blob/main/main_pretrain.py if "shortest_edge" in image_processor.size: SCREAMING_SNAKE_CASE__ : List[str] = image_processor.size["""shortest_edge"""] else: SCREAMING_SNAKE_CASE__ : int = (image_processor.size["""height"""], image_processor.size["""width"""]) SCREAMING_SNAKE_CASE__ : Any = Compose( [ Lambda(lambda SCREAMING_SNAKE_CASE__ : img.convert("RGB" ) if img.mode != "RGB" else img ), RandomResizedCrop(lowercase__ , scale=(0.2, 1.0) , interpolation=InterpolationMode.BICUBIC ), RandomHorizontalFlip(), ToTensor(), Normalize(mean=image_processor.image_mean , std=image_processor.image_std ), ] ) def preprocess_images(SCREAMING_SNAKE_CASE__ : Tuple ): SCREAMING_SNAKE_CASE__ : List[Any] = [transforms(lowercase__ ) for image in examples[image_column_name]] return examples if training_args.do_train: if "train" not in ds: raise ValueError("--do_train requires a train dataset" ) if data_args.max_train_samples is not None: SCREAMING_SNAKE_CASE__ : Optional[int] = ds["""train"""].shuffle(seed=training_args.seed ).select(range(data_args.max_train_samples ) ) # Set the training transforms ds["train"].set_transform(lowercase__ ) if training_args.do_eval: if "validation" not in ds: raise ValueError("--do_eval requires a validation dataset" ) if data_args.max_eval_samples is not None: SCREAMING_SNAKE_CASE__ : Optional[Any] = ( ds["""validation"""].shuffle(seed=training_args.seed ).select(range(data_args.max_eval_samples ) ) ) # Set the validation transforms ds["validation"].set_transform(lowercase__ ) # Compute absolute learning rate SCREAMING_SNAKE_CASE__ : Tuple = ( training_args.train_batch_size * training_args.gradient_accumulation_steps * training_args.world_size ) if training_args.base_learning_rate is not None: 
SCREAMING_SNAKE_CASE__ : Any = training_args.base_learning_rate * total_train_batch_size / 2_56 # Initialize our trainer SCREAMING_SNAKE_CASE__ : Union[str, Any] = Trainer( model=lowercase__ , args=lowercase__ , train_dataset=ds["train"] if training_args.do_train else None , eval_dataset=ds["validation"] if training_args.do_eval else None , tokenizer=lowercase__ , data_collator=lowercase__ , ) # Training if training_args.do_train: SCREAMING_SNAKE_CASE__ : Any = None if training_args.resume_from_checkpoint is not None: SCREAMING_SNAKE_CASE__ : int = training_args.resume_from_checkpoint elif last_checkpoint is not None: SCREAMING_SNAKE_CASE__ : Dict = last_checkpoint SCREAMING_SNAKE_CASE__ : Union[str, Any] = trainer.train(resume_from_checkpoint=lowercase__ ) trainer.save_model() trainer.log_metrics("train" , train_result.metrics ) trainer.save_metrics("train" , train_result.metrics ) trainer.save_state() # Evaluation if training_args.do_eval: SCREAMING_SNAKE_CASE__ : int = trainer.evaluate() trainer.log_metrics("eval" , lowercase__ ) trainer.save_metrics("eval" , lowercase__ ) # Write model card and (optionally) push to hub SCREAMING_SNAKE_CASE__ : Optional[int] = { """tasks""": """masked-auto-encoding""", """dataset""": data_args.dataset_name, """tags""": ["""masked-auto-encoding"""], } if training_args.push_to_hub: trainer.push_to_hub(**lowercase__ ) else: trainer.create_model_card(**lowercase__ ) def _a ( SCREAMING_SNAKE_CASE__ : Union[str, Any] ) -> Dict: '''simple docstring''' main() if __name__ == "__main__": main()
663
import argparse
import json
import os

from tensorflow.core.protobuf.saved_model_pb2 import SavedModel


# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_copies.py
REPO_PATH = "."

# Internal TensorFlow ops that can be safely ignored (mostly specific to a saved model)
INTERNAL_OPS = [
    "Assert",
    "AssignVariableOp",
    "EmptyTensorList",
    "MergeV2Checkpoints",
    "ReadVariableOp",
    "ResourceGather",
    "RestoreV2",
    "SaveV2",
    "ShardedFilename",
    "StatefulPartitionedCall",
    "StaticRegexFullMatch",
    "VarHandleOp",
]


def onnx_compliancy(saved_model_path, strict, opset):
    saved_model = SavedModel()
    onnx_ops = []

    with open(os.path.join(REPO_PATH, "utils", "tf_ops", "onnx.json")) as f:
        onnx_opsets = json.load(f)["opsets"]

    for i in range(1, opset + 1):
        onnx_ops.extend(onnx_opsets[str(i)])

    with open(saved_model_path, "rb") as f:
        saved_model.ParseFromString(f.read())

    model_op_names = set()

    # Iterate over every metagraph in case there is more than one (a saved model can contain multiple graphs)
    for meta_graph in saved_model.meta_graphs:
        # Add operations in the graph definition
        model_op_names.update(node.op for node in meta_graph.graph_def.node)

        # Go through the functions in the graph definition
        for func in meta_graph.graph_def.library.function:
            # Add operations in each function
            model_op_names.update(node.op for node in func.node_def)

    # Convert to list, sorted if you want
    model_op_names = sorted(model_op_names)
    incompatible_ops = []

    for op in model_op_names:
        if op not in onnx_ops and op not in INTERNAL_OPS:
            incompatible_ops.append(op)

    if strict and len(incompatible_ops) > 0:
        raise Exception(
            f"Found the following incompatible ops for the opset {opset}:\n" + "\n".join(incompatible_ops)
        )
    elif len(incompatible_ops) > 0:
        print(f"Found the following incompatible ops for the opset {opset}:")
        print(*incompatible_ops, sep="\n")
    else:
        print(f"The saved model {saved_model_path} can properly be converted with ONNX.")


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--saved_model_path", help="Path of the saved model to check (the .pb file).")
    parser.add_argument(
        "--opset", default=12, type=int, help="The ONNX opset against which the model has to be tested."
    )
    parser.add_argument(
        "--framework", choices=["onnx"], default="onnx", help="Frameworks against which to test the saved model."
    )
    parser.add_argument(
        "--strict", action="store_true", help="Whether make the checking strict (raise errors) or not (raise warnings)"
    )
    args = parser.parse_args()

    if args.framework == "onnx":
        onnx_compliancy(args.saved_model_path, args.strict, args.opset)
45
0
"""simple docstring""" import argparse import json from pathlib import Path import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import ( SwiftFormerConfig, SwiftFormerForImageClassification, ViTImageProcessor, ) from transformers.utils import logging logging.set_verbosity_info() UpperCAmelCase_ : Any = logging.get_logger(__name__) UpperCAmelCase_ : Optional[int] = torch.device("""cpu""") def _A () -> Dict: """simple docstring""" SCREAMING_SNAKE_CASE_ : Tuple = """http://images.cocodataset.org/val2017/000000039769.jpg""" SCREAMING_SNAKE_CASE_ : Union[str, Any] = Image.open(requests.get(lowercase__ , stream=lowercase__ ).raw ) return im def _A (__a ) -> Any: """simple docstring""" if swiftformer_name == "swiftformer_xs": return torch.tensor([-2.1703e00, 2.1107e00, -2.0811e00, 8.8685e-01, 2.4360e-01] ) elif swiftformer_name == "swiftformer_s": return torch.tensor([3.9636e-01, 2.3478e-01, -1.6963e00, -1.7381e00, -8.6337e-01] ) elif swiftformer_name == "swiftformer_l1": return torch.tensor([-4.2768e-01, -4.7429e-01, -1.0897e00, -1.0248e00, 3.5523e-02] ) elif swiftformer_name == "swiftformer_l3": return torch.tensor([-2.5330e-01, 2.4211e-01, -6.0185e-01, -8.2789e-01, -6.0446e-02] ) def _A (__a , __a , __a ) -> List[Any]: """simple docstring""" SCREAMING_SNAKE_CASE_ : Tuple = dct.pop(lowercase__ ) SCREAMING_SNAKE_CASE_ : Any = val def _A (__a ) -> int: """simple docstring""" SCREAMING_SNAKE_CASE_ : int = [] for k in state_dict.keys(): SCREAMING_SNAKE_CASE_ : Dict = k if ".pwconv" in k: SCREAMING_SNAKE_CASE_ : Any = k_new.replace('''.pwconv''' , '''.point_wise_conv''' ) if ".dwconv" in k: SCREAMING_SNAKE_CASE_ : str = k_new.replace('''.dwconv''' , '''.depth_wise_conv''' ) if ".Proj." in k: SCREAMING_SNAKE_CASE_ : Union[str, Any] = k_new.replace('''.Proj.''' , '''.proj.''' ) if "patch_embed" in k_new: SCREAMING_SNAKE_CASE_ : int = k_new.replace('''patch_embed''' , '''swiftformer.patch_embed.patch_embedding''' ) if "network" in k_new: SCREAMING_SNAKE_CASE_ : Union[str, Any] = k_new.split('''.''' ) if ls[2].isdigit(): SCREAMING_SNAKE_CASE_ : List[str] = """swiftformer.encoder.network.""" + ls[1] + """.blocks.""" + ls[2] + """.""" + """.""".join(ls[3:] ) else: SCREAMING_SNAKE_CASE_ : List[Any] = k_new.replace('''network''' , '''swiftformer.encoder.network''' ) rename_keys.append((k, k_new) ) return rename_keys @torch.no_grad() def _A (__a , __a , __a ) -> List[Any]: """simple docstring""" SCREAMING_SNAKE_CASE_ : str = SwiftFormerConfig() # dataset (ImageNet-21k only or also fine-tuned on ImageNet 2012), patch_size and image_size SCREAMING_SNAKE_CASE_ : Any = 10_00 SCREAMING_SNAKE_CASE_ : int = """huggingface/label-files""" SCREAMING_SNAKE_CASE_ : Dict = """imagenet-1k-id2label.json""" SCREAMING_SNAKE_CASE_ : Dict = json.load(open(hf_hub_download(lowercase__ , lowercase__ , repo_type='''dataset''' ) , '''r''' ) ) SCREAMING_SNAKE_CASE_ : str = {int(lowercase__ ): v for k, v in idalabel.items()} SCREAMING_SNAKE_CASE_ : Optional[int] = idalabel SCREAMING_SNAKE_CASE_ : str = {v: k for k, v in idalabel.items()} # size of the architecture if swiftformer_name == "swiftformer_xs": SCREAMING_SNAKE_CASE_ : Optional[int] = [3, 3, 6, 4] SCREAMING_SNAKE_CASE_ : Dict = [48, 56, 1_12, 2_20] elif swiftformer_name == "swiftformer_s": SCREAMING_SNAKE_CASE_ : Tuple = [3, 3, 9, 6] SCREAMING_SNAKE_CASE_ : Tuple = [48, 64, 1_68, 2_24] elif swiftformer_name == "swiftformer_l1": SCREAMING_SNAKE_CASE_ : int = [4, 3, 10, 5] SCREAMING_SNAKE_CASE_ : Dict = [48, 96, 
1_92, 3_84] elif swiftformer_name == "swiftformer_l3": SCREAMING_SNAKE_CASE_ : Any = [4, 4, 12, 6] SCREAMING_SNAKE_CASE_ : Union[str, Any] = [64, 1_28, 3_20, 5_12] # load state_dict of original model, remove and rename some keys if original_ckpt: if original_ckpt.startswith('''https''' ): SCREAMING_SNAKE_CASE_ : Any = torch.hub.load_state_dict_from_url(lowercase__ , map_location='''cpu''' , check_hash=lowercase__ ) else: SCREAMING_SNAKE_CASE_ : str = torch.load(lowercase__ , map_location='''cpu''' ) SCREAMING_SNAKE_CASE_ : Optional[Any] = checkpoint SCREAMING_SNAKE_CASE_ : Optional[int] = create_rename_keys(lowercase__ ) for rename_key_src, rename_key_dest in rename_keys: rename_key(lowercase__ , lowercase__ , lowercase__ ) # load HuggingFace model SCREAMING_SNAKE_CASE_ : Optional[int] = SwiftFormerForImageClassification(lowercase__ ).eval() hf_model.load_state_dict(lowercase__ ) # prepare test inputs SCREAMING_SNAKE_CASE_ : Dict = prepare_img() SCREAMING_SNAKE_CASE_ : int = ViTImageProcessor.from_pretrained('''preprocessor_config''' ) SCREAMING_SNAKE_CASE_ : Union[str, Any] = processor(images=lowercase__ , return_tensors='''pt''' ) # compare outputs from both models SCREAMING_SNAKE_CASE_ : int = get_expected_output(lowercase__ ) SCREAMING_SNAKE_CASE_ : Dict = hf_model(inputs['''pixel_values'''] ).logits assert hf_logits.shape == torch.Size([1, 10_00] ) assert torch.allclose(hf_logits[0, 0:5] , lowercase__ , atol=1e-3 ) Path(lowercase__ ).mkdir(exist_ok=lowercase__ ) print(f'Saving model {swiftformer_name} to {pytorch_dump_folder_path}' ) hf_model.save_pretrained(lowercase__ ) if __name__ == "__main__": UpperCAmelCase_ : List[Any] = argparse.ArgumentParser() # Required parameters parser.add_argument( """--swiftformer_name""", default="""swiftformer_xs""", choices=["""swiftformer_xs""", """swiftformer_s""", """swiftformer_l1""", """swiftformer_l3"""], type=str, help="""Name of the SwiftFormer model you'd like to convert.""", ) parser.add_argument( """--pytorch_dump_folder_path""", default="""./converted_outputs/""", type=str, help="""Path to the output PyTorch model directory.""", ) parser.add_argument("""--original_ckpt""", default=None, type=str, help="""Path to the original model checkpoint.""") UpperCAmelCase_ : Optional[int] = parser.parse_args() convert_swiftformer_checkpoint(args.swiftformer_name, args.pytorch_dump_folder_path, args.original_ckpt)
512
from __future__ import annotations


def decrypt_caesar_with_chi_squared(
    ciphertext: str,
    cipher_alphabet: list[str] | None = None,
    frequencies_dict: dict[str, float] | None = None,
    case_sensitive: bool = False,
) -> tuple[int, float, str]:
    alphabet_letters = cipher_alphabet or [chr(i) for i in range(97, 123)]

    # If the argument is None or the user provided an empty dictionary
    if not frequencies_dict:
        # Frequencies of letters in the english language (how much they show up)
        frequencies = {
            "a": 0.08497,
            "b": 0.01492,
            "c": 0.02202,
            "d": 0.04253,
            "e": 0.11162,
            "f": 0.02228,
            "g": 0.02015,
            "h": 0.06094,
            "i": 0.07546,
            "j": 0.00153,
            "k": 0.01292,
            "l": 0.04025,
            "m": 0.02406,
            "n": 0.06749,
            "o": 0.07507,
            "p": 0.01929,
            "q": 0.00095,
            "r": 0.07587,
            "s": 0.06327,
            "t": 0.09356,
            "u": 0.02758,
            "v": 0.00978,
            "w": 0.02560,
            "x": 0.00150,
            "y": 0.01994,
            "z": 0.00077,
        }
    else:
        # Custom frequencies dictionary
        frequencies = frequencies_dict

    if not case_sensitive:
        ciphertext = ciphertext.lower()

    # Chi squared statistic values
    chi_squared_statistic_values: dict[int, tuple[float, str]] = {}

    # cycle through all of the shifts
    for shift in range(len(alphabet_letters)):
        decrypted_with_shift = ""

        # decrypt the message with the shift
        for letter in ciphertext:
            try:
                # Try to index the letter in the alphabet
                new_key = (alphabet_letters.index(letter.lower()) - shift) % len(
                    alphabet_letters
                )
                decrypted_with_shift += (
                    alphabet_letters[new_key].upper()
                    if case_sensitive and letter.isupper()
                    else alphabet_letters[new_key]
                )
            except ValueError:
                # Append the character if it isn't in the alphabet
                decrypted_with_shift += letter

        chi_squared_statistic = 0.0

        # Loop through each letter in the decoded message with the shift
        for letter in decrypted_with_shift:
            if case_sensitive:
                letter = letter.lower()
                if letter in frequencies:
                    # Get the amount of times the letter occurs in the message
                    occurrences = decrypted_with_shift.lower().count(letter)

                    # Get the expected amount of times the letter should appear based
                    # on letter frequencies
                    expected = frequencies[letter] * occurrences

                    # Complete the chi squared statistic formula
                    chi_letter_value = ((occurrences - expected) ** 2) / expected

                    # Add the margin of error to the total chi squared statistic
                    chi_squared_statistic += chi_letter_value
            else:
                if letter.lower() in frequencies:
                    # Get the amount of times the letter occurs in the message
                    occurrences = decrypted_with_shift.count(letter)

                    # Get the expected amount of times the letter should appear based
                    # on letter frequencies
                    expected = frequencies[letter] * occurrences

                    # Complete the chi squared statistic formula
                    chi_letter_value = ((occurrences - expected) ** 2) / expected

                    # Add the margin of error to the total chi squared statistic
                    chi_squared_statistic += chi_letter_value

        # Add the data to the chi_squared_statistic_values dictionary
        chi_squared_statistic_values[shift] = (
            chi_squared_statistic,
            decrypted_with_shift,
        )

    # Get the most likely cipher by finding the cipher with the smallest chi squared
    # statistic
    def chi_squared_statistic_values_sorting_key(key: int) -> tuple[float, str]:
        return chi_squared_statistic_values[key]

    most_likely_cipher: int = min(
        chi_squared_statistic_values,
        key=chi_squared_statistic_values_sorting_key,
    )

    # Get all the data from the most likely cipher (key, decoded message)
    (
        most_likely_cipher_chi_squared_value,
        decoded_most_likely_cipher,
    ) = chi_squared_statistic_values[most_likely_cipher]

    # Return the data on the most likely shift
    return (
        most_likely_cipher,
        most_likely_cipher_chi_squared_value,
        decoded_most_likely_cipher,
    )
45
0
import numpy as np import torch import tqdm from ...models.unet_ad import UNetaDModel from ...pipelines import DiffusionPipeline from ...utils import randn_tensor from ...utils.dummy_pt_objects import DDPMScheduler class UpperCamelCase_ ( __UpperCamelCase ): """simple docstring""" def __init__( self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , ): super().__init__() __lowerCamelCase = value_function __lowerCamelCase = unet __lowerCamelCase = scheduler __lowerCamelCase = env __lowerCamelCase = env.get_dataset() __lowerCamelCase = {} for key in self.data.keys(): try: __lowerCamelCase = self.data[key].mean() except: # noqa: E722 pass __lowerCamelCase = {} for key in self.data.keys(): try: __lowerCamelCase = self.data[key].std() except: # noqa: E722 pass __lowerCamelCase = env.observation_space.shape[0] __lowerCamelCase = env.action_space.shape[0] def lowerCamelCase_ ( self , UpperCAmelCase , UpperCAmelCase ): return (x_in - self.means[key]) / self.stds[key] def lowerCamelCase_ ( self , UpperCAmelCase , UpperCAmelCase ): return x_in * self.stds[key] + self.means[key] def lowerCamelCase_ ( self , UpperCAmelCase ): if type(lowerCamelCase__ ) is dict: return {k: self.to_torch(lowerCamelCase__ ) for k, v in x_in.items()} elif torch.is_tensor(lowerCamelCase__ ): return x_in.to(self.unet.device ) return torch.tensor(lowerCamelCase__ , device=self.unet.device ) def lowerCamelCase_ ( self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ): for key, val in cond.items(): __lowerCamelCase = val.clone() return x_in def lowerCamelCase_ ( self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ): __lowerCamelCase = x.shape[0] __lowerCamelCase = None for i in tqdm.tqdm(self.scheduler.timesteps ): # create batch of timesteps to pass into model __lowerCamelCase = torch.full((batch_size,) , lowerCamelCase__ , device=self.unet.device , dtype=torch.long ) for _ in range(lowerCamelCase__ ): with torch.enable_grad(): x.requires_grad_() # permute to match dimension for pre-trained models __lowerCamelCase = self.value_function(x.permute(0 , 2 , 1 ) , lowerCamelCase__ ).sample __lowerCamelCase = torch.autograd.grad([y.sum()] , [x] )[0] __lowerCamelCase = self.scheduler._get_variance(lowerCamelCase__ ) __lowerCamelCase = torch.exp(0.5 * posterior_variance ) __lowerCamelCase = model_std * grad __lowerCamelCase = 0 __lowerCamelCase = x.detach() __lowerCamelCase = x + scale * grad __lowerCamelCase = self.reset_xa(lowerCamelCase__ , lowerCamelCase__ , self.action_dim ) __lowerCamelCase = self.unet(x.permute(0 , 2 , 1 ) , lowerCamelCase__ ).sample.permute(0 , 2 , 1 ) # TODO: verify deprecation of this kwarg __lowerCamelCase = self.scheduler.step(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , predict_epsilon=lowerCamelCase__ )["""prev_sample"""] # apply conditions to the trajectory (set the initial state) __lowerCamelCase = self.reset_xa(lowerCamelCase__ , lowerCamelCase__ , self.action_dim ) __lowerCamelCase = self.to_torch(lowerCamelCase__ ) return x, y def __call__( self , UpperCAmelCase , UpperCAmelCase=6_4 , UpperCAmelCase=3_2 , UpperCAmelCase=2 , UpperCAmelCase=0.1 ): # normalize the observations and create batch dimension __lowerCamelCase = self.normalize(lowerCamelCase__ , """observations""" ) __lowerCamelCase = obs[None].repeat(lowerCamelCase__ , axis=0 ) __lowerCamelCase = {0: self.to_torch(lowerCamelCase__ )} __lowerCamelCase = (batch_size, planning_horizon, self.state_dim + self.action_dim) # generate initial noise and apply our conditions (to make the 
trajectories start at current state) __lowerCamelCase = randn_tensor(lowerCamelCase__ , device=self.unet.device ) __lowerCamelCase = self.reset_xa(lowerCamelCase__ , lowerCamelCase__ , self.action_dim ) __lowerCamelCase = self.to_torch(lowerCamelCase__ ) # run the diffusion process __lowerCamelCase = self.run_diffusion(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) # sort output trajectories by value __lowerCamelCase = y.argsort(0 , descending=lowerCamelCase__ ).squeeze() __lowerCamelCase = x[sorted_idx] __lowerCamelCase = sorted_values[:, :, : self.action_dim] __lowerCamelCase = actions.detach().cpu().numpy() __lowerCamelCase = self.de_normalize(lowerCamelCase__ , key="""actions""" ) # select the action with the highest value if y is not None: __lowerCamelCase = 0 else: # if we didn't run value guiding, select a random action __lowerCamelCase = np.random.randint(0 , lowerCamelCase__ ) __lowerCamelCase = denorm_actions[selected_index, 0] return denorm_actions
479
import warnings

from ...utils import logging
from .image_processing_mobilevit import MobileViTImageProcessor


logger = logging.get_logger(__name__)


class MobileViTFeatureExtractor(MobileViTImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class MobileViTFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use MobileViTImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
45
0
from math import sqrt


def is_prime(number: int) -> bool:
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False

    # All primes are of the form 6k +/- 1
    for i in range(5, int(sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True


def solution(nth: int = 10001) -> int:
    count = 0
    number = 1
    while count != nth and number < 3:
        number += 1
        if is_prime(number):
            count += 1
    while count != nth:
        number += 2
        if is_prime(number):
            count += 1
    return number


if __name__ == "__main__":
    print(f"{solution() = }")
531
import json import sys import tempfile import unittest from pathlib import Path import transformers from transformers import ( CONFIG_MAPPING, FEATURE_EXTRACTOR_MAPPING, AutoConfig, AutoFeatureExtractor, WavaVecaConfig, WavaVecaFeatureExtractor, ) from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, get_tests_dir sys.path.append(str(Path(__file__).parent.parent.parent.parent / "utils")) from test_module.custom_configuration import CustomConfig # noqa E402 from test_module.custom_feature_extraction import CustomFeatureExtractor # noqa E402 UpperCamelCase = get_tests_dir("fixtures") UpperCamelCase = get_tests_dir("fixtures/dummy_feature_extractor_config.json") UpperCamelCase = get_tests_dir("fixtures/dummy-config.json") class lowerCAmelCase_ ( unittest.TestCase ): """simple docstring""" def __a ( self :Optional[int] ): UpperCamelCase__ :Optional[int] = 0 def __a ( self :str ): UpperCamelCase__ :Optional[Any] = AutoFeatureExtractor.from_pretrained("""facebook/wav2vec2-base-960h""" ) self.assertIsInstance(lowerCamelCase__ , lowerCamelCase__ ) def __a ( self :Dict ): UpperCamelCase__ :Dict = AutoFeatureExtractor.from_pretrained(lowerCamelCase__ ) self.assertIsInstance(lowerCamelCase__ , lowerCamelCase__ ) def __a ( self :Optional[int] ): with tempfile.TemporaryDirectory() as tmpdirname: UpperCamelCase__ :List[str] = WavaVecaConfig() # remove feature_extractor_type to make sure config.json alone is enough to load feature processor locally UpperCamelCase__ :Tuple = AutoFeatureExtractor.from_pretrained(lowerCamelCase__ ).to_dict() config_dict.pop("""feature_extractor_type""" ) UpperCamelCase__ :Union[str, Any] = WavaVecaFeatureExtractor(**lowerCamelCase__ ) # save in new folder model_config.save_pretrained(lowerCamelCase__ ) config.save_pretrained(lowerCamelCase__ ) UpperCamelCase__ :Optional[Any] = AutoFeatureExtractor.from_pretrained(lowerCamelCase__ ) # make sure private variable is not incorrectly saved UpperCamelCase__ :Tuple = json.loads(config.to_json_string() ) self.assertTrue("""_processor_class""" not in dict_as_saved ) self.assertIsInstance(lowerCamelCase__ , lowerCamelCase__ ) def __a ( self :Union[str, Any] ): UpperCamelCase__ :Dict = AutoFeatureExtractor.from_pretrained(lowerCamelCase__ ) self.assertIsInstance(lowerCamelCase__ , lowerCamelCase__ ) def __a ( self :Dict ): with self.assertRaisesRegex( lowerCamelCase__ , """bert-base is not a local folder and is not a valid model identifier""" ): UpperCamelCase__ :Dict = AutoFeatureExtractor.from_pretrained("""bert-base""" ) def __a ( self :List[Any] ): with self.assertRaisesRegex( lowerCamelCase__ , r"""aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)""" ): UpperCamelCase__ :Optional[int] = AutoFeatureExtractor.from_pretrained(lowerCamelCase__ , revision="""aaaaaa""" ) def __a ( self :int ): with self.assertRaisesRegex( lowerCamelCase__ , """hf-internal-testing/config-no-model does not appear to have a file named preprocessor_config.json.""" , ): UpperCamelCase__ :List[Any] = AutoFeatureExtractor.from_pretrained("""hf-internal-testing/config-no-model""" ) def __a ( self :Optional[int] ): # If remote code is not set, we will time out when asking whether to load the model. with self.assertRaises(lowerCamelCase__ ): UpperCamelCase__ :List[Any] = AutoFeatureExtractor.from_pretrained( """hf-internal-testing/test_dynamic_feature_extractor""" ) # If remote code is disabled, we can't load this config. 
with self.assertRaises(lowerCamelCase__ ): UpperCamelCase__ :Optional[Any] = AutoFeatureExtractor.from_pretrained( """hf-internal-testing/test_dynamic_feature_extractor""" , trust_remote_code=lowerCamelCase__ ) UpperCamelCase__ :str = AutoFeatureExtractor.from_pretrained( """hf-internal-testing/test_dynamic_feature_extractor""" , trust_remote_code=lowerCamelCase__ ) self.assertEqual(feature_extractor.__class__.__name__ , """NewFeatureExtractor""" ) # Test feature extractor can be reloaded. with tempfile.TemporaryDirectory() as tmp_dir: feature_extractor.save_pretrained(lowerCamelCase__ ) UpperCamelCase__ :Any = AutoFeatureExtractor.from_pretrained(lowerCamelCase__ , trust_remote_code=lowerCamelCase__ ) self.assertEqual(reloaded_feature_extractor.__class__.__name__ , """NewFeatureExtractor""" ) def __a ( self :Dict ): try: AutoConfig.register("""custom""" , lowerCamelCase__ ) AutoFeatureExtractor.register(lowerCamelCase__ , lowerCamelCase__ ) # Trying to register something existing in the Transformers library will raise an error with self.assertRaises(lowerCamelCase__ ): AutoFeatureExtractor.register(lowerCamelCase__ , lowerCamelCase__ ) # Now that the config is registered, it can be used as any other config with the auto-API UpperCamelCase__ :Any = CustomFeatureExtractor.from_pretrained(lowerCamelCase__ ) with tempfile.TemporaryDirectory() as tmp_dir: feature_extractor.save_pretrained(lowerCamelCase__ ) UpperCamelCase__ :List[Any] = AutoFeatureExtractor.from_pretrained(lowerCamelCase__ ) self.assertIsInstance(lowerCamelCase__ , lowerCamelCase__ ) finally: if "custom" in CONFIG_MAPPING._extra_content: del CONFIG_MAPPING._extra_content["custom"] if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content: del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig] def __a ( self :Optional[int] ): class lowerCAmelCase_ ( lowercase ): """simple docstring""" _snake_case : Optional[int] = True try: AutoConfig.register("""custom""" , lowerCamelCase__ ) AutoFeatureExtractor.register(lowerCamelCase__ , lowerCamelCase__ ) # If remote code is not set, the default is to use local UpperCamelCase__ :Optional[Any] = AutoFeatureExtractor.from_pretrained( """hf-internal-testing/test_dynamic_feature_extractor""" ) self.assertEqual(feature_extractor.__class__.__name__ , """NewFeatureExtractor""" ) self.assertTrue(feature_extractor.is_local ) # If remote code is disabled, we load the local one. UpperCamelCase__ :str = AutoFeatureExtractor.from_pretrained( """hf-internal-testing/test_dynamic_feature_extractor""" , trust_remote_code=lowerCamelCase__ ) self.assertEqual(feature_extractor.__class__.__name__ , """NewFeatureExtractor""" ) self.assertTrue(feature_extractor.is_local ) # If remote is enabled, we load from the Hub UpperCamelCase__ :Optional[int] = AutoFeatureExtractor.from_pretrained( """hf-internal-testing/test_dynamic_feature_extractor""" , trust_remote_code=lowerCamelCase__ ) self.assertEqual(feature_extractor.__class__.__name__ , """NewFeatureExtractor""" ) self.assertTrue(not hasattr(lowerCamelCase__ , """is_local""" ) ) finally: if "custom" in CONFIG_MAPPING._extra_content: del CONFIG_MAPPING._extra_content["custom"] if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content: del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]
45
0
import argparse
import json
import subprocess


def get_runner_status(target_runners, token):
    offline_runners = []
    cmd = (
        f'curl -H "Accept: application/vnd.github+json" -H "Authorization: Bearer {token}"'
        " https://api.github.com/repos/huggingface/transformers/actions/runners"
    )
    output = subprocess.run(cmd, shell=True, stdout=subprocess.PIPE)
    o = output.stdout.decode("utf-8")
    status = json.loads(o)

    runners = status["runners"]
    for runner in runners:
        if runner["name"] in target_runners:
            if runner["status"] == "offline":
                offline_runners.append(runner)

    # save the result so we can report them on Slack
    with open("offline_runners.txt", "w") as fp:
        fp.write(json.dumps(offline_runners))

    if len(offline_runners) > 0:
        failed = "\n".join([x["name"] for x in offline_runners])
        raise ValueError(f"The following runners are offline:\n{failed}")


if __name__ == "__main__":

    def list_str(values):
        return values.split(",")

    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--target_runners",
        default=None,
        type=list_str,
        required=True,
        help="Comma-separated list of runners to check status.",
    )
    parser.add_argument(
        "--token", default=None, type=str, required=True, help="A token that has actions:read permission."
    )
    args = parser.parse_args()

    get_runner_status(args.target_runners, args.token)
561
import numpy as np import torch import tqdm from ...models.unet_ad import UNetaDModel from ...pipelines import DiffusionPipeline from ...utils import randn_tensor from ...utils.dummy_pt_objects import DDPMScheduler class lowerCAmelCase_ ( lowercase ): """simple docstring""" def __init__( self :int , lowerCamelCase__ :UNetaDModel , lowerCamelCase__ :UNetaDModel , lowerCamelCase__ :DDPMScheduler , lowerCamelCase__ :List[Any] , ): super().__init__() UpperCamelCase__ :Tuple = value_function UpperCamelCase__ :Optional[int] = unet UpperCamelCase__ :List[str] = scheduler UpperCamelCase__ :Dict = env UpperCamelCase__ :Dict = env.get_dataset() UpperCamelCase__ :Union[str, Any] = {} for key in self.data.keys(): try: UpperCamelCase__ :int = self.data[key].mean() except: # noqa: E722 pass UpperCamelCase__ :Any = {} for key in self.data.keys(): try: UpperCamelCase__ :int = self.data[key].std() except: # noqa: E722 pass UpperCamelCase__ :List[Any] = env.observation_space.shape[0] UpperCamelCase__ :List[str] = env.action_space.shape[0] def __a ( self :Union[str, Any] , lowerCamelCase__ :List[str] , lowerCamelCase__ :str ): return (x_in - self.means[key]) / self.stds[key] def __a ( self :int , lowerCamelCase__ :Optional[Any] , lowerCamelCase__ :Tuple ): return x_in * self.stds[key] + self.means[key] def __a ( self :Any , lowerCamelCase__ :int ): if type(lowerCamelCase__ ) is dict: return {k: self.to_torch(lowerCamelCase__ ) for k, v in x_in.items()} elif torch.is_tensor(lowerCamelCase__ ): return x_in.to(self.unet.device ) return torch.tensor(lowerCamelCase__ , device=self.unet.device ) def __a ( self :Union[str, Any] , lowerCamelCase__ :List[Any] , lowerCamelCase__ :Optional[Any] , lowerCamelCase__ :Tuple ): for key, val in cond.items(): UpperCamelCase__ :str = val.clone() return x_in def __a ( self :Union[str, Any] , lowerCamelCase__ :List[Any] , lowerCamelCase__ :int , lowerCamelCase__ :Optional[int] , lowerCamelCase__ :Optional[int] ): UpperCamelCase__ :Any = x.shape[0] UpperCamelCase__ :List[Any] = None for i in tqdm.tqdm(self.scheduler.timesteps ): # create batch of timesteps to pass into model UpperCamelCase__ :Optional[Any] = torch.full((batch_size,) , lowerCamelCase__ , device=self.unet.device , dtype=torch.long ) for _ in range(lowerCamelCase__ ): with torch.enable_grad(): x.requires_grad_() # permute to match dimension for pre-trained models UpperCamelCase__ :Dict = self.value_function(x.permute(0 , 2 , 1 ) , lowerCamelCase__ ).sample UpperCamelCase__ :List[Any] = torch.autograd.grad([y.sum()] , [x] )[0] UpperCamelCase__ :Union[str, Any] = self.scheduler._get_variance(lowerCamelCase__ ) UpperCamelCase__ :Any = torch.exp(0.5 * posterior_variance ) UpperCamelCase__ :Dict = model_std * grad UpperCamelCase__ :Optional[Any] = 0 UpperCamelCase__ :Dict = x.detach() UpperCamelCase__ :int = x + scale * grad UpperCamelCase__ :int = self.reset_xa(lowerCamelCase__ , lowerCamelCase__ , self.action_dim ) UpperCamelCase__ :List[str] = self.unet(x.permute(0 , 2 , 1 ) , lowerCamelCase__ ).sample.permute(0 , 2 , 1 ) # TODO: verify deprecation of this kwarg UpperCamelCase__ :List[str] = self.scheduler.step(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , predict_epsilon=lowerCamelCase__ )["""prev_sample"""] # apply conditions to the trajectory (set the initial state) UpperCamelCase__ :Optional[Any] = self.reset_xa(lowerCamelCase__ , lowerCamelCase__ , self.action_dim ) UpperCamelCase__ :Optional[int] = self.to_torch(lowerCamelCase__ ) return x, y def __call__( self :Optional[Any] , lowerCamelCase__ 
:Optional[int] , lowerCamelCase__ :str=64 , lowerCamelCase__ :Tuple=32 , lowerCamelCase__ :Dict=2 , lowerCamelCase__ :str=0.1 ): # normalize the observations and create batch dimension UpperCamelCase__ :List[str] = self.normalize(lowerCamelCase__ , """observations""" ) UpperCamelCase__ :List[str] = obs[None].repeat(lowerCamelCase__ , axis=0 ) UpperCamelCase__ :int = {0: self.to_torch(lowerCamelCase__ )} UpperCamelCase__ :Dict = (batch_size, planning_horizon, self.state_dim + self.action_dim) # generate initial noise and apply our conditions (to make the trajectories start at current state) UpperCamelCase__ :Any = randn_tensor(lowerCamelCase__ , device=self.unet.device ) UpperCamelCase__ :Optional[int] = self.reset_xa(lowerCamelCase__ , lowerCamelCase__ , self.action_dim ) UpperCamelCase__ :List[Any] = self.to_torch(lowerCamelCase__ ) # run the diffusion process UpperCamelCase__ , UpperCamelCase__ :Union[str, Any] = self.run_diffusion(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) # sort output trajectories by value UpperCamelCase__ :List[Any] = y.argsort(0 , descending=lowerCamelCase__ ).squeeze() UpperCamelCase__ :Dict = x[sorted_idx] UpperCamelCase__ :Tuple = sorted_values[:, :, : self.action_dim] UpperCamelCase__ :Optional[Any] = actions.detach().cpu().numpy() UpperCamelCase__ :Optional[int] = self.de_normalize(lowerCamelCase__ , key="""actions""" ) # select the action with the highest value if y is not None: UpperCamelCase__ :List[str] = 0 else: # if we didn't run value guiding, select a random action UpperCamelCase__ :Dict = np.random.randint(0 , lowerCamelCase__ ) UpperCamelCase__ :Tuple = denorm_actions[selected_index, 0] return denorm_actions
45
0
def one_pence() -> int:
    return 1


def two_pence(x: int) -> int:
    return 0 if x < 0 else two_pence(x - 2) + one_pence()


def five_pence(x: int) -> int:
    return 0 if x < 0 else five_pence(x - 5) + two_pence(x)


def ten_pence(x: int) -> int:
    return 0 if x < 0 else ten_pence(x - 10) + five_pence(x)


def twenty_pence(x: int) -> int:
    return 0 if x < 0 else twenty_pence(x - 20) + ten_pence(x)


def fifty_pence(x: int) -> int:
    return 0 if x < 0 else fifty_pence(x - 50) + twenty_pence(x)


def one_pound(x: int) -> int:
    return 0 if x < 0 else one_pound(x - 100) + fifty_pence(x)


def two_pound(x: int) -> int:
    return 0 if x < 0 else two_pound(x - 200) + one_pound(x)


def solution(pence: int = 200) -> int:
    return two_pound(pence)


if __name__ == "__main__":
    print(solution(int(input().strip())))
71
def is_palindrome(num: int) -> bool:
    """
    Return True if `num` reads the same forwards and backwards.

    >>> is_palindrome(121)
    True
    >>> is_palindrome(-121)
    False
    """
    if num < 0:
        return False

    num_copy = num
    rev_num = 0
    while num > 0:
        rev_num = rev_num * 10 + (num % 10)
        num //= 10

    return num_copy == rev_num


if __name__ == "__main__":
    import doctest

    doctest.testmod()
45
0
# Author: OMKAR PATHAK, Nwachukwu Chidiebere

# Use a Python dictionary to construct the graph.
from __future__ import annotations

from pprint import pformat
from typing import Generic, TypeVar

T = TypeVar("T")


class GraphAdjacencyList(Generic[T]):
    def __init__(self, directed: bool = True) -> None:
        self.adj_list: dict[T, list[T]] = {}  # dictionary of lists
        self.directed = directed

    def add_edge(self, source_vertex: T, destination_vertex: T) -> GraphAdjacencyList[T]:
        if not self.directed:  # For undirected graphs
            # if both source vertex and destination vertex are both present in the
            # adjacency list, add destination vertex to source vertex list of adjacent
            # vertices and add source vertex to destination vertex list of adjacent
            # vertices.
            if source_vertex in self.adj_list and destination_vertex in self.adj_list:
                self.adj_list[source_vertex].append(destination_vertex)
                self.adj_list[destination_vertex].append(source_vertex)
            # if only source vertex is present in adjacency list, add destination vertex
            # to source vertex list of adjacent vertices, then create a new vertex with
            # destination vertex as key and assign a list containing the source vertex
            # as it's first adjacent vertex.
            elif source_vertex in self.adj_list:
                self.adj_list[source_vertex].append(destination_vertex)
                self.adj_list[destination_vertex] = [source_vertex]
            # if only destination vertex is present in adjacency list, add source vertex
            # to destination vertex list of adjacent vertices, then create a new vertex
            # with source vertex as key and assign a list containing the destination
            # vertex as it's first adjacent vertex.
            elif destination_vertex in self.adj_list:
                self.adj_list[destination_vertex].append(source_vertex)
                self.adj_list[source_vertex] = [destination_vertex]
            # if both source vertex and destination vertex are not present in adjacency
            # list, create a new vertex with source vertex as key and assign a list
            # containing the destination vertex as it's first adjacent vertex also
            # create a new vertex with destination vertex as key and assign a list
            # containing the source vertex as it's first adjacent vertex.
            else:
                self.adj_list[source_vertex] = [destination_vertex]
                self.adj_list[destination_vertex] = [source_vertex]
        else:  # For directed graphs
            # if both source vertex and destination vertex are present in adjacency
            # list, add destination vertex to source vertex list of adjacent vertices.
            if source_vertex in self.adj_list and destination_vertex in self.adj_list:
                self.adj_list[source_vertex].append(destination_vertex)
            # if only source vertex is present in adjacency list, add destination
            # vertex to source vertex list of adjacent vertices and create a new vertex
            # with destination vertex as key, which has no adjacent vertex
            elif source_vertex in self.adj_list:
                self.adj_list[source_vertex].append(destination_vertex)
                self.adj_list[destination_vertex] = []
            # if only destination vertex is present in adjacency list, create a new
            # vertex with source vertex as key and assign a list containing destination
            # vertex as first adjacent vertex
            elif destination_vertex in self.adj_list:
                self.adj_list[source_vertex] = [destination_vertex]
            # if both source vertex and destination vertex are not present in adjacency
            # list, create a new vertex with source vertex as key and a list containing
            # destination vertex as it's first adjacent vertex. Then create a new vertex
            # with destination vertex as key, which has no adjacent vertex
            else:
                self.adj_list[source_vertex] = [destination_vertex]
                self.adj_list[destination_vertex] = []

        return self

    def __repr__(self) -> str:
        return pformat(self.adj_list)
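A minimal usage sketch for the adjacency-list class above, added for illustration and not part of the original file; the vertex values are arbitrary placeholders:

# build a small directed graph; add_edge returns self, so calls can be chained
graph = GraphAdjacencyList[int]()
graph.add_edge(0, 1).add_edge(1, 2)
print(graph)  # {0: [1], 1: [2], 2: []}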
556
from __future__ import annotations


def all_unique(nums: list[int]) -> bool:
    return len(set(nums)) == len(nums)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
45
0
from typing import Optional
from urllib.parse import quote

import huggingface_hub as hfh
from packaging import version


def hf_hub_url(repo_id: str, path: str, revision: Optional[str] = None) -> str:
    if version.parse(hfh.__version__).release < version.parse("0.11.0").release:
        # old versions of hfh don't url-encode the file path
        path = quote(path)
    return hfh.hf_hub_url(repo_id, path, repo_type="dataset", revision=revision)
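A brief usage sketch of the helper above, added for illustration; the repo id and file path are made-up placeholders, and the exact URL returned depends on the installed huggingface_hub version:

# hypothetical example: build the Hub URL for a file inside a dataset repo
url = hf_hub_url("user/my-dataset", "data/train.csv", revision="main")
print(url)  # e.g. https://huggingface.co/datasets/user/my-dataset/resolve/main/data/train.csv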
16
from __future__ import annotations class lowerCAmelCase_ : """simple docstring""" def __init__( self :List[Any] , lowerCamelCase__ :int = 0 ): UpperCamelCase__ :List[str] = key def __a ( self :Optional[Any] , lowerCamelCase__ :str , lowerCamelCase__ :int ): assert isinstance(lowerCamelCase__ , lowerCamelCase__ ) and isinstance(lowerCamelCase__ , lowerCamelCase__ ) UpperCamelCase__ :List[str] = key or self.__key or 1 # make sure key is an appropriate size key %= 2_55 return [chr(ord(lowerCamelCase__ ) ^ key ) for ch in content] def __a ( self :int , lowerCamelCase__ :str , lowerCamelCase__ :int ): assert isinstance(lowerCamelCase__ , lowerCamelCase__ ) and isinstance(lowerCamelCase__ , lowerCamelCase__ ) UpperCamelCase__ :int = key or self.__key or 1 # make sure key is an appropriate size key %= 2_55 return [chr(ord(lowerCamelCase__ ) ^ key ) for ch in content] def __a ( self :Optional[Any] , lowerCamelCase__ :str , lowerCamelCase__ :int = 0 ): assert isinstance(lowerCamelCase__ , lowerCamelCase__ ) and isinstance(lowerCamelCase__ , lowerCamelCase__ ) UpperCamelCase__ :Dict = key or self.__key or 1 # make sure key can be any size while key > 2_55: key -= 2_55 # This will be returned UpperCamelCase__ :List[str] = """""" for ch in content: ans += chr(ord(lowerCamelCase__ ) ^ key ) return ans def __a ( self :Any , lowerCamelCase__ :str , lowerCamelCase__ :int = 0 ): assert isinstance(lowerCamelCase__ , lowerCamelCase__ ) and isinstance(lowerCamelCase__ , lowerCamelCase__ ) UpperCamelCase__ :Tuple = key or self.__key or 1 # make sure key can be any size while key > 2_55: key -= 2_55 # This will be returned UpperCamelCase__ :Optional[int] = """""" for ch in content: ans += chr(ord(lowerCamelCase__ ) ^ key ) return ans def __a ( self :Optional[Any] , lowerCamelCase__ :str , lowerCamelCase__ :int = 0 ): assert isinstance(lowerCamelCase__ , lowerCamelCase__ ) and isinstance(lowerCamelCase__ , lowerCamelCase__ ) try: with open(lowerCamelCase__ ) as fin, open("""encrypt.out""" , """w+""" ) as fout: # actual encrypt-process for line in fin: fout.write(self.encrypt_string(lowerCamelCase__ , lowerCamelCase__ ) ) except OSError: return False return True def __a ( self :Dict , lowerCamelCase__ :str , lowerCamelCase__ :int ): assert isinstance(lowerCamelCase__ , lowerCamelCase__ ) and isinstance(lowerCamelCase__ , lowerCamelCase__ ) try: with open(lowerCamelCase__ ) as fin, open("""decrypt.out""" , """w+""" ) as fout: # actual encrypt-process for line in fin: fout.write(self.decrypt_string(lowerCamelCase__ , lowerCamelCase__ ) ) except OSError: return False return True # Tests # crypt = XORCipher() # key = 67 # # test encrypt # print(crypt.encrypt("hallo welt",key)) # # test decrypt # print(crypt.decrypt(crypt.encrypt("hallo welt",key), key)) # # test encrypt_string # print(crypt.encrypt_string("hallo welt",key)) # # test decrypt_string # print(crypt.decrypt_string(crypt.encrypt_string("hallo welt",key),key)) # if (crypt.encrypt_file("test.txt",key)): # print("encrypt successful") # else: # print("encrypt unsuccessful") # if (crypt.decrypt_file("encrypt.out",key)): # print("decrypt successful") # else: # print("decrypt unsuccessful")
45
0
from __future__ import annotations import math import random from collections.abc import Collection from typing import overload class __lowerCAmelCase : def __init__( self , lowerCAmelCase = None ) -> Dict: '''simple docstring''' if components is None: _lowercase =[] _lowercase =list(lowerCamelCase__ ) def __len__( self ) -> Optional[int]: '''simple docstring''' return len(self.__components ) def __str__( self ) -> Optional[int]: '''simple docstring''' return "(" + ",".join(map(lowerCamelCase__ , self.__components ) ) + ")" def __add__( self , lowerCAmelCase ) -> Union[str, Any]: '''simple docstring''' _lowercase =len(self ) if size == len(lowerCamelCase__ ): _lowercase =[self.__components[i] + other.component(lowerCamelCase__ ) for i in range(lowerCamelCase__ )] return Vector(lowerCamelCase__ ) else: raise Exception('must have the same size' ) def __sub__( self , lowerCAmelCase ) -> str: '''simple docstring''' _lowercase =len(self ) if size == len(lowerCamelCase__ ): _lowercase =[self.__components[i] - other.component(lowerCamelCase__ ) for i in range(lowerCamelCase__ )] return Vector(lowerCamelCase__ ) else: # error case raise Exception('must have the same size' ) @overload def __mul__( self , lowerCAmelCase ) -> Dict: '''simple docstring''' ... @overload def __mul__( self , lowerCAmelCase ) -> List[Any]: '''simple docstring''' ... def __mul__( self , lowerCAmelCase ) -> Optional[Any]: '''simple docstring''' if isinstance(lowerCamelCase__ , (float, int) ): _lowercase =[c * other for c in self.__components] return Vector(lowerCamelCase__ ) elif isinstance(lowerCamelCase__ , lowerCamelCase__ ) and len(self ) == len(lowerCamelCase__ ): _lowercase =len(self ) _lowercase =[self.__components[i] * other.component(lowerCamelCase__ ) for i in range(lowerCamelCase__ )] return sum(lowerCamelCase__ ) else: # error case raise Exception('invalid operand!' 
) def A__ ( self ) -> int: '''simple docstring''' return Vector(self.__components ) def A__ ( self , lowerCAmelCase ) -> Any: '''simple docstring''' if isinstance(lowerCamelCase__ , lowerCamelCase__ ) and -len(self.__components ) <= i < len(self.__components ): return self.__components[i] else: raise Exception('index out of range' ) def A__ ( self , lowerCAmelCase , lowerCAmelCase ) -> List[str]: '''simple docstring''' assert -len(self.__components ) <= pos < len(self.__components ) _lowercase =value def A__ ( self ) -> Any: '''simple docstring''' if len(self.__components ) == 0: raise Exception('Vector is empty' ) _lowercase =[c**2 for c in self.__components] return math.sqrt(sum(lowerCamelCase__ ) ) def A__ ( self , lowerCAmelCase , lowerCAmelCase = False ) -> List[Any]: '''simple docstring''' _lowercase =self * other _lowercase =self.euclidean_length() * other.euclidean_length() if deg: return math.degrees(math.acos(num / den ) ) else: return math.acos(num / den ) def a ( A__ : int ) -> Vector: """simple docstring""" assert isinstance(lowercase__ , lowercase__ ) return Vector([0] * dimension ) def a ( A__ : int , A__ : int ) -> Vector: """simple docstring""" assert isinstance(lowercase__ , lowercase__ ) and (isinstance(lowercase__ , lowercase__ )) _lowercase =[0] * dimension _lowercase =1 return Vector(lowercase__ ) def a ( A__ : float , A__ : Vector , A__ : Vector ) -> Vector: """simple docstring""" assert ( isinstance(lowercase__ , lowercase__ ) and isinstance(lowercase__ , lowercase__ ) and (isinstance(lowercase__ , (int, float) )) ) return x * scalar + y def a ( A__ : int , A__ : int , A__ : int ) -> Vector: """simple docstring""" random.seed(lowercase__ ) _lowercase =[random.randint(lowercase__ , lowercase__ ) for _ in range(lowercase__ )] return Vector(lowercase__ ) class __lowerCAmelCase : def __init__( self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ) -> List[Any]: '''simple docstring''' _lowercase =matrix _lowercase =w _lowercase =h def __str__( self ) -> str: '''simple docstring''' _lowercase ="""""" for i in range(self.__height ): ans += "|" for j in range(self.__width ): if j < self.__width - 1: ans += str(self.__matrix[i][j] ) + "," else: ans += str(self.__matrix[i][j] ) + "|\n" return ans def __add__( self , lowerCAmelCase ) -> List[Any]: '''simple docstring''' if self.__width == other.width() and self.__height == other.height(): _lowercase =[] for i in range(self.__height ): _lowercase =[ self.__matrix[i][j] + other.component(lowerCamelCase__ , lowerCamelCase__ ) for j in range(self.__width ) ] matrix.append(lowerCamelCase__ ) return Matrix(lowerCamelCase__ , self.__width , self.__height ) else: raise Exception('matrix must have the same dimension!' ) def __sub__( self , lowerCAmelCase ) -> int: '''simple docstring''' if self.__width == other.width() and self.__height == other.height(): _lowercase =[] for i in range(self.__height ): _lowercase =[ self.__matrix[i][j] - other.component(lowerCamelCase__ , lowerCamelCase__ ) for j in range(self.__width ) ] matrix.append(lowerCamelCase__ ) return Matrix(lowerCamelCase__ , self.__width , self.__height ) else: raise Exception('matrices must have the same dimension!' ) @overload def __mul__( self , lowerCAmelCase ) -> List[Any]: '''simple docstring''' ... @overload def __mul__( self , lowerCAmelCase ) -> Any: '''simple docstring''' ... 
def __mul__( self , lowerCAmelCase ) -> str: '''simple docstring''' if isinstance(lowerCamelCase__ , lowerCamelCase__ ): # matrix-vector if len(lowerCamelCase__ ) == self.__width: _lowercase =zero_vector(self.__height ) for i in range(self.__height ): _lowercase =[ self.__matrix[i][j] * other.component(lowerCamelCase__ ) for j in range(self.__width ) ] ans.change_component(lowerCamelCase__ , sum(lowerCamelCase__ ) ) return ans else: raise Exception( 'vector must have the same size as the ' 'number of columns of the matrix!' ) elif isinstance(lowerCamelCase__ , (int, float) ): # matrix-scalar _lowercase =[ [self.__matrix[i][j] * other for j in range(self.__width )] for i in range(self.__height ) ] return Matrix(lowerCamelCase__ , self.__width , self.__height ) return None def A__ ( self ) -> List[str]: '''simple docstring''' return self.__height def A__ ( self ) -> Optional[int]: '''simple docstring''' return self.__width def A__ ( self , lowerCAmelCase , lowerCAmelCase ) -> Optional[Any]: '''simple docstring''' if 0 <= x < self.__height and 0 <= y < self.__width: return self.__matrix[x][y] else: raise Exception('change_component: indices out of bounds' ) def A__ ( self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ) -> List[str]: '''simple docstring''' if 0 <= x < self.__height and 0 <= y < self.__width: _lowercase =value else: raise Exception('change_component: indices out of bounds' ) def A__ ( self , lowerCAmelCase , lowerCAmelCase ) -> List[str]: '''simple docstring''' if self.__height != self.__width: raise Exception('Matrix is not square' ) _lowercase =self.__matrix[:x] + self.__matrix[x + 1 :] for i in range(len(lowerCamelCase__ ) ): _lowercase =minor[i][:y] + minor[i][y + 1 :] return Matrix(lowerCamelCase__ , self.__width - 1 , self.__height - 1 ).determinant() def A__ ( self , lowerCAmelCase , lowerCAmelCase ) -> Tuple: '''simple docstring''' if self.__height != self.__width: raise Exception('Matrix is not square' ) if 0 <= x < self.__height and 0 <= y < self.__width: return (-1) ** (x + y) * self.minor(lowerCamelCase__ , lowerCamelCase__ ) else: raise Exception('Indices out of bounds' ) def A__ ( self ) -> Optional[int]: '''simple docstring''' if self.__height != self.__width: raise Exception('Matrix is not square' ) if self.__height < 1: raise Exception('Matrix has no element' ) elif self.__height == 1: return self.__matrix[0][0] elif self.__height == 2: return ( self.__matrix[0][0] * self.__matrix[1][1] - self.__matrix[0][1] * self.__matrix[1][0] ) else: _lowercase =[ self.__matrix[0][y] * self.cofactor(0 , lowerCamelCase__ ) for y in range(self.__width ) ] return sum(lowerCamelCase__ ) def a ( A__ : int ) -> Matrix: """simple docstring""" _lowercase =[[0] * n for _ in range(lowercase__ )] return Matrix(lowercase__ , lowercase__ , lowercase__ ) def a ( A__ : int , A__ : int , A__ : int , A__ : int ) -> Matrix: """simple docstring""" random.seed(lowercase__ ) _lowercase =[ [random.randint(lowercase__ , lowercase__ ) for _ in range(lowercase__ )] for _ in range(lowercase__ ) ] return Matrix(lowercase__ , lowercase__ , lowercase__ )
291
import random def A ( lowercase__ : Dict , lowercase__ : str , lowercase__ : Optional[Any] ) -> int: UpperCamelCase__ :List[Any] = a[left_index] UpperCamelCase__ :Dict = left_index + 1 for j in range(left_index + 1 , lowercase__ ): if a[j] < pivot: UpperCamelCase__ , UpperCamelCase__ :Optional[int] = a[i], a[j] i += 1 UpperCamelCase__ , UpperCamelCase__ :Tuple = a[i - 1], a[left_index] return i - 1 def A ( lowercase__ : Tuple , lowercase__ : Optional[int] , lowercase__ : Any ) -> Optional[int]: if left < right: UpperCamelCase__ :List[Any] = random.randint(lowercase__ , right - 1 ) UpperCamelCase__ , UpperCamelCase__ :Union[str, Any] = ( a[left], a[pivot], ) # switches the pivot with the left most bound UpperCamelCase__ :int = partition(lowercase__ , lowercase__ , lowercase__ ) quick_sort_random( lowercase__ , lowercase__ , lowercase__ ) # recursive quicksort to the left of the pivot point quick_sort_random( lowercase__ , pivot_index + 1 , lowercase__ ) # recursive quicksort to the right of the pivot point def A ( ) -> List[Any]: UpperCamelCase__ :str = input("""Enter numbers separated by a comma:\n""" ).strip() UpperCamelCase__ :int = [int(lowercase__ ) for item in user_input.split(""",""" )] quick_sort_random(lowercase__ , 0 , len(lowercase__ ) ) print(lowercase__ ) if __name__ == "__main__": main()
45
0
"""simple docstring""" import argparse import json import os import numpy as np import PIL import requests import tensorflow.keras.applications.efficientnet as efficientnet import torch from huggingface_hub import hf_hub_download from PIL import Image from tensorflow.keras.preprocessing import image from transformers import ( EfficientNetConfig, EfficientNetForImageClassification, EfficientNetImageProcessor, ) from transformers.utils import logging logging.set_verbosity_info() __lowerCamelCase = logging.get_logger(__name__) __lowerCamelCase = { "b0": efficientnet.EfficientNetBa, "b1": efficientnet.EfficientNetBa, "b2": efficientnet.EfficientNetBa, "b3": efficientnet.EfficientNetBa, "b4": efficientnet.EfficientNetBa, "b5": efficientnet.EfficientNetBa, "b6": efficientnet.EfficientNetBa, "b7": efficientnet.EfficientNetBa, } __lowerCamelCase = { "b0": { "hidden_dim": 12_80, "width_coef": 1.0, "depth_coef": 1.0, "image_size": 2_24, "dropout_rate": 0.2, "dw_padding": [], }, "b1": { "hidden_dim": 12_80, "width_coef": 1.0, "depth_coef": 1.1, "image_size": 2_40, "dropout_rate": 0.2, "dw_padding": [16], }, "b2": { "hidden_dim": 14_08, "width_coef": 1.1, "depth_coef": 1.2, "image_size": 2_60, "dropout_rate": 0.3, "dw_padding": [5, 8, 16], }, "b3": { "hidden_dim": 15_36, "width_coef": 1.2, "depth_coef": 1.4, "image_size": 3_00, "dropout_rate": 0.3, "dw_padding": [5, 18], }, "b4": { "hidden_dim": 17_92, "width_coef": 1.4, "depth_coef": 1.8, "image_size": 3_80, "dropout_rate": 0.4, "dw_padding": [6], }, "b5": { "hidden_dim": 20_48, "width_coef": 1.6, "depth_coef": 2.2, "image_size": 4_56, "dropout_rate": 0.4, "dw_padding": [13, 27], }, "b6": { "hidden_dim": 23_04, "width_coef": 1.8, "depth_coef": 2.6, "image_size": 5_28, "dropout_rate": 0.5, "dw_padding": [31], }, "b7": { "hidden_dim": 25_60, "width_coef": 2.0, "depth_coef": 3.1, "image_size": 6_00, "dropout_rate": 0.5, "dw_padding": [18], }, } def a ( __snake_case : Union[str, Any] ): '''simple docstring''' UpperCAmelCase_ :Optional[int] = EfficientNetConfig() UpperCAmelCase_ :Union[str, Any] = CONFIG_MAP[model_name]["""hidden_dim"""] UpperCAmelCase_ :int = CONFIG_MAP[model_name]["""width_coef"""] UpperCAmelCase_ :int = CONFIG_MAP[model_name]["""depth_coef"""] UpperCAmelCase_ :List[Any] = CONFIG_MAP[model_name]["""image_size"""] UpperCAmelCase_ :List[str] = CONFIG_MAP[model_name]["""dropout_rate"""] UpperCAmelCase_ :Optional[int] = CONFIG_MAP[model_name]["""dw_padding"""] UpperCAmelCase_ :str = """huggingface/label-files""" UpperCAmelCase_ :str = """imagenet-1k-id2label.json""" UpperCAmelCase_ :Tuple = 1000 UpperCAmelCase_ :Any = json.load(open(hf_hub_download(lowercase__, lowercase__, repo_type='''dataset''' ), '''r''' ) ) UpperCAmelCase_ :Union[str, Any] = {int(lowercase__ ): v for k, v in idalabel.items()} UpperCAmelCase_ :Any = idalabel UpperCAmelCase_ :Optional[Any] = {v: k for k, v in idalabel.items()} return config def a ( ): '''simple docstring''' UpperCAmelCase_ :Union[str, Any] = """http://images.cocodataset.org/val2017/000000039769.jpg""" UpperCAmelCase_ :int = Image.open(requests.get(lowercase__, stream=lowercase__ ).raw ) return im def a ( __snake_case : int ): '''simple docstring''' UpperCAmelCase_ :Union[str, Any] = CONFIG_MAP[model_name]["""image_size"""] UpperCAmelCase_ :Any = EfficientNetImageProcessor( size={'''height''': size, '''width''': size}, image_mean=[0.485, 0.456, 0.406], image_std=[0.47853944, 0.4732864, 0.47434163], do_center_crop=lowercase__, ) return preprocessor def a ( __snake_case : Union[str, Any] ): '''simple 
docstring''' UpperCAmelCase_ :Optional[int] = [v.split('''_''' )[0].split('''block''' )[1] for v in original_param_names if v.startswith('''block''' )] UpperCAmelCase_ :Union[str, Any] = sorted(set(lowercase__ ) ) UpperCAmelCase_ :Any = len(lowercase__ ) UpperCAmelCase_ :Tuple = {b: str(lowercase__ ) for b, i in zip(lowercase__, range(lowercase__ ) )} UpperCAmelCase_ :List[Any] = [] rename_keys.append(('''stem_conv/kernel:0''', '''embeddings.convolution.weight''') ) rename_keys.append(('''stem_bn/gamma:0''', '''embeddings.batchnorm.weight''') ) rename_keys.append(('''stem_bn/beta:0''', '''embeddings.batchnorm.bias''') ) rename_keys.append(('''stem_bn/moving_mean:0''', '''embeddings.batchnorm.running_mean''') ) rename_keys.append(('''stem_bn/moving_variance:0''', '''embeddings.batchnorm.running_var''') ) for b in block_names: UpperCAmelCase_ :str = block_name_mapping[b] rename_keys.append((f'block{b}_expand_conv/kernel:0', f'encoder.blocks.{hf_b}.expansion.expand_conv.weight') ) rename_keys.append((f'block{b}_expand_bn/gamma:0', f'encoder.blocks.{hf_b}.expansion.expand_bn.weight') ) rename_keys.append((f'block{b}_expand_bn/beta:0', f'encoder.blocks.{hf_b}.expansion.expand_bn.bias') ) rename_keys.append( (f'block{b}_expand_bn/moving_mean:0', f'encoder.blocks.{hf_b}.expansion.expand_bn.running_mean') ) rename_keys.append( (f'block{b}_expand_bn/moving_variance:0', f'encoder.blocks.{hf_b}.expansion.expand_bn.running_var') ) rename_keys.append( (f'block{b}_dwconv/depthwise_kernel:0', f'encoder.blocks.{hf_b}.depthwise_conv.depthwise_conv.weight') ) rename_keys.append((f'block{b}_bn/gamma:0', f'encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.weight') ) rename_keys.append((f'block{b}_bn/beta:0', f'encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.bias') ) rename_keys.append( (f'block{b}_bn/moving_mean:0', f'encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.running_mean') ) rename_keys.append( (f'block{b}_bn/moving_variance:0', f'encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.running_var') ) rename_keys.append((f'block{b}_se_reduce/kernel:0', f'encoder.blocks.{hf_b}.squeeze_excite.reduce.weight') ) rename_keys.append((f'block{b}_se_reduce/bias:0', f'encoder.blocks.{hf_b}.squeeze_excite.reduce.bias') ) rename_keys.append((f'block{b}_se_expand/kernel:0', f'encoder.blocks.{hf_b}.squeeze_excite.expand.weight') ) rename_keys.append((f'block{b}_se_expand/bias:0', f'encoder.blocks.{hf_b}.squeeze_excite.expand.bias') ) rename_keys.append( (f'block{b}_project_conv/kernel:0', f'encoder.blocks.{hf_b}.projection.project_conv.weight') ) rename_keys.append((f'block{b}_project_bn/gamma:0', f'encoder.blocks.{hf_b}.projection.project_bn.weight') ) rename_keys.append((f'block{b}_project_bn/beta:0', f'encoder.blocks.{hf_b}.projection.project_bn.bias') ) rename_keys.append( (f'block{b}_project_bn/moving_mean:0', f'encoder.blocks.{hf_b}.projection.project_bn.running_mean') ) rename_keys.append( (f'block{b}_project_bn/moving_variance:0', f'encoder.blocks.{hf_b}.projection.project_bn.running_var') ) rename_keys.append(('''top_conv/kernel:0''', '''encoder.top_conv.weight''') ) rename_keys.append(('''top_bn/gamma:0''', '''encoder.top_bn.weight''') ) rename_keys.append(('''top_bn/beta:0''', '''encoder.top_bn.bias''') ) rename_keys.append(('''top_bn/moving_mean:0''', '''encoder.top_bn.running_mean''') ) rename_keys.append(('''top_bn/moving_variance:0''', '''encoder.top_bn.running_var''') ) UpperCAmelCase_ :Tuple = {} for item in rename_keys: if item[0] in original_param_names: UpperCAmelCase_ :List[str] = 
"""efficientnet.""" + item[1] UpperCAmelCase_ :Dict = """classifier.weight""" UpperCAmelCase_ :int = """classifier.bias""" return key_mapping def a ( __snake_case : str, __snake_case : str, __snake_case : List[str] ): '''simple docstring''' for key, value in tf_params.items(): if "normalization" in key: continue UpperCAmelCase_ :Union[str, Any] = key_mapping[key] if "_conv" in key and "kernel" in key: UpperCAmelCase_ :int = torch.from_numpy(lowercase__ ).permute(3, 2, 0, 1 ) elif "depthwise_kernel" in key: UpperCAmelCase_ :List[str] = torch.from_numpy(lowercase__ ).permute(2, 3, 0, 1 ) elif "kernel" in key: UpperCAmelCase_ :Any = torch.from_numpy(np.transpose(lowercase__ ) ) else: UpperCAmelCase_ :Any = torch.from_numpy(lowercase__ ) # Replace HF parameters with original TF model parameters assert hf_params[hf_key].shape == new_hf_value.shape hf_params[hf_key].copy_(lowercase__ ) @torch.no_grad() def a ( __snake_case : Optional[Any], __snake_case : Dict, __snake_case : Optional[int], __snake_case : int ): '''simple docstring''' UpperCAmelCase_ :str = model_classes[model_name]( include_top=lowercase__, weights='''imagenet''', input_tensor=lowercase__, input_shape=lowercase__, pooling=lowercase__, classes=1000, classifier_activation='''softmax''', ) UpperCAmelCase_ :Optional[int] = original_model.trainable_variables UpperCAmelCase_ :Optional[Any] = original_model.non_trainable_variables UpperCAmelCase_ :Tuple = {param.name: param.numpy() for param in tf_params} for param in tf_non_train_params: UpperCAmelCase_ :int = param.numpy() UpperCAmelCase_ :Optional[int] = list(tf_params.keys() ) # Load HuggingFace model UpperCAmelCase_ :str = get_efficientnet_config(lowercase__ ) UpperCAmelCase_ :Any = EfficientNetForImageClassification(lowercase__ ).eval() UpperCAmelCase_ :List[Any] = hf_model.state_dict() # Create src-to-dst parameter name mapping dictionary print('''Converting parameters...''' ) UpperCAmelCase_ :Union[str, Any] = rename_keys(lowercase__ ) replace_params(lowercase__, lowercase__, lowercase__ ) # Initialize preprocessor and preprocess input image UpperCAmelCase_ :int = convert_image_processor(lowercase__ ) UpperCAmelCase_ :Any = preprocessor(images=prepare_img(), return_tensors='''pt''' ) # HF model inference hf_model.eval() with torch.no_grad(): UpperCAmelCase_ :Dict = hf_model(**lowercase__ ) UpperCAmelCase_ :Union[str, Any] = outputs.logits.detach().numpy() # Original model inference UpperCAmelCase_ :str = False UpperCAmelCase_ :Tuple = CONFIG_MAP[model_name]["""image_size"""] UpperCAmelCase_ :Optional[int] = prepare_img().resize((image_size, image_size), resample=PIL.Image.NEAREST ) UpperCAmelCase_ :Any = image.img_to_array(lowercase__ ) UpperCAmelCase_ :Union[str, Any] = np.expand_dims(lowercase__, axis=0 ) UpperCAmelCase_ :List[Any] = original_model.predict(lowercase__ ) # Check whether original and HF model outputs match -> np.allclose assert np.allclose(lowercase__, lowercase__, atol=1E-3 ), "The predicted logits are not the same." print('''Model outputs match!''' ) if save_model: # Create folder to save model if not os.path.isdir(lowercase__ ): os.mkdir(lowercase__ ) # Save converted model and image processor hf_model.save_pretrained(lowercase__ ) preprocessor.save_pretrained(lowercase__ ) if push_to_hub: # Push model and image processor to hub print(f'Pushing converted {model_name} to the hub...' 
) UpperCAmelCase_ :Dict = f'efficientnet-{model_name}' preprocessor.push_to_hub(lowercase__ ) hf_model.push_to_hub(lowercase__ ) if __name__ == "__main__": __lowerCamelCase = argparse.ArgumentParser() # Required parameters parser.add_argument( "--model_name", default="b0", type=str, help="Version name of the EfficientNet model you want to convert, select from [b0, b1, b2, b3, b4, b5, b6, b7].", ) parser.add_argument( "--pytorch_dump_folder_path", default="hf_model", type=str, help="Path to the output PyTorch model directory.", ) parser.add_argument("--save_model", action="store_true", help="Save model to local") parser.add_argument("--push_to_hub", action="store_true", help="Push model and image processor to the hub") __lowerCamelCase = parser.parse_args() convert_efficientnet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.save_model, args.push_to_hub)
608
from ...configuration_utils import PretrainedConfig from ...utils import logging from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices UpperCamelCase = logging.get_logger(__name__) UpperCamelCase = { "shi-labs/dinat-mini-in1k-224": "https://huggingface.co/shi-labs/dinat-mini-in1k-224/resolve/main/config.json", # See all Dinat models at https://huggingface.co/models?filter=dinat } class lowerCAmelCase_ ( lowercase , lowercase ): """simple docstring""" _snake_case : Tuple = """dinat""" _snake_case : List[Any] = { """num_attention_heads""": """num_heads""", """num_hidden_layers""": """num_layers""", } def __init__( self :Optional[int] , lowerCamelCase__ :int=4 , lowerCamelCase__ :Union[str, Any]=3 , lowerCamelCase__ :List[Any]=64 , lowerCamelCase__ :Any=[3, 4, 6, 5] , lowerCamelCase__ :Tuple=[2, 4, 8, 16] , lowerCamelCase__ :Optional[int]=7 , lowerCamelCase__ :Tuple=[[1, 8, 1], [1, 4, 1, 4], [1, 2, 1, 2, 1, 2], [1, 1, 1, 1, 1]] , lowerCamelCase__ :Tuple=3.0 , lowerCamelCase__ :str=True , lowerCamelCase__ :Optional[int]=0.0 , lowerCamelCase__ :Optional[Any]=0.0 , lowerCamelCase__ :int=0.1 , lowerCamelCase__ :Optional[Any]="gelu" , lowerCamelCase__ :Optional[Any]=0.02 , lowerCamelCase__ :Union[str, Any]=1e-5 , lowerCamelCase__ :Optional[int]=0.0 , lowerCamelCase__ :List[str]=None , lowerCamelCase__ :str=None , **lowerCamelCase__ :List[Any] , ): super().__init__(**lowerCamelCase__ ) UpperCamelCase__ :Any = patch_size UpperCamelCase__ :Any = num_channels UpperCamelCase__ :int = embed_dim UpperCamelCase__ :Optional[Any] = depths UpperCamelCase__ :Any = len(lowerCamelCase__ ) UpperCamelCase__ :str = num_heads UpperCamelCase__ :Optional[int] = kernel_size UpperCamelCase__ :Optional[int] = dilations UpperCamelCase__ :Tuple = mlp_ratio UpperCamelCase__ :Dict = qkv_bias UpperCamelCase__ :List[str] = hidden_dropout_prob UpperCamelCase__ :List[str] = attention_probs_dropout_prob UpperCamelCase__ :Union[str, Any] = drop_path_rate UpperCamelCase__ :Tuple = hidden_act UpperCamelCase__ :List[Any] = layer_norm_eps UpperCamelCase__ :Optional[Any] = initializer_range # we set the hidden_size attribute in order to make Dinat work with VisionEncoderDecoderModel # this indicates the channel dimension after the last stage of the model UpperCamelCase__ :Tuple = int(embed_dim * 2 ** (len(lowerCamelCase__ ) - 1) ) UpperCamelCase__ :Tuple = layer_scale_init_value UpperCamelCase__ :Optional[int] = ["""stem"""] + [f"""stage{idx}""" for idx in range(1 , len(lowerCamelCase__ ) + 1 )] UpperCamelCase__ , UpperCamelCase__ :List[str] = get_aligned_output_features_output_indices( out_features=lowerCamelCase__ , out_indices=lowerCamelCase__ , stage_names=self.stage_names )
45
0
from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_speech_available, is_torch_available __lowerCamelCase : int = { """configuration_audio_spectrogram_transformer""": [ """AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP""", """ASTConfig""", ] } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __lowerCamelCase : Dict = [ """AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST""", """ASTForAudioClassification""", """ASTModel""", """ASTPreTrainedModel""", ] try: if not is_speech_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __lowerCamelCase : Dict = ["""ASTFeatureExtractor"""] if TYPE_CHECKING: from .configuration_audio_spectrogram_transformer import ( AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, ASTConfig, ) try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_audio_spectrogram_transformer import ( AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, ASTForAudioClassification, ASTModel, ASTPreTrainedModel, ) try: if not is_speech_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .feature_extraction_audio_spectrogram_transformer import ASTFeatureExtractor else: import sys __lowerCamelCase : Tuple = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
385
def A ( lowercase__ : int , lowercase__ : int ) -> int: return int(input_a == input_a == 0 ) def A ( ) -> None: print("""Truth Table of NOR Gate:""" ) print("""| Input 1 | Input 2 | Output |""" ) print(f"""| 0 | 0 | {nor_gate(0 , 0 )} |""" ) print(f"""| 0 | 1 | {nor_gate(0 , 1 )} |""" ) print(f"""| 1 | 0 | {nor_gate(1 , 0 )} |""" ) print(f"""| 1 | 1 | {nor_gate(1 , 1 )} |""" ) if __name__ == "__main__": import doctest doctest.testmod() main()
45
0
from ...configuration_utils import PretrainedConfig from ...utils import logging from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices _lowerCamelCase : str = logging.get_logger(__name__) _lowerCamelCase : Optional[int] = { '''shi-labs/dinat-mini-in1k-224''': '''https://huggingface.co/shi-labs/dinat-mini-in1k-224/resolve/main/config.json''', # See all Dinat models at https://huggingface.co/models?filter=dinat } class lowerCamelCase (__lowerCamelCase , __lowerCamelCase ): """simple docstring""" UpperCAmelCase_ = """dinat""" UpperCAmelCase_ = { """num_attention_heads""": """num_heads""", """num_hidden_layers""": """num_layers""", } def __init__( self : Optional[int], _UpperCAmelCase : int=4, _UpperCAmelCase : Union[str, Any]=3, _UpperCAmelCase : List[Any]=6_4, _UpperCAmelCase : Any=[3, 4, 6, 5], _UpperCAmelCase : Tuple=[2, 4, 8, 1_6], _UpperCAmelCase : Optional[int]=7, _UpperCAmelCase : Tuple=[[1, 8, 1], [1, 4, 1, 4], [1, 2, 1, 2, 1, 2], [1, 1, 1, 1, 1]], _UpperCAmelCase : Tuple=3.0, _UpperCAmelCase : str=True, _UpperCAmelCase : Optional[int]=0.0, _UpperCAmelCase : Optional[Any]=0.0, _UpperCAmelCase : int=0.1, _UpperCAmelCase : Optional[Any]="gelu", _UpperCAmelCase : Optional[Any]=0.02, _UpperCAmelCase : Union[str, Any]=1E-5, _UpperCAmelCase : Optional[int]=0.0, _UpperCAmelCase : List[str]=None, _UpperCAmelCase : str=None, **_UpperCAmelCase : List[Any], ) -> List[str]: """simple docstring""" super().__init__(**lowerCamelCase__ ) SCREAMING_SNAKE_CASE__ : Any = patch_size SCREAMING_SNAKE_CASE__ : Any = num_channels SCREAMING_SNAKE_CASE__ : int = embed_dim SCREAMING_SNAKE_CASE__ : Optional[Any] = depths SCREAMING_SNAKE_CASE__ : Any = len(lowerCamelCase__ ) SCREAMING_SNAKE_CASE__ : str = num_heads SCREAMING_SNAKE_CASE__ : Optional[int] = kernel_size SCREAMING_SNAKE_CASE__ : Optional[int] = dilations SCREAMING_SNAKE_CASE__ : Tuple = mlp_ratio SCREAMING_SNAKE_CASE__ : Dict = qkv_bias SCREAMING_SNAKE_CASE__ : List[str] = hidden_dropout_prob SCREAMING_SNAKE_CASE__ : List[str] = attention_probs_dropout_prob SCREAMING_SNAKE_CASE__ : Union[str, Any] = drop_path_rate SCREAMING_SNAKE_CASE__ : Tuple = hidden_act SCREAMING_SNAKE_CASE__ : List[Any] = layer_norm_eps SCREAMING_SNAKE_CASE__ : Optional[Any] = initializer_range # we set the hidden_size attribute in order to make Dinat work with VisionEncoderDecoderModel # this indicates the channel dimension after the last stage of the model SCREAMING_SNAKE_CASE__ : Tuple = int(embed_dim * 2 ** (len(lowerCamelCase__ ) - 1) ) SCREAMING_SNAKE_CASE__ : Tuple = layer_scale_init_value SCREAMING_SNAKE_CASE__ : Optional[int] = ["""stem"""] + [F'''stage{idx}''' for idx in range(1, len(lowerCamelCase__ ) + 1 )] SCREAMING_SNAKE_CASE__ : List[str] = get_aligned_output_features_output_indices( out_features=lowerCamelCase__, out_indices=lowerCamelCase__, stage_names=self.stage_names )
663
import unittest import numpy as np from transformers.testing_utils import require_torch, require_vision from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import GLPNImageProcessor class lowerCAmelCase_ ( unittest.TestCase ): """simple docstring""" def __init__( self :Any , lowerCamelCase__ :Dict , lowerCamelCase__ :Optional[Any]=7 , lowerCamelCase__ :str=3 , lowerCamelCase__ :Optional[Any]=18 , lowerCamelCase__ :List[str]=30 , lowerCamelCase__ :str=4_00 , lowerCamelCase__ :Optional[int]=True , lowerCamelCase__ :Union[str, Any]=32 , lowerCamelCase__ :int=True , ): UpperCamelCase__ :List[Any] = parent UpperCamelCase__ :List[Any] = batch_size UpperCamelCase__ :Any = num_channels UpperCamelCase__ :List[str] = image_size UpperCamelCase__ :Dict = min_resolution UpperCamelCase__ :List[str] = max_resolution UpperCamelCase__ :str = do_resize UpperCamelCase__ :int = size_divisor UpperCamelCase__ :Optional[int] = do_rescale def __a ( self :str ): return { "do_resize": self.do_resize, "size_divisor": self.size_divisor, "do_rescale": self.do_rescale, } @require_torch @require_vision class lowerCAmelCase_ ( lowercase , unittest.TestCase ): """simple docstring""" _snake_case : Optional[int] = GLPNImageProcessor if is_vision_available() else None def __a ( self :Dict ): UpperCamelCase__ :Dict = GLPNImageProcessingTester(self ) @property def __a ( self :List[str] ): return self.image_processor_tester.prepare_image_processor_dict() def __a ( self :Optional[int] ): UpperCamelCase__ :Optional[Any] = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(lowerCamelCase__ , """do_resize""" ) ) self.assertTrue(hasattr(lowerCamelCase__ , """size_divisor""" ) ) self.assertTrue(hasattr(lowerCamelCase__ , """resample""" ) ) self.assertTrue(hasattr(lowerCamelCase__ , """do_rescale""" ) ) def __a ( self :Optional[int] ): pass def __a ( self :Tuple ): # Initialize image_processing UpperCamelCase__ :int = self.image_processing_class(**self.image_processor_dict ) # create random PIL images UpperCamelCase__ :str = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCamelCase__ ) for image in image_inputs: self.assertIsInstance(lowerCamelCase__ , Image.Image ) # Test not batched input (GLPNImageProcessor doesn't support batching) UpperCamelCase__ :Tuple = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values self.assertTrue(encoded_images.shape[-1] % self.image_processor_tester.size_divisor == 0 ) self.assertTrue(encoded_images.shape[-2] % self.image_processor_tester.size_divisor == 0 ) def __a ( self :str ): # Initialize image_processing UpperCamelCase__ :str = self.image_processing_class(**self.image_processor_dict ) # create random numpy tensors UpperCamelCase__ :Optional[Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCamelCase__ , numpify=lowerCamelCase__ ) for image in image_inputs: self.assertIsInstance(lowerCamelCase__ , np.ndarray ) # Test not batched input (GLPNImageProcessor doesn't support batching) UpperCamelCase__ :List[str] = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values self.assertTrue(encoded_images.shape[-1] % self.image_processor_tester.size_divisor == 0 ) self.assertTrue(encoded_images.shape[-2] % self.image_processor_tester.size_divisor == 0 ) def __a ( self 
:Any ): # Initialize image_processing UpperCamelCase__ :List[Any] = self.image_processing_class(**self.image_processor_dict ) # create random PyTorch tensors UpperCamelCase__ :Tuple = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCamelCase__ , torchify=lowerCamelCase__ ) for image in image_inputs: self.assertIsInstance(lowerCamelCase__ , torch.Tensor ) # Test not batched input (GLPNImageProcessor doesn't support batching) UpperCamelCase__ :List[str] = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values self.assertTrue(encoded_images.shape[-1] % self.image_processor_tester.size_divisor == 0 ) self.assertTrue(encoded_images.shape[-2] % self.image_processor_tester.size_divisor == 0 )
45
0
"""simple docstring""" from collections import OrderedDict from typing import Mapping from packaging import version from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices UpperCAmelCase_ : str = logging.get_logger(__name__) UpperCAmelCase_ : Any = { """microsoft/resnet-50""": """https://huggingface.co/microsoft/resnet-50/blob/main/config.json""", } class lowerCAmelCase__ ( UpperCAmelCase__ , UpperCAmelCase__ ): '''simple docstring''' __UpperCamelCase = """resnet""" __UpperCamelCase = ["""basic""", """bottleneck"""] def __init__( self : List[Any] , lowercase_ : Union[str, Any]=3 , lowercase_ : str=64 , lowercase_ : Union[str, Any]=[256, 512, 1024, 2048] , lowercase_ : List[str]=[3, 4, 6, 3] , lowercase_ : Any="bottleneck" , lowercase_ : List[Any]="relu" , lowercase_ : Optional[Any]=False , lowercase_ : int=None , lowercase_ : str=None , **lowercase_ : Optional[int] , ): '''simple docstring''' super().__init__(**lowerCamelCase__) if layer_type not in self.layer_types: raise ValueError(F'layer_type={layer_type} is not one of {",".join(self.layer_types)}') SCREAMING_SNAKE_CASE_ : List[str] = num_channels SCREAMING_SNAKE_CASE_ : List[Any] = embedding_size SCREAMING_SNAKE_CASE_ : str = hidden_sizes SCREAMING_SNAKE_CASE_ : Optional[Any] = depths SCREAMING_SNAKE_CASE_ : Any = layer_type SCREAMING_SNAKE_CASE_ : List[str] = hidden_act SCREAMING_SNAKE_CASE_ : int = downsample_in_first_stage SCREAMING_SNAKE_CASE_ : str = ["""stem"""] + [F'stage{idx}' for idx in range(1 , len(lowerCamelCase__) + 1)] SCREAMING_SNAKE_CASE_ : List[Any] = get_aligned_output_features_output_indices( out_features=lowerCamelCase__ , out_indices=lowerCamelCase__ , stage_names=self.stage_names) class lowerCAmelCase__ ( UpperCAmelCase__ ): '''simple docstring''' __UpperCamelCase = version.parse("1.11" ) @property def _SCREAMING_SNAKE_CASE ( self : str): '''simple docstring''' return OrderedDict( [ ('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}), ]) @property def _SCREAMING_SNAKE_CASE ( self : Dict): '''simple docstring''' return 1e-3
512
import math def A ( lowercase__ : Tuple , lowercase__ : Union[str, Any] ) -> Optional[Any]: if 0 not in (x, y): # We use the relation x^y = y*log10(x), where 10 is the base. return y * math.logaa(lowercase__ ) else: if x == 0: # 0 raised to any number is 0 return 0 elif y == 0: return 1 # any number raised to 0 is 1 raise AssertionError("""This should never happen""" ) if __name__ == "__main__": # Main function # Read two numbers from input and typecast them to int using map function. # Here x is the base and y is the power. UpperCamelCase = "Enter the base and the power separated by a comma: " UpperCamelCase , UpperCamelCase = map(int, input(prompt).split(",")) UpperCamelCase , UpperCamelCase = map(int, input(prompt).split(",")) # We find the log of each number, using the function res(), which takes two # arguments. UpperCamelCase = res(xa, ya) UpperCamelCase = res(xa, ya) # We check for the largest number if resa > resa: print("Largest number is", xa, "^", ya) elif resa > resa: print("Largest number is", xa, "^", ya) else: print("Both are equal")
45
0
_a : List[Any] = { 'joule': 1.0, 'kilojoule': 10_00, 'megajoule': 1_00_00_00, 'gigajoule': 10_00_00_00_00, 'wattsecond': 1.0, 'watthour': 36_00, 'kilowatthour': 3_60_00_00, 'newtonmeter': 1.0, 'calorie_nutr': 41_86.8, 'kilocalorie_nutr': 4_18_68_00.00, 'electronvolt': 1.6_02_17_66_34e-19, 'britishthermalunit_it': 10_55.0_55_85, 'footpound': 1.35_5818, } def UpperCamelCase__ ( _A: str , _A: str , _A: float ): '''simple docstring''' if to_type not in ENERGY_CONVERSION or from_type not in ENERGY_CONVERSION: __lowerCamelCase = ( f'''Incorrect \'from_type\' or \'to_type\' value: {from_type!r}, {to_type!r}\n''' f'''Valid values are: {', '.join(lowercase__ )}''' ) raise ValueError(lowercase__ ) return value * ENERGY_CONVERSION[from_type] / ENERGY_CONVERSION[to_type] if __name__ == "__main__": import doctest doctest.testmod()
479
from __future__ import annotations import unittest from transformers import is_tf_available from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import numpy as np import tensorflow as tf from transformers import ( TF_FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST, FlaubertConfig, TFFlaubertForMultipleChoice, TFFlaubertForQuestionAnsweringSimple, TFFlaubertForSequenceClassification, TFFlaubertForTokenClassification, TFFlaubertModel, TFFlaubertWithLMHeadModel, ) class lowerCAmelCase_ : """simple docstring""" def __init__( self :Dict , lowerCamelCase__ :List[str] , ): UpperCamelCase__ :Optional[int] = parent UpperCamelCase__ :int = 13 UpperCamelCase__ :Optional[int] = 7 UpperCamelCase__ :Dict = True UpperCamelCase__ :Dict = True UpperCamelCase__ :str = True UpperCamelCase__ :List[Any] = True UpperCamelCase__ :Any = True UpperCamelCase__ :Optional[int] = False UpperCamelCase__ :Optional[int] = False UpperCamelCase__ :Tuple = False UpperCamelCase__ :Optional[int] = 2 UpperCamelCase__ :List[str] = 99 UpperCamelCase__ :Optional[Any] = 0 UpperCamelCase__ :Any = 32 UpperCamelCase__ :List[str] = 2 UpperCamelCase__ :int = 4 UpperCamelCase__ :List[str] = 0.1 UpperCamelCase__ :Union[str, Any] = 0.1 UpperCamelCase__ :Union[str, Any] = 5_12 UpperCamelCase__ :List[str] = 16 UpperCamelCase__ :str = 2 UpperCamelCase__ :Optional[int] = 0.02 UpperCamelCase__ :Optional[int] = 3 UpperCamelCase__ :Optional[int] = 4 UpperCamelCase__ :Optional[int] = """last""" UpperCamelCase__ :Tuple = True UpperCamelCase__ :int = None UpperCamelCase__ :Dict = 0 def __a ( self :int ): UpperCamelCase__ :Optional[Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) UpperCamelCase__ :Any = random_attention_mask([self.batch_size, self.seq_length] , dtype=tf.floataa ) UpperCamelCase__ :Union[str, Any] = None if self.use_input_lengths: UpperCamelCase__ :Union[str, Any] = ( ids_tensor([self.batch_size] , vocab_size=2 ) + self.seq_length - 2 ) # small variation of seq_length UpperCamelCase__ :List[str] = None if self.use_token_type_ids: UpperCamelCase__ :List[str] = ids_tensor([self.batch_size, self.seq_length] , self.n_langs ) UpperCamelCase__ :int = None UpperCamelCase__ :List[str] = None UpperCamelCase__ :List[str] = None if self.use_labels: UpperCamelCase__ :List[Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size ) UpperCamelCase__ :Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) UpperCamelCase__ :str = ids_tensor([self.batch_size] , 2 , dtype=tf.floataa ) UpperCamelCase__ :int = ids_tensor([self.batch_size] , self.num_choices ) UpperCamelCase__ :List[Any] = FlaubertConfig( vocab_size=self.vocab_size , n_special=self.n_special , emb_dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , gelu_activation=self.gelu_activation , sinusoidal_embeddings=self.sinusoidal_embeddings , asm=self.asm , causal=self.causal , n_langs=self.n_langs , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , summary_type=self.summary_type , use_proj=self.use_proj , bos_token_id=self.bos_token_id , ) return ( config, input_ids, token_type_ids, input_lengths, 
sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask, ) def __a ( self :Union[str, Any] , lowerCamelCase__ :Optional[int] , lowerCamelCase__ :Union[str, Any] , lowerCamelCase__ :int , lowerCamelCase__ :List[str] , lowerCamelCase__ :Optional[int] , lowerCamelCase__ :int , lowerCamelCase__ :List[Any] , lowerCamelCase__ :List[Any] , lowerCamelCase__ :int , ): UpperCamelCase__ :int = TFFlaubertModel(config=lowerCamelCase__ ) UpperCamelCase__ :Union[str, Any] = {"""input_ids""": input_ids, """lengths""": input_lengths, """langs""": token_type_ids} UpperCamelCase__ :Optional[Any] = model(lowerCamelCase__ ) UpperCamelCase__ :Union[str, Any] = [input_ids, input_mask] UpperCamelCase__ :Optional[int] = model(lowerCamelCase__ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def __a ( self :Tuple , lowerCamelCase__ :Union[str, Any] , lowerCamelCase__ :Optional[Any] , lowerCamelCase__ :Union[str, Any] , lowerCamelCase__ :Any , lowerCamelCase__ :int , lowerCamelCase__ :int , lowerCamelCase__ :List[str] , lowerCamelCase__ :Any , lowerCamelCase__ :Optional[Any] , ): UpperCamelCase__ :List[str] = TFFlaubertWithLMHeadModel(lowerCamelCase__ ) UpperCamelCase__ :Optional[Any] = {"""input_ids""": input_ids, """lengths""": input_lengths, """langs""": token_type_ids} UpperCamelCase__ :Any = model(lowerCamelCase__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def __a ( self :Dict , lowerCamelCase__ :List[str] , lowerCamelCase__ :Dict , lowerCamelCase__ :Tuple , lowerCamelCase__ :str , lowerCamelCase__ :Any , lowerCamelCase__ :Tuple , lowerCamelCase__ :Any , lowerCamelCase__ :int , lowerCamelCase__ :Tuple , ): UpperCamelCase__ :int = TFFlaubertForQuestionAnsweringSimple(lowerCamelCase__ ) UpperCamelCase__ :int = {"""input_ids""": input_ids, """lengths""": input_lengths} UpperCamelCase__ :Optional[int] = model(lowerCamelCase__ ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def __a ( self :List[Any] , lowerCamelCase__ :List[str] , lowerCamelCase__ :List[str] , lowerCamelCase__ :str , lowerCamelCase__ :Optional[Any] , lowerCamelCase__ :Tuple , lowerCamelCase__ :Tuple , lowerCamelCase__ :Optional[Any] , lowerCamelCase__ :int , lowerCamelCase__ :Optional[int] , ): UpperCamelCase__ :List[Any] = TFFlaubertForSequenceClassification(lowerCamelCase__ ) UpperCamelCase__ :List[str] = {"""input_ids""": input_ids, """lengths""": input_lengths} UpperCamelCase__ :List[str] = model(lowerCamelCase__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) def __a ( self :Tuple , lowerCamelCase__ :str , lowerCamelCase__ :Any , lowerCamelCase__ :Tuple , lowerCamelCase__ :str , lowerCamelCase__ :Any , lowerCamelCase__ :Union[str, Any] , lowerCamelCase__ :List[str] , lowerCamelCase__ :str , lowerCamelCase__ :Any , ): UpperCamelCase__ :Any = self.num_labels UpperCamelCase__ :Tuple = TFFlaubertForTokenClassification(config=lowerCamelCase__ ) UpperCamelCase__ :Any = {"""input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids} UpperCamelCase__ :List[Any] = model(lowerCamelCase__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def __a ( self :Tuple , lowerCamelCase__ :Optional[int] , lowerCamelCase__ :Tuple , lowerCamelCase__ :Any , 
lowerCamelCase__ :Union[str, Any] , lowerCamelCase__ :str , lowerCamelCase__ :Any , lowerCamelCase__ :Optional[Any] , lowerCamelCase__ :Dict , lowerCamelCase__ :List[str] , ): UpperCamelCase__ :Optional[int] = self.num_choices UpperCamelCase__ :Dict = TFFlaubertForMultipleChoice(config=lowerCamelCase__ ) UpperCamelCase__ :Any = tf.tile(tf.expand_dims(lowerCamelCase__ , 1 ) , (1, self.num_choices, 1) ) UpperCamelCase__ :str = tf.tile(tf.expand_dims(lowerCamelCase__ , 1 ) , (1, self.num_choices, 1) ) UpperCamelCase__ :Any = tf.tile(tf.expand_dims(lowerCamelCase__ , 1 ) , (1, self.num_choices, 1) ) UpperCamelCase__ :int = { """input_ids""": multiple_choice_inputs_ids, """attention_mask""": multiple_choice_input_mask, """token_type_ids""": multiple_choice_token_type_ids, } UpperCamelCase__ :List[str] = model(lowerCamelCase__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) ) def __a ( self :Tuple ): UpperCamelCase__ :str = self.prepare_config_and_inputs() ( ( UpperCamelCase__ ) , ( UpperCamelCase__ ) , ( UpperCamelCase__ ) , ( UpperCamelCase__ ) , ( UpperCamelCase__ ) , ( UpperCamelCase__ ) , ( UpperCamelCase__ ) , ( UpperCamelCase__ ) , ( UpperCamelCase__ ) , ) :str = config_and_inputs UpperCamelCase__ :Optional[Any] = { """input_ids""": input_ids, """token_type_ids""": token_type_ids, """langs""": token_type_ids, """lengths""": input_lengths, } return config, inputs_dict @require_tf class lowerCAmelCase_ ( lowercase , lowercase , unittest.TestCase ): """simple docstring""" _snake_case : List[str] = ( ( TFFlaubertModel, TFFlaubertWithLMHeadModel, TFFlaubertForSequenceClassification, TFFlaubertForQuestionAnsweringSimple, TFFlaubertForTokenClassification, TFFlaubertForMultipleChoice, ) if is_tf_available() else () ) _snake_case : List[Any] = ( (TFFlaubertWithLMHeadModel,) if is_tf_available() else () ) # TODO (PVP): Check other models whether language generation is also applicable _snake_case : Optional[int] = ( { """feature-extraction""": TFFlaubertModel, """fill-mask""": TFFlaubertWithLMHeadModel, """question-answering""": TFFlaubertForQuestionAnsweringSimple, """text-classification""": TFFlaubertForSequenceClassification, """token-classification""": TFFlaubertForTokenClassification, """zero-shot""": TFFlaubertForSequenceClassification, } if is_tf_available() else {} ) _snake_case : List[Any] = False _snake_case : Tuple = False def __a ( self :Optional[int] , lowerCamelCase__ :List[Any] , lowerCamelCase__ :Dict , lowerCamelCase__ :int , lowerCamelCase__ :str , lowerCamelCase__ :List[Any] ): if ( pipeline_test_casse_name == "QAPipelineTests" and tokenizer_name is not None and not tokenizer_name.endswith("""Fast""" ) ): # `QAPipelineTests` fails for a few models when the slower tokenizer are used. 
# (The slower tokenizers were never used for pipeline tests before the pipeline testing rework) # TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer return True return False def __a ( self :List[str] ): UpperCamelCase__ :List[str] = TFFlaubertModelTester(self ) UpperCamelCase__ :Tuple = ConfigTester(self , config_class=lowerCamelCase__ , emb_dim=37 ) def __a ( self :int ): self.config_tester.run_common_tests() def __a ( self :List[str] ): UpperCamelCase__ :List[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_flaubert_model(*lowerCamelCase__ ) def __a ( self :Tuple ): UpperCamelCase__ :Tuple = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_flaubert_lm_head(*lowerCamelCase__ ) def __a ( self :Union[str, Any] ): UpperCamelCase__ :Optional[int] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_flaubert_qa(*lowerCamelCase__ ) def __a ( self :List[Any] ): UpperCamelCase__ :Optional[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_flaubert_sequence_classif(*lowerCamelCase__ ) def __a ( self :Any ): UpperCamelCase__ :Any = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_flaubert_for_token_classification(*lowerCamelCase__ ) def __a ( self :List[Any] ): UpperCamelCase__ :Union[str, Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_flaubert_for_multiple_choice(*lowerCamelCase__ ) @slow def __a ( self :str ): for model_name in TF_FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: UpperCamelCase__ :Dict = TFFlaubertModel.from_pretrained(lowerCamelCase__ ) self.assertIsNotNone(lowerCamelCase__ ) @require_tf @require_sentencepiece @require_tokenizers class lowerCAmelCase_ ( unittest.TestCase ): """simple docstring""" @slow def __a ( self :str ): UpperCamelCase__ :Tuple = TFFlaubertModel.from_pretrained("""jplu/tf-flaubert-small-cased""" ) UpperCamelCase__ :Optional[int] = tf.convert_to_tensor( [[0, 1_58, 7_35, 25_92, 14_24, 67_27, 82, 1]] , dtype=tf.intaa , ) # "J'aime flaubert !" UpperCamelCase__ :Optional[Any] = model(lowerCamelCase__ )[0] UpperCamelCase__ :Optional[int] = tf.TensorShape((1, 8, 5_12) ) self.assertEqual(output.shape , lowerCamelCase__ ) # compare the actual values for a slice. UpperCamelCase__ :str = tf.convert_to_tensor( [ [ [-1.876_8773, -1.56_6555, 0.2707_2418], [-1.692_0038, -0.587_3505, 1.932_9599], [-2.956_3985, -1.699_3835, 1.797_2052], ] ] , dtype=tf.floataa , ) self.assertTrue(np.allclose(output[:, :3, :3].numpy() , expected_slice.numpy() , atol=1e-4 ) )
45
0
'''simple docstring''' from __future__ import annotations from sys import maxsize from typing import Generic, TypeVar lowerCAmelCase_ = TypeVar('''T''') def _A ( UpperCAmelCase ): '''simple docstring''' return (position - 1) // 2 def _A ( UpperCAmelCase ): '''simple docstring''' return (2 * position) + 1 def _A ( UpperCAmelCase ): '''simple docstring''' return (2 * position) + 2 class _snake_case( Generic[T] ): def __init__(self : Any ) -> int: """simple docstring""" A__ = [] A__ = {} A__ = 0 def __len__(self : Dict ) -> Tuple: """simple docstring""" return self.elements def __repr__(self : Tuple ) -> str: """simple docstring""" return str(self.heap ) def _UpperCamelCase (self : List[str] ) -> int: """simple docstring""" return self.elements == 0 def _UpperCamelCase (self : List[str] , a : T , a : int ) -> Tuple: """simple docstring""" self.heap.append((elem, weight) ) A__ = self.elements self.elements += 1 self._bubble_up(lowerCamelCase__ ) def _UpperCamelCase (self : Tuple ) -> str: """simple docstring""" if self.elements > 1: self._swap_nodes(0 , self.elements - 1 ) A__ = self.heap.pop() del self.position_map[elem] self.elements -= 1 if self.elements > 0: A__ = self.heap[0] self._bubble_down(lowerCamelCase__ ) return elem def _UpperCamelCase (self : Union[str, Any] , a : T , a : int ) -> int: """simple docstring""" A__ = self.position_map[elem] A__ = (elem, weight) if position > 0: A__ = get_parent_position(lowerCamelCase__ ) A__ = self.heap[parent_position] if parent_weight > weight: self._bubble_up(lowerCamelCase__ ) else: self._bubble_down(lowerCamelCase__ ) else: self._bubble_down(lowerCamelCase__ ) def _UpperCamelCase (self : Optional[int] , a : T ) -> List[Any]: """simple docstring""" A__ = self.position_map[elem] if curr_pos == 0: return None A__ = get_parent_position(lowerCamelCase__ ) A__ = self.heap[curr_pos] A__ = self.heap[parent_position] if parent_weight > weight: self._swap_nodes(lowerCamelCase__ , lowerCamelCase__ ) return self._bubble_up(lowerCamelCase__ ) return None def _UpperCamelCase (self : Dict , a : T ) -> List[str]: """simple docstring""" A__ = self.position_map[elem] A__ = self.heap[curr_pos] A__ = get_child_left_position(lowerCamelCase__ ) A__ = get_child_right_position(lowerCamelCase__ ) if child_left_position < self.elements and child_right_position < self.elements: A__ = self.heap[child_left_position] A__ = self.heap[child_right_position] if child_right_weight < child_left_weight and child_right_weight < weight: self._swap_nodes(lowerCamelCase__ , lowerCamelCase__ ) return self._bubble_down(lowerCamelCase__ ) if child_left_position < self.elements: A__ = self.heap[child_left_position] if child_left_weight < weight: self._swap_nodes(lowerCamelCase__ , lowerCamelCase__ ) return self._bubble_down(lowerCamelCase__ ) else: return None if child_right_position < self.elements: A__ = self.heap[child_right_position] if child_right_weight < weight: self._swap_nodes(lowerCamelCase__ , lowerCamelCase__ ) return self._bubble_down(lowerCamelCase__ ) return None def _UpperCamelCase (self : List[Any] , a : int , a : int ) -> Any: """simple docstring""" A__ = self.heap[nodea_pos][0] A__ = self.heap[nodea_pos][0] A__ = ( self.heap[nodea_pos], self.heap[nodea_pos], ) A__ = nodea_pos A__ = nodea_pos class _snake_case( Generic[T] ): def __init__(self : str ) -> List[Any]: """simple docstring""" A__ = {} A__ = 0 def __repr__(self : int ) -> str: """simple docstring""" return str(self.connections ) def __len__(self : Dict ) -> Optional[Any]: """simple docstring""" return 
self.nodes def _UpperCamelCase (self : List[Any] , a : T ) -> Optional[Any]: """simple docstring""" if node not in self.connections: A__ = {} self.nodes += 1 def _UpperCamelCase (self : Union[str, Any] , a : T , a : T , a : int ) -> Union[str, Any]: """simple docstring""" self.add_node(lowerCamelCase__ ) self.add_node(lowerCamelCase__ ) A__ = weight A__ = weight def _A ( UpperCAmelCase ,): '''simple docstring''' A__ = {node: maxsize for node in graph.connections} A__ = {node: None for node in graph.connections} A__ = MinPriorityQueue() for node, weight in dist.items(): priority_queue.push(lowercase__ ,lowercase__ ) if priority_queue.is_empty(): return dist, parent # initialization A__ = priority_queue.extract_min() A__ = 0 for neighbour in graph.connections[node]: if dist[neighbour] > dist[node] + graph.connections[node][neighbour]: A__ = dist[node] + graph.connections[node][neighbour] priority_queue.update_key(lowercase__ ,dist[neighbour] ) A__ = node # running prim's algorithm while not priority_queue.is_empty(): A__ = priority_queue.extract_min() for neighbour in graph.connections[node]: if dist[neighbour] > dist[node] + graph.connections[node][neighbour]: A__ = dist[node] + graph.connections[node][neighbour] priority_queue.update_key(lowercase__ ,dist[neighbour] ) A__ = node return dist, parent
531
import gc import tempfile import unittest import numpy as np import torch from diffusers import VersatileDiffusionPipeline from diffusers.utils.testing_utils import load_image, nightly, require_torch_gpu, torch_device UpperCamelCase = False class lowerCAmelCase_ ( unittest.TestCase ): """simple docstring""" pass @nightly @require_torch_gpu class lowerCAmelCase_ ( unittest.TestCase ): """simple docstring""" def __a ( self :Union[str, Any] ): # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() def __a ( self :List[Any] ): UpperCamelCase__ :List[str] = VersatileDiffusionPipeline.from_pretrained("""shi-labs/versatile-diffusion""" , torch_dtype=torch.floataa ) pipe.to(lowerCamelCase__ ) pipe.set_progress_bar_config(disable=lowerCamelCase__ ) UpperCamelCase__ :Dict = load_image( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg""" ) UpperCamelCase__ :Any = torch.manual_seed(0 ) UpperCamelCase__ :Optional[int] = pipe.dual_guided( prompt="""first prompt""" , image=lowerCamelCase__ , text_to_image_strength=0.75 , generator=lowerCamelCase__ , guidance_scale=7.5 , num_inference_steps=2 , output_type="""numpy""" , ).images with tempfile.TemporaryDirectory() as tmpdirname: pipe.save_pretrained(lowerCamelCase__ ) UpperCamelCase__ :List[str] = VersatileDiffusionPipeline.from_pretrained(lowerCamelCase__ , torch_dtype=torch.floataa ) pipe.to(lowerCamelCase__ ) pipe.set_progress_bar_config(disable=lowerCamelCase__ ) UpperCamelCase__ :str = generator.manual_seed(0 ) UpperCamelCase__ :str = pipe.dual_guided( prompt="""first prompt""" , image=lowerCamelCase__ , text_to_image_strength=0.75 , generator=lowerCamelCase__ , guidance_scale=7.5 , num_inference_steps=2 , output_type="""numpy""" , ).images assert np.abs(image - new_image ).sum() < 1e-5, "Models don't have the same forward pass" def __a ( self :Dict ): UpperCamelCase__ :List[Any] = VersatileDiffusionPipeline.from_pretrained("""shi-labs/versatile-diffusion""" , torch_dtype=torch.floataa ) pipe.to(lowerCamelCase__ ) pipe.set_progress_bar_config(disable=lowerCamelCase__ ) UpperCamelCase__ :Optional[Any] = """cyberpunk 2077""" UpperCamelCase__ :str = load_image( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg""" ) UpperCamelCase__ :str = torch.manual_seed(0 ) UpperCamelCase__ :Dict = pipe.dual_guided( prompt=lowerCamelCase__ , image=lowerCamelCase__ , text_to_image_strength=0.75 , generator=lowerCamelCase__ , guidance_scale=7.5 , num_inference_steps=50 , output_type="""numpy""" , ).images UpperCamelCase__ :Tuple = image[0, 2_53:2_56, 2_53:2_56, -1] assert image.shape == (1, 5_12, 5_12, 3) UpperCamelCase__ :Any = np.array([0.1448, 0.1619, 0.1741, 0.1086, 0.1147, 0.1128, 0.1199, 0.1165, 0.1001] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1 UpperCamelCase__ :List[Any] = """A painting of a squirrel eating a burger """ UpperCamelCase__ :List[str] = torch.manual_seed(0 ) UpperCamelCase__ :Optional[int] = pipe.text_to_image( prompt=lowerCamelCase__ , generator=lowerCamelCase__ , guidance_scale=7.5 , num_inference_steps=50 , output_type="""numpy""" ).images UpperCamelCase__ :str = image[0, 2_53:2_56, 2_53:2_56, -1] assert image.shape == (1, 5_12, 5_12, 3) UpperCamelCase__ :Union[str, Any] = np.array([0.3367, 0.3169, 0.2656, 0.3870, 0.4790, 0.3796, 0.4009, 0.4878, 0.4778] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1 UpperCamelCase__ :Optional[int] = 
pipe.image_variation(lowerCamelCase__ , generator=lowerCamelCase__ , output_type="""numpy""" ).images UpperCamelCase__ :int = image[0, 2_53:2_56, 2_53:2_56, -1] assert image.shape == (1, 5_12, 5_12, 3) UpperCamelCase__ :List[Any] = np.array([0.3076, 0.3123, 0.3284, 0.3782, 0.3770, 0.3894, 0.4297, 0.4331, 0.4456] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1
45
0
'''simple docstring''' from __future__ import annotations import math from collections.abc import Callable def lowerCamelCase ( lowerCAmelCase : Callable[[int | float], int | float] , lowerCAmelCase : int | float , lowerCAmelCase : int | float , lowerCAmelCase : int = 100 , ): """simple docstring""" __magic_name__ : int = x_start __magic_name__ : List[Any] = fnc(lowercase__ ) __magic_name__ : List[Any] = 0.0 for _ in range(lowercase__ ): # Approximates curve as a sequence of linear lines and sums their length __magic_name__ : Optional[int] = (x_end - x_start) / steps + xa __magic_name__ : List[Any] = fnc(lowercase__ ) length += math.hypot(xa - xa , fxa - fxa ) # Increment step __magic_name__ : Dict = xa __magic_name__ : List[str] = fxa return length if __name__ == "__main__": def lowerCamelCase ( lowerCAmelCase : Dict ): """simple docstring""" return math.sin(10 * x ) print('''f(x) = sin(10 * x)''') print('''The length of the curve from x = -10 to x = 10 is:''') lowerCAmelCase :Union[str, Any] = 1_0 while i <= 1_0_0_0_0_0: print(F'With {i} steps: {line_length(f, -1_0, 1_0, i)}') i *= 1_0
561
from __future__ import annotations import copy import inspect import unittest import numpy as np from transformers import is_tf_available, is_vision_available from transformers.models.auto import get_values from transformers.testing_utils import require_tf, slow from transformers.utils import cached_property from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import ( TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST, TF_MODEL_FOR_MULTIPLE_CHOICE_MAPPING, TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING, TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING, TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING, LayoutLMvaConfig, TFLayoutLMvaForQuestionAnswering, TFLayoutLMvaForSequenceClassification, TFLayoutLMvaForTokenClassification, TFLayoutLMvaModel, ) if is_vision_available(): from PIL import Image from transformers import LayoutLMvaImageProcessor class lowerCAmelCase_ : """simple docstring""" def __init__( self :Union[str, Any] , lowerCamelCase__ :Tuple , lowerCamelCase__ :List[str]=2 , lowerCamelCase__ :List[str]=3 , lowerCamelCase__ :List[str]=4 , lowerCamelCase__ :str=2 , lowerCamelCase__ :Optional[int]=7 , lowerCamelCase__ :List[Any]=True , lowerCamelCase__ :Optional[Any]=True , lowerCamelCase__ :Union[str, Any]=True , lowerCamelCase__ :Any=True , lowerCamelCase__ :Dict=99 , lowerCamelCase__ :Optional[Any]=36 , lowerCamelCase__ :str=2 , lowerCamelCase__ :List[Any]=4 , lowerCamelCase__ :Optional[Any]=37 , lowerCamelCase__ :Optional[int]="gelu" , lowerCamelCase__ :Any=0.1 , lowerCamelCase__ :List[Any]=0.1 , lowerCamelCase__ :List[Any]=5_12 , lowerCamelCase__ :str=16 , lowerCamelCase__ :Tuple=2 , lowerCamelCase__ :int=0.02 , lowerCamelCase__ :List[Any]=6 , lowerCamelCase__ :List[str]=6 , lowerCamelCase__ :Optional[int]=3 , lowerCamelCase__ :Optional[int]=4 , lowerCamelCase__ :int=None , lowerCamelCase__ :Optional[Any]=10_00 , ): UpperCamelCase__ :Any = parent UpperCamelCase__ :Union[str, Any] = batch_size UpperCamelCase__ :Dict = num_channels UpperCamelCase__ :Optional[Any] = image_size UpperCamelCase__ :Union[str, Any] = patch_size UpperCamelCase__ :Union[str, Any] = is_training UpperCamelCase__ :str = use_input_mask UpperCamelCase__ :int = use_token_type_ids UpperCamelCase__ :int = use_labels UpperCamelCase__ :List[Any] = vocab_size UpperCamelCase__ :List[str] = hidden_size UpperCamelCase__ :List[Any] = num_hidden_layers UpperCamelCase__ :List[str] = num_attention_heads UpperCamelCase__ :Tuple = intermediate_size UpperCamelCase__ :Any = hidden_act UpperCamelCase__ :Optional[int] = hidden_dropout_prob UpperCamelCase__ :Tuple = attention_probs_dropout_prob UpperCamelCase__ :Dict = max_position_embeddings UpperCamelCase__ :Tuple = type_vocab_size UpperCamelCase__ :Union[str, Any] = type_sequence_label_size UpperCamelCase__ :int = initializer_range UpperCamelCase__ :List[Any] = coordinate_size UpperCamelCase__ :Tuple = shape_size UpperCamelCase__ :Dict = num_labels UpperCamelCase__ :str = num_choices UpperCamelCase__ :Tuple = scope UpperCamelCase__ :str = range_bbox # LayoutLMv3's sequence length equals the number of text tokens + number of patches + 1 (we add 1 for the CLS token) UpperCamelCase__ :List[str] = text_seq_length UpperCamelCase__ :List[str] = (image_size // patch_size) ** 2 + 1 UpperCamelCase__ :Dict = self.text_seq_length + self.image_seq_length def __a ( self :Tuple ): 
UpperCamelCase__ :Optional[int] = ids_tensor([self.batch_size, self.text_seq_length] , self.vocab_size ) UpperCamelCase__ :int = ids_tensor([self.batch_size, self.text_seq_length, 4] , self.range_bbox ) UpperCamelCase__ :str = bbox.numpy() # Ensure that bbox is legal for i in range(bbox.shape[0] ): for j in range(bbox.shape[1] ): if bbox[i, j, 3] < bbox[i, j, 1]: UpperCamelCase__ :List[str] = bbox[i, j, 3] UpperCamelCase__ :Optional[int] = bbox[i, j, 1] UpperCamelCase__ :Optional[Any] = tmp_coordinate if bbox[i, j, 2] < bbox[i, j, 0]: UpperCamelCase__ :Tuple = bbox[i, j, 2] UpperCamelCase__ :Optional[Any] = bbox[i, j, 0] UpperCamelCase__ :List[str] = tmp_coordinate UpperCamelCase__ :Dict = tf.constant(lowerCamelCase__ ) UpperCamelCase__ :Union[str, Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) UpperCamelCase__ :Any = None if self.use_input_mask: UpperCamelCase__ :int = random_attention_mask([self.batch_size, self.text_seq_length] ) UpperCamelCase__ :Optional[Any] = None if self.use_token_type_ids: UpperCamelCase__ :Optional[int] = ids_tensor([self.batch_size, self.text_seq_length] , self.type_vocab_size ) UpperCamelCase__ :List[str] = None UpperCamelCase__ :Union[str, Any] = None if self.use_labels: UpperCamelCase__ :Union[str, Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size ) UpperCamelCase__ :Union[str, Any] = ids_tensor([self.batch_size, self.text_seq_length] , self.num_labels ) UpperCamelCase__ :Optional[int] = LayoutLMvaConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , coordinate_size=self.coordinate_size , shape_size=self.shape_size , input_size=self.image_size , patch_size=self.patch_size , ) return config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels def __a ( self :List[Any] , lowerCamelCase__ :str , lowerCamelCase__ :Optional[int] , lowerCamelCase__ :Dict , lowerCamelCase__ :str , lowerCamelCase__ :int , lowerCamelCase__ :Any ): UpperCamelCase__ :Dict = TFLayoutLMvaModel(config=lowerCamelCase__ ) # text + image UpperCamelCase__ :Tuple = model(lowerCamelCase__ , pixel_values=lowerCamelCase__ , training=lowerCamelCase__ ) UpperCamelCase__ :Tuple = model( lowerCamelCase__ , bbox=lowerCamelCase__ , pixel_values=lowerCamelCase__ , attention_mask=lowerCamelCase__ , token_type_ids=lowerCamelCase__ , training=lowerCamelCase__ , ) UpperCamelCase__ :str = model(lowerCamelCase__ , bbox=lowerCamelCase__ , pixel_values=lowerCamelCase__ , training=lowerCamelCase__ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) # text only UpperCamelCase__ :Optional[int] = model(lowerCamelCase__ , training=lowerCamelCase__ ) self.parent.assertEqual( result.last_hidden_state.shape , (self.batch_size, self.text_seq_length, self.hidden_size) ) # image only UpperCamelCase__ :Tuple = model({"""pixel_values""": pixel_values} , training=lowerCamelCase__ ) self.parent.assertEqual( result.last_hidden_state.shape , (self.batch_size, self.image_seq_length, self.hidden_size) ) def __a ( self :Dict , lowerCamelCase__ :str , 
lowerCamelCase__ :Union[str, Any] , lowerCamelCase__ :Dict , lowerCamelCase__ :Union[str, Any] , lowerCamelCase__ :str , lowerCamelCase__ :Optional[Any] , lowerCamelCase__ :str ): UpperCamelCase__ :Optional[Any] = self.num_labels UpperCamelCase__ :List[Any] = TFLayoutLMvaForSequenceClassification(config=lowerCamelCase__ ) UpperCamelCase__ :List[str] = model( lowerCamelCase__ , bbox=lowerCamelCase__ , pixel_values=lowerCamelCase__ , attention_mask=lowerCamelCase__ , token_type_ids=lowerCamelCase__ , labels=lowerCamelCase__ , training=lowerCamelCase__ , ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def __a ( self :List[str] , lowerCamelCase__ :List[str] , lowerCamelCase__ :Union[str, Any] , lowerCamelCase__ :Dict , lowerCamelCase__ :List[Any] , lowerCamelCase__ :Optional[int] , lowerCamelCase__ :Tuple , lowerCamelCase__ :List[str] ): UpperCamelCase__ :Union[str, Any] = self.num_labels UpperCamelCase__ :Dict = TFLayoutLMvaForTokenClassification(config=lowerCamelCase__ ) UpperCamelCase__ :Optional[Any] = model( lowerCamelCase__ , bbox=lowerCamelCase__ , pixel_values=lowerCamelCase__ , attention_mask=lowerCamelCase__ , token_type_ids=lowerCamelCase__ , labels=lowerCamelCase__ , training=lowerCamelCase__ , ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.text_seq_length, self.num_labels) ) def __a ( self :int , lowerCamelCase__ :Dict , lowerCamelCase__ :Optional[Any] , lowerCamelCase__ :Optional[Any] , lowerCamelCase__ :Any , lowerCamelCase__ :Dict , lowerCamelCase__ :Tuple , lowerCamelCase__ :Tuple ): UpperCamelCase__ :Dict = 2 UpperCamelCase__ :Tuple = TFLayoutLMvaForQuestionAnswering(config=lowerCamelCase__ ) UpperCamelCase__ :int = model( lowerCamelCase__ , bbox=lowerCamelCase__ , pixel_values=lowerCamelCase__ , attention_mask=lowerCamelCase__ , token_type_ids=lowerCamelCase__ , start_positions=lowerCamelCase__ , end_positions=lowerCamelCase__ , training=lowerCamelCase__ , ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def __a ( self :List[Any] ): UpperCamelCase__ :Union[str, Any] = self.prepare_config_and_inputs() ((UpperCamelCase__) , (UpperCamelCase__) , (UpperCamelCase__) , (UpperCamelCase__) , (UpperCamelCase__) , (UpperCamelCase__) , (UpperCamelCase__) , (UpperCamelCase__)) :Any = config_and_inputs UpperCamelCase__ :List[str] = { """input_ids""": input_ids, """bbox""": bbox, """pixel_values""": pixel_values, """token_type_ids""": token_type_ids, """attention_mask""": input_mask, } return config, inputs_dict @require_tf class lowerCAmelCase_ ( lowercase , lowercase , unittest.TestCase ): """simple docstring""" _snake_case : Dict = ( ( TFLayoutLMvaModel, TFLayoutLMvaForQuestionAnswering, TFLayoutLMvaForSequenceClassification, TFLayoutLMvaForTokenClassification, ) if is_tf_available() else () ) _snake_case : Dict = ( {"""document-question-answering""": TFLayoutLMvaForQuestionAnswering, """feature-extraction""": TFLayoutLMvaModel} if is_tf_available() else {} ) _snake_case : Optional[int] = False _snake_case : List[str] = False _snake_case : Tuple = False def __a ( self :str , lowerCamelCase__ :Optional[int] , lowerCamelCase__ :Union[str, Any] , lowerCamelCase__ :Optional[Any] , lowerCamelCase__ :Tuple , lowerCamelCase__ :int ): return True def __a ( self :Optional[int] , lowerCamelCase__ :int , lowerCamelCase__ :List[str] , lowerCamelCase__ :Optional[int]=False ): UpperCamelCase__ :List[str] = 
copy.deepcopy(lowerCamelCase__ ) if model_class in get_values(lowerCamelCase__ ): UpperCamelCase__ :Optional[int] = { k: tf.tile(tf.expand_dims(lowerCamelCase__ , 1 ) , (1, self.model_tester.num_choices) + (1,) * (v.ndim - 1) ) if isinstance(lowerCamelCase__ , tf.Tensor ) and v.ndim > 0 else v for k, v in inputs_dict.items() } if return_labels: if model_class in get_values(lowerCamelCase__ ): UpperCamelCase__ :str = tf.ones(self.model_tester.batch_size , dtype=tf.intaa ) elif model_class in get_values(lowerCamelCase__ ): UpperCamelCase__ :List[str] = tf.zeros(self.model_tester.batch_size , dtype=tf.intaa ) UpperCamelCase__ :Union[str, Any] = tf.zeros(self.model_tester.batch_size , dtype=tf.intaa ) elif model_class in get_values(lowerCamelCase__ ): UpperCamelCase__ :Optional[Any] = tf.zeros(self.model_tester.batch_size , dtype=tf.intaa ) elif model_class in get_values(lowerCamelCase__ ): UpperCamelCase__ :Tuple = tf.zeros( (self.model_tester.batch_size, self.model_tester.text_seq_length) , dtype=tf.intaa ) return inputs_dict def __a ( self :Dict ): UpperCamelCase__ :List[Any] = TFLayoutLMvaModelTester(self ) UpperCamelCase__ :Optional[int] = ConfigTester(self , config_class=lowerCamelCase__ , hidden_size=37 ) def __a ( self :Any ): self.config_tester.run_common_tests() def __a ( self :Optional[int] ): UpperCamelCase__ , UpperCamelCase__ :Any = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: UpperCamelCase__ :Optional[int] = model_class(lowerCamelCase__ ) if getattr(lowerCamelCase__ , """hf_compute_loss""" , lowerCamelCase__ ): # The number of elements in the loss should be the same as the number of elements in the label UpperCamelCase__ :Union[str, Any] = self._prepare_for_class(inputs_dict.copy() , lowerCamelCase__ , return_labels=lowerCamelCase__ ) UpperCamelCase__ :int = prepared_for_class[ sorted(prepared_for_class.keys() - inputs_dict.keys() , reverse=lowerCamelCase__ )[0] ] UpperCamelCase__ :Union[str, Any] = added_label.shape.as_list()[:1] # Test that model correctly compute the loss with kwargs UpperCamelCase__ :List[Any] = self._prepare_for_class(inputs_dict.copy() , lowerCamelCase__ , return_labels=lowerCamelCase__ ) UpperCamelCase__ :Optional[Any] = prepared_for_class.pop("""input_ids""" ) UpperCamelCase__ :List[str] = model(lowerCamelCase__ , **lowerCamelCase__ )[0] self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1] ) # Test that model correctly compute the loss when we mask some positions UpperCamelCase__ :Union[str, Any] = self._prepare_for_class(inputs_dict.copy() , lowerCamelCase__ , return_labels=lowerCamelCase__ ) UpperCamelCase__ :Optional[Any] = prepared_for_class.pop("""input_ids""" ) if "labels" in prepared_for_class: UpperCamelCase__ :List[str] = prepared_for_class["""labels"""].numpy() if len(labels.shape ) > 1 and labels.shape[1] != 1: UpperCamelCase__ :Optional[Any] = -1_00 UpperCamelCase__ :Union[str, Any] = tf.convert_to_tensor(lowerCamelCase__ ) UpperCamelCase__ :Tuple = model(lowerCamelCase__ , **lowerCamelCase__ )[0] self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1] ) self.assertTrue(not np.any(np.isnan(loss.numpy() ) ) ) # Test that model correctly compute the loss with a dict UpperCamelCase__ :Optional[Any] = self._prepare_for_class(inputs_dict.copy() , lowerCamelCase__ , return_labels=lowerCamelCase__ ) UpperCamelCase__ :Union[str, Any] = model(lowerCamelCase__ )[0] self.assertTrue(loss.shape.as_list() == expected_loss_size or 
loss.shape.as_list() == [1] ) # Test that model correctly compute the loss with a tuple UpperCamelCase__ :Dict = self._prepare_for_class(inputs_dict.copy() , lowerCamelCase__ , return_labels=lowerCamelCase__ ) # Get keys that were added with the _prepare_for_class function UpperCamelCase__ :str = prepared_for_class.keys() - inputs_dict.keys() UpperCamelCase__ :Tuple = inspect.signature(model.call ).parameters UpperCamelCase__ :str = list(signature.keys() ) # Create a dictionary holding the location of the tensors in the tuple UpperCamelCase__ :Any = {0: """input_ids"""} for label_key in label_keys: UpperCamelCase__ :Dict = signature_names.index(lowerCamelCase__ ) UpperCamelCase__ :Optional[int] = label_key UpperCamelCase__ :Optional[Any] = sorted(tuple_index_mapping.items() ) # Initialize a list with their default values, update the values and convert to a tuple UpperCamelCase__ :Any = [] for name in signature_names: if name != "kwargs": list_input.append(signature[name].default ) for index, value in sorted_tuple_index_mapping: UpperCamelCase__ :List[str] = prepared_for_class[value] UpperCamelCase__ :Union[str, Any] = tuple(lowerCamelCase__ ) # Send to model UpperCamelCase__ :str = model(tuple_input[:-1] )[0] self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1] ) def __a ( self :Optional[int] ): ( ( UpperCamelCase__ ) , ( UpperCamelCase__ ) , ( UpperCamelCase__ ) , ( UpperCamelCase__ ) , ( UpperCamelCase__ ) , ( UpperCamelCase__ ) , ( UpperCamelCase__ ) , ( UpperCamelCase__ ) , ) :Any = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) def __a ( self :Any ): ( ( UpperCamelCase__ ) , ( UpperCamelCase__ ) , ( UpperCamelCase__ ) , ( UpperCamelCase__ ) , ( UpperCamelCase__ ) , ( UpperCamelCase__ ) , ( UpperCamelCase__ ) , ( UpperCamelCase__ ) , ) :List[Any] = self.model_tester.prepare_config_and_inputs() for type in ["absolute", "relative_key", "relative_key_query"]: UpperCamelCase__ :Dict = type self.model_tester.create_and_check_model(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) def __a ( self :Tuple ): ( ( UpperCamelCase__ ) , ( UpperCamelCase__ ) , ( UpperCamelCase__ ) , ( UpperCamelCase__ ) , ( UpperCamelCase__ ) , ( UpperCamelCase__ ) , ( UpperCamelCase__ ) , ( UpperCamelCase__ ) , ) :int = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_sequence_classification( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) def __a ( self :Optional[int] ): ( ( UpperCamelCase__ ) , ( UpperCamelCase__ ) , ( UpperCamelCase__ ) , ( UpperCamelCase__ ) , ( UpperCamelCase__ ) , ( UpperCamelCase__ ) , ( UpperCamelCase__ ) , ( UpperCamelCase__ ) , ) :Union[str, Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_token_classification( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) def __a ( self :List[str] ): ( ( UpperCamelCase__ ) , ( UpperCamelCase__ ) , ( UpperCamelCase__ ) , ( UpperCamelCase__ ) , ( UpperCamelCase__ ) , ( UpperCamelCase__ ) , ( UpperCamelCase__ ) , ( UpperCamelCase__ ) , ) :Tuple = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_question_answering( 
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) @slow def __a ( self :Optional[int] ): for model_name in TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: UpperCamelCase__ :Dict = TFLayoutLMvaModel.from_pretrained(lowerCamelCase__ ) self.assertIsNotNone(lowerCamelCase__ ) def A ( ) -> List[str]: UpperCamelCase__ :List[Any] = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" ) return image @require_tf class lowerCAmelCase_ ( unittest.TestCase ): """simple docstring""" @cached_property def __a ( self :Optional[Any] ): return LayoutLMvaImageProcessor(apply_ocr=lowerCamelCase__ ) if is_vision_available() else None @slow def __a ( self :Dict ): UpperCamelCase__ :List[str] = TFLayoutLMvaModel.from_pretrained("""microsoft/layoutlmv3-base""" ) UpperCamelCase__ :List[Any] = self.default_image_processor UpperCamelCase__ :str = prepare_img() UpperCamelCase__ :Any = image_processor(images=lowerCamelCase__ , return_tensors="""tf""" ).pixel_values UpperCamelCase__ :str = tf.constant([[1, 2]] ) UpperCamelCase__ :Any = tf.expand_dims(tf.constant([[1, 2, 3, 4], [5, 6, 7, 8]] ) , axis=0 ) # forward pass UpperCamelCase__ :Dict = model(input_ids=lowerCamelCase__ , bbox=lowerCamelCase__ , pixel_values=lowerCamelCase__ , training=lowerCamelCase__ ) # verify the logits UpperCamelCase__ :int = (1, 1_99, 7_68) self.assertEqual(outputs.last_hidden_state.shape , lowerCamelCase__ ) UpperCamelCase__ :List[Any] = tf.constant( [[-0.0529, 0.3618, 0.1632], [-0.1587, -0.1667, -0.0400], [-0.1557, -0.1671, -0.0505]] ) self.assertTrue(np.allclose(outputs.last_hidden_state[0, :3, :3] , lowerCamelCase__ , atol=1e-4 ) )
45
0
'''simple docstring''' import tempfile import torch from diffusers import PNDMScheduler from .test_schedulers import SchedulerCommonTest class _snake_case (__SCREAMING_SNAKE_CASE): __A : int =(PNDMScheduler,) __A : Any =(("""num_inference_steps""", 50),) def UpperCamelCase__ ( self ,**_snake_case ): UpperCAmelCase_ : Any = { """num_train_timesteps""": 10_00, """beta_start""": 0.0001, """beta_end""": 0.02, """beta_schedule""": """linear""", } config.update(**lowerCamelCase__ ) return config def UpperCamelCase__ ( self ,_snake_case=0 ,**_snake_case ): UpperCAmelCase_ : Optional[int] = dict(self.forward_default_kwargs ) UpperCAmelCase_ : int = kwargs.pop("num_inference_steps" ,lowerCamelCase__ ) UpperCAmelCase_ : List[Any] = self.dummy_sample UpperCAmelCase_ : Tuple = 0.1 * sample UpperCAmelCase_ : List[str] = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05] for scheduler_class in self.scheduler_classes: UpperCAmelCase_ : str = self.get_scheduler_config(**lowerCamelCase__ ) UpperCAmelCase_ : Tuple = scheduler_class(**lowerCamelCase__ ) scheduler.set_timesteps(lowerCamelCase__ ) # copy over dummy past residuals UpperCAmelCase_ : str = dummy_past_residuals[:] with tempfile.TemporaryDirectory() as tmpdirname: scheduler.save_config(lowerCamelCase__ ) UpperCAmelCase_ : int = scheduler_class.from_pretrained(lowerCamelCase__ ) new_scheduler.set_timesteps(lowerCamelCase__ ) # copy over dummy past residuals UpperCAmelCase_ : List[str] = dummy_past_residuals[:] UpperCAmelCase_ : Dict = scheduler.step_prk(lowerCamelCase__ ,lowerCamelCase__ ,lowerCamelCase__ ,**lowerCamelCase__ ).prev_sample UpperCAmelCase_ : List[str] = new_scheduler.step_prk(lowerCamelCase__ ,lowerCamelCase__ ,lowerCamelCase__ ,**lowerCamelCase__ ).prev_sample assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical" UpperCAmelCase_ : Optional[Any] = scheduler.step_plms(lowerCamelCase__ ,lowerCamelCase__ ,lowerCamelCase__ ,**lowerCamelCase__ ).prev_sample UpperCAmelCase_ : Optional[int] = new_scheduler.step_plms(lowerCamelCase__ ,lowerCamelCase__ ,lowerCamelCase__ ,**lowerCamelCase__ ).prev_sample assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical" def UpperCamelCase__ ( self ): pass def UpperCamelCase__ ( self ,_snake_case=0 ,**_snake_case ): UpperCAmelCase_ : List[Any] = dict(self.forward_default_kwargs ) UpperCAmelCase_ : List[str] = kwargs.pop("num_inference_steps" ,lowerCamelCase__ ) UpperCAmelCase_ : List[str] = self.dummy_sample UpperCAmelCase_ : Union[str, Any] = 0.1 * sample UpperCAmelCase_ : Optional[int] = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05] for scheduler_class in self.scheduler_classes: UpperCAmelCase_ : Optional[Any] = self.get_scheduler_config() UpperCAmelCase_ : Optional[Any] = scheduler_class(**lowerCamelCase__ ) scheduler.set_timesteps(lowerCamelCase__ ) # copy over dummy past residuals (must be after setting timesteps) UpperCAmelCase_ : int = dummy_past_residuals[:] with tempfile.TemporaryDirectory() as tmpdirname: scheduler.save_config(lowerCamelCase__ ) UpperCAmelCase_ : List[Any] = scheduler_class.from_pretrained(lowerCamelCase__ ) # copy over dummy past residuals new_scheduler.set_timesteps(lowerCamelCase__ ) # copy over dummy past residual (must be after setting timesteps) UpperCAmelCase_ : Optional[int] = dummy_past_residuals[:] UpperCAmelCase_ : Any = scheduler.step_prk(lowerCamelCase__ ,lowerCamelCase__ ,lowerCamelCase__ ,**lowerCamelCase__ ).prev_sample UpperCAmelCase_ : Tuple = 
new_scheduler.step_prk(lowerCamelCase__ ,lowerCamelCase__ ,lowerCamelCase__ ,**lowerCamelCase__ ).prev_sample assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical" UpperCAmelCase_ : Any = scheduler.step_plms(lowerCamelCase__ ,lowerCamelCase__ ,lowerCamelCase__ ,**lowerCamelCase__ ).prev_sample UpperCAmelCase_ : Union[str, Any] = new_scheduler.step_plms(lowerCamelCase__ ,lowerCamelCase__ ,lowerCamelCase__ ,**lowerCamelCase__ ).prev_sample assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical" def UpperCamelCase__ ( self ,**_snake_case ): UpperCAmelCase_ : Optional[Any] = self.scheduler_classes[0] UpperCAmelCase_ : int = self.get_scheduler_config(**lowerCamelCase__ ) UpperCAmelCase_ : str = scheduler_class(**lowerCamelCase__ ) UpperCAmelCase_ : int = 10 UpperCAmelCase_ : Any = self.dummy_model() UpperCAmelCase_ : int = self.dummy_sample_deter scheduler.set_timesteps(lowerCamelCase__ ) for i, t in enumerate(scheduler.prk_timesteps ): UpperCAmelCase_ : str = model(lowerCamelCase__ ,lowerCamelCase__ ) UpperCAmelCase_ : Tuple = scheduler.step_prk(lowerCamelCase__ ,lowerCamelCase__ ,lowerCamelCase__ ).prev_sample for i, t in enumerate(scheduler.plms_timesteps ): UpperCAmelCase_ : Dict = model(lowerCamelCase__ ,lowerCamelCase__ ) UpperCAmelCase_ : List[str] = scheduler.step_plms(lowerCamelCase__ ,lowerCamelCase__ ,lowerCamelCase__ ).prev_sample return sample def UpperCamelCase__ ( self ): UpperCAmelCase_ : List[str] = dict(self.forward_default_kwargs ) UpperCAmelCase_ : int = kwargs.pop("num_inference_steps" ,lowerCamelCase__ ) for scheduler_class in self.scheduler_classes: UpperCAmelCase_ : Union[str, Any] = self.get_scheduler_config() UpperCAmelCase_ : int = scheduler_class(**lowerCamelCase__ ) UpperCAmelCase_ : Dict = self.dummy_sample UpperCAmelCase_ : Optional[Any] = 0.1 * sample if num_inference_steps is not None and hasattr(lowerCamelCase__ ,"set_timesteps" ): scheduler.set_timesteps(lowerCamelCase__ ) elif num_inference_steps is not None and not hasattr(lowerCamelCase__ ,"set_timesteps" ): UpperCAmelCase_ : Dict = num_inference_steps # copy over dummy past residuals (must be done after set_timesteps) UpperCAmelCase_ : Any = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05] UpperCAmelCase_ : Tuple = dummy_past_residuals[:] UpperCAmelCase_ : int = scheduler.step_prk(lowerCamelCase__ ,0 ,lowerCamelCase__ ,**lowerCamelCase__ ).prev_sample UpperCAmelCase_ : Dict = scheduler.step_prk(lowerCamelCase__ ,1 ,lowerCamelCase__ ,**lowerCamelCase__ ).prev_sample self.assertEqual(output_a.shape ,sample.shape ) self.assertEqual(output_a.shape ,output_a.shape ) UpperCAmelCase_ : int = scheduler.step_plms(lowerCamelCase__ ,0 ,lowerCamelCase__ ,**lowerCamelCase__ ).prev_sample UpperCAmelCase_ : Union[str, Any] = scheduler.step_plms(lowerCamelCase__ ,1 ,lowerCamelCase__ ,**lowerCamelCase__ ).prev_sample self.assertEqual(output_a.shape ,sample.shape ) self.assertEqual(output_a.shape ,output_a.shape ) def UpperCamelCase__ ( self ): for timesteps in [1_00, 10_00]: self.check_over_configs(num_train_timesteps=lowerCamelCase__ ) def UpperCamelCase__ ( self ): for steps_offset in [0, 1]: self.check_over_configs(steps_offset=lowerCamelCase__ ) UpperCAmelCase_ : Tuple = self.scheduler_classes[0] UpperCAmelCase_ : Tuple = self.get_scheduler_config(steps_offset=1 ) UpperCAmelCase_ : Optional[int] = scheduler_class(**lowerCamelCase__ ) scheduler.set_timesteps(10 ) assert torch.equal( scheduler.timesteps 
,torch.LongTensor( [9_01, 8_51, 8_51, 8_01, 8_01, 7_51, 7_51, 7_01, 7_01, 6_51, 6_51, 6_01, 6_01, 5_01, 4_01, 3_01, 2_01, 1_01, 1] ) ,) def UpperCamelCase__ ( self ): for beta_start, beta_end in zip([0.0001, 0.001] ,[0.002, 0.02] ): self.check_over_configs(beta_start=lowerCamelCase__ ,beta_end=lowerCamelCase__ ) def UpperCamelCase__ ( self ): for schedule in ["linear", "squaredcos_cap_v2"]: self.check_over_configs(beta_schedule=lowerCamelCase__ ) def UpperCamelCase__ ( self ): for prediction_type in ["epsilon", "v_prediction"]: self.check_over_configs(prediction_type=lowerCamelCase__ ) def UpperCamelCase__ ( self ): for t in [1, 5, 10]: self.check_over_forward(time_step=lowerCamelCase__ ) def UpperCamelCase__ ( self ): for t, num_inference_steps in zip([1, 5, 10] ,[10, 50, 1_00] ): self.check_over_forward(num_inference_steps=lowerCamelCase__ ) def UpperCamelCase__ ( self ): # earlier version of set_timesteps() caused an error indexing alpha's with inference steps as power of 3 UpperCAmelCase_ : int = 27 for scheduler_class in self.scheduler_classes: UpperCAmelCase_ : Tuple = self.dummy_sample UpperCAmelCase_ : List[str] = 0.1 * sample UpperCAmelCase_ : List[Any] = self.get_scheduler_config() UpperCAmelCase_ : Any = scheduler_class(**lowerCamelCase__ ) scheduler.set_timesteps(lowerCamelCase__ ) # before power of 3 fix, would error on first step, so we only need to do two for i, t in enumerate(scheduler.prk_timesteps[:2] ): UpperCAmelCase_ : Union[str, Any] = scheduler.step_prk(lowerCamelCase__ ,lowerCamelCase__ ,lowerCamelCase__ ).prev_sample def UpperCamelCase__ ( self ): with self.assertRaises(lowerCamelCase__ ): UpperCAmelCase_ : Optional[Any] = self.scheduler_classes[0] UpperCAmelCase_ : Union[str, Any] = self.get_scheduler_config() UpperCAmelCase_ : str = scheduler_class(**lowerCamelCase__ ) scheduler.step_plms(self.dummy_sample ,1 ,self.dummy_sample ).prev_sample def UpperCamelCase__ ( self ): UpperCAmelCase_ : Tuple = self.full_loop() UpperCAmelCase_ : str = torch.sum(torch.abs(lowerCamelCase__ ) ) UpperCAmelCase_ : Tuple = torch.mean(torch.abs(lowerCamelCase__ ) ) assert abs(result_sum.item() - 198.1318 ) < 1E-2 assert abs(result_mean.item() - 0.2580 ) < 1E-3 def UpperCamelCase__ ( self ): UpperCAmelCase_ : str = self.full_loop(prediction_type="v_prediction" ) UpperCAmelCase_ : Tuple = torch.sum(torch.abs(lowerCamelCase__ ) ) UpperCAmelCase_ : str = torch.mean(torch.abs(lowerCamelCase__ ) ) assert abs(result_sum.item() - 67.3986 ) < 1E-2 assert abs(result_mean.item() - 0.0878 ) < 1E-3 def UpperCamelCase__ ( self ): # We specify different beta, so that the first alpha is 0.99 UpperCAmelCase_ : Dict = self.full_loop(set_alpha_to_one=lowerCamelCase__ ,beta_start=0.01 ) UpperCAmelCase_ : Union[str, Any] = torch.sum(torch.abs(lowerCamelCase__ ) ) UpperCAmelCase_ : Tuple = torch.mean(torch.abs(lowerCamelCase__ ) ) assert abs(result_sum.item() - 230.0399 ) < 1E-2 assert abs(result_mean.item() - 0.2995 ) < 1E-3 def UpperCamelCase__ ( self ): # We specify different beta, so that the first alpha is 0.99 UpperCAmelCase_ : Union[str, Any] = self.full_loop(set_alpha_to_one=lowerCamelCase__ ,beta_start=0.01 ) UpperCAmelCase_ : List[Any] = torch.sum(torch.abs(lowerCamelCase__ ) ) UpperCAmelCase_ : Optional[Any] = torch.mean(torch.abs(lowerCamelCase__ ) ) assert abs(result_sum.item() - 186.9482 ) < 1E-2 assert abs(result_mean.item() - 0.2434 ) < 1E-3
71
import logging import os import sys from dataclasses import dataclass, field from typing import Optional import torch from datasets import load_dataset from torchvision.transforms import Compose, Lambda, Normalize, RandomHorizontalFlip, RandomResizedCrop, ToTensor from torchvision.transforms.functional import InterpolationMode import transformers from transformers import ( HfArgumentParser, Trainer, TrainingArguments, ViTImageProcessor, ViTMAEConfig, ViTMAEForPreTraining, ) from transformers.trainer_utils import get_last_checkpoint from transformers.utils import check_min_version, send_example_telemetry from transformers.utils.versions import require_version UpperCamelCase = logging.getLogger(__name__) # Will error if the minimal version of Transformers is not installed. Remove at your own risks. check_min_version("4.31.0") require_version("datasets>=1.8.0", "To fix: pip install -r examples/pytorch/image-pretraining/requirements.txt") @dataclass class lowerCAmelCase_ : """simple docstring""" _snake_case : Optional[str] = field( default="""cifar10""" , metadata={"""help""": """Name of a dataset from the datasets package"""} ) _snake_case : Optional[str] = field( default=lowercase , metadata={"""help""": """The configuration name of the dataset to use (via the datasets library)."""} ) _snake_case : Optional[str] = field( default=lowercase , metadata={"""help""": """The column name of the images in the files."""} ) _snake_case : Optional[str] = field(default=lowercase , metadata={"""help""": """A folder containing the training data."""} ) _snake_case : Optional[str] = field(default=lowercase , metadata={"""help""": """A folder containing the validation data."""} ) _snake_case : Optional[float] = field( default=0.15 , metadata={"""help""": """Percent to split off of train for validation."""} ) _snake_case : Optional[int] = field( default=lowercase , metadata={ """help""": ( """For debugging purposes or quicker training, truncate the number of training examples to this """ """value if set.""" ) } , ) _snake_case : Optional[int] = field( default=lowercase , metadata={ """help""": ( """For debugging purposes or quicker training, truncate the number of evaluation examples to this """ """value if set.""" ) } , ) def __a ( self :List[str] ): UpperCamelCase__ :Optional[Any] = {} if self.train_dir is not None: UpperCamelCase__ :int = self.train_dir if self.validation_dir is not None: UpperCamelCase__ :List[str] = self.validation_dir UpperCamelCase__ :Optional[int] = data_files if data_files else None @dataclass class lowerCAmelCase_ : """simple docstring""" _snake_case : str = field( default=lowercase , metadata={ """help""": ( """The model checkpoint for weights initialization.Don't set if you want to train a model from scratch.""" ) } , ) _snake_case : Optional[str] = field( default=lowercase , metadata={"""help""": """Pretrained config name or path if not the same as model_name_or_path"""} ) _snake_case : Optional[str] = field( default=lowercase , metadata={ """help""": ( """Override some existing default config settings when a model is trained from scratch. 
Example: """ """n_embd=10,resid_pdrop=0.2,scale_attn_weights=false,summary_type=cls_index""" ) } , ) _snake_case : Optional[str] = field( default=lowercase , metadata={"""help""": """Where do you want to store the pretrained models downloaded from s3"""} ) _snake_case : str = field( default="""main""" , metadata={"""help""": """The specific model version to use (can be a branch name, tag name or commit id)."""} , ) _snake_case : str = field(default=lowercase , metadata={"""help""": """Name or path of preprocessor config."""} ) _snake_case : bool = field( default=lowercase , metadata={ """help""": ( """Will use the token generated when running `huggingface-cli login` (necessary to use this script """ """with private models).""" ) } , ) _snake_case : float = field( default=0.75 , metadata={"""help""": """The ratio of the number of masked tokens in the input sequence."""} ) _snake_case : bool = field( default=lowercase , metadata={"""help""": """Whether or not to train with normalized pixel values as target."""} ) @dataclass class lowerCAmelCase_ ( lowercase ): """simple docstring""" _snake_case : float = field( default=1e-3 , metadata={"""help""": """Base learning rate: absolute_lr = base_lr * total_batch_size / 256."""} ) def A ( lowercase__ : Union[str, Any] ) -> Dict: UpperCamelCase__ :Union[str, Any] = torch.stack([example["""pixel_values"""] for example in examples] ) return {"pixel_values": pixel_values} def A ( ) -> Optional[int]: # See all possible arguments in src/transformers/training_args.py # or by passing the --help flag to this script. # We now keep distinct sets of args, for a cleaner separation of concerns. UpperCamelCase__ :Optional[int] = HfArgumentParser((ModelArguments, DataTrainingArguments, CustomTrainingArguments) ) if len(sys.argv ) == 2 and sys.argv[1].endswith(""".json""" ): # If we pass only one argument to the script and it's the path to a json file, # let's parse it to get our arguments. UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ :str = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) ) else: UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ :Union[str, Any] = parser.parse_args_into_dataclasses() # Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The # information sent is the one passed as arguments along with your Python/PyTorch versions. send_example_telemetry("""run_mae""" , lowercase__ , lowercase__ ) # Setup logging logging.basicConfig( format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""" , datefmt="""%m/%d/%Y %H:%M:%S""" , handlers=[logging.StreamHandler(sys.stdout )] , ) if training_args.should_log: # The default of training_args.log_level is passive, so we set log level at info here to have that default. transformers.utils.logging.set_verbosity_info() UpperCamelCase__ :List[str] = training_args.get_process_log_level() logger.setLevel(lowercase__ ) transformers.utils.logging.set_verbosity(lowercase__ ) transformers.utils.logging.enable_default_handler() transformers.utils.logging.enable_explicit_format() # Log on each process the small summary: logger.warning( f"""Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}""" + f"""distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}""" ) logger.info(f"""Training/evaluation parameters {training_args}""" ) # Detecting last checkpoint. 
UpperCamelCase__ :Union[str, Any] = None if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir: UpperCamelCase__ :List[str] = get_last_checkpoint(training_args.output_dir ) if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0: raise ValueError( f"""Output directory ({training_args.output_dir}) already exists and is not empty. """ """Use --overwrite_output_dir to overcome.""" ) elif last_checkpoint is not None and training_args.resume_from_checkpoint is None: logger.info( f"""Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change """ """the `--output_dir` or add `--overwrite_output_dir` to train from scratch.""" ) # Initialize our dataset. UpperCamelCase__ :Tuple = load_dataset( data_args.dataset_name , data_args.dataset_config_name , data_files=data_args.data_files , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , ) # If we don't have a validation split, split off a percentage of train as validation. UpperCamelCase__ :int = None if """validation""" in ds.keys() else data_args.train_val_split if isinstance(data_args.train_val_split , lowercase__ ) and data_args.train_val_split > 0.0: UpperCamelCase__ :Optional[Any] = ds["""train"""].train_test_split(data_args.train_val_split ) UpperCamelCase__ :Union[str, Any] = split["""train"""] UpperCamelCase__ :Any = split["""test"""] # Load pretrained model and image processor # # Distributed training: # The .from_pretrained methods guarantee that only one local process can concurrently # download model & vocab. UpperCamelCase__ :Optional[int] = { """cache_dir""": model_args.cache_dir, """revision""": model_args.model_revision, """use_auth_token""": True if model_args.use_auth_token else None, } if model_args.config_name: UpperCamelCase__ :Any = ViTMAEConfig.from_pretrained(model_args.config_name , **lowercase__ ) elif model_args.model_name_or_path: UpperCamelCase__ :Union[str, Any] = ViTMAEConfig.from_pretrained(model_args.model_name_or_path , **lowercase__ ) else: UpperCamelCase__ :Optional[Any] = ViTMAEConfig() logger.warning("""You are instantiating a new config instance from scratch.""" ) if model_args.config_overrides is not None: logger.info(f"""Overriding config: {model_args.config_overrides}""" ) config.update_from_string(model_args.config_overrides ) logger.info(f"""New config: {config}""" ) # adapt config config.update( { """mask_ratio""": model_args.mask_ratio, """norm_pix_loss""": model_args.norm_pix_loss, } ) # create image processor if model_args.image_processor_name: UpperCamelCase__ :str = ViTImageProcessor.from_pretrained(model_args.image_processor_name , **lowercase__ ) elif model_args.model_name_or_path: UpperCamelCase__ :Dict = ViTImageProcessor.from_pretrained(model_args.model_name_or_path , **lowercase__ ) else: UpperCamelCase__ :Tuple = ViTImageProcessor() # create model if model_args.model_name_or_path: UpperCamelCase__ :Any = ViTMAEForPreTraining.from_pretrained( model_args.model_name_or_path , from_tf=bool(""".ckpt""" in model_args.model_name_or_path ) , config=lowercase__ , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , ) else: logger.info("""Training new model from scratch""" ) UpperCamelCase__ :Optional[int] = ViTMAEForPreTraining(lowercase__ ) if training_args.do_train: UpperCamelCase__ :Optional[Any] = ds["""train"""].column_names else: UpperCamelCase__ :Union[str, Any] = 
ds["""validation"""].column_names if data_args.image_column_name is not None: UpperCamelCase__ :Union[str, Any] = data_args.image_column_name elif "image" in column_names: UpperCamelCase__ :Optional[Any] = """image""" elif "img" in column_names: UpperCamelCase__ :List[str] = """img""" else: UpperCamelCase__ :List[Any] = column_names[0] # transformations as done in original MAE paper # source: https://github.com/facebookresearch/mae/blob/main/main_pretrain.py if "shortest_edge" in image_processor.size: UpperCamelCase__ :List[str] = image_processor.size["""shortest_edge"""] else: UpperCamelCase__ :int = (image_processor.size["""height"""], image_processor.size["""width"""]) UpperCamelCase__ :Any = Compose( [ Lambda(lambda lowercase__ : img.convert("""RGB""" ) if img.mode != "RGB" else img ), RandomResizedCrop(lowercase__ , scale=(0.2, 1.0) , interpolation=InterpolationMode.BICUBIC ), RandomHorizontalFlip(), ToTensor(), Normalize(mean=image_processor.image_mean , std=image_processor.image_std ), ] ) def preprocess_images(lowercase__ : Tuple ): UpperCamelCase__ :List[Any] = [transforms(lowercase__ ) for image in examples[image_column_name]] return examples if training_args.do_train: if "train" not in ds: raise ValueError("""--do_train requires a train dataset""" ) if data_args.max_train_samples is not None: UpperCamelCase__ :Optional[int] = ds["""train"""].shuffle(seed=training_args.seed ).select(range(data_args.max_train_samples ) ) # Set the training transforms ds["train"].set_transform(lowercase__ ) if training_args.do_eval: if "validation" not in ds: raise ValueError("""--do_eval requires a validation dataset""" ) if data_args.max_eval_samples is not None: UpperCamelCase__ :Optional[Any] = ( ds["""validation"""].shuffle(seed=training_args.seed ).select(range(data_args.max_eval_samples ) ) ) # Set the validation transforms ds["validation"].set_transform(lowercase__ ) # Compute absolute learning rate UpperCamelCase__ :Tuple = ( training_args.train_batch_size * training_args.gradient_accumulation_steps * training_args.world_size ) if training_args.base_learning_rate is not None: UpperCamelCase__ :Any = training_args.base_learning_rate * total_train_batch_size / 256 # Initialize our trainer UpperCamelCase__ :Union[str, Any] = Trainer( model=lowercase__ , args=lowercase__ , train_dataset=ds["""train"""] if training_args.do_train else None , eval_dataset=ds["""validation"""] if training_args.do_eval else None , tokenizer=lowercase__ , data_collator=lowercase__ , ) # Training if training_args.do_train: UpperCamelCase__ :Any = None if training_args.resume_from_checkpoint is not None: UpperCamelCase__ :int = training_args.resume_from_checkpoint elif last_checkpoint is not None: UpperCamelCase__ :Dict = last_checkpoint UpperCamelCase__ :Union[str, Any] = trainer.train(resume_from_checkpoint=lowercase__ ) trainer.save_model() trainer.log_metrics("""train""" , train_result.metrics ) trainer.save_metrics("""train""" , train_result.metrics ) trainer.save_state() # Evaluation if training_args.do_eval: UpperCamelCase__ :int = trainer.evaluate() trainer.log_metrics("""eval""" , lowercase__ ) trainer.save_metrics("""eval""" , lowercase__ ) # Write model card and (optionally) push to hub UpperCamelCase__ :Optional[int] = { """tasks""": """masked-auto-encoding""", """dataset""": data_args.dataset_name, """tags""": ["""masked-auto-encoding"""], } if training_args.push_to_hub: trainer.push_to_hub(**lowercase__ ) else: trainer.create_model_card(**lowercase__ ) def A ( lowercase__ : Union[str, Any] ) -> Dict: # 
For xla_spawn (TPUs) main() if __name__ == "__main__": main()
45
0
from argparse import ArgumentParser from .add_new_model import AddNewModelCommand from .add_new_model_like import AddNewModelLikeCommand from .convert import ConvertCommand from .download import DownloadCommand from .env import EnvironmentCommand from .lfs import LfsCommands from .pt_to_tf import PTtoTFCommand from .run import RunCommand from .serving import ServeCommand from .user import UserCommands def lowerCAmelCase_ (): """simple docstring""" UpperCAmelCase_: Tuple = ArgumentParser("""Transformers CLI tool""" , usage="""transformers-cli <command> [<args>]""" ) UpperCAmelCase_: List[str] = parser.add_subparsers(help="""transformers-cli command helpers""" ) # Register commands ConvertCommand.register_subcommand(lowercase__ ) DownloadCommand.register_subcommand(lowercase__ ) EnvironmentCommand.register_subcommand(lowercase__ ) RunCommand.register_subcommand(lowercase__ ) ServeCommand.register_subcommand(lowercase__ ) UserCommands.register_subcommand(lowercase__ ) AddNewModelCommand.register_subcommand(lowercase__ ) AddNewModelLikeCommand.register_subcommand(lowercase__ ) LfsCommands.register_subcommand(lowercase__ ) PTtoTFCommand.register_subcommand(lowercase__ ) # Let's go UpperCAmelCase_: Union[str, Any] = parser.parse_args() if not hasattr(lowercase__ , """func""" ): parser.print_help() exit(1 ) # Run UpperCAmelCase_: Optional[Any] = args.func(lowercase__ ) service.run() if __name__ == "__main__": main()
556
from __future__ import annotations def A ( lowercase__ : int ) -> list[int]: UpperCamelCase__ :Union[str, Any] = [True] * limit UpperCamelCase__ :int = False UpperCamelCase__ :Optional[Any] = False UpperCamelCase__ :str = True for i in range(3 , int(limit**0.5 + 1 ) , 2 ): UpperCamelCase__ :List[Any] = i * 2 while index < limit: UpperCamelCase__ :Tuple = False UpperCamelCase__ :Tuple = index + i UpperCamelCase__ :str = [2] for i in range(3 , lowercase__ , 2 ): if is_prime[i]: primes.append(lowercase__ ) return primes def A ( lowercase__ : int = 100_0000 ) -> int: UpperCamelCase__ :Any = prime_sieve(lowercase__ ) UpperCamelCase__ :Optional[int] = 0 UpperCamelCase__ :Optional[Any] = 0 for i in range(len(lowercase__ ) ): for j in range(i + length , len(lowercase__ ) ): UpperCamelCase__ :Any = sum(primes[i:j] ) if sol >= ceiling: break if sol in primes: UpperCamelCase__ :Union[str, Any] = j - i UpperCamelCase__ :Any = sol return largest if __name__ == "__main__": print(f'''{solution() = }''')
45
0
from typing import List, Optional, Tuple, Union import torch from ...utils import logging, randn_tensor from ..pipeline_utils import AudioPipelineOutput, DiffusionPipeline __A : str = logging.get_logger(__name__) # pylint: disable=invalid-name class _SCREAMING_SNAKE_CASE ( __snake_case ): '''simple docstring''' def __init__( self : int , __lowerCamelCase : int , __lowerCamelCase : Optional[int] ): super().__init__() self.register_modules(unet=lowerCamelCase__ , scheduler=lowerCamelCase__ ) @torch.no_grad() def __call__( self : Optional[int] , __lowerCamelCase : int = 1 , __lowerCamelCase : int = 100 , __lowerCamelCase : Optional[Union[torch.Generator, List[torch.Generator]]] = None , __lowerCamelCase : Optional[float] = None , __lowerCamelCase : bool = True , ): if audio_length_in_s is None: SCREAMING_SNAKE_CASE = self.unet.config.sample_size / self.unet.config.sample_rate SCREAMING_SNAKE_CASE = audio_length_in_s * self.unet.config.sample_rate SCREAMING_SNAKE_CASE = 2 ** len(self.unet.up_blocks ) if sample_size < 3 * down_scale_factor: raise ValueError( f"{audio_length_in_s} is too small. Make sure it's bigger or equal to" f" {3 * down_scale_factor / self.unet.config.sample_rate}." ) SCREAMING_SNAKE_CASE = int(lowerCamelCase__ ) if sample_size % down_scale_factor != 0: SCREAMING_SNAKE_CASE = ( (audio_length_in_s * self.unet.config.sample_rate) // down_scale_factor + 1 ) * down_scale_factor logger.info( f"{audio_length_in_s} is increased to {sample_size / self.unet.config.sample_rate} so that it can be handled" f" by the model. It will be cut to {original_sample_size / self.unet.config.sample_rate} after the denoising" " process." ) SCREAMING_SNAKE_CASE = int(lowerCamelCase__ ) SCREAMING_SNAKE_CASE = next(iter(self.unet.parameters() ) ).dtype SCREAMING_SNAKE_CASE = (batch_size, self.unet.config.in_channels, sample_size) if isinstance(lowerCamelCase__ , lowerCamelCase__ ) and len(lowerCamelCase__ ) != batch_size: raise ValueError( f"You have passed a list of generators of length {len(lowerCamelCase__ )}, but requested an effective batch" f" size of {batch_size}. Make sure the batch size matches the length of the generators." ) SCREAMING_SNAKE_CASE = randn_tensor(lowerCamelCase__ , generator=lowerCamelCase__ , device=self.device , dtype=lowerCamelCase__ ) # set step values self.scheduler.set_timesteps(lowerCamelCase__ , device=audio.device ) SCREAMING_SNAKE_CASE = self.scheduler.timesteps.to(lowerCamelCase__ ) for t in self.progress_bar(self.scheduler.timesteps ): # 1. predict noise model_output SCREAMING_SNAKE_CASE = self.unet(lowerCamelCase__ , lowerCamelCase__ ).sample # 2. compute previous image: x_t -> t_t-1 SCREAMING_SNAKE_CASE = self.scheduler.step(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ).prev_sample SCREAMING_SNAKE_CASE = audio.clamp(-1 , 1 ).float().cpu().numpy() SCREAMING_SNAKE_CASE = audio[:, :, :original_sample_size] if not return_dict: return (audio,) return AudioPipelineOutput(audios=lowerCamelCase__ )
16
import unittest from transformers import GPTNeoXJapaneseConfig, is_torch_available from transformers.models.gpt_neox_japanese.tokenization_gpt_neox_japanese import GPTNeoXJapaneseTokenizer from transformers.testing_utils import require_torch, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import GPTNeoXJapaneseForCausalLM, GPTNeoXJapaneseModel class lowerCAmelCase_ : """simple docstring""" def __init__( self :Optional[Any] , lowerCamelCase__ :Optional[int] , lowerCamelCase__ :Tuple=13 , lowerCamelCase__ :Tuple=7 , lowerCamelCase__ :Optional[Any]=True , lowerCamelCase__ :Union[str, Any]=True , lowerCamelCase__ :Optional[int]=True , lowerCamelCase__ :List[Any]=True , lowerCamelCase__ :List[str]=99 , lowerCamelCase__ :int=32 , lowerCamelCase__ :List[Any]=5 , lowerCamelCase__ :Tuple=4 , lowerCamelCase__ :List[Any]=4 , lowerCamelCase__ :str="gelu" , lowerCamelCase__ :Optional[Any]=0.0 , lowerCamelCase__ :Optional[int]=0.1 , lowerCamelCase__ :str=True , lowerCamelCase__ :Dict=5_12 , lowerCamelCase__ :Optional[Any]=16 , lowerCamelCase__ :Optional[Any]=2 , lowerCamelCase__ :Union[str, Any]=0.02 , lowerCamelCase__ :Union[str, Any]=3 , lowerCamelCase__ :int=4 , lowerCamelCase__ :str=None , ): UpperCamelCase__ :Optional[Any] = parent UpperCamelCase__ :Dict = batch_size UpperCamelCase__ :Tuple = seq_length UpperCamelCase__ :Dict = is_training UpperCamelCase__ :List[str] = use_input_mask UpperCamelCase__ :Optional[Any] = use_token_type_ids UpperCamelCase__ :Tuple = use_labels UpperCamelCase__ :int = vocab_size UpperCamelCase__ :Tuple = hidden_size UpperCamelCase__ :Optional[Any] = num_hidden_layers UpperCamelCase__ :int = num_attention_heads UpperCamelCase__ :Optional[int] = intermediate_multiple_size UpperCamelCase__ :Optional[Any] = hidden_act UpperCamelCase__ :Optional[int] = hidden_dropout UpperCamelCase__ :List[Any] = attention_dropout UpperCamelCase__ :List[str] = weight_tying UpperCamelCase__ :List[str] = max_position_embeddings UpperCamelCase__ :Dict = type_vocab_size UpperCamelCase__ :List[Any] = type_sequence_label_size UpperCamelCase__ :List[str] = initializer_range UpperCamelCase__ :int = num_labels UpperCamelCase__ :Dict = num_choices UpperCamelCase__ :Any = scope def __a ( self :Any ): UpperCamelCase__ :Tuple = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) UpperCamelCase__ :str = None if self.use_input_mask: UpperCamelCase__ :Optional[int] = random_attention_mask([self.batch_size, self.seq_length] ) UpperCamelCase__ :Union[str, Any] = None if self.use_labels: UpperCamelCase__ :Tuple = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) UpperCamelCase__ :Optional[Any] = self.get_config() return config, input_ids, input_mask, token_labels def __a ( self :Union[str, Any] ): return GPTNeoXJapaneseConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_multiple_size=self.intermediate_multiple_size , hidden_act=self.hidden_act , hidden_dropout=self.hidden_dropout , attention_dropout=self.attention_dropout , weight_tying=self.weight_tying , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=lowerCamelCase__ , initializer_range=self.initializer_range , ) def __a ( self 
:Union[str, Any] ): UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ :Optional[Any] = self.prepare_config_and_inputs() UpperCamelCase__ :Optional[int] = True return config, input_ids, input_mask, token_labels def __a ( self :List[str] , lowerCamelCase__ :List[Any] , lowerCamelCase__ :Any , lowerCamelCase__ :Any ): UpperCamelCase__ :Union[str, Any] = GPTNeoXJapaneseModel(config=lowerCamelCase__ ) model.to(lowerCamelCase__ ) model.eval() UpperCamelCase__ :Tuple = model(lowerCamelCase__ , attention_mask=lowerCamelCase__ ) UpperCamelCase__ :Optional[Any] = model(lowerCamelCase__ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def __a ( self :Dict , lowerCamelCase__ :Dict , lowerCamelCase__ :Optional[Any] , lowerCamelCase__ :List[Any] ): UpperCamelCase__ :List[str] = True UpperCamelCase__ :int = GPTNeoXJapaneseModel(lowerCamelCase__ ) model.to(lowerCamelCase__ ) model.eval() UpperCamelCase__ :Union[str, Any] = model(lowerCamelCase__ , attention_mask=lowerCamelCase__ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def __a ( self :List[Any] , lowerCamelCase__ :Tuple , lowerCamelCase__ :Optional[Any] , lowerCamelCase__ :Any , lowerCamelCase__ :Optional[Any] ): UpperCamelCase__ :Any = GPTNeoXJapaneseForCausalLM(config=lowerCamelCase__ ) model.to(lowerCamelCase__ ) model.eval() UpperCamelCase__ :Tuple = model(lowerCamelCase__ , attention_mask=lowerCamelCase__ , labels=lowerCamelCase__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def __a ( self :Any , lowerCamelCase__ :Optional[int] , lowerCamelCase__ :Optional[Any] , lowerCamelCase__ :List[str] ): UpperCamelCase__ :Union[str, Any] = True UpperCamelCase__ :List[str] = GPTNeoXJapaneseForCausalLM(config=lowerCamelCase__ ) model.to(lowerCamelCase__ ) model.eval() # first forward pass UpperCamelCase__ :Optional[Any] = model(lowerCamelCase__ , attention_mask=lowerCamelCase__ , use_cache=lowerCamelCase__ ) UpperCamelCase__ :List[Any] = outputs.past_key_values # create hypothetical multiple next token and extent to next_input_ids UpperCamelCase__ :List[Any] = ids_tensor((self.batch_size, 3) , config.vocab_size ) UpperCamelCase__ :Optional[Any] = ids_tensor((self.batch_size, 3) , vocab_size=2 ) # append to next input_ids and UpperCamelCase__ :Union[str, Any] = torch.cat([input_ids, next_tokens] , dim=-1 ) UpperCamelCase__ :Optional[int] = torch.cat([input_mask, next_mask] , dim=-1 ) UpperCamelCase__ :Union[str, Any] = model(lowerCamelCase__ , attention_mask=lowerCamelCase__ , output_hidden_states=lowerCamelCase__ ) UpperCamelCase__ :Optional[int] = output_from_no_past["""hidden_states"""][0] UpperCamelCase__ :Union[str, Any] = model( lowerCamelCase__ , attention_mask=lowerCamelCase__ , past_key_values=lowerCamelCase__ , output_hidden_states=lowerCamelCase__ , )["""hidden_states"""][0] # select random slice UpperCamelCase__ :int = ids_tensor((1,) , output_from_past.shape[-1] ).item() UpperCamelCase__ :str = output_from_no_past[:, -3:, random_slice_idx].detach() UpperCamelCase__ :Any = output_from_past[:, :, random_slice_idx].detach() self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] ) # test that outputs are equal for slice self.parent.assertTrue(torch.allclose(lowerCamelCase__ , lowerCamelCase__ , atol=1e-3 ) ) def __a ( self :Tuple ): UpperCamelCase__ :int = self.prepare_config_and_inputs() UpperCamelCase__ , 
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ :List[Any] = config_and_inputs UpperCamelCase__ :Any = {"""input_ids""": input_ids, """attention_mask""": input_mask} return config, inputs_dict @require_torch class lowerCAmelCase_ ( lowercase , lowercase , unittest.TestCase ): """simple docstring""" _snake_case : Dict = (GPTNeoXJapaneseModel, GPTNeoXJapaneseForCausalLM) if is_torch_available() else () _snake_case : int = (GPTNeoXJapaneseForCausalLM,) if is_torch_available() else () _snake_case : str = ( {"""feature-extraction""": GPTNeoXJapaneseModel, """text-generation""": GPTNeoXJapaneseForCausalLM} if is_torch_available() else {} ) _snake_case : Union[str, Any] = False _snake_case : Dict = False _snake_case : List[str] = False _snake_case : Optional[int] = False def __a ( self :List[Any] ): UpperCamelCase__ :Tuple = GPTNeoXJapaneseModelTester(self ) UpperCamelCase__ :Optional[Any] = ConfigTester(self , config_class=lowerCamelCase__ , hidden_size=37 ) def __a ( self :Dict ): self.config_tester.run_common_tests() def __a ( self :Any ): UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ :Optional[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) def __a ( self :Any ): UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ :Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_decoder() self.model_tester.create_and_check_model_as_decoder(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) def __a ( self :Union[str, Any] ): # This regression test was failing with PyTorch < 1.3 UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ :List[str] = self.model_tester.prepare_config_and_inputs_for_decoder() UpperCamelCase__ :Dict = None self.model_tester.create_and_check_model_as_decoder(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) def __a ( self :List[str] ): UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ :int = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_decoder_model_past_large_inputs(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) def __a ( self :Union[str, Any] ): UpperCamelCase__ :int = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_causal_lm(*lowerCamelCase__ ) @slow def __a ( self :int ): UpperCamelCase__ :int = """abeja/gpt-neox-japanese-2.7b""" UpperCamelCase__ :List[Any] = ["""データサイエンティストとは、""", """100年後に必要とされる会社は、""", """フルリモートの環境で働くために必要なことは、""", """国境の長いトンネルを抜けると""", """美味しい日本食といえば、"""] UpperCamelCase__ :Union[str, Any] = [ """データサイエンティストとは、データを分析し、ビジネスに役立つ知見を導き出す専門家のことです。""", """100年後に必要とされる会社は、「人」が中心の会社です。""", """フルリモートの環境で働くために必要なことは、「自分の時間をコントロールする」ことです。""", """国境の長いトンネルを抜けると、そこは雪国だった。""", """美味しい日本食といえば、やっぱりお寿司ですよね。""", ] UpperCamelCase__ :Any = GPTNeoXJapaneseTokenizer.from_pretrained(lowerCamelCase__ ) UpperCamelCase__ :List[Any] = GPTNeoXJapaneseForCausalLM.from_pretrained(lowerCamelCase__ ) UpperCamelCase__ :Optional[Any] = [] for prompt in prompts: UpperCamelCase__ :str = tokenizer(lowerCamelCase__ , return_tensors="""pt""" ).input_ids UpperCamelCase__ :Union[str, Any] = model.generate(lowerCamelCase__ , max_length=50 ) UpperCamelCase__ :Dict = tokenizer.batch_decode(lowerCamelCase__ , skip_special_tokens=lowerCamelCase__ ) predicted_outputs += generated_string self.assertListEqual(lowerCamelCase__ , lowerCamelCase__ )
45
0
from string import ascii_uppercase lowercase_ = {str(ord(c) - 5_5): c for c in ascii_uppercase} def a ( A__ : int , A__ : int ) -> str: """simple docstring""" if isinstance(lowercase__ , lowercase__ ): raise TypeError('int() can\'t convert non-string with explicit base' ) if num < 0: raise ValueError('parameter must be positive int' ) if isinstance(lowercase__ , lowercase__ ): raise TypeError('\'str\' object cannot be interpreted as an integer' ) if isinstance(lowercase__ , lowercase__ ): raise TypeError('\'float\' object cannot be interpreted as an integer' ) if base in (0, 1): raise ValueError('base must be >= 2' ) if base > 36: raise ValueError('base must be <= 36' ) _lowercase ="""""" _lowercase =0 _lowercase =0 while div != 1: _lowercase =divmod(lowercase__ , lowercase__ ) if base >= 11 and 9 < mod < 36: _lowercase =ALPHABET_VALUES[str(lowercase__ )] else: _lowercase =str(lowercase__ ) new_value += actual_value _lowercase =num // base _lowercase =div if div == 0: return str(new_value[::-1] ) elif div == 1: new_value += str(lowercase__ ) return str(new_value[::-1] ) return new_value[::-1] if __name__ == "__main__": import doctest doctest.testmod() for base in range(2, 3_7): for num in range(1_0_0_0): assert int(decimal_to_any(num, base), base) == num, ( num, base, decimal_to_any(num, base), int(decimal_to_any(num, base), base), )
291
import numpy as np from matplotlib import pyplot as plt from sklearn.datasets import load_iris from sklearn.metrics import ConfusionMatrixDisplay from sklearn.model_selection import train_test_split from xgboost import XGBClassifier def A ( lowercase__ : dict ) -> tuple: return (data["data"], data["target"]) def A ( lowercase__ : np.ndarray , lowercase__ : np.ndarray ) -> XGBClassifier: UpperCamelCase__ :Tuple = XGBClassifier() classifier.fit(lowercase__ , lowercase__ ) return classifier def A ( ) -> None: UpperCamelCase__ :str = load_iris() UpperCamelCase__ , UpperCamelCase__ :int = data_handling(lowercase__ ) UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ :int = train_test_split( lowercase__ , lowercase__ , test_size=0.25 ) UpperCamelCase__ :Optional[int] = iris["""target_names"""] # Create an XGBoost Classifier from the training data UpperCamelCase__ :Optional[Any] = xgboost(lowercase__ , lowercase__ ) # Display the confusion matrix of the classifier with both training and test sets ConfusionMatrixDisplay.from_estimator( lowercase__ , lowercase__ , lowercase__ , display_labels=lowercase__ , cmap="""Blues""" , normalize="""true""" , ) plt.title("""Normalized Confusion Matrix - IRIS Dataset""" ) plt.show() if __name__ == "__main__": import doctest doctest.testmod(verbose=True) main()
45
0
"""simple docstring""" import copy from typing import TYPE_CHECKING, Any, Mapping, Optional, OrderedDict from packaging import version from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging from ..auto.configuration_auto import AutoConfig if TYPE_CHECKING: from ... import PreTrainedTokenizerBase, TensorType __lowerCamelCase = logging.get_logger(__name__) class _snake_case ( A__ ): '''simple docstring''' UpperCamelCase__ ="""vision-encoder-decoder""" UpperCamelCase__ =True def __init__( self : List[Any] , **snake_case : List[str] ): super().__init__(**lowerCamelCase__ ) if "encoder" not in kwargs or "decoder" not in kwargs: raise ValueError( f'A configuraton of type {self.model_type} cannot be instantiated because ' f'not both `encoder` and `decoder` sub-configurations are passed, but only {kwargs}' ) UpperCAmelCase_ :List[Any] = kwargs.pop('''encoder''' ) UpperCAmelCase_ :Any = encoder_config.pop('''model_type''' ) UpperCAmelCase_ :Optional[int] = kwargs.pop('''decoder''' ) UpperCAmelCase_ :Optional[Any] = decoder_config.pop('''model_type''' ) UpperCAmelCase_ :Optional[Any] = AutoConfig.for_model(lowerCamelCase__ , **lowerCamelCase__ ) UpperCAmelCase_ :List[Any] = AutoConfig.for_model(lowerCamelCase__ , **lowerCamelCase__ ) UpperCAmelCase_ :Dict = True @classmethod def snake_case_ ( cls : Union[str, Any] , snake_case : PretrainedConfig , snake_case : PretrainedConfig , **snake_case : List[Any] ): logger.info('''Setting `config.is_decoder=True` and `config.add_cross_attention=True` for decoder_config''' ) UpperCAmelCase_ :Dict = True UpperCAmelCase_ :Optional[Any] = True return cls(encoder=encoder_config.to_dict() , decoder=decoder_config.to_dict() , **lowerCamelCase__ ) def snake_case_ ( self : Dict ): UpperCAmelCase_ :Any = copy.deepcopy(self.__dict__ ) UpperCAmelCase_ :str = self.encoder.to_dict() UpperCAmelCase_ :Any = self.decoder.to_dict() UpperCAmelCase_ :List[Any] = self.__class__.model_type return output class _snake_case ( A__ ): '''simple docstring''' UpperCamelCase__ =version.parse("""1.11""" ) @property def snake_case_ ( self : int ): return OrderedDict( [ ('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}), ] ) @property def snake_case_ ( self : Union[str, Any] ): return 1e-4 @property def snake_case_ ( self : List[Any] ): return OrderedDict({'''last_hidden_state''': {0: '''batch''', 1: '''encoder_sequence'''}} ) class _snake_case ( A__ ): '''simple docstring''' @property def snake_case_ ( self : Optional[Any] ): UpperCAmelCase_ :str = OrderedDict() UpperCAmelCase_ :Union[str, Any] = {0: """batch""", 1: """past_decoder_sequence + sequence"""} UpperCAmelCase_ :List[str] = {0: """batch""", 1: """past_decoder_sequence + sequence"""} UpperCAmelCase_ :str = {0: """batch""", 1: """encoder_sequence"""} return common_inputs def snake_case_ ( self : int , snake_case : "PreTrainedTokenizerBase" , snake_case : int = -1 , snake_case : int = -1 , snake_case : bool = False , snake_case : Optional["TensorType"] = None , ): import torch UpperCAmelCase_ :Dict = OrderedDict() UpperCAmelCase_ :Tuple = super().generate_dummy_inputs( lowerCamelCase__ , batch_size=lowerCamelCase__ , seq_length=lowerCamelCase__ , is_pair=lowerCamelCase__ , framework=lowerCamelCase__ ) UpperCAmelCase_ :str = dummy_input["""input_ids"""].shape UpperCAmelCase_ :str = (batch, encoder_sequence, self._config.encoder_hidden_size) UpperCAmelCase_ :Optional[Any] = dummy_input.pop('''input_ids''' ) UpperCAmelCase_ :List[Any] = 
dummy_input.pop('''attention_mask''' ) UpperCAmelCase_ :str = torch.zeros(lowerCamelCase__ ) return common_inputs class _snake_case ( A__ ): '''simple docstring''' @property def snake_case_ ( self : List[str] ): pass def snake_case_ ( self : List[str] , snake_case : PretrainedConfig ): return VisionEncoderDecoderEncoderOnnxConfig(lowerCamelCase__ ) def snake_case_ ( self : Dict , snake_case : PretrainedConfig , snake_case : PretrainedConfig , snake_case : str = "default" ): UpperCAmelCase_ :Tuple = encoder_config.hidden_size return VisionEncoderDecoderDecoderOnnxConfig(lowerCamelCase__ , lowerCamelCase__ )
608
import multiprocessing import time from arguments import PretokenizationArguments from datasets import load_dataset from transformers import AutoTokenizer, HfArgumentParser def A ( lowercase__ : Optional[int] ) -> Optional[Any]: UpperCamelCase__ :Union[str, Any] = {} UpperCamelCase__ :Optional[int] = tokenizer(example["""content"""] , truncation=lowercase__ )["""input_ids"""] UpperCamelCase__ :int = len(example["""content"""] ) / len(output["""input_ids"""] ) return output UpperCamelCase = HfArgumentParser(PretokenizationArguments) UpperCamelCase = parser.parse_args() if args.num_workers is None: UpperCamelCase = multiprocessing.cpu_count() UpperCamelCase = AutoTokenizer.from_pretrained(args.tokenizer_dir) UpperCamelCase = time.time() UpperCamelCase = load_dataset(args.dataset_name, split="train") print(f'''Dataset loaded in {time.time()-t_start:.2f}s''') UpperCamelCase = time.time() UpperCamelCase = ds.map( tokenize, num_proc=args.num_workers, remove_columns=[ "repo_name", "path", "copies", "size", "content", "license", "hash", "line_mean", "line_max", "alpha_frac", "autogenerated", ], ) print(f'''Dataset tokenized in {time.time()-t_start:.2f}s''') UpperCamelCase = time.time() ds.push_to_hub(args.tokenized_data_repo) print(f'''Data pushed to the hub in {time.time()-t_start:.2f}s''')
45
0
def A__ ( _a : int ): '''simple docstring''' if num < 0: return False snake_case__ : int =num snake_case__ : int =0 while num > 0: snake_case__ : Optional[int] =rev_num * 10 + (num % 10) num //= 10 return num_copy == rev_num if __name__ == "__main__": import doctest doctest.testmod()
385
def A ( lowercase__ : int ) -> Optional[Any]: stooge(lowercase__ , 0 , len(lowercase__ ) - 1 ) return arr def A ( lowercase__ : Union[str, Any] , lowercase__ : Dict , lowercase__ : str ) -> List[str]: if i >= h: return # If first element is smaller than the last then swap them if arr[i] > arr[h]: UpperCamelCase__ , UpperCamelCase__ :List[str] = arr[h], arr[i] # If there are more than 2 elements in the array if h - i + 1 > 2: UpperCamelCase__ :Optional[int] = (int)((h - i + 1) / 3 ) # Recursively sort first 2/3 elements stooge(lowercase__ , lowercase__ , (h - t) ) # Recursively sort last 2/3 elements stooge(lowercase__ , i + t , (lowercase__) ) # Recursively sort first 2/3 elements stooge(lowercase__ , lowercase__ , (h - t) ) if __name__ == "__main__": UpperCamelCase = input("Enter numbers separated by a comma:\n").strip() UpperCamelCase = [int(item) for item in user_input.split(",")] print(stooge_sort(unsorted))
45
0
import unittest import numpy as np from transformers import RobertaConfig, is_flax_available from transformers.testing_utils import require_flax, slow from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask if is_flax_available(): from transformers.models.roberta.modeling_flax_roberta import ( FlaxRobertaForCausalLM, FlaxRobertaForMaskedLM, FlaxRobertaForMultipleChoice, FlaxRobertaForQuestionAnswering, FlaxRobertaForSequenceClassification, FlaxRobertaForTokenClassification, FlaxRobertaModel, ) class lowerCamelCase (unittest.TestCase ): """simple docstring""" def __init__( self : Tuple, _UpperCAmelCase : str, _UpperCAmelCase : List[str]=1_3, _UpperCAmelCase : Any=7, _UpperCAmelCase : Dict=True, _UpperCAmelCase : Union[str, Any]=True, _UpperCAmelCase : Union[str, Any]=True, _UpperCAmelCase : Optional[int]=True, _UpperCAmelCase : List[str]=9_9, _UpperCAmelCase : Optional[Any]=3_2, _UpperCAmelCase : Any=5, _UpperCAmelCase : Any=4, _UpperCAmelCase : List[Any]=3_7, _UpperCAmelCase : Optional[int]="gelu", _UpperCAmelCase : Tuple=0.1, _UpperCAmelCase : List[Any]=0.1, _UpperCAmelCase : Tuple=5_1_2, _UpperCAmelCase : int=1_6, _UpperCAmelCase : Optional[Any]=2, _UpperCAmelCase : Optional[int]=0.02, _UpperCAmelCase : Optional[Any]=4, ) -> int: """simple docstring""" SCREAMING_SNAKE_CASE__ : Union[str, Any] = parent SCREAMING_SNAKE_CASE__ : Optional[Any] = batch_size SCREAMING_SNAKE_CASE__ : Optional[Any] = seq_length SCREAMING_SNAKE_CASE__ : str = is_training SCREAMING_SNAKE_CASE__ : int = use_attention_mask SCREAMING_SNAKE_CASE__ : Dict = use_token_type_ids SCREAMING_SNAKE_CASE__ : int = use_labels SCREAMING_SNAKE_CASE__ : List[str] = vocab_size SCREAMING_SNAKE_CASE__ : Optional[int] = hidden_size SCREAMING_SNAKE_CASE__ : int = num_hidden_layers SCREAMING_SNAKE_CASE__ : Optional[int] = num_attention_heads SCREAMING_SNAKE_CASE__ : Dict = intermediate_size SCREAMING_SNAKE_CASE__ : str = hidden_act SCREAMING_SNAKE_CASE__ : List[Any] = hidden_dropout_prob SCREAMING_SNAKE_CASE__ : List[str] = attention_probs_dropout_prob SCREAMING_SNAKE_CASE__ : Optional[int] = max_position_embeddings SCREAMING_SNAKE_CASE__ : Dict = type_vocab_size SCREAMING_SNAKE_CASE__ : Dict = type_sequence_label_size SCREAMING_SNAKE_CASE__ : Dict = initializer_range SCREAMING_SNAKE_CASE__ : Union[str, Any] = num_choices def A_ ( self : Optional[Any] ) -> int: """simple docstring""" SCREAMING_SNAKE_CASE__ : Any = ids_tensor([self.batch_size, self.seq_length], self.vocab_size ) SCREAMING_SNAKE_CASE__ : Optional[Any] = None if self.use_attention_mask: SCREAMING_SNAKE_CASE__ : List[Any] = random_attention_mask([self.batch_size, self.seq_length] ) SCREAMING_SNAKE_CASE__ : List[Any] = None if self.use_token_type_ids: SCREAMING_SNAKE_CASE__ : Any = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size ) SCREAMING_SNAKE_CASE__ : Optional[int] = RobertaConfig( vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, is_decoder=lowerCamelCase__, initializer_range=self.initializer_range, ) return config, input_ids, token_type_ids, attention_mask def A_ ( self : int ) -> Optional[int]: """simple docstring""" 
SCREAMING_SNAKE_CASE__ : Optional[Any] = self.prepare_config_and_inputs() SCREAMING_SNAKE_CASE__ : Tuple = config_and_inputs SCREAMING_SNAKE_CASE__ : Union[str, Any] = {"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """attention_mask""": attention_mask} return config, inputs_dict def A_ ( self : List[Any] ) -> Optional[int]: """simple docstring""" SCREAMING_SNAKE_CASE__ : Dict = self.prepare_config_and_inputs() SCREAMING_SNAKE_CASE__ : List[Any] = config_and_inputs SCREAMING_SNAKE_CASE__ : Optional[Any] = True SCREAMING_SNAKE_CASE__ : Any = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] ) SCREAMING_SNAKE_CASE__ : List[str] = ids_tensor([self.batch_size, self.seq_length], vocab_size=2 ) return ( config, input_ids, token_type_ids, encoder_hidden_states, encoder_attention_mask, ) @require_flax class lowerCamelCase (__lowerCamelCase , unittest.TestCase ): """simple docstring""" UpperCAmelCase_ = True UpperCAmelCase_ = ( ( FlaxRobertaModel, FlaxRobertaForCausalLM, FlaxRobertaForMaskedLM, FlaxRobertaForSequenceClassification, FlaxRobertaForTokenClassification, FlaxRobertaForMultipleChoice, FlaxRobertaForQuestionAnswering, ) if is_flax_available() else () ) def A_ ( self : Union[str, Any] ) -> Dict: """simple docstring""" SCREAMING_SNAKE_CASE__ : Optional[int] = FlaxRobertaModelTester(self ) @slow def A_ ( self : Any ) -> Union[str, Any]: """simple docstring""" for model_class_name in self.all_model_classes: SCREAMING_SNAKE_CASE__ : int = model_class_name.from_pretrained("roberta-base", from_pt=lowerCamelCase__ ) SCREAMING_SNAKE_CASE__ : Optional[Any] = model(np.ones((1, 1) ) ) self.assertIsNotNone(lowerCamelCase__ )
663
import argparse import json import os from tensorflow.core.protobuf.saved_model_pba import SavedModel # All paths are set with the intent you should run this script from the root of the repo with the command # python utils/check_copies.py UpperCamelCase = "." # Internal TensorFlow ops that can be safely ignored (mostly specific to a saved model) UpperCamelCase = [ "Assert", "AssignVariableOp", "EmptyTensorList", "MergeV2Checkpoints", "ReadVariableOp", "ResourceGather", "RestoreV2", "SaveV2", "ShardedFilename", "StatefulPartitionedCall", "StaticRegexFullMatch", "VarHandleOp", ] def A ( lowercase__ : Tuple , lowercase__ : Optional[Any] , lowercase__ : Dict ) -> List[Any]: UpperCamelCase__ :str = SavedModel() UpperCamelCase__ :List[str] = [] with open(os.path.join(lowercase__ , """utils""" , """tf_ops""" , """onnx.json""" ) ) as f: UpperCamelCase__ :str = json.load(lowercase__ )["""opsets"""] for i in range(1 , opset + 1 ): onnx_ops.extend(onnx_opsets[str(lowercase__ )] ) with open(lowercase__ , """rb""" ) as f: saved_model.ParseFromString(f.read() ) UpperCamelCase__ :Tuple = set() # Iterate over every metagraph in case there is more than one (a saved model can contain multiple graphs) for meta_graph in saved_model.meta_graphs: # Add operations in the graph definition model_op_names.update(node.op for node in meta_graph.graph_def.node ) # Go through the functions in the graph definition for func in meta_graph.graph_def.library.function: # Add operations in each function model_op_names.update(node.op for node in func.node_def ) # Convert to list, sorted if you want UpperCamelCase__ :Union[str, Any] = sorted(lowercase__ ) UpperCamelCase__ :List[Any] = [] for op in model_op_names: if op not in onnx_ops and op not in INTERNAL_OPS: incompatible_ops.append(lowercase__ ) if strict and len(lowercase__ ) > 0: raise Exception(f"""Found the following incompatible ops for the opset {opset}:\n""" + incompatible_ops ) elif len(lowercase__ ) > 0: print(f"""Found the following incompatible ops for the opset {opset}:""" ) print(*lowercase__ , sep="""\n""" ) else: print(f"""The saved model {saved_model_path} can properly be converted with ONNX.""" ) if __name__ == "__main__": UpperCamelCase = argparse.ArgumentParser() parser.add_argument("--saved_model_path", help="Path of the saved model to check (the .pb file).") parser.add_argument( "--opset", default=12, type=int, help="The ONNX opset against which the model has to be tested." ) parser.add_argument( "--framework", choices=["onnx"], default="onnx", help="Frameworks against which to test the saved model." ) parser.add_argument( "--strict", action="store_true", help="Whether make the checking strict (raise errors) or not (raise warnings)" ) UpperCamelCase = parser.parse_args() if args.framework == "onnx": onnx_compliancy(args.saved_model_path, args.strict, args.opset)
45
0
"""simple docstring""" from __future__ import annotations import copy import inspect import unittest import numpy as np from transformers import is_tf_available, is_vision_available from transformers.models.auto import get_values from transformers.testing_utils import require_tf, slow from transformers.utils import cached_property from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import ( TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST, TF_MODEL_FOR_MULTIPLE_CHOICE_MAPPING, TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING, TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING, TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING, LayoutLMvaConfig, TFLayoutLMvaForQuestionAnswering, TFLayoutLMvaForSequenceClassification, TFLayoutLMvaForTokenClassification, TFLayoutLMvaModel, ) if is_vision_available(): from PIL import Image from transformers import LayoutLMvaImageProcessor class lowerCAmelCase__ : '''simple docstring''' def __init__( self : Union[str, Any] , lowercase_ : Tuple , lowercase_ : List[str]=2 , lowercase_ : List[str]=3 , lowercase_ : List[str]=4 , lowercase_ : str=2 , lowercase_ : Optional[int]=7 , lowercase_ : List[Any]=True , lowercase_ : Optional[Any]=True , lowercase_ : Union[str, Any]=True , lowercase_ : Any=True , lowercase_ : Dict=99 , lowercase_ : Optional[Any]=36 , lowercase_ : str=2 , lowercase_ : List[Any]=4 , lowercase_ : Optional[Any]=37 , lowercase_ : Optional[int]="gelu" , lowercase_ : Any=0.1 , lowercase_ : List[Any]=0.1 , lowercase_ : List[Any]=512 , lowercase_ : str=16 , lowercase_ : Tuple=2 , lowercase_ : int=0.02 , lowercase_ : List[Any]=6 , lowercase_ : List[str]=6 , lowercase_ : Optional[int]=3 , lowercase_ : Optional[int]=4 , lowercase_ : int=None , lowercase_ : Optional[Any]=1000 , ): '''simple docstring''' SCREAMING_SNAKE_CASE_ : Any = parent SCREAMING_SNAKE_CASE_ : Union[str, Any] = batch_size SCREAMING_SNAKE_CASE_ : Dict = num_channels SCREAMING_SNAKE_CASE_ : Optional[Any] = image_size SCREAMING_SNAKE_CASE_ : Union[str, Any] = patch_size SCREAMING_SNAKE_CASE_ : Union[str, Any] = is_training SCREAMING_SNAKE_CASE_ : str = use_input_mask SCREAMING_SNAKE_CASE_ : int = use_token_type_ids SCREAMING_SNAKE_CASE_ : int = use_labels SCREAMING_SNAKE_CASE_ : List[Any] = vocab_size SCREAMING_SNAKE_CASE_ : List[str] = hidden_size SCREAMING_SNAKE_CASE_ : List[Any] = num_hidden_layers SCREAMING_SNAKE_CASE_ : List[str] = num_attention_heads SCREAMING_SNAKE_CASE_ : Tuple = intermediate_size SCREAMING_SNAKE_CASE_ : Any = hidden_act SCREAMING_SNAKE_CASE_ : Optional[int] = hidden_dropout_prob SCREAMING_SNAKE_CASE_ : Tuple = attention_probs_dropout_prob SCREAMING_SNAKE_CASE_ : Dict = max_position_embeddings SCREAMING_SNAKE_CASE_ : Tuple = type_vocab_size SCREAMING_SNAKE_CASE_ : Union[str, Any] = type_sequence_label_size SCREAMING_SNAKE_CASE_ : int = initializer_range SCREAMING_SNAKE_CASE_ : List[Any] = coordinate_size SCREAMING_SNAKE_CASE_ : Tuple = shape_size SCREAMING_SNAKE_CASE_ : Dict = num_labels SCREAMING_SNAKE_CASE_ : str = num_choices SCREAMING_SNAKE_CASE_ : Tuple = scope SCREAMING_SNAKE_CASE_ : str = range_bbox # LayoutLMv3's sequence length equals the number of text tokens + number of patches + 1 (we add 1 for the CLS token) SCREAMING_SNAKE_CASE_ : List[str] = text_seq_length SCREAMING_SNAKE_CASE_ : List[str] = (image_size // patch_size) ** 2 + 1 SCREAMING_SNAKE_CASE_ : 
Dict = self.text_seq_length + self.image_seq_length def _SCREAMING_SNAKE_CASE ( self : Tuple): '''simple docstring''' SCREAMING_SNAKE_CASE_ : Optional[int] = ids_tensor([self.batch_size, self.text_seq_length] , self.vocab_size) SCREAMING_SNAKE_CASE_ : int = ids_tensor([self.batch_size, self.text_seq_length, 4] , self.range_bbox) SCREAMING_SNAKE_CASE_ : str = bbox.numpy() # Ensure that bbox is legal for i in range(bbox.shape[0]): for j in range(bbox.shape[1]): if bbox[i, j, 3] < bbox[i, j, 1]: SCREAMING_SNAKE_CASE_ : List[str] = bbox[i, j, 3] SCREAMING_SNAKE_CASE_ : Optional[int] = bbox[i, j, 1] SCREAMING_SNAKE_CASE_ : Optional[Any] = tmp_coordinate if bbox[i, j, 2] < bbox[i, j, 0]: SCREAMING_SNAKE_CASE_ : Tuple = bbox[i, j, 2] SCREAMING_SNAKE_CASE_ : Optional[Any] = bbox[i, j, 0] SCREAMING_SNAKE_CASE_ : List[str] = tmp_coordinate SCREAMING_SNAKE_CASE_ : Dict = tf.constant(lowerCamelCase__) SCREAMING_SNAKE_CASE_ : Union[str, Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size]) SCREAMING_SNAKE_CASE_ : Any = None if self.use_input_mask: SCREAMING_SNAKE_CASE_ : int = random_attention_mask([self.batch_size, self.text_seq_length]) SCREAMING_SNAKE_CASE_ : Optional[Any] = None if self.use_token_type_ids: SCREAMING_SNAKE_CASE_ : Optional[int] = ids_tensor([self.batch_size, self.text_seq_length] , self.type_vocab_size) SCREAMING_SNAKE_CASE_ : List[str] = None SCREAMING_SNAKE_CASE_ : Union[str, Any] = None if self.use_labels: SCREAMING_SNAKE_CASE_ : Union[str, Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size) SCREAMING_SNAKE_CASE_ : Union[str, Any] = ids_tensor([self.batch_size, self.text_seq_length] , self.num_labels) SCREAMING_SNAKE_CASE_ : Optional[int] = LayoutLMvaConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , coordinate_size=self.coordinate_size , shape_size=self.shape_size , input_size=self.image_size , patch_size=self.patch_size , ) return config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels def _SCREAMING_SNAKE_CASE ( self : List[Any] , lowercase_ : str , lowercase_ : Optional[int] , lowercase_ : Dict , lowercase_ : str , lowercase_ : int , lowercase_ : Any): '''simple docstring''' SCREAMING_SNAKE_CASE_ : Dict = TFLayoutLMvaModel(config=lowerCamelCase__) # text + image SCREAMING_SNAKE_CASE_ : Tuple = model(lowerCamelCase__ , pixel_values=lowerCamelCase__ , training=lowerCamelCase__) SCREAMING_SNAKE_CASE_ : Tuple = model( lowerCamelCase__ , bbox=lowerCamelCase__ , pixel_values=lowerCamelCase__ , attention_mask=lowerCamelCase__ , token_type_ids=lowerCamelCase__ , training=lowerCamelCase__ , ) SCREAMING_SNAKE_CASE_ : str = model(lowerCamelCase__ , bbox=lowerCamelCase__ , pixel_values=lowerCamelCase__ , training=lowerCamelCase__) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size)) # text only SCREAMING_SNAKE_CASE_ : Optional[int] = model(lowerCamelCase__ , training=lowerCamelCase__) self.parent.assertEqual( result.last_hidden_state.shape , (self.batch_size, self.text_seq_length, self.hidden_size)) # image only 
SCREAMING_SNAKE_CASE_ : Tuple = model({'''pixel_values''': pixel_values} , training=lowerCamelCase__) self.parent.assertEqual( result.last_hidden_state.shape , (self.batch_size, self.image_seq_length, self.hidden_size)) def _SCREAMING_SNAKE_CASE ( self : Dict , lowercase_ : str , lowercase_ : Union[str, Any] , lowercase_ : Dict , lowercase_ : Union[str, Any] , lowercase_ : str , lowercase_ : Optional[Any] , lowercase_ : str): '''simple docstring''' SCREAMING_SNAKE_CASE_ : Optional[Any] = self.num_labels SCREAMING_SNAKE_CASE_ : List[Any] = TFLayoutLMvaForSequenceClassification(config=lowerCamelCase__) SCREAMING_SNAKE_CASE_ : List[str] = model( lowerCamelCase__ , bbox=lowerCamelCase__ , pixel_values=lowerCamelCase__ , attention_mask=lowerCamelCase__ , token_type_ids=lowerCamelCase__ , labels=lowerCamelCase__ , training=lowerCamelCase__ , ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels)) def _SCREAMING_SNAKE_CASE ( self : List[str] , lowercase_ : List[str] , lowercase_ : Union[str, Any] , lowercase_ : Dict , lowercase_ : List[Any] , lowercase_ : Optional[int] , lowercase_ : Tuple , lowercase_ : List[str]): '''simple docstring''' SCREAMING_SNAKE_CASE_ : Union[str, Any] = self.num_labels SCREAMING_SNAKE_CASE_ : Dict = TFLayoutLMvaForTokenClassification(config=lowerCamelCase__) SCREAMING_SNAKE_CASE_ : Optional[Any] = model( lowerCamelCase__ , bbox=lowerCamelCase__ , pixel_values=lowerCamelCase__ , attention_mask=lowerCamelCase__ , token_type_ids=lowerCamelCase__ , labels=lowerCamelCase__ , training=lowerCamelCase__ , ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.text_seq_length, self.num_labels)) def _SCREAMING_SNAKE_CASE ( self : int , lowercase_ : Dict , lowercase_ : Optional[Any] , lowercase_ : Optional[Any] , lowercase_ : Any , lowercase_ : Dict , lowercase_ : Tuple , lowercase_ : Tuple): '''simple docstring''' SCREAMING_SNAKE_CASE_ : Dict = 2 SCREAMING_SNAKE_CASE_ : Tuple = TFLayoutLMvaForQuestionAnswering(config=lowerCamelCase__) SCREAMING_SNAKE_CASE_ : int = model( lowerCamelCase__ , bbox=lowerCamelCase__ , pixel_values=lowerCamelCase__ , attention_mask=lowerCamelCase__ , token_type_ids=lowerCamelCase__ , start_positions=lowerCamelCase__ , end_positions=lowerCamelCase__ , training=lowerCamelCase__ , ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length)) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length)) def _SCREAMING_SNAKE_CASE ( self : List[Any]): '''simple docstring''' SCREAMING_SNAKE_CASE_ : Union[str, Any] = self.prepare_config_and_inputs() (SCREAMING_SNAKE_CASE_) : Any = config_and_inputs SCREAMING_SNAKE_CASE_ : List[str] = { """input_ids""": input_ids, """bbox""": bbox, """pixel_values""": pixel_values, """token_type_ids""": token_type_ids, """attention_mask""": input_mask, } return config, inputs_dict @require_tf class lowerCAmelCase__ ( UpperCAmelCase__ , UpperCAmelCase__ , unittest.TestCase ): '''simple docstring''' __UpperCamelCase = ( ( TFLayoutLMvaModel, TFLayoutLMvaForQuestionAnswering, TFLayoutLMvaForSequenceClassification, TFLayoutLMvaForTokenClassification, ) if is_tf_available() else () ) __UpperCamelCase = ( {"""document-question-answering""": TFLayoutLMvaForQuestionAnswering, """feature-extraction""": TFLayoutLMvaModel} if is_tf_available() else {} ) __UpperCamelCase = False __UpperCamelCase = False __UpperCamelCase = False def _SCREAMING_SNAKE_CASE ( self : str , lowercase_ : Optional[int] , lowercase_ : Union[str, Any] , lowercase_ : 
Optional[Any] , lowercase_ : Tuple , lowercase_ : int): '''simple docstring''' return True def _SCREAMING_SNAKE_CASE ( self : Optional[int] , lowercase_ : int , lowercase_ : List[str] , lowercase_ : Optional[int]=False): '''simple docstring''' SCREAMING_SNAKE_CASE_ : List[str] = copy.deepcopy(lowerCamelCase__) if model_class in get_values(lowerCamelCase__): SCREAMING_SNAKE_CASE_ : Optional[int] = { k: tf.tile(tf.expand_dims(lowerCamelCase__ , 1) , (1, self.model_tester.num_choices) + (1,) * (v.ndim - 1)) if isinstance(lowerCamelCase__ , tf.Tensor) and v.ndim > 0 else v for k, v in inputs_dict.items() } if return_labels: if model_class in get_values(lowerCamelCase__): SCREAMING_SNAKE_CASE_ : str = tf.ones(self.model_tester.batch_size , dtype=tf.intaa) elif model_class in get_values(lowerCamelCase__): SCREAMING_SNAKE_CASE_ : List[str] = tf.zeros(self.model_tester.batch_size , dtype=tf.intaa) SCREAMING_SNAKE_CASE_ : Union[str, Any] = tf.zeros(self.model_tester.batch_size , dtype=tf.intaa) elif model_class in get_values(lowerCamelCase__): SCREAMING_SNAKE_CASE_ : Optional[Any] = tf.zeros(self.model_tester.batch_size , dtype=tf.intaa) elif model_class in get_values(lowerCamelCase__): SCREAMING_SNAKE_CASE_ : Tuple = tf.zeros( (self.model_tester.batch_size, self.model_tester.text_seq_length) , dtype=tf.intaa) return inputs_dict def _SCREAMING_SNAKE_CASE ( self : Dict): '''simple docstring''' SCREAMING_SNAKE_CASE_ : List[Any] = TFLayoutLMvaModelTester(self) SCREAMING_SNAKE_CASE_ : Optional[int] = ConfigTester(self , config_class=lowerCamelCase__ , hidden_size=37) def _SCREAMING_SNAKE_CASE ( self : Any): '''simple docstring''' self.config_tester.run_common_tests() def _SCREAMING_SNAKE_CASE ( self : Optional[int]): '''simple docstring''' SCREAMING_SNAKE_CASE_ : Any = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: SCREAMING_SNAKE_CASE_ : Optional[int] = model_class(lowerCamelCase__) if getattr(lowerCamelCase__ , '''hf_compute_loss''' , lowerCamelCase__): # The number of elements in the loss should be the same as the number of elements in the label SCREAMING_SNAKE_CASE_ : Union[str, Any] = self._prepare_for_class(inputs_dict.copy() , lowerCamelCase__ , return_labels=lowerCamelCase__) SCREAMING_SNAKE_CASE_ : int = prepared_for_class[ sorted(prepared_for_class.keys() - inputs_dict.keys() , reverse=lowerCamelCase__)[0] ] SCREAMING_SNAKE_CASE_ : Union[str, Any] = added_label.shape.as_list()[:1] # Test that model correctly compute the loss with kwargs SCREAMING_SNAKE_CASE_ : List[Any] = self._prepare_for_class(inputs_dict.copy() , lowerCamelCase__ , return_labels=lowerCamelCase__) SCREAMING_SNAKE_CASE_ : Optional[Any] = prepared_for_class.pop('''input_ids''') SCREAMING_SNAKE_CASE_ : List[str] = model(lowerCamelCase__ , **lowerCamelCase__)[0] self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1]) # Test that model correctly compute the loss when we mask some positions SCREAMING_SNAKE_CASE_ : Union[str, Any] = self._prepare_for_class(inputs_dict.copy() , lowerCamelCase__ , return_labels=lowerCamelCase__) SCREAMING_SNAKE_CASE_ : Optional[Any] = prepared_for_class.pop('''input_ids''') if "labels" in prepared_for_class: SCREAMING_SNAKE_CASE_ : List[str] = prepared_for_class["""labels"""].numpy() if len(labels.shape) > 1 and labels.shape[1] != 1: SCREAMING_SNAKE_CASE_ : Optional[Any] = -100 SCREAMING_SNAKE_CASE_ : Union[str, Any] = tf.convert_to_tensor(lowerCamelCase__) SCREAMING_SNAKE_CASE_ : Tuple = model(lowerCamelCase__ , 
**lowerCamelCase__)[0] self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1]) self.assertTrue(not np.any(np.isnan(loss.numpy()))) # Test that model correctly compute the loss with a dict SCREAMING_SNAKE_CASE_ : Optional[Any] = self._prepare_for_class(inputs_dict.copy() , lowerCamelCase__ , return_labels=lowerCamelCase__) SCREAMING_SNAKE_CASE_ : Union[str, Any] = model(lowerCamelCase__)[0] self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1]) # Test that model correctly compute the loss with a tuple SCREAMING_SNAKE_CASE_ : Dict = self._prepare_for_class(inputs_dict.copy() , lowerCamelCase__ , return_labels=lowerCamelCase__) # Get keys that were added with the _prepare_for_class function SCREAMING_SNAKE_CASE_ : str = prepared_for_class.keys() - inputs_dict.keys() SCREAMING_SNAKE_CASE_ : Tuple = inspect.signature(model.call).parameters SCREAMING_SNAKE_CASE_ : str = list(signature.keys()) # Create a dictionary holding the location of the tensors in the tuple SCREAMING_SNAKE_CASE_ : Any = {0: """input_ids"""} for label_key in label_keys: SCREAMING_SNAKE_CASE_ : Dict = signature_names.index(lowerCamelCase__) SCREAMING_SNAKE_CASE_ : Optional[int] = label_key SCREAMING_SNAKE_CASE_ : Optional[Any] = sorted(tuple_index_mapping.items()) # Initialize a list with their default values, update the values and convert to a tuple SCREAMING_SNAKE_CASE_ : Any = [] for name in signature_names: if name != "kwargs": list_input.append(signature[name].default) for index, value in sorted_tuple_index_mapping: SCREAMING_SNAKE_CASE_ : List[str] = prepared_for_class[value] SCREAMING_SNAKE_CASE_ : Union[str, Any] = tuple(lowerCamelCase__) # Send to model SCREAMING_SNAKE_CASE_ : str = model(tuple_input[:-1])[0] self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1]) def _SCREAMING_SNAKE_CASE ( self : Optional[int]): '''simple docstring''' ( SCREAMING_SNAKE_CASE_ ) : Any = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__) def _SCREAMING_SNAKE_CASE ( self : Any): '''simple docstring''' ( SCREAMING_SNAKE_CASE_ ) : List[Any] = self.model_tester.prepare_config_and_inputs() for type in ["absolute", "relative_key", "relative_key_query"]: SCREAMING_SNAKE_CASE_ : Dict = type self.model_tester.create_and_check_model(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__) def _SCREAMING_SNAKE_CASE ( self : Tuple): '''simple docstring''' ( SCREAMING_SNAKE_CASE_ ) : int = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_sequence_classification( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__) def _SCREAMING_SNAKE_CASE ( self : Optional[int]): '''simple docstring''' ( SCREAMING_SNAKE_CASE_ ) : Union[str, Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_token_classification( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__) def _SCREAMING_SNAKE_CASE ( self : List[str]): '''simple docstring''' ( SCREAMING_SNAKE_CASE_ ) : Tuple = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_question_answering( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , 
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__) @slow def _SCREAMING_SNAKE_CASE ( self : Optional[int]): '''simple docstring''' for model_name in TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: SCREAMING_SNAKE_CASE_ : Dict = TFLayoutLMvaModel.from_pretrained(lowerCamelCase__) self.assertIsNotNone(lowerCamelCase__) def _A () -> List[str]: """simple docstring""" SCREAMING_SNAKE_CASE_ : List[Any] = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' ) return image @require_tf class lowerCAmelCase__ ( unittest.TestCase ): '''simple docstring''' @cached_property def _SCREAMING_SNAKE_CASE ( self : Optional[Any]): '''simple docstring''' return LayoutLMvaImageProcessor(apply_ocr=lowerCamelCase__) if is_vision_available() else None @slow def _SCREAMING_SNAKE_CASE ( self : Dict): '''simple docstring''' SCREAMING_SNAKE_CASE_ : List[str] = TFLayoutLMvaModel.from_pretrained('''microsoft/layoutlmv3-base''') SCREAMING_SNAKE_CASE_ : List[Any] = self.default_image_processor SCREAMING_SNAKE_CASE_ : str = prepare_img() SCREAMING_SNAKE_CASE_ : Any = image_processor(images=lowerCamelCase__ , return_tensors='''tf''').pixel_values SCREAMING_SNAKE_CASE_ : str = tf.constant([[1, 2]]) SCREAMING_SNAKE_CASE_ : Any = tf.expand_dims(tf.constant([[1, 2, 3, 4], [5, 6, 7, 8]]) , axis=0) # forward pass SCREAMING_SNAKE_CASE_ : Dict = model(input_ids=lowerCamelCase__ , bbox=lowerCamelCase__ , pixel_values=lowerCamelCase__ , training=lowerCamelCase__) # verify the logits SCREAMING_SNAKE_CASE_ : int = (1, 199, 768) self.assertEqual(outputs.last_hidden_state.shape , lowerCamelCase__) SCREAMING_SNAKE_CASE_ : List[Any] = tf.constant( [[-0.05_29, 0.36_18, 0.16_32], [-0.15_87, -0.16_67, -0.04_00], [-0.15_57, -0.16_71, -0.05_05]]) self.assertTrue(np.allclose(outputs.last_hidden_state[0, :3, :3] , lowerCamelCase__ , atol=1e-4))
512
from __future__ import annotations def A ( lowercase__ : str , lowercase__ : list[str] | None = None , lowercase__ : dict[str, float] | None = None , lowercase__ : bool = False , ) -> tuple[int, float, str]: UpperCamelCase__ :Dict = cipher_alphabet or [chr(lowercase__ ) for i in range(97 , 123 )] # If the argument is None or the user provided an empty dictionary if not frequencies_dict: # Frequencies of letters in the english language (how much they show up) UpperCamelCase__ :Optional[Any] = { """a""": 0.08497, """b""": 0.01492, """c""": 0.02202, """d""": 0.04253, """e""": 0.11162, """f""": 0.02228, """g""": 0.02015, """h""": 0.06094, """i""": 0.07546, """j""": 0.00153, """k""": 0.01292, """l""": 0.04025, """m""": 0.02406, """n""": 0.06749, """o""": 0.07507, """p""": 0.01929, """q""": 0.00095, """r""": 0.07587, """s""": 0.06327, """t""": 0.09356, """u""": 0.02758, """v""": 0.00978, """w""": 0.02560, """x""": 0.00150, """y""": 0.01994, """z""": 0.00077, } else: # Custom frequencies dictionary UpperCamelCase__ :Optional[int] = frequencies_dict if not case_sensitive: UpperCamelCase__ :int = ciphertext.lower() # Chi squared statistic values UpperCamelCase__ :dict[int, tuple[float, str]] = {} # cycle through all of the shifts for shift in range(len(lowercase__ ) ): UpperCamelCase__ :int = """""" # decrypt the message with the shift for letter in ciphertext: try: # Try to index the letter in the alphabet UpperCamelCase__ :int = (alphabet_letters.index(letter.lower() ) - shift) % len( lowercase__ ) decrypted_with_shift += ( alphabet_letters[new_key].upper() if case_sensitive and letter.isupper() else alphabet_letters[new_key] ) except ValueError: # Append the character if it isn't in the alphabet decrypted_with_shift += letter UpperCamelCase__ :Optional[int] = 0.0 # Loop through each letter in the decoded message with the shift for letter in decrypted_with_shift: if case_sensitive: UpperCamelCase__ :Optional[int] = letter.lower() if letter in frequencies: # Get the amount of times the letter occurs in the message UpperCamelCase__ :Optional[int] = decrypted_with_shift.lower().count(lowercase__ ) # Get the excepcted amount of times the letter should appear based # on letter frequencies UpperCamelCase__ :Optional[int] = frequencies[letter] * occurrences # Complete the chi squared statistic formula UpperCamelCase__ :Dict = ((occurrences - expected) ** 2) / expected # Add the margin of error to the total chi squared statistic chi_squared_statistic += chi_letter_value else: if letter.lower() in frequencies: # Get the amount of times the letter occurs in the message UpperCamelCase__ :List[str] = decrypted_with_shift.count(lowercase__ ) # Get the excepcted amount of times the letter should appear based # on letter frequencies UpperCamelCase__ :Union[str, Any] = frequencies[letter] * occurrences # Complete the chi squared statistic formula UpperCamelCase__ :List[str] = ((occurrences - expected) ** 2) / expected # Add the margin of error to the total chi squared statistic chi_squared_statistic += chi_letter_value # Add the data to the chi_squared_statistic_values dictionary UpperCamelCase__ :Union[str, Any] = ( chi_squared_statistic, decrypted_with_shift, ) # Get the most likely cipher by finding the cipher with the smallest chi squared # statistic def chi_squared_statistic_values_sorting_key(lowercase__ : int ) -> tuple[float, str]: return chi_squared_statistic_values[key] UpperCamelCase__ :int = min( lowercase__ , key=lowercase__ , ) # Get all the data from the most likely cipher (key, decoded message) ( 
( UpperCamelCase__ ) , ( UpperCamelCase__ ) , ) :Tuple = chi_squared_statistic_values[most_likely_cipher] # Return the data on the most likely shift return ( most_likely_cipher, most_likely_cipher_chi_squared_value, decoded_most_likely_cipher, )
45
0
import argparse from transformers import CLIPImageProcessor, CLIPVisionModelWithProjection from diffusers import UnCLIPImageVariationPipeline, UnCLIPPipeline if __name__ == "__main__": _a : Tuple = argparse.ArgumentParser() parser.add_argument('--dump_path', default=None, type=str, required=True, help='Path to the output model.') parser.add_argument( '--txt2img_unclip', default='kakaobrain/karlo-v1-alpha', type=str, required=False, help='The pretrained txt2img unclip.', ) _a : str = parser.parse_args() _a : Any = UnCLIPPipeline.from_pretrained(args.txtaimg_unclip) _a : Optional[Any] = CLIPImageProcessor() _a : List[Any] = CLIPVisionModelWithProjection.from_pretrained('openai/clip-vit-large-patch14') _a : Any = UnCLIPImageVariationPipeline( decoder=txtaimg.decoder, text_encoder=txtaimg.text_encoder, tokenizer=txtaimg.tokenizer, text_proj=txtaimg.text_proj, feature_extractor=feature_extractor, image_encoder=image_encoder, super_res_first=txtaimg.super_res_first, super_res_last=txtaimg.super_res_last, decoder_scheduler=txtaimg.decoder_scheduler, super_res_scheduler=txtaimg.super_res_scheduler, ) imgaimg.save_pretrained(args.dump_path)
479
import warnings from ...utils import logging from .image_processing_mobilevit import MobileViTImageProcessor UpperCamelCase = logging.get_logger(__name__) class lowerCAmelCase_ ( lowercase ): """simple docstring""" def __init__( self :Union[str, Any] , *lowerCamelCase__ :Optional[int] , **lowerCamelCase__ :Dict ): warnings.warn( """The class MobileViTFeatureExtractor is deprecated and will be removed in version 5 of Transformers.""" """ Please use MobileViTImageProcessor instead.""" , lowerCamelCase__ , ) super().__init__(*lowerCamelCase__ , **lowerCamelCase__ )
45
0
'''simple docstring''' from typing import Callable, Dict, Optional, Tuple import torch from torch import nn from torch.distributions import ( AffineTransform, Distribution, Independent, NegativeBinomial, Normal, StudentT, TransformedDistribution, ) class _snake_case( UpperCAmelCase ): def __init__(self : str , a : Distribution , a : Any=None , a : List[Any]=None , a : List[str]=0 ) -> List[Any]: """simple docstring""" A__ = 1.0 if scale is None else scale A__ = 0.0 if loc is None else loc super().__init__(lowerCamelCase__ , [AffineTransform(loc=self.loc , scale=self.scale , event_dim=lowerCamelCase__ )] ) @property def _UpperCamelCase (self : Any ) -> int: """simple docstring""" return self.base_dist.mean * self.scale + self.loc @property def _UpperCamelCase (self : List[Any] ) -> str: """simple docstring""" return self.base_dist.variance * self.scale**2 @property def _UpperCamelCase (self : Optional[int] ) -> int: """simple docstring""" return self.variance.sqrt() class _snake_case( nn.Module ): def __init__(self : int , a : int , a : Dict[str, int] , a : Callable[..., Tuple[torch.Tensor]] , **a : Any ) -> int: """simple docstring""" super().__init__(**lowerCamelCase__ ) A__ = args_dim A__ = nn.ModuleList([nn.Linear(lowerCamelCase__ , lowerCamelCase__ ) for dim in args_dim.values()] ) A__ = domain_map def _UpperCamelCase (self : Tuple , a : torch.Tensor ) -> List[str]: """simple docstring""" A__ = [proj(lowerCamelCase__ ) for proj in self.proj] return self.domain_map(*lowerCamelCase__ ) class _snake_case( nn.Module ): def __init__(self : Dict , a : Optional[int] ) -> Dict: """simple docstring""" super().__init__() A__ = function def _UpperCamelCase (self : int , a : Union[str, Any] , *a : Tuple ) -> List[Any]: """simple docstring""" return self.function(lowerCamelCase__ , *lowerCamelCase__ ) class _snake_case: __snake_case: type __snake_case: int __snake_case: Dict[str, int] def __init__(self : Tuple , a : int = 1 ) -> Dict: """simple docstring""" A__ = dim A__ = {k: dim * self.args_dim[k] for k in self.args_dim} def _UpperCamelCase (self : Union[str, Any] , a : Union[str, Any] ) -> Optional[int]: """simple docstring""" if self.dim == 1: return self.distribution_class(*lowerCamelCase__ ) else: return Independent(self.distribution_class(*lowerCamelCase__ ) , 1 ) def _UpperCamelCase (self : Optional[int] , a : Optional[Any] , a : Optional[torch.Tensor] = None , a : Optional[torch.Tensor] = None , ) -> Any: """simple docstring""" A__ = self._base_distribution(lowerCamelCase__ ) if loc is None and scale is None: return distr else: return AffineTransformed(lowerCamelCase__ , loc=lowerCamelCase__ , scale=lowerCamelCase__ , event_dim=self.event_dim ) @property def _UpperCamelCase (self : Optional[Any] ) -> Any: """simple docstring""" return () if self.dim == 1 else (self.dim,) @property def _UpperCamelCase (self : List[str] ) -> Tuple: """simple docstring""" return len(self.event_shape ) @property def _UpperCamelCase (self : Any ) -> Union[str, Any]: """simple docstring""" return 0.0 def _UpperCamelCase (self : Any , a : int ) -> str: """simple docstring""" return ParameterProjection( in_features=lowerCamelCase__ , args_dim=self.args_dim , domain_map=LambdaLayer(self.domain_map ) , ) def _UpperCamelCase (self : Union[str, Any] , *a : torch.Tensor ) -> Dict: """simple docstring""" raise NotImplementedError() @staticmethod def _UpperCamelCase (a : torch.Tensor ) -> int: """simple docstring""" return (x + torch.sqrt(torch.square(lowerCamelCase__ ) + 4.0 )) / 2.0 class _snake_case( UpperCAmelCase ): 
__snake_case: Dict[str, int] = {"df": 1, "loc": 1, "scale": 1} __snake_case: type = StudentT @classmethod def _UpperCamelCase (cls : List[str] , a : torch.Tensor , a : torch.Tensor , a : torch.Tensor ) -> Tuple: """simple docstring""" A__ = cls.squareplus(lowerCamelCase__ ).clamp_min(torch.finfo(scale.dtype ).eps ) A__ = 2.0 + cls.squareplus(lowerCamelCase__ ) return df.squeeze(-1 ), loc.squeeze(-1 ), scale.squeeze(-1 ) class _snake_case( UpperCAmelCase ): __snake_case: Dict[str, int] = {"loc": 1, "scale": 1} __snake_case: type = Normal @classmethod def _UpperCamelCase (cls : Union[str, Any] , a : torch.Tensor , a : torch.Tensor ) -> List[str]: """simple docstring""" A__ = cls.squareplus(lowerCamelCase__ ).clamp_min(torch.finfo(scale.dtype ).eps ) return loc.squeeze(-1 ), scale.squeeze(-1 ) class _snake_case( UpperCAmelCase ): __snake_case: Dict[str, int] = {"total_count": 1, "logits": 1} __snake_case: type = NegativeBinomial @classmethod def _UpperCamelCase (cls : Tuple , a : torch.Tensor , a : torch.Tensor ) -> Optional[int]: """simple docstring""" A__ = cls.squareplus(lowerCamelCase__ ) return total_count.squeeze(-1 ), logits.squeeze(-1 ) def _UpperCamelCase (self : Optional[Any] , a : int ) -> str: """simple docstring""" A__ = distr_args if self.dim == 1: return self.distribution_class(total_count=lowerCamelCase__ , logits=lowerCamelCase__ ) else: return Independent(self.distribution_class(total_count=lowerCamelCase__ , logits=lowerCamelCase__ ) , 1 ) def _UpperCamelCase (self : Union[str, Any] , a : List[Any] , a : Optional[torch.Tensor] = None , a : Optional[torch.Tensor] = None ) -> str: """simple docstring""" A__ = distr_args if scale is not None: # See scaling property of Gamma. logits += scale.log() return self._base_distribution((total_count, logits) )
531
import json import sys import tempfile import unittest from pathlib import Path import transformers from transformers import ( CONFIG_MAPPING, FEATURE_EXTRACTOR_MAPPING, AutoConfig, AutoFeatureExtractor, WavaVecaConfig, WavaVecaFeatureExtractor, ) from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, get_tests_dir sys.path.append(str(Path(__file__).parent.parent.parent.parent / "utils")) from test_module.custom_configuration import CustomConfig # noqa E402 from test_module.custom_feature_extraction import CustomFeatureExtractor # noqa E402 UpperCamelCase = get_tests_dir("fixtures") UpperCamelCase = get_tests_dir("fixtures/dummy_feature_extractor_config.json") UpperCamelCase = get_tests_dir("fixtures/dummy-config.json") class lowerCAmelCase_ ( unittest.TestCase ): """simple docstring""" def __a ( self :Optional[int] ): UpperCamelCase__ :Optional[int] = 0 def __a ( self :str ): UpperCamelCase__ :Optional[Any] = AutoFeatureExtractor.from_pretrained("""facebook/wav2vec2-base-960h""" ) self.assertIsInstance(lowerCamelCase__ , lowerCamelCase__ ) def __a ( self :Dict ): UpperCamelCase__ :Dict = AutoFeatureExtractor.from_pretrained(lowerCamelCase__ ) self.assertIsInstance(lowerCamelCase__ , lowerCamelCase__ ) def __a ( self :Optional[int] ): with tempfile.TemporaryDirectory() as tmpdirname: UpperCamelCase__ :List[str] = WavaVecaConfig() # remove feature_extractor_type to make sure config.json alone is enough to load feature processor locally UpperCamelCase__ :Tuple = AutoFeatureExtractor.from_pretrained(lowerCamelCase__ ).to_dict() config_dict.pop("""feature_extractor_type""" ) UpperCamelCase__ :Union[str, Any] = WavaVecaFeatureExtractor(**lowerCamelCase__ ) # save in new folder model_config.save_pretrained(lowerCamelCase__ ) config.save_pretrained(lowerCamelCase__ ) UpperCamelCase__ :Optional[Any] = AutoFeatureExtractor.from_pretrained(lowerCamelCase__ ) # make sure private variable is not incorrectly saved UpperCamelCase__ :Tuple = json.loads(config.to_json_string() ) self.assertTrue("""_processor_class""" not in dict_as_saved ) self.assertIsInstance(lowerCamelCase__ , lowerCamelCase__ ) def __a ( self :Union[str, Any] ): UpperCamelCase__ :Dict = AutoFeatureExtractor.from_pretrained(lowerCamelCase__ ) self.assertIsInstance(lowerCamelCase__ , lowerCamelCase__ ) def __a ( self :Dict ): with self.assertRaisesRegex( lowerCamelCase__ , """bert-base is not a local folder and is not a valid model identifier""" ): UpperCamelCase__ :Dict = AutoFeatureExtractor.from_pretrained("""bert-base""" ) def __a ( self :List[Any] ): with self.assertRaisesRegex( lowerCamelCase__ , r"""aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)""" ): UpperCamelCase__ :Optional[int] = AutoFeatureExtractor.from_pretrained(lowerCamelCase__ , revision="""aaaaaa""" ) def __a ( self :int ): with self.assertRaisesRegex( lowerCamelCase__ , """hf-internal-testing/config-no-model does not appear to have a file named preprocessor_config.json.""" , ): UpperCamelCase__ :List[Any] = AutoFeatureExtractor.from_pretrained("""hf-internal-testing/config-no-model""" ) def __a ( self :Optional[int] ): # If remote code is not set, we will time out when asking whether to load the model. with self.assertRaises(lowerCamelCase__ ): UpperCamelCase__ :List[Any] = AutoFeatureExtractor.from_pretrained( """hf-internal-testing/test_dynamic_feature_extractor""" ) # If remote code is disabled, we can't load this config. 
with self.assertRaises(lowerCamelCase__ ): UpperCamelCase__ :Optional[Any] = AutoFeatureExtractor.from_pretrained( """hf-internal-testing/test_dynamic_feature_extractor""" , trust_remote_code=lowerCamelCase__ ) UpperCamelCase__ :str = AutoFeatureExtractor.from_pretrained( """hf-internal-testing/test_dynamic_feature_extractor""" , trust_remote_code=lowerCamelCase__ ) self.assertEqual(feature_extractor.__class__.__name__ , """NewFeatureExtractor""" ) # Test feature extractor can be reloaded. with tempfile.TemporaryDirectory() as tmp_dir: feature_extractor.save_pretrained(lowerCamelCase__ ) UpperCamelCase__ :Any = AutoFeatureExtractor.from_pretrained(lowerCamelCase__ , trust_remote_code=lowerCamelCase__ ) self.assertEqual(reloaded_feature_extractor.__class__.__name__ , """NewFeatureExtractor""" ) def __a ( self :Dict ): try: AutoConfig.register("""custom""" , lowerCamelCase__ ) AutoFeatureExtractor.register(lowerCamelCase__ , lowerCamelCase__ ) # Trying to register something existing in the Transformers library will raise an error with self.assertRaises(lowerCamelCase__ ): AutoFeatureExtractor.register(lowerCamelCase__ , lowerCamelCase__ ) # Now that the config is registered, it can be used as any other config with the auto-API UpperCamelCase__ :Any = CustomFeatureExtractor.from_pretrained(lowerCamelCase__ ) with tempfile.TemporaryDirectory() as tmp_dir: feature_extractor.save_pretrained(lowerCamelCase__ ) UpperCamelCase__ :List[Any] = AutoFeatureExtractor.from_pretrained(lowerCamelCase__ ) self.assertIsInstance(lowerCamelCase__ , lowerCamelCase__ ) finally: if "custom" in CONFIG_MAPPING._extra_content: del CONFIG_MAPPING._extra_content["custom"] if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content: del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig] def __a ( self :Optional[int] ): class lowerCAmelCase_ ( lowercase ): """simple docstring""" _snake_case : Optional[int] = True try: AutoConfig.register("""custom""" , lowerCamelCase__ ) AutoFeatureExtractor.register(lowerCamelCase__ , lowerCamelCase__ ) # If remote code is not set, the default is to use local UpperCamelCase__ :Optional[Any] = AutoFeatureExtractor.from_pretrained( """hf-internal-testing/test_dynamic_feature_extractor""" ) self.assertEqual(feature_extractor.__class__.__name__ , """NewFeatureExtractor""" ) self.assertTrue(feature_extractor.is_local ) # If remote code is disabled, we load the local one. UpperCamelCase__ :str = AutoFeatureExtractor.from_pretrained( """hf-internal-testing/test_dynamic_feature_extractor""" , trust_remote_code=lowerCamelCase__ ) self.assertEqual(feature_extractor.__class__.__name__ , """NewFeatureExtractor""" ) self.assertTrue(feature_extractor.is_local ) # If remote is enabled, we load from the Hub UpperCamelCase__ :Optional[int] = AutoFeatureExtractor.from_pretrained( """hf-internal-testing/test_dynamic_feature_extractor""" , trust_remote_code=lowerCamelCase__ ) self.assertEqual(feature_extractor.__class__.__name__ , """NewFeatureExtractor""" ) self.assertTrue(not hasattr(lowerCamelCase__ , """is_local""" ) ) finally: if "custom" in CONFIG_MAPPING._extra_content: del CONFIG_MAPPING._extra_content["custom"] if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content: del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]
45
0
from math import sqrt


def solution(limit: int = 100_0000) -> int:
    """Return the least max cuboid size M for which the number of cuboids
    with an integer shortest path first exceeds ``limit``."""
    num_cuboids: int = 0
    max_cuboid_size: int = 0
    while num_cuboids <= limit:
        max_cuboid_size += 1
        for sum_shortest_sides in range(2, 2 * max_cuboid_size + 1):
            if sqrt(sum_shortest_sides**2 + max_cuboid_size**2).is_integer():
                num_cuboids += (
                    min(max_cuboid_size, sum_shortest_sides // 2)
                    - max(1, sum_shortest_sides - max_cuboid_size)
                    + 1
                )
    return max_cuboid_size


if __name__ == "__main__":
    print(f"{solution() = }")
561
import numpy as np import torch import tqdm from ...models.unet_ad import UNetaDModel from ...pipelines import DiffusionPipeline from ...utils import randn_tensor from ...utils.dummy_pt_objects import DDPMScheduler class lowerCAmelCase_ ( lowercase ): """simple docstring""" def __init__( self :int , lowerCamelCase__ :UNetaDModel , lowerCamelCase__ :UNetaDModel , lowerCamelCase__ :DDPMScheduler , lowerCamelCase__ :List[Any] , ): super().__init__() UpperCamelCase__ :Tuple = value_function UpperCamelCase__ :Optional[int] = unet UpperCamelCase__ :List[str] = scheduler UpperCamelCase__ :Dict = env UpperCamelCase__ :Dict = env.get_dataset() UpperCamelCase__ :Union[str, Any] = {} for key in self.data.keys(): try: UpperCamelCase__ :int = self.data[key].mean() except: # noqa: E722 pass UpperCamelCase__ :Any = {} for key in self.data.keys(): try: UpperCamelCase__ :int = self.data[key].std() except: # noqa: E722 pass UpperCamelCase__ :List[Any] = env.observation_space.shape[0] UpperCamelCase__ :List[str] = env.action_space.shape[0] def __a ( self :Union[str, Any] , lowerCamelCase__ :List[str] , lowerCamelCase__ :str ): return (x_in - self.means[key]) / self.stds[key] def __a ( self :int , lowerCamelCase__ :Optional[Any] , lowerCamelCase__ :Tuple ): return x_in * self.stds[key] + self.means[key] def __a ( self :Any , lowerCamelCase__ :int ): if type(lowerCamelCase__ ) is dict: return {k: self.to_torch(lowerCamelCase__ ) for k, v in x_in.items()} elif torch.is_tensor(lowerCamelCase__ ): return x_in.to(self.unet.device ) return torch.tensor(lowerCamelCase__ , device=self.unet.device ) def __a ( self :Union[str, Any] , lowerCamelCase__ :List[Any] , lowerCamelCase__ :Optional[Any] , lowerCamelCase__ :Tuple ): for key, val in cond.items(): UpperCamelCase__ :str = val.clone() return x_in def __a ( self :Union[str, Any] , lowerCamelCase__ :List[Any] , lowerCamelCase__ :int , lowerCamelCase__ :Optional[int] , lowerCamelCase__ :Optional[int] ): UpperCamelCase__ :Any = x.shape[0] UpperCamelCase__ :List[Any] = None for i in tqdm.tqdm(self.scheduler.timesteps ): # create batch of timesteps to pass into model UpperCamelCase__ :Optional[Any] = torch.full((batch_size,) , lowerCamelCase__ , device=self.unet.device , dtype=torch.long ) for _ in range(lowerCamelCase__ ): with torch.enable_grad(): x.requires_grad_() # permute to match dimension for pre-trained models UpperCamelCase__ :Dict = self.value_function(x.permute(0 , 2 , 1 ) , lowerCamelCase__ ).sample UpperCamelCase__ :List[Any] = torch.autograd.grad([y.sum()] , [x] )[0] UpperCamelCase__ :Union[str, Any] = self.scheduler._get_variance(lowerCamelCase__ ) UpperCamelCase__ :Any = torch.exp(0.5 * posterior_variance ) UpperCamelCase__ :Dict = model_std * grad UpperCamelCase__ :Optional[Any] = 0 UpperCamelCase__ :Dict = x.detach() UpperCamelCase__ :int = x + scale * grad UpperCamelCase__ :int = self.reset_xa(lowerCamelCase__ , lowerCamelCase__ , self.action_dim ) UpperCamelCase__ :List[str] = self.unet(x.permute(0 , 2 , 1 ) , lowerCamelCase__ ).sample.permute(0 , 2 , 1 ) # TODO: verify deprecation of this kwarg UpperCamelCase__ :List[str] = self.scheduler.step(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , predict_epsilon=lowerCamelCase__ )["""prev_sample"""] # apply conditions to the trajectory (set the initial state) UpperCamelCase__ :Optional[Any] = self.reset_xa(lowerCamelCase__ , lowerCamelCase__ , self.action_dim ) UpperCamelCase__ :Optional[int] = self.to_torch(lowerCamelCase__ ) return x, y def __call__( self :Optional[Any] , lowerCamelCase__ 
:Optional[int] , lowerCamelCase__ :str=64 , lowerCamelCase__ :Tuple=32 , lowerCamelCase__ :Dict=2 , lowerCamelCase__ :str=0.1 ): # normalize the observations and create batch dimension UpperCamelCase__ :List[str] = self.normalize(lowerCamelCase__ , """observations""" ) UpperCamelCase__ :List[str] = obs[None].repeat(lowerCamelCase__ , axis=0 ) UpperCamelCase__ :int = {0: self.to_torch(lowerCamelCase__ )} UpperCamelCase__ :Dict = (batch_size, planning_horizon, self.state_dim + self.action_dim) # generate initial noise and apply our conditions (to make the trajectories start at current state) UpperCamelCase__ :Any = randn_tensor(lowerCamelCase__ , device=self.unet.device ) UpperCamelCase__ :Optional[int] = self.reset_xa(lowerCamelCase__ , lowerCamelCase__ , self.action_dim ) UpperCamelCase__ :List[Any] = self.to_torch(lowerCamelCase__ ) # run the diffusion process UpperCamelCase__ , UpperCamelCase__ :Union[str, Any] = self.run_diffusion(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) # sort output trajectories by value UpperCamelCase__ :List[Any] = y.argsort(0 , descending=lowerCamelCase__ ).squeeze() UpperCamelCase__ :Dict = x[sorted_idx] UpperCamelCase__ :Tuple = sorted_values[:, :, : self.action_dim] UpperCamelCase__ :Optional[Any] = actions.detach().cpu().numpy() UpperCamelCase__ :Optional[int] = self.de_normalize(lowerCamelCase__ , key="""actions""" ) # select the action with the highest value if y is not None: UpperCamelCase__ :List[str] = 0 else: # if we didn't run value guiding, select a random action UpperCamelCase__ :Dict = np.random.randint(0 , lowerCamelCase__ ) UpperCamelCase__ :Tuple = denorm_actions[selected_index, 0] return denorm_actions
45
0
'''simple docstring''' from scipy.stats import spearmanr import datasets _lowerCamelCase = """\nThe Spearman rank-order correlation coefficient is a measure of the\nrelationship between two datasets. Like other correlation coefficients,\nthis one varies between -1 and +1 with 0 implying no correlation.\nPositive correlations imply that as data in dataset x increases, so\ndoes data in dataset y. Negative correlations imply that as x increases,\ny decreases. Correlations of -1 or +1 imply an exact monotonic relationship.\n\nUnlike the Pearson correlation, the Spearman correlation does not\nassume that both datasets are normally distributed.\n\nThe p-value roughly indicates the probability of an uncorrelated system\nproducing datasets that have a Spearman correlation at least as extreme\nas the one computed from these datasets. The p-values are not entirely\nreliable but are probably reasonable for datasets larger than 500 or so.\n""" _lowerCamelCase = """\nArgs:\n predictions (`List[float]`): Predicted labels, as returned by a model.\n references (`List[float]`): Ground truth labels.\n return_pvalue (`bool`): If `True`, returns the p-value. If `False`, returns\n only the spearmanr score. Defaults to `False`.\nReturns:\n spearmanr (`float`): Spearman correlation coefficient.\n p-value (`float`): p-value. **Note**: is only returned if `return_pvalue=True` is input.\nExamples:\n Example 1:\n >>> spearmanr_metric = datasets.load_metric(\"spearmanr\")\n >>> results = spearmanr_metric.compute(references=[1, 2, 3, 4, 5], predictions=[10, 9, 2.5, 6, 4])\n >>> print(results)\n {'spearmanr': -0.7}\n\n Example 2:\n >>> spearmanr_metric = datasets.load_metric(\"spearmanr\")\n >>> results = spearmanr_metric.compute(references=[1, 2, 3, 4, 5],\n ... predictions=[10, 9, 2.5, 6, 4],\n ... return_pvalue=True)\n >>> print(results['spearmanr'])\n -0.7\n >>> print(round(results['spearmanr_pvalue'], 2))\n 0.19\n""" _lowerCamelCase = R"""\\n@book{kokoska2000crc,\n title={CRC standard probability and statistics tables and formulae},\n author={Kokoska, Stephen and Zwillinger, Daniel},\n year={2000},\n publisher={Crc Press}\n}\n@article{2020SciPy-NMeth,\n author = {Virtanen, Pauli and Gommers, Ralf and Oliphant, Travis E. and\n Haberland, Matt and Reddy, Tyler and Cournapeau, David and\n Burovski, Evgeni and Peterson, Pearu and Weckesser, Warren and\n Bright, Jonathan and {van der Walt}, St{\'e}fan J. and\n Brett, Matthew and Wilson, Joshua and Millman, K. Jarrod and\n Mayorov, Nikolay and Nelson, Andrew R. J. and Jones, Eric and\n Kern, Robert and Larson, Eric and Carey, C J and\n Polat, {\.I}lhan and Feng, Yu and Moore, Eric W. and\n {VanderPlas}, Jake and Laxalde, Denis and Perktold, Josef and\n Cimrman, Robert and Henriksen, Ian and Quintero, E. A. and\n Harris, Charles R. and Archibald, Anne M. and\n Ribeiro, Ant{\^o}nio H. 
and Pedregosa, Fabian and\n {van Mulbregt}, Paul and {SciPy 1.0 Contributors}},\n title = {{{SciPy} 1.0: Fundamental Algorithms for Scientific\n Computing in Python}},\n journal = {Nature Methods},\n year = {2020},\n volume = {17},\n pages = {261--272},\n adsurl = {https://rdcu.be/b08Wh},\n doi = {10.1038/s41592-019-0686-2},\n}\n""" @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION) class _snake_case (datasets.Metric): def UpperCamelCase__ ( self ): return datasets.MetricInfo( description=_DESCRIPTION ,citation=_CITATION ,inputs_description=_KWARGS_DESCRIPTION ,features=datasets.Features( { "predictions": datasets.Value("float" ), "references": datasets.Value("float" ), } ) ,reference_urls=["https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.spearmanr.html"] ,) def UpperCamelCase__ ( self ,_snake_case ,_snake_case ,_snake_case=False ): UpperCAmelCase_ : Any = spearmanr(lowerCamelCase__ ,lowerCamelCase__ ) if return_pvalue: return {"spearmanr": results[0], "spearmanr_pvalue": results[1]} else: return {"spearmanr": results[0]}
71
def is_palindrome(num: int) -> bool:
    """Return True if the integer reads the same forwards and backwards."""
    if num < 0:
        return False
    num_copy: int = num
    rev_num: int = 0
    while num > 0:
        rev_num = rev_num * 10 + (num % 10)
        num //= 10
    return num_copy == rev_num


if __name__ == "__main__":
    import doctest

    doctest.testmod()
45
0
import unittest import numpy as np from transformers.testing_utils import require_torch, require_vision from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import GLPNImageProcessor class _a ( unittest.TestCase ): def __init__(self, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_=7, SCREAMING_SNAKE_CASE_=3, SCREAMING_SNAKE_CASE_=18, SCREAMING_SNAKE_CASE_=30, SCREAMING_SNAKE_CASE_=400, SCREAMING_SNAKE_CASE_=True, SCREAMING_SNAKE_CASE_=32, SCREAMING_SNAKE_CASE_=True, ) -> Optional[Any]: UpperCAmelCase_: List[Any] = parent UpperCAmelCase_: List[Any] = batch_size UpperCAmelCase_: Any = num_channels UpperCAmelCase_: List[str] = image_size UpperCAmelCase_: Dict = min_resolution UpperCAmelCase_: List[str] = max_resolution UpperCAmelCase_: str = do_resize UpperCAmelCase_: int = size_divisor UpperCAmelCase_: Optional[int] = do_rescale def __snake_case (self ) -> Optional[Any]: return { "do_resize": self.do_resize, "size_divisor": self.size_divisor, "do_rescale": self.do_rescale, } @require_torch @require_vision class _a ( _lowerCAmelCase , unittest.TestCase ): A = GLPNImageProcessor if is_vision_available() else None def __snake_case (self ) -> Union[str, Any]: UpperCAmelCase_: Dict = GLPNImageProcessingTester(self ) @property def __snake_case (self ) -> List[str]: return self.image_processor_tester.prepare_image_processor_dict() def __snake_case (self ) -> str: UpperCAmelCase_: Optional[Any] = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(lowerCamelCase__, """do_resize""" ) ) self.assertTrue(hasattr(lowerCamelCase__, """size_divisor""" ) ) self.assertTrue(hasattr(lowerCamelCase__, """resample""" ) ) self.assertTrue(hasattr(lowerCamelCase__, """do_rescale""" ) ) def __snake_case (self ) -> List[str]: pass def __snake_case (self ) -> List[str]: # Initialize image_processing UpperCAmelCase_: int = self.image_processing_class(**self.image_processor_dict ) # create random PIL images UpperCAmelCase_: str = prepare_image_inputs(self.image_processor_tester, equal_resolution=lowerCamelCase__ ) for image in image_inputs: self.assertIsInstance(lowerCamelCase__, Image.Image ) # Test not batched input (GLPNImageProcessor doesn't support batching) UpperCAmelCase_: Tuple = image_processing(image_inputs[0], return_tensors="""pt""" ).pixel_values self.assertTrue(encoded_images.shape[-1] % self.image_processor_tester.size_divisor == 0 ) self.assertTrue(encoded_images.shape[-2] % self.image_processor_tester.size_divisor == 0 ) def __snake_case (self ) -> Optional[int]: # Initialize image_processing UpperCAmelCase_: str = self.image_processing_class(**self.image_processor_dict ) # create random numpy tensors UpperCAmelCase_: Optional[Any] = prepare_image_inputs(self.image_processor_tester, equal_resolution=lowerCamelCase__, numpify=lowerCamelCase__ ) for image in image_inputs: self.assertIsInstance(lowerCamelCase__, np.ndarray ) # Test not batched input (GLPNImageProcessor doesn't support batching) UpperCAmelCase_: List[str] = image_processing(image_inputs[0], return_tensors="""pt""" ).pixel_values self.assertTrue(encoded_images.shape[-1] % self.image_processor_tester.size_divisor == 0 ) self.assertTrue(encoded_images.shape[-2] % self.image_processor_tester.size_divisor == 0 ) def __snake_case (self ) -> Optional[int]: # Initialize image_processing UpperCAmelCase_: 
List[Any] = self.image_processing_class(**self.image_processor_dict ) # create random PyTorch tensors UpperCAmelCase_: Tuple = prepare_image_inputs(self.image_processor_tester, equal_resolution=lowerCamelCase__, torchify=lowerCamelCase__ ) for image in image_inputs: self.assertIsInstance(lowerCamelCase__, torch.Tensor ) # Test not batched input (GLPNImageProcessor doesn't support batching) UpperCAmelCase_: List[str] = image_processing(image_inputs[0], return_tensors="""pt""" ).pixel_values self.assertTrue(encoded_images.shape[-1] % self.image_processor_tester.size_divisor == 0 ) self.assertTrue(encoded_images.shape[-2] % self.image_processor_tester.size_divisor == 0 )
556
from __future__ import annotations


def all_unique(arr: list[int]) -> bool:
    # True when every element of the list occurs exactly once
    return len(set(arr)) == len(arr)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
45
0
__A : Tuple = range(2, 2_0 + 1) __A : Any = [1_0**k for k in range(ks[-1] + 1)] __A : Tuple = {} def __a ( A__ : List[Any] , A__ : Tuple , A__ : str , A__ : Optional[Any] ): SCREAMING_SNAKE_CASE = sum(a_i[j] for j in range(lowercase__ , len(lowercase__ ) ) ) SCREAMING_SNAKE_CASE = sum(a_i[j] * base[j] for j in range(min(len(lowercase__ ) , lowercase__ ) ) ) SCREAMING_SNAKE_CASE = 0, 0 SCREAMING_SNAKE_CASE = n - i SCREAMING_SNAKE_CASE = memo.get(lowercase__ ) if sub_memo is not None: SCREAMING_SNAKE_CASE = sub_memo.get(lowercase__ ) if jumps is not None and len(lowercase__ ) > 0: # find and make the largest jump without going over SCREAMING_SNAKE_CASE = -1 for _k in range(len(lowercase__ ) - 1 , -1 , -1 ): if jumps[_k][2] <= k and jumps[_k][1] <= max_dn: SCREAMING_SNAKE_CASE = _k break if max_jump >= 0: SCREAMING_SNAKE_CASE = jumps[max_jump] # since the difference between jumps is cached, add c SCREAMING_SNAKE_CASE = diff + c for j in range(min(lowercase__ , len(lowercase__ ) ) ): SCREAMING_SNAKE_CASE = divmod(lowercase__ , 10 ) if new_c > 0: add(lowercase__ , lowercase__ , lowercase__ ) else: SCREAMING_SNAKE_CASE = [] else: SCREAMING_SNAKE_CASE = {c: []} SCREAMING_SNAKE_CASE = sub_memo if dn >= max_dn or c + diff >= base[k]: return diff, dn if k > ks[0]: while True: # keep doing smaller jumps SCREAMING_SNAKE_CASE = next_term(lowercase__ , k - 1 , i + dn , lowercase__ ) diff += _diff dn += terms_jumped if dn >= max_dn or c + diff >= base[k]: break else: # would be too small a jump, just compute sequential terms instead SCREAMING_SNAKE_CASE = compute(lowercase__ , lowercase__ , i + dn , lowercase__ ) diff += _diff dn += terms_jumped SCREAMING_SNAKE_CASE = sub_memo[c] # keep jumps sorted by # of terms skipped SCREAMING_SNAKE_CASE = 0 while j < len(lowercase__ ): if jumps[j][1] > dn: break j += 1 # cache the jump for this value digitsum(b) and c sub_memo[c].insert(lowercase__ , (diff, dn, k) ) return (diff, dn) def __a ( A__ : List[str] , A__ : Optional[int] , A__ : Tuple , A__ : int ): if i >= n: return 0, i if k > len(lowercase__ ): a_i.extend([0 for _ in range(k - len(lowercase__ ) )] ) # note: a_i -> b * 10^k + c # ds_b -> digitsum(b) # ds_c -> digitsum(c) SCREAMING_SNAKE_CASE = i SCREAMING_SNAKE_CASE = 0, 0, 0 for j in range(len(lowercase__ ) ): if j >= k: ds_b += a_i[j] else: ds_c += a_i[j] while i < n: i += 1 SCREAMING_SNAKE_CASE = ds_c + ds_b diff += addend SCREAMING_SNAKE_CASE = 0 for j in range(lowercase__ ): SCREAMING_SNAKE_CASE = a_i[j] + addend SCREAMING_SNAKE_CASE = divmod(lowercase__ , 10 ) ds_c += a_i[j] if addend > 0: break if addend > 0: add(lowercase__ , lowercase__ , lowercase__ ) return diff, i - start_i def __a ( A__ : Optional[Any] , A__ : Tuple , A__ : List[str] ): for j in range(lowercase__ , len(lowercase__ ) ): SCREAMING_SNAKE_CASE = digits[j] + addend if s >= 10: SCREAMING_SNAKE_CASE = divmod(lowercase__ , 10 ) SCREAMING_SNAKE_CASE = addend // 10 + quotient else: SCREAMING_SNAKE_CASE = s SCREAMING_SNAKE_CASE = addend // 10 if addend == 0: break while addend > 0: SCREAMING_SNAKE_CASE = divmod(lowercase__ , 10 ) digits.append(lowercase__ ) def __a ( A__ : int = 10**15 ): SCREAMING_SNAKE_CASE = [1] SCREAMING_SNAKE_CASE = 1 SCREAMING_SNAKE_CASE = 0 while True: SCREAMING_SNAKE_CASE = next_term(lowercase__ , 20 , i + dn , lowercase__ ) dn += terms_jumped if dn == n - i: break SCREAMING_SNAKE_CASE = 0 for j in range(len(lowercase__ ) ): a_n += digits[j] * 10**j return a_n if __name__ == "__main__": print(f'{solution() = }')
16
from __future__ import annotations


class XORCipher:
    def __init__(self, key: int = 0):
        """Simple constructor that stores an optional default key."""
        self.__key = key

    def encrypt(self, content: str, key: int) -> list[str]:
        assert isinstance(key, int) and isinstance(content, str)
        key = key or self.__key or 1
        # make sure key is an appropriate size
        key %= 255
        return [chr(ord(ch) ^ key) for ch in content]

    def decrypt(self, content: str, key: int) -> list[str]:
        assert isinstance(key, int) and isinstance(content, str)
        key = key or self.__key or 1
        # make sure key is an appropriate size
        key %= 255
        return [chr(ord(ch) ^ key) for ch in content]

    def encrypt_string(self, content: str, key: int = 0) -> str:
        assert isinstance(key, int) and isinstance(content, str)
        key = key or self.__key or 1
        # make sure key can be any size
        while key > 255:
            key -= 255
        # This will be returned
        ans = ""
        for ch in content:
            ans += chr(ord(ch) ^ key)
        return ans

    def decrypt_string(self, content: str, key: int = 0) -> str:
        assert isinstance(key, int) and isinstance(content, str)
        key = key or self.__key or 1
        # make sure key can be any size
        while key > 255:
            key -= 255
        # This will be returned
        ans = ""
        for ch in content:
            ans += chr(ord(ch) ^ key)
        return ans

    def encrypt_file(self, file: str, key: int = 0) -> bool:
        assert isinstance(file, str) and isinstance(key, int)
        try:
            with open(file) as fin, open("encrypt.out", "w+") as fout:
                # actual encrypt-process
                for line in fin:
                    fout.write(self.encrypt_string(line, key))
        except OSError:
            return False
        return True

    def decrypt_file(self, file: str, key: int) -> bool:
        assert isinstance(file, str) and isinstance(key, int)
        try:
            with open(file) as fin, open("decrypt.out", "w+") as fout:
                # actual decrypt-process
                for line in fin:
                    fout.write(self.decrypt_string(line, key))
        except OSError:
            return False
        return True


# Tests
# crypt = XORCipher()
# key = 67

# # test encrypt
# print(crypt.encrypt("hallo welt", key))
# # test decrypt
# print(crypt.decrypt(crypt.encrypt("hallo welt", key), key))

# # test encrypt_string
# print(crypt.encrypt_string("hallo welt", key))
# # test decrypt_string
# print(crypt.decrypt_string(crypt.encrypt_string("hallo welt", key), key))

# if crypt.encrypt_file("test.txt", key):
#     print("encrypt successful")
# else:
#     print("encrypt unsuccessful")

# if crypt.decrypt_file("encrypt.out", key):
#     print("decrypt successful")
# else:
#     print("decrypt unsuccessful")
45
0
import argparse

import torch

from transformers import FunnelBaseModel, FunnelConfig, FunnelModel, load_tf_weights_in_funnel
from transformers.utils import logging


logging.set_verbosity_info()


def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, config_file, pytorch_dump_path, base_model):
    # Initialise PyTorch model
    config = FunnelConfig.from_json_file(config_file)
    print(f"Building PyTorch model from configuration: {config}")
    model = FunnelBaseModel(config) if base_model else FunnelModel(config)

    # Load weights from tf checkpoint
    load_tf_weights_in_funnel(model, config, tf_checkpoint_path)

    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    torch.save(model.state_dict(), pytorch_dump_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
    )
    parser.add_argument(
        "--config_file",
        default=None,
        type=str,
        required=True,
        help="The config json file corresponding to the pre-trained model. \nThis specifies the model architecture.",
    )
    parser.add_argument(
        "--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    parser.add_argument(
        "--base_model", action="store_true", help="Whether you want just the base model (no decoder) or not."
    )
    args = parser.parse_args()
    convert_tf_checkpoint_to_pytorch(
        args.tf_checkpoint_path, args.config_file, args.pytorch_dump_path, args.base_model
    )
291
import random


def partition(a: list, left_index: int, right_index: int) -> int:
    pivot = a[left_index]
    i = left_index + 1
    for j in range(left_index + 1, right_index):
        if a[j] < pivot:
            a[j], a[i] = a[i], a[j]
            i += 1
    a[left_index], a[i - 1] = a[i - 1], a[left_index]
    return i - 1


def quick_sort_random(a: list, left: int, right: int) -> None:
    if left < right:
        pivot = random.randint(left, right - 1)
        # switches the pivot with the left most bound
        a[left], a[pivot] = a[pivot], a[left]
        pivot_index = partition(a, left, right)
        # recursive quicksort to the left of the pivot point
        quick_sort_random(a, left, pivot_index)
        # recursive quicksort to the right of the pivot point
        quick_sort_random(a, pivot_index + 1, right)


def main() -> None:
    user_input = input("Enter numbers separated by a comma:\n").strip()
    arr = [int(item) for item in user_input.split(",")]
    quick_sort_random(arr, 0, len(arr))
    print(arr)


if __name__ == "__main__":
    main()
45
0
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_sentencepiece_available, is_tf_available, is_tokenizers_available, is_torch_available, ) if is_sentencepiece_available(): from ..ta.tokenization_ta import TaTokenizer else: from ...utils.dummy_sentencepiece_objects import TaTokenizer __lowerCamelCase = TaTokenizer if is_tokenizers_available(): from ..ta.tokenization_ta_fast import TaTokenizerFast else: from ...utils.dummy_tokenizers_objects import TaTokenizerFast __lowerCamelCase = TaTokenizerFast __lowerCamelCase = {"configuration_mt5": ["MT5Config", "MT5OnnxConfig"]} try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __lowerCamelCase = [ "MT5EncoderModel", "MT5ForConditionalGeneration", "MT5ForQuestionAnswering", "MT5Model", "MT5PreTrainedModel", "MT5Stack", ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __lowerCamelCase = ["TFMT5EncoderModel", "TFMT5ForConditionalGeneration", "TFMT5Model"] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __lowerCamelCase = ["FlaxMT5EncoderModel", "FlaxMT5ForConditionalGeneration", "FlaxMT5Model"] if TYPE_CHECKING: from .configuration_mta import MTaConfig, MTaOnnxConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_mta import ( MTaEncoderModel, MTaForConditionalGeneration, MTaForQuestionAnswering, MTaModel, MTaPreTrainedModel, MTaStack, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_mta import TFMTaEncoderModel, TFMTaForConditionalGeneration, TFMTaModel try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_mta import FlaxMTaEncoderModel, FlaxMTaForConditionalGeneration, FlaxMTaModel else: import sys __lowerCamelCase = _LazyModule( __name__, globals()["__file__"], _import_structure, extra_objects={"MT5Tokenizer": MTaTokenizer, "MT5TokenizerFast": MTaTokenizerFast}, module_spec=__spec__, )
608
from ...configuration_utils import PretrainedConfig from ...utils import logging from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices UpperCamelCase = logging.get_logger(__name__) UpperCamelCase = { "shi-labs/dinat-mini-in1k-224": "https://huggingface.co/shi-labs/dinat-mini-in1k-224/resolve/main/config.json", # See all Dinat models at https://huggingface.co/models?filter=dinat } class lowerCAmelCase_ ( lowercase , lowercase ): """simple docstring""" _snake_case : Tuple = """dinat""" _snake_case : List[Any] = { """num_attention_heads""": """num_heads""", """num_hidden_layers""": """num_layers""", } def __init__( self :Optional[int] , lowerCamelCase__ :int=4 , lowerCamelCase__ :Union[str, Any]=3 , lowerCamelCase__ :List[Any]=64 , lowerCamelCase__ :Any=[3, 4, 6, 5] , lowerCamelCase__ :Tuple=[2, 4, 8, 16] , lowerCamelCase__ :Optional[int]=7 , lowerCamelCase__ :Tuple=[[1, 8, 1], [1, 4, 1, 4], [1, 2, 1, 2, 1, 2], [1, 1, 1, 1, 1]] , lowerCamelCase__ :Tuple=3.0 , lowerCamelCase__ :str=True , lowerCamelCase__ :Optional[int]=0.0 , lowerCamelCase__ :Optional[Any]=0.0 , lowerCamelCase__ :int=0.1 , lowerCamelCase__ :Optional[Any]="gelu" , lowerCamelCase__ :Optional[Any]=0.02 , lowerCamelCase__ :Union[str, Any]=1e-5 , lowerCamelCase__ :Optional[int]=0.0 , lowerCamelCase__ :List[str]=None , lowerCamelCase__ :str=None , **lowerCamelCase__ :List[Any] , ): super().__init__(**lowerCamelCase__ ) UpperCamelCase__ :Any = patch_size UpperCamelCase__ :Any = num_channels UpperCamelCase__ :int = embed_dim UpperCamelCase__ :Optional[Any] = depths UpperCamelCase__ :Any = len(lowerCamelCase__ ) UpperCamelCase__ :str = num_heads UpperCamelCase__ :Optional[int] = kernel_size UpperCamelCase__ :Optional[int] = dilations UpperCamelCase__ :Tuple = mlp_ratio UpperCamelCase__ :Dict = qkv_bias UpperCamelCase__ :List[str] = hidden_dropout_prob UpperCamelCase__ :List[str] = attention_probs_dropout_prob UpperCamelCase__ :Union[str, Any] = drop_path_rate UpperCamelCase__ :Tuple = hidden_act UpperCamelCase__ :List[Any] = layer_norm_eps UpperCamelCase__ :Optional[Any] = initializer_range # we set the hidden_size attribute in order to make Dinat work with VisionEncoderDecoderModel # this indicates the channel dimension after the last stage of the model UpperCamelCase__ :Tuple = int(embed_dim * 2 ** (len(lowerCamelCase__ ) - 1) ) UpperCamelCase__ :Tuple = layer_scale_init_value UpperCamelCase__ :Optional[int] = ["""stem"""] + [f"""stage{idx}""" for idx in range(1 , len(lowerCamelCase__ ) + 1 )] UpperCamelCase__ , UpperCamelCase__ :List[str] = get_aligned_output_features_output_indices( out_features=lowerCamelCase__ , out_indices=lowerCamelCase__ , stage_names=self.stage_names )
45
0
from __future__ import annotations import random import unittest from transformers import TransfoXLConfig, is_tf_available from transformers.testing_utils import require_tf, slow from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import ( TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST, TFTransfoXLForSequenceClassification, TFTransfoXLLMHeadModel, TFTransfoXLModel, ) class _lowercase : def __init__( self , a , ): snake_case__ : Tuple =parent snake_case__ : str =1_3 snake_case__ : Union[str, Any] =7 snake_case__ : List[Any] =3_0 snake_case__ : int =self.seq_length + self.mem_len snake_case__ : Tuple =1_5 snake_case__ : int =True snake_case__ : int =True snake_case__ : Union[str, Any] =9_9 snake_case__ : Any =[1_0, 5_0, 8_0] snake_case__ : List[str] =3_2 snake_case__ : Optional[Any] =3_2 snake_case__ : int =4 snake_case__ : Optional[int] =8 snake_case__ : Tuple =1_2_8 snake_case__ : List[Any] =2 snake_case__ : Optional[int] =2 snake_case__ : Dict =None snake_case__ : List[Any] =1 snake_case__ : Any =0 snake_case__ : List[str] =3 snake_case__ : Any =self.vocab_size - 1 snake_case__ : Optional[int] =0.01 def lowercase__ ( self ): snake_case__ : List[Any] =ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) snake_case__ : List[str] =ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) snake_case__ : Any =None if self.use_labels: snake_case__ : Tuple =ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) snake_case__ : int =TransfoXLConfig( vocab_size=self.vocab_size , mem_len=self.mem_len , clamp_len=self.clamp_len , cutoffs=self.cutoffs , d_model=self.hidden_size , d_embed=self.d_embed , n_head=self.num_attention_heads , d_head=self.d_head , d_inner=self.d_inner , div_val=self.div_val , n_layer=self.num_hidden_layers , eos_token_id=self.eos_token_id , pad_token_id=self.vocab_size - 1 , init_range=self.init_range , num_labels=self.num_labels , ) return (config, input_ids_a, input_ids_a, lm_labels) def lowercase__ ( self ): random.seed(self.seed ) tf.random.set_seed(self.seed ) def lowercase__ ( self , a , a , a , a ): snake_case__ : Any =TFTransfoXLModel(lowerCamelCase__ ) snake_case__ : List[str] =model(lowerCamelCase__ ).to_tuple() snake_case__ : int ={"""input_ids""": input_ids_a, """mems""": mems_a} snake_case__ : str =model(lowerCamelCase__ ).to_tuple() self.parent.assertEqual(hidden_states_a.shape , (self.batch_size, self.seq_length, self.hidden_size) ) self.parent.assertEqual(hidden_states_a.shape , (self.batch_size, self.seq_length, self.hidden_size) ) self.parent.assertListEqual( [mem.shape for mem in mems_a] , [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers , ) self.parent.assertListEqual( [mem.shape for mem in mems_a] , [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers , ) def lowercase__ ( self , a , a , a , a ): snake_case__ : Union[str, Any] =TFTransfoXLLMHeadModel(lowerCamelCase__ ) snake_case__ : str =model(lowerCamelCase__ ).to_tuple() snake_case__ : Tuple ={"""input_ids""": input_ids_a, """labels""": lm_labels} snake_case__ : Any =model(lowerCamelCase__ ).to_tuple() snake_case__ : Optional[Any] =model([input_ids_a, mems_a] ).to_tuple() snake_case__ : str ={"""input_ids""": input_ids_a, """mems""": mems_a, """labels""": lm_labels} snake_case__ : Optional[Any] =model(lowerCamelCase__ ).to_tuple() 
self.parent.assertEqual(lm_logits_a.shape , (self.batch_size, self.seq_length, self.vocab_size) ) self.parent.assertListEqual( [mem.shape for mem in mems_a] , [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers , ) self.parent.assertEqual(lm_logits_a.shape , (self.batch_size, self.seq_length, self.vocab_size) ) self.parent.assertListEqual( [mem.shape for mem in mems_a] , [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers , ) def lowercase__ ( self , a , a , a , a ): snake_case__ : Union[str, Any] =TFTransfoXLForSequenceClassification(lowerCamelCase__ ) snake_case__ : Dict =model(lowerCamelCase__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def lowercase__ ( self ): snake_case__ : Dict =self.prepare_config_and_inputs() (snake_case__) : int =config_and_inputs snake_case__ : List[Any] ={"""input_ids""": input_ids_a} return config, inputs_dict @require_tf class _lowercase ( _A , _A , unittest.TestCase ): _a : str = ( (TFTransfoXLModel, TFTransfoXLLMHeadModel, TFTransfoXLForSequenceClassification) if is_tf_available() else () ) _a : List[str] = () if is_tf_available() else () _a : List[str] = ( { """feature-extraction""": TFTransfoXLModel, """text-classification""": TFTransfoXLForSequenceClassification, """text-generation""": TFTransfoXLLMHeadModel, """zero-shot""": TFTransfoXLForSequenceClassification, } if is_tf_available() else {} ) # TODO: add this test when TFTransfoXLLMHead has a linear output layer implemented _a : Optional[Any] = False _a : Any = False _a : Tuple = False _a : List[Any] = False def lowercase__ ( self , a , a , a , a , a ): if pipeline_test_casse_name == "TextGenerationPipelineTests": # Get `ValueError: AttributeError: 'NoneType' object has no attribute 'new_ones'` or `AssertionError`. # `TransfoXLConfig` was never used in pipeline tests: cannot create a simple # tokenizer. 
return True return False def lowercase__ ( self ): snake_case__ : Any =TFTransfoXLModelTester(self ) snake_case__ : Optional[int] =ConfigTester(self , config_class=lowerCamelCase__ , d_embed=3_7 ) def lowercase__ ( self ): self.config_tester.run_common_tests() def lowercase__ ( self ): self.model_tester.set_seed() snake_case__ : Any =self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_transfo_xl_model(*lowerCamelCase__ ) def lowercase__ ( self ): self.model_tester.set_seed() snake_case__ : Dict =self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_transfo_xl_lm_head(*lowerCamelCase__ ) def lowercase__ ( self ): snake_case__ : Optional[int] =self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_transfo_xl_for_sequence_classification(*lowerCamelCase__ ) def lowercase__ ( self ): snake_case__ : str =self.model_tester.prepare_config_and_inputs_for_common() snake_case__ : int =[TFTransfoXLForSequenceClassification] for model_class in self.all_model_classes: snake_case__ : Dict =model_class(lowerCamelCase__ ) assert isinstance(model.get_input_embeddings() , tf.keras.layers.Layer ) if model_class in list_other_models_with_output_ebd: snake_case__ : Optional[Any] =model.get_output_embeddings() assert isinstance(lowerCamelCase__ , tf.keras.layers.Layer ) snake_case__ : List[Any] =model.get_bias() assert name is None else: snake_case__ : Union[str, Any] =model.get_output_embeddings() assert x is None snake_case__ : int =model.get_bias() assert name is None def lowercase__ ( self ): # TODO JP: Make TransfoXL XLA compliant pass @slow def lowercase__ ( self ): for model_name in TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: snake_case__ : str =TFTransfoXLModel.from_pretrained(lowerCamelCase__ ) self.assertIsNotNone(lowerCamelCase__ ) @unittest.skip(reason="""This model doesn't play well with fit() due to not returning a single loss.""" ) def lowercase__ ( self ): pass @require_tf class _lowercase ( unittest.TestCase ): @unittest.skip("""Skip test until #12651 is resolved.""" ) @slow def lowercase__ ( self ): snake_case__ : int =TFTransfoXLLMHeadModel.from_pretrained("""transfo-xl-wt103""" ) # fmt: off snake_case__ : Union[str, Any] =tf.convert_to_tensor([[3_3,1_2_9_7,2,1,1_0_0_9,4,1_1_0_9,1_1_7_3_9,4_7_6_2,3_5_8,5,2_5,2_4_5,2_2,1_7_0_6,1_7,2_0_0_9_8,5,3_2_1_5,2_1,3_7,1_1_1_0,3,1_3,1_0_4_1,4,2_4,6_0_3,4_9_0,2,7_1_4_7_7,2_0_0_9_8,1_0_4_4_4_7,2,2_0_9_6_1,1,2_6_0_4,4,1,3_2_9,3,6_2_2_4,8_3_1,1_6_0_0_2,2,8,6_0_3,7_8_9_6_7,2_9_5_4_6,2_3,8_0_3,2_0,2_5,4_1_6,5,8,2_3_2,4,2_7_7,6,1_8_5_5,4_6_0_1,3,2_9_5_4_6,5_4,8,3_6_0_9,5,5_7_2_1_1,4_9,4,1,2_7_7,1_8,8,1_7_5_5,1_5_6_9_1,3,3_4_1,2_5,4_1_6,6_9_3,4_2_5_7_3,7_1,1_7,4_0_1,9_4,3_1,1_7_9_1_9,2,2_9_5_4_6,7_8_7_3,1_8,1,4_3_5,2_3,1_1_0_1_1,7_5_5,5,5_1_6_7,3,7_9_8_3,9_8,8_4,2,2_9_5_4_6,3_2_6_7,8,3_6_0_9,4,1,4_8_6_5,1_0_7_5,2,6_0_8_7,7_1,6,3_4_6,8,5_8_5_4,3,2_9_5_4_6,8_2_4,1_4_0_0,1_8_6_8,2,1_9,1_6_0,2,3_1_1,8,5_4_9_6,2,2_0_9_2_0,1_7,2_5,1_5_0_9_7,3,2_4,2_4,0]] , dtype=tf.intaa ) # noqa: E231 # fmt: on # In 1991 , the remains of Russian Tsar Nicholas II and his family # ( except for Alexei and Maria ) are discovered . # The voice of Nicholas's young son , Tsarevich Alexei Nikolaevich , narrates the # remainder of the story . 1883 Western Siberia , # a young Grigori Rasputin is asked by his father and a group of men to perform magic . # Rasputin has a vision and denounces one of the men as a horse thief . 
Although his # father initially slaps him for making such an accusation , Rasputin watches as the # man is chased outside and beaten . Twenty years later , Rasputin sees a vision of # the Virgin Mary , prompting him to become a priest . Rasputin quickly becomes famous , # with people , even a bishop , begging for his blessing . <eod> </s> <eos> # fmt: off snake_case__ : Tuple =[3_3,1_2_9_7,2,1,1_0_0_9,4,1_1_0_9,1_1_7_3_9,4_7_6_2,3_5_8,5,2_5,2_4_5,2_2,1_7_0_6,1_7,2_0_0_9_8,5,3_2_1_5,2_1,3_7,1_1_1_0,3,1_3,1_0_4_1,4,2_4,6_0_3,4_9_0,2,7_1_4_7_7,2_0_0_9_8,1_0_4_4_4_7,2,2_0_9_6_1,1,2_6_0_4,4,1,3_2_9,3,6_2_2_4,8_3_1,1_6_0_0_2,2,8,6_0_3,7_8_9_6_7,2_9_5_4_6,2_3,8_0_3,2_0,2_5,4_1_6,5,8,2_3_2,4,2_7_7,6,1_8_5_5,4_6_0_1,3,2_9_5_4_6,5_4,8,3_6_0_9,5,5_7_2_1_1,4_9,4,1,2_7_7,1_8,8,1_7_5_5,1_5_6_9_1,3,3_4_1,2_5,4_1_6,6_9_3,4_2_5_7_3,7_1,1_7,4_0_1,9_4,3_1,1_7_9_1_9,2,2_9_5_4_6,7_8_7_3,1_8,1,4_3_5,2_3,1_1_0_1_1,7_5_5,5,5_1_6_7,3,7_9_8_3,9_8,8_4,2,2_9_5_4_6,3_2_6_7,8,3_6_0_9,4,1,4_8_6_5,1_0_7_5,2,6_0_8_7,7_1,6,3_4_6,8,5_8_5_4,3,2_9_5_4_6,8_2_4,1_4_0_0,1_8_6_8,2,1_9,1_6_0,2,3_1_1,8,5_4_9_6,2,2_0_9_2_0,1_7,2_5,1_5_0_9_7,3,2_4,2_4,0,3_3,1,1_8_5_7,2,1,1_0_0_9,4,1_1_0_9,1_1_7_3_9,4_7_6_2,3_5_8,5,2_5,2_4_5,2_8,1_1_1_0,3,1_3,1_0_4_1,4,2_4,6_0_3,4_9_0,2,7_1_4_7_7,2_0_0_9_8,1_0_4_4_4_7,2,2_0_9_6_1,1,2_6_0_4,4,1,3_2_9,3,0] # noqa: E231 # fmt: on # In 1991, the remains of Russian Tsar Nicholas II and his family ( # except for Alexei and Maria ) are discovered. The voice of young son, # Tsarevich Alexei Nikolaevich, narrates the remainder of the story. # 1883 Western Siberia, a young Grigori Rasputin is asked by his father # and a group of men to perform magic. Rasputin has a vision and # denounces one of the men as a horse thief. Although his father initially # slaps him for making such an accusation, Rasputin watches as the man # is chased outside and beaten. Twenty years later, Rasputin sees a vision # of the Virgin Mary, prompting him to become a priest. # Rasputin quickly becomes famous, with people, even a bishop, begging for # his blessing. <unk> <unk> <eos> In the 1990s, the remains of Russian Tsar # Nicholas II and his family were discovered. The voice of <unk> young son, # Tsarevich Alexei Nikolaevich, narrates the remainder of the story.<eos> snake_case__ : str =model.generate(lowerCamelCase__ , max_length=2_0_0 , do_sample=lowerCamelCase__ ) self.assertListEqual(output_ids[0].numpy().tolist() , lowerCamelCase__ )
385
def nor_gate(input_1: int, input_2: int) -> int:
    """NOR outputs 1 only when both inputs are 0."""
    return int(input_1 == input_2 == 0)


def main() -> None:
    print("Truth Table of NOR Gate:")
    print("| Input 1 | Input 2 | Output |")
    print(f"| 0 | 0 | {nor_gate(0, 0)} |")
    print(f"| 0 | 1 | {nor_gate(0, 1)} |")
    print(f"| 1 | 0 | {nor_gate(1, 0)} |")
    print(f"| 1 | 1 | {nor_gate(1, 1)} |")


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    main()
45
0
def nor_gate(input_1: int, input_2: int) -> int:
    '''NOR outputs 1 only when both inputs are 0.'''
    return int(input_1 == input_2 == 0)


def main() -> None:
    '''Print the truth table of the NOR gate.'''
    print("Truth Table of NOR Gate:")
    print("| Input 1 | Input 2 | Output |")
    print(f'''| 0 | 0 | {nor_gate(0, 0)} |''')
    print(f'''| 0 | 1 | {nor_gate(0, 1)} |''')
    print(f'''| 1 | 0 | {nor_gate(1, 0)} |''')
    print(f'''| 1 | 1 | {nor_gate(1, 1)} |''')


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    main()
663
import unittest import numpy as np from transformers.testing_utils import require_torch, require_vision from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import GLPNImageProcessor class lowerCAmelCase_ ( unittest.TestCase ): """simple docstring""" def __init__( self :Any , lowerCamelCase__ :Dict , lowerCamelCase__ :Optional[Any]=7 , lowerCamelCase__ :str=3 , lowerCamelCase__ :Optional[Any]=18 , lowerCamelCase__ :List[str]=30 , lowerCamelCase__ :str=4_00 , lowerCamelCase__ :Optional[int]=True , lowerCamelCase__ :Union[str, Any]=32 , lowerCamelCase__ :int=True , ): UpperCamelCase__ :List[Any] = parent UpperCamelCase__ :List[Any] = batch_size UpperCamelCase__ :Any = num_channels UpperCamelCase__ :List[str] = image_size UpperCamelCase__ :Dict = min_resolution UpperCamelCase__ :List[str] = max_resolution UpperCamelCase__ :str = do_resize UpperCamelCase__ :int = size_divisor UpperCamelCase__ :Optional[int] = do_rescale def __a ( self :str ): return { "do_resize": self.do_resize, "size_divisor": self.size_divisor, "do_rescale": self.do_rescale, } @require_torch @require_vision class lowerCAmelCase_ ( lowercase , unittest.TestCase ): """simple docstring""" _snake_case : Optional[int] = GLPNImageProcessor if is_vision_available() else None def __a ( self :Dict ): UpperCamelCase__ :Dict = GLPNImageProcessingTester(self ) @property def __a ( self :List[str] ): return self.image_processor_tester.prepare_image_processor_dict() def __a ( self :Optional[int] ): UpperCamelCase__ :Optional[Any] = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(lowerCamelCase__ , """do_resize""" ) ) self.assertTrue(hasattr(lowerCamelCase__ , """size_divisor""" ) ) self.assertTrue(hasattr(lowerCamelCase__ , """resample""" ) ) self.assertTrue(hasattr(lowerCamelCase__ , """do_rescale""" ) ) def __a ( self :Optional[int] ): pass def __a ( self :Tuple ): # Initialize image_processing UpperCamelCase__ :int = self.image_processing_class(**self.image_processor_dict ) # create random PIL images UpperCamelCase__ :str = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCamelCase__ ) for image in image_inputs: self.assertIsInstance(lowerCamelCase__ , Image.Image ) # Test not batched input (GLPNImageProcessor doesn't support batching) UpperCamelCase__ :Tuple = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values self.assertTrue(encoded_images.shape[-1] % self.image_processor_tester.size_divisor == 0 ) self.assertTrue(encoded_images.shape[-2] % self.image_processor_tester.size_divisor == 0 ) def __a ( self :str ): # Initialize image_processing UpperCamelCase__ :str = self.image_processing_class(**self.image_processor_dict ) # create random numpy tensors UpperCamelCase__ :Optional[Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCamelCase__ , numpify=lowerCamelCase__ ) for image in image_inputs: self.assertIsInstance(lowerCamelCase__ , np.ndarray ) # Test not batched input (GLPNImageProcessor doesn't support batching) UpperCamelCase__ :List[str] = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values self.assertTrue(encoded_images.shape[-1] % self.image_processor_tester.size_divisor == 0 ) self.assertTrue(encoded_images.shape[-2] % self.image_processor_tester.size_divisor == 0 ) def __a ( self 
:Any ): # Initialize image_processing UpperCamelCase__ :List[Any] = self.image_processing_class(**self.image_processor_dict ) # create random PyTorch tensors UpperCamelCase__ :Tuple = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCamelCase__ , torchify=lowerCamelCase__ ) for image in image_inputs: self.assertIsInstance(lowerCamelCase__ , torch.Tensor ) # Test not batched input (GLPNImageProcessor doesn't support batching) UpperCamelCase__ :List[str] = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values self.assertTrue(encoded_images.shape[-1] % self.image_processor_tester.size_divisor == 0 ) self.assertTrue(encoded_images.shape[-2] % self.image_processor_tester.size_divisor == 0 )
45
0
"""simple docstring""" import argparse import logging import pickle import random import time import numpy as np from transformers import BertTokenizer, GPTaTokenizer, RobertaTokenizer logging.basicConfig( format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""", datefmt="""%m/%d/%Y %H:%M:%S""", level=logging.INFO ) UpperCAmelCase_ : int = logging.getLogger(__name__) def _A () -> int: """simple docstring""" SCREAMING_SNAKE_CASE_ : Union[str, Any] = argparse.ArgumentParser( description='''Preprocess the data to avoid re-doing it several times by (tokenization + token_to_ids).''' ) parser.add_argument('''--file_path''' , type=lowercase__ , default='''data/dump.txt''' , help='''The path to the data.''' ) parser.add_argument('''--tokenizer_type''' , type=lowercase__ , default='''bert''' , choices=['''bert''', '''roberta''', '''gpt2'''] ) parser.add_argument('''--tokenizer_name''' , type=lowercase__ , default='''bert-base-uncased''' , help='''The tokenizer to use.''' ) parser.add_argument('''--dump_file''' , type=lowercase__ , default='''data/dump''' , help='''The dump file prefix.''' ) SCREAMING_SNAKE_CASE_ : Dict = parser.parse_args() logger.info(f'Loading Tokenizer ({args.tokenizer_name})' ) if args.tokenizer_type == "bert": SCREAMING_SNAKE_CASE_ : Dict = BertTokenizer.from_pretrained(args.tokenizer_name ) SCREAMING_SNAKE_CASE_ : Union[str, Any] = tokenizer.special_tokens_map["""cls_token"""] # `[CLS]` SCREAMING_SNAKE_CASE_ : Dict = tokenizer.special_tokens_map["""sep_token"""] # `[SEP]` elif args.tokenizer_type == "roberta": SCREAMING_SNAKE_CASE_ : List[Any] = RobertaTokenizer.from_pretrained(args.tokenizer_name ) SCREAMING_SNAKE_CASE_ : Union[str, Any] = tokenizer.special_tokens_map["""cls_token"""] # `<s>` SCREAMING_SNAKE_CASE_ : Any = tokenizer.special_tokens_map["""sep_token"""] # `</s>` elif args.tokenizer_type == "gpt2": SCREAMING_SNAKE_CASE_ : int = GPTaTokenizer.from_pretrained(args.tokenizer_name ) SCREAMING_SNAKE_CASE_ : Optional[Any] = tokenizer.special_tokens_map["""bos_token"""] # `<|endoftext|>` SCREAMING_SNAKE_CASE_ : Optional[Any] = tokenizer.special_tokens_map["""eos_token"""] # `<|endoftext|>` logger.info(f'Loading text from {args.file_path}' ) with open(args.file_path , '''r''' , encoding='''utf8''' ) as fp: SCREAMING_SNAKE_CASE_ : Any = fp.readlines() logger.info('''Start encoding''' ) logger.info(f'{len(lowercase__ )} examples to process.' ) SCREAMING_SNAKE_CASE_ : str = [] SCREAMING_SNAKE_CASE_ : Dict = 0 SCREAMING_SNAKE_CASE_ : Optional[int] = 1_00_00 SCREAMING_SNAKE_CASE_ : Optional[int] = time.time() for text in data: SCREAMING_SNAKE_CASE_ : Any = f'{bos} {text.strip()} {sep}' SCREAMING_SNAKE_CASE_ : Dict = tokenizer.encode(lowercase__ , add_special_tokens=lowercase__ ) rslt.append(lowercase__ ) iter += 1 if iter % interval == 0: SCREAMING_SNAKE_CASE_ : int = time.time() logger.info(f'{iter} examples processed. - {(end-start):.2f}s/{interval}expl' ) SCREAMING_SNAKE_CASE_ : Optional[int] = time.time() logger.info('''Finished binarization''' ) logger.info(f'{len(lowercase__ )} examples processed.' 
) SCREAMING_SNAKE_CASE_ : Any = f'{args.dump_file}.{args.tokenizer_name}.pickle' SCREAMING_SNAKE_CASE_ : Any = tokenizer.vocab_size if vocab_size < (1 << 16): SCREAMING_SNAKE_CASE_ : List[str] = [np.uintaa(lowercase__ ) for d in rslt] else: SCREAMING_SNAKE_CASE_ : List[str] = [np.intaa(lowercase__ ) for d in rslt] random.shuffle(rslt_ ) logger.info(f'Dump to {dp_file}' ) with open(lowercase__ , '''wb''' ) as handle: pickle.dump(rslt_ , lowercase__ , protocol=pickle.HIGHEST_PROTOCOL ) if __name__ == "__main__": main()
512
import math


def res(x, y):
    if 0 not in (x, y):
        # We use the relation x^y = y*log10(x), where 10 is the base.
        return y * math.log10(x)
    else:
        if x == 0:  # 0 raised to any number is 0
            return 0
        elif y == 0:
            return 1  # any number raised to 0 is 1
    raise AssertionError("This should never happen")


if __name__ == "__main__":  # Main function
    # Read two numbers from input and typecast them to int using map function.
    # Here x is the base and y is the power.
    prompt = "Enter the base and the power separated by a comma: "
    x1, y1 = map(int, input(prompt).split(","))
    x2, y2 = map(int, input(prompt).split(","))

    # We find the log of each number, using the function res(), which takes two
    # arguments.
    res1 = res(x1, y1)
    res2 = res(x2, y2)

    # We check for the largest number
    if res1 > res2:
        print("Largest number is", x1, "^", y1)
    elif res2 > res1:
        print("Largest number is", x2, "^", y2)
    else:
        print("Both are equal")
45
0
import argparse import os # New Code # import evaluate import torch from datasets import load_dataset from torch.optim import AdamW from torch.utils.data import DataLoader from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed from accelerate import Accelerator, DistributedType from accelerate.utils import find_executable_batch_size ######################################################################## # This is a fully working simple example to use Accelerate, # specifically showcasing how to ensure out-of-memory errors never # interrupt training, and builds off the `nlp_example.py` script. # # This example trains a Bert base model on GLUE MRPC # in any of the following settings (with the same script): # - single CPU or single GPU # - multi GPUS (using PyTorch distributed mode) # - (multi) TPUs # - fp16 (mixed-precision) or fp32 (normal precision) # # New additions from the base script can be found quickly by # looking for the # New Code # tags # # To run it in each of these various modes, follow the instructions # in the readme for examples: # https://github.com/huggingface/accelerate/tree/main/examples # ######################################################################## _a : List[Any] = 16 _a : int = 32 def UpperCamelCase__ ( _A: Accelerator , _A: int = 16 ): '''simple docstring''' __lowerCamelCase = AutoTokenizer.from_pretrained("""bert-base-cased""" ) __lowerCamelCase = load_dataset("""glue""" , """mrpc""" ) def tokenize_function(_A: str ): # max_length=None => use the model max length (it's actually the default) __lowerCamelCase = tokenizer(examples["""sentence1"""] , examples["""sentence2"""] , truncation=lowercase__ , max_length=lowercase__ ) return outputs # Apply the method we just defined to all the examples in all the splits of the dataset # starting with the main process first: with accelerator.main_process_first(): __lowerCamelCase = datasets.map( lowercase__ , batched=lowercase__ , remove_columns=["""idx""", """sentence1""", """sentence2"""] , ) # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the # transformers library __lowerCamelCase = tokenized_datasets.rename_column("""label""" , """labels""" ) def collate_fn(_A: List[Any] ): # On TPU it's best to pad everything to the same length or training will be very slow. __lowerCamelCase = 128 if accelerator.distributed_type == DistributedType.TPU else None # When using mixed precision we want round multiples of 8/16 if accelerator.mixed_precision == "fp8": __lowerCamelCase = 16 elif accelerator.mixed_precision != "no": __lowerCamelCase = 8 else: __lowerCamelCase = None return tokenizer.pad( lowercase__ , padding="""longest""" , max_length=lowercase__ , pad_to_multiple_of=lowercase__ , return_tensors="""pt""" , ) # Instantiate dataloaders. 
__lowerCamelCase = DataLoader( tokenized_datasets["""train"""] , shuffle=lowercase__ , collate_fn=lowercase__ , batch_size=lowercase__ ) __lowerCamelCase = DataLoader( tokenized_datasets["""validation"""] , shuffle=lowercase__ , collate_fn=lowercase__ , batch_size=lowercase__ ) return train_dataloader, eval_dataloader # For testing only if os.environ.get('TESTING_MOCKED_DATALOADERS', None) == "1": from accelerate.test_utils.training import mocked_dataloaders _a : Optional[int] = mocked_dataloaders # noqa: F811 def UpperCamelCase__ ( _A: str , _A: Optional[int] ): '''simple docstring''' if os.environ.get("""TESTING_MOCKED_DATALOADERS""" , lowercase__ ) == "1": __lowerCamelCase = 2 # Initialize accelerator __lowerCamelCase = Accelerator(cpu=args.cpu , mixed_precision=args.mixed_precision ) # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs __lowerCamelCase = config["""lr"""] __lowerCamelCase = int(config["""num_epochs"""] ) __lowerCamelCase = int(config["""seed"""] ) __lowerCamelCase = int(config["""batch_size"""] ) __lowerCamelCase = evaluate.load("""glue""" , """mrpc""" ) # New Code # # We now can define an inner training loop function. It should take a batch size as the only parameter, # and build the dataloaders in there. # It also gets our decorator @find_executable_batch_size(starting_batch_size=lowercase__ ) def inner_training_loop(_A: Union[str, Any] ): # And now just move everything below under this function # We need to bring in the Accelerator object from earlier nonlocal accelerator # And reset all of its attributes that could hold onto any memory: accelerator.free_memory() # Then we can declare the model, optimizer, and everything else: set_seed(lowercase__ ) # Instantiate the model (we build the model here so that the seed also control new weights initialization) __lowerCamelCase = AutoModelForSequenceClassification.from_pretrained("""bert-base-cased""" , return_dict=lowercase__ ) # We could avoid this line since the accelerator is set with `device_placement=True` (default value). # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that). __lowerCamelCase = model.to(accelerator.device ) # Instantiate optimizer __lowerCamelCase = AdamW(params=model.parameters() , lr=lowercase__ ) __lowerCamelCase = get_dataloaders(lowercase__ , lowercase__ ) # Instantiate scheduler __lowerCamelCase = get_linear_schedule_with_warmup( optimizer=lowercase__ , num_warmup_steps=100 , num_training_steps=(len(lowercase__ ) * num_epochs) , ) # Prepare everything # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the # prepare method. __lowerCamelCase = accelerator.prepare( lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ ) # Now we train the model for epoch in range(lowercase__ ): model.train() for step, batch in enumerate(lowercase__ ): # We could avoid this line since we set the accelerator with `device_placement=True`. batch.to(accelerator.device ) __lowerCamelCase = model(**lowercase__ ) __lowerCamelCase = outputs.loss accelerator.backward(lowercase__ ) optimizer.step() lr_scheduler.step() optimizer.zero_grad() model.eval() for step, batch in enumerate(lowercase__ ): # We could avoid this line since we set the accelerator with `device_placement=True`. 
batch.to(accelerator.device ) with torch.no_grad(): __lowerCamelCase = model(**lowercase__ ) __lowerCamelCase = outputs.logits.argmax(dim=-1 ) __lowerCamelCase = accelerator.gather_for_metrics((predictions, batch["""labels"""]) ) metric.add_batch( predictions=lowercase__ , references=lowercase__ , ) __lowerCamelCase = metric.compute() # Use accelerator.print to print only on the main process. accelerator.print(f'''epoch {epoch}:''' , lowercase__ ) # New Code # # And call it at the end with no arguments # Note: You could also refactor this outside of your training loop function inner_training_loop() def UpperCamelCase__ ( ): '''simple docstring''' __lowerCamelCase = argparse.ArgumentParser(description="""Simple example of training script.""" ) parser.add_argument( """--mixed_precision""" , type=lowercase__ , default=lowercase__ , choices=["""no""", """fp16""", """bf16""", """fp8"""] , help="""Whether to use mixed precision. Choose""" """between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10.""" """and an Nvidia Ampere GPU.""" , ) parser.add_argument("""--cpu""" , action="""store_true""" , help="""If passed, will train on the CPU.""" ) __lowerCamelCase = parser.parse_args() __lowerCamelCase = {"""lr""": 2e-5, """num_epochs""": 3, """seed""": 42, """batch_size""": 16} training_function(lowercase__ , lowercase__ ) if __name__ == "__main__": main()
479
from __future__ import annotations import unittest from transformers import is_tf_available from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import numpy as np import tensorflow as tf from transformers import ( TF_FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST, FlaubertConfig, TFFlaubertForMultipleChoice, TFFlaubertForQuestionAnsweringSimple, TFFlaubertForSequenceClassification, TFFlaubertForTokenClassification, TFFlaubertModel, TFFlaubertWithLMHeadModel, ) class lowerCAmelCase_ : """simple docstring""" def __init__( self :Dict , lowerCamelCase__ :List[str] , ): UpperCamelCase__ :Optional[int] = parent UpperCamelCase__ :int = 13 UpperCamelCase__ :Optional[int] = 7 UpperCamelCase__ :Dict = True UpperCamelCase__ :Dict = True UpperCamelCase__ :str = True UpperCamelCase__ :List[Any] = True UpperCamelCase__ :Any = True UpperCamelCase__ :Optional[int] = False UpperCamelCase__ :Optional[int] = False UpperCamelCase__ :Tuple = False UpperCamelCase__ :Optional[int] = 2 UpperCamelCase__ :List[str] = 99 UpperCamelCase__ :Optional[Any] = 0 UpperCamelCase__ :Any = 32 UpperCamelCase__ :List[str] = 2 UpperCamelCase__ :int = 4 UpperCamelCase__ :List[str] = 0.1 UpperCamelCase__ :Union[str, Any] = 0.1 UpperCamelCase__ :Union[str, Any] = 5_12 UpperCamelCase__ :List[str] = 16 UpperCamelCase__ :str = 2 UpperCamelCase__ :Optional[int] = 0.02 UpperCamelCase__ :Optional[int] = 3 UpperCamelCase__ :Optional[int] = 4 UpperCamelCase__ :Optional[int] = """last""" UpperCamelCase__ :Tuple = True UpperCamelCase__ :int = None UpperCamelCase__ :Dict = 0 def __a ( self :int ): UpperCamelCase__ :Optional[Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) UpperCamelCase__ :Any = random_attention_mask([self.batch_size, self.seq_length] , dtype=tf.floataa ) UpperCamelCase__ :Union[str, Any] = None if self.use_input_lengths: UpperCamelCase__ :Union[str, Any] = ( ids_tensor([self.batch_size] , vocab_size=2 ) + self.seq_length - 2 ) # small variation of seq_length UpperCamelCase__ :List[str] = None if self.use_token_type_ids: UpperCamelCase__ :List[str] = ids_tensor([self.batch_size, self.seq_length] , self.n_langs ) UpperCamelCase__ :int = None UpperCamelCase__ :List[str] = None UpperCamelCase__ :List[str] = None if self.use_labels: UpperCamelCase__ :List[Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size ) UpperCamelCase__ :Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) UpperCamelCase__ :str = ids_tensor([self.batch_size] , 2 , dtype=tf.floataa ) UpperCamelCase__ :int = ids_tensor([self.batch_size] , self.num_choices ) UpperCamelCase__ :List[Any] = FlaubertConfig( vocab_size=self.vocab_size , n_special=self.n_special , emb_dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , gelu_activation=self.gelu_activation , sinusoidal_embeddings=self.sinusoidal_embeddings , asm=self.asm , causal=self.causal , n_langs=self.n_langs , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , summary_type=self.summary_type , use_proj=self.use_proj , bos_token_id=self.bos_token_id , ) return ( config, input_ids, token_type_ids, input_lengths, 
sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask, ) def __a ( self :Union[str, Any] , lowerCamelCase__ :Optional[int] , lowerCamelCase__ :Union[str, Any] , lowerCamelCase__ :int , lowerCamelCase__ :List[str] , lowerCamelCase__ :Optional[int] , lowerCamelCase__ :int , lowerCamelCase__ :List[Any] , lowerCamelCase__ :List[Any] , lowerCamelCase__ :int , ): UpperCamelCase__ :int = TFFlaubertModel(config=lowerCamelCase__ ) UpperCamelCase__ :Union[str, Any] = {"""input_ids""": input_ids, """lengths""": input_lengths, """langs""": token_type_ids} UpperCamelCase__ :Optional[Any] = model(lowerCamelCase__ ) UpperCamelCase__ :Union[str, Any] = [input_ids, input_mask] UpperCamelCase__ :Optional[int] = model(lowerCamelCase__ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def __a ( self :Tuple , lowerCamelCase__ :Union[str, Any] , lowerCamelCase__ :Optional[Any] , lowerCamelCase__ :Union[str, Any] , lowerCamelCase__ :Any , lowerCamelCase__ :int , lowerCamelCase__ :int , lowerCamelCase__ :List[str] , lowerCamelCase__ :Any , lowerCamelCase__ :Optional[Any] , ): UpperCamelCase__ :List[str] = TFFlaubertWithLMHeadModel(lowerCamelCase__ ) UpperCamelCase__ :Optional[Any] = {"""input_ids""": input_ids, """lengths""": input_lengths, """langs""": token_type_ids} UpperCamelCase__ :Any = model(lowerCamelCase__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def __a ( self :Dict , lowerCamelCase__ :List[str] , lowerCamelCase__ :Dict , lowerCamelCase__ :Tuple , lowerCamelCase__ :str , lowerCamelCase__ :Any , lowerCamelCase__ :Tuple , lowerCamelCase__ :Any , lowerCamelCase__ :int , lowerCamelCase__ :Tuple , ): UpperCamelCase__ :int = TFFlaubertForQuestionAnsweringSimple(lowerCamelCase__ ) UpperCamelCase__ :int = {"""input_ids""": input_ids, """lengths""": input_lengths} UpperCamelCase__ :Optional[int] = model(lowerCamelCase__ ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def __a ( self :List[Any] , lowerCamelCase__ :List[str] , lowerCamelCase__ :List[str] , lowerCamelCase__ :str , lowerCamelCase__ :Optional[Any] , lowerCamelCase__ :Tuple , lowerCamelCase__ :Tuple , lowerCamelCase__ :Optional[Any] , lowerCamelCase__ :int , lowerCamelCase__ :Optional[int] , ): UpperCamelCase__ :List[Any] = TFFlaubertForSequenceClassification(lowerCamelCase__ ) UpperCamelCase__ :List[str] = {"""input_ids""": input_ids, """lengths""": input_lengths} UpperCamelCase__ :List[str] = model(lowerCamelCase__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) def __a ( self :Tuple , lowerCamelCase__ :str , lowerCamelCase__ :Any , lowerCamelCase__ :Tuple , lowerCamelCase__ :str , lowerCamelCase__ :Any , lowerCamelCase__ :Union[str, Any] , lowerCamelCase__ :List[str] , lowerCamelCase__ :str , lowerCamelCase__ :Any , ): UpperCamelCase__ :Any = self.num_labels UpperCamelCase__ :Tuple = TFFlaubertForTokenClassification(config=lowerCamelCase__ ) UpperCamelCase__ :Any = {"""input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids} UpperCamelCase__ :List[Any] = model(lowerCamelCase__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def __a ( self :Tuple , lowerCamelCase__ :Optional[int] , lowerCamelCase__ :Tuple , lowerCamelCase__ :Any , 
lowerCamelCase__ :Union[str, Any] , lowerCamelCase__ :str , lowerCamelCase__ :Any , lowerCamelCase__ :Optional[Any] , lowerCamelCase__ :Dict , lowerCamelCase__ :List[str] , ): UpperCamelCase__ :Optional[int] = self.num_choices UpperCamelCase__ :Dict = TFFlaubertForMultipleChoice(config=lowerCamelCase__ ) UpperCamelCase__ :Any = tf.tile(tf.expand_dims(lowerCamelCase__ , 1 ) , (1, self.num_choices, 1) ) UpperCamelCase__ :str = tf.tile(tf.expand_dims(lowerCamelCase__ , 1 ) , (1, self.num_choices, 1) ) UpperCamelCase__ :Any = tf.tile(tf.expand_dims(lowerCamelCase__ , 1 ) , (1, self.num_choices, 1) ) UpperCamelCase__ :int = { """input_ids""": multiple_choice_inputs_ids, """attention_mask""": multiple_choice_input_mask, """token_type_ids""": multiple_choice_token_type_ids, } UpperCamelCase__ :List[str] = model(lowerCamelCase__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) ) def __a ( self :Tuple ): UpperCamelCase__ :str = self.prepare_config_and_inputs() ( ( UpperCamelCase__ ) , ( UpperCamelCase__ ) , ( UpperCamelCase__ ) , ( UpperCamelCase__ ) , ( UpperCamelCase__ ) , ( UpperCamelCase__ ) , ( UpperCamelCase__ ) , ( UpperCamelCase__ ) , ( UpperCamelCase__ ) , ) :str = config_and_inputs UpperCamelCase__ :Optional[Any] = { """input_ids""": input_ids, """token_type_ids""": token_type_ids, """langs""": token_type_ids, """lengths""": input_lengths, } return config, inputs_dict @require_tf class lowerCAmelCase_ ( lowercase , lowercase , unittest.TestCase ): """simple docstring""" _snake_case : List[str] = ( ( TFFlaubertModel, TFFlaubertWithLMHeadModel, TFFlaubertForSequenceClassification, TFFlaubertForQuestionAnsweringSimple, TFFlaubertForTokenClassification, TFFlaubertForMultipleChoice, ) if is_tf_available() else () ) _snake_case : List[Any] = ( (TFFlaubertWithLMHeadModel,) if is_tf_available() else () ) # TODO (PVP): Check other models whether language generation is also applicable _snake_case : Optional[int] = ( { """feature-extraction""": TFFlaubertModel, """fill-mask""": TFFlaubertWithLMHeadModel, """question-answering""": TFFlaubertForQuestionAnsweringSimple, """text-classification""": TFFlaubertForSequenceClassification, """token-classification""": TFFlaubertForTokenClassification, """zero-shot""": TFFlaubertForSequenceClassification, } if is_tf_available() else {} ) _snake_case : List[Any] = False _snake_case : Tuple = False def __a ( self :Optional[int] , lowerCamelCase__ :List[Any] , lowerCamelCase__ :Dict , lowerCamelCase__ :int , lowerCamelCase__ :str , lowerCamelCase__ :List[Any] ): if ( pipeline_test_casse_name == "QAPipelineTests" and tokenizer_name is not None and not tokenizer_name.endswith("""Fast""" ) ): # `QAPipelineTests` fails for a few models when the slower tokenizer are used. 
# (The slower tokenizers were never used for pipeline tests before the pipeline testing rework) # TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer return True return False def __a ( self :List[str] ): UpperCamelCase__ :List[str] = TFFlaubertModelTester(self ) UpperCamelCase__ :Tuple = ConfigTester(self , config_class=lowerCamelCase__ , emb_dim=37 ) def __a ( self :int ): self.config_tester.run_common_tests() def __a ( self :List[str] ): UpperCamelCase__ :List[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_flaubert_model(*lowerCamelCase__ ) def __a ( self :Tuple ): UpperCamelCase__ :Tuple = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_flaubert_lm_head(*lowerCamelCase__ ) def __a ( self :Union[str, Any] ): UpperCamelCase__ :Optional[int] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_flaubert_qa(*lowerCamelCase__ ) def __a ( self :List[Any] ): UpperCamelCase__ :Optional[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_flaubert_sequence_classif(*lowerCamelCase__ ) def __a ( self :Any ): UpperCamelCase__ :Any = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_flaubert_for_token_classification(*lowerCamelCase__ ) def __a ( self :List[Any] ): UpperCamelCase__ :Union[str, Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_flaubert_for_multiple_choice(*lowerCamelCase__ ) @slow def __a ( self :str ): for model_name in TF_FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: UpperCamelCase__ :Dict = TFFlaubertModel.from_pretrained(lowerCamelCase__ ) self.assertIsNotNone(lowerCamelCase__ ) @require_tf @require_sentencepiece @require_tokenizers class lowerCAmelCase_ ( unittest.TestCase ): """simple docstring""" @slow def __a ( self :str ): UpperCamelCase__ :Tuple = TFFlaubertModel.from_pretrained("""jplu/tf-flaubert-small-cased""" ) UpperCamelCase__ :Optional[int] = tf.convert_to_tensor( [[0, 1_58, 7_35, 25_92, 14_24, 67_27, 82, 1]] , dtype=tf.intaa , ) # "J'aime flaubert !" UpperCamelCase__ :Optional[Any] = model(lowerCamelCase__ )[0] UpperCamelCase__ :Optional[int] = tf.TensorShape((1, 8, 5_12) ) self.assertEqual(output.shape , lowerCamelCase__ ) # compare the actual values for a slice. UpperCamelCase__ :str = tf.convert_to_tensor( [ [ [-1.876_8773, -1.56_6555, 0.2707_2418], [-1.692_0038, -0.587_3505, 1.932_9599], [-2.956_3985, -1.699_3835, 1.797_2052], ] ] , dtype=tf.floataa , ) self.assertTrue(np.allclose(output[:, :3, :3].numpy() , expected_slice.numpy() , atol=1e-4 ) )
45
0
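As a compact reference for the pattern the training script above demonstrates, here is a minimal sketch of Accelerate's batch-size back-off, assuming only that the accelerate package is installed; the toy function body is ours, not part of the script.

from accelerate.utils import find_executable_batch_size


@find_executable_batch_size(starting_batch_size=128)
def train(batch_size):
    # Called first with starting_batch_size; if the body raises a CUDA
    # out-of-memory error, the decorator halves batch_size and retries.
    print(f"trying batch_size={batch_size}")


train()  # invoked with no argument: the decorator injects batch_size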
'''simple docstring'''
from __future__ import annotations

import csv

import requests
from bs4 import BeautifulSoup  # fixed: BeautifulSoup lives in bs4, not "bsa"


def get_imdb_top_aaa_movies(url: str = "") -> dict[str, float]:
    '''simple docstring'''
    url = url or "https://www.imdb.com/chart/top/?ref_=nv_mv_250"
    soup = BeautifulSoup(requests.get(url).text, "html.parser")
    titles = soup.find_all("td", attrs="titleColumn")
    ratings = soup.find_all("td", class_="ratingColumn imdbRating")
    return {
        title.a.text: float(rating.strong.text)
        for title, rating in zip(titles, ratings)
    }


def write_movies(filename: str = "IMDb_Top_250_Movies.csv") -> None:
    '''simple docstring'''
    movies = get_imdb_top_aaa_movies()
    with open(filename, "w", newline="") as out_file:
        writer = csv.writer(out_file)
        writer.writerow(["Movie title", "IMDb rating"])
        for title, rating in movies.items():
            writer.writerow([title, rating])


if __name__ == "__main__":
    write_movies()
531
import gc import tempfile import unittest import numpy as np import torch from diffusers import VersatileDiffusionPipeline from diffusers.utils.testing_utils import load_image, nightly, require_torch_gpu, torch_device UpperCamelCase = False class lowerCAmelCase_ ( unittest.TestCase ): """simple docstring""" pass @nightly @require_torch_gpu class lowerCAmelCase_ ( unittest.TestCase ): """simple docstring""" def __a ( self :Union[str, Any] ): # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() def __a ( self :List[Any] ): UpperCamelCase__ :List[str] = VersatileDiffusionPipeline.from_pretrained("""shi-labs/versatile-diffusion""" , torch_dtype=torch.floataa ) pipe.to(lowerCamelCase__ ) pipe.set_progress_bar_config(disable=lowerCamelCase__ ) UpperCamelCase__ :Dict = load_image( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg""" ) UpperCamelCase__ :Any = torch.manual_seed(0 ) UpperCamelCase__ :Optional[int] = pipe.dual_guided( prompt="""first prompt""" , image=lowerCamelCase__ , text_to_image_strength=0.75 , generator=lowerCamelCase__ , guidance_scale=7.5 , num_inference_steps=2 , output_type="""numpy""" , ).images with tempfile.TemporaryDirectory() as tmpdirname: pipe.save_pretrained(lowerCamelCase__ ) UpperCamelCase__ :List[str] = VersatileDiffusionPipeline.from_pretrained(lowerCamelCase__ , torch_dtype=torch.floataa ) pipe.to(lowerCamelCase__ ) pipe.set_progress_bar_config(disable=lowerCamelCase__ ) UpperCamelCase__ :str = generator.manual_seed(0 ) UpperCamelCase__ :str = pipe.dual_guided( prompt="""first prompt""" , image=lowerCamelCase__ , text_to_image_strength=0.75 , generator=lowerCamelCase__ , guidance_scale=7.5 , num_inference_steps=2 , output_type="""numpy""" , ).images assert np.abs(image - new_image ).sum() < 1e-5, "Models don't have the same forward pass" def __a ( self :Dict ): UpperCamelCase__ :List[Any] = VersatileDiffusionPipeline.from_pretrained("""shi-labs/versatile-diffusion""" , torch_dtype=torch.floataa ) pipe.to(lowerCamelCase__ ) pipe.set_progress_bar_config(disable=lowerCamelCase__ ) UpperCamelCase__ :Optional[Any] = """cyberpunk 2077""" UpperCamelCase__ :str = load_image( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg""" ) UpperCamelCase__ :str = torch.manual_seed(0 ) UpperCamelCase__ :Dict = pipe.dual_guided( prompt=lowerCamelCase__ , image=lowerCamelCase__ , text_to_image_strength=0.75 , generator=lowerCamelCase__ , guidance_scale=7.5 , num_inference_steps=50 , output_type="""numpy""" , ).images UpperCamelCase__ :Tuple = image[0, 2_53:2_56, 2_53:2_56, -1] assert image.shape == (1, 5_12, 5_12, 3) UpperCamelCase__ :Any = np.array([0.1448, 0.1619, 0.1741, 0.1086, 0.1147, 0.1128, 0.1199, 0.1165, 0.1001] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1 UpperCamelCase__ :List[Any] = """A painting of a squirrel eating a burger """ UpperCamelCase__ :List[str] = torch.manual_seed(0 ) UpperCamelCase__ :Optional[int] = pipe.text_to_image( prompt=lowerCamelCase__ , generator=lowerCamelCase__ , guidance_scale=7.5 , num_inference_steps=50 , output_type="""numpy""" ).images UpperCamelCase__ :str = image[0, 2_53:2_56, 2_53:2_56, -1] assert image.shape == (1, 5_12, 5_12, 3) UpperCamelCase__ :Union[str, Any] = np.array([0.3367, 0.3169, 0.2656, 0.3870, 0.4790, 0.3796, 0.4009, 0.4878, 0.4778] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1 UpperCamelCase__ :Optional[int] = 
pipe.image_variation(lowerCamelCase__ , generator=lowerCamelCase__ , output_type="""numpy""" ).images UpperCamelCase__ :int = image[0, 2_53:2_56, 2_53:2_56, -1] assert image.shape == (1, 5_12, 5_12, 3) UpperCamelCase__ :List[Any] = np.array([0.3076, 0.3123, 0.3284, 0.3782, 0.3770, 0.3894, 0.4297, 0.4331, 0.4456] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1
45
0
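Stripped of its assertions, the dual-guided call exercised in the tests above reduces to roughly the following sketch; it assumes a CUDA device and reuses the checkpoint, prompt, and image URL from the test.

import requests
import torch
from PIL import Image

from diffusers import VersatileDiffusionPipeline

pipe = VersatileDiffusionPipeline.from_pretrained(
    "shi-labs/versatile-diffusion", torch_dtype=torch.float16
).to("cuda")
url = "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg"
init_image = Image.open(requests.get(url, stream=True).raw)
generator = torch.manual_seed(0)
# Blend text and image conditioning; text_to_image_strength weights the text side.
image = pipe.dual_guided(
    prompt="cyberpunk 2077",
    image=init_image,
    text_to_image_strength=0.75,
    generator=generator,
    guidance_scale=7.5,
    num_inference_steps=50,
).images[0]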
'''simple docstring''' import argparse import json import os from collections import OrderedDict import numpy as np import tensorflow as tf import torch def lowerCamelCase ( lowerCAmelCase : Optional[int] ): """simple docstring""" __magic_name__ : Optional[int] = os.path.join(args.tf_model_dir , 'parameters.json' ) __magic_name__ : List[str] = json.loads(open(lowercase__ ).read() ) if not params: raise ValueError( f'It seems that the json file at {parameter_file} is empty. Make sure you have a correct json file.' ) if not args.output.endswith('.pt' ): __magic_name__ : Union[str, Any] = args.output + """.pt""" __magic_name__ : str = OrderedDict() with tf.device('/CPU:0' ): __magic_name__ : List[str] = tf.train.load_checkpoint(args.tf_model_dir ) __magic_name__ : Tuple = reader.get_variable_to_shape_map() for key_name in shapes.keys(): __magic_name__ : Dict = reader.get_tensor(lowercase__ ).astype(np.floataa ) if key_name.endswith('/adam_m' ) or key_name.endswith('/adam_v' ): continue if key_name.startswith('pasts/' ): if key_name.startswith('pasts/mlp' ): __magic_name__ : Optional[int] = int(key_name[9] ) elif key_name.startswith('pasts/out' ): __magic_name__ : Tuple = 8 __magic_name__ : List[str] = """model.sqout.%d.weight""" % (player * 2) # enter to nn.Sequencial with Tanh, so 2 at a time __magic_name__ : str = vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix __magic_name__ : str = torch.tensor(lowercase__ ) elif key_name.startswith('model/moe' ): __magic_name__ : List[str] = int(key_name[9:].split('/' )[0] ) if key_name.endswith('/switch_gating/kernel' ): __magic_name__ : int = """model.blocks.%d.feed_forward.mlp.router.classifier.weight""" % player __magic_name__ : Optional[int] = vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix __magic_name__ : Optional[Any] = torch.tensor(lowercase__ ) elif key_name.endswith('/softmlp/kernel' ): __magic_name__ : Any = """model.blocks.%d.feed_forward.soft_bypass_mlp.weight""" % player __magic_name__ : str = vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix __magic_name__ : Dict = torch.tensor(lowercase__ ) elif key_name.endswith('/wo/kernel' ) or key_name.endswith('/wi/kernel' ): __magic_name__ : Dict = key_name[-9:-7] for i in range(16 ): __magic_name__ : Optional[Any] = """model.blocks.%d.feed_forward.mlp.experts.expert_%d.%s.weight""" % (player, i, nlayer) __magic_name__ : int = ( vnp[i].transpose([1, 0] ).copy() ) # In Mesh-Tensorflow, it is one array, so it is divided __magic_name__ : int = torch.tensor(lowercase__ ) elif key_name.startswith('model/mlp' ): __magic_name__ : Dict = int(key_name[9:].split('/' )[0] ) if key_name.endswith('/p1/kernel' ): __magic_name__ : Optional[Any] = """model.blocks.%d.feed_forward.mlp.wi.weight""" % player __magic_name__ : Optional[int] = vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix __magic_name__ : Union[str, Any] = torch.tensor(lowercase__ ) elif key_name.endswith('/p1/bias' ): __magic_name__ : Tuple = """model.blocks.%d.feed_forward.mlp.wi.bias""" % player __magic_name__ : Dict = vnp.copy() # same because it is one dimensional __magic_name__ : List[str] = torch.tensor(lowercase__ ) elif key_name.endswith('/p2/kernel' ): __magic_name__ : List[Any] = """model.blocks.%d.feed_forward.mlp.wo.weight""" % player __magic_name__ : int = vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix __magic_name__ : Any = torch.tensor(lowercase__ ) elif key_name.endswith('/p2/bias' ): __magic_name__ : Optional[int] = 
"""model.blocks.%d.feed_forward.mlp.wo.bias""" % player __magic_name__ : List[Any] = vnp.copy() # same because it is one dimensional __magic_name__ : str = torch.tensor(lowercase__ ) elif key_name.startswith('model/ln' ): __magic_name__ : Optional[Any] = int(key_name[8:].split('/' )[0] ) if key_name.endswith('/b' ): __magic_name__ : List[Any] = """model.blocks.%d.feed_forward.norm.bias""" % player __magic_name__ : List[str] = vnp.copy() # same because it is one dimensional __magic_name__ : Union[str, Any] = torch.tensor(lowercase__ ) elif key_name.endswith('/g' ): __magic_name__ : Any = """model.blocks.%d.feed_forward.norm.weight""" % player __magic_name__ : Dict = vnp.copy() # same because it is one dimensional __magic_name__ : List[Any] = torch.tensor(lowercase__ ) elif key_name.startswith('model/att' ): __magic_name__ : List[Any] = int(key_name[9:].split('/' )[0] ) if key_name.endswith('/qkv/kernel' ): __magic_name__ : Optional[int] = vnp.copy() # Compute same dimension as Mesh-tensorflow using einsum __magic_name__ : Union[str, Any] = state[:, 0, :, :] __magic_name__ : Union[str, Any] = state[:, 1, :, :] __magic_name__ : List[Any] = state[:, 2, :, :] __magic_name__ : str = ( state_q.reshape([state_q.shape[0], state_q.shape[1] * state_q.shape[2]] ) .transpose([1, 0] ) .copy() ) # Mesh-Tensorflow is a diagonal matrix __magic_name__ : Tuple = ( state_k.reshape([state_k.shape[0], state_k.shape[1] * state_k.shape[2]] ) .transpose([1, 0] ) .copy() ) # Mesh-Tensorflow is a diagonal matrix __magic_name__ : Dict = ( state_v.reshape([state_v.shape[0], state_v.shape[1] * state_v.shape[2]] ) .transpose([1, 0] ) .copy() ) # Mesh-Tensorflow is a diagonal matrix __magic_name__ : str = """model.blocks.%d.self_attn.self_attn.q_proj.weight""" % player __magic_name__ : List[str] = torch.tensor(lowercase__ ) __magic_name__ : Optional[int] = """model.blocks.%d.self_attn.self_attn.k_proj.weight""" % player __magic_name__ : Optional[Any] = torch.tensor(lowercase__ ) __magic_name__ : List[str] = """model.blocks.%d.self_attn.self_attn.v_proj.weight""" % player __magic_name__ : List[str] = torch.tensor(lowercase__ ) elif key_name.endswith('/o/kernel' ): __magic_name__ : Dict = """model.blocks.%d.self_attn.self_attn.out_proj.weight""" % player __magic_name__ : str = ( vnp.reshape([vnp.shape[0] * vnp.shape[1], vnp.shape[2]] ).transpose([1, 0] ).copy() ) # Mesh-Tensorflow is a diagonal matrix __magic_name__ : int = torch.tensor(lowercase__ ) elif key_name.startswith('model/an' ): __magic_name__ : Optional[int] = int(key_name[8:].split('/' )[0] ) if key_name.endswith('/b' ): __magic_name__ : Optional[int] = """model.blocks.%d.self_attn.norm.bias""" % player __magic_name__ : Optional[int] = vnp.copy() # same because it is one dimensional __magic_name__ : str = torch.tensor(lowercase__ ) elif key_name.endswith('/g' ): __magic_name__ : str = """model.blocks.%d.self_attn.norm.weight""" % player __magic_name__ : Tuple = vnp.copy() # same because it is one dimensional __magic_name__ : str = torch.tensor(lowercase__ ) elif ( key_name.startswith('model/wte' ) or key_name.startswith('model/wpe' ) or key_name.startswith('model/ete' ) ): __magic_name__ : int = {"""wte""": """embed_tokens""", """wpe""": """position_embeddings""", """ete""": """extra_position_embeddings"""}[ key_name[-3:] ] __magic_name__ : List[Any] = """model.%s.weight""" % nlayer __magic_name__ : str = vnp.copy() # same in embedded __magic_name__ : Dict = torch.tensor(lowercase__ ) if key_name.startswith('model/wte' ): __magic_name__ : Any = 
"""lm_head.weight""" __magic_name__ : Optional[int] = vnp.copy() # same in embedded __magic_name__ : Tuple = torch.tensor(lowercase__ ) elif key_name.startswith('model/wob' ): __magic_name__ : Dict = """final_logits_bias""" __magic_name__ : Optional[Any] = vnp.copy() # same in embedded __magic_name__ : Optional[Any] = state.reshape((1, -1) ) __magic_name__ : Optional[Any] = torch.tensor(lowercase__ ) elif key_name == "model/dense/kernel": __magic_name__ : Tuple = """model.last_project.weight""" __magic_name__ : List[str] = vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix __magic_name__ : Tuple = torch.tensor(lowercase__ ) elif key_name == "model/dense_1/bias": __magic_name__ : List[Any] = """model.last_project.bias""" __magic_name__ : Any = vnp.copy() # same because it is one dimensional __magic_name__ : List[Any] = torch.tensor(lowercase__ ) torch.save(lowercase__ , args.output ) if __name__ == "__main__": lowerCAmelCase :Optional[int] = argparse.ArgumentParser( description='''model converter.''', formatter_class=argparse.ArgumentDefaultsHelpFormatter ) parser.add_argument('''--tf_model_dir''', metavar='''PATH''', type=str, required=True, help='''import model''') parser.add_argument('''--output''', metavar='''PATH''', type=str, required=True, help='''output model''') lowerCAmelCase :Dict = parser.parse_args() convert_tf_gptsan_to_pt(args)
561
from __future__ import annotations import copy import inspect import unittest import numpy as np from transformers import is_tf_available, is_vision_available from transformers.models.auto import get_values from transformers.testing_utils import require_tf, slow from transformers.utils import cached_property from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import ( TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST, TF_MODEL_FOR_MULTIPLE_CHOICE_MAPPING, TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING, TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING, TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING, LayoutLMvaConfig, TFLayoutLMvaForQuestionAnswering, TFLayoutLMvaForSequenceClassification, TFLayoutLMvaForTokenClassification, TFLayoutLMvaModel, ) if is_vision_available(): from PIL import Image from transformers import LayoutLMvaImageProcessor class lowerCAmelCase_ : """simple docstring""" def __init__( self :Union[str, Any] , lowerCamelCase__ :Tuple , lowerCamelCase__ :List[str]=2 , lowerCamelCase__ :List[str]=3 , lowerCamelCase__ :List[str]=4 , lowerCamelCase__ :str=2 , lowerCamelCase__ :Optional[int]=7 , lowerCamelCase__ :List[Any]=True , lowerCamelCase__ :Optional[Any]=True , lowerCamelCase__ :Union[str, Any]=True , lowerCamelCase__ :Any=True , lowerCamelCase__ :Dict=99 , lowerCamelCase__ :Optional[Any]=36 , lowerCamelCase__ :str=2 , lowerCamelCase__ :List[Any]=4 , lowerCamelCase__ :Optional[Any]=37 , lowerCamelCase__ :Optional[int]="gelu" , lowerCamelCase__ :Any=0.1 , lowerCamelCase__ :List[Any]=0.1 , lowerCamelCase__ :List[Any]=5_12 , lowerCamelCase__ :str=16 , lowerCamelCase__ :Tuple=2 , lowerCamelCase__ :int=0.02 , lowerCamelCase__ :List[Any]=6 , lowerCamelCase__ :List[str]=6 , lowerCamelCase__ :Optional[int]=3 , lowerCamelCase__ :Optional[int]=4 , lowerCamelCase__ :int=None , lowerCamelCase__ :Optional[Any]=10_00 , ): UpperCamelCase__ :Any = parent UpperCamelCase__ :Union[str, Any] = batch_size UpperCamelCase__ :Dict = num_channels UpperCamelCase__ :Optional[Any] = image_size UpperCamelCase__ :Union[str, Any] = patch_size UpperCamelCase__ :Union[str, Any] = is_training UpperCamelCase__ :str = use_input_mask UpperCamelCase__ :int = use_token_type_ids UpperCamelCase__ :int = use_labels UpperCamelCase__ :List[Any] = vocab_size UpperCamelCase__ :List[str] = hidden_size UpperCamelCase__ :List[Any] = num_hidden_layers UpperCamelCase__ :List[str] = num_attention_heads UpperCamelCase__ :Tuple = intermediate_size UpperCamelCase__ :Any = hidden_act UpperCamelCase__ :Optional[int] = hidden_dropout_prob UpperCamelCase__ :Tuple = attention_probs_dropout_prob UpperCamelCase__ :Dict = max_position_embeddings UpperCamelCase__ :Tuple = type_vocab_size UpperCamelCase__ :Union[str, Any] = type_sequence_label_size UpperCamelCase__ :int = initializer_range UpperCamelCase__ :List[Any] = coordinate_size UpperCamelCase__ :Tuple = shape_size UpperCamelCase__ :Dict = num_labels UpperCamelCase__ :str = num_choices UpperCamelCase__ :Tuple = scope UpperCamelCase__ :str = range_bbox # LayoutLMv3's sequence length equals the number of text tokens + number of patches + 1 (we add 1 for the CLS token) UpperCamelCase__ :List[str] = text_seq_length UpperCamelCase__ :List[str] = (image_size // patch_size) ** 2 + 1 UpperCamelCase__ :Dict = self.text_seq_length + self.image_seq_length def __a ( self :Tuple ): 
UpperCamelCase__ :Optional[int] = ids_tensor([self.batch_size, self.text_seq_length] , self.vocab_size ) UpperCamelCase__ :int = ids_tensor([self.batch_size, self.text_seq_length, 4] , self.range_bbox ) UpperCamelCase__ :str = bbox.numpy() # Ensure that bbox is legal for i in range(bbox.shape[0] ): for j in range(bbox.shape[1] ): if bbox[i, j, 3] < bbox[i, j, 1]: UpperCamelCase__ :List[str] = bbox[i, j, 3] UpperCamelCase__ :Optional[int] = bbox[i, j, 1] UpperCamelCase__ :Optional[Any] = tmp_coordinate if bbox[i, j, 2] < bbox[i, j, 0]: UpperCamelCase__ :Tuple = bbox[i, j, 2] UpperCamelCase__ :Optional[Any] = bbox[i, j, 0] UpperCamelCase__ :List[str] = tmp_coordinate UpperCamelCase__ :Dict = tf.constant(lowerCamelCase__ ) UpperCamelCase__ :Union[str, Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) UpperCamelCase__ :Any = None if self.use_input_mask: UpperCamelCase__ :int = random_attention_mask([self.batch_size, self.text_seq_length] ) UpperCamelCase__ :Optional[Any] = None if self.use_token_type_ids: UpperCamelCase__ :Optional[int] = ids_tensor([self.batch_size, self.text_seq_length] , self.type_vocab_size ) UpperCamelCase__ :List[str] = None UpperCamelCase__ :Union[str, Any] = None if self.use_labels: UpperCamelCase__ :Union[str, Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size ) UpperCamelCase__ :Union[str, Any] = ids_tensor([self.batch_size, self.text_seq_length] , self.num_labels ) UpperCamelCase__ :Optional[int] = LayoutLMvaConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , coordinate_size=self.coordinate_size , shape_size=self.shape_size , input_size=self.image_size , patch_size=self.patch_size , ) return config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels def __a ( self :List[Any] , lowerCamelCase__ :str , lowerCamelCase__ :Optional[int] , lowerCamelCase__ :Dict , lowerCamelCase__ :str , lowerCamelCase__ :int , lowerCamelCase__ :Any ): UpperCamelCase__ :Dict = TFLayoutLMvaModel(config=lowerCamelCase__ ) # text + image UpperCamelCase__ :Tuple = model(lowerCamelCase__ , pixel_values=lowerCamelCase__ , training=lowerCamelCase__ ) UpperCamelCase__ :Tuple = model( lowerCamelCase__ , bbox=lowerCamelCase__ , pixel_values=lowerCamelCase__ , attention_mask=lowerCamelCase__ , token_type_ids=lowerCamelCase__ , training=lowerCamelCase__ , ) UpperCamelCase__ :str = model(lowerCamelCase__ , bbox=lowerCamelCase__ , pixel_values=lowerCamelCase__ , training=lowerCamelCase__ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) # text only UpperCamelCase__ :Optional[int] = model(lowerCamelCase__ , training=lowerCamelCase__ ) self.parent.assertEqual( result.last_hidden_state.shape , (self.batch_size, self.text_seq_length, self.hidden_size) ) # image only UpperCamelCase__ :Tuple = model({"""pixel_values""": pixel_values} , training=lowerCamelCase__ ) self.parent.assertEqual( result.last_hidden_state.shape , (self.batch_size, self.image_seq_length, self.hidden_size) ) def __a ( self :Dict , lowerCamelCase__ :str , 
lowerCamelCase__ :Union[str, Any] , lowerCamelCase__ :Dict , lowerCamelCase__ :Union[str, Any] , lowerCamelCase__ :str , lowerCamelCase__ :Optional[Any] , lowerCamelCase__ :str ): UpperCamelCase__ :Optional[Any] = self.num_labels UpperCamelCase__ :List[Any] = TFLayoutLMvaForSequenceClassification(config=lowerCamelCase__ ) UpperCamelCase__ :List[str] = model( lowerCamelCase__ , bbox=lowerCamelCase__ , pixel_values=lowerCamelCase__ , attention_mask=lowerCamelCase__ , token_type_ids=lowerCamelCase__ , labels=lowerCamelCase__ , training=lowerCamelCase__ , ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def __a ( self :List[str] , lowerCamelCase__ :List[str] , lowerCamelCase__ :Union[str, Any] , lowerCamelCase__ :Dict , lowerCamelCase__ :List[Any] , lowerCamelCase__ :Optional[int] , lowerCamelCase__ :Tuple , lowerCamelCase__ :List[str] ): UpperCamelCase__ :Union[str, Any] = self.num_labels UpperCamelCase__ :Dict = TFLayoutLMvaForTokenClassification(config=lowerCamelCase__ ) UpperCamelCase__ :Optional[Any] = model( lowerCamelCase__ , bbox=lowerCamelCase__ , pixel_values=lowerCamelCase__ , attention_mask=lowerCamelCase__ , token_type_ids=lowerCamelCase__ , labels=lowerCamelCase__ , training=lowerCamelCase__ , ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.text_seq_length, self.num_labels) ) def __a ( self :int , lowerCamelCase__ :Dict , lowerCamelCase__ :Optional[Any] , lowerCamelCase__ :Optional[Any] , lowerCamelCase__ :Any , lowerCamelCase__ :Dict , lowerCamelCase__ :Tuple , lowerCamelCase__ :Tuple ): UpperCamelCase__ :Dict = 2 UpperCamelCase__ :Tuple = TFLayoutLMvaForQuestionAnswering(config=lowerCamelCase__ ) UpperCamelCase__ :int = model( lowerCamelCase__ , bbox=lowerCamelCase__ , pixel_values=lowerCamelCase__ , attention_mask=lowerCamelCase__ , token_type_ids=lowerCamelCase__ , start_positions=lowerCamelCase__ , end_positions=lowerCamelCase__ , training=lowerCamelCase__ , ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def __a ( self :List[Any] ): UpperCamelCase__ :Union[str, Any] = self.prepare_config_and_inputs() ((UpperCamelCase__) , (UpperCamelCase__) , (UpperCamelCase__) , (UpperCamelCase__) , (UpperCamelCase__) , (UpperCamelCase__) , (UpperCamelCase__) , (UpperCamelCase__)) :Any = config_and_inputs UpperCamelCase__ :List[str] = { """input_ids""": input_ids, """bbox""": bbox, """pixel_values""": pixel_values, """token_type_ids""": token_type_ids, """attention_mask""": input_mask, } return config, inputs_dict @require_tf class lowerCAmelCase_ ( lowercase , lowercase , unittest.TestCase ): """simple docstring""" _snake_case : Dict = ( ( TFLayoutLMvaModel, TFLayoutLMvaForQuestionAnswering, TFLayoutLMvaForSequenceClassification, TFLayoutLMvaForTokenClassification, ) if is_tf_available() else () ) _snake_case : Dict = ( {"""document-question-answering""": TFLayoutLMvaForQuestionAnswering, """feature-extraction""": TFLayoutLMvaModel} if is_tf_available() else {} ) _snake_case : Optional[int] = False _snake_case : List[str] = False _snake_case : Tuple = False def __a ( self :str , lowerCamelCase__ :Optional[int] , lowerCamelCase__ :Union[str, Any] , lowerCamelCase__ :Optional[Any] , lowerCamelCase__ :Tuple , lowerCamelCase__ :int ): return True def __a ( self :Optional[int] , lowerCamelCase__ :int , lowerCamelCase__ :List[str] , lowerCamelCase__ :Optional[int]=False ): UpperCamelCase__ :List[str] = 
copy.deepcopy(lowerCamelCase__ ) if model_class in get_values(lowerCamelCase__ ): UpperCamelCase__ :Optional[int] = { k: tf.tile(tf.expand_dims(lowerCamelCase__ , 1 ) , (1, self.model_tester.num_choices) + (1,) * (v.ndim - 1) ) if isinstance(lowerCamelCase__ , tf.Tensor ) and v.ndim > 0 else v for k, v in inputs_dict.items() } if return_labels: if model_class in get_values(lowerCamelCase__ ): UpperCamelCase__ :str = tf.ones(self.model_tester.batch_size , dtype=tf.intaa ) elif model_class in get_values(lowerCamelCase__ ): UpperCamelCase__ :List[str] = tf.zeros(self.model_tester.batch_size , dtype=tf.intaa ) UpperCamelCase__ :Union[str, Any] = tf.zeros(self.model_tester.batch_size , dtype=tf.intaa ) elif model_class in get_values(lowerCamelCase__ ): UpperCamelCase__ :Optional[Any] = tf.zeros(self.model_tester.batch_size , dtype=tf.intaa ) elif model_class in get_values(lowerCamelCase__ ): UpperCamelCase__ :Tuple = tf.zeros( (self.model_tester.batch_size, self.model_tester.text_seq_length) , dtype=tf.intaa ) return inputs_dict def __a ( self :Dict ): UpperCamelCase__ :List[Any] = TFLayoutLMvaModelTester(self ) UpperCamelCase__ :Optional[int] = ConfigTester(self , config_class=lowerCamelCase__ , hidden_size=37 ) def __a ( self :Any ): self.config_tester.run_common_tests() def __a ( self :Optional[int] ): UpperCamelCase__ , UpperCamelCase__ :Any = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: UpperCamelCase__ :Optional[int] = model_class(lowerCamelCase__ ) if getattr(lowerCamelCase__ , """hf_compute_loss""" , lowerCamelCase__ ): # The number of elements in the loss should be the same as the number of elements in the label UpperCamelCase__ :Union[str, Any] = self._prepare_for_class(inputs_dict.copy() , lowerCamelCase__ , return_labels=lowerCamelCase__ ) UpperCamelCase__ :int = prepared_for_class[ sorted(prepared_for_class.keys() - inputs_dict.keys() , reverse=lowerCamelCase__ )[0] ] UpperCamelCase__ :Union[str, Any] = added_label.shape.as_list()[:1] # Test that model correctly compute the loss with kwargs UpperCamelCase__ :List[Any] = self._prepare_for_class(inputs_dict.copy() , lowerCamelCase__ , return_labels=lowerCamelCase__ ) UpperCamelCase__ :Optional[Any] = prepared_for_class.pop("""input_ids""" ) UpperCamelCase__ :List[str] = model(lowerCamelCase__ , **lowerCamelCase__ )[0] self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1] ) # Test that model correctly compute the loss when we mask some positions UpperCamelCase__ :Union[str, Any] = self._prepare_for_class(inputs_dict.copy() , lowerCamelCase__ , return_labels=lowerCamelCase__ ) UpperCamelCase__ :Optional[Any] = prepared_for_class.pop("""input_ids""" ) if "labels" in prepared_for_class: UpperCamelCase__ :List[str] = prepared_for_class["""labels"""].numpy() if len(labels.shape ) > 1 and labels.shape[1] != 1: UpperCamelCase__ :Optional[Any] = -1_00 UpperCamelCase__ :Union[str, Any] = tf.convert_to_tensor(lowerCamelCase__ ) UpperCamelCase__ :Tuple = model(lowerCamelCase__ , **lowerCamelCase__ )[0] self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1] ) self.assertTrue(not np.any(np.isnan(loss.numpy() ) ) ) # Test that model correctly compute the loss with a dict UpperCamelCase__ :Optional[Any] = self._prepare_for_class(inputs_dict.copy() , lowerCamelCase__ , return_labels=lowerCamelCase__ ) UpperCamelCase__ :Union[str, Any] = model(lowerCamelCase__ )[0] self.assertTrue(loss.shape.as_list() == expected_loss_size or 
loss.shape.as_list() == [1] ) # Test that model correctly compute the loss with a tuple UpperCamelCase__ :Dict = self._prepare_for_class(inputs_dict.copy() , lowerCamelCase__ , return_labels=lowerCamelCase__ ) # Get keys that were added with the _prepare_for_class function UpperCamelCase__ :str = prepared_for_class.keys() - inputs_dict.keys() UpperCamelCase__ :Tuple = inspect.signature(model.call ).parameters UpperCamelCase__ :str = list(signature.keys() ) # Create a dictionary holding the location of the tensors in the tuple UpperCamelCase__ :Any = {0: """input_ids"""} for label_key in label_keys: UpperCamelCase__ :Dict = signature_names.index(lowerCamelCase__ ) UpperCamelCase__ :Optional[int] = label_key UpperCamelCase__ :Optional[Any] = sorted(tuple_index_mapping.items() ) # Initialize a list with their default values, update the values and convert to a tuple UpperCamelCase__ :Any = [] for name in signature_names: if name != "kwargs": list_input.append(signature[name].default ) for index, value in sorted_tuple_index_mapping: UpperCamelCase__ :List[str] = prepared_for_class[value] UpperCamelCase__ :Union[str, Any] = tuple(lowerCamelCase__ ) # Send to model UpperCamelCase__ :str = model(tuple_input[:-1] )[0] self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1] ) def __a ( self :Optional[int] ): ( ( UpperCamelCase__ ) , ( UpperCamelCase__ ) , ( UpperCamelCase__ ) , ( UpperCamelCase__ ) , ( UpperCamelCase__ ) , ( UpperCamelCase__ ) , ( UpperCamelCase__ ) , ( UpperCamelCase__ ) , ) :Any = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) def __a ( self :Any ): ( ( UpperCamelCase__ ) , ( UpperCamelCase__ ) , ( UpperCamelCase__ ) , ( UpperCamelCase__ ) , ( UpperCamelCase__ ) , ( UpperCamelCase__ ) , ( UpperCamelCase__ ) , ( UpperCamelCase__ ) , ) :List[Any] = self.model_tester.prepare_config_and_inputs() for type in ["absolute", "relative_key", "relative_key_query"]: UpperCamelCase__ :Dict = type self.model_tester.create_and_check_model(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) def __a ( self :Tuple ): ( ( UpperCamelCase__ ) , ( UpperCamelCase__ ) , ( UpperCamelCase__ ) , ( UpperCamelCase__ ) , ( UpperCamelCase__ ) , ( UpperCamelCase__ ) , ( UpperCamelCase__ ) , ( UpperCamelCase__ ) , ) :int = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_sequence_classification( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) def __a ( self :Optional[int] ): ( ( UpperCamelCase__ ) , ( UpperCamelCase__ ) , ( UpperCamelCase__ ) , ( UpperCamelCase__ ) , ( UpperCamelCase__ ) , ( UpperCamelCase__ ) , ( UpperCamelCase__ ) , ( UpperCamelCase__ ) , ) :Union[str, Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_token_classification( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) def __a ( self :List[str] ): ( ( UpperCamelCase__ ) , ( UpperCamelCase__ ) , ( UpperCamelCase__ ) , ( UpperCamelCase__ ) , ( UpperCamelCase__ ) , ( UpperCamelCase__ ) , ( UpperCamelCase__ ) , ( UpperCamelCase__ ) , ) :Tuple = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_question_answering( 
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) @slow def __a ( self :Optional[int] ): for model_name in TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: UpperCamelCase__ :Dict = TFLayoutLMvaModel.from_pretrained(lowerCamelCase__ ) self.assertIsNotNone(lowerCamelCase__ ) def A ( ) -> List[str]: UpperCamelCase__ :List[Any] = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" ) return image @require_tf class lowerCAmelCase_ ( unittest.TestCase ): """simple docstring""" @cached_property def __a ( self :Optional[Any] ): return LayoutLMvaImageProcessor(apply_ocr=lowerCamelCase__ ) if is_vision_available() else None @slow def __a ( self :Dict ): UpperCamelCase__ :List[str] = TFLayoutLMvaModel.from_pretrained("""microsoft/layoutlmv3-base""" ) UpperCamelCase__ :List[Any] = self.default_image_processor UpperCamelCase__ :str = prepare_img() UpperCamelCase__ :Any = image_processor(images=lowerCamelCase__ , return_tensors="""tf""" ).pixel_values UpperCamelCase__ :str = tf.constant([[1, 2]] ) UpperCamelCase__ :Any = tf.expand_dims(tf.constant([[1, 2, 3, 4], [5, 6, 7, 8]] ) , axis=0 ) # forward pass UpperCamelCase__ :Dict = model(input_ids=lowerCamelCase__ , bbox=lowerCamelCase__ , pixel_values=lowerCamelCase__ , training=lowerCamelCase__ ) # verify the logits UpperCamelCase__ :int = (1, 1_99, 7_68) self.assertEqual(outputs.last_hidden_state.shape , lowerCamelCase__ ) UpperCamelCase__ :List[Any] = tf.constant( [[-0.0529, 0.3618, 0.1632], [-0.1587, -0.1667, -0.0400], [-0.1557, -0.1671, -0.0505]] ) self.assertTrue(np.allclose(outputs.last_hidden_state[0, :3, :3] , lowerCamelCase__ , atol=1e-4 ) )
45
0
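The converter above repeats one skeleton many times: walk the TensorFlow checkpoint, skip Adam slot variables, transpose 2-D kernels, and collect torch tensors under new names. Isolated, with an illustrative name mapping that is not GPTSAN's actual one, that skeleton is:

import numpy as np
import tensorflow as tf
import torch


def tf_checkpoint_to_state_dict(ckpt_dir: str) -> dict[str, torch.Tensor]:
    reader = tf.train.load_checkpoint(ckpt_dir)
    state = {}
    for name in reader.get_variable_to_shape_map():
        if name.endswith(("/adam_m", "/adam_v")):  # optimizer slots, not weights
            continue
        arr = reader.get_tensor(name).astype(np.float32)
        if arr.ndim == 2:
            # TF dense kernels are (in, out); torch nn.Linear expects (out, in).
            arr = arr.transpose([1, 0]).copy()
        state[name.replace("/", ".")] = torch.tensor(arr)
    return state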
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping

from packaging import version

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging


logger = logging.get_logger(__name__)

DATA2VEC_VISION_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/data2vec-vision-base-ft": (
        "https://huggingface.co/facebook/data2vec-vision-base-ft/resolve/main/config.json"
    ),
}


class Data2VecVisionConfig(PretrainedConfig):
    model_type = "data2vec-vision"

    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        image_size=224,
        patch_size=16,
        num_channels=3,
        use_mask_token=False,
        use_absolute_position_embeddings=False,
        use_relative_position_bias=False,
        use_shared_relative_position_bias=False,
        layer_scale_init_value=0.1,
        drop_path_rate=0.1,
        use_mean_pooling=True,
        out_indices=[3, 5, 7, 11],
        pool_scales=[1, 2, 3, 6],
        use_auxiliary_head=True,
        auxiliary_loss_weight=0.4,
        auxiliary_channels=256,
        auxiliary_num_convs=1,
        auxiliary_concat_input=False,
        semantic_loss_ignore_index=255,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.use_mask_token = use_mask_token
        self.use_absolute_position_embeddings = use_absolute_position_embeddings
        self.use_relative_position_bias = use_relative_position_bias
        self.use_shared_relative_position_bias = use_shared_relative_position_bias
        self.layer_scale_init_value = layer_scale_init_value
        self.drop_path_rate = drop_path_rate
        self.use_mean_pooling = use_mean_pooling
        # decode head attributes (semantic segmentation)
        self.out_indices = out_indices
        self.pool_scales = pool_scales
        # auxiliary head attributes (semantic segmentation)
        self.use_auxiliary_head = use_auxiliary_head
        self.auxiliary_loss_weight = auxiliary_loss_weight
        self.auxiliary_channels = auxiliary_channels
        self.auxiliary_num_convs = auxiliary_num_convs
        self.auxiliary_concat_input = auxiliary_concat_input
        self.semantic_loss_ignore_index = semantic_loss_ignore_index


class Data2VecVisionOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4
71
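Reduced to its essentials, the configuration class above is the standard PretrainedConfig subclassing pattern; in this sketch the class name and fields are invented for illustration.

from transformers import PretrainedConfig


class TinyVisionConfig(PretrainedConfig):
    model_type = "tiny-vision"  # hypothetical identifier, not a real model type

    def __init__(self, hidden_size=768, image_size=224, patch_size=16, **kwargs):
        # Pass unknown kwargs up so save_pretrained/from_pretrained round-trip.
        super().__init__(**kwargs)
        self.hidden_size = hidden_size
        self.image_size = image_size
        self.patch_size = patch_size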
import logging import os import sys from dataclasses import dataclass, field from typing import Optional import torch from datasets import load_dataset from torchvision.transforms import Compose, Lambda, Normalize, RandomHorizontalFlip, RandomResizedCrop, ToTensor from torchvision.transforms.functional import InterpolationMode import transformers from transformers import ( HfArgumentParser, Trainer, TrainingArguments, ViTImageProcessor, ViTMAEConfig, ViTMAEForPreTraining, ) from transformers.trainer_utils import get_last_checkpoint from transformers.utils import check_min_version, send_example_telemetry from transformers.utils.versions import require_version UpperCamelCase = logging.getLogger(__name__) # Will error if the minimal version of Transformers is not installed. Remove at your own risks. check_min_version("4.31.0") require_version("datasets>=1.8.0", "To fix: pip install -r examples/pytorch/image-pretraining/requirements.txt") @dataclass class lowerCAmelCase_ : """simple docstring""" _snake_case : Optional[str] = field( default="""cifar10""" , metadata={"""help""": """Name of a dataset from the datasets package"""} ) _snake_case : Optional[str] = field( default=lowercase , metadata={"""help""": """The configuration name of the dataset to use (via the datasets library)."""} ) _snake_case : Optional[str] = field( default=lowercase , metadata={"""help""": """The column name of the images in the files."""} ) _snake_case : Optional[str] = field(default=lowercase , metadata={"""help""": """A folder containing the training data."""} ) _snake_case : Optional[str] = field(default=lowercase , metadata={"""help""": """A folder containing the validation data."""} ) _snake_case : Optional[float] = field( default=0.15 , metadata={"""help""": """Percent to split off of train for validation."""} ) _snake_case : Optional[int] = field( default=lowercase , metadata={ """help""": ( """For debugging purposes or quicker training, truncate the number of training examples to this """ """value if set.""" ) } , ) _snake_case : Optional[int] = field( default=lowercase , metadata={ """help""": ( """For debugging purposes or quicker training, truncate the number of evaluation examples to this """ """value if set.""" ) } , ) def __a ( self :List[str] ): UpperCamelCase__ :Optional[Any] = {} if self.train_dir is not None: UpperCamelCase__ :int = self.train_dir if self.validation_dir is not None: UpperCamelCase__ :List[str] = self.validation_dir UpperCamelCase__ :Optional[int] = data_files if data_files else None @dataclass class lowerCAmelCase_ : """simple docstring""" _snake_case : str = field( default=lowercase , metadata={ """help""": ( """The model checkpoint for weights initialization.Don't set if you want to train a model from scratch.""" ) } , ) _snake_case : Optional[str] = field( default=lowercase , metadata={"""help""": """Pretrained config name or path if not the same as model_name_or_path"""} ) _snake_case : Optional[str] = field( default=lowercase , metadata={ """help""": ( """Override some existing default config settings when a model is trained from scratch. 
Example: """ """n_embd=10,resid_pdrop=0.2,scale_attn_weights=false,summary_type=cls_index""" ) } , ) _snake_case : Optional[str] = field( default=lowercase , metadata={"""help""": """Where do you want to store the pretrained models downloaded from s3"""} ) _snake_case : str = field( default="""main""" , metadata={"""help""": """The specific model version to use (can be a branch name, tag name or commit id)."""} , ) _snake_case : str = field(default=lowercase , metadata={"""help""": """Name or path of preprocessor config."""} ) _snake_case : bool = field( default=lowercase , metadata={ """help""": ( """Will use the token generated when running `huggingface-cli login` (necessary to use this script """ """with private models).""" ) } , ) _snake_case : float = field( default=0.75 , metadata={"""help""": """The ratio of the number of masked tokens in the input sequence."""} ) _snake_case : bool = field( default=lowercase , metadata={"""help""": """Whether or not to train with normalized pixel values as target."""} ) @dataclass class lowerCAmelCase_ ( lowercase ): """simple docstring""" _snake_case : float = field( default=1e-3 , metadata={"""help""": """Base learning rate: absolute_lr = base_lr * total_batch_size / 256."""} ) def A ( lowercase__ : Union[str, Any] ) -> Dict: UpperCamelCase__ :Union[str, Any] = torch.stack([example["""pixel_values"""] for example in examples] ) return {"pixel_values": pixel_values} def A ( ) -> Optional[int]: # See all possible arguments in src/transformers/training_args.py # or by passing the --help flag to this script. # We now keep distinct sets of args, for a cleaner separation of concerns. UpperCamelCase__ :Optional[int] = HfArgumentParser((ModelArguments, DataTrainingArguments, CustomTrainingArguments) ) if len(sys.argv ) == 2 and sys.argv[1].endswith(""".json""" ): # If we pass only one argument to the script and it's the path to a json file, # let's parse it to get our arguments. UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ :str = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) ) else: UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ :Union[str, Any] = parser.parse_args_into_dataclasses() # Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The # information sent is the one passed as arguments along with your Python/PyTorch versions. send_example_telemetry("""run_mae""" , lowercase__ , lowercase__ ) # Setup logging logging.basicConfig( format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""" , datefmt="""%m/%d/%Y %H:%M:%S""" , handlers=[logging.StreamHandler(sys.stdout )] , ) if training_args.should_log: # The default of training_args.log_level is passive, so we set log level at info here to have that default. transformers.utils.logging.set_verbosity_info() UpperCamelCase__ :List[str] = training_args.get_process_log_level() logger.setLevel(lowercase__ ) transformers.utils.logging.set_verbosity(lowercase__ ) transformers.utils.logging.enable_default_handler() transformers.utils.logging.enable_explicit_format() # Log on each process the small summary: logger.warning( f"""Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}""" + f"""distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}""" ) logger.info(f"""Training/evaluation parameters {training_args}""" ) # Detecting last checkpoint. 
UpperCamelCase__ :Union[str, Any] = None if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir: UpperCamelCase__ :List[str] = get_last_checkpoint(training_args.output_dir ) if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0: raise ValueError( f"""Output directory ({training_args.output_dir}) already exists and is not empty. """ """Use --overwrite_output_dir to overcome.""" ) elif last_checkpoint is not None and training_args.resume_from_checkpoint is None: logger.info( f"""Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change """ """the `--output_dir` or add `--overwrite_output_dir` to train from scratch.""" ) # Initialize our dataset. UpperCamelCase__ :Tuple = load_dataset( data_args.dataset_name , data_args.dataset_config_name , data_files=data_args.data_files , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , ) # If we don't have a validation split, split off a percentage of train as validation. UpperCamelCase__ :int = None if """validation""" in ds.keys() else data_args.train_val_split if isinstance(data_args.train_val_split , lowercase__ ) and data_args.train_val_split > 0.0: UpperCamelCase__ :Optional[Any] = ds["""train"""].train_test_split(data_args.train_val_split ) UpperCamelCase__ :Union[str, Any] = split["""train"""] UpperCamelCase__ :Any = split["""test"""] # Load pretrained model and image processor # # Distributed training: # The .from_pretrained methods guarantee that only one local process can concurrently # download model & vocab. UpperCamelCase__ :Optional[int] = { """cache_dir""": model_args.cache_dir, """revision""": model_args.model_revision, """use_auth_token""": True if model_args.use_auth_token else None, } if model_args.config_name: UpperCamelCase__ :Any = ViTMAEConfig.from_pretrained(model_args.config_name , **lowercase__ ) elif model_args.model_name_or_path: UpperCamelCase__ :Union[str, Any] = ViTMAEConfig.from_pretrained(model_args.model_name_or_path , **lowercase__ ) else: UpperCamelCase__ :Optional[Any] = ViTMAEConfig() logger.warning("""You are instantiating a new config instance from scratch.""" ) if model_args.config_overrides is not None: logger.info(f"""Overriding config: {model_args.config_overrides}""" ) config.update_from_string(model_args.config_overrides ) logger.info(f"""New config: {config}""" ) # adapt config config.update( { """mask_ratio""": model_args.mask_ratio, """norm_pix_loss""": model_args.norm_pix_loss, } ) # create image processor if model_args.image_processor_name: UpperCamelCase__ :str = ViTImageProcessor.from_pretrained(model_args.image_processor_name , **lowercase__ ) elif model_args.model_name_or_path: UpperCamelCase__ :Dict = ViTImageProcessor.from_pretrained(model_args.model_name_or_path , **lowercase__ ) else: UpperCamelCase__ :Tuple = ViTImageProcessor() # create model if model_args.model_name_or_path: UpperCamelCase__ :Any = ViTMAEForPreTraining.from_pretrained( model_args.model_name_or_path , from_tf=bool(""".ckpt""" in model_args.model_name_or_path ) , config=lowercase__ , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , ) else: logger.info("""Training new model from scratch""" ) UpperCamelCase__ :Optional[int] = ViTMAEForPreTraining(lowercase__ ) if training_args.do_train: UpperCamelCase__ :Optional[Any] = ds["""train"""].column_names else: UpperCamelCase__ :Union[str, Any] = 
ds["""validation"""].column_names if data_args.image_column_name is not None: UpperCamelCase__ :Union[str, Any] = data_args.image_column_name elif "image" in column_names: UpperCamelCase__ :Optional[Any] = """image""" elif "img" in column_names: UpperCamelCase__ :List[str] = """img""" else: UpperCamelCase__ :List[Any] = column_names[0] # transformations as done in original MAE paper # source: https://github.com/facebookresearch/mae/blob/main/main_pretrain.py if "shortest_edge" in image_processor.size: UpperCamelCase__ :List[str] = image_processor.size["""shortest_edge"""] else: UpperCamelCase__ :int = (image_processor.size["""height"""], image_processor.size["""width"""]) UpperCamelCase__ :Any = Compose( [ Lambda(lambda lowercase__ : img.convert("""RGB""" ) if img.mode != "RGB" else img ), RandomResizedCrop(lowercase__ , scale=(0.2, 1.0) , interpolation=InterpolationMode.BICUBIC ), RandomHorizontalFlip(), ToTensor(), Normalize(mean=image_processor.image_mean , std=image_processor.image_std ), ] ) def preprocess_images(lowercase__ : Tuple ): UpperCamelCase__ :List[Any] = [transforms(lowercase__ ) for image in examples[image_column_name]] return examples if training_args.do_train: if "train" not in ds: raise ValueError("""--do_train requires a train dataset""" ) if data_args.max_train_samples is not None: UpperCamelCase__ :Optional[int] = ds["""train"""].shuffle(seed=training_args.seed ).select(range(data_args.max_train_samples ) ) # Set the training transforms ds["train"].set_transform(lowercase__ ) if training_args.do_eval: if "validation" not in ds: raise ValueError("""--do_eval requires a validation dataset""" ) if data_args.max_eval_samples is not None: UpperCamelCase__ :Optional[Any] = ( ds["""validation"""].shuffle(seed=training_args.seed ).select(range(data_args.max_eval_samples ) ) ) # Set the validation transforms ds["validation"].set_transform(lowercase__ ) # Compute absolute learning rate UpperCamelCase__ :Tuple = ( training_args.train_batch_size * training_args.gradient_accumulation_steps * training_args.world_size ) if training_args.base_learning_rate is not None: UpperCamelCase__ :Any = training_args.base_learning_rate * total_train_batch_size / 256 # Initialize our trainer UpperCamelCase__ :Union[str, Any] = Trainer( model=lowercase__ , args=lowercase__ , train_dataset=ds["""train"""] if training_args.do_train else None , eval_dataset=ds["""validation"""] if training_args.do_eval else None , tokenizer=lowercase__ , data_collator=lowercase__ , ) # Training if training_args.do_train: UpperCamelCase__ :Any = None if training_args.resume_from_checkpoint is not None: UpperCamelCase__ :int = training_args.resume_from_checkpoint elif last_checkpoint is not None: UpperCamelCase__ :Dict = last_checkpoint UpperCamelCase__ :Union[str, Any] = trainer.train(resume_from_checkpoint=lowercase__ ) trainer.save_model() trainer.log_metrics("""train""" , train_result.metrics ) trainer.save_metrics("""train""" , train_result.metrics ) trainer.save_state() # Evaluation if training_args.do_eval: UpperCamelCase__ :int = trainer.evaluate() trainer.log_metrics("""eval""" , lowercase__ ) trainer.save_metrics("""eval""" , lowercase__ ) # Write model card and (optionally) push to hub UpperCamelCase__ :Optional[int] = { """tasks""": """masked-auto-encoding""", """dataset""": data_args.dataset_name, """tags""": ["""masked-auto-encoding"""], } if training_args.push_to_hub: trainer.push_to_hub(**lowercase__ ) else: trainer.create_model_card(**lowercase__ ) def A ( lowercase__ : Union[str, Any] ) -> Dict: # 
For xla_spawn (TPUs) main() if __name__ == "__main__": main()
45
0
# A Bipartite Graph is a graph whose vertices can be divided into two independent sets, # U and V such that every edge (u, v) either connects a vertex from U to V or a vertex # from V to U. In other words, for every edge (u, v), either u belongs to U and v to V, # or u belongs to V and v to U. We can also say that there is no edge that connects # vertices of same set. def lowerCAmelCase_ (lowerCAmelCase__: Dict ): """simple docstring""" UpperCAmelCase_: List[str] = [False] * len(lowercase__ ) UpperCAmelCase_: List[str] = [-1] * len(lowercase__ ) def dfs(lowerCAmelCase__: Optional[Any] , lowerCAmelCase__: List[str] ): UpperCAmelCase_: List[Any] = True UpperCAmelCase_: List[str] = c for u in graph[v]: if not visited[u]: dfs(lowercase__ , 1 - c ) for i in range(len(lowercase__ ) ): if not visited[i]: dfs(lowercase__ , 0 ) for i in range(len(lowercase__ ) ): for j in graph[i]: if color[i] == color[j]: return False return True # Adjacency list of graph a : List[Any] = {0: [1, 3], 1: [0, 2], 2: [1, 3], 3: [0, 2], 4: []} print(check_bipartite_dfs(graph))
556
from __future__ import annotations def A ( lowercase__ : int ) -> list[int]: UpperCamelCase__ :Union[str, Any] = [True] * limit UpperCamelCase__ :int = False UpperCamelCase__ :Optional[Any] = False UpperCamelCase__ :str = True for i in range(3 , int(limit**0.5 + 1 ) , 2 ): UpperCamelCase__ :List[Any] = i * 2 while index < limit: UpperCamelCase__ :Tuple = False UpperCamelCase__ :Tuple = index + i UpperCamelCase__ :str = [2] for i in range(3 , lowercase__ , 2 ): if is_prime[i]: primes.append(lowercase__ ) return primes def A ( lowercase__ : int = 100_0000 ) -> int: UpperCamelCase__ :Any = prime_sieve(lowercase__ ) UpperCamelCase__ :Optional[int] = 0 UpperCamelCase__ :Optional[Any] = 0 for i in range(len(lowercase__ ) ): for j in range(i + length , len(lowercase__ ) ): UpperCamelCase__ :Any = sum(primes[i:j] ) if sol >= ceiling: break if sol in primes: UpperCamelCase__ :Union[str, Any] = j - i UpperCamelCase__ :Any = sol return largest if __name__ == "__main__": print(f'''{solution() = }''')
45
0
import json import sys def __a ( A__ : Optional[Any] , A__ : Any ): with open(lowercase__ , encoding="utf-8" ) as f: SCREAMING_SNAKE_CASE = json.load(lowercase__ ) SCREAMING_SNAKE_CASE = ["""<details>""", """<summary>Show updated benchmarks!</summary>""", """ """] for benchmark_name in sorted(lowercase__ ): SCREAMING_SNAKE_CASE = results[benchmark_name] SCREAMING_SNAKE_CASE = benchmark_name.split("/" )[-1] output_md.append(F"### Benchmark: {benchmark_file_name}" ) SCREAMING_SNAKE_CASE = """| metric |""" SCREAMING_SNAKE_CASE = """|--------|""" SCREAMING_SNAKE_CASE = """| new / old (diff) |""" for metric_name in sorted(lowercase__ ): SCREAMING_SNAKE_CASE = benchmark_res[metric_name] SCREAMING_SNAKE_CASE = metric_vals["""new"""] SCREAMING_SNAKE_CASE = metric_vals.get("old" , lowercase__ ) SCREAMING_SNAKE_CASE = metric_vals.get("diff" , lowercase__ ) SCREAMING_SNAKE_CASE = F" {new_val:f}" if isinstance(lowercase__ , (int, float) ) else """None""" if old_val is not None: val_str += F" / {old_val:f}" if isinstance(lowercase__ , (int, float) ) else "None" if dif_val is not None: val_str += F" ({dif_val:f})" if isinstance(lowercase__ , (int, float) ) else "None" title += " " + metric_name + " |" lines += "---|" value += val_str + " |" output_md += [title, lines, value, " "] output_md.append("</details>" ) with open(lowercase__ , "w" , encoding="utf-8" ) as f: f.writelines("\n".join(lowercase__ ) ) if __name__ == "__main__": __A : Optional[int] = sys.argv[1] __A : Dict = sys.argv[2] format_json_to_md(input_json_file, output_md_file)
16
import unittest from transformers import GPTNeoXJapaneseConfig, is_torch_available from transformers.models.gpt_neox_japanese.tokenization_gpt_neox_japanese import GPTNeoXJapaneseTokenizer from transformers.testing_utils import require_torch, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import GPTNeoXJapaneseForCausalLM, GPTNeoXJapaneseModel class lowerCAmelCase_ : """simple docstring""" def __init__( self :Optional[Any] , lowerCamelCase__ :Optional[int] , lowerCamelCase__ :Tuple=13 , lowerCamelCase__ :Tuple=7 , lowerCamelCase__ :Optional[Any]=True , lowerCamelCase__ :Union[str, Any]=True , lowerCamelCase__ :Optional[int]=True , lowerCamelCase__ :List[Any]=True , lowerCamelCase__ :List[str]=99 , lowerCamelCase__ :int=32 , lowerCamelCase__ :List[Any]=5 , lowerCamelCase__ :Tuple=4 , lowerCamelCase__ :List[Any]=4 , lowerCamelCase__ :str="gelu" , lowerCamelCase__ :Optional[Any]=0.0 , lowerCamelCase__ :Optional[int]=0.1 , lowerCamelCase__ :str=True , lowerCamelCase__ :Dict=5_12 , lowerCamelCase__ :Optional[Any]=16 , lowerCamelCase__ :Optional[Any]=2 , lowerCamelCase__ :Union[str, Any]=0.02 , lowerCamelCase__ :Union[str, Any]=3 , lowerCamelCase__ :int=4 , lowerCamelCase__ :str=None , ): UpperCamelCase__ :Optional[Any] = parent UpperCamelCase__ :Dict = batch_size UpperCamelCase__ :Tuple = seq_length UpperCamelCase__ :Dict = is_training UpperCamelCase__ :List[str] = use_input_mask UpperCamelCase__ :Optional[Any] = use_token_type_ids UpperCamelCase__ :Tuple = use_labels UpperCamelCase__ :int = vocab_size UpperCamelCase__ :Tuple = hidden_size UpperCamelCase__ :Optional[Any] = num_hidden_layers UpperCamelCase__ :int = num_attention_heads UpperCamelCase__ :Optional[int] = intermediate_multiple_size UpperCamelCase__ :Optional[Any] = hidden_act UpperCamelCase__ :Optional[int] = hidden_dropout UpperCamelCase__ :List[Any] = attention_dropout UpperCamelCase__ :List[str] = weight_tying UpperCamelCase__ :List[str] = max_position_embeddings UpperCamelCase__ :Dict = type_vocab_size UpperCamelCase__ :List[Any] = type_sequence_label_size UpperCamelCase__ :List[str] = initializer_range UpperCamelCase__ :int = num_labels UpperCamelCase__ :Dict = num_choices UpperCamelCase__ :Any = scope def __a ( self :Any ): UpperCamelCase__ :Tuple = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) UpperCamelCase__ :str = None if self.use_input_mask: UpperCamelCase__ :Optional[int] = random_attention_mask([self.batch_size, self.seq_length] ) UpperCamelCase__ :Union[str, Any] = None if self.use_labels: UpperCamelCase__ :Tuple = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) UpperCamelCase__ :Optional[Any] = self.get_config() return config, input_ids, input_mask, token_labels def __a ( self :Union[str, Any] ): return GPTNeoXJapaneseConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_multiple_size=self.intermediate_multiple_size , hidden_act=self.hidden_act , hidden_dropout=self.hidden_dropout , attention_dropout=self.attention_dropout , weight_tying=self.weight_tying , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=lowerCamelCase__ , initializer_range=self.initializer_range , ) def __a ( self 
:Union[str, Any] ): UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ :Optional[Any] = self.prepare_config_and_inputs() UpperCamelCase__ :Optional[int] = True return config, input_ids, input_mask, token_labels def __a ( self :List[str] , lowerCamelCase__ :List[Any] , lowerCamelCase__ :Any , lowerCamelCase__ :Any ): UpperCamelCase__ :Union[str, Any] = GPTNeoXJapaneseModel(config=lowerCamelCase__ ) model.to(lowerCamelCase__ ) model.eval() UpperCamelCase__ :Tuple = model(lowerCamelCase__ , attention_mask=lowerCamelCase__ ) UpperCamelCase__ :Optional[Any] = model(lowerCamelCase__ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def __a ( self :Dict , lowerCamelCase__ :Dict , lowerCamelCase__ :Optional[Any] , lowerCamelCase__ :List[Any] ): UpperCamelCase__ :List[str] = True UpperCamelCase__ :int = GPTNeoXJapaneseModel(lowerCamelCase__ ) model.to(lowerCamelCase__ ) model.eval() UpperCamelCase__ :Union[str, Any] = model(lowerCamelCase__ , attention_mask=lowerCamelCase__ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def __a ( self :List[Any] , lowerCamelCase__ :Tuple , lowerCamelCase__ :Optional[Any] , lowerCamelCase__ :Any , lowerCamelCase__ :Optional[Any] ): UpperCamelCase__ :Any = GPTNeoXJapaneseForCausalLM(config=lowerCamelCase__ ) model.to(lowerCamelCase__ ) model.eval() UpperCamelCase__ :Tuple = model(lowerCamelCase__ , attention_mask=lowerCamelCase__ , labels=lowerCamelCase__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def __a ( self :Any , lowerCamelCase__ :Optional[int] , lowerCamelCase__ :Optional[Any] , lowerCamelCase__ :List[str] ): UpperCamelCase__ :Union[str, Any] = True UpperCamelCase__ :List[str] = GPTNeoXJapaneseForCausalLM(config=lowerCamelCase__ ) model.to(lowerCamelCase__ ) model.eval() # first forward pass UpperCamelCase__ :Optional[Any] = model(lowerCamelCase__ , attention_mask=lowerCamelCase__ , use_cache=lowerCamelCase__ ) UpperCamelCase__ :List[Any] = outputs.past_key_values # create hypothetical multiple next token and extent to next_input_ids UpperCamelCase__ :List[Any] = ids_tensor((self.batch_size, 3) , config.vocab_size ) UpperCamelCase__ :Optional[Any] = ids_tensor((self.batch_size, 3) , vocab_size=2 ) # append to next input_ids and UpperCamelCase__ :Union[str, Any] = torch.cat([input_ids, next_tokens] , dim=-1 ) UpperCamelCase__ :Optional[int] = torch.cat([input_mask, next_mask] , dim=-1 ) UpperCamelCase__ :Union[str, Any] = model(lowerCamelCase__ , attention_mask=lowerCamelCase__ , output_hidden_states=lowerCamelCase__ ) UpperCamelCase__ :Optional[int] = output_from_no_past["""hidden_states"""][0] UpperCamelCase__ :Union[str, Any] = model( lowerCamelCase__ , attention_mask=lowerCamelCase__ , past_key_values=lowerCamelCase__ , output_hidden_states=lowerCamelCase__ , )["""hidden_states"""][0] # select random slice UpperCamelCase__ :int = ids_tensor((1,) , output_from_past.shape[-1] ).item() UpperCamelCase__ :str = output_from_no_past[:, -3:, random_slice_idx].detach() UpperCamelCase__ :Any = output_from_past[:, :, random_slice_idx].detach() self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] ) # test that outputs are equal for slice self.parent.assertTrue(torch.allclose(lowerCamelCase__ , lowerCamelCase__ , atol=1e-3 ) ) def __a ( self :Tuple ): UpperCamelCase__ :int = self.prepare_config_and_inputs() UpperCamelCase__ , 
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ :List[Any] = config_and_inputs UpperCamelCase__ :Any = {"""input_ids""": input_ids, """attention_mask""": input_mask} return config, inputs_dict @require_torch class lowerCAmelCase_ ( lowercase , lowercase , unittest.TestCase ): """simple docstring""" _snake_case : Dict = (GPTNeoXJapaneseModel, GPTNeoXJapaneseForCausalLM) if is_torch_available() else () _snake_case : int = (GPTNeoXJapaneseForCausalLM,) if is_torch_available() else () _snake_case : str = ( {"""feature-extraction""": GPTNeoXJapaneseModel, """text-generation""": GPTNeoXJapaneseForCausalLM} if is_torch_available() else {} ) _snake_case : Union[str, Any] = False _snake_case : Dict = False _snake_case : List[str] = False _snake_case : Optional[int] = False def __a ( self :List[Any] ): UpperCamelCase__ :Tuple = GPTNeoXJapaneseModelTester(self ) UpperCamelCase__ :Optional[Any] = ConfigTester(self , config_class=lowerCamelCase__ , hidden_size=37 ) def __a ( self :Dict ): self.config_tester.run_common_tests() def __a ( self :Any ): UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ :Optional[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) def __a ( self :Any ): UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ :Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_decoder() self.model_tester.create_and_check_model_as_decoder(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) def __a ( self :Union[str, Any] ): # This regression test was failing with PyTorch < 1.3 UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ :List[str] = self.model_tester.prepare_config_and_inputs_for_decoder() UpperCamelCase__ :Dict = None self.model_tester.create_and_check_model_as_decoder(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) def __a ( self :List[str] ): UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ :int = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_decoder_model_past_large_inputs(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) def __a ( self :Union[str, Any] ): UpperCamelCase__ :int = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_causal_lm(*lowerCamelCase__ ) @slow def __a ( self :int ): UpperCamelCase__ :int = """abeja/gpt-neox-japanese-2.7b""" UpperCamelCase__ :List[Any] = ["""データサイエンティストとは、""", """100年後に必要とされる会社は、""", """フルリモートの環境で働くために必要なことは、""", """国境の長いトンネルを抜けると""", """美味しい日本食といえば、"""] UpperCamelCase__ :Union[str, Any] = [ """データサイエンティストとは、データを分析し、ビジネスに役立つ知見を導き出す専門家のことです。""", """100年後に必要とされる会社は、「人」が中心の会社です。""", """フルリモートの環境で働くために必要なことは、「自分の時間をコントロールする」ことです。""", """国境の長いトンネルを抜けると、そこは雪国だった。""", """美味しい日本食といえば、やっぱりお寿司ですよね。""", ] UpperCamelCase__ :Any = GPTNeoXJapaneseTokenizer.from_pretrained(lowerCamelCase__ ) UpperCamelCase__ :List[Any] = GPTNeoXJapaneseForCausalLM.from_pretrained(lowerCamelCase__ ) UpperCamelCase__ :Optional[Any] = [] for prompt in prompts: UpperCamelCase__ :str = tokenizer(lowerCamelCase__ , return_tensors="""pt""" ).input_ids UpperCamelCase__ :Union[str, Any] = model.generate(lowerCamelCase__ , max_length=50 ) UpperCamelCase__ :Dict = tokenizer.batch_decode(lowerCamelCase__ , skip_special_tokens=lowerCamelCase__ ) predicted_outputs += generated_string self.assertListEqual(lowerCamelCase__ , lowerCamelCase__ )
45
0
from collections.abc import Sequence def a ( A__ : Sequence[int] | None = None ) -> int: """simple docstring""" if nums is None or not nums: raise ValueError('Input sequence should not be empty' ) _lowercase =nums[0] for i in range(1 , len(lowercase__ ) ): _lowercase =nums[i] _lowercase =max(lowercase__ , ans + num , lowercase__ ) return ans if __name__ == "__main__": import doctest doctest.testmod() # Try on a sample input from the user lowercase_ = int(input('Enter number of elements : ').strip()) lowercase_ = list(map(int, input('\nEnter the numbers : ').strip().split()))[:n] print(max_subsequence_sum(array))
291
import numpy as np from matplotlib import pyplot as plt from sklearn.datasets import load_iris from sklearn.metrics import ConfusionMatrixDisplay from sklearn.model_selection import train_test_split from xgboost import XGBClassifier def A ( lowercase__ : dict ) -> tuple: return (data["data"], data["target"]) def A ( lowercase__ : np.ndarray , lowercase__ : np.ndarray ) -> XGBClassifier: UpperCamelCase__ :Tuple = XGBClassifier() classifier.fit(lowercase__ , lowercase__ ) return classifier def A ( ) -> None: UpperCamelCase__ :str = load_iris() UpperCamelCase__ , UpperCamelCase__ :int = data_handling(lowercase__ ) UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ :int = train_test_split( lowercase__ , lowercase__ , test_size=0.25 ) UpperCamelCase__ :Optional[int] = iris["""target_names"""] # Create an XGBoost Classifier from the training data UpperCamelCase__ :Optional[Any] = xgboost(lowercase__ , lowercase__ ) # Display the confusion matrix of the classifier with both training and test sets ConfusionMatrixDisplay.from_estimator( lowercase__ , lowercase__ , lowercase__ , display_labels=lowercase__ , cmap="""Blues""" , normalize="""true""" , ) plt.title("""Normalized Confusion Matrix - IRIS Dataset""" ) plt.show() if __name__ == "__main__": import doctest doctest.testmod(verbose=True) main()
45
0
"""simple docstring""" import unittest from transformers import BigBirdTokenizer, BigBirdTokenizerFast from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, require_torch, slow from transformers.utils import cached_property from ...test_tokenization_common import TokenizerTesterMixin __lowerCamelCase = "▁" __lowerCamelCase = get_tests_dir("fixtures/test_sentencepiece.model") @require_sentencepiece @require_tokenizers class _snake_case ( A__ , unittest.TestCase ): '''simple docstring''' UpperCamelCase__ =BigBirdTokenizer UpperCamelCase__ =BigBirdTokenizerFast UpperCamelCase__ =True UpperCamelCase__ =True def snake_case_ ( self : Union[str, Any] ): super().setUp() UpperCAmelCase_ :List[Any] = self.tokenizer_class(lowerCamelCase__ , keep_accents=lowerCamelCase__ ) tokenizer.save_pretrained(self.tmpdirname ) def snake_case_ ( self : str ): UpperCAmelCase_ :List[str] = """<s>""" UpperCAmelCase_ :str = 1 self.assertEqual(self.get_tokenizer()._convert_token_to_id(lowerCamelCase__ ) , lowerCamelCase__ ) self.assertEqual(self.get_tokenizer()._convert_id_to_token(lowerCamelCase__ ) , lowerCamelCase__ ) def snake_case_ ( self : Any ): UpperCAmelCase_ :Any = list(self.get_tokenizer().get_vocab().keys() ) self.assertEqual(vocab_keys[0] , '''<unk>''' ) self.assertEqual(vocab_keys[1] , '''<s>''' ) self.assertEqual(vocab_keys[-1] , '''[MASK]''' ) self.assertEqual(len(lowerCamelCase__ ) , 1_004 ) def snake_case_ ( self : Optional[int] ): self.assertEqual(self.get_tokenizer().vocab_size , 1_000 ) def snake_case_ ( self : Optional[Any] ): if not self.test_rust_tokenizer: return UpperCAmelCase_ :Any = self.get_tokenizer() UpperCAmelCase_ :str = self.get_rust_tokenizer() UpperCAmelCase_ :List[Any] = """I was born in 92000, and this is falsé.""" UpperCAmelCase_ :List[str] = tokenizer.tokenize(lowerCamelCase__ ) UpperCAmelCase_ :Optional[int] = rust_tokenizer.tokenize(lowerCamelCase__ ) self.assertListEqual(lowerCamelCase__ , lowerCamelCase__ ) UpperCAmelCase_ :str = tokenizer.encode(lowerCamelCase__ , add_special_tokens=lowerCamelCase__ ) UpperCAmelCase_ :Union[str, Any] = rust_tokenizer.encode(lowerCamelCase__ , add_special_tokens=lowerCamelCase__ ) self.assertListEqual(lowerCamelCase__ , lowerCamelCase__ ) UpperCAmelCase_ :Tuple = self.get_rust_tokenizer() UpperCAmelCase_ :Any = tokenizer.encode(lowerCamelCase__ ) UpperCAmelCase_ :Tuple = rust_tokenizer.encode(lowerCamelCase__ ) self.assertListEqual(lowerCamelCase__ , lowerCamelCase__ ) def snake_case_ ( self : Optional[Any] ): UpperCAmelCase_ :Dict = BigBirdTokenizer(lowerCamelCase__ , keep_accents=lowerCamelCase__ ) UpperCAmelCase_ :str = tokenizer.tokenize('''This is a test''' ) self.assertListEqual(lowerCamelCase__ , ['''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est'''] ) self.assertListEqual( tokenizer.convert_tokens_to_ids(lowerCamelCase__ ) , [285, 46, 10, 170, 382] , ) UpperCAmelCase_ :List[Any] = tokenizer.tokenize('''I was born in 92000, and this is falsé.''' ) self.assertListEqual( lowerCamelCase__ , [ SPIECE_UNDERLINE + '''I''', SPIECE_UNDERLINE + '''was''', SPIECE_UNDERLINE + '''b''', '''or''', '''n''', SPIECE_UNDERLINE + '''in''', SPIECE_UNDERLINE + '''''', '''9''', '''2''', '''0''', '''0''', '''0''', ''',''', SPIECE_UNDERLINE + '''and''', SPIECE_UNDERLINE + '''this''', SPIECE_UNDERLINE + '''is''', SPIECE_UNDERLINE + '''f''', '''al''', '''s''', '''é''', '''.''', ] , ) UpperCAmelCase_ :Tuple = tokenizer.convert_tokens_to_ids(lowerCamelCase__ ) self.assertListEqual( lowerCamelCase__ , [8, 21, 84, 55, 
24, 19, 7, 0, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 0, 4] , ) UpperCAmelCase_ :Optional[int] = tokenizer.convert_ids_to_tokens(lowerCamelCase__ ) self.assertListEqual( lowerCamelCase__ , [ SPIECE_UNDERLINE + '''I''', SPIECE_UNDERLINE + '''was''', SPIECE_UNDERLINE + '''b''', '''or''', '''n''', SPIECE_UNDERLINE + '''in''', SPIECE_UNDERLINE + '''''', '''<unk>''', '''2''', '''0''', '''0''', '''0''', ''',''', SPIECE_UNDERLINE + '''and''', SPIECE_UNDERLINE + '''this''', SPIECE_UNDERLINE + '''is''', SPIECE_UNDERLINE + '''f''', '''al''', '''s''', '''<unk>''', '''.''', ] , ) @cached_property def snake_case_ ( self : Dict ): return BigBirdTokenizer.from_pretrained('''google/bigbird-roberta-base''' ) @slow def snake_case_ ( self : List[str] ): UpperCAmelCase_ :Dict = """Hello World!""" UpperCAmelCase_ :Any = [65, 18_536, 2_260, 101, 66] self.assertListEqual(lowerCamelCase__ , self.big_tokenizer.encode(lowerCamelCase__ ) ) @slow def snake_case_ ( self : str ): UpperCAmelCase_ :Optional[Any] = ( """This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) \" [ ] ! : - . Also we will""" """ add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth""" ) # fmt: off UpperCAmelCase_ :Any = [65, 871, 419, 358, 946, 991, 2_521, 452, 358, 1_357, 387, 7_751, 3_536, 112, 985, 456, 126, 865, 938, 5_400, 5_734, 458, 1_368, 467, 786, 2_462, 5_246, 1_159, 633, 865, 4_519, 457, 582, 852, 2_557, 427, 916, 508, 405, 34_324, 497, 391, 408, 11_342, 1_244, 385, 100, 938, 985, 456, 574, 362, 12_597, 3_200, 3_129, 1_172, 66] # noqa: E231 # fmt: on self.assertListEqual(lowerCamelCase__ , self.big_tokenizer.encode(lowerCamelCase__ ) ) @require_torch @slow def snake_case_ ( self : str ): import torch from transformers import BigBirdConfig, BigBirdModel # Build sequence UpperCAmelCase_ :Optional[Any] = list(self.big_tokenizer.get_vocab().keys() )[:10] UpperCAmelCase_ :Optional[Any] = """ """.join(lowerCamelCase__ ) UpperCAmelCase_ :Optional[int] = self.big_tokenizer.encode_plus(lowerCamelCase__ , return_tensors='''pt''' , return_token_type_ids=lowerCamelCase__ ) UpperCAmelCase_ :List[Any] = self.big_tokenizer.batch_encode_plus( [sequence + ''' ''' + sequence] , return_tensors='''pt''' , return_token_type_ids=lowerCamelCase__ ) UpperCAmelCase_ :Optional[int] = BigBirdConfig(attention_type='''original_full''' ) UpperCAmelCase_ :List[str] = BigBirdModel(lowerCamelCase__ ) assert model.get_input_embeddings().weight.shape[0] >= self.big_tokenizer.vocab_size with torch.no_grad(): model(**lowerCamelCase__ ) model(**lowerCamelCase__ ) @slow def snake_case_ ( self : List[str] ): UpperCAmelCase_ :Any = BigBirdTokenizer.from_pretrained('''google/bigbird-roberta-base''' ) UpperCAmelCase_ :Any = tokenizer.decode(tokenizer('''Paris is the [MASK].''' ).input_ids ) self.assertTrue(decoded_text == '''[CLS] Paris is the[MASK].[SEP]''' ) @slow def snake_case_ ( self : Union[str, Any] ): # fmt: off UpperCAmelCase_ :int = {"""input_ids""": [[65, 39_286, 458, 36_335, 2_001, 456, 13_073, 13_266, 455, 113, 7_746, 1_741, 11_157, 391, 13_073, 13_266, 455, 113, 3_967, 35_412, 113, 4_936, 109, 3_870, 2_377, 113, 30_084, 45_720, 458, 134, 17_496, 112, 503, 11_672, 113, 118, 112, 5_665, 13_347, 38_687, 112, 1_496, 31_389, 112, 3_268, 47_264, 134, 962, 112, 16_377, 8_035, 23_130, 430, 12_169, 15_518, 28_592, 458, 146, 41_697, 109, 391, 12_169, 15_518, 16_689, 458, 146, 41_358, 109, 452, 726, 4_034, 111, 763, 35_412, 5_082, 388, 1_903, 111, 9_051, 391, 2_870, 48_918, 1_900, 1_123, 550, 998, 112, 9_586, 
15_985, 455, 391, 410, 22_955, 37_636, 114, 66], [65, 448, 17_496, 419, 3_663, 385, 763, 113, 27_533, 2_870, 3_283, 13_043, 1_639, 24_713, 523, 656, 24_013, 18_550, 2_521, 517, 27_014, 21_244, 420, 1_212, 1_465, 391, 927, 4_833, 388, 578, 11_786, 114, 66, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [65, 484, 2_169, 7_687, 21_932, 18_146, 726, 363, 17_032, 3_391, 114, 66, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501 # fmt: on self.tokenizer_integration_test_util( expected_encoding=lowerCamelCase__ , model_name='''google/bigbird-roberta-base''' , revision='''215c99f1600e06f83acce68422f2035b2b5c3510''' , )
608
import multiprocessing import time from arguments import PretokenizationArguments from datasets import load_dataset from transformers import AutoTokenizer, HfArgumentParser def A ( lowercase__ : Optional[int] ) -> Optional[Any]: UpperCamelCase__ :Union[str, Any] = {} UpperCamelCase__ :Optional[int] = tokenizer(example["""content"""] , truncation=lowercase__ )["""input_ids"""] UpperCamelCase__ :int = len(example["""content"""] ) / len(output["""input_ids"""] ) return output UpperCamelCase = HfArgumentParser(PretokenizationArguments) UpperCamelCase = parser.parse_args() if args.num_workers is None: UpperCamelCase = multiprocessing.cpu_count() UpperCamelCase = AutoTokenizer.from_pretrained(args.tokenizer_dir) UpperCamelCase = time.time() UpperCamelCase = load_dataset(args.dataset_name, split="train") print(f'''Dataset loaded in {time.time()-t_start:.2f}s''') UpperCamelCase = time.time() UpperCamelCase = ds.map( tokenize, num_proc=args.num_workers, remove_columns=[ "repo_name", "path", "copies", "size", "content", "license", "hash", "line_mean", "line_max", "alpha_frac", "autogenerated", ], ) print(f'''Dataset tokenized in {time.time()-t_start:.2f}s''') UpperCamelCase = time.time() ds.push_to_hub(args.tokenized_data_repo) print(f'''Data pushed to the hub in {time.time()-t_start:.2f}s''')
45
0
import logging import os from logging import ( CRITICAL, # NOQA DEBUG, # NOQA ERROR, # NOQA FATAL, # NOQA INFO, # NOQA NOTSET, # NOQA WARN, # NOQA WARNING, # NOQA ) from typing import Optional from tqdm import auto as tqdm_lib __lowerCamelCase : Optional[int] = { """debug""": logging.DEBUG, """info""": logging.INFO, """warning""": logging.WARNING, """error""": logging.ERROR, """critical""": logging.CRITICAL, } __lowerCamelCase : List[Any] = logging.WARNING def A__ ( ): '''simple docstring''' snake_case__ : List[Any] =os.getenv("""DATASETS_VERBOSITY""" , lowercase__ ) if env_level_str: if env_level_str in log_levels: return log_levels[env_level_str] else: logging.getLogger().warning( f"Unknown option DATASETS_VERBOSITY={env_level_str}, " f"has to be one of: { ', '.join(log_levels.keys() ) }" ) return _default_log_level def A__ ( ): '''simple docstring''' return __name__.split(""".""" )[0] def A__ ( ): '''simple docstring''' return logging.getLogger(_get_library_name() ) def A__ ( ): '''simple docstring''' snake_case__ : Union[str, Any] =_get_library_root_logger() library_root_logger.setLevel(_get_default_logging_level() ) def A__ ( ): '''simple docstring''' snake_case__ : str =_get_library_root_logger() library_root_logger.setLevel(logging.NOTSET ) def A__ ( _a : Optional[str] = None ): '''simple docstring''' if name is None: snake_case__ : Dict =_get_library_name() return logging.getLogger(lowercase__ ) def A__ ( ): '''simple docstring''' return _get_library_root_logger().getEffectiveLevel() def A__ ( _a : int ): '''simple docstring''' _get_library_root_logger().setLevel(lowercase__ ) def A__ ( ): '''simple docstring''' return set_verbosity(lowercase__ ) def A__ ( ): '''simple docstring''' return set_verbosity(lowercase__ ) def A__ ( ): '''simple docstring''' return set_verbosity(lowercase__ ) def A__ ( ): '''simple docstring''' return set_verbosity(lowercase__ ) def A__ ( ): '''simple docstring''' snake_case__ : List[str] =False def A__ ( ): '''simple docstring''' snake_case__ : Tuple =True # Configure the library root logger at the module level (singleton-like) _configure_library_root_logger() class _lowercase : def __init__( self , *a , **a ): # pylint: disable=unused-argument snake_case__ : List[Any] =args[0] if args else None def __iter__( self ): return iter(self._iterator ) def __getattr__( self , a ): def empty_fn(*a , **a ): # pylint: disable=unused-argument return return empty_fn def __enter__( self ): return self def __exit__( self , a , a , a ): return __lowerCamelCase : List[str] = True class _lowercase : def __call__( self , *a , a=False , **a ): if _tqdm_active and not disable: return tqdm_lib.tqdm(*lowerCamelCase__ , **lowerCamelCase__ ) else: return EmptyTqdm(*lowerCamelCase__ , **lowerCamelCase__ ) def lowercase__ ( self , *a , **a ): snake_case__ : List[Any] =None if _tqdm_active: return tqdm_lib.tqdm.set_lock(*lowerCamelCase__ , **lowerCamelCase__ ) def lowercase__ ( self ): if _tqdm_active: return tqdm_lib.tqdm.get_lock() __lowerCamelCase : Optional[Any] = _tqdm_cls() def A__ ( ): '''simple docstring''' global _tqdm_active return bool(_tqdm_active ) def A__ ( ): '''simple docstring''' global _tqdm_active snake_case__ : Optional[int] =True def A__ ( ): '''simple docstring''' global _tqdm_active snake_case__ : int =False
385
def A ( lowercase__ : int ) -> Optional[Any]: stooge(lowercase__ , 0 , len(lowercase__ ) - 1 ) return arr def A ( lowercase__ : Union[str, Any] , lowercase__ : Dict , lowercase__ : str ) -> List[str]: if i >= h: return # If first element is smaller than the last then swap them if arr[i] > arr[h]: UpperCamelCase__ , UpperCamelCase__ :List[str] = arr[h], arr[i] # If there are more than 2 elements in the array if h - i + 1 > 2: UpperCamelCase__ :Optional[int] = (int)((h - i + 1) / 3 ) # Recursively sort first 2/3 elements stooge(lowercase__ , lowercase__ , (h - t) ) # Recursively sort last 2/3 elements stooge(lowercase__ , i + t , (lowercase__) ) # Recursively sort first 2/3 elements stooge(lowercase__ , lowercase__ , (h - t) ) if __name__ == "__main__": UpperCamelCase = input("Enter numbers separated by a comma:\n").strip() UpperCamelCase = [int(item) for item in user_input.split(",")] print(stooge_sort(unsorted))
45
0
import numpy as np from matplotlib import pyplot as plt from sklearn.datasets import load_iris from sklearn.metrics import ConfusionMatrixDisplay from sklearn.model_selection import train_test_split from xgboost import XGBClassifier def _a ( SCREAMING_SNAKE_CASE__ : dict ) -> tuple: '''simple docstring''' return (data["data"], data["target"]) def _a ( SCREAMING_SNAKE_CASE__ : np.ndarray , SCREAMING_SNAKE_CASE__ : np.ndarray ) -> XGBClassifier: '''simple docstring''' SCREAMING_SNAKE_CASE__ : Tuple = XGBClassifier() classifier.fit(lowercase__ , lowercase__ ) return classifier def _a ( ) -> None: '''simple docstring''' SCREAMING_SNAKE_CASE__ : str = load_iris() SCREAMING_SNAKE_CASE__ : int = data_handling(lowercase__ ) SCREAMING_SNAKE_CASE__ : int = train_test_split( lowercase__ , lowercase__ , test_size=0.2_5 ) SCREAMING_SNAKE_CASE__ : Optional[int] = iris["""target_names"""] # Create an XGBoost Classifier from the training data SCREAMING_SNAKE_CASE__ : Optional[Any] = xgboost(lowercase__ , lowercase__ ) # Display the confusion matrix of the classifier with both training and test sets ConfusionMatrixDisplay.from_estimator( lowercase__ , lowercase__ , lowercase__ , display_labels=lowercase__ , cmap="Blues" , normalize="true" , ) plt.title("Normalized Confusion Matrix - IRIS Dataset" ) plt.show() if __name__ == "__main__": import doctest doctest.testmod(verbose=True) main()
663
import argparse import json import os from tensorflow.core.protobuf.saved_model_pba import SavedModel # All paths are set with the intent you should run this script from the root of the repo with the command # python utils/check_copies.py UpperCamelCase = "." # Internal TensorFlow ops that can be safely ignored (mostly specific to a saved model) UpperCamelCase = [ "Assert", "AssignVariableOp", "EmptyTensorList", "MergeV2Checkpoints", "ReadVariableOp", "ResourceGather", "RestoreV2", "SaveV2", "ShardedFilename", "StatefulPartitionedCall", "StaticRegexFullMatch", "VarHandleOp", ] def A ( lowercase__ : Tuple , lowercase__ : Optional[Any] , lowercase__ : Dict ) -> List[Any]: UpperCamelCase__ :str = SavedModel() UpperCamelCase__ :List[str] = [] with open(os.path.join(lowercase__ , """utils""" , """tf_ops""" , """onnx.json""" ) ) as f: UpperCamelCase__ :str = json.load(lowercase__ )["""opsets"""] for i in range(1 , opset + 1 ): onnx_ops.extend(onnx_opsets[str(lowercase__ )] ) with open(lowercase__ , """rb""" ) as f: saved_model.ParseFromString(f.read() ) UpperCamelCase__ :Tuple = set() # Iterate over every metagraph in case there is more than one (a saved model can contain multiple graphs) for meta_graph in saved_model.meta_graphs: # Add operations in the graph definition model_op_names.update(node.op for node in meta_graph.graph_def.node ) # Go through the functions in the graph definition for func in meta_graph.graph_def.library.function: # Add operations in each function model_op_names.update(node.op for node in func.node_def ) # Convert to list, sorted if you want UpperCamelCase__ :Union[str, Any] = sorted(lowercase__ ) UpperCamelCase__ :List[Any] = [] for op in model_op_names: if op not in onnx_ops and op not in INTERNAL_OPS: incompatible_ops.append(lowercase__ ) if strict and len(lowercase__ ) > 0: raise Exception(f"""Found the following incompatible ops for the opset {opset}:\n""" + incompatible_ops ) elif len(lowercase__ ) > 0: print(f"""Found the following incompatible ops for the opset {opset}:""" ) print(*lowercase__ , sep="""\n""" ) else: print(f"""The saved model {saved_model_path} can properly be converted with ONNX.""" ) if __name__ == "__main__": UpperCamelCase = argparse.ArgumentParser() parser.add_argument("--saved_model_path", help="Path of the saved model to check (the .pb file).") parser.add_argument( "--opset", default=12, type=int, help="The ONNX opset against which the model has to be tested." ) parser.add_argument( "--framework", choices=["onnx"], default="onnx", help="Frameworks against which to test the saved model." ) parser.add_argument( "--strict", action="store_true", help="Whether make the checking strict (raise errors) or not (raise warnings)" ) UpperCamelCase = parser.parse_args() if args.framework == "onnx": onnx_compliancy(args.saved_model_path, args.strict, args.opset)
45
0
"""simple docstring""" from __future__ import annotations import unittest from transformers import is_tf_available from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import numpy as np import tensorflow as tf from transformers import ( TF_FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST, FlaubertConfig, TFFlaubertForMultipleChoice, TFFlaubertForQuestionAnsweringSimple, TFFlaubertForSequenceClassification, TFFlaubertForTokenClassification, TFFlaubertModel, TFFlaubertWithLMHeadModel, ) class lowerCAmelCase__ : '''simple docstring''' def __init__( self : Dict , lowercase_ : List[str] , ): '''simple docstring''' SCREAMING_SNAKE_CASE_ : Optional[int] = parent SCREAMING_SNAKE_CASE_ : int = 13 SCREAMING_SNAKE_CASE_ : Optional[int] = 7 SCREAMING_SNAKE_CASE_ : Dict = True SCREAMING_SNAKE_CASE_ : Dict = True SCREAMING_SNAKE_CASE_ : str = True SCREAMING_SNAKE_CASE_ : List[Any] = True SCREAMING_SNAKE_CASE_ : Any = True SCREAMING_SNAKE_CASE_ : Optional[int] = False SCREAMING_SNAKE_CASE_ : Optional[int] = False SCREAMING_SNAKE_CASE_ : Tuple = False SCREAMING_SNAKE_CASE_ : Optional[int] = 2 SCREAMING_SNAKE_CASE_ : List[str] = 99 SCREAMING_SNAKE_CASE_ : Optional[Any] = 0 SCREAMING_SNAKE_CASE_ : Any = 32 SCREAMING_SNAKE_CASE_ : List[str] = 2 SCREAMING_SNAKE_CASE_ : int = 4 SCREAMING_SNAKE_CASE_ : List[str] = 0.1 SCREAMING_SNAKE_CASE_ : Union[str, Any] = 0.1 SCREAMING_SNAKE_CASE_ : Union[str, Any] = 512 SCREAMING_SNAKE_CASE_ : List[str] = 16 SCREAMING_SNAKE_CASE_ : str = 2 SCREAMING_SNAKE_CASE_ : Optional[int] = 0.02 SCREAMING_SNAKE_CASE_ : Optional[int] = 3 SCREAMING_SNAKE_CASE_ : Optional[int] = 4 SCREAMING_SNAKE_CASE_ : Optional[int] = """last""" SCREAMING_SNAKE_CASE_ : Tuple = True SCREAMING_SNAKE_CASE_ : int = None SCREAMING_SNAKE_CASE_ : Dict = 0 def _SCREAMING_SNAKE_CASE ( self : int): '''simple docstring''' SCREAMING_SNAKE_CASE_ : Optional[Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size) SCREAMING_SNAKE_CASE_ : Any = random_attention_mask([self.batch_size, self.seq_length] , dtype=tf.floataa) SCREAMING_SNAKE_CASE_ : Union[str, Any] = None if self.use_input_lengths: SCREAMING_SNAKE_CASE_ : Union[str, Any] = ( ids_tensor([self.batch_size] , vocab_size=2) + self.seq_length - 2 ) # small variation of seq_length SCREAMING_SNAKE_CASE_ : List[str] = None if self.use_token_type_ids: SCREAMING_SNAKE_CASE_ : List[str] = ids_tensor([self.batch_size, self.seq_length] , self.n_langs) SCREAMING_SNAKE_CASE_ : int = None SCREAMING_SNAKE_CASE_ : List[str] = None SCREAMING_SNAKE_CASE_ : List[str] = None if self.use_labels: SCREAMING_SNAKE_CASE_ : List[Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size) SCREAMING_SNAKE_CASE_ : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels) SCREAMING_SNAKE_CASE_ : str = ids_tensor([self.batch_size] , 2 , dtype=tf.floataa) SCREAMING_SNAKE_CASE_ : int = ids_tensor([self.batch_size] , self.num_choices) SCREAMING_SNAKE_CASE_ : List[Any] = FlaubertConfig( vocab_size=self.vocab_size , n_special=self.n_special , emb_dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , gelu_activation=self.gelu_activation , 
sinusoidal_embeddings=self.sinusoidal_embeddings , asm=self.asm , causal=self.causal , n_langs=self.n_langs , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , summary_type=self.summary_type , use_proj=self.use_proj , bos_token_id=self.bos_token_id , ) return ( config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask, ) def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] , lowercase_ : Optional[int] , lowercase_ : Union[str, Any] , lowercase_ : int , lowercase_ : List[str] , lowercase_ : Optional[int] , lowercase_ : int , lowercase_ : List[Any] , lowercase_ : List[Any] , lowercase_ : int , ): '''simple docstring''' SCREAMING_SNAKE_CASE_ : int = TFFlaubertModel(config=lowerCamelCase__) SCREAMING_SNAKE_CASE_ : Union[str, Any] = {"""input_ids""": input_ids, """lengths""": input_lengths, """langs""": token_type_ids} SCREAMING_SNAKE_CASE_ : Optional[Any] = model(lowerCamelCase__) SCREAMING_SNAKE_CASE_ : Union[str, Any] = [input_ids, input_mask] SCREAMING_SNAKE_CASE_ : Optional[int] = model(lowerCamelCase__) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size)) def _SCREAMING_SNAKE_CASE ( self : Tuple , lowercase_ : Union[str, Any] , lowercase_ : Optional[Any] , lowercase_ : Union[str, Any] , lowercase_ : Any , lowercase_ : int , lowercase_ : int , lowercase_ : List[str] , lowercase_ : Any , lowercase_ : Optional[Any] , ): '''simple docstring''' SCREAMING_SNAKE_CASE_ : List[str] = TFFlaubertWithLMHeadModel(lowerCamelCase__) SCREAMING_SNAKE_CASE_ : Optional[Any] = {"""input_ids""": input_ids, """lengths""": input_lengths, """langs""": token_type_ids} SCREAMING_SNAKE_CASE_ : Any = model(lowerCamelCase__) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size)) def _SCREAMING_SNAKE_CASE ( self : Dict , lowercase_ : List[str] , lowercase_ : Dict , lowercase_ : Tuple , lowercase_ : str , lowercase_ : Any , lowercase_ : Tuple , lowercase_ : Any , lowercase_ : int , lowercase_ : Tuple , ): '''simple docstring''' SCREAMING_SNAKE_CASE_ : int = TFFlaubertForQuestionAnsweringSimple(lowerCamelCase__) SCREAMING_SNAKE_CASE_ : int = {"""input_ids""": input_ids, """lengths""": input_lengths} SCREAMING_SNAKE_CASE_ : Optional[int] = model(lowerCamelCase__) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length)) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length)) def _SCREAMING_SNAKE_CASE ( self : List[Any] , lowercase_ : List[str] , lowercase_ : List[str] , lowercase_ : str , lowercase_ : Optional[Any] , lowercase_ : Tuple , lowercase_ : Tuple , lowercase_ : Optional[Any] , lowercase_ : int , lowercase_ : Optional[int] , ): '''simple docstring''' SCREAMING_SNAKE_CASE_ : List[Any] = TFFlaubertForSequenceClassification(lowerCamelCase__) SCREAMING_SNAKE_CASE_ : List[str] = {"""input_ids""": input_ids, """lengths""": input_lengths} SCREAMING_SNAKE_CASE_ : List[str] = model(lowerCamelCase__) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size)) def _SCREAMING_SNAKE_CASE ( self : Tuple , lowercase_ : str , lowercase_ : Any , lowercase_ : Tuple , lowercase_ : str , lowercase_ : Any , lowercase_ : Union[str, Any] , lowercase_ : List[str] , lowercase_ : str , lowercase_ : Any , ): '''simple docstring''' SCREAMING_SNAKE_CASE_ : Any = self.num_labels SCREAMING_SNAKE_CASE_ : Tuple = 
TFFlaubertForTokenClassification(config=lowerCamelCase__) SCREAMING_SNAKE_CASE_ : Any = {"""input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids} SCREAMING_SNAKE_CASE_ : List[Any] = model(lowerCamelCase__) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels)) def _SCREAMING_SNAKE_CASE ( self : Tuple , lowercase_ : Optional[int] , lowercase_ : Tuple , lowercase_ : Any , lowercase_ : Union[str, Any] , lowercase_ : str , lowercase_ : Any , lowercase_ : Optional[Any] , lowercase_ : Dict , lowercase_ : List[str] , ): '''simple docstring''' SCREAMING_SNAKE_CASE_ : Optional[int] = self.num_choices SCREAMING_SNAKE_CASE_ : Dict = TFFlaubertForMultipleChoice(config=lowerCamelCase__) SCREAMING_SNAKE_CASE_ : Any = tf.tile(tf.expand_dims(lowerCamelCase__ , 1) , (1, self.num_choices, 1)) SCREAMING_SNAKE_CASE_ : str = tf.tile(tf.expand_dims(lowerCamelCase__ , 1) , (1, self.num_choices, 1)) SCREAMING_SNAKE_CASE_ : Any = tf.tile(tf.expand_dims(lowerCamelCase__ , 1) , (1, self.num_choices, 1)) SCREAMING_SNAKE_CASE_ : int = { """input_ids""": multiple_choice_inputs_ids, """attention_mask""": multiple_choice_input_mask, """token_type_ids""": multiple_choice_token_type_ids, } SCREAMING_SNAKE_CASE_ : List[str] = model(lowerCamelCase__) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices)) def _SCREAMING_SNAKE_CASE ( self : Tuple): '''simple docstring''' SCREAMING_SNAKE_CASE_ : str = self.prepare_config_and_inputs() ( SCREAMING_SNAKE_CASE_ ) : str = config_and_inputs SCREAMING_SNAKE_CASE_ : Optional[Any] = { """input_ids""": input_ids, """token_type_ids""": token_type_ids, """langs""": token_type_ids, """lengths""": input_lengths, } return config, inputs_dict @require_tf class lowerCAmelCase__ ( UpperCAmelCase__ , UpperCAmelCase__ , unittest.TestCase ): '''simple docstring''' __UpperCamelCase = ( ( TFFlaubertModel, TFFlaubertWithLMHeadModel, TFFlaubertForSequenceClassification, TFFlaubertForQuestionAnsweringSimple, TFFlaubertForTokenClassification, TFFlaubertForMultipleChoice, ) if is_tf_available() else () ) __UpperCamelCase = ( (TFFlaubertWithLMHeadModel,) if is_tf_available() else () ) # TODO (PVP): Check other models whether language generation is also applicable __UpperCamelCase = ( { """feature-extraction""": TFFlaubertModel, """fill-mask""": TFFlaubertWithLMHeadModel, """question-answering""": TFFlaubertForQuestionAnsweringSimple, """text-classification""": TFFlaubertForSequenceClassification, """token-classification""": TFFlaubertForTokenClassification, """zero-shot""": TFFlaubertForSequenceClassification, } if is_tf_available() else {} ) __UpperCamelCase = False __UpperCamelCase = False def _SCREAMING_SNAKE_CASE ( self : Optional[int] , lowercase_ : List[Any] , lowercase_ : Dict , lowercase_ : int , lowercase_ : str , lowercase_ : List[Any]): '''simple docstring''' if ( pipeline_test_casse_name == "QAPipelineTests" and tokenizer_name is not None and not tokenizer_name.endswith('''Fast''') ): # `QAPipelineTests` fails for a few models when the slower tokenizer are used. 
# (The slower tokenizers were never used for pipeline tests before the pipeline testing rework) # TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer return True return False def _SCREAMING_SNAKE_CASE ( self : List[str]): '''simple docstring''' SCREAMING_SNAKE_CASE_ : List[str] = TFFlaubertModelTester(self) SCREAMING_SNAKE_CASE_ : Tuple = ConfigTester(self , config_class=lowerCamelCase__ , emb_dim=37) def _SCREAMING_SNAKE_CASE ( self : int): '''simple docstring''' self.config_tester.run_common_tests() def _SCREAMING_SNAKE_CASE ( self : List[str]): '''simple docstring''' SCREAMING_SNAKE_CASE_ : List[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_flaubert_model(*lowerCamelCase__) def _SCREAMING_SNAKE_CASE ( self : Tuple): '''simple docstring''' SCREAMING_SNAKE_CASE_ : Tuple = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_flaubert_lm_head(*lowerCamelCase__) def _SCREAMING_SNAKE_CASE ( self : Union[str, Any]): '''simple docstring''' SCREAMING_SNAKE_CASE_ : Optional[int] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_flaubert_qa(*lowerCamelCase__) def _SCREAMING_SNAKE_CASE ( self : List[Any]): '''simple docstring''' SCREAMING_SNAKE_CASE_ : Optional[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_flaubert_sequence_classif(*lowerCamelCase__) def _SCREAMING_SNAKE_CASE ( self : Any): '''simple docstring''' SCREAMING_SNAKE_CASE_ : Any = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_flaubert_for_token_classification(*lowerCamelCase__) def _SCREAMING_SNAKE_CASE ( self : List[Any]): '''simple docstring''' SCREAMING_SNAKE_CASE_ : Union[str, Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_flaubert_for_multiple_choice(*lowerCamelCase__) @slow def _SCREAMING_SNAKE_CASE ( self : str): '''simple docstring''' for model_name in TF_FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: SCREAMING_SNAKE_CASE_ : Dict = TFFlaubertModel.from_pretrained(lowerCamelCase__) self.assertIsNotNone(lowerCamelCase__) @require_tf @require_sentencepiece @require_tokenizers class lowerCAmelCase__ ( unittest.TestCase ): '''simple docstring''' @slow def _SCREAMING_SNAKE_CASE ( self : str): '''simple docstring''' SCREAMING_SNAKE_CASE_ : Tuple = TFFlaubertModel.from_pretrained('''jplu/tf-flaubert-small-cased''') SCREAMING_SNAKE_CASE_ : Optional[int] = tf.convert_to_tensor( [[0, 158, 735, 2592, 1424, 6727, 82, 1]] , dtype=tf.intaa , ) # "J'aime flaubert !" SCREAMING_SNAKE_CASE_ : Optional[Any] = model(lowerCamelCase__)[0] SCREAMING_SNAKE_CASE_ : Optional[int] = tf.TensorShape((1, 8, 512)) self.assertEqual(output.shape , lowerCamelCase__) # compare the actual values for a slice. SCREAMING_SNAKE_CASE_ : str = tf.convert_to_tensor( [ [ [-1.8_76_87_73, -1.56_65_55, 0.27_07_24_18], [-1.6_92_00_38, -0.5_87_35_05, 1.9_32_95_99], [-2.9_56_39_85, -1.6_99_38_35, 1.7_97_20_52], ] ] , dtype=tf.floataa , ) self.assertTrue(np.allclose(output[:, :3, :3].numpy() , expected_slice.numpy() , atol=1e-4))
512
from __future__ import annotations def A ( lowercase__ : str , lowercase__ : list[str] | None = None , lowercase__ : dict[str, float] | None = None , lowercase__ : bool = False , ) -> tuple[int, float, str]: UpperCamelCase__ :Dict = cipher_alphabet or [chr(lowercase__ ) for i in range(97 , 123 )] # If the argument is None or the user provided an empty dictionary if not frequencies_dict: # Frequencies of letters in the english language (how much they show up) UpperCamelCase__ :Optional[Any] = { """a""": 0.08497, """b""": 0.01492, """c""": 0.02202, """d""": 0.04253, """e""": 0.11162, """f""": 0.02228, """g""": 0.02015, """h""": 0.06094, """i""": 0.07546, """j""": 0.00153, """k""": 0.01292, """l""": 0.04025, """m""": 0.02406, """n""": 0.06749, """o""": 0.07507, """p""": 0.01929, """q""": 0.00095, """r""": 0.07587, """s""": 0.06327, """t""": 0.09356, """u""": 0.02758, """v""": 0.00978, """w""": 0.02560, """x""": 0.00150, """y""": 0.01994, """z""": 0.00077, } else: # Custom frequencies dictionary UpperCamelCase__ :Optional[int] = frequencies_dict if not case_sensitive: UpperCamelCase__ :int = ciphertext.lower() # Chi squared statistic values UpperCamelCase__ :dict[int, tuple[float, str]] = {} # cycle through all of the shifts for shift in range(len(lowercase__ ) ): UpperCamelCase__ :int = """""" # decrypt the message with the shift for letter in ciphertext: try: # Try to index the letter in the alphabet UpperCamelCase__ :int = (alphabet_letters.index(letter.lower() ) - shift) % len( lowercase__ ) decrypted_with_shift += ( alphabet_letters[new_key].upper() if case_sensitive and letter.isupper() else alphabet_letters[new_key] ) except ValueError: # Append the character if it isn't in the alphabet decrypted_with_shift += letter UpperCamelCase__ :Optional[int] = 0.0 # Loop through each letter in the decoded message with the shift for letter in decrypted_with_shift: if case_sensitive: UpperCamelCase__ :Optional[int] = letter.lower() if letter in frequencies: # Get the amount of times the letter occurs in the message UpperCamelCase__ :Optional[int] = decrypted_with_shift.lower().count(lowercase__ ) # Get the excepcted amount of times the letter should appear based # on letter frequencies UpperCamelCase__ :Optional[int] = frequencies[letter] * occurrences # Complete the chi squared statistic formula UpperCamelCase__ :Dict = ((occurrences - expected) ** 2) / expected # Add the margin of error to the total chi squared statistic chi_squared_statistic += chi_letter_value else: if letter.lower() in frequencies: # Get the amount of times the letter occurs in the message UpperCamelCase__ :List[str] = decrypted_with_shift.count(lowercase__ ) # Get the excepcted amount of times the letter should appear based # on letter frequencies UpperCamelCase__ :Union[str, Any] = frequencies[letter] * occurrences # Complete the chi squared statistic formula UpperCamelCase__ :List[str] = ((occurrences - expected) ** 2) / expected # Add the margin of error to the total chi squared statistic chi_squared_statistic += chi_letter_value # Add the data to the chi_squared_statistic_values dictionary UpperCamelCase__ :Union[str, Any] = ( chi_squared_statistic, decrypted_with_shift, ) # Get the most likely cipher by finding the cipher with the smallest chi squared # statistic def chi_squared_statistic_values_sorting_key(lowercase__ : int ) -> tuple[float, str]: return chi_squared_statistic_values[key] UpperCamelCase__ :int = min( lowercase__ , key=lowercase__ , ) # Get all the data from the most likely cipher (key, decoded message) ( 
( UpperCamelCase__ ) , ( UpperCamelCase__ ) , ) :Tuple = chi_squared_statistic_values[most_likely_cipher] # Return the data on the most likely shift return ( most_likely_cipher, most_likely_cipher_chi_squared_value, decoded_most_likely_cipher, )
45
0
from __future__ import annotations def UpperCamelCase__ ( _A: int ): '''simple docstring''' __lowerCamelCase = 2 __lowerCamelCase = [] while i * i <= n: if n % i: i += 1 else: n //= i factors.append(lowercase__ ) if n > 1: factors.append(lowercase__ ) return factors if __name__ == "__main__": import doctest doctest.testmod()
479
import warnings from ...utils import logging from .image_processing_mobilevit import MobileViTImageProcessor UpperCamelCase = logging.get_logger(__name__) class lowerCAmelCase_ ( lowercase ): """simple docstring""" def __init__( self :Union[str, Any] , *lowerCamelCase__ :Optional[int] , **lowerCamelCase__ :Dict ): warnings.warn( """The class MobileViTFeatureExtractor is deprecated and will be removed in version 5 of Transformers.""" """ Please use MobileViTImageProcessor instead.""" , lowerCamelCase__ , ) super().__init__(*lowerCamelCase__ , **lowerCamelCase__ )
45
0
'''simple docstring''' from __future__ import annotations def _A ( UpperCAmelCase ): '''simple docstring''' if not nums: return 0 A__ = nums[0] A__ = 0 for num in nums[1:]: A__ = ( max_excluding + num, max(lowercase__ ,lowercase__ ), ) return max(lowercase__ ,lowercase__ ) if __name__ == "__main__": import doctest doctest.testmod()
531
import json import sys import tempfile import unittest from pathlib import Path import transformers from transformers import ( CONFIG_MAPPING, FEATURE_EXTRACTOR_MAPPING, AutoConfig, AutoFeatureExtractor, WavaVecaConfig, WavaVecaFeatureExtractor, ) from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, get_tests_dir sys.path.append(str(Path(__file__).parent.parent.parent.parent / "utils")) from test_module.custom_configuration import CustomConfig # noqa E402 from test_module.custom_feature_extraction import CustomFeatureExtractor # noqa E402 UpperCamelCase = get_tests_dir("fixtures") UpperCamelCase = get_tests_dir("fixtures/dummy_feature_extractor_config.json") UpperCamelCase = get_tests_dir("fixtures/dummy-config.json") class lowerCAmelCase_ ( unittest.TestCase ): """simple docstring""" def __a ( self :Optional[int] ): UpperCamelCase__ :Optional[int] = 0 def __a ( self :str ): UpperCamelCase__ :Optional[Any] = AutoFeatureExtractor.from_pretrained("""facebook/wav2vec2-base-960h""" ) self.assertIsInstance(lowerCamelCase__ , lowerCamelCase__ ) def __a ( self :Dict ): UpperCamelCase__ :Dict = AutoFeatureExtractor.from_pretrained(lowerCamelCase__ ) self.assertIsInstance(lowerCamelCase__ , lowerCamelCase__ ) def __a ( self :Optional[int] ): with tempfile.TemporaryDirectory() as tmpdirname: UpperCamelCase__ :List[str] = WavaVecaConfig() # remove feature_extractor_type to make sure config.json alone is enough to load feature processor locally UpperCamelCase__ :Tuple = AutoFeatureExtractor.from_pretrained(lowerCamelCase__ ).to_dict() config_dict.pop("""feature_extractor_type""" ) UpperCamelCase__ :Union[str, Any] = WavaVecaFeatureExtractor(**lowerCamelCase__ ) # save in new folder model_config.save_pretrained(lowerCamelCase__ ) config.save_pretrained(lowerCamelCase__ ) UpperCamelCase__ :Optional[Any] = AutoFeatureExtractor.from_pretrained(lowerCamelCase__ ) # make sure private variable is not incorrectly saved UpperCamelCase__ :Tuple = json.loads(config.to_json_string() ) self.assertTrue("""_processor_class""" not in dict_as_saved ) self.assertIsInstance(lowerCamelCase__ , lowerCamelCase__ ) def __a ( self :Union[str, Any] ): UpperCamelCase__ :Dict = AutoFeatureExtractor.from_pretrained(lowerCamelCase__ ) self.assertIsInstance(lowerCamelCase__ , lowerCamelCase__ ) def __a ( self :Dict ): with self.assertRaisesRegex( lowerCamelCase__ , """bert-base is not a local folder and is not a valid model identifier""" ): UpperCamelCase__ :Dict = AutoFeatureExtractor.from_pretrained("""bert-base""" ) def __a ( self :List[Any] ): with self.assertRaisesRegex( lowerCamelCase__ , r"""aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)""" ): UpperCamelCase__ :Optional[int] = AutoFeatureExtractor.from_pretrained(lowerCamelCase__ , revision="""aaaaaa""" ) def __a ( self :int ): with self.assertRaisesRegex( lowerCamelCase__ , """hf-internal-testing/config-no-model does not appear to have a file named preprocessor_config.json.""" , ): UpperCamelCase__ :List[Any] = AutoFeatureExtractor.from_pretrained("""hf-internal-testing/config-no-model""" ) def __a ( self :Optional[int] ): # If remote code is not set, we will time out when asking whether to load the model. with self.assertRaises(lowerCamelCase__ ): UpperCamelCase__ :List[Any] = AutoFeatureExtractor.from_pretrained( """hf-internal-testing/test_dynamic_feature_extractor""" ) # If remote code is disabled, we can't load this config. 
with self.assertRaises(lowerCamelCase__ ): UpperCamelCase__ :Optional[Any] = AutoFeatureExtractor.from_pretrained( """hf-internal-testing/test_dynamic_feature_extractor""" , trust_remote_code=lowerCamelCase__ ) UpperCamelCase__ :str = AutoFeatureExtractor.from_pretrained( """hf-internal-testing/test_dynamic_feature_extractor""" , trust_remote_code=lowerCamelCase__ ) self.assertEqual(feature_extractor.__class__.__name__ , """NewFeatureExtractor""" ) # Test feature extractor can be reloaded. with tempfile.TemporaryDirectory() as tmp_dir: feature_extractor.save_pretrained(lowerCamelCase__ ) UpperCamelCase__ :Any = AutoFeatureExtractor.from_pretrained(lowerCamelCase__ , trust_remote_code=lowerCamelCase__ ) self.assertEqual(reloaded_feature_extractor.__class__.__name__ , """NewFeatureExtractor""" ) def __a ( self :Dict ): try: AutoConfig.register("""custom""" , lowerCamelCase__ ) AutoFeatureExtractor.register(lowerCamelCase__ , lowerCamelCase__ ) # Trying to register something existing in the Transformers library will raise an error with self.assertRaises(lowerCamelCase__ ): AutoFeatureExtractor.register(lowerCamelCase__ , lowerCamelCase__ ) # Now that the config is registered, it can be used as any other config with the auto-API UpperCamelCase__ :Any = CustomFeatureExtractor.from_pretrained(lowerCamelCase__ ) with tempfile.TemporaryDirectory() as tmp_dir: feature_extractor.save_pretrained(lowerCamelCase__ ) UpperCamelCase__ :List[Any] = AutoFeatureExtractor.from_pretrained(lowerCamelCase__ ) self.assertIsInstance(lowerCamelCase__ , lowerCamelCase__ ) finally: if "custom" in CONFIG_MAPPING._extra_content: del CONFIG_MAPPING._extra_content["custom"] if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content: del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig] def __a ( self :Optional[int] ): class lowerCAmelCase_ ( lowercase ): """simple docstring""" _snake_case : Optional[int] = True try: AutoConfig.register("""custom""" , lowerCamelCase__ ) AutoFeatureExtractor.register(lowerCamelCase__ , lowerCamelCase__ ) # If remote code is not set, the default is to use local UpperCamelCase__ :Optional[Any] = AutoFeatureExtractor.from_pretrained( """hf-internal-testing/test_dynamic_feature_extractor""" ) self.assertEqual(feature_extractor.__class__.__name__ , """NewFeatureExtractor""" ) self.assertTrue(feature_extractor.is_local ) # If remote code is disabled, we load the local one. UpperCamelCase__ :str = AutoFeatureExtractor.from_pretrained( """hf-internal-testing/test_dynamic_feature_extractor""" , trust_remote_code=lowerCamelCase__ ) self.assertEqual(feature_extractor.__class__.__name__ , """NewFeatureExtractor""" ) self.assertTrue(feature_extractor.is_local ) # If remote is enabled, we load from the Hub UpperCamelCase__ :Optional[int] = AutoFeatureExtractor.from_pretrained( """hf-internal-testing/test_dynamic_feature_extractor""" , trust_remote_code=lowerCamelCase__ ) self.assertEqual(feature_extractor.__class__.__name__ , """NewFeatureExtractor""" ) self.assertTrue(not hasattr(lowerCamelCase__ , """is_local""" ) ) finally: if "custom" in CONFIG_MAPPING._extra_content: del CONFIG_MAPPING._extra_content["custom"] if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content: del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]
45
0
'''simple docstring''' class _lowerCamelCase : # Public class to implement a graph '''simple docstring''' def __init__( self : List[Any] , _A : int , _A : int , _A : list[list[bool]] ) -> str: __magic_name__ : List[str] = row __magic_name__ : List[Any] = col __magic_name__ : Dict = graph def __lowerCAmelCase ( self : Dict , _A : int , _A : int , _A : list[list[bool]] ) -> List[str]: return ( 0 <= i < self.ROW and 0 <= j < self.COL and not visited[i][j] and self.graph[i][j] ) def __lowerCAmelCase ( self : str , _A : int , _A : int , _A : list[list[bool]] ) -> int: # Checking all 8 elements surrounding nth element __magic_name__ : Union[str, Any] = [-1, -1, -1, 0, 0, 1, 1, 1] # Coordinate order __magic_name__ : Optional[Any] = [-1, 0, 1, -1, 1, -1, 0, 1] __magic_name__ : List[Any] = True # Make those cells visited for k in range(8 ): if self.is_safe(i + row_nbr[k] , j + col_nbr[k] , lowerCamelCase__ ): self.diffs(i + row_nbr[k] , j + col_nbr[k] , lowerCamelCase__ ) def __lowerCAmelCase ( self : str ) -> Union[str, Any]: # And finally, count all islands. __magic_name__ : List[str] = [[False for j in range(self.COL )] for i in range(self.ROW )] __magic_name__ : str = 0 for i in range(self.ROW ): for j in range(self.COL ): if visited[i][j] is False and self.graph[i][j] == 1: self.diffs(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) count += 1 return count
561
import numpy as np import torch import tqdm from ...models.unet_ad import UNetaDModel from ...pipelines import DiffusionPipeline from ...utils import randn_tensor from ...utils.dummy_pt_objects import DDPMScheduler class lowerCAmelCase_ ( lowercase ): """simple docstring""" def __init__( self :int , lowerCamelCase__ :UNetaDModel , lowerCamelCase__ :UNetaDModel , lowerCamelCase__ :DDPMScheduler , lowerCamelCase__ :List[Any] , ): super().__init__() UpperCamelCase__ :Tuple = value_function UpperCamelCase__ :Optional[int] = unet UpperCamelCase__ :List[str] = scheduler UpperCamelCase__ :Dict = env UpperCamelCase__ :Dict = env.get_dataset() UpperCamelCase__ :Union[str, Any] = {} for key in self.data.keys(): try: UpperCamelCase__ :int = self.data[key].mean() except: # noqa: E722 pass UpperCamelCase__ :Any = {} for key in self.data.keys(): try: UpperCamelCase__ :int = self.data[key].std() except: # noqa: E722 pass UpperCamelCase__ :List[Any] = env.observation_space.shape[0] UpperCamelCase__ :List[str] = env.action_space.shape[0] def __a ( self :Union[str, Any] , lowerCamelCase__ :List[str] , lowerCamelCase__ :str ): return (x_in - self.means[key]) / self.stds[key] def __a ( self :int , lowerCamelCase__ :Optional[Any] , lowerCamelCase__ :Tuple ): return x_in * self.stds[key] + self.means[key] def __a ( self :Any , lowerCamelCase__ :int ): if type(lowerCamelCase__ ) is dict: return {k: self.to_torch(lowerCamelCase__ ) for k, v in x_in.items()} elif torch.is_tensor(lowerCamelCase__ ): return x_in.to(self.unet.device ) return torch.tensor(lowerCamelCase__ , device=self.unet.device ) def __a ( self :Union[str, Any] , lowerCamelCase__ :List[Any] , lowerCamelCase__ :Optional[Any] , lowerCamelCase__ :Tuple ): for key, val in cond.items(): UpperCamelCase__ :str = val.clone() return x_in def __a ( self :Union[str, Any] , lowerCamelCase__ :List[Any] , lowerCamelCase__ :int , lowerCamelCase__ :Optional[int] , lowerCamelCase__ :Optional[int] ): UpperCamelCase__ :Any = x.shape[0] UpperCamelCase__ :List[Any] = None for i in tqdm.tqdm(self.scheduler.timesteps ): # create batch of timesteps to pass into model UpperCamelCase__ :Optional[Any] = torch.full((batch_size,) , lowerCamelCase__ , device=self.unet.device , dtype=torch.long ) for _ in range(lowerCamelCase__ ): with torch.enable_grad(): x.requires_grad_() # permute to match dimension for pre-trained models UpperCamelCase__ :Dict = self.value_function(x.permute(0 , 2 , 1 ) , lowerCamelCase__ ).sample UpperCamelCase__ :List[Any] = torch.autograd.grad([y.sum()] , [x] )[0] UpperCamelCase__ :Union[str, Any] = self.scheduler._get_variance(lowerCamelCase__ ) UpperCamelCase__ :Any = torch.exp(0.5 * posterior_variance ) UpperCamelCase__ :Dict = model_std * grad UpperCamelCase__ :Optional[Any] = 0 UpperCamelCase__ :Dict = x.detach() UpperCamelCase__ :int = x + scale * grad UpperCamelCase__ :int = self.reset_xa(lowerCamelCase__ , lowerCamelCase__ , self.action_dim ) UpperCamelCase__ :List[str] = self.unet(x.permute(0 , 2 , 1 ) , lowerCamelCase__ ).sample.permute(0 , 2 , 1 ) # TODO: verify deprecation of this kwarg UpperCamelCase__ :List[str] = self.scheduler.step(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , predict_epsilon=lowerCamelCase__ )["""prev_sample"""] # apply conditions to the trajectory (set the initial state) UpperCamelCase__ :Optional[Any] = self.reset_xa(lowerCamelCase__ , lowerCamelCase__ , self.action_dim ) UpperCamelCase__ :Optional[int] = self.to_torch(lowerCamelCase__ ) return x, y def __call__( self :Optional[Any] , lowerCamelCase__ 
:Optional[int] , lowerCamelCase__ :str=64 , lowerCamelCase__ :Tuple=32 , lowerCamelCase__ :Dict=2 , lowerCamelCase__ :str=0.1 ): # normalize the observations and create batch dimension UpperCamelCase__ :List[str] = self.normalize(lowerCamelCase__ , """observations""" ) UpperCamelCase__ :List[str] = obs[None].repeat(lowerCamelCase__ , axis=0 ) UpperCamelCase__ :int = {0: self.to_torch(lowerCamelCase__ )} UpperCamelCase__ :Dict = (batch_size, planning_horizon, self.state_dim + self.action_dim) # generate initial noise and apply our conditions (to make the trajectories start at current state) UpperCamelCase__ :Any = randn_tensor(lowerCamelCase__ , device=self.unet.device ) UpperCamelCase__ :Optional[int] = self.reset_xa(lowerCamelCase__ , lowerCamelCase__ , self.action_dim ) UpperCamelCase__ :List[Any] = self.to_torch(lowerCamelCase__ ) # run the diffusion process UpperCamelCase__ , UpperCamelCase__ :Union[str, Any] = self.run_diffusion(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) # sort output trajectories by value UpperCamelCase__ :List[Any] = y.argsort(0 , descending=lowerCamelCase__ ).squeeze() UpperCamelCase__ :Dict = x[sorted_idx] UpperCamelCase__ :Tuple = sorted_values[:, :, : self.action_dim] UpperCamelCase__ :Optional[Any] = actions.detach().cpu().numpy() UpperCamelCase__ :Optional[int] = self.de_normalize(lowerCamelCase__ , key="""actions""" ) # select the action with the highest value if y is not None: UpperCamelCase__ :List[str] = 0 else: # if we didn't run value guiding, select a random action UpperCamelCase__ :Dict = np.random.randint(0 , lowerCamelCase__ ) UpperCamelCase__ :Tuple = denorm_actions[selected_index, 0] return denorm_actions
45
0
'''simple docstring''' import argparse import logging import os from datetime import datetime import numpy as np import torch from torch import nn from torch.utils.data import DataLoader, RandomSampler, TensorDataset from tqdm import tqdm from transformers import GPTaLMHeadModel _lowerCamelCase = logging.getLogger(__name__) def a__ ( _SCREAMING_SNAKE_CASE : Tuple , _SCREAMING_SNAKE_CASE : str ) -> Optional[int]: """simple docstring""" if os.path.exists(lowercase__ ): if os.path.exists(os.path.join(lowercase__ , "config.json" ) ) and os.path.isfile( os.path.join(lowercase__ , "config.json" ) ): os.remove(os.path.join(lowercase__ , "config.json" ) ) if os.path.exists(os.path.join(lowercase__ , "pytorch_model.bin" ) ) and os.path.isfile( os.path.join(lowercase__ , "pytorch_model.bin" ) ): os.remove(os.path.join(lowercase__ , "pytorch_model.bin" ) ) else: os.makedirs(lowercase__ ) model.save_pretrained(lowercase__ ) def a__ ( _SCREAMING_SNAKE_CASE : Union[str, Any] , _SCREAMING_SNAKE_CASE : Tuple=False ) -> List[Any]: """simple docstring""" UpperCAmelCase_ : int = 2 if unlogit: UpperCAmelCase_ : int = torch.pow(lowercase__ , lowercase__ ) UpperCAmelCase_ : List[Any] = p * torch.log(lowercase__ ) UpperCAmelCase_ : Tuple = 0 return -plogp.sum(dim=-1 ) def a__ ( _SCREAMING_SNAKE_CASE : Optional[int] ) -> int: """simple docstring""" logger.info("lv, h >\t" + "\t".join(F'''{x + 1}''' for x in range(len(lowercase__ ) ) ) ) for row in range(len(lowercase__ ) ): if tensor.dtype != torch.long: logger.info(F'''layer {row + 1}:\t''' + "\t".join(F'''{x:.5f}''' for x in tensor[row].cpu().data ) ) else: logger.info(F'''layer {row + 1}:\t''' + "\t".join(F'''{x:d}''' for x in tensor[row].cpu().data ) ) def a__ ( _SCREAMING_SNAKE_CASE : Any , _SCREAMING_SNAKE_CASE : Union[str, Any] , _SCREAMING_SNAKE_CASE : str , _SCREAMING_SNAKE_CASE : Union[str, Any]=True , _SCREAMING_SNAKE_CASE : Dict=True , _SCREAMING_SNAKE_CASE : Optional[Any]=None , _SCREAMING_SNAKE_CASE : Any=False ) -> List[Any]: """simple docstring""" UpperCAmelCase_ : Dict = model.config.num_hidden_layers, model.config.num_attention_heads UpperCAmelCase_ : Optional[Any] = torch.zeros(lowercase__ , lowercase__ ).to(args.device ) UpperCAmelCase_ : List[Any] = torch.zeros(lowercase__ , lowercase__ ).to(args.device ) if head_mask is None: UpperCAmelCase_ : Tuple = torch.ones(lowercase__ , lowercase__ ).to(args.device ) head_mask.requires_grad_(requires_grad=lowercase__ ) # If actually pruned attention multi-head, set head mask to None to avoid shape mismatch if actually_pruned: UpperCAmelCase_ : int = None UpperCAmelCase_ : str = 0.0 UpperCAmelCase_ : Optional[Any] = 0.0 for step, inputs in enumerate(tqdm(lowercase__ , desc="Iteration" , disable=args.local_rank not in [-1, 0] ) ): UpperCAmelCase_ : Optional[Any] = tuple(t.to(args.device ) for t in inputs ) (UpperCAmelCase_ ) : Optional[int] = inputs # Do a forward pass (not with torch.no_grad() since we need gradients for importance score - see below) UpperCAmelCase_ : List[Any] = model(lowercase__ , labels=lowercase__ , head_mask=lowercase__ ) # (loss), lm_logits, presents, (all hidden_states), (attentions) UpperCAmelCase_ : Dict = ( outputs[0], outputs[1], outputs[-1], ) # Loss and logits are the first, attention the last loss.backward() # Backpropagate to populate the gradients in the head mask total_loss += loss.detach().cpu().numpy() if compute_entropy: for layer, attn in enumerate(lowercase__ ): UpperCAmelCase_ : Any = entropy(attn.detach() , lowercase__ ) attn_entropy[layer] += 
masked_entropy.sum(-1 ).sum(0 ).sum(0 ).detach() if compute_importance: head_importance += head_mask.grad.abs().detach() tot_tokens += torch.ones_like(lowercase__ ).float().detach().sum().data # Normalize attn_entropy /= tot_tokens head_importance /= tot_tokens # Layerwise importance normalization if not args.dont_normalize_importance_by_layer: UpperCAmelCase_ : Optional[int] = 2 UpperCAmelCase_ : int = torch.pow(torch.pow(lowercase__ , lowercase__ ).sum(-1 ) , 1 / exponent ) head_importance /= norm_by_layer.unsqueeze(-1 ) + 1E-20 if not args.dont_normalize_global_importance: UpperCAmelCase_ : Optional[int] = (head_importance - head_importance.min()) / (head_importance.max() - head_importance.min()) # Print matrices if compute_entropy: logger.info("Attention entropies" ) print_ad_tensor(lowercase__ ) if compute_importance: logger.info("Head importance scores" ) print_ad_tensor(lowercase__ ) logger.info("Head ranked by importance scores" ) UpperCAmelCase_ : str = torch.zeros(head_importance.numel() , dtype=torch.long , device=args.device ) UpperCAmelCase_ : List[Any] = torch.arange( head_importance.numel() , device=args.device ) UpperCAmelCase_ : List[Any] = head_ranks.view_as(lowercase__ ) print_ad_tensor(lowercase__ ) return attn_entropy, head_importance, total_loss def a__ ( _SCREAMING_SNAKE_CASE : Optional[Any] , _SCREAMING_SNAKE_CASE : Optional[int] , _SCREAMING_SNAKE_CASE : Dict ) -> Union[str, Any]: """simple docstring""" UpperCAmelCase_ : List[Any] = compute_heads_importance(lowercase__ , lowercase__ , lowercase__ , compute_entropy=lowercase__ ) UpperCAmelCase_ : Optional[Any] = 1 / loss # instead of downsteam score use the LM loss logger.info("Pruning: original score: %f, threshold: %f" , lowercase__ , original_score * args.masking_threshold ) UpperCAmelCase_ : Tuple = torch.ones_like(lowercase__ ) UpperCAmelCase_ : List[str] = max(1 , int(new_head_mask.numel() * args.masking_amount ) ) UpperCAmelCase_ : Any = original_score while current_score >= original_score * args.masking_threshold: UpperCAmelCase_ : Optional[int] = new_head_mask.clone().detach() # save current head mask # heads from least important to most - keep only not-masked heads UpperCAmelCase_ : Optional[int] = float("Inf" ) UpperCAmelCase_ : Union[str, Any] = head_importance.view(-1 ).sort()[1] if len(lowercase__ ) <= num_to_mask: print("BREAK BY num_to_mask" ) break # mask heads UpperCAmelCase_ : int = current_heads_to_mask[:num_to_mask] logger.info("Heads to mask: %s" , str(current_heads_to_mask.tolist() ) ) UpperCAmelCase_ : str = new_head_mask.view(-1 ) UpperCAmelCase_ : Union[str, Any] = 0.0 UpperCAmelCase_ : Union[str, Any] = new_head_mask.view_as(lowercase__ ) UpperCAmelCase_ : Optional[Any] = new_head_mask.clone().detach() print_ad_tensor(lowercase__ ) # Compute metric and head importance again UpperCAmelCase_ : Optional[int] = compute_heads_importance( lowercase__ , lowercase__ , lowercase__ , compute_entropy=lowercase__ , head_mask=lowercase__ ) UpperCAmelCase_ : Tuple = 1 / loss logger.info( "Masking: current score: %f, remaining heads %d (%.1f percents)" , lowercase__ , new_head_mask.sum() , new_head_mask.sum() / new_head_mask.numel() * 1_00 , ) logger.info("Final head mask" ) print_ad_tensor(lowercase__ ) np.save(os.path.join(args.output_dir , "head_mask.npy" ) , head_mask.detach().cpu().numpy() ) return head_mask def a__ ( _SCREAMING_SNAKE_CASE : Tuple , _SCREAMING_SNAKE_CASE : Tuple , _SCREAMING_SNAKE_CASE : int , _SCREAMING_SNAKE_CASE : Optional[int] ) -> Union[str, Any]: """simple docstring""" 
UpperCAmelCase_ : Optional[Any] = datetime.now() UpperCAmelCase_ : Union[str, Any] = compute_heads_importance( lowercase__ , lowercase__ , lowercase__ , compute_entropy=lowercase__ , compute_importance=lowercase__ , head_mask=lowercase__ ) UpperCAmelCase_ : Dict = 1 / loss UpperCAmelCase_ : Optional[Any] = datetime.now() - before_time UpperCAmelCase_ : str = sum(p.numel() for p in model.parameters() ) UpperCAmelCase_ : Optional[Any] = { layer: (1 - head_mask[layer].long()).nonzero().squeeze().tolist() for layer in range(len(lowercase__ ) ) } for k, v in heads_to_prune.items(): if isinstance(lowercase__ , lowercase__ ): UpperCAmelCase_ : Optional[int] = [ v, ] assert sum(len(lowercase__ ) for h in heads_to_prune.values() ) == (1 - head_mask.long()).sum().item() model.prune_heads(lowercase__ ) UpperCAmelCase_ : int = sum(p.numel() for p in model.parameters() ) UpperCAmelCase_ : Dict = datetime.now() UpperCAmelCase_ : Union[str, Any] = compute_heads_importance( lowercase__ , lowercase__ , lowercase__ , compute_entropy=lowercase__ , compute_importance=lowercase__ , head_mask=lowercase__ , actually_pruned=lowercase__ , ) UpperCAmelCase_ : List[str] = 1 / loss UpperCAmelCase_ : List[str] = datetime.now() - before_time logger.info( "Pruning: original num of params: %.2e, after pruning %.2e (%.1f percents)" , lowercase__ , lowercase__ , pruned_num_params / original_num_params * 1_00 , ) logger.info("Pruning: score with masking: %f score with pruning: %f" , lowercase__ , lowercase__ ) logger.info("Pruning: speed ratio (original timing / new timing): %f percents" , original_time / new_time * 1_00 ) save_model(lowercase__ , args.output_dir ) def a__ ( ) -> Any: """simple docstring""" UpperCAmelCase_ : Tuple = argparse.ArgumentParser() # Required parameters parser.add_argument( "--data_dir" , default=lowercase__ , type=lowercase__ , required=lowercase__ , help="The input data dir. Should contain the .tsv files (or other data files) for the task." , ) parser.add_argument( "--model_name_or_path" , default=lowercase__ , type=lowercase__ , required=lowercase__ , help="Path to pretrained model or model identifier from huggingface.co/models" , ) parser.add_argument( "--output_dir" , default=lowercase__ , type=lowercase__ , required=lowercase__ , help="The output directory where the model predictions and checkpoints will be written." , ) # Other parameters parser.add_argument( "--config_name" , default="" , type=lowercase__ , help="Pretrained config name or path if not the same as model_name_or_path" , ) parser.add_argument( "--tokenizer_name" , default="" , type=lowercase__ , help="Pretrained tokenizer name or path if not the same as model_name_or_path" , ) parser.add_argument( "--cache_dir" , default=lowercase__ , type=lowercase__ , help="Where do you want to store the pre-trained models downloaded from s3" , ) parser.add_argument( "--data_subset" , type=lowercase__ , default=-1 , help="If > 0: limit the data to a subset of data_subset instances." 
) parser.add_argument( "--overwrite_output_dir" , action="store_true" , help="Whether to overwrite data in output directory" ) parser.add_argument( "--overwrite_cache" , action="store_true" , help="Overwrite the cached training and evaluation sets" ) parser.add_argument( "--dont_normalize_importance_by_layer" , action="store_true" , help="Don't normalize importance score by layers" ) parser.add_argument( "--dont_normalize_global_importance" , action="store_true" , help="Don't normalize all importance scores between 0 and 1" , ) parser.add_argument( "--try_masking" , action="store_true" , help="Whether to try to mask head until a threshold of accuracy." ) parser.add_argument( "--masking_threshold" , default=0.9 , type=lowercase__ , help="masking threshold in term of metrics (stop masking when metric < threshold * original metric value)." , ) parser.add_argument( "--masking_amount" , default=0.1 , type=lowercase__ , help="Amount to heads to masking at each masking step." ) parser.add_argument("--metric_name" , default="acc" , type=lowercase__ , help="Metric to use for head masking." ) parser.add_argument( "--max_seq_length" , default=1_28 , type=lowercase__ , help=( "The maximum total input sequence length after WordPiece tokenization. \n" "Sequences longer than this will be truncated, sequences shorter padded." ) , ) parser.add_argument("--batch_size" , default=1 , type=lowercase__ , help="Batch size." ) parser.add_argument("--seed" , type=lowercase__ , default=42 ) parser.add_argument("--local_rank" , type=lowercase__ , default=-1 , help="local_rank for distributed training on gpus" ) parser.add_argument("--no_cuda" , action="store_true" , help="Whether not to use CUDA when available" ) parser.add_argument("--server_ip" , type=lowercase__ , default="" , help="Can be used for distant debugging." ) parser.add_argument("--server_port" , type=lowercase__ , default="" , help="Can be used for distant debugging." 
) UpperCAmelCase_ : List[str] = parser.parse_args() if args.server_ip and args.server_port: # Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script import ptvsd print("Waiting for debugger attach" ) ptvsd.enable_attach(address=(args.server_ip, args.server_port) , redirect_output=lowercase__ ) ptvsd.wait_for_attach() # Setup devices and distributed training if args.local_rank == -1 or args.no_cuda: UpperCAmelCase_ : Dict = torch.device("cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu" ) UpperCAmelCase_ : Tuple = 0 if args.no_cuda else torch.cuda.device_count() else: torch.cuda.set_device(args.local_rank ) UpperCAmelCase_ : int = torch.device("cuda" , args.local_rank ) UpperCAmelCase_ : List[str] = 1 torch.distributed.init_process_group(backend="nccl" ) # Initializes the distributed backend # Setup logging logging.basicConfig(level=logging.INFO if args.local_rank in [-1, 0] else logging.WARN ) logger.info("device: {} n_gpu: {}, distributed: {}".format(args.device , args.n_gpu , bool(args.local_rank != -1 ) ) ) UpperCAmelCase_ : Union[str, Any] = GPTaLMHeadModel.from_pretrained(args.model_name_or_path ) # Distributed and parallel training model.to(args.device ) if args.local_rank != -1: UpperCAmelCase_ : Any = nn.parallel.DistributedDataParallel( lowercase__ , device_ids=[args.local_rank] , output_device=args.local_rank , find_unused_parameters=lowercase__ ) elif args.n_gpu > 1: UpperCAmelCase_ : Tuple = nn.DataParallel(lowercase__ ) # Print/save training arguments os.makedirs(args.output_dir , exist_ok=lowercase__ ) torch.save(lowercase__ , os.path.join(args.output_dir , "run_args.bin" ) ) logger.info("Training/evaluation parameters %s" , lowercase__ ) # Prepare dataset UpperCAmelCase_ : Optional[int] = np.concatenate( [ np.loadtxt(args.data_dir , dtype=np.intaa ), ] ) UpperCAmelCase_ : int = (torch.from_numpy(lowercase__ ),) UpperCAmelCase_ : List[Any] = TensorDataset(*lowercase__ ) UpperCAmelCase_ : Tuple = RandomSampler(lowercase__ ) UpperCAmelCase_ : Optional[int] = DataLoader(lowercase__ , sampler=lowercase__ , batch_size=args.batch_size ) # Compute head entropy and importance score compute_heads_importance(lowercase__ , lowercase__ , lowercase__ ) # Try head masking (set heads to zero until the score goes under a threshole) # and head pruning (remove masked heads and see the effect on the network) if args.try_masking and args.masking_threshold > 0.0 and args.masking_threshold < 1.0: UpperCAmelCase_ : Optional[int] = mask_heads(lowercase__ , lowercase__ , lowercase__ ) prune_heads(lowercase__ , lowercase__ , lowercase__ , lowercase__ ) if __name__ == "__main__": main()
71
def A ( lowercase__ : int ) -> bool: if num < 0: return False UpperCamelCase__ :int = num UpperCamelCase__ :int = 0 while num > 0: UpperCamelCase__ :Optional[int] = rev_num * 10 + (num % 10) num //= 10 return num_copy == rev_num if __name__ == "__main__": import doctest doctest.testmod()
45
0
import os def lowerCAmelCase_ (lowerCAmelCase__: str = "input.txt" ): """simple docstring""" with open(os.path.join(os.path.dirname(lowercase__ ) , lowercase__ ) ) as input_file: UpperCAmelCase_: Any = [ [int(lowercase__ ) for element in line.split(""",""" )] for line in input_file.readlines() ] UpperCAmelCase_: int = len(lowercase__ ) UpperCAmelCase_: Union[str, Any] = len(matrix[0] ) UpperCAmelCase_: Optional[int] = [[-1 for _ in range(lowercase__ )] for _ in range(lowercase__ )] for i in range(lowercase__ ): UpperCAmelCase_: List[Any] = matrix[i][0] for j in range(1 , lowercase__ ): for i in range(lowercase__ ): UpperCAmelCase_: List[Any] = minimal_path_sums[i][j - 1] + matrix[i][j] for i in range(1 , lowercase__ ): UpperCAmelCase_: Optional[Any] = min( minimal_path_sums[i][j] , minimal_path_sums[i - 1][j] + matrix[i][j] ) for i in range(rows - 2 , -1 , -1 ): UpperCAmelCase_: Optional[int] = min( minimal_path_sums[i][j] , minimal_path_sums[i + 1][j] + matrix[i][j] ) return min(minimal_path_sums_row[-1] for minimal_path_sums_row in minimal_path_sums ) if __name__ == "__main__": print(F'''{solution() = }''')
556
from __future__ import annotations def A ( lowercase__ : list[int] ) -> bool: return len(set(lowercase__ ) ) == len(lowercase__ ) if __name__ == "__main__": import doctest doctest.testmod()
45
0