Dataset schema (each row pairs a code sample with a style-context sample):

    code                     string  (82 to 53.2k chars)
    code_codestyle           int64   (0 to 721)
    style_context            string  (91 to 41.9k chars)
    style_context_codestyle  int64   (0 to 699)
    label                    int64   (0 or 1)
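In the rows below, label is 1 exactly when code_codestyle and style_context_codestyle carry the same id, so the label appears to mark whether the two samples come from the same style cluster. As a minimal sketch of consuming rows with this schema via the datasets library (the hub id "user/code-style-pairs" is a placeholder, not the real dataset path):

import datasets  # pip install datasets

ds = datasets.load_dataset("user/code-style-pairs", split="train")  # hypothetical repo id
row = ds[0]
print(row["code_codestyle"], row["style_context_codestyle"], row["label"])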
import os
import re
import unicodedata
from shutil import copyfile
from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple, Union

import sentencepiece as spm

from ...tokenization_utils import PreTrainedTokenizer
from ...utils import is_torch_available, logging


if is_torch_available():
    import torch

if TYPE_CHECKING:
    from transformers.pipelines.conversational import Conversation

logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "spiece.model"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "AI-Sweden/gpt-sw3-126m": "https://huggingface.co/AI-Sweden/gpt-sw3-126m/resolve/main/spiece.model",
        "AI-Sweden/gpt-sw3-350m": "https://huggingface.co/AI-Sweden/gpt-sw3-350m/resolve/main/spiece.model",
        "AI-Sweden/gpt-sw3-1.6b": "https://huggingface.co/AI-Sweden/gpt-sw3-1.6b/resolve/main/spiece.model",
        "AI-Sweden/gpt-sw3-6.7b": "https://huggingface.co/AI-Sweden/gpt-sw3-6.7b/resolve/main/spiece.model",
        "AI-Sweden/gpt-sw3-20b": "https://huggingface.co/AI-Sweden/gpt-sw3-20b/resolve/main/spiece.model",
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "AI-Sweden/gpt-sw3-126m": 2048,
    "AI-Sweden/gpt-sw3-350m": 2048,
    "AI-Sweden/gpt-sw3-1.6b": 2048,
    "AI-Sweden/gpt-sw3-6.7b": 2048,
    "AI-Sweden/gpt-sw3-20b": 2048,
}


class GPTSw3Tokenizer(PreTrainedTokenizer):
    """Constructs a GPT-SW3 tokenizer backed by SentencePiece."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        do_lower_case=False,
        remove_space=False,
        keep_accents=False,
        pad_token=None,
        unk_token=None,
        eos_token=None,
        bos_token=None,
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ) -> None:
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        name_or_path = kwargs.get("name_or_path")
        if name_or_path is None:
            logger.warning(
                "name_or_path not provided, will work for all GPTSw3 models except gpt-sw3-7b,"
                " you are testing the model, this can safely be ignored"
            )
            name_or_path = "None"

        # Default definitions for our 2 tokenizer versions, with None-checks to enable proper testing
        eos_token = "<|endoftext|>" if eos_token is None else eos_token
        unk_token = "<unk>" if unk_token is None else unk_token
        if "gpt-sw3-7b" in name_or_path:
            pad_token = unk_token if pad_token is None else pad_token
            bos_token = eos_token if bos_token is None else bos_token
        else:
            pad_token = "<pad>" if pad_token is None else pad_token
            bos_token = "<s>" if bos_token is None else bos_token

        super().__init__(
            do_lower_case=do_lower_case,
            remove_space=remove_space,
            keep_accents=keep_accents,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            pad_token=pad_token,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )

        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(vocab_file)

        # Used for whitespace normalization in input texts
        # fmt: off
        self.whitespaces = {" ", " ", " ", " ", " ", " ", " ", " ", " ", " ", "", "„"}  # NOTE: distinct Unicode space/control characters in the source; several render identically here
        # fmt: on

        # Regular expression to remove non-printing characters (e.g. some unicode control chars) in preprocessing
        self.non_printing_characters_re = re.compile(
            f"[{''.join(map(chr, list(range(0, 9)) + list(range(11, 32)) + list(range(127, 160)) + [160, 173, 8203]))}]"
        )

    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)

    @property
    # Copied from transformers.models.albert.tokenization_albert.AlbertTokenizer.vocab_size
    def vocab_size(self) -> int:
        return len(self.sp_model)

    def preprocess_text(self, text: str) -> str:
        text = self.non_printing_characters_re.sub("", text)
        # Normalize whitespaces
        text = "".join([char if char not in self.whitespaces else " " for char in text])
        # NFC Unicode normalization
        text = unicodedata.normalize("NFC", text)
        return text

    def _tokenize(self, text: str, **kwargs) -> List[str]:
        text = self.preprocess_text(text)
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token: str) -> int:
        return self.sp_model.PieceToId(token)

    def _convert_id_to_token(self, index: int) -> str:
        return self.sp_model.IdToPiece(index)

    @staticmethod
    def clean_up_tokenization(out_string: str) -> str:
        """Returns the input string, this function is overridden to remove the default clean up."""
        return out_string

    def convert_tokens_to_string(self, tokens: List[str]) -> str:
        current_sub_tokens = []
        out_string = ""
        prev_is_special = False
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                # TODO: Check if this is needed, as it ensures that decode(encode(doc)) != doc
                # by adding extra whitespace in the decoded document
                if not prev_is_special:
                    out_string += " "
                out_string += self.sp_model.decode(current_sub_tokens) + token
                prev_is_special = True
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
                prev_is_special = False
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string

    def get_vocab(self) -> Dict[str, int]:
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)

    def encode_fast(
        self, text: Union[str, List[str]], return_tensors: Union[str, bool] = False
    ) -> Union[List[int], List[List[int]], "torch.Tensor"]:
        if isinstance(text, str):
            text = self.preprocess_text(text)
            token_ids = self.sp_model.encode(text)
        else:
            text = [self.preprocess_text(t) for t in text]
            token_ids = self.sp_model.encode(text)

        if return_tensors is True or return_tensors == "pt":
            token_ids = torch.tensor(token_ids)

        return token_ids

    def decode_fast(self, token_ids: Union[int, List[int]]) -> str:
        return self.sp_model.decode(token_ids)

    def _build_conversation_input_ids(self, conversation: "Conversation") -> List[int]:
        all_responses = [f"User: {text}" if is_user else f"Bot: {text}" for is_user, text in conversation.iter_texts()]
        prompt = (
            f"{self.eos_token}{self.bos_token}" + f"{self.bos_token}".join(all_responses) + f"{self.bos_token}Bot:"
        )
        return self.encode(text=prompt)
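A minimal usage sketch for the tokenizer above, assuming one of the checkpoints from PRETRAINED_VOCAB_FILES_MAP is reachable; the Swedish sample sentence is illustrative:

from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("AI-Sweden/gpt-sw3-126m")  # repo id taken from the map above
ids = tok("Träd är fina")["input_ids"]
print(tok.decode(ids))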
code_codestyle: 30
from __future__ import annotations

import math

__version__ = "2020.9.26"
__author__ = "xcodz-dot, cclaus, dhruvmanila"


def convert_to_2d(x: float, y: float, z: float, scale: float, distance: float) -> tuple[float, float]:
    """Convert a 3D point to its 2D perspective projection."""
    if not all(isinstance(val, (float, int)) for val in locals().values()):
        msg = f"Input values must either be float or int: {list(locals().values())}"
        raise TypeError(msg)
    projected_x = ((x * distance) / (z + distance)) * scale
    projected_y = ((y * distance) / (z + distance)) * scale
    return projected_x, projected_y


def rotate(x: float, y: float, z: float, axis: str, angle: float) -> tuple[float, float, float]:
    """Rotate a 3D point about the given axis by the given angle."""
    if not isinstance(axis, str):
        raise TypeError("Axis must be a str")
    input_variables = locals()
    del input_variables["axis"]
    if not all(isinstance(val, (float, int)) for val in input_variables.values()):
        msg = (
            "Input values except axis must either be float or int: "
            f"{list(input_variables.values())}"
        )
        raise TypeError(msg)
    angle = (angle % 360) / 450 * 180 / math.pi
    if axis == "z":
        new_x = x * math.cos(angle) - y * math.sin(angle)
        new_y = y * math.cos(angle) + x * math.sin(angle)
        new_z = z
    elif axis == "x":
        new_y = y * math.cos(angle) - z * math.sin(angle)
        new_z = z * math.cos(angle) + y * math.sin(angle)
        new_x = x
    elif axis == "y":
        new_x = x * math.cos(angle) - z * math.sin(angle)
        new_z = z * math.cos(angle) + x * math.sin(angle)
        new_y = y
    else:
        raise ValueError("not a valid axis, choose one of 'x', 'y', 'z'")
    return new_x, new_y, new_z


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    print(f"{convert_to_2d(1.0, 2.0, 3.0, 10.0, 10.0) = }")
    print(f"{rotate(1.0, 2.0, 3.0, 'y', 90.0) = }")
style_context_codestyle: 30
label: 1
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_sentencepiece_available,
    is_speech_available,
    is_torch_available,
)


_import_structure = {
    "configuration_trocr": ["TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP", "TrOCRConfig"],
    "processing_trocr": ["TrOCRProcessor"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_trocr"] = [
        "TROCR_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TrOCRForCausalLM",
        "TrOCRPreTrainedModel",
    ]


if TYPE_CHECKING:
    from .configuration_trocr import TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP, TrOCRConfig
    from .processing_trocr import TrOCRProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_trocr import TROCR_PRETRAINED_MODEL_ARCHIVE_LIST, TrOCRForCausalLM, TrOCRPreTrainedModel
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
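The _LazyModule indirection above keeps importing the package cheap: submodules listed in _import_structure load only when one of their names is first accessed. A one-line illustration, assuming transformers is installed:

from transformers.models.trocr import TrOCRProcessor  # this attribute access triggers the actual import of processing_trocr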
code_codestyle: 702
__author__ = "Tobias Carryer"

from time import time


class LinearCongruentialGenerator:
    """A pseudorandom number generator based on the linear congruential method."""

    def __init__(self, multiplier, increment, modulo, seed=int(time())):  # noqa: B008
        self.multiplier = multiplier
        self.increment = increment
        self.modulo = modulo
        self.seed = seed

    def next_number(self):
        """Advance the generator state and return the next pseudorandom number."""
        self.seed = (self.multiplier * self.seed + self.increment) % self.modulo
        return self.seed


if __name__ == "__main__":
    # Show the LCG in action.
    lcg = LinearCongruentialGenerator(1664525, 1013904223, 2 << 31)
    while True:
        print(lcg.next_number())
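The demo constants are the well-known Numerical Recipes LCG parameters (multiplier 1664525, increment 1013904223, modulus 2 << 31 == 2**32). Passing an explicit seed makes the stream reproducible; the seed value below is illustrative:

lcg = LinearCongruentialGenerator(1664525, 1013904223, 2 << 31, seed=42)
print([lcg.next_number() for _ in range(3)])  # identical output on every run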
style_context_codestyle: 618
label: 0
"""simple docstring""" import numpy class lowercase__ : """simple docstring""" def __init__( self , _A , _A ): '''simple docstring''' UpperCamelCase : Dict = input_array # Random initial weights are assigned where first argument is the # number of nodes in previous layer and second argument is the # number of nodes in the next layer. # Random initial weights are assigned. # self.input_array.shape[1] is used to represent number of nodes in input layer. # First hidden layer consists of 4 nodes. UpperCamelCase : Optional[Any] = numpy.random.rand( self.input_array.shape[1] , 4 ) # Random initial values for the first hidden layer. # First hidden layer has 4 nodes. # Second hidden layer has 3 nodes. UpperCamelCase : Union[str, Any] = numpy.random.rand( 4 , 3 ) # Random initial values for the second hidden layer. # Second hidden layer has 3 nodes. # Output layer has 1 node. UpperCamelCase : Dict = numpy.random.rand(3 , 1 ) # Real output values provided. UpperCamelCase : Dict = output_array # Predicted output values by the neural network. # Predicted_output array initially consists of zeroes. UpperCamelCase : Optional[Any] = numpy.zeros(output_array.shape ) def _a ( self ): '''simple docstring''' UpperCamelCase : Tuple = sigmoid( numpy.dot(self.input_array , self.input_layer_and_first_hidden_layer_weights ) ) # layer_between_first_hidden_layer_and_second_hidden_layer is the layer # connecting the first hidden set of nodes with the second hidden set of nodes. UpperCamelCase : Optional[int] = sigmoid( numpy.dot( self.layer_between_input_and_first_hidden_layer , self.first_hidden_layer_and_second_hidden_layer_weights , ) ) # layer_between_second_hidden_layer_and_output is the layer connecting # second hidden layer with the output node. UpperCamelCase : Any = sigmoid( numpy.dot( self.layer_between_first_hidden_layer_and_second_hidden_layer , self.second_hidden_layer_and_output_layer_weights , ) ) return self.layer_between_second_hidden_layer_and_output def _a ( self ): '''simple docstring''' UpperCamelCase : Dict = numpy.dot( self.layer_between_first_hidden_layer_and_second_hidden_layer.T , 2 * (self.output_array - self.predicted_output) * sigmoid_derivative(self.predicted_output ) , ) UpperCamelCase : List[str] = numpy.dot( self.layer_between_input_and_first_hidden_layer.T , numpy.dot( 2 * (self.output_array - self.predicted_output) * sigmoid_derivative(self.predicted_output ) , self.second_hidden_layer_and_output_layer_weights.T , ) * sigmoid_derivative( self.layer_between_first_hidden_layer_and_second_hidden_layer ) , ) UpperCamelCase : List[str] = numpy.dot( self.input_array.T , numpy.dot( numpy.dot( 2 * (self.output_array - self.predicted_output) * sigmoid_derivative(self.predicted_output ) , self.second_hidden_layer_and_output_layer_weights.T , ) * sigmoid_derivative( self.layer_between_first_hidden_layer_and_second_hidden_layer ) , self.first_hidden_layer_and_second_hidden_layer_weights.T , ) * sigmoid_derivative(self.layer_between_input_and_first_hidden_layer ) , ) self.input_layer_and_first_hidden_layer_weights += ( updated_input_layer_and_first_hidden_layer_weights ) self.first_hidden_layer_and_second_hidden_layer_weights += ( updated_first_hidden_layer_and_second_hidden_layer_weights ) self.second_hidden_layer_and_output_layer_weights += ( updated_second_hidden_layer_and_output_layer_weights ) def _a ( self , _A , _A , _A ): '''simple docstring''' for iteration in range(1 , iterations + 1 ): UpperCamelCase : int = self.feedforward() self.back_propagation() if give_loss: UpperCamelCase : 
Dict = numpy.mean(numpy.square(output - self.feedforward() ) ) print(f"""Iteration {iteration} Loss: {loss}""" ) def _a ( self , _A ): '''simple docstring''' UpperCamelCase : List[Any] = input_arr UpperCamelCase : int = sigmoid( numpy.dot(self.array , self.input_layer_and_first_hidden_layer_weights ) ) UpperCamelCase : str = sigmoid( numpy.dot( self.layer_between_input_and_first_hidden_layer , self.first_hidden_layer_and_second_hidden_layer_weights , ) ) UpperCamelCase : int = sigmoid( numpy.dot( self.layer_between_first_hidden_layer_and_second_hidden_layer , self.second_hidden_layer_and_output_layer_weights , ) ) return int(self.layer_between_second_hidden_layer_and_output > 0.6 ) def UpperCamelCase (SCREAMING_SNAKE_CASE ): return 1 / (1 + numpy.exp(-value )) def UpperCamelCase (SCREAMING_SNAKE_CASE ): return (value) * (1 - (value)) def UpperCamelCase (): UpperCamelCase : str = numpy.array( ( [0, 0, 0], [0, 0, 1], [0, 1, 0], [0, 1, 1], [1, 0, 0], [1, 0, 1], [1, 1, 0], [1, 1, 1], ) , dtype=numpy.floataa , ) # True output values for the given input values. UpperCamelCase : Optional[int] = numpy.array(([0], [1], [1], [0], [1], [0], [0], [1]) , dtype=numpy.floataa ) # Calling neural network class. UpperCamelCase : Dict = TwoHiddenLayerNeuralNetwork( input_array=SCREAMING_SNAKE_CASE , output_array=SCREAMING_SNAKE_CASE ) # Calling training function. # Set give_loss to True if you want to see loss in every iteration. neural_network.train(output=SCREAMING_SNAKE_CASE , iterations=10 , give_loss=SCREAMING_SNAKE_CASE ) return neural_network.predict(numpy.array(([1, 1, 1]) , dtype=numpy.floataa ) ) if __name__ == "__main__": example()
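The truth table in example() is the 3-bit parity (XOR) function: the target is 1 exactly when the input has an odd number of ones. A sketch of the same training call with per-iteration loss printed (arrays as built in example(), give_loss flipped):

network = TwoHiddenLayerNeuralNetwork(input_array=test_input, output_array=output)
network.train(output=output, iterations=10, give_loss=True)  # prints "Iteration i Loss: ..." each step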
code_codestyle: 102
import os from glob import glob import imageio import torch import torchvision import wandb from img_processing import custom_to_pil, loop_post_process, preprocess, preprocess_vqgan from loaders import load_vqgan from PIL import Image from torch import nn from transformers import CLIPModel, CLIPTokenizerFast from utils import get_device, get_timestamp, show_pil class lowerCAmelCase : '''simple docstring''' def __init__( self : str , __a : str = "cpu" , __a : str = "openai/clip-vit-large-patch14" ) -> None: """simple docstring""" __lowercase : Tuple = device __lowercase : Union[str, Any] = CLIPTokenizerFast.from_pretrained(__a ) __lowercase : int = [0.48145466, 0.4578275, 0.40821073] __lowercase : Optional[Any] = [0.26862954, 0.26130258, 0.27577711] __lowercase : Tuple = torchvision.transforms.Normalize(self.image_mean , self.image_std ) __lowercase : Optional[int] = torchvision.transforms.Resize(224 ) __lowercase : List[Any] = torchvision.transforms.CenterCrop(224 ) def lowerCAmelCase ( self : str , __a : Optional[int] ) -> Tuple: """simple docstring""" __lowercase : Any = self.resize(__a ) __lowercase : Tuple = self.center_crop(__a ) __lowercase : Any = self.normalize(__a ) return images def __call__( self : Any , __a : Optional[Any]=None , __a : List[Any]=None , **__a : Optional[int] ) -> Optional[Any]: """simple docstring""" __lowercase : List[str] = self.tokenizer(text=__a , **__a ) __lowercase : List[str] = self.preprocess_img(__a ) __lowercase : Tuple = {key: value.to(self.device ) for (key, value) in encoding.items()} return encoding class lowerCAmelCase ( nn.Module ): '''simple docstring''' def __init__( self : List[str] , __a : Tuple=10 , __a : Optional[int]=0.01 , __a : Optional[Any]=None , __a : Any=None , __a : List[str]=None , __a : Optional[Any]=None , __a : Optional[int]=None , __a : List[str]=None , __a : Optional[Any]=False , __a : int=True , __a : str="image" , __a : List[str]=True , __a : Tuple=False , __a : Optional[Any]=False , __a : Dict=False , ) -> None: """simple docstring""" super().__init__() __lowercase : int = None __lowercase : List[Any] = device if device else get_device() if vqgan: __lowercase : Union[str, Any] = vqgan else: __lowercase : Dict = load_vqgan(self.device , conf_path=__a , ckpt_path=__a ) self.vqgan.eval() if clip: __lowercase : Any = clip else: __lowercase : List[str] = CLIPModel.from_pretrained("""openai/clip-vit-base-patch32""" ) self.clip.to(self.device ) __lowercase : Any = ProcessorGradientFlow(device=self.device ) __lowercase : List[Any] = iterations __lowercase : List[Any] = lr __lowercase : Union[str, Any] = log __lowercase : List[Any] = make_grid __lowercase : str = return_val __lowercase : str = quantize __lowercase : Dict = self.vqgan.decoder.z_shape def lowerCAmelCase ( self : List[str] , __a : List[Any]=None , __a : int=None , __a : str=5 , __a : Union[str, Any]=True ) -> List[Any]: """simple docstring""" __lowercase : Optional[Any] = [] if output_path is None: __lowercase : Optional[int] = """./animation.gif""" if input_path is None: __lowercase : Any = self.save_path __lowercase : Any = sorted(glob(input_path + """/*""" ) ) if not len(__a ): raise ValueError( """No images found in save path, aborting (did you pass save_intermediate=True to the generate""" """ function?)""" ) if len(__a ) == 1: print("""Only one image found in save path, (did you pass save_intermediate=True to the generate function?)""" ) __lowercase : Any = total_duration / len(__a ) __lowercase : int = [frame_duration] * len(__a ) if extend_frames: __lowercase : 
Optional[Any] = 1.5 __lowercase : Optional[Any] = 3 for file_name in paths: if file_name.endswith(""".png""" ): images.append(imageio.imread(__a ) ) imageio.mimsave(__a , __a , duration=__a ) print(F"gif saved to {output_path}" ) def lowerCAmelCase ( self : Dict , __a : int=None , __a : Dict=None ) -> Optional[int]: """simple docstring""" if not (path or img): raise ValueError("""Input either path or tensor""" ) if img is not None: raise NotImplementedError __lowercase : Dict = preprocess(Image.open(__a ) , target_image_size=256 ).to(self.device ) __lowercase : Optional[Any] = preprocess_vqgan(__a ) __lowercase , *__lowercase : Optional[int] = self.vqgan.encode(__a ) return z def lowerCAmelCase ( self : str , __a : Optional[Any] ) -> Dict: """simple docstring""" __lowercase : List[str] = self.latent.detach().requires_grad_() __lowercase : Union[str, Any] = base_latent + transform_vector if self.quantize: __lowercase , *__lowercase : List[Any] = self.vqgan.quantize(__a ) else: __lowercase : Dict = trans_latent return self.vqgan.decode(__a ) def lowerCAmelCase ( self : Tuple , __a : Optional[int] , __a : List[Any] , __a : int=None ) -> Optional[int]: """simple docstring""" __lowercase : Dict = self.clip_preprocessor(text=__a , images=__a , return_tensors="""pt""" , padding=__a ) __lowercase : Optional[int] = self.clip(**__a ) __lowercase : str = clip_outputs.logits_per_image if weights is not None: __lowercase : Union[str, Any] = similarity_logits * weights return similarity_logits.sum() def lowerCAmelCase ( self : str , __a : str , __a : Dict , __a : List[str] ) -> Optional[int]: """simple docstring""" __lowercase : str = self._get_clip_similarity(pos_prompts["""prompts"""] , __a , weights=(1 / pos_prompts["""weights"""]) ) if neg_prompts: __lowercase : Dict = self._get_clip_similarity(neg_prompts["""prompts"""] , __a , weights=neg_prompts["""weights"""] ) else: __lowercase : int = torch.tensor([1] , device=self.device ) __lowercase : Optional[int] = -torch.log(__a ) + torch.log(__a ) return loss def lowerCAmelCase ( self : Any , __a : Dict , __a : Optional[int] , __a : Any ) -> str: """simple docstring""" __lowercase : str = torch.randn_like(self.latent , requires_grad=__a , device=self.device ) __lowercase : Union[str, Any] = torch.optim.Adam([vector] , lr=self.lr ) for i in range(self.iterations ): optim.zero_grad() __lowercase : Any = self._add_vector(__a ) __lowercase : Dict = loop_post_process(__a ) __lowercase : Union[str, Any] = self._get_CLIP_loss(__a , __a , __a ) print("""CLIP loss""" , __a ) if self.log: wandb.log({"""CLIP Loss""": clip_loss} ) clip_loss.backward(retain_graph=__a ) optim.step() if self.return_val == "image": yield custom_to_pil(transformed_img[0] ) else: yield vector def lowerCAmelCase ( self : str , __a : str , __a : Any , __a : Optional[Any] ) -> Dict: """simple docstring""" wandb.init(reinit=__a , project="""face-editor""" ) wandb.config.update({"""Positive Prompts""": positive_prompts} ) wandb.config.update({"""Negative Prompts""": negative_prompts} ) wandb.config.update({"""lr""": self.lr, """iterations""": self.iterations} ) if image_path: __lowercase : str = Image.open(__a ) __lowercase : Optional[int] = image.resize((256, 256) ) wandb.log("""Original Image""" , wandb.Image(__a ) ) def lowerCAmelCase ( self : Union[str, Any] , __a : Tuple ) -> List[Any]: """simple docstring""" if not prompts: return [] __lowercase : List[str] = [] __lowercase : Any = [] if isinstance(__a , __a ): __lowercase : Union[str, Any] = [prompt.strip() for prompt in 
prompts.split("""|""" )] for prompt in prompts: if isinstance(__a , (tuple, list) ): __lowercase : List[Any] = prompt[0] __lowercase : Union[str, Any] = float(prompt[1] ) elif ":" in prompt: __lowercase , __lowercase : Optional[int] = prompt.split(""":""" ) __lowercase : int = float(__a ) else: __lowercase : Optional[int] = prompt __lowercase : Any = 1.0 processed_prompts.append(__a ) weights.append(__a ) return { "prompts": processed_prompts, "weights": torch.tensor(__a , device=self.device ), } def lowerCAmelCase ( self : int , __a : Tuple , __a : int=None , __a : List[str]=None , __a : Optional[int]=True , __a : str=False , __a : List[Any]=True , __a : Optional[Any]=True , __a : Dict=None , ) -> str: """simple docstring""" if image_path: __lowercase : int = self._get_latent(__a ) else: __lowercase : str = torch.randn(self.latent_dim , device=self.device ) if self.log: self._init_logging(__a , __a , __a ) assert pos_prompts, "You must provide at least one positive prompt." __lowercase : int = self.process_prompts(__a ) __lowercase : Dict = self.process_prompts(__a ) if save_final and save_path is None: __lowercase : Tuple = os.path.join("""./outputs/""" , """_""".join(pos_prompts["""prompts"""] ) ) if not os.path.exists(__a ): os.makedirs(__a ) else: __lowercase : Any = save_path + """_""" + get_timestamp() os.makedirs(__a ) __lowercase : Tuple = save_path __lowercase : Tuple = self.vqgan.decode(self.latent )[0] if show_intermediate: print("""Original Image""" ) show_pil(custom_to_pil(__a ) ) __lowercase : List[Any] = loop_post_process(__a ) for iter, transformed_img in enumerate(self._optimize_CLIP(__a , __a , __a ) ): if show_intermediate: show_pil(__a ) if save_intermediate: transformed_img.save(os.path.join(self.save_path , F"iter_{iter:03d}.png" ) ) if self.log: wandb.log({"""Image""": wandb.Image(__a )} ) if show_final: show_pil(__a ) if save_final: transformed_img.save(os.path.join(self.save_path , F"iter_{iter:03d}_final.png" ) )
style_context_codestyle: 149
label: 0
import inspect
import os
import sys
import unittest

import accelerate
from accelerate.test_utils import execute_subprocess_async, require_tpu


class MultiTPUTester(unittest.TestCase):
    def setUp(self):
        mod_file = inspect.getfile(accelerate.test_utils)
        self.test_file_path = os.path.sep.join(mod_file.split(os.path.sep)[:-1] + ["scripts", "test_script.py"])
        self.test_dir = os.path.sep.join(inspect.getfile(self.__class__).split(os.path.sep)[:-1])

    @require_tpu
    def test_tpu(self):
        distributed_args = f"""
        {self.test_dir}/xla_spawn.py
        --num_cores 8
        {self.test_file_path}
        """.split()
        cmd = [sys.executable] + distributed_args
        execute_subprocess_async(cmd, env=os.environ.copy())
code_codestyle: 602
import re
import string

import numpy as np

import datasets


_DESCRIPTION = """
Returns the rate at which the input predicted strings exactly match their references, ignoring any strings input as part of the regexes_to_ignore list.
"""

_KWARGS_DESCRIPTION = """
Args:
    predictions: List of predicted texts.
    references: List of reference texts.
    regexes_to_ignore: List, defaults to None. Regex expressions of characters to ignore when calculating the exact matches. Note: these regexes are removed from the input data before the changes based on the options below (e.g. ignore_case, ignore_punctuation, ignore_numbers) are applied.
    ignore_case: Boolean, defaults to False. If true, turns everything to lowercase so that capitalization differences are ignored.
    ignore_punctuation: Boolean, defaults to False. If true, removes all punctuation before comparing predictions and references.
    ignore_numbers: Boolean, defaults to False. If true, removes all digits before comparing predictions and references.
Returns:
    exact_match: Dictionary containing exact_match rate. Possible values are between 0.0 and 100.0, inclusive.
Examples:
    >>> exact_match = datasets.load_metric("exact_match")
    >>> refs = ["the cat", "theater", "YELLING", "agent007"]
    >>> preds = ["cat?", "theater", "yelling", "agent"]
    >>> results = exact_match.compute(references=refs, predictions=preds)
    >>> print(round(results["exact_match"], 1))
    25.0

    >>> exact_match = datasets.load_metric("exact_match")
    >>> refs = ["the cat", "theater", "YELLING", "agent007"]
    >>> preds = ["cat?", "theater", "yelling", "agent"]
    >>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=["the ", "yell"], ignore_case=True, ignore_punctuation=True)
    >>> print(round(results["exact_match"], 1))
    50.0

    >>> exact_match = datasets.load_metric("exact_match")
    >>> refs = ["the cat", "theater", "YELLING", "agent007"]
    >>> preds = ["cat?", "theater", "yelling", "agent"]
    >>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=["the ", "yell", "YELL"], ignore_case=True, ignore_punctuation=True)
    >>> print(round(results["exact_match"], 1))
    75.0

    >>> exact_match = datasets.load_metric("exact_match")
    >>> refs = ["the cat", "theater", "YELLING", "agent007"]
    >>> preds = ["cat?", "theater", "yelling", "agent"]
    >>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=["the ", "yell", "YELL"], ignore_case=True, ignore_punctuation=True, ignore_numbers=True)
    >>> print(round(results["exact_match"], 1))
    100.0

    >>> exact_match = datasets.load_metric("exact_match")
    >>> refs = ["The cat sat on the mat.", "Theaters are great.", "It's like comparing oranges and apples."]
    >>> preds = ["The cat sat on the mat?", "Theaters are great.", "It's like comparing apples and oranges."]
    >>> results = exact_match.compute(references=refs, predictions=preds)
    >>> print(round(results["exact_match"], 1))
    33.3
"""

_CITATION = """
"""


@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class ExactMatch(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("string", id="sequence"),
                    "references": datasets.Value("string", id="sequence"),
                }
            ),
            reference_urls=[],
        )

    def _compute(
        self,
        predictions,
        references,
        regexes_to_ignore=None,
        ignore_case=False,
        ignore_punctuation=False,
        ignore_numbers=False,
    ):
        if regexes_to_ignore is not None:
            for s in regexes_to_ignore:
                predictions = np.array([re.sub(s, "", x) for x in predictions])
                references = np.array([re.sub(s, "", x) for x in references])
        else:
            predictions = np.asarray(predictions)
            references = np.asarray(references)

        if ignore_case:
            predictions = np.char.lower(predictions)
            references = np.char.lower(references)

        if ignore_punctuation:
            repl_table = string.punctuation.maketrans("", "", string.punctuation)
            predictions = np.char.translate(predictions, table=repl_table)
            references = np.char.translate(references, table=repl_table)

        if ignore_numbers:
            repl_table = string.digits.maketrans("", "", string.digits)
            predictions = np.char.translate(predictions, table=repl_table)
            references = np.char.translate(references, table=repl_table)

        score_list = predictions == references

        return {"exact_match": np.mean(score_list) * 100}
style_context_codestyle: 602
label: 1
"""simple docstring""" import collections import inspect import unittest from transformers import FocalNetConfig from transformers.testing_utils import require_torch, require_vision, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_backbone_common import BackboneTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import ( FocalNetBackbone, FocalNetForImageClassification, FocalNetForMaskedImageModeling, FocalNetModel, ) from transformers.models.focalnet.modeling_focalnet import FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import AutoImageProcessor class __A : '''simple docstring''' def __init__( self : str ,_snake_case : Tuple ,_snake_case : List[str]=13 ,_snake_case : Optional[int]=32 ,_snake_case : Optional[Any]=2 ,_snake_case : Tuple=3 ,_snake_case : Any=16 ,_snake_case : List[str]=[32, 64, 128] ,_snake_case : Optional[Any]=[1, 2, 1] ,_snake_case : Optional[Any]=[2, 2, 4] ,_snake_case : List[str]=2 ,_snake_case : Dict=2.0 ,_snake_case : Union[str, Any]=True ,_snake_case : Optional[Any]=0.0 ,_snake_case : str=0.0 ,_snake_case : Dict=0.1 ,_snake_case : Any="gelu" ,_snake_case : List[str]=False ,_snake_case : str=True ,_snake_case : List[Any]=0.02 ,_snake_case : Dict=1e-5 ,_snake_case : List[Any]=True ,_snake_case : Dict=None ,_snake_case : Optional[Any]=True ,_snake_case : Optional[int]=10 ,_snake_case : Optional[int]=8 ,_snake_case : Optional[Any]=["stage1", "stage2"] ,_snake_case : Optional[int]=[1, 2] ,) -> List[Any]: """simple docstring""" lowercase__ : Optional[Any] = parent lowercase__ : str = batch_size lowercase__ : Optional[Any] = image_size lowercase__ : Optional[int] = patch_size lowercase__ : Union[str, Any] = num_channels lowercase__ : Optional[int] = embed_dim lowercase__ : List[str] = hidden_sizes lowercase__ : Dict = depths lowercase__ : List[Any] = num_heads lowercase__ : List[Any] = window_size lowercase__ : Any = mlp_ratio lowercase__ : Optional[int] = qkv_bias lowercase__ : str = hidden_dropout_prob lowercase__ : Any = attention_probs_dropout_prob lowercase__ : Optional[Any] = drop_path_rate lowercase__ : Dict = hidden_act lowercase__ : Tuple = use_absolute_embeddings lowercase__ : List[Any] = patch_norm lowercase__ : Optional[Any] = layer_norm_eps lowercase__ : Tuple = initializer_range lowercase__ : str = is_training lowercase__ : Optional[Any] = scope lowercase__ : Tuple = use_labels lowercase__ : List[Any] = type_sequence_label_size lowercase__ : Any = encoder_stride lowercase__ : Tuple = out_features lowercase__ : str = out_indices def UpperCAmelCase ( self : Dict ) -> Union[str, Any]: """simple docstring""" lowercase__ : Any = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) lowercase__ : int = None if self.use_labels: lowercase__ : List[Any] = ids_tensor([self.batch_size] ,self.type_sequence_label_size ) lowercase__ : Optional[int] = self.get_config() return config, pixel_values, labels def UpperCAmelCase ( self : List[str] ) -> str: """simple docstring""" return FocalNetConfig( image_size=self.image_size ,patch_size=self.patch_size ,num_channels=self.num_channels ,embed_dim=self.embed_dim ,hidden_sizes=self.hidden_sizes ,depths=self.depths 
,num_heads=self.num_heads ,window_size=self.window_size ,mlp_ratio=self.mlp_ratio ,qkv_bias=self.qkv_bias ,hidden_dropout_prob=self.hidden_dropout_prob ,attention_probs_dropout_prob=self.attention_probs_dropout_prob ,drop_path_rate=self.drop_path_rate ,hidden_act=self.hidden_act ,use_absolute_embeddings=self.use_absolute_embeddings ,path_norm=self.patch_norm ,layer_norm_eps=self.layer_norm_eps ,initializer_range=self.initializer_range ,encoder_stride=self.encoder_stride ,out_features=self.out_features ,out_indices=self.out_indices ,) def UpperCAmelCase ( self : str ,_snake_case : Tuple ,_snake_case : Tuple ,_snake_case : Optional[int] ) -> Dict: """simple docstring""" lowercase__ : Dict = FocalNetModel(config=_snake_case ) model.to(_snake_case ) model.eval() lowercase__ : Dict = model(_snake_case ) lowercase__ : Dict = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths ) - 1)) lowercase__ : List[Any] = int(config.embed_dim * 2 ** (len(config.depths ) - 1) ) self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, expected_seq_len, expected_dim) ) def UpperCAmelCase ( self : List[str] ,_snake_case : int ,_snake_case : int ,_snake_case : Optional[Any] ) -> Any: """simple docstring""" lowercase__ : int = FocalNetBackbone(config=_snake_case ) model.to(_snake_case ) model.eval() lowercase__ : Tuple = model(_snake_case ) # verify feature maps self.parent.assertEqual(len(result.feature_maps ) ,len(config.out_features ) ) self.parent.assertListEqual(list(result.feature_maps[0].shape ) ,[self.batch_size, self.image_size, 8, 8] ) # verify channels self.parent.assertEqual(len(model.channels ) ,len(config.out_features ) ) self.parent.assertListEqual(model.channels ,config.hidden_sizes[:-1] ) # verify backbone works with out_features=None lowercase__ : Tuple = None lowercase__ : List[str] = FocalNetBackbone(config=_snake_case ) model.to(_snake_case ) model.eval() lowercase__ : int = model(_snake_case ) # verify feature maps self.parent.assertEqual(len(result.feature_maps ) ,1 ) self.parent.assertListEqual(list(result.feature_maps[0].shape ) ,[self.batch_size, self.image_size * 2, 4, 4] ) # verify channels self.parent.assertEqual(len(model.channels ) ,1 ) self.parent.assertListEqual(model.channels ,[config.hidden_sizes[-1]] ) def UpperCAmelCase ( self : Any ,_snake_case : Union[str, Any] ,_snake_case : Any ,_snake_case : Tuple ) -> List[Any]: """simple docstring""" lowercase__ : Any = FocalNetForMaskedImageModeling(config=_snake_case ) model.to(_snake_case ) model.eval() lowercase__ : str = model(_snake_case ) self.parent.assertEqual( result.reconstruction.shape ,(self.batch_size, self.num_channels, self.image_size, self.image_size) ) # test greyscale images lowercase__ : Union[str, Any] = 1 lowercase__ : Dict = FocalNetForMaskedImageModeling(_snake_case ) model.to(_snake_case ) model.eval() lowercase__ : Any = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] ) lowercase__ : Tuple = model(_snake_case ) self.parent.assertEqual(result.reconstruction.shape ,(self.batch_size, 1, self.image_size, self.image_size) ) def UpperCAmelCase ( self : int ,_snake_case : List[Any] ,_snake_case : List[str] ,_snake_case : Any ) -> Optional[int]: """simple docstring""" lowercase__ : str = self.type_sequence_label_size lowercase__ : List[Any] = FocalNetForImageClassification(_snake_case ) model.to(_snake_case ) model.eval() lowercase__ : Optional[Any] = model(_snake_case ,labels=_snake_case ) self.parent.assertEqual(result.logits.shape ,(self.batch_size, 
self.type_sequence_label_size) ) # test greyscale images lowercase__ : List[str] = 1 lowercase__ : Dict = FocalNetForImageClassification(_snake_case ) model.to(_snake_case ) model.eval() lowercase__ : str = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] ) lowercase__ : Optional[int] = model(_snake_case ) self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.type_sequence_label_size) ) def UpperCAmelCase ( self : List[Any] ) -> Union[str, Any]: """simple docstring""" lowercase__ : Union[str, Any] = self.prepare_config_and_inputs() lowercase__ , lowercase__ , lowercase__ : Union[str, Any] = config_and_inputs lowercase__ : int = {'''pixel_values''': pixel_values} return config, inputs_dict @require_torch class __A ( A_ ,A_ ,unittest.TestCase ): '''simple docstring''' lowerCAmelCase : List[str] = ( ( FocalNetModel, FocalNetForImageClassification, FocalNetForMaskedImageModeling, FocalNetBackbone, ) if is_torch_available() else () ) lowerCAmelCase : Dict = ( {"feature-extraction": FocalNetModel, "image-classification": FocalNetForImageClassification} if is_torch_available() else {} ) lowerCAmelCase : Any = False lowerCAmelCase : Optional[int] = False lowerCAmelCase : List[str] = False lowerCAmelCase : Union[str, Any] = False lowerCAmelCase : Tuple = False def UpperCAmelCase ( self : List[Any] ) -> Tuple: """simple docstring""" lowercase__ : Union[str, Any] = FocalNetModelTester(self ) lowercase__ : Dict = ConfigTester(self ,config_class=_snake_case ,embed_dim=37 ,has_text_modality=_snake_case ) def UpperCAmelCase ( self : Any ) -> int: """simple docstring""" self.create_and_test_config_common_properties() self.config_tester.create_and_test_config_to_json_string() self.config_tester.create_and_test_config_to_json_file() self.config_tester.create_and_test_config_from_and_save_pretrained() self.config_tester.create_and_test_config_with_num_labels() self.config_tester.check_config_can_be_init_without_params() self.config_tester.check_config_arguments_init() def UpperCAmelCase ( self : Dict ) -> Dict: """simple docstring""" return def UpperCAmelCase ( self : Any ) -> Dict: """simple docstring""" lowercase__ : List[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*_snake_case ) def UpperCAmelCase ( self : int ) -> Dict: """simple docstring""" lowercase__ : Tuple = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_backbone(*_snake_case ) def UpperCAmelCase ( self : Any ) -> Optional[int]: """simple docstring""" lowercase__ : str = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_image_modeling(*_snake_case ) def UpperCAmelCase ( self : int ) -> str: """simple docstring""" lowercase__ : int = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*_snake_case ) @unittest.skip(reason='''FocalNet does not use inputs_embeds''' ) def UpperCAmelCase ( self : Optional[int] ) -> Optional[Any]: """simple docstring""" pass @unittest.skip(reason='''FocalNet does not use feedforward chunking''' ) def UpperCAmelCase ( self : str ) -> Optional[int]: """simple docstring""" pass def UpperCAmelCase ( self : Tuple ) -> Dict: """simple docstring""" lowercase__ , lowercase__ : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes[:-1]: lowercase__ : Any = model_class(_snake_case ) self.assertIsInstance(model.get_input_embeddings() ,(nn.Module) ) lowercase__ : 
int = model.get_output_embeddings() self.assertTrue(x is None or isinstance(_snake_case ,nn.Linear ) ) def UpperCAmelCase ( self : Any ) -> Optional[int]: """simple docstring""" lowercase__ , lowercase__ : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes[:-1]: lowercase__ : List[str] = model_class(_snake_case ) lowercase__ : Dict = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic lowercase__ : Tuple = [*signature.parameters.keys()] lowercase__ : str = ['''pixel_values'''] self.assertListEqual(arg_names[:1] ,_snake_case ) def UpperCAmelCase ( self : Union[str, Any] ,_snake_case : List[str] ,_snake_case : str ,_snake_case : Optional[Any] ,_snake_case : Tuple ) -> Any: """simple docstring""" lowercase__ : int = model_class(_snake_case ) model.to(_snake_case ) model.eval() with torch.no_grad(): lowercase__ : Union[str, Any] = model(**self._prepare_for_class(_snake_case ,_snake_case ) ) lowercase__ : Tuple = outputs.hidden_states lowercase__ : Union[str, Any] = getattr( self.model_tester ,'''expected_num_hidden_layers''' ,len(self.model_tester.depths ) + 1 ) self.assertEqual(len(_snake_case ) ,_snake_case ) # FocalNet has a different seq_length lowercase__ : Union[str, Any] = ( config.patch_size if isinstance(config.patch_size ,collections.abc.Iterable ) else (config.patch_size, config.patch_size) ) lowercase__ : Tuple = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0]) self.assertListEqual( list(hidden_states[0].shape[-2:] ) ,[num_patches, self.model_tester.embed_dim] ,) lowercase__ : Union[str, Any] = outputs.reshaped_hidden_states self.assertEqual(len(_snake_case ) ,_snake_case ) lowercase__ , lowercase__ , lowercase__ , lowercase__ : Dict = reshaped_hidden_states[0].shape lowercase__ : Tuple = ( reshaped_hidden_states[0].view(_snake_case ,_snake_case ,height * width ).permute(0 ,2 ,1 ) ) self.assertListEqual( list(reshaped_hidden_states.shape[-2:] ) ,[num_patches, self.model_tester.embed_dim] ,) def UpperCAmelCase ( self : Any ) -> Tuple: """simple docstring""" lowercase__ , lowercase__ : Any = self.model_tester.prepare_config_and_inputs_for_common() lowercase__ : int = ( self.model_tester.image_size if isinstance(self.model_tester.image_size ,collections.abc.Iterable ) else (self.model_tester.image_size, self.model_tester.image_size) ) for model_class in self.all_model_classes[:-1]: lowercase__ : List[str] = True self.check_hidden_states_output(_snake_case ,_snake_case ,_snake_case ,_snake_case ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] lowercase__ : Dict = True self.check_hidden_states_output(_snake_case ,_snake_case ,_snake_case ,_snake_case ) def UpperCAmelCase ( self : Dict ) -> Dict: """simple docstring""" lowercase__ , lowercase__ : List[str] = self.model_tester.prepare_config_and_inputs_for_common() lowercase__ : int = 3 lowercase__ : Tuple = ( self.model_tester.image_size if isinstance(self.model_tester.image_size ,collections.abc.Iterable ) else (self.model_tester.image_size, self.model_tester.image_size) ) lowercase__ : int = ( config.patch_size if isinstance(config.patch_size ,collections.abc.Iterable ) else (config.patch_size, config.patch_size) ) lowercase__ : int = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0]) lowercase__ : Dict = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1]) for model_class in self.all_model_classes[:-1]: lowercase__ : 
Union[str, Any] = True self.check_hidden_states_output(_snake_case ,_snake_case ,_snake_case ,(padded_height, padded_width) ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] lowercase__ : Union[str, Any] = True self.check_hidden_states_output(_snake_case ,_snake_case ,_snake_case ,(padded_height, padded_width) ) @slow def UpperCAmelCase ( self : List[str] ) -> int: """simple docstring""" for model_name in FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: lowercase__ : str = FocalNetModel.from_pretrained(_snake_case ) self.assertIsNotNone(_snake_case ) def UpperCAmelCase ( self : Tuple ) -> Tuple: """simple docstring""" lowercase__ , lowercase__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common() lowercase__ : List[Any] = _config_zero_init(_snake_case ) for model_class in self.all_model_classes: lowercase__ : int = model_class(config=_snake_case ) for name, param in model.named_parameters(): if "embeddings" not in name and param.requires_grad: self.assertIn( ((param.data.mean() * 1e9).round() / 1e9).item() ,[0.0, 1.0] ,msg=f"""Parameter {name} of model {model_class} seems not properly initialized""" ,) @require_vision @require_torch class __A ( unittest.TestCase ): '''simple docstring''' @cached_property def UpperCAmelCase ( self : Union[str, Any] ) -> List[Any]: """simple docstring""" return AutoImageProcessor.from_pretrained('''microsoft/focalnet-tiny''' ) if is_vision_available() else None @slow def UpperCAmelCase ( self : Dict ) -> Dict: """simple docstring""" lowercase__ : List[str] = FocalNetForImageClassification.from_pretrained('''microsoft/focalnet-tiny''' ).to(_snake_case ) lowercase__ : Tuple = self.default_image_processor lowercase__ : List[str] = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' ) lowercase__ : Tuple = image_processor(images=_snake_case ,return_tensors='''pt''' ).to(_snake_case ) # forward pass with torch.no_grad(): lowercase__ : Dict = model(**_snake_case ) # verify the logits lowercase__ : str = torch.Size((1, 1_000) ) self.assertEqual(outputs.logits.shape ,_snake_case ) lowercase__ : List[Any] = torch.tensor([0.2166, -0.4368, 0.2191] ).to(_snake_case ) self.assertTrue(torch.allclose(outputs.logits[0, :3] ,_snake_case ,atol=1e-4 ) ) self.assertTrue(outputs.logits.argmax(dim=-1 ).item() ,281 ) @require_torch class __A ( A_ ,unittest.TestCase ): '''simple docstring''' lowerCAmelCase : str = (FocalNetBackbone,) if is_torch_available() else () lowerCAmelCase : List[Any] = FocalNetConfig lowerCAmelCase : Union[str, Any] = False def UpperCAmelCase ( self : Optional[Any] ) -> str: """simple docstring""" lowercase__ : Any = FocalNetModelTester(self )
code_codestyle: 560
"""simple docstring""" lowerCAmelCase_ = { "joule": 1.0, "kilojoule": 1_000, "megajoule": 1_000_000, "gigajoule": 1_000_000_000, "wattsecond": 1.0, "watthour": 3_600, "kilowatthour": 3_600_000, "newtonmeter": 1.0, "calorie_nutr": 4_186.8, "kilocalorie_nutr": 4_186_800.00, "electronvolt": 1.602176634E-19, "britishthermalunit_it": 1_055.05_585, "footpound": 1.3_5_5_8_1_8, } def __UpperCAmelCase ( __lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) -> float: if to_type not in ENERGY_CONVERSION or from_type not in ENERGY_CONVERSION: lowercase__ : Optional[Any] = ( f"""Incorrect 'from_type' or 'to_type' value: {from_type!r}, {to_type!r}\n""" f"""Valid values are: {", ".join(__lowerCamelCase )}""" ) raise ValueError(__lowerCamelCase ) return value * ENERGY_CONVERSION[from_type] / ENERGY_CONVERSION[to_type] if __name__ == "__main__": import doctest doctest.testmod()
style_context_codestyle: 560
label: 1
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


_import_structure = {
    "configuration_blip_2": [
        "BLIP_2_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "Blip2Config",
        "Blip2QFormerConfig",
        "Blip2VisionConfig",
    ],
    "processing_blip_2": ["Blip2Processor"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_blip_2"] = [
        "BLIP_2_PRETRAINED_MODEL_ARCHIVE_LIST",
        "Blip2Model",
        "Blip2QFormerModel",
        "Blip2PreTrainedModel",
        "Blip2ForConditionalGeneration",
        "Blip2VisionModel",
    ]

if TYPE_CHECKING:
    from .configuration_blip_2 import (
        BLIP_2_PRETRAINED_CONFIG_ARCHIVE_MAP,
        Blip2Config,
        Blip2QFormerConfig,
        Blip2VisionConfig,
    )
    from .processing_blip_2 import Blip2Processor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_blip_2 import (
            BLIP_2_PRETRAINED_MODEL_ARCHIVE_LIST,
            Blip2ForConditionalGeneration,
            Blip2Model,
            Blip2PreTrainedModel,
            Blip2QFormerModel,
            Blip2VisionModel,
        )
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
code_codestyle: 705
import math from collections import defaultdict from typing import List, Optional, Tuple, Union import numpy as np import torch from ..configuration_utils import ConfigMixin, register_to_config from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin, SchedulerOutput def UpperCamelCase ( _a , _a=0.999 , _a="cosine" , ) -> Dict: '''simple docstring''' if alpha_transform_type == "cosine": def alpha_bar_fn(_a ): return math.cos((t + 0.008) / 1.008 * math.pi / 2 ) ** 2 elif alpha_transform_type == "exp": def alpha_bar_fn(_a ): return math.exp(t * -12.0 ) else: raise ValueError(f"Unsupported alpha_tranform_type: {alpha_transform_type}" ) lowercase_ :Any = [] for i in range(_a ): lowercase_ :List[str] = i / num_diffusion_timesteps lowercase_ :str = (i + 1) / num_diffusion_timesteps betas.append(min(1 - alpha_bar_fn(_a ) / alpha_bar_fn(_a ) , _a ) ) return torch.tensor(_a , dtype=torch.floataa ) class UpperCamelCase ( lowercase__ , lowercase__ ): '''simple docstring''' lowercase : Tuple =[e.name for e in KarrasDiffusionSchedulers] lowercase : Tuple =2 @register_to_config def __init__( self , UpperCamelCase_ = 1000 , UpperCamelCase_ = 0.0_0085 , UpperCamelCase_ = 0.012 , UpperCamelCase_ = "linear" , UpperCamelCase_ = None , UpperCamelCase_ = "epsilon" , UpperCamelCase_ = False , UpperCamelCase_ = False , UpperCamelCase_ = 1.0 , UpperCamelCase_ = "linspace" , UpperCamelCase_ = 0 , ): if trained_betas is not None: lowercase_ :int = torch.tensor(UpperCamelCase_ , dtype=torch.floataa ) elif beta_schedule == "linear": lowercase_ :List[str] = torch.linspace(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , dtype=torch.floataa ) elif beta_schedule == "scaled_linear": # this schedule is very specific to the latent diffusion model. lowercase_ :int = ( torch.linspace(beta_start**0.5 , beta_end**0.5 , UpperCamelCase_ , dtype=torch.floataa ) ** 2 ) elif beta_schedule == "squaredcos_cap_v2": # Glide cosine schedule lowercase_ :Optional[int] = betas_for_alpha_bar(UpperCamelCase_ , alpha_transform_type='''cosine''' ) elif beta_schedule == "exp": lowercase_ :Dict = betas_for_alpha_bar(UpperCamelCase_ , alpha_transform_type='''exp''' ) else: raise NotImplementedError(f"{beta_schedule} does is not implemented for {self.__class__}" ) lowercase_ :str = 1.0 - self.betas lowercase_ :Optional[int] = torch.cumprod(self.alphas , dim=0 ) # set all values self.set_timesteps(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ) lowercase_ :str = use_karras_sigmas def UpperCamelCase ( self , UpperCamelCase_ , UpperCamelCase_=None ): if schedule_timesteps is None: lowercase_ :List[str] = self.timesteps lowercase_ :int = (schedule_timesteps == timestep).nonzero() # The sigma index that is taken for the **very** first `step` # is always the second index (or the last index if there is only 1) # This way we can ensure we don't accidentally skip a sigma in # case we start in the middle of the denoising schedule (e.g. 
for image-to-image) if len(self._index_counter ) == 0: lowercase_ :Dict = 1 if len(UpperCamelCase_ ) > 1 else 0 else: lowercase_ :Any = timestep.cpu().item() if torch.is_tensor(UpperCamelCase_ ) else timestep lowercase_ :Union[str, Any] = self._index_counter[timestep_int] return indices[pos].item() @property def UpperCamelCase ( self ): # standard deviation of the initial noise distribution if self.config.timestep_spacing in ["linspace", "trailing"]: return self.sigmas.max() return (self.sigmas.max() ** 2 + 1) ** 0.5 def UpperCamelCase ( self , UpperCamelCase_ , UpperCamelCase_ , ): lowercase_ :List[str] = self.index_for_timestep(UpperCamelCase_ ) lowercase_ :Optional[Any] = self.sigmas[step_index] lowercase_ :Union[str, Any] = sample / ((sigma**2 + 1) ** 0.5) return sample def UpperCamelCase ( self , UpperCamelCase_ , UpperCamelCase_ = None , UpperCamelCase_ = None , ): lowercase_ :Optional[Any] = num_inference_steps lowercase_ :Dict = num_train_timesteps or self.config.num_train_timesteps # "linspace", "leading", "trailing" corresponds to annotation of Table 2. of https://arxiv.org/abs/2305.08891 if self.config.timestep_spacing == "linspace": lowercase_ :Union[str, Any] = np.linspace(0 , num_train_timesteps - 1 , UpperCamelCase_ , dtype=UpperCamelCase_ )[::-1].copy() elif self.config.timestep_spacing == "leading": lowercase_ :Any = num_train_timesteps // self.num_inference_steps # creates integer timesteps by multiplying by ratio # casting to int to avoid issues when num_inference_step is power of 3 lowercase_ :List[Any] = (np.arange(0 , UpperCamelCase_ ) * step_ratio).round()[::-1].copy().astype(UpperCamelCase_ ) timesteps += self.config.steps_offset elif self.config.timestep_spacing == "trailing": lowercase_ :Dict = num_train_timesteps / self.num_inference_steps # creates integer timesteps by multiplying by ratio # casting to int to avoid issues when num_inference_step is power of 3 lowercase_ :Dict = (np.arange(UpperCamelCase_ , 0 , -step_ratio )).round().copy().astype(UpperCamelCase_ ) timesteps -= 1 else: raise ValueError( f"{self.config.timestep_spacing} is not supported. Please make sure to choose one of 'linspace', 'leading' or 'trailing'." 
) lowercase_ :Optional[int] = np.array(((1 - self.alphas_cumprod) / self.alphas_cumprod) ** 0.5 ) lowercase_ :Tuple = np.log(UpperCamelCase_ ) lowercase_ :Dict = np.interp(UpperCamelCase_ , np.arange(0 , len(UpperCamelCase_ ) ) , UpperCamelCase_ ) if self.config.use_karras_sigmas: lowercase_ :int = self._convert_to_karras(in_sigmas=UpperCamelCase_ , num_inference_steps=self.num_inference_steps ) lowercase_ :Optional[int] = np.array([self._sigma_to_t(UpperCamelCase_ , UpperCamelCase_ ) for sigma in sigmas] ) lowercase_ :Tuple = np.concatenate([sigmas, [0.0]] ).astype(np.floataa ) lowercase_ :Optional[int] = torch.from_numpy(UpperCamelCase_ ).to(device=UpperCamelCase_ ) lowercase_ :Tuple = torch.cat([sigmas[:1], sigmas[1:-1].repeat_interleave(2 ), sigmas[-1:]] ) lowercase_ :Any = torch.from_numpy(UpperCamelCase_ ) lowercase_ :List[str] = torch.cat([timesteps[:1], timesteps[1:].repeat_interleave(2 )] ) if str(UpperCamelCase_ ).startswith('''mps''' ): # mps does not support float64 lowercase_ :int = timesteps.to(UpperCamelCase_ , dtype=torch.floataa ) else: lowercase_ :Optional[Any] = timesteps.to(device=UpperCamelCase_ ) # empty dt and derivative lowercase_ :List[str] = None lowercase_ :List[Any] = None # for exp beta schedules, such as the one for `pipeline_shap_e.py` # we need an index counter lowercase_ :int = defaultdict(UpperCamelCase_ ) def UpperCamelCase ( self , UpperCamelCase_ , UpperCamelCase_ ): # get log sigma lowercase_ :Union[str, Any] = np.log(UpperCamelCase_ ) # get distribution lowercase_ :Optional[Any] = log_sigma - log_sigmas[:, np.newaxis] # get sigmas range lowercase_ :List[Any] = np.cumsum((dists >= 0) , axis=0 ).argmax(axis=0 ).clip(max=log_sigmas.shape[0] - 2 ) lowercase_ :str = low_idx + 1 lowercase_ :Any = log_sigmas[low_idx] lowercase_ :int = log_sigmas[high_idx] # interpolate sigmas lowercase_ :Dict = (low - log_sigma) / (low - high) lowercase_ :str = np.clip(UpperCamelCase_ , 0 , 1 ) # transform interpolation to time range lowercase_ :Dict = (1 - w) * low_idx + w * high_idx lowercase_ :int = t.reshape(sigma.shape ) return t def UpperCamelCase ( self , UpperCamelCase_ , UpperCamelCase_ ): lowercase_ :float = in_sigmas[-1].item() lowercase_ :float = in_sigmas[0].item() lowercase_ :int = 7.0 # 7.0 is the value used in the paper lowercase_ :Optional[Any] = np.linspace(0 , 1 , UpperCamelCase_ ) lowercase_ :List[str] = sigma_min ** (1 / rho) lowercase_ :List[Any] = sigma_max ** (1 / rho) lowercase_ :Tuple = (max_inv_rho + ramp * (min_inv_rho - max_inv_rho)) ** rho return sigmas @property def UpperCamelCase ( self ): return self.dt is None def UpperCamelCase ( self , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ = True , ): lowercase_ :Any = self.index_for_timestep(UpperCamelCase_ ) # advance index counter by 1 lowercase_ :Any = timestep.cpu().item() if torch.is_tensor(UpperCamelCase_ ) else timestep self._index_counter[timestep_int] += 1 if self.state_in_first_order: lowercase_ :Optional[int] = self.sigmas[step_index] lowercase_ :List[Any] = self.sigmas[step_index + 1] else: # 2nd order / Heun's method lowercase_ :Optional[int] = self.sigmas[step_index - 1] lowercase_ :Dict = self.sigmas[step_index] # currently only gamma=0 is supported. This usually works best anyways. # We can support gamma in the future but then need to scale the timestep before # passing it to the model which requires a change in API lowercase_ :List[Any] = 0 lowercase_ :List[str] = sigma * (gamma + 1) # Note: sigma_hat == sigma for now # 1. 
compute predicted original sample (x_0) from sigma-scaled predicted noise if self.config.prediction_type == "epsilon": lowercase_ :Union[str, Any] = sigma_hat if self.state_in_first_order else sigma_next lowercase_ :List[Any] = sample - sigma_input * model_output elif self.config.prediction_type == "v_prediction": lowercase_ :Dict = sigma_hat if self.state_in_first_order else sigma_next lowercase_ :Optional[Any] = model_output * (-sigma_input / (sigma_input**2 + 1) ** 0.5) + ( sample / (sigma_input**2 + 1) ) elif self.config.prediction_type == "sample": lowercase_ :List[str] = model_output else: raise ValueError( f"prediction_type given as {self.config.prediction_type} must be one of `epsilon`, or `v_prediction`" ) if self.config.clip_sample: lowercase_ :str = pred_original_sample.clamp( -self.config.clip_sample_range , self.config.clip_sample_range ) if self.state_in_first_order: # 2. Convert to an ODE derivative for 1st order lowercase_ :Optional[int] = (sample - pred_original_sample) / sigma_hat # 3. delta timestep lowercase_ :Optional[int] = sigma_next - sigma_hat # store for 2nd order step lowercase_ :str = derivative lowercase_ :Union[str, Any] = dt lowercase_ :Optional[int] = sample else: # 2. 2nd order / Heun's method lowercase_ :str = (sample - pred_original_sample) / sigma_next lowercase_ :List[str] = (self.prev_derivative + derivative) / 2 # 3. take prev timestep & sample lowercase_ :Union[str, Any] = self.dt lowercase_ :Any = self.sample # free dt and derivative # Note, this puts the scheduler in "first order mode" lowercase_ :List[Any] = None lowercase_ :List[str] = None lowercase_ :Dict = None lowercase_ :int = sample + derivative * dt if not return_dict: return (prev_sample,) return SchedulerOutput(prev_sample=UpperCamelCase_ ) def UpperCamelCase ( self , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , ): # Make sure sigmas and timesteps have the same device and dtype as original_samples lowercase_ :List[str] = self.sigmas.to(device=original_samples.device , dtype=original_samples.dtype ) if original_samples.device.type == "mps" and torch.is_floating_point(UpperCamelCase_ ): # mps does not support float64 lowercase_ :Optional[Any] = self.timesteps.to(original_samples.device , dtype=torch.floataa ) lowercase_ :Tuple = timesteps.to(original_samples.device , dtype=torch.floataa ) else: lowercase_ :Union[str, Any] = self.timesteps.to(original_samples.device ) lowercase_ :int = timesteps.to(original_samples.device ) lowercase_ :int = [self.index_for_timestep(UpperCamelCase_ , UpperCamelCase_ ) for t in timesteps] lowercase_ :Tuple = sigmas[step_indices].flatten() while len(sigma.shape ) < len(original_samples.shape ): lowercase_ :List[str] = sigma.unsqueeze(-1 ) lowercase_ :List[str] = original_samples + noise * sigma return noisy_samples def __len__( self ): return self.config.num_train_timesteps
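When `use_karras_sigmas` is enabled above, `_convert_to_karras` warps the noise schedule with the rho-interpolation from Karras et al. (2022). A minimal, self-contained sketch of that formula (the sigma range below is illustrative, not taken from any model config):

```python
import numpy as np

def karras_sigmas(sigma_min: float, sigma_max: float, num_steps: int, rho: float = 7.0) -> np.ndarray:
    # Interpolate between sigma_max and sigma_min in rho-warped space,
    # mirroring the body of _convert_to_karras above.
    ramp = np.linspace(0, 1, num_steps)
    min_inv_rho = sigma_min ** (1 / rho)
    max_inv_rho = sigma_max ** (1 / rho)
    return (max_inv_rho + ramp * (min_inv_rho - max_inv_rho)) ** rho

print(karras_sigmas(0.002, 80.0, 5))  # descending sigmas, from 80.0 down to 0.002
```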
"""simple docstring""" import argparse import shutil from pathlib import Path from tqdm import tqdm from transformers import AutoTokenizer def _lowerCamelCase ( UpperCAmelCase_ : Optional[int], UpperCAmelCase_ : Tuple, UpperCAmelCase_ : str, UpperCAmelCase_ : Any=1024 ) -> List[Any]: """simple docstring""" A__ , A__ = [], [] A__ = list(zip(UpperCAmelCase_, UpperCAmelCase_ ) ) A__ , A__ = sorted_examples[0] def is_too_big(UpperCAmelCase_ : str ): return tok(UpperCAmelCase_, return_tensors="pt" ).input_ids.shape[1] > max_tokens for src, tgt in tqdm(sorted_examples[1:] ): A__ = new_src + " " + src A__ = new_tgt + " " + tgt if is_too_big(UpperCAmelCase_ ) or is_too_big(UpperCAmelCase_ ): # cant fit, finalize example finished_src.append(UpperCAmelCase_ ) finished_tgt.append(UpperCAmelCase_ ) A__ , A__ = src, tgt else: # can fit, keep adding A__ , A__ = cand_src, cand_tgt # cleanup if new_src: assert new_tgt finished_src.append(UpperCAmelCase_ ) finished_tgt.append(UpperCAmelCase_ ) return finished_src, finished_tgt def _lowerCamelCase ( UpperCAmelCase_ : Union[str, Any], UpperCAmelCase_ : Path, UpperCAmelCase_ : int, UpperCAmelCase_ : Dict ) -> str: """simple docstring""" A__ = Path(UpperCAmelCase_ ) save_path.mkdir(exist_ok=UpperCAmelCase_ ) for split in ["train"]: A__ , A__ = data_dir / F"""{split}.source""", data_dir / F"""{split}.target""" A__ = [x.rstrip() for x in Path(UpperCAmelCase_ ).open().readlines()] A__ = [x.rstrip() for x in Path(UpperCAmelCase_ ).open().readlines()] A__ , A__ = pack_examples(UpperCAmelCase_, UpperCAmelCase_, UpperCAmelCase_, UpperCAmelCase_ ) print(F"""packed {split} split from {len(UpperCAmelCase_ )} examples -> {len(UpperCAmelCase_ )}.""" ) Path(save_path / F"""{split}.source""" ).open("w" ).write("\n".join(UpperCAmelCase_ ) ) Path(save_path / F"""{split}.target""" ).open("w" ).write("\n".join(UpperCAmelCase_ ) ) for split in ["val", "test"]: A__ , A__ = data_dir / F"""{split}.source""", data_dir / F"""{split}.target""" shutil.copyfile(UpperCAmelCase_, save_path / F"""{split}.source""" ) shutil.copyfile(UpperCAmelCase_, save_path / F"""{split}.target""" ) def _lowerCamelCase ( ) -> Union[str, Any]: """simple docstring""" A__ = argparse.ArgumentParser() parser.add_argument("--tok_name", type=UpperCAmelCase_, help="like facebook/bart-large-cnn,t5-base, etc." ) parser.add_argument("--max_seq_len", type=UpperCAmelCase_, default=128 ) parser.add_argument("--data_dir", type=UpperCAmelCase_ ) parser.add_argument("--save_path", type=UpperCAmelCase_ ) A__ = parser.parse_args() A__ = AutoTokenizer.from_pretrained(args.tok_name ) return pack_data_dir(UpperCAmelCase_, Path(args.data_dir ), args.max_seq_len, args.save_path ) if __name__ == "__main__": packer_cli()
from collections import OrderedDict from typing import Mapping from packaging import version from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging SCREAMING_SNAKE_CASE__ : List[Any] = logging.get_logger(__name__) SCREAMING_SNAKE_CASE__ : Optional[int] = { 'google/vit-base-patch16-224': 'https://huggingface.co/vit-base-patch16-224/resolve/main/config.json', # See all ViT models at https://huggingface.co/models?filter=vit } class UpperCamelCase__ (lowerCAmelCase__ ): '''simple docstring''' lowerCamelCase_ : Union[str, Any] = """vit""" def __init__( self , UpperCamelCase__=768 , UpperCamelCase__=12 , UpperCamelCase__=12 , UpperCamelCase__=3072 , UpperCamelCase__="gelu" , UpperCamelCase__=0.0 , UpperCamelCase__=0.0 , UpperCamelCase__=0.02 , UpperCamelCase__=1e-12 , UpperCamelCase__=224 , UpperCamelCase__=16 , UpperCamelCase__=3 , UpperCamelCase__=True , UpperCamelCase__=16 , **UpperCamelCase__ , ) -> Union[str, Any]: super().__init__(**UpperCamelCase__ ) lowerCamelCase : Union[str, Any] = hidden_size lowerCamelCase : List[str] = num_hidden_layers lowerCamelCase : Union[str, Any] = num_attention_heads lowerCamelCase : int = intermediate_size lowerCamelCase : Optional[Any] = hidden_act lowerCamelCase : int = hidden_dropout_prob lowerCamelCase : Optional[int] = attention_probs_dropout_prob lowerCamelCase : List[str] = initializer_range lowerCamelCase : Optional[int] = layer_norm_eps lowerCamelCase : List[Any] = image_size lowerCamelCase : Union[str, Any] = patch_size lowerCamelCase : Tuple = num_channels lowerCamelCase : Union[str, Any] = qkv_bias lowerCamelCase : Union[str, Any] = encoder_stride class UpperCamelCase__ (lowerCAmelCase__ ): '''simple docstring''' lowerCamelCase_ : Any = version.parse("""1.11""" ) @property def _lowercase ( self ) -> Mapping[str, Mapping[int, str]]: return OrderedDict( [ ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}), ] ) @property def _lowercase ( self ) -> float: return 1e-4
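A short sketch of how the `ViTConfig` defined above determines the sequence length seen by the transformer; `ViTConfig` is the public `transformers` class this row defines:

```python
from transformers import ViTConfig

config = ViTConfig(image_size=224, patch_size=16, num_channels=3)
num_patches = (config.image_size // config.patch_size) ** 2
print(num_patches)  # 196 patch tokens (the model prepends one [CLS] token)
```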
from collections import OrderedDict from typing import Mapping from packaging import version from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging lowerCamelCase =logging.get_logger(__name__) lowerCamelCase ={ "sail/poolformer_s12": "https://huggingface.co/sail/poolformer_s12/resolve/main/config.json", # See all PoolFormer models at https://huggingface.co/models?filter=poolformer } class _lowerCamelCase ( UpperCamelCase_ ): """simple docstring""" SCREAMING_SNAKE_CASE_ = '''poolformer''' def __init__( self , __SCREAMING_SNAKE_CASE=3 , __SCREAMING_SNAKE_CASE=1_6 , __SCREAMING_SNAKE_CASE=1_6 , __SCREAMING_SNAKE_CASE=3 , __SCREAMING_SNAKE_CASE=4.0 , __SCREAMING_SNAKE_CASE=[2, 2, 6, 2] , __SCREAMING_SNAKE_CASE=[6_4, 1_2_8, 3_2_0, 5_1_2] , __SCREAMING_SNAKE_CASE=[7, 3, 3, 3] , __SCREAMING_SNAKE_CASE=[4, 2, 2, 2] , __SCREAMING_SNAKE_CASE=[2, 1, 1, 1] , __SCREAMING_SNAKE_CASE=4 , __SCREAMING_SNAKE_CASE=0.0 , __SCREAMING_SNAKE_CASE="gelu" , __SCREAMING_SNAKE_CASE=True , __SCREAMING_SNAKE_CASE=1e-5 , __SCREAMING_SNAKE_CASE=0.02 , **__SCREAMING_SNAKE_CASE , ) -> str: """simple docstring""" UpperCamelCase__ : Optional[int] = num_channels UpperCamelCase__ : List[Any] = patch_size UpperCamelCase__ : Optional[Any] = stride UpperCamelCase__ : Dict = padding UpperCamelCase__ : Optional[int] = pool_size UpperCamelCase__ : List[str] = hidden_sizes UpperCamelCase__ : Tuple = mlp_ratio UpperCamelCase__ : List[Any] = depths UpperCamelCase__ : Any = patch_sizes UpperCamelCase__ : Tuple = strides UpperCamelCase__ : Dict = num_encoder_blocks UpperCamelCase__ : Optional[Any] = drop_path_rate UpperCamelCase__ : Any = hidden_act UpperCamelCase__ : Dict = use_layer_scale UpperCamelCase__ : Optional[int] = layer_scale_init_value UpperCamelCase__ : List[str] = initializer_range super().__init__(**__SCREAMING_SNAKE_CASE ) class _lowerCamelCase ( UpperCamelCase_ ): """simple docstring""" SCREAMING_SNAKE_CASE_ = version.parse('''1.11''' ) @property def __SCREAMING_SNAKE_CASE ( self ) -> Mapping[str, Mapping[int, str]]: """simple docstring""" return OrderedDict( [ ('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}), ] ) @property def __SCREAMING_SNAKE_CASE ( self ) -> float: """simple docstring""" return 2e-3
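The `patch_sizes`, `strides`, and `padding` defaults above fix the spatial resolution of each PoolFormer stage. A quick check of the resulting feature-map sizes for a 224x224 input, using plain convolution arithmetic with the default values shown:

```python
resolution = 224
for patch, stride, pad in zip([7, 3, 3, 3], [4, 2, 2, 2], [2, 1, 1, 1]):
    resolution = (resolution + 2 * pad - patch) // stride + 1
    print(resolution)  # 56, 28, 14, 7 for the four stages
```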
from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_tokenizers_available, is_torch_available, ) lowerCamelCase ={ "configuration_mobilebert": [ "MOBILEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "MobileBertConfig", "MobileBertOnnxConfig", ], "tokenization_mobilebert": ["MobileBertTokenizer"], } try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCamelCase =["MobileBertTokenizerFast"] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCamelCase =[ "MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST", "MobileBertForMaskedLM", "MobileBertForMultipleChoice", "MobileBertForNextSentencePrediction", "MobileBertForPreTraining", "MobileBertForQuestionAnswering", "MobileBertForSequenceClassification", "MobileBertForTokenClassification", "MobileBertLayer", "MobileBertModel", "MobileBertPreTrainedModel", "load_tf_weights_in_mobilebert", ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCamelCase =[ "TF_MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST", "TFMobileBertForMaskedLM", "TFMobileBertForMultipleChoice", "TFMobileBertForNextSentencePrediction", "TFMobileBertForPreTraining", "TFMobileBertForQuestionAnswering", "TFMobileBertForSequenceClassification", "TFMobileBertForTokenClassification", "TFMobileBertMainLayer", "TFMobileBertModel", "TFMobileBertPreTrainedModel", ] if TYPE_CHECKING: from .configuration_mobilebert import ( MOBILEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, MobileBertConfig, MobileBertOnnxConfig, ) from .tokenization_mobilebert import MobileBertTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_mobilebert_fast import MobileBertTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_mobilebert import ( MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST, MobileBertForMaskedLM, MobileBertForMultipleChoice, MobileBertForNextSentencePrediction, MobileBertForPreTraining, MobileBertForQuestionAnswering, MobileBertForSequenceClassification, MobileBertForTokenClassification, MobileBertLayer, MobileBertModel, MobileBertPreTrainedModel, load_tf_weights_in_mobilebert, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_mobilebert import ( TF_MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST, TFMobileBertForMaskedLM, TFMobileBertForMultipleChoice, TFMobileBertForNextSentencePrediction, TFMobileBertForPreTraining, TFMobileBertForQuestionAnswering, TFMobileBertForSequenceClassification, TFMobileBertForTokenClassification, TFMobileBertMainLayer, TFMobileBertModel, TFMobileBertPreTrainedModel, ) else: import sys lowerCamelCase =_LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
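`_LazyModule` above defers the heavy framework imports until a symbol is first accessed. A minimal illustration of the same idea with `importlib` (a sketch of the pattern only, not the transformers implementation):

```python
import importlib

class LazyAttr:
    def __init__(self, module_name: str):
        self._module_name = module_name
        self._module = None

    def __getattr__(self, name):
        if self._module is None:  # the import happens only on first attribute access
            self._module = importlib.import_module(self._module_name)
        return getattr(self._module, name)

json_mod = LazyAttr("json")
print(json_mod.dumps({"lazy": True}))  # triggers the real import here
```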
"""simple docstring""" import argparse import json from pathlib import Path import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import YolosConfig, YolosForObjectDetection, YolosImageProcessor from transformers.utils import logging logging.set_verbosity_info() UpperCAmelCase = logging.get_logger(__name__) def _snake_case ( __snake_case : str ): """simple docstring""" _lowerCamelCase : int = YolosConfig() # size of the architecture if "yolos_ti" in yolos_name: _lowerCamelCase : Union[str, Any] = 192 _lowerCamelCase : int = 768 _lowerCamelCase : Optional[Any] = 12 _lowerCamelCase : Optional[int] = 3 _lowerCamelCase : str = [800, 1333] _lowerCamelCase : Dict = False elif yolos_name == "yolos_s_dWr": _lowerCamelCase : List[str] = 330 _lowerCamelCase : Tuple = 14 _lowerCamelCase : List[Any] = 6 _lowerCamelCase : Optional[int] = 1320 elif "yolos_s" in yolos_name: _lowerCamelCase : int = 384 _lowerCamelCase : Optional[Any] = 1536 _lowerCamelCase : Union[str, Any] = 12 _lowerCamelCase : Any = 6 elif "yolos_b" in yolos_name: _lowerCamelCase : List[Any] = [800, 1344] _lowerCamelCase : Dict = 91 _lowerCamelCase : int = """huggingface/label-files""" _lowerCamelCase : List[Any] = """coco-detection-id2label.json""" _lowerCamelCase : Optional[Any] = json.load(open(hf_hub_download(__snake_case , __snake_case , repo_type="""dataset""" ) , """r""" ) ) _lowerCamelCase : Optional[Any] = {int(__snake_case ): v for k, v in idalabel.items()} _lowerCamelCase : int = idalabel _lowerCamelCase : Optional[Any] = {v: k for k, v in idalabel.items()} return config def _snake_case ( __snake_case : dict , __snake_case : YolosConfig , __snake_case : bool = False ): """simple docstring""" for i in range(config.num_hidden_layers ): # read in weights + bias of input projection layer (in timm, this is a single matrix + bias) _lowerCamelCase : int = state_dict.pop(F'blocks.{i}.attn.qkv.weight' ) _lowerCamelCase : Any = state_dict.pop(F'blocks.{i}.attn.qkv.bias' ) # next, add query, keys and values (in that order) to the state dict _lowerCamelCase : Tuple = in_proj_weight[: config.hidden_size, :] _lowerCamelCase : Optional[int] = in_proj_bias[: config.hidden_size] _lowerCamelCase : List[str] = in_proj_weight[ config.hidden_size : config.hidden_size * 2, : ] _lowerCamelCase : int = in_proj_bias[ config.hidden_size : config.hidden_size * 2 ] _lowerCamelCase : int = in_proj_weight[-config.hidden_size :, :] _lowerCamelCase : Dict = in_proj_bias[-config.hidden_size :] def _snake_case ( __snake_case : str ): """simple docstring""" if "backbone" in name: _lowerCamelCase : Optional[Any] = name.replace("""backbone""" , """vit""" ) if "cls_token" in name: _lowerCamelCase : str = name.replace("""cls_token""" , """embeddings.cls_token""" ) if "det_token" in name: _lowerCamelCase : str = name.replace("""det_token""" , """embeddings.detection_tokens""" ) if "mid_pos_embed" in name: _lowerCamelCase : Any = name.replace("""mid_pos_embed""" , """encoder.mid_position_embeddings""" ) if "pos_embed" in name: _lowerCamelCase : Dict = name.replace("""pos_embed""" , """embeddings.position_embeddings""" ) if "patch_embed.proj" in name: _lowerCamelCase : Union[str, Any] = name.replace("""patch_embed.proj""" , """embeddings.patch_embeddings.projection""" ) if "blocks" in name: _lowerCamelCase : int = name.replace("""blocks""" , """encoder.layer""" ) if "attn.proj" in name: _lowerCamelCase : Any = name.replace("""attn.proj""" , """attention.output.dense""" ) if "attn" in name: _lowerCamelCase : 
Any = name.replace("""attn""" , """attention.self""" ) if "norm1" in name: _lowerCamelCase : str = name.replace("""norm1""" , """layernorm_before""" ) if "norm2" in name: _lowerCamelCase : List[str] = name.replace("""norm2""" , """layernorm_after""" ) if "mlp.fc1" in name: _lowerCamelCase : List[Any] = name.replace("""mlp.fc1""" , """intermediate.dense""" ) if "mlp.fc2" in name: _lowerCamelCase : Optional[Any] = name.replace("""mlp.fc2""" , """output.dense""" ) if "class_embed" in name: _lowerCamelCase : Any = name.replace("""class_embed""" , """class_labels_classifier""" ) if "bbox_embed" in name: _lowerCamelCase : Optional[int] = name.replace("""bbox_embed""" , """bbox_predictor""" ) if "vit.norm" in name: _lowerCamelCase : str = name.replace("""vit.norm""" , """vit.layernorm""" ) return name def _snake_case ( __snake_case : dict , __snake_case : YolosForObjectDetection ): """simple docstring""" for key in orig_state_dict.copy().keys(): _lowerCamelCase : List[str] = orig_state_dict.pop(__snake_case ) if "qkv" in key: _lowerCamelCase : Any = key.split(""".""" ) _lowerCamelCase : Dict = int(key_split[2] ) _lowerCamelCase : Optional[int] = model.vit.encoder.layer[layer_num].attention.attention.all_head_size if "weight" in key: _lowerCamelCase : Dict = val[:dim, :] _lowerCamelCase : str = val[ dim : dim * 2, : ] _lowerCamelCase : Union[str, Any] = val[-dim:, :] else: _lowerCamelCase : Optional[Any] = val[:dim] _lowerCamelCase : Optional[Any] = val[dim : dim * 2] _lowerCamelCase : List[str] = val[-dim:] else: _lowerCamelCase : int = val return orig_state_dict def _snake_case ( ): """simple docstring""" _lowerCamelCase : List[Any] = """http://images.cocodataset.org/val2017/000000039769.jpg""" _lowerCamelCase : Any = Image.open(requests.get(__snake_case , stream=__snake_case ).raw ) return im @torch.no_grad() def _snake_case ( __snake_case : str , __snake_case : str , __snake_case : str , __snake_case : bool = False ): """simple docstring""" _lowerCamelCase : str = get_yolos_config(__snake_case ) # load original state_dict _lowerCamelCase : Any = torch.load(__snake_case , map_location="""cpu""" )["""model"""] # load 🤗 model _lowerCamelCase : Dict = YolosForObjectDetection(__snake_case ) model.eval() _lowerCamelCase : Optional[Any] = convert_state_dict(__snake_case , __snake_case ) model.load_state_dict(__snake_case ) # Check outputs on an image, prepared by YolosImageProcessor _lowerCamelCase : List[Any] = 800 if yolos_name != """yolos_ti""" else 512 _lowerCamelCase : str = YolosImageProcessor(format="""coco_detection""" , size=__snake_case ) _lowerCamelCase : List[str] = image_processor(images=prepare_img() , return_tensors="""pt""" ) _lowerCamelCase : Optional[Any] = model(**__snake_case ) _lowerCamelCase , _lowerCamelCase : List[Any] = outputs.logits, outputs.pred_boxes _lowerCamelCase , _lowerCamelCase : Tuple = None, None if yolos_name == "yolos_ti": _lowerCamelCase : Union[str, Any] = torch.tensor( [[-39.5022, -11.9820, -17.6888], [-29.9574, -9.9769, -17.7691], [-42.3281, -20.7200, -30.6294]] ) _lowerCamelCase : Any = torch.tensor( [[0.4021, 0.0836, 0.7979], [0.0184, 0.2609, 0.0364], [0.1781, 0.2004, 0.2095]] ) elif yolos_name == "yolos_s_200_pre": _lowerCamelCase : List[str] = torch.tensor( [[-24.0248, -10.3024, -14.8290], [-42.0392, -16.8200, -27.4334], [-27.2743, -11.8154, -18.7148]] ) _lowerCamelCase : int = torch.tensor( [[0.2559, 0.5455, 0.4706], [0.2989, 0.7279, 0.1875], [0.7732, 0.4017, 0.4462]] ) elif yolos_name == "yolos_s_300_pre": _lowerCamelCase : Any = torch.tensor( 
[[-36.2220, -14.4385, -23.5457], [-35.6970, -14.7583, -21.3935], [-31.5939, -13.6042, -16.8049]] ) _lowerCamelCase : List[str] = torch.tensor( [[0.7614, 0.2316, 0.4728], [0.7168, 0.4495, 0.3855], [0.4996, 0.1466, 0.9996]] ) elif yolos_name == "yolos_s_dWr": _lowerCamelCase : Optional[int] = torch.tensor( [[-42.8668, -24.1049, -41.1690], [-34.7456, -14.1274, -24.9194], [-33.7898, -12.1946, -25.6495]] ) _lowerCamelCase : Optional[int] = torch.tensor( [[0.5587, 0.2773, 0.0605], [0.5004, 0.3014, 0.9994], [0.4999, 0.1548, 0.9994]] ) elif yolos_name == "yolos_base": _lowerCamelCase : Union[str, Any] = torch.tensor( [[-40.6064, -24.3084, -32.6447], [-55.1990, -30.7719, -35.5877], [-51.4311, -33.3507, -35.6462]] ) _lowerCamelCase : List[Any] = torch.tensor( [[0.5555, 0.2794, 0.0655], [0.9049, 0.2664, 0.1894], [0.9183, 0.1984, 0.1635]] ) else: raise ValueError(F'Unknown yolos_name: {yolos_name}' ) assert torch.allclose(logits[0, :3, :3] , __snake_case , atol=1E-4 ) assert torch.allclose(pred_boxes[0, :3, :3] , __snake_case , atol=1E-4 ) Path(__snake_case ).mkdir(exist_ok=__snake_case ) print(F'Saving model {yolos_name} to {pytorch_dump_folder_path}' ) model.save_pretrained(__snake_case ) print(F'Saving image processor to {pytorch_dump_folder_path}' ) image_processor.save_pretrained(__snake_case ) if push_to_hub: _lowerCamelCase : Any = { """yolos_ti""": """yolos-tiny""", """yolos_s_200_pre""": """yolos-small""", """yolos_s_300_pre""": """yolos-small-300""", """yolos_s_dWr""": """yolos-small-dwr""", """yolos_base""": """yolos-base""", } print("""Pushing to the hub...""" ) _lowerCamelCase : List[str] = model_mapping[yolos_name] image_processor.push_to_hub(__snake_case , organization="""hustvl""" ) model.push_to_hub(__snake_case , organization="""hustvl""" ) if __name__ == "__main__": UpperCAmelCase = argparse.ArgumentParser() # Required parameters parser.add_argument( """--yolos_name""", default="""yolos_s_200_pre""", type=str, help=( """Name of the YOLOS model you'd like to convert. Should be one of 'yolos_ti', 'yolos_s_200_pre',""" """ 'yolos_s_300_pre', 'yolos_s_dWr', 'yolos_base'.""" ), ) parser.add_argument( """--checkpoint_path""", default=None, type=str, help="""Path to the original state dict (.pth file).""" ) parser.add_argument( """--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory.""" ) parser.add_argument( """--push_to_hub""", action="""store_true""", help="""Whether or not to push the converted model to the 🤗 hub.""" ) UpperCAmelCase = parser.parse_args() convert_yolos_checkpoint(args.yolos_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub)
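The conversion above repeatedly splits timm's fused `qkv` projection into separate query/key/value weights by row slices. The slicing invariant in miniature, with a toy hidden size and random weights:

```python
import torch

hidden = 4
in_proj_weight = torch.randn(3 * hidden, hidden)  # stacked [q; k; v] rows
q_w = in_proj_weight[:hidden, :]
k_w = in_proj_weight[hidden : 2 * hidden, :]
v_w = in_proj_weight[-hidden:, :]
assert torch.equal(torch.cat([q_w, k_w, v_w]), in_proj_weight)
```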
import json import logging import os import sys from pathlib import Path import finetune_rag from transformers.file_utils import is_apex_available from transformers.testing_utils import ( TestCasePlus, execute_subprocess_async, require_ray, require_torch_gpu, require_torch_multi_gpu, ) logging.basicConfig(level=logging.DEBUG) A = logging.getLogger() A = logging.StreamHandler(sys.stdout) logger.addHandler(stream_handler) class SCREAMING_SNAKE_CASE ( __snake_case ): """simple docstring""" def __lowerCAmelCase ( self , __UpperCamelCase ): """simple docstring""" os.makedirs(__UpperCamelCase , exist_ok=__UpperCamelCase ) snake_case_ = {'source': 'What is love ?', 'target': 'life'} snake_case_ = {'train': 12, 'val': 2, 'test': 2} for split in ["train", "test", "val"]: for field in ["source", "target"]: snake_case_ = '\n'.join([contents[field]] * n_lines[split] ) with open(os.path.join(__UpperCamelCase , f"""{split}.{field}""" ) , 'w' ) as f: f.write(__UpperCamelCase ) def __lowerCAmelCase ( self , __UpperCamelCase , __UpperCamelCase = "pytorch" ): """simple docstring""" snake_case_ = self.get_auto_remove_tmp_dir() snake_case_ = os.path.join(__UpperCamelCase , 'output' ) snake_case_ = os.path.join(__UpperCamelCase , 'data' ) self._create_dummy_data(data_dir=__UpperCamelCase ) snake_case_ = f""" --data_dir {data_dir} \ --output_dir {output_dir} \ --model_name_or_path facebook/rag-sequence-base \ --model_type rag_sequence \ --do_train \ --do_predict \ --n_val -1 \ --val_check_interval 1.0 \ --train_batch_size 2 \ --eval_batch_size 1 \ --max_source_length 25 \ --max_target_length 25 \ --val_max_target_length 25 \ --test_max_target_length 25 \ --label_smoothing 0.1 \ --dropout 0.1 \ --attention_dropout 0.1 \ --weight_decay 0.001 \ --adam_epsilon 1e-08 \ --max_grad_norm 0.1 \ --lr_scheduler polynomial \ --learning_rate 3e-04 \ --num_train_epochs 1 \ --warmup_steps 4 \ --gradient_accumulation_steps 1 \ --distributed-port 8787 \ --use_dummy_dataset 1 \ --distributed_retriever {distributed_retriever} \ """.split() if gpus > 0: testargs.append(f"""--gpus={gpus}""" ) if is_apex_available(): testargs.append('--fp16' ) else: testargs.append('--gpus=0' ) testargs.append('--distributed_backend=ddp_cpu' ) testargs.append('--num_processes=2' ) snake_case_ = [sys.executable, str(Path(finetune_rag.__file__ ).resolve() )] + testargs execute_subprocess_async(__UpperCamelCase , env=self.get_env() ) snake_case_ = os.path.join(__UpperCamelCase , 'metrics.json' ) with open(__UpperCamelCase ) as f: snake_case_ = json.load(__UpperCamelCase ) return result @require_torch_gpu def __lowerCAmelCase ( self ): """simple docstring""" snake_case_ = self._run_finetune(gpus=1 ) self.assertGreaterEqual(result['test'][0]['test_avg_em'] , 0.2 ) @require_torch_multi_gpu def __lowerCAmelCase ( self ): """simple docstring""" snake_case_ = self._run_finetune(gpus=2 ) self.assertGreaterEqual(result['test'][0]['test_avg_em'] , 0.2 ) @require_torch_gpu @require_ray def __lowerCAmelCase ( self ): """simple docstring""" snake_case_ = self._run_finetune(gpus=1 , distributed_retriever='ray' ) self.assertGreaterEqual(result['test'][0]['test_avg_em'] , 0.2 ) @require_torch_multi_gpu @require_ray def __lowerCAmelCase ( self ): """simple docstring""" snake_case_ = self._run_finetune(gpus=1 , distributed_retriever='ray' ) self.assertGreaterEqual(result['test'][0]['test_avg_em'] , 0.2 )
"""simple docstring""" from math import factorial, radians def lowercase (snake_case__ : float , snake_case__ : int = 18 , snake_case__ : int = 10 ) -> List[str]: '''simple docstring''' lowerCAmelCase = angle_in_degrees - ((angle_in_degrees // 360.0) * 360.0) # Converting from degrees to radians lowerCAmelCase = radians(__lowerCAmelCase ) lowerCAmelCase = angle_in_radians lowerCAmelCase = 3 lowerCAmelCase = -1 for _ in range(__lowerCAmelCase ): result += (b * (angle_in_radians**a)) / factorial(__lowerCAmelCase ) lowerCAmelCase = -b # One positive term and the next will be negative and so on... a += 2 # Increased by 2 for every term. return round(__lowerCAmelCase , __lowerCAmelCase ) if __name__ == "__main__": __import__('doctest').testmod()
"""simple docstring""" import random import timeit from functools import wraps from typing import Callable, Optional from ..configuration_utils import PretrainedConfig from ..models.auto.modeling_tf_auto import TF_MODEL_MAPPING, TF_MODEL_WITH_LM_HEAD_MAPPING from ..utils import is_pyanvml_available, is_tf_available, logging from .benchmark_utils import ( Benchmark, Memory, MemorySummary, measure_peak_memory_cpu, start_memory_tracing, stop_memory_tracing, ) if is_tf_available(): import tensorflow as tf from tensorflow.python.framework.errors_impl import ResourceExhaustedError from .benchmark_args_tf import TensorFlowBenchmarkArguments if is_pyanvml_available(): import pyanvml.pyanvml as nvml a = logging.get_logger(__name__) def lowercase (snake_case__ : bool , snake_case__ : bool ) -> Tuple: '''simple docstring''' def run_func(snake_case__ : Any ): @wraps(snake_case__ ) def run_in_eager_mode(*snake_case__ : Optional[Any] , **snake_case__ : int ): return func(*snake_case__ , **snake_case__ ) @wraps(snake_case__ ) @tf.function(experimental_compile=snake_case__ ) def run_in_graph_mode(*snake_case__ : int , **snake_case__ : Tuple ): return func(*snake_case__ , **snake_case__ ) if do_eager_mode is True: if use_xla is not False: raise ValueError( """Cannot run model in XLA, if `args.eager_mode` is set to `True`. Please set `args.eager_mode=False`.""" ) return run_in_eager_mode else: return run_in_graph_mode return run_func def lowercase (snake_case__ : int , snake_case__ : int , snake_case__ : int ) -> ["tf.Tensor"]: '''simple docstring''' lowerCAmelCase = random.Random() lowerCAmelCase = [rng.randint(0 , vocab_size - 1 ) for i in range(batch_size * sequence_length )] return tf.constant(snake_case__ , shape=(batch_size, sequence_length) , dtype=tf.intaa ) class SCREAMING_SNAKE_CASE__ ( _a ): _a = 42 _a = 42 _a = "TensorFlow" @property def __lowercase ( self : Optional[int] ): return tf.__version__ def __lowercase ( self : Optional[Any] , lowerCAmelCase : str , lowerCAmelCase : int , lowerCAmelCase : int ): # initialize GPU on separate process lowerCAmelCase = self.args.strategy if strategy is None: raise ValueError("""A device strategy has to be initialized before using TensorFlow.""" ) lowerCAmelCase = self._prepare_inference_func(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ) return self._measure_speed(_inference ) def __lowercase ( self : str , lowerCAmelCase : str , lowerCAmelCase : int , lowerCAmelCase : int ): lowerCAmelCase = self.args.strategy if strategy is None: raise ValueError("""A device strategy has to be initialized before using TensorFlow.""" ) lowerCAmelCase = self._prepare_train_func(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ) return self._measure_speed(_train ) def __lowercase ( self : str , lowerCAmelCase : str , lowerCAmelCase : int , lowerCAmelCase : int ): # initialize GPU on separate process if self.args.is_gpu: tf.config.experimental.set_memory_growth(self.args.gpu_list[self.args.device_idx] , lowerCAmelCase ) lowerCAmelCase = self.args.strategy if strategy is None: raise ValueError("""A device strategy has to be initialized before using TensorFlow.""" ) lowerCAmelCase = self._prepare_inference_func(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ) return self._measure_memory(_inference ) def __lowercase ( self : int , lowerCAmelCase : str , lowerCAmelCase : int , lowerCAmelCase : int ): if self.args.is_gpu: tf.config.experimental.set_memory_growth(self.args.gpu_list[self.args.device_idx] , lowerCAmelCase ) lowerCAmelCase = self.args.strategy if strategy is 
None: raise ValueError("""A device strategy has to be initialized before using TensorFlow.""" ) lowerCAmelCase = self._prepare_train_func(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ) return self._measure_memory(_train ) def __lowercase ( self : List[str] , lowerCAmelCase : str , lowerCAmelCase : int , lowerCAmelCase : int ): lowerCAmelCase = self.config_dict[model_name] if self.args.fpaa: raise NotImplementedError("""Mixed precision is currently not supported.""" ) lowerCAmelCase = ( hasattr(lowerCAmelCase , """architectures""" ) and isinstance(config.architectures , lowerCAmelCase ) and len(config.architectures ) > 0 ) if not self.args.only_pretrain_model and has_model_class_in_config: try: lowerCAmelCase = """TF""" + config.architectures[0] # prepend 'TF' for tensorflow model lowerCAmelCase = __import__("""transformers""" , fromlist=[model_class] ) lowerCAmelCase = getattr(lowerCAmelCase , lowerCAmelCase ) lowerCAmelCase = model_cls(lowerCAmelCase ) except ImportError: raise ImportError( f'''{model_class} does not exist. If you just want to test the pretrained model, you might want to''' """ set `--only_pretrain_model` or `args.only_pretrain_model=True`.""" ) else: lowerCAmelCase = TF_MODEL_MAPPING[config.__class__](lowerCAmelCase ) # encoder-decoder has vocab size saved differently lowerCAmelCase = config.vocab_size if hasattr(lowerCAmelCase , """vocab_size""" ) else config.encoder.vocab_size lowerCAmelCase = random_input_ids(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ) @run_with_tf_optimizations(self.args.eager_mode , self.args.use_xla ) def encoder_decoder_forward(): return model(lowerCAmelCase , decoder_input_ids=lowerCAmelCase , training=lowerCAmelCase ) @run_with_tf_optimizations(self.args.eager_mode , self.args.use_xla ) def encoder_forward(): return model(lowerCAmelCase , training=lowerCAmelCase ) lowerCAmelCase = encoder_decoder_forward if config.is_encoder_decoder else encoder_forward return _inference def __lowercase ( self : Optional[int] , lowerCAmelCase : str , lowerCAmelCase : int , lowerCAmelCase : int ): lowerCAmelCase = self.config_dict[model_name] if self.args.eager_mode is not False: raise ValueError("""Training cannot be done in eager mode. Please make sure that `args.eager_mode = False`.""" ) if self.args.fpaa: raise NotImplementedError("""Mixed precision is currently not supported.""" ) lowerCAmelCase = ( hasattr(lowerCAmelCase , """architectures""" ) and isinstance(config.architectures , lowerCAmelCase ) and len(config.architectures ) > 0 ) if not self.args.only_pretrain_model and has_model_class_in_config: try: lowerCAmelCase = """TF""" + config.architectures[0] # prepend 'TF' for tensorflow model lowerCAmelCase = __import__("""transformers""" , fromlist=[model_class] ) lowerCAmelCase = getattr(lowerCAmelCase , lowerCAmelCase ) lowerCAmelCase = model_cls(lowerCAmelCase ) except ImportError: raise ImportError( f'''{model_class} does not exist. 
If you just want to test the pretrained model, you might want to''' """ set `--only_pretrain_model` or `args.only_pretrain_model=True`.""" ) else: lowerCAmelCase = TF_MODEL_WITH_LM_HEAD_MAPPING[config.__class__](lowerCAmelCase ) # encoder-decoder has vocab size saved differently lowerCAmelCase = config.vocab_size if hasattr(lowerCAmelCase , """vocab_size""" ) else config.encoder.vocab_size lowerCAmelCase = random_input_ids(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ) @run_with_tf_optimizations(self.args.eager_mode , self.args.use_xla ) def encoder_decoder_train(): lowerCAmelCase = model(lowerCAmelCase , decoder_input_ids=lowerCAmelCase , labels=lowerCAmelCase , training=lowerCAmelCase )[0] lowerCAmelCase = tf.gradients(lowerCAmelCase , model.trainable_variables ) return gradients @run_with_tf_optimizations(self.args.eager_mode , self.args.use_xla ) def encoder_train(): lowerCAmelCase = model(lowerCAmelCase , labels=lowerCAmelCase , training=lowerCAmelCase )[0] lowerCAmelCase = tf.gradients(lowerCAmelCase , model.trainable_variables ) return gradients lowerCAmelCase = encoder_decoder_train if config.is_encoder_decoder else encoder_train return _train def __lowercase ( self : Optional[int] , lowerCAmelCase : List[str] ): with self.args.strategy.scope(): try: if self.args.is_tpu or self.args.use_xla: # run additional 10 times to stabilize compilation for tpu logger.info("""Do inference on TPU. Running model 5 times to stabilize compilation""" ) timeit.repeat(lowerCAmelCase , repeat=1 , number=5 ) # as written in https://docs.python.org/2/library/timeit.html#timeit.Timer.repeat, min should be taken rather than the average lowerCAmelCase = timeit.repeat( lowerCAmelCase , repeat=self.args.repeat , number=10 , ) return min(lowerCAmelCase ) / 10.0 except ResourceExhaustedError as e: self.print_fn(f'''Doesn\'t fit on GPU. {e}''' ) def __lowercase ( self : Optional[Any] , lowerCAmelCase : Callable[[], None] ): logger.info( """Note that TensorFlow allocates more memory than """ """it might need to speed up computation. """ """The memory reported here corresponds to the memory """ """reported by `nvidia-smi`, which can vary depending """ """on total available memory on the GPU that is used.""" ) with self.args.strategy.scope(): try: if self.args.trace_memory_line_by_line: if not self.args.eager_mode: raise ValueError( """`args.eager_mode` is set to `False`. Make sure to run model in eager mode to measure memory""" """ consumption line by line.""" ) lowerCAmelCase = start_memory_tracing("""transformers""" ) if self.args.is_tpu: # tpu raise NotImplementedError( """Memory Benchmarking is currently not implemented for TPU. Please disable memory benchmarking""" """ with `args.memory=False`""" ) elif self.args.is_gpu: # gpu if not is_pyanvml_available(): logger.warning( """py3nvml not installed, we won't log GPU memory usage. """ """Install py3nvml (pip install py3nvml) to log information about GPU.""" ) lowerCAmelCase = """N/A""" else: logger.info( """Measuring total GPU usage on GPU device. 
Make sure to not have additional processes""" """ running on the same GPU.""" ) # init nvml nvml.nvmlInit() func() lowerCAmelCase = nvml.nvmlDeviceGetHandleByIndex(self.args.device_idx ) lowerCAmelCase = nvml.nvmlDeviceGetMemoryInfo(lowerCAmelCase ) lowerCAmelCase = meminfo.used lowerCAmelCase = Memory(lowerCAmelCase ) # shutdown nvml nvml.nvmlShutdown() else: # cpu if self.args.trace_memory_line_by_line: logger.info( """When enabling line by line tracing, the max peak memory for CPU is inaccurate in""" """ TensorFlow.""" ) lowerCAmelCase = None else: lowerCAmelCase = measure_peak_memory_cpu(lowerCAmelCase ) lowerCAmelCase = Memory(lowerCAmelCase ) if isinstance(lowerCAmelCase , lowerCAmelCase ) else memory_bytes if self.args.trace_memory_line_by_line: lowerCAmelCase = stop_memory_tracing(lowerCAmelCase ) if memory is None: lowerCAmelCase = summary.total else: lowerCAmelCase = None return memory, summary except ResourceExhaustedError as e: self.print_fn(f'''Doesn\'t fit on GPU. {e}''' ) return "N/A", None
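`run_with_tf_optimizations` above wraps each benchmarked callable either as plain eager code or as an (optionally XLA-compiled) `tf.function`. The same three execution modes in miniature; `jit_compile` is the current name for the `experimental_compile` flag used above:

```python
import tensorflow as tf

def matmul():
    x = tf.random.normal((64, 64))
    return tf.matmul(x, x)

eager_fn = matmul                               # runs op-by-op
graph_fn = tf.function(matmul)                  # traced into a graph
xla_fn = tf.function(matmul, jit_compile=True)  # graph plus XLA compilation
for fn in (eager_fn, graph_fn, xla_fn):
    fn()  # the first call of a compiled variant includes tracing/compilation
```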
import argparse import os import jax as jnp import numpy as onp import torch import torch.nn as nn from music_spectrogram_diffusion import inference from tax import checkpoints from diffusers import DDPMScheduler, OnnxRuntimeModel, SpectrogramDiffusionPipeline from diffusers.pipelines.spectrogram_diffusion import SpectrogramContEncoder, SpectrogramNotesEncoder, TaFilmDecoder __A : int = "base_with_context" def __SCREAMING_SNAKE_CASE ( UpperCamelCase__ , UpperCamelCase__ ) -> Optional[int]: '''simple docstring''' UpperCAmelCase = nn.Parameter(torch.FloatTensor(weights['''token_embedder''']['''embedding'''] ) ) UpperCAmelCase = nn.Parameter( torch.FloatTensor(weights['''Embed_0''']['''embedding'''] ) , requires_grad=lowerCAmelCase_ ) for lyr_num, lyr in enumerate(model.encoders ): UpperCAmelCase = weights[F"""layers_{lyr_num}"""] UpperCAmelCase = nn.Parameter( torch.FloatTensor(ly_weight['''pre_attention_layer_norm''']['''scale'''] ) ) UpperCAmelCase = ly_weight['''attention'''] UpperCAmelCase = nn.Parameter(torch.FloatTensor(attention_weights['''query''']['''kernel'''].T ) ) UpperCAmelCase = nn.Parameter(torch.FloatTensor(attention_weights['''key''']['''kernel'''].T ) ) UpperCAmelCase = nn.Parameter(torch.FloatTensor(attention_weights['''value''']['''kernel'''].T ) ) UpperCAmelCase = nn.Parameter(torch.FloatTensor(attention_weights['''out''']['''kernel'''].T ) ) UpperCAmelCase = nn.Parameter(torch.FloatTensor(ly_weight['''pre_mlp_layer_norm''']['''scale'''] ) ) UpperCAmelCase = nn.Parameter(torch.FloatTensor(ly_weight['''mlp''']['''wi_0''']['''kernel'''].T ) ) UpperCAmelCase = nn.Parameter(torch.FloatTensor(ly_weight['''mlp''']['''wi_1''']['''kernel'''].T ) ) UpperCAmelCase = nn.Parameter(torch.FloatTensor(ly_weight['''mlp''']['''wo''']['''kernel'''].T ) ) UpperCAmelCase = nn.Parameter(torch.FloatTensor(weights['''encoder_norm''']['''scale'''] ) ) return model def __SCREAMING_SNAKE_CASE ( UpperCamelCase__ , UpperCamelCase__ ) -> Tuple: '''simple docstring''' UpperCAmelCase = nn.Parameter(torch.FloatTensor(weights['''input_proj''']['''kernel'''].T ) ) UpperCAmelCase = nn.Parameter( torch.FloatTensor(weights['''Embed_0''']['''embedding'''] ) , requires_grad=lowerCAmelCase_ ) for lyr_num, lyr in enumerate(model.encoders ): UpperCAmelCase = weights[F"""layers_{lyr_num}"""] UpperCAmelCase = ly_weight['''attention'''] UpperCAmelCase = nn.Parameter(torch.FloatTensor(attention_weights['''query''']['''kernel'''].T ) ) UpperCAmelCase = nn.Parameter(torch.FloatTensor(attention_weights['''key''']['''kernel'''].T ) ) UpperCAmelCase = nn.Parameter(torch.FloatTensor(attention_weights['''value''']['''kernel'''].T ) ) UpperCAmelCase = nn.Parameter(torch.FloatTensor(attention_weights['''out''']['''kernel'''].T ) ) UpperCAmelCase = nn.Parameter( torch.FloatTensor(ly_weight['''pre_attention_layer_norm''']['''scale'''] ) ) UpperCAmelCase = nn.Parameter(torch.FloatTensor(ly_weight['''mlp''']['''wi_0''']['''kernel'''].T ) ) UpperCAmelCase = nn.Parameter(torch.FloatTensor(ly_weight['''mlp''']['''wi_1''']['''kernel'''].T ) ) UpperCAmelCase = nn.Parameter(torch.FloatTensor(ly_weight['''mlp''']['''wo''']['''kernel'''].T ) ) UpperCAmelCase = nn.Parameter(torch.FloatTensor(ly_weight['''pre_mlp_layer_norm''']['''scale'''] ) ) UpperCAmelCase = nn.Parameter(torch.FloatTensor(weights['''encoder_norm''']['''scale'''] ) ) return model def __SCREAMING_SNAKE_CASE ( UpperCamelCase__ , UpperCamelCase__ ) -> List[Any]: '''simple docstring''' UpperCAmelCase = 
nn.Parameter(torch.FloatTensor(weights['''time_emb_dense0''']['''kernel'''].T ) ) UpperCAmelCase = nn.Parameter(torch.FloatTensor(weights['''time_emb_dense1''']['''kernel'''].T ) ) UpperCAmelCase = nn.Parameter( torch.FloatTensor(weights['''Embed_0''']['''embedding'''] ) , requires_grad=lowerCAmelCase_ ) UpperCAmelCase = nn.Parameter( torch.FloatTensor(weights['''continuous_inputs_projection''']['''kernel'''].T ) ) for lyr_num, lyr in enumerate(model.decoders ): UpperCAmelCase = weights[F"""layers_{lyr_num}"""] UpperCAmelCase = nn.Parameter( torch.FloatTensor(ly_weight['''pre_self_attention_layer_norm''']['''scale'''] ) ) UpperCAmelCase = nn.Parameter( torch.FloatTensor(ly_weight['''FiLMLayer_0''']['''DenseGeneral_0''']['''kernel'''].T ) ) UpperCAmelCase = ly_weight['''self_attention'''] UpperCAmelCase = nn.Parameter(torch.FloatTensor(attention_weights['''query''']['''kernel'''].T ) ) UpperCAmelCase = nn.Parameter(torch.FloatTensor(attention_weights['''key''']['''kernel'''].T ) ) UpperCAmelCase = nn.Parameter(torch.FloatTensor(attention_weights['''value''']['''kernel'''].T ) ) UpperCAmelCase = nn.Parameter(torch.FloatTensor(attention_weights['''out''']['''kernel'''].T ) ) UpperCAmelCase = ly_weight['''MultiHeadDotProductAttention_0'''] UpperCAmelCase = nn.Parameter(torch.FloatTensor(attention_weights['''query''']['''kernel'''].T ) ) UpperCAmelCase = nn.Parameter(torch.FloatTensor(attention_weights['''key''']['''kernel'''].T ) ) UpperCAmelCase = nn.Parameter(torch.FloatTensor(attention_weights['''value''']['''kernel'''].T ) ) UpperCAmelCase = nn.Parameter(torch.FloatTensor(attention_weights['''out''']['''kernel'''].T ) ) UpperCAmelCase = nn.Parameter( torch.FloatTensor(ly_weight['''pre_cross_attention_layer_norm''']['''scale'''] ) ) UpperCAmelCase = nn.Parameter(torch.FloatTensor(ly_weight['''pre_mlp_layer_norm''']['''scale'''] ) ) UpperCAmelCase = nn.Parameter( torch.FloatTensor(ly_weight['''FiLMLayer_1''']['''DenseGeneral_0''']['''kernel'''].T ) ) UpperCAmelCase = nn.Parameter(torch.FloatTensor(ly_weight['''mlp''']['''wi_0''']['''kernel'''].T ) ) UpperCAmelCase = nn.Parameter(torch.FloatTensor(ly_weight['''mlp''']['''wi_1''']['''kernel'''].T ) ) UpperCAmelCase = nn.Parameter(torch.FloatTensor(ly_weight['''mlp''']['''wo''']['''kernel'''].T ) ) UpperCAmelCase = nn.Parameter(torch.FloatTensor(weights['''decoder_norm''']['''scale'''] ) ) UpperCAmelCase = nn.Parameter(torch.FloatTensor(weights['''spec_out_dense''']['''kernel'''].T ) ) return model def __SCREAMING_SNAKE_CASE ( UpperCamelCase__ ) -> Dict: '''simple docstring''' UpperCAmelCase = checkpoints.load_tax_checkpoint(args.checkpoint_path ) UpperCAmelCase = jnp.tree_util.tree_map(onp.array , lowerCAmelCase_ ) UpperCAmelCase = [ '''from __gin__ import dynamic_registration''', '''from music_spectrogram_diffusion.models.diffusion import diffusion_utils''', '''diffusion_utils.ClassifierFreeGuidanceConfig.eval_condition_weight = 2.0''', '''diffusion_utils.DiffusionConfig.classifier_free_guidance = @diffusion_utils.ClassifierFreeGuidanceConfig()''', ] UpperCAmelCase = os.path.join(args.checkpoint_path , '''..''' , '''config.gin''' ) UpperCAmelCase = inference.parse_training_gin_file(lowerCAmelCase_ , lowerCAmelCase_ ) UpperCAmelCase = inference.InferenceModel(args.checkpoint_path , lowerCAmelCase_ ) UpperCAmelCase = DDPMScheduler(beta_schedule='''squaredcos_cap_v2''' , variance_type='''fixed_large''' ) UpperCAmelCase = SpectrogramNotesEncoder( max_length=synth_model.sequence_length['''inputs'''] , 
vocab_size=synth_model.model.module.config.vocab_size , d_model=synth_model.model.module.config.emb_dim , dropout_rate=synth_model.model.module.config.dropout_rate , num_layers=synth_model.model.module.config.num_encoder_layers , num_heads=synth_model.model.module.config.num_heads , d_kv=synth_model.model.module.config.head_dim , d_ff=synth_model.model.module.config.mlp_dim , feed_forward_proj='''gated-gelu''' , ) UpperCAmelCase = SpectrogramContEncoder( input_dims=synth_model.audio_codec.n_dims , targets_context_length=synth_model.sequence_length['''targets_context'''] , d_model=synth_model.model.module.config.emb_dim , dropout_rate=synth_model.model.module.config.dropout_rate , num_layers=synth_model.model.module.config.num_encoder_layers , num_heads=synth_model.model.module.config.num_heads , d_kv=synth_model.model.module.config.head_dim , d_ff=synth_model.model.module.config.mlp_dim , feed_forward_proj='''gated-gelu''' , ) UpperCAmelCase = TaFilmDecoder( input_dims=synth_model.audio_codec.n_dims , targets_length=synth_model.sequence_length['''targets_context'''] , max_decoder_noise_time=synth_model.model.module.config.max_decoder_noise_time , d_model=synth_model.model.module.config.emb_dim , num_layers=synth_model.model.module.config.num_decoder_layers , num_heads=synth_model.model.module.config.num_heads , d_kv=synth_model.model.module.config.head_dim , d_ff=synth_model.model.module.config.mlp_dim , dropout_rate=synth_model.model.module.config.dropout_rate , ) UpperCAmelCase = load_notes_encoder(ta_checkpoint['''target''']['''token_encoder'''] , lowerCAmelCase_ ) UpperCAmelCase = load_continuous_encoder(ta_checkpoint['''target''']['''continuous_encoder'''] , lowerCAmelCase_ ) UpperCAmelCase = load_decoder(ta_checkpoint['''target''']['''decoder'''] , lowerCAmelCase_ ) UpperCAmelCase = OnnxRuntimeModel.from_pretrained('''kashif/soundstream_mel_decoder''' ) UpperCAmelCase = SpectrogramDiffusionPipeline( notes_encoder=lowerCAmelCase_ , continuous_encoder=lowerCAmelCase_ , decoder=lowerCAmelCase_ , scheduler=lowerCAmelCase_ , melgan=lowerCAmelCase_ , ) if args.save: pipe.save_pretrained(args.output_path ) if __name__ == "__main__": __A : Tuple = argparse.ArgumentParser() parser.add_argument("--output_path", default=None, type=str, required=True, help="Path to the converted model.") parser.add_argument( "--save", default=True, type=bool, required=False, help="Whether to save the converted model or not." ) parser.add_argument( "--checkpoint_path", default=F'{MODEL}/checkpoint_500000', type=str, required=False, help="Path to the original jax model checkpoint.", ) __A : Union[str, Any] = parser.parse_args() main(args)
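Nearly every assignment in the conversion above loads a Flax `Dense` kernel into a `torch.nn.Linear` by transposing it, because Flax stores kernels as `(in_features, out_features)` while torch expects `(out_features, in_features)`. The pattern in isolation:

```python
import numpy as np
import torch
import torch.nn as nn

kernel = np.random.randn(16, 32).astype(np.float32)  # Flax-style (in, out)
linear = nn.Linear(16, 32, bias=False)
linear.weight = nn.Parameter(torch.from_numpy(kernel.T))  # torch wants (out, in)
x = torch.randn(1, 16)
assert torch.allclose(linear(x), torch.from_numpy(x.numpy() @ kernel), atol=1e-6)
```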
from collections import defaultdict

from graphs.minimum_spanning_tree_prims import prisms_algorithm as mst


def test_prim_successful_result() -> None:
    num_nodes, num_edges = 9, 14  # noqa: F841
    edges = [
        [0, 1, 4],
        [0, 7, 8],
        [1, 2, 8],
        [7, 8, 7],
        [7, 6, 1],
        [2, 8, 2],
        [8, 6, 6],
        [2, 3, 7],
        [2, 5, 4],
        [6, 5, 2],
        [3, 5, 14],
        [3, 4, 9],
        [5, 4, 10],
        [1, 7, 11],
    ]
    adjacency = defaultdict(list)
    for node_a, node_b, cost in edges:
        adjacency[node_a].append([node_b, cost])
        adjacency[node_b].append([node_a, cost])
    result = mst(adjacency)
    expected = [
        [7, 6, 1],
        [2, 8, 2],
        [6, 5, 2],
        [0, 1, 4],
        [2, 5, 4],
        [2, 3, 7],
        [0, 7, 8],
        [3, 4, 9],
    ]
    for answer in expected:
        edge = tuple(answer[:2])
        reverse = tuple(edge[::-1])
        assert edge in result or reverse in result
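For reference, a compact heap-based Prim's algorithm matching the adjacency format built in the test above (a sketch of what the imported `prisms_algorithm` is expected to compute; the real implementation lives in `graphs.minimum_spanning_tree_prims`):

```python
import heapq

def prim_mst(adjacency, start=0):
    visited, heap, tree = {start}, [], []
    for v, w in adjacency[start]:
        heapq.heappush(heap, (w, start, v))
    while heap:
        w, u, v = heapq.heappop(heap)  # always take the cheapest frontier edge
        if v in visited:
            continue
        visited.add(v)
        tree.append([u, v, w])
        for nxt, nw in adjacency[v]:
            if nxt not in visited:
                heapq.heappush(heap, (nw, v, nxt))
    return tree

graph = {0: [[1, 4], [2, 1]], 1: [[0, 4], [2, 2]], 2: [[0, 1], [1, 2]]}
print(prim_mst(graph))  # [[0, 2, 1], [2, 1, 2]]
```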
import torch


def main() -> None:
    if torch.cuda.is_available():
        num_gpus = torch.cuda.device_count()
    else:
        num_gpus = 0
    print(f"Successfully ran on {num_gpus} GPUs")


if __name__ == "__main__":
    main()
import gc import unittest import numpy as np import torch from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer from diffusers import ( AutoencoderKL, DDIMScheduler, StableDiffusionSAGPipeline, UNetaDConditionModel, ) from diffusers.utils import slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin enable_full_determinism() class __lowerCAmelCase ( lowerCAmelCase_ , lowerCAmelCase_ , unittest.TestCase ): """simple docstring""" A__ : int = StableDiffusionSAGPipeline A__ : Union[str, Any] = TEXT_TO_IMAGE_PARAMS A__ : Dict = TEXT_TO_IMAGE_BATCH_PARAMS A__ : List[str] = TEXT_TO_IMAGE_IMAGE_PARAMS A__ : int = TEXT_TO_IMAGE_IMAGE_PARAMS A__ : List[str] = False def snake_case_ ( self : Optional[Any] ): torch.manual_seed(0 ) __lowercase : Optional[Any] = UNetaDConditionModel( block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''') , up_block_types=('''CrossAttnUpBlock2D''', '''UpBlock2D''') , cross_attention_dim=32 , ) __lowercase : int = DDIMScheduler( beta_start=0.0_00_85 , beta_end=0.0_12 , beta_schedule='''scaled_linear''' , clip_sample=_snake_case , set_alpha_to_one=_snake_case , ) torch.manual_seed(0 ) __lowercase : List[Any] = AutoencoderKL( block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''] , up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] , latent_channels=4 , ) torch.manual_seed(0 ) __lowercase : Tuple = CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , ) __lowercase : List[Any] = CLIPTextModel(_snake_case ) __lowercase : List[Any] = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' ) __lowercase : int = { '''unet''': unet, '''scheduler''': scheduler, '''vae''': vae, '''text_encoder''': text_encoder, '''tokenizer''': tokenizer, '''safety_checker''': None, '''feature_extractor''': None, } return components def snake_case_ ( self : Tuple , _snake_case : List[str] , _snake_case : List[str]=0 ): if str(_snake_case ).startswith('''mps''' ): __lowercase : Optional[int] = torch.manual_seed(_snake_case ) else: __lowercase : Tuple = torch.Generator(device=_snake_case ).manual_seed(_snake_case ) __lowercase : Tuple = { '''prompt''': '''.''', '''generator''': generator, '''num_inference_steps''': 2, '''guidance_scale''': 1.0, '''sag_scale''': 1.0, '''output_type''': '''numpy''', } return inputs def snake_case_ ( self : Any ): super().test_inference_batch_single_identical(expected_max_diff=3E-3 ) @slow @require_torch_gpu class __lowerCAmelCase ( unittest.TestCase ): """simple docstring""" def snake_case_ ( self : int ): # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() def snake_case_ ( self : Dict ): __lowercase : Optional[int] = StableDiffusionSAGPipeline.from_pretrained('''CompVis/stable-diffusion-v1-4''' ) __lowercase : Optional[int] = sag_pipe.to(_snake_case ) sag_pipe.set_progress_bar_config(disable=_snake_case ) __lowercase : List[str] = '''.''' __lowercase : List[str] = torch.manual_seed(0 ) __lowercase : List[str] = sag_pipe( 
[prompt] , generator=_snake_case , guidance_scale=7.5 , sag_scale=1.0 , num_inference_steps=20 , output_type='''np''' ) __lowercase : Optional[Any] = output.images __lowercase : Dict = image[0, -3:, -3:, -1] assert image.shape == (1, 512, 512, 3) __lowercase : int = np.array([0.15_68, 0.17_38, 0.16_95, 0.16_93, 0.15_07, 0.17_05, 0.15_47, 0.17_51, 0.19_49] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 5E-2 def snake_case_ ( self : Optional[int] ): __lowercase : Dict = StableDiffusionSAGPipeline.from_pretrained('''stabilityai/stable-diffusion-2-1-base''' ) __lowercase : Any = sag_pipe.to(_snake_case ) sag_pipe.set_progress_bar_config(disable=_snake_case ) __lowercase : List[str] = '''.''' __lowercase : List[str] = torch.manual_seed(0 ) __lowercase : int = sag_pipe( [prompt] , generator=_snake_case , guidance_scale=7.5 , sag_scale=1.0 , num_inference_steps=20 , output_type='''np''' ) __lowercase : str = output.images __lowercase : Tuple = image[0, -3:, -3:, -1] assert image.shape == (1, 512, 512, 3) __lowercase : Any = np.array([0.34_59, 0.28_76, 0.25_37, 0.30_02, 0.26_71, 0.21_60, 0.30_26, 0.22_62, 0.23_71] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 5E-2 def snake_case_ ( self : int ): __lowercase : List[str] = StableDiffusionSAGPipeline.from_pretrained('''stabilityai/stable-diffusion-2-1-base''' ) __lowercase : Tuple = sag_pipe.to(_snake_case ) sag_pipe.set_progress_bar_config(disable=_snake_case ) __lowercase : Union[str, Any] = '''.''' __lowercase : Tuple = torch.manual_seed(0 ) __lowercase : Optional[int] = sag_pipe( [prompt] , width=768 , height=512 , generator=_snake_case , guidance_scale=7.5 , sag_scale=1.0 , num_inference_steps=20 , output_type='''np''' , ) __lowercase : Dict = output.images assert image.shape == (1, 512, 768, 3)
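An end-to-end usage sketch of the pipeline exercised by these tests. The model id and scale values mirror the slow tests above; running it requires a CUDA device and the pretrained weights:

```python
import torch
from diffusers import StableDiffusionSAGPipeline

pipe = StableDiffusionSAGPipeline.from_pretrained(
    "CompVis/stable-diffusion-v1-4", torch_dtype=torch.float16
)
pipe = pipe.to("cuda")
image = pipe(
    "a photo of an astronaut riding a horse",
    guidance_scale=7.5,
    sag_scale=0.75,  # self-attention guidance strength; 0 disables SAG
    num_inference_steps=20,
).images[0]
image.save("sag_sample.png")
```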
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_tf_available, is_torch_available, ) UpperCAmelCase ={ "configuration_vision_encoder_decoder": ["VisionEncoderDecoderConfig", "VisionEncoderDecoderOnnxConfig"] } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCAmelCase =["VisionEncoderDecoderModel"] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCAmelCase =["TFVisionEncoderDecoderModel"] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCAmelCase =["FlaxVisionEncoderDecoderModel"] if TYPE_CHECKING: from .configuration_vision_encoder_decoder import VisionEncoderDecoderConfig, VisionEncoderDecoderOnnxConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_vision_encoder_decoder import VisionEncoderDecoderModel try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_vision_encoder_decoder import TFVisionEncoderDecoderModel try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_vision_encoder_decoder import FlaxVisionEncoderDecoderModel else: import sys UpperCAmelCase =_LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_torch_available, ) lowerCAmelCase__ = { '''configuration_gpt_bigcode''': ['''GPT_BIGCODE_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''GPTBigCodeConfig'''], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase__ = [ '''GPT_BIGCODE_PRETRAINED_MODEL_ARCHIVE_LIST''', '''GPTBigCodeForSequenceClassification''', '''GPTBigCodeForTokenClassification''', '''GPTBigCodeForCausalLM''', '''GPTBigCodeModel''', '''GPTBigCodePreTrainedModel''', ] if TYPE_CHECKING: from .configuration_gpt_bigcode import GPT_BIGCODE_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTBigCodeConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_gpt_bigcode import ( GPT_BIGCODE_PRETRAINED_MODEL_ARCHIVE_LIST, GPTBigCodeForCausalLM, GPTBigCodeForSequenceClassification, GPTBigCodeForTokenClassification, GPTBigCodeModel, GPTBigCodePreTrainedModel, ) else: import sys lowerCAmelCase__ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
import gc import random import unittest import numpy as np import torch from PIL import Image from transformers import XLMRobertaTokenizerFast from diffusers import DDIMScheduler, KandinskyImgaImgPipeline, KandinskyPriorPipeline, UNetaDConditionModel, VQModel from diffusers.pipelines.kandinsky.text_encoder import MCLIPConfig, MultilingualCLIP from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference enable_full_determinism() class A_ ( UpperCAmelCase , unittest.TestCase ): """simple docstring""" SCREAMING_SNAKE_CASE_ : List[Any] = KandinskyImgaImgPipeline SCREAMING_SNAKE_CASE_ : Union[str, Any] = ['''prompt''', '''image_embeds''', '''negative_image_embeds''', '''image'''] SCREAMING_SNAKE_CASE_ : Tuple = [ '''prompt''', '''negative_prompt''', '''image_embeds''', '''negative_image_embeds''', '''image''', ] SCREAMING_SNAKE_CASE_ : Any = [ '''generator''', '''height''', '''width''', '''strength''', '''guidance_scale''', '''negative_prompt''', '''num_inference_steps''', '''return_dict''', '''guidance_scale''', '''num_images_per_prompt''', '''output_type''', '''return_dict''', ] SCREAMING_SNAKE_CASE_ : Tuple = False @property def __UpperCAmelCase ( self : Any ) -> str: return 32 @property def __UpperCAmelCase ( self : Optional[int] ) -> Optional[Any]: return 32 @property def __UpperCAmelCase ( self : List[Any] ) -> Optional[Any]: return self.time_input_dim @property def __UpperCAmelCase ( self : Tuple ) -> List[Any]: return self.time_input_dim * 4 @property def __UpperCAmelCase ( self : List[str] ) -> Dict: return 100 @property def __UpperCAmelCase ( self : Tuple ) -> Dict: _lowercase = XLMRobertaTokenizerFast.from_pretrained('YiYiXu/tiny-random-mclip-base' ) return tokenizer @property def __UpperCAmelCase ( self : List[Any] ) -> Any: torch.manual_seed(0 ) _lowercase = MCLIPConfig( numDims=self.cross_attention_dim ,transformerDimensions=self.text_embedder_hidden_size ,hidden_size=self.text_embedder_hidden_size ,intermediate_size=37 ,num_attention_heads=4 ,num_hidden_layers=5 ,vocab_size=1005 ,) _lowercase = MultilingualCLIP(__A ) _lowercase = text_encoder.eval() return text_encoder @property def __UpperCAmelCase ( self : Any ) -> Tuple: torch.manual_seed(0 ) _lowercase = { 'in_channels': 4, # Out channels is double in channels because predicts mean and variance 'out_channels': 8, 'addition_embed_type': 'text_image', 'down_block_types': ('ResnetDownsampleBlock2D', 'SimpleCrossAttnDownBlock2D'), 'up_block_types': ('SimpleCrossAttnUpBlock2D', 'ResnetUpsampleBlock2D'), 'mid_block_type': 'UNetMidBlock2DSimpleCrossAttn', 'block_out_channels': (self.block_out_channels_a, self.block_out_channels_a * 2), 'layers_per_block': 1, 'encoder_hid_dim': self.text_embedder_hidden_size, 'encoder_hid_dim_type': 'text_image_proj', 'cross_attention_dim': self.cross_attention_dim, 'attention_head_dim': 4, 'resnet_time_scale_shift': 'scale_shift', 'class_embed_type': None, } _lowercase = UNetaDConditionModel(**__A ) return model @property def __UpperCAmelCase ( self : Optional[int] ) -> Tuple: return { "block_out_channels": [32, 64], "down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"], "in_channels": 3, "latent_channels": 4, "layers_per_block": 1, "norm_num_groups": 8, "norm_type": "spatial", "num_vq_embeddings": 12, "out_channels": 3, "up_block_types": [ "AttnUpDecoderBlock2D", "UpDecoderBlock2D", ], 
"vq_embed_dim": 4, } @property def __UpperCAmelCase ( self : int ) -> Union[str, Any]: torch.manual_seed(0 ) _lowercase = VQModel(**self.dummy_movq_kwargs ) return model def __UpperCAmelCase ( self : Union[str, Any] ) -> Optional[int]: _lowercase = self.dummy_text_encoder _lowercase = self.dummy_tokenizer _lowercase = self.dummy_unet _lowercase = self.dummy_movq _lowercase = { 'num_train_timesteps': 1000, 'beta_schedule': 'linear', 'beta_start': 0.00085, 'beta_end': 0.012, 'clip_sample': False, 'set_alpha_to_one': False, 'steps_offset': 0, 'prediction_type': 'epsilon', 'thresholding': False, } _lowercase = DDIMScheduler(**__A ) _lowercase = { 'text_encoder': text_encoder, 'tokenizer': tokenizer, 'unet': unet, 'scheduler': scheduler, 'movq': movq, } return components def __UpperCAmelCase ( self : Tuple ,__A : int ,__A : List[Any]=0 ) -> Dict: _lowercase = floats_tensor((1, self.cross_attention_dim) ,rng=random.Random(__A ) ).to(__A ) _lowercase = floats_tensor((1, self.cross_attention_dim) ,rng=random.Random(seed + 1 ) ).to(__A ) # create init_image _lowercase = floats_tensor((1, 3, 64, 64) ,rng=random.Random(__A ) ).to(__A ) _lowercase = image.cpu().permute(0 ,2 ,3 ,1 )[0] _lowercase = Image.fromarray(np.uinta(__A ) ).convert('RGB' ).resize((256, 256) ) if str(__A ).startswith('mps' ): _lowercase = torch.manual_seed(__A ) else: _lowercase = torch.Generator(device=__A ).manual_seed(__A ) _lowercase = { 'prompt': 'horse', 'image': init_image, 'image_embeds': image_embeds, 'negative_image_embeds': negative_image_embeds, 'generator': generator, 'height': 64, 'width': 64, 'num_inference_steps': 10, 'guidance_scale': 7.0, 'strength': 0.2, 'output_type': 'np', } return inputs def __UpperCAmelCase ( self : List[Any] ) -> Tuple: _lowercase = 'cpu' _lowercase = self.get_dummy_components() _lowercase = self.pipeline_class(**__A ) _lowercase = pipe.to(__A ) pipe.set_progress_bar_config(disable=__A ) _lowercase = pipe(**self.get_dummy_inputs(__A ) ) _lowercase = output.images _lowercase = pipe( **self.get_dummy_inputs(__A ) ,return_dict=__A ,)[0] _lowercase = image[0, -3:, -3:, -1] _lowercase = image_from_tuple[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) _lowercase = np.array( [0.61474943, 0.6073539, 0.43308544, 0.5928269, 0.47493595, 0.46755973, 0.4613838, 0.45368797, 0.50119233] ) assert ( np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2 ), F""" expected_slice {expected_slice}, but got {image_slice.flatten()}""" assert ( np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2 ), F""" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}""" @slow @require_torch_gpu class A_ ( unittest.TestCase ): """simple docstring""" def __UpperCAmelCase ( self : Optional[int] ) -> Optional[Any]: # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() def __UpperCAmelCase ( self : str ) -> str: _lowercase = load_numpy( 'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main' '/kandinsky/kandinsky_img2img_frog.npy' ) _lowercase = load_image( 'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main' '/kandinsky/cat.png' ) _lowercase = 'A red cartoon frog, 4k' _lowercase = KandinskyPriorPipeline.from_pretrained( 'kandinsky-community/kandinsky-2-1-prior' ,torch_dtype=torch.floataa ) pipe_prior.to(__A ) _lowercase = KandinskyImgaImgPipeline.from_pretrained( 'kandinsky-community/kandinsky-2-1' ,torch_dtype=torch.floataa ) _lowercase = pipeline.to(__A ) 
pipeline.set_progress_bar_config(disable=__A ) _lowercase = torch.Generator(device='cpu' ).manual_seed(0 ) _lowercase , _lowercase = pipe_prior( __A ,generator=__A ,num_inference_steps=5 ,negative_prompt='' ,).to_tuple() _lowercase = pipeline( __A ,image=__A ,image_embeds=__A ,negative_image_embeds=__A ,generator=__A ,num_inference_steps=100 ,height=768 ,width=768 ,strength=0.2 ,output_type='np' ,) _lowercase = output.images[0] assert image.shape == (768, 768, 3) assert_mean_pixel_difference(__A ,__A )
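For orientation, the slow test above reduces to a two-stage call pattern. A hedged sketch follows, using the public diffusers class names (the obfuscated KandinskyImgaImgPipeline in this row corresponds to diffusers' KandinskyImg2ImgPipeline; hub IDs and parameters are taken from the test itself):

import torch
from diffusers import KandinskyImg2ImgPipeline, KandinskyPriorPipeline
from diffusers.utils import load_image

# Stage 1: the prior maps the text prompt to CLIP image embeddings.
pipe_prior = KandinskyPriorPipeline.from_pretrained(
    "kandinsky-community/kandinsky-2-1-prior", torch_dtype=torch.float16
).to("cuda")
image_embeds, negative_image_embeds = pipe_prior(
    "A red cartoon frog, 4k", negative_prompt="", generator=torch.Generator("cpu").manual_seed(0)
).to_tuple()

# Stage 2: the img2img decoder denoises from the init image under those embeddings.
pipe = KandinskyImg2ImgPipeline.from_pretrained(
    "kandinsky-community/kandinsky-2-1", torch_dtype=torch.float16
).to("cuda")
init_image = load_image(
    "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/kandinsky/cat.png"
)
image = pipe(
    "A red cartoon frog, 4k",
    image=init_image,
    image_embeds=image_embeds,
    negative_image_embeds=negative_image_embeds,
    strength=0.2,  # low strength keeps most of the init image
    num_inference_steps=100,
    height=768,
    width=768,
).images[0]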
535
def pancake_sort(arr: list) -> list:
    """Sort a list by repeated prefix reversals (pancake sort)."""
    cur = len(arr)
    while cur > 1:
        # Find the index of the maximum number in the unsorted prefix arr[0:cur]
        mi = arr.index(max(arr[0:cur]))
        # Reverse from 0 to mi, bringing the maximum to the front
        arr = arr[mi::-1] + arr[mi + 1 : len(arr)]
        # Reverse the whole unsorted prefix, sinking the maximum to position cur - 1
        arr = arr[cur - 1 :: -1] + arr[cur : len(arr)]
        cur -= 1
    return arr


if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
    print(pancake_sort(unsorted))
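A short trace of the two flips per pass on a toy input (the example values are mine):

# pancake_sort([3, 1, 2]):
#   cur=3: max 3 at index 0 -> flip prefix of length 1 (no-op) -> flip prefix of length 3 -> [2, 1, 3]
#   cur=2: max 2 at index 0 -> flip prefix of length 1 (no-op) -> flip prefix of length 2 -> [1, 2, 3]
assert pancake_sort([3, 1, 2]) == [1, 2, 3]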
535
1
"""Introsort: quicksort with a depth limit, falling back to heap sort and insertion sort."""
import math


def insertion_sort(array: list, start: int = 0, end: int = 0) -> list:
    """Sort array[start:end] in place with insertion sort."""
    end = end or len(array)
    for i in range(start, end):
        temp_index = i
        temp_index_value = array[i]
        while temp_index != start and temp_index_value < array[temp_index - 1]:
            array[temp_index] = array[temp_index - 1]
            temp_index -= 1
        array[temp_index] = temp_index_value
    return array


def heapify(array: list, index: int, heap_size: int) -> None:  # Max Heap
    """Sift array[index] down so the subtree rooted at index is a max heap."""
    largest = index
    left_index = 2 * index + 1  # Left Node
    right_index = 2 * index + 2  # Right Node
    if left_index < heap_size and array[largest] < array[left_index]:
        largest = left_index
    if right_index < heap_size and array[largest] < array[right_index]:
        largest = right_index
    if largest != index:
        array[largest], array[index] = array[index], array[largest]
        heapify(array, largest, heap_size)


def heap_sort(array: list) -> list:
    n = len(array)
    for i in range(n // 2, -1, -1):
        heapify(array, i, n)
    for i in range(n - 1, 0, -1):
        array[0], array[i] = array[i], array[0]
        heapify(array, 0, i)
    return array


def median_of_3(array: list, first_index: int, middle_index: int, last_index: int) -> int:
    """Return the median of three candidate pivot values."""
    if (array[first_index] > array[middle_index]) != (
        array[first_index] > array[last_index]
    ):
        return array[first_index]
    elif (array[middle_index] > array[first_index]) != (
        array[middle_index] > array[last_index]
    ):
        return array[middle_index]
    else:
        return array[last_index]


def partition(array: list, low: int, high: int, pivot: int) -> int:
    i = low
    j = high
    while True:
        while array[i] < pivot:
            i += 1
        j -= 1
        while pivot < array[j]:
            j -= 1
        if i >= j:
            return i
        array[i], array[j] = array[j], array[i]
        i += 1


def sort(array: list) -> list:
    if len(array) == 0:
        return array
    max_depth = 2 * math.ceil(math.log2(len(array)))
    size_threshold = 16
    return intro_sort(array, 0, len(array), size_threshold, max_depth)


def intro_sort(array: list, start: int, end: int, size_threshold: int, max_depth: int) -> list:
    while end - start > size_threshold:
        if max_depth == 0:
            return heap_sort(array)
        max_depth -= 1
        pivot = median_of_3(array, start, start + ((end - start) // 2) + 1, end - 1)
        p = partition(array, start, end, pivot)
        intro_sort(array, p, end, size_threshold, max_depth)
        end = p
    return insertion_sort(array, start, end)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    user_input = input("Enter numbers separated by a comma : ").strip()
    unsorted = [float(item) for item in user_input.split(",")]
    print(sort(unsorted))
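A quick smoke check of the introsort entry point above (the input values are arbitrary):

# The depth budget 2 * ceil(log2(n)) guards against quicksort's worst case:
# once it hits zero, intro_sort switches to heap_sort; slices at or below the
# size threshold (16) are finished with insertion_sort.
data = [93, 1, 8, 27, 8, 0, -15, 44, 2, 2]
assert sort(data) == [-15, 0, 1, 2, 2, 8, 8, 27, 44, 93]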
591
'''simple docstring''' import json import os import shutil import tempfile from unittest import TestCase from transformers import BartTokenizer, BartTokenizerFast, DPRQuestionEncoderTokenizer, DPRQuestionEncoderTokenizerFast from transformers.models.bart.configuration_bart import BartConfig from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES as DPR_VOCAB_FILES_NAMES from transformers.models.dpr.configuration_dpr import DPRConfig from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES as BART_VOCAB_FILES_NAMES from transformers.testing_utils import require_faiss, require_tokenizers, require_torch, slow from transformers.utils import is_datasets_available, is_faiss_available, is_torch_available if is_torch_available() and is_datasets_available() and is_faiss_available(): from transformers.models.rag.configuration_rag import RagConfig from transformers.models.rag.tokenization_rag import RagTokenizer @require_faiss @require_torch class _a (_lowerCamelCase): """simple docstring""" def UpperCamelCase ( self ) -> Dict: _SCREAMING_SNAKE_CASE = tempfile.mkdtemp() _SCREAMING_SNAKE_CASE = 8 # DPR tok _SCREAMING_SNAKE_CASE = [ """[UNK]""", """[CLS]""", """[SEP]""", """[PAD]""", """[MASK]""", """want""", """##want""", """##ed""", """wa""", """un""", """runn""", """##ing""", """,""", """low""", """lowest""", ] _SCREAMING_SNAKE_CASE = os.path.join(self.tmpdirname , """dpr_tokenizer""" ) os.makedirs(A__ , exist_ok=A__ ) _SCREAMING_SNAKE_CASE = os.path.join(A__ , DPR_VOCAB_FILES_NAMES["""vocab_file"""] ) with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as vocab_writer: vocab_writer.write("""""".join([x + """\n""" for x in vocab_tokens] ) ) # BART tok _SCREAMING_SNAKE_CASE = [ """l""", """o""", """w""", """e""", """r""", """s""", """t""", """i""", """d""", """n""", """\u0120""", """\u0120l""", """\u0120n""", """\u0120lo""", """\u0120low""", """er""", """\u0120lowest""", """\u0120newer""", """\u0120wider""", """<unk>""", ] _SCREAMING_SNAKE_CASE = dict(zip(A__ , range(len(A__ ) ) ) ) _SCREAMING_SNAKE_CASE = ["""#version: 0.2""", """\u0120 l""", """\u0120l o""", """\u0120lo w""", """e r""", """"""] _SCREAMING_SNAKE_CASE = {"""unk_token""": """<unk>"""} _SCREAMING_SNAKE_CASE = os.path.join(self.tmpdirname , """bart_tokenizer""" ) os.makedirs(A__ , exist_ok=A__ ) _SCREAMING_SNAKE_CASE = os.path.join(A__ , BART_VOCAB_FILES_NAMES["""vocab_file"""] ) _SCREAMING_SNAKE_CASE = os.path.join(A__ , BART_VOCAB_FILES_NAMES["""merges_file"""] ) with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as fp: fp.write(json.dumps(A__ ) + """\n""" ) with open(self.merges_file , """w""" , encoding="""utf-8""" ) as fp: fp.write("""\n""".join(A__ ) ) def UpperCamelCase ( self ) -> DPRQuestionEncoderTokenizer: return DPRQuestionEncoderTokenizer.from_pretrained(os.path.join(self.tmpdirname , """dpr_tokenizer""" ) ) def UpperCamelCase ( self ) -> BartTokenizer: return BartTokenizer.from_pretrained(os.path.join(self.tmpdirname , """bart_tokenizer""" ) ) def UpperCamelCase ( self ) -> List[Any]: shutil.rmtree(self.tmpdirname ) @require_tokenizers def UpperCamelCase ( self ) -> str: _SCREAMING_SNAKE_CASE = os.path.join(self.tmpdirname , """rag_tokenizer""" ) _SCREAMING_SNAKE_CASE = RagConfig(question_encoder=DPRConfig().to_dict() , generator=BartConfig().to_dict() ) _SCREAMING_SNAKE_CASE = RagTokenizer(question_encoder=self.get_dpr_tokenizer() , generator=self.get_bart_tokenizer() ) rag_config.save_pretrained(A__ ) rag_tokenizer.save_pretrained(A__ ) _SCREAMING_SNAKE_CASE = 
RagTokenizer.from_pretrained(A__ , config=A__ ) self.assertIsInstance(new_rag_tokenizer.question_encoder , A__ ) self.assertEqual(new_rag_tokenizer.question_encoder.get_vocab() , rag_tokenizer.question_encoder.get_vocab() ) self.assertIsInstance(new_rag_tokenizer.generator , A__ ) self.assertEqual(new_rag_tokenizer.generator.get_vocab() , rag_tokenizer.generator.get_vocab() ) @slow def UpperCamelCase ( self ) -> str: _SCREAMING_SNAKE_CASE = RagTokenizer.from_pretrained("""facebook/rag-token-nq""" ) _SCREAMING_SNAKE_CASE = [ """who got the first nobel prize in physics""", """when is the next deadpool movie being released""", """which mode is used for short wave broadcast service""", """who is the owner of reading football club""", """when is the next scandal episode coming out""", """when is the last time the philadelphia won the superbowl""", """what is the most current adobe flash player version""", """how many episodes are there in dragon ball z""", """what is the first step in the evolution of the eye""", """where is gall bladder situated in human body""", """what is the main mineral in lithium batteries""", """who is the president of usa right now""", """where do the greasers live in the outsiders""", """panda is a national animal of which country""", """what is the name of manchester united stadium""", ] _SCREAMING_SNAKE_CASE = tokenizer(A__ ) self.assertIsNotNone(A__ ) @slow def UpperCamelCase ( self ) -> int: _SCREAMING_SNAKE_CASE = RagTokenizer.from_pretrained("""facebook/rag-sequence-nq""" ) _SCREAMING_SNAKE_CASE = [ """who got the first nobel prize in physics""", """when is the next deadpool movie being released""", """which mode is used for short wave broadcast service""", """who is the owner of reading football club""", """when is the next scandal episode coming out""", """when is the last time the philadelphia won the superbowl""", """what is the most current adobe flash player version""", """how many episodes are there in dragon ball z""", """what is the first step in the evolution of the eye""", """where is gall bladder situated in human body""", """what is the main mineral in lithium batteries""", """who is the president of usa right now""", """where do the greasers live in the outsiders""", """panda is a national animal of which country""", """what is the name of manchester united stadium""", ] _SCREAMING_SNAKE_CASE = tokenizer(A__ ) self.assertIsNotNone(A__ )
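Stripped of the test harness, the round-trip being verified looks roughly like the sketch below; it mirrors the test's save/load sequence, and the local directory name is arbitrary:

from transformers import RagTokenizer
from transformers.models.rag.configuration_rag import RagConfig

tokenizer = RagTokenizer.from_pretrained("facebook/rag-token-nq")
config = RagConfig.from_pretrained("facebook/rag-token-nq")

config.save_pretrained("./rag_tokenizer")     # the config tells from_pretrained
tokenizer.save_pretrained("./rag_tokenizer")  # which sub-tokenizers to rebuild
reloaded = RagTokenizer.from_pretrained("./rag_tokenizer", config=config)

batch = reloaded(["who got the first nobel prize in physics"])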
591
1
from queue import Queue from typing import TYPE_CHECKING, Optional if TYPE_CHECKING: from ..models.auto import AutoTokenizer class UpperCamelCase_ : def _snake_case ( self :Any , __A :Tuple ) -> Optional[Any]: """simple docstring""" raise NotImplementedError() def _snake_case ( self :Any ) -> Optional[Any]: """simple docstring""" raise NotImplementedError() class UpperCamelCase_ ( UpperCamelCase__ ): def __init__( self :Optional[int] , __A :"AutoTokenizer" , __A :bool = False , **__A :Optional[Any] ) -> Dict: """simple docstring""" SCREAMING_SNAKE_CASE__ = tokenizer SCREAMING_SNAKE_CASE__ = skip_prompt SCREAMING_SNAKE_CASE__ = decode_kwargs # variables used in the streaming process SCREAMING_SNAKE_CASE__ = [] SCREAMING_SNAKE_CASE__ = 0 SCREAMING_SNAKE_CASE__ = True def _snake_case ( self :Optional[Any] , __A :List[str] ) -> Union[str, Any]: """simple docstring""" if len(value.shape ) > 1 and value.shape[0] > 1: raise ValueError("""TextStreamer only supports batch size 1""" ) elif len(value.shape ) > 1: SCREAMING_SNAKE_CASE__ = value[0] if self.skip_prompt and self.next_tokens_are_prompt: SCREAMING_SNAKE_CASE__ = False return # Add the new token to the cache and decodes the entire thing. self.token_cache.extend(value.tolist() ) SCREAMING_SNAKE_CASE__ = self.tokenizer.decode(self.token_cache , **self.decode_kwargs ) # After the symbol for a new line, we flush the cache. if text.endswith("""\n""" ): SCREAMING_SNAKE_CASE__ = text[self.print_len :] SCREAMING_SNAKE_CASE__ = [] SCREAMING_SNAKE_CASE__ = 0 # If the last token is a CJK character, we print the characters. elif len(__A ) > 0 and self._is_chinese_char(ord(text[-1] ) ): SCREAMING_SNAKE_CASE__ = text[self.print_len :] self.print_len += len(__A ) # Otherwise, prints until the last space char (simple heuristic to avoid printing incomplete words, # which may change with the subsequent token -- there are probably smarter ways to do this!) 
else: SCREAMING_SNAKE_CASE__ = text[self.print_len : text.rfind(""" """ ) + 1] self.print_len += len(__A ) self.on_finalized_text(__A ) def _snake_case ( self :Union[str, Any] ) -> int: """simple docstring""" if len(self.token_cache ) > 0: SCREAMING_SNAKE_CASE__ = self.tokenizer.decode(self.token_cache , **self.decode_kwargs ) SCREAMING_SNAKE_CASE__ = text[self.print_len :] SCREAMING_SNAKE_CASE__ = [] SCREAMING_SNAKE_CASE__ = 0 else: SCREAMING_SNAKE_CASE__ = """""" SCREAMING_SNAKE_CASE__ = True self.on_finalized_text(__A , stream_end=__A ) def _snake_case ( self :int , __A :str , __A :bool = False ) -> Union[str, Any]: """simple docstring""" print(__A , flush=__A , end="""""" if not stream_end else None ) def _snake_case ( self :Optional[Any] , __A :Optional[Any] ) -> Union[str, Any]: """simple docstring""" if ( (cp >= 0X4E_00 and cp <= 0X9F_FF) or (cp >= 0X34_00 and cp <= 0X4D_BF) # or (cp >= 0X2_00_00 and cp <= 0X2_A6_DF) # or (cp >= 0X2_A7_00 and cp <= 0X2_B7_3F) # or (cp >= 0X2_B7_40 and cp <= 0X2_B8_1F) # or (cp >= 0X2_B8_20 and cp <= 0X2_CE_AF) # or (cp >= 0XF9_00 and cp <= 0XFA_FF) or (cp >= 0X2_F8_00 and cp <= 0X2_FA_1F) # ): # return True return False class UpperCamelCase_ ( UpperCamelCase__ ): def __init__( self :List[Any] , __A :"AutoTokenizer" , __A :bool = False , __A :Optional[float] = None , **__A :Optional[int] ) -> List[Any]: """simple docstring""" super().__init__(__A , __A , **__A ) SCREAMING_SNAKE_CASE__ = Queue() SCREAMING_SNAKE_CASE__ = None SCREAMING_SNAKE_CASE__ = timeout def _snake_case ( self :str , __A :str , __A :bool = False ) -> str: """simple docstring""" self.text_queue.put(__A , timeout=self.timeout ) if stream_end: self.text_queue.put(self.stop_signal , timeout=self.timeout ) def __iter__( self :List[str] ) -> Tuple: """simple docstring""" return self def _snake_case ( self :Dict ) -> int: """simple docstring""" SCREAMING_SNAKE_CASE__ = self.text_queue.get(timeout=self.timeout ) if value == self.stop_signal: raise StopIteration() else: return value
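The two classes above are the identifier-mangled transformers TextStreamer and TextIteratorStreamer. Their intended use follows the documented pattern below; a sketch, assuming a small causal LM such as gpt2 is available:

from threading import Thread

from transformers import AutoModelForCausalLM, AutoTokenizer, TextIteratorStreamer

tok = AutoTokenizer.from_pretrained("gpt2")
model = AutoModelForCausalLM.from_pretrained("gpt2")
inputs = tok(["An increasing sequence: one,"], return_tensors="pt")
streamer = TextIteratorStreamer(tok, skip_prompt=True)

# generate() blocks, so it runs on a worker thread while the main thread
# drains the queue-backed iterator chunk by chunk.
thread = Thread(target=model.generate, kwargs=dict(**inputs, streamer=streamer, max_new_tokens=20))
thread.start()
for text_chunk in streamer:
    print(text_chunk, end="")
thread.join()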
59
def get_data(source_data: list[list[float]]) -> list[list[float]]:
    """Transpose row-wise source data into one list per column."""
    data_lists: list[list[float]] = []
    for data in source_data:
        for i, el in enumerate(data):
            if len(data_lists) < i + 1:
                data_lists.append([])
            data_lists[i].append(float(el))
    return data_lists


def calculate_each_score(data_lists: list[list[float]], weights: list[int]) -> list[list[float]]:
    """Min-max normalize every column; weight 0 inverts the score."""
    score_lists: list[list[float]] = []
    for dlist, weight in zip(data_lists, weights):
        mind = min(dlist)
        maxd = max(dlist)
        score: list[float] = []
        # for weight 0 score is 1 - actual score
        if weight == 0:
            for item in dlist:
                try:
                    score.append(1 - ((item - mind) / (maxd - mind)))
                except ZeroDivisionError:
                    score.append(1)
        elif weight == 1:
            for item in dlist:
                try:
                    score.append((item - mind) / (maxd - mind))
                except ZeroDivisionError:
                    score.append(0)
        # weight not 0 or 1
        else:
            msg = f"Invalid weight of {weight:f} provided"
            raise ValueError(msg)
        score_lists.append(score)
    return score_lists


def generate_final_scores(score_lists: list[list[float]]) -> list[float]:
    """Sum the per-column scores for every row."""
    final_scores: list[float] = [0 for i in range(len(score_lists[0]))]
    for slist in score_lists:
        for j, ele in enumerate(slist):
            final_scores[j] = final_scores[j] + ele
    return final_scores


def procentual_proximity(source_data: list[list[float]], weights: list[int]) -> list[list[float]]:
    """Score every row of source_data and append the combined score to it."""
    data_lists = get_data(source_data)
    score_lists = calculate_each_score(data_lists, weights)
    final_scores = generate_final_scores(score_lists)
    # append scores to source data
    for i, ele in enumerate(final_scores):
        source_data[i].append(ele)
    return source_data
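A quick usage sketch of the scoring helpers above; the sample values are made up, and the entry-point name procentual_proximity follows the reconstruction above:

# Three rows of (price, rating): lower price is better (weight 0),
# higher rating is better (weight 1).
vehicles = [[20.0, 60.0], [23.0, 90.0], [22.0, 50.0]]
scored = procentual_proximity(vehicles, [0, 1])
# Each row now carries its combined score as a final element, e.g. the first
# row becomes [20.0, 60.0, 1.25]: price score 1.0 (cheapest) + rating score 0.25.
print(scored)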
59
1
"""simple docstring""" from __future__ import annotations def lowerCamelCase_( _lowerCamelCase ) -> int: '''simple docstring''' for i in range(1 , len(matrix[0] ) ): matrix[0][i] += matrix[0][i - 1] # preprocessing the first column for i in range(1 , len(_lowerCamelCase ) ): matrix[i][0] += matrix[i - 1][0] # updating the path cost for current position for i in range(1 , len(_lowerCamelCase ) ): for j in range(1 , len(matrix[0] ) ): matrix[i][j] += min(matrix[i - 1][j] , matrix[i][j - 1] ) return matrix[-1][-1] if __name__ == "__main__": import doctest doctest.testmod()
46
'''simple docstring''' import unittest from transformers import is_tf_available from transformers.testing_utils import require_tf if is_tf_available(): import tensorflow as tf from tensorflow.python.eager import context from tensorflow.python.framework import ops from transformers import GradientAccumulator, create_optimizer @require_tf class __UpperCamelCase (unittest.TestCase ): def _a ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) -> Dict: '''simple docstring''' self.assertEqual(len(_lowerCAmelCase ) , len(_lowerCAmelCase ) ) for a, b in zip(_lowerCAmelCase , _lowerCAmelCase ): self.assertAlmostEqual(_lowerCAmelCase , _lowerCAmelCase , delta=_lowerCAmelCase ) def _a ( self ) -> Optional[int]: '''simple docstring''' lowercase = GradientAccumulator() accumulator([tf.constant([1.0, 2.0] )] ) accumulator([tf.constant([-2.0, 1.0] )] ) accumulator([tf.constant([-1.0, 2.0] )] ) with self.assertRaises(_lowerCAmelCase ): accumulator([tf.constant([1.0, 1.0] ), tf.constant([2.0, 2.0] )] ) self.assertEqual(accumulator.step , 3 ) self.assertEqual(len(accumulator.gradients ) , 1 ) self.assertListAlmostEqual(accumulator.gradients[0].numpy().tolist() , [-2.0, 5.0] , tol=1E-2 ) accumulator.reset() self.assertEqual(accumulator.step , 0 ) self.assertListAlmostEqual(accumulator.gradients[0].numpy().tolist() , [0.0, 0.0] , tol=1E-2 ) def _a ( self ) -> int: '''simple docstring''' lowercase = None ops.enable_eager_execution_internal() lowercase = tf.config.list_physical_devices("""CPU""" ) if len(_lowerCAmelCase ) == 1: tf.config.set_logical_device_configuration( physical_devices[0] , [tf.config.LogicalDeviceConfiguration(), tf.config.LogicalDeviceConfiguration()] ) lowercase = tf.config.list_logical_devices(device_type="""CPU""" ) lowercase = tf.distribute.MirroredStrategy(devices=devices[:2] ) with strategy.scope(): lowercase = GradientAccumulator() lowercase = tf.Variable([4.0, 3.0] ) lowercase , lowercase = create_optimizer(5E-5 , 10 , 5 ) lowercase = tf.Variable([0.0, 0.0] , trainable=_lowerCAmelCase ) def accumulate_on_replica(_lowerCAmelCase ): accumulator([gradient] ) def apply_on_replica(): optimizer.apply_gradients(list(zip(accumulator.gradients , [variable] ) ) ) @tf.function def accumulate(_lowerCAmelCase , _lowerCAmelCase ): with strategy.scope(): lowercase = strategy.experimental_local_results(_lowerCAmelCase ) local_variables[0].assign(_lowerCAmelCase ) local_variables[1].assign(_lowerCAmelCase ) strategy.run(_lowerCAmelCase , args=(gradient_placeholder,) ) @tf.function def apply_grad(): with strategy.scope(): strategy.run(_lowerCAmelCase ) def _check_local_values(_lowerCAmelCase , _lowerCAmelCase ): lowercase = strategy.experimental_local_results(accumulator._gradients[0] ) self.assertListAlmostEqual(values[0].value() , _lowerCAmelCase , tol=1E-2 ) self.assertListAlmostEqual(values[1].value() , _lowerCAmelCase , tol=1E-2 ) accumulate([1.0, 2.0] , [-1.0, 1.0] ) accumulate([3.0, -1.0] , [-1.0, -1.0] ) accumulate([-2.0, 2.0] , [3.0, -2.0] ) self.assertEqual(accumulator.step , 3 ) _check_local_values([2.0, 3.0] , [1.0, -2.0] ) apply_grad() self.assertListAlmostEqual(variable.value() , [4.0, 3.0] , tol=1E-2 ) accumulator.reset() self.assertEqual(accumulator.step , 0 ) _check_local_values([0.0, 0.0] , [0.0, 0.0] )
588
0
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available

_import_structure = {
    "configuration_ibert": ["IBERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "IBertConfig", "IBertOnnxConfig"]
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_ibert"] = [
        "IBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "IBertForMaskedLM",
        "IBertForMultipleChoice",
        "IBertForQuestionAnswering",
        "IBertForSequenceClassification",
        "IBertForTokenClassification",
        "IBertModel",
        "IBertPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_ibert import IBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, IBertConfig, IBertOnnxConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_ibert import (
            IBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
            IBertForMaskedLM,
            IBertForMultipleChoice,
            IBertForQuestionAnswering,
            IBertForSequenceClassification,
            IBertForTokenClassification,
            IBertModel,
            IBertPreTrainedModel,
        )
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
220
'''simple docstring''' import inspect import os import unittest from dataclasses import dataclass import torch from accelerate import Accelerator, DistributedDataParallelKwargs, GradScalerKwargs from accelerate.state import AcceleratorState from accelerate.test_utils import execute_subprocess_async, require_cuda, require_multi_gpu from accelerate.utils import KwargsHandler @dataclass class _a ( SCREAMING_SNAKE_CASE ): """simple docstring""" A = 0 A = False A = 3.0 class _a ( unittest.TestCase ): """simple docstring""" def __a ( self ): # If no defaults are changed, `to_kwargs` returns an empty dict. self.assertDictEqual(MockClass().to_kwargs() ,{} ) self.assertDictEqual(MockClass(a=2 ).to_kwargs() ,{'a': 2} ) self.assertDictEqual(MockClass(a=2 ,b=__SCREAMING_SNAKE_CASE ).to_kwargs() ,{'a': 2, 'b': True} ) self.assertDictEqual(MockClass(a=2 ,c=2.25 ).to_kwargs() ,{'a': 2, 'c': 2.25} ) @require_cuda def __a ( self ): # If no defaults are changed, `to_kwargs` returns an empty dict. SCREAMING_SNAKE_CASE : Union[str, Any] = GradScalerKwargs(init_scale=1024 ,growth_factor=2 ) AcceleratorState._reset_state() SCREAMING_SNAKE_CASE : Optional[int] = Accelerator(mixed_precision='fp16' ,kwargs_handlers=[scaler_handler] ) print(accelerator.use_fpaa ) SCREAMING_SNAKE_CASE : int = accelerator.scaler # Check the kwargs have been applied self.assertEqual(scaler._init_scale ,1024.0 ) self.assertEqual(scaler._growth_factor ,2.0 ) # Check the other values are at the default self.assertEqual(scaler._backoff_factor ,0.5 ) self.assertEqual(scaler._growth_interval ,2000 ) self.assertEqual(scaler._enabled ,__SCREAMING_SNAKE_CASE ) @require_multi_gpu def __a ( self ): SCREAMING_SNAKE_CASE : Optional[int] = ['torchrun', f"""--nproc_per_node={torch.cuda.device_count()}""", inspect.getfile(self.__class__ )] execute_subprocess_async(__SCREAMING_SNAKE_CASE ,env=os.environ.copy() ) if __name__ == "__main__": __UpperCAmelCase = DistributedDataParallelKwargs(bucket_cap_mb=15, find_unused_parameters=True) __UpperCAmelCase = Accelerator(kwargs_handlers=[ddp_scaler]) __UpperCAmelCase = torch.nn.Linear(100, 200) __UpperCAmelCase = accelerator.prepare(model) # Check the values changed in kwargs __UpperCAmelCase = '' __UpperCAmelCase = model.bucket_bytes_cap // (1024 * 1024) if observed_bucket_cap_map != 15: error_msg += f"Kwargs badly passed, should have `15` but found {observed_bucket_cap_map}.\n" if model.find_unused_parameters is not True: error_msg += f"Kwargs badly passed, should have `True` but found {model.find_unused_parameters}.\n" # Check the values of the defaults if model.dim != 0: error_msg += f"Default value not respected, should have `0` but found {model.dim}.\n" if model.broadcast_buffers is not True: error_msg += f"Default value not respected, should have `True` but found {model.broadcast_buffers}.\n" if model.gradient_as_bucket_view is not False: error_msg += f"Default value not respected, should have `False` but found {model.gradient_as_bucket_view}.\n" # Raise error at the end to make sure we don't stop at the first failure. if len(error_msg) > 0: raise ValueError(error_msg)
220
1
'''simple docstring''' import torch from diffusers import DDPMScheduler from .test_schedulers import SchedulerCommonTest class lowercase_ (lowerCamelCase__ ): """simple docstring""" SCREAMING_SNAKE_CASE : Dict = (DDPMScheduler,) def SCREAMING_SNAKE_CASE ( self : List[Any] ,**lowercase__ : int ): __lowercase = { '''num_train_timesteps''': 1_0_0_0, '''beta_start''': 0.0_0_0_1, '''beta_end''': 0.0_2, '''beta_schedule''': '''linear''', '''variance_type''': '''fixed_small''', '''clip_sample''': True, } config.update(**lowercase__ ) return config def SCREAMING_SNAKE_CASE ( self : Any ): for timesteps in [1, 5, 1_0_0, 1_0_0_0]: self.check_over_configs(num_train_timesteps=lowercase__ ) def SCREAMING_SNAKE_CASE ( self : Optional[int] ): for beta_start, beta_end in zip([0.0_0_0_1, 0.0_0_1, 0.0_1, 0.1] ,[0.0_0_2, 0.0_2, 0.2, 2] ): self.check_over_configs(beta_start=lowercase__ ,beta_end=lowercase__ ) def SCREAMING_SNAKE_CASE ( self : Optional[Any] ): for schedule in ["linear", "squaredcos_cap_v2"]: self.check_over_configs(beta_schedule=lowercase__ ) def SCREAMING_SNAKE_CASE ( self : List[str] ): for variance in ["fixed_small", "fixed_large", "other"]: self.check_over_configs(variance_type=lowercase__ ) def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ): for clip_sample in [True, False]: self.check_over_configs(clip_sample=lowercase__ ) def SCREAMING_SNAKE_CASE ( self : int ): self.check_over_configs(thresholding=lowercase__ ) for threshold in [0.5, 1.0, 2.0]: for prediction_type in ["epsilon", "sample", "v_prediction"]: self.check_over_configs( thresholding=lowercase__ ,prediction_type=lowercase__ ,sample_max_value=lowercase__ ,) def SCREAMING_SNAKE_CASE ( self : Optional[Any] ): for prediction_type in ["epsilon", "sample", "v_prediction"]: self.check_over_configs(prediction_type=lowercase__ ) def SCREAMING_SNAKE_CASE ( self : Optional[Any] ): for t in [0, 5_0_0, 9_9_9]: self.check_over_forward(time_step=lowercase__ ) def SCREAMING_SNAKE_CASE ( self : List[Any] ): __lowercase = self.scheduler_classes[0] __lowercase = self.get_scheduler_config() __lowercase = scheduler_class(**lowercase__ ) assert torch.sum(torch.abs(scheduler._get_variance(0 ) - 0.0 ) ) < 1e-5 assert torch.sum(torch.abs(scheduler._get_variance(4_8_7 ) - 0.0_0_9_7_9 ) ) < 1e-5 assert torch.sum(torch.abs(scheduler._get_variance(9_9_9 ) - 0.0_2 ) ) < 1e-5 def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ): __lowercase = self.scheduler_classes[0] __lowercase = self.get_scheduler_config() __lowercase = scheduler_class(**lowercase__ ) __lowercase = len(lowercase__ ) __lowercase = self.dummy_model() __lowercase = self.dummy_sample_deter __lowercase = torch.manual_seed(0 ) for t in reversed(range(lowercase__ ) ): # 1. predict noise residual __lowercase = model(lowercase__ ,lowercase__ ) # 2. 
predict previous mean of sample x_t-1 __lowercase = scheduler.step(lowercase__ ,lowercase__ ,lowercase__ ,generator=lowercase__ ).prev_sample # if t > 0: # noise = self.dummy_sample_deter # variance = scheduler.get_variance(t) ** (0.5) * noise # # sample = pred_prev_sample + variance __lowercase = pred_prev_sample __lowercase = torch.sum(torch.abs(lowercase__ ) ) __lowercase = torch.mean(torch.abs(lowercase__ ) ) assert abs(result_sum.item() - 2_5_8.9_6_0_6 ) < 1e-2 assert abs(result_mean.item() - 0.3_3_7_2 ) < 1e-3 def SCREAMING_SNAKE_CASE ( self : Optional[Any] ): __lowercase = self.scheduler_classes[0] __lowercase = self.get_scheduler_config(prediction_type='''v_prediction''' ) __lowercase = scheduler_class(**lowercase__ ) __lowercase = len(lowercase__ ) __lowercase = self.dummy_model() __lowercase = self.dummy_sample_deter __lowercase = torch.manual_seed(0 ) for t in reversed(range(lowercase__ ) ): # 1. predict noise residual __lowercase = model(lowercase__ ,lowercase__ ) # 2. predict previous mean of sample x_t-1 __lowercase = scheduler.step(lowercase__ ,lowercase__ ,lowercase__ ,generator=lowercase__ ).prev_sample # if t > 0: # noise = self.dummy_sample_deter # variance = scheduler.get_variance(t) ** (0.5) * noise # # sample = pred_prev_sample + variance __lowercase = pred_prev_sample __lowercase = torch.sum(torch.abs(lowercase__ ) ) __lowercase = torch.mean(torch.abs(lowercase__ ) ) assert abs(result_sum.item() - 2_0_2.0_2_9_6 ) < 1e-2 assert abs(result_mean.item() - 0.2_6_3_1 ) < 1e-3 def SCREAMING_SNAKE_CASE ( self : Optional[Any] ): __lowercase = self.scheduler_classes[0] __lowercase = self.get_scheduler_config() __lowercase = scheduler_class(**lowercase__ ) __lowercase = [1_0_0, 8_7, 5_0, 1, 0] scheduler.set_timesteps(timesteps=lowercase__ ) __lowercase = scheduler.timesteps for i, timestep in enumerate(lowercase__ ): if i == len(lowercase__ ) - 1: __lowercase = -1 else: __lowercase = timesteps[i + 1] __lowercase = scheduler.previous_timestep(lowercase__ ) __lowercase = prev_t.item() self.assertEqual(lowercase__ ,lowercase__ ) def SCREAMING_SNAKE_CASE ( self : str ): __lowercase = self.scheduler_classes[0] __lowercase = self.get_scheduler_config() __lowercase = scheduler_class(**lowercase__ ) __lowercase = [1_0_0, 8_7, 5_0, 5_1, 0] with self.assertRaises(lowercase__ ,msg='''`custom_timesteps` must be in descending order.''' ): scheduler.set_timesteps(timesteps=lowercase__ ) def SCREAMING_SNAKE_CASE ( self : Tuple ): __lowercase = self.scheduler_classes[0] __lowercase = self.get_scheduler_config() __lowercase = scheduler_class(**lowercase__ ) __lowercase = [1_0_0, 8_7, 5_0, 1, 0] __lowercase = len(lowercase__ ) with self.assertRaises(lowercase__ ,msg='''Can only pass one of `num_inference_steps` or `custom_timesteps`.''' ): scheduler.set_timesteps(num_inference_steps=lowercase__ ,timesteps=lowercase__ ) def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ): __lowercase = self.scheduler_classes[0] __lowercase = self.get_scheduler_config() __lowercase = scheduler_class(**lowercase__ ) __lowercase = [scheduler.config.num_train_timesteps] with self.assertRaises( lowercase__ ,msg='''`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}}''' ,): scheduler.set_timesteps(timesteps=lowercase__ )
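Outside the test harness, the reverse-diffusion loop being verified above reduces to a few lines. A minimal sketch, in which a zero tensor stands in for a real UNet's noise prediction:

import torch
from diffusers import DDPMScheduler

scheduler = DDPMScheduler(num_train_timesteps=1000, beta_schedule="linear")
scheduler.set_timesteps(50)  # inference can use far fewer steps than training

sample = torch.randn(1, 3, 8, 8)  # stand-in for pure latent noise
for t in scheduler.timesteps:
    model_output = torch.zeros_like(sample)  # a real pipeline calls the UNet here
    sample = scheduler.step(model_output, t, sample).prev_sample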
41
"""simple docstring""" import os import random import sys from . import cryptomath_module as cryptoMath # noqa: N812 from . import rabin_miller as rabinMiller # noqa: N812 def lowerCamelCase_( ) -> None: '''simple docstring''' print("Making key files..." ) make_key_files("rsa" , 1024 ) print("Key files generation successful." ) def lowerCamelCase_( _lowerCamelCase ) -> tuple[tuple[int, int], tuple[int, int]]: '''simple docstring''' print("Generating prime p..." ) _lowerCamelCase : List[str] = rabinMiller.generate_large_prime(_lowerCamelCase ) print("Generating prime q..." ) _lowerCamelCase : Tuple = rabinMiller.generate_large_prime(_lowerCamelCase ) _lowerCamelCase : Dict = p * q print("Generating e that is relatively prime to (p - 1) * (q - 1)..." ) while True: _lowerCamelCase : Tuple = random.randrange(2 ** (key_size - 1) , 2 ** (key_size) ) if cryptoMath.gcd(_lowerCamelCase , (p - 1) * (q - 1) ) == 1: break print("Calculating d that is mod inverse of e..." ) _lowerCamelCase : str = cryptoMath.find_mod_inverse(_lowerCamelCase , (p - 1) * (q - 1) ) _lowerCamelCase : Dict = (n, e) _lowerCamelCase : Dict = (n, d) return (public_key, private_key) def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase ) -> None: '''simple docstring''' if os.path.exists(F"""{name}_pubkey.txt""" ) or os.path.exists(F"""{name}_privkey.txt""" ): print("\nWARNING:" ) print( F"""\"{name}_pubkey.txt\" or \"{name}_privkey.txt\" already exists. \n""" "Use a different name or delete these files and re-run this program." ) sys.exit() _lowerCamelCase, _lowerCamelCase : Dict = generate_key(_lowerCamelCase ) print(F"""\nWriting public key to file {name}_pubkey.txt...""" ) with open(F"""{name}_pubkey.txt""" , "w" ) as out_file: out_file.write(F"""{key_size},{public_key[0]},{public_key[1]}""" ) print(F"""Writing private key to file {name}_privkey.txt...""" ) with open(F"""{name}_privkey.txt""" , "w" ) as out_file: out_file.write(F"""{key_size},{private_key[0]},{private_key[1]}""" ) if __name__ == "__main__": main()
46
0
from __future__ import annotations

solution = []


def is_safe(board: list[list[int]], row: int, column: int) -> bool:
    """Check that no already-placed queen attacks (row, column)."""
    for i in range(len(board)):
        if board[row][i] == 1:
            return False
    for i in range(len(board)):
        if board[i][column] == 1:
            return False
    for i, j in zip(range(row, -1, -1), range(column, -1, -1)):
        if board[i][j] == 1:
            return False
    for i, j in zip(range(row, -1, -1), range(column, len(board))):
        if board[i][j] == 1:
            return False
    return True


def solve(board: list[list[int]], row: int) -> bool:
    """Place queens row by row, backtracking on conflicts."""
    if row >= len(board):
        # Store a copy: board itself is mutated as the search backtracks.
        solution.append([line[:] for line in board])
        printboard(board)
        print()
        return True
    for i in range(len(board)):
        if is_safe(board, row, i):
            board[row][i] = 1
            solve(board, row + 1)
            board[row][i] = 0
    return False


def printboard(board: list[list[int]]) -> None:
    for i in range(len(board)):
        for j in range(len(board)):
            if board[i][j] == 1:
                print("Q", end=" ")
            else:
                print(".", end=" ")
        print()


# n = int(input("The no. of queens"))
n = 8
board = [[0 for i in range(n)] for j in range(n)]
solve(board, 0)
print("The total no. of solutions are :", len(solution))
712
import argparse import json import os import evaluate import torch from datasets import load_dataset from torch.optim import AdamW from torch.utils.data import DataLoader from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed from accelerate import Accelerator, DistributedType from accelerate.utils.deepspeed import DummyOptim, DummyScheduler __UpperCAmelCase : Optional[Any] = 1_6 __UpperCAmelCase : Optional[int] = 3_2 def lowercase_ ( __snake_case : Accelerator , __snake_case : int = 16 , __snake_case : str = "bert-base-cased" ) -> Optional[Any]: '''simple docstring''' snake_case__ :int = AutoTokenizer.from_pretrained(__snake_case ) snake_case__ :Optional[int] = load_dataset("glue" , "mrpc" ) def tokenize_function(__snake_case : Tuple ): # max_length=None => use the model max length (it's actually the default) snake_case__ :Any = tokenizer(examples["sentence1"] , examples["sentence2"] , truncation=__snake_case , max_length=__snake_case ) return outputs # Apply the method we just defined to all the examples in all the splits of the dataset snake_case__ :List[Any] = datasets.map( __snake_case , batched=__snake_case , remove_columns=["idx", "sentence1", "sentence2"] , load_from_cache_file=__snake_case ) # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the # transformers library snake_case__ :Any = tokenized_datasets.rename_column("label" , "labels" ) def collate_fn(__snake_case : Dict ): # On TPU it's best to pad everything to the same length or training will be very slow. if accelerator.distributed_type == DistributedType.TPU: return tokenizer.pad(__snake_case , padding="max_length" , max_length=1_28 , return_tensors="pt" ) return tokenizer.pad(__snake_case , padding="longest" , return_tensors="pt" ) # Instantiate dataloaders. snake_case__ :Any = DataLoader( tokenized_datasets["train"] , shuffle=__snake_case , collate_fn=__snake_case , batch_size=__snake_case ) snake_case__ :Tuple = DataLoader( tokenized_datasets["validation"] , shuffle=__snake_case , collate_fn=__snake_case , batch_size=__snake_case ) return train_dataloader, eval_dataloader def lowercase_ ( __snake_case : List[Any] , __snake_case : Union[str, Any] , __snake_case : int , __snake_case : Optional[int] ) -> Tuple: '''simple docstring''' model.eval() snake_case__ :Union[str, Any] = 0 for step, batch in enumerate(__snake_case ): # We could avoid this line since we set the accelerator with `device_placement=True`. 
batch.to(accelerator.device ) with torch.no_grad(): snake_case__ :List[Any] = model(**__snake_case ) snake_case__ :Any = outputs.logits.argmax(dim=-1 ) # It is slightly faster to call this once, than multiple times snake_case__ , snake_case__ :Tuple = accelerator.gather( (predictions, batch["labels"]) ) # If we are in a multiprocess environment, the last batch has duplicates if accelerator.use_distributed: if step == len(__snake_case ) - 1: snake_case__ :List[str] = predictions[: len(eval_dataloader.dataset ) - samples_seen] snake_case__ :Optional[int] = references[: len(eval_dataloader.dataset ) - samples_seen] else: samples_seen += references.shape[0] metric.add_batch( predictions=__snake_case , references=__snake_case , ) snake_case__ :int = metric.compute() return eval_metric["accuracy"] def lowercase_ ( __snake_case : Union[str, Any] , __snake_case : Optional[Any] ) -> Any: '''simple docstring''' snake_case__ :Any = Accelerator() # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs snake_case__ :Union[str, Any] = config["lr"] snake_case__ :List[str] = int(config["num_epochs"] ) snake_case__ :Optional[Any] = int(config["seed"] ) snake_case__ :List[Any] = int(config["batch_size"] ) snake_case__ :List[Any] = args.model_name_or_path set_seed(__snake_case ) snake_case__ , snake_case__ :List[Any] = get_dataloaders(__snake_case , __snake_case , __snake_case ) # Instantiate the model (we build the model here so that the seed also control new weights initialization) snake_case__ :List[Any] = AutoModelForSequenceClassification.from_pretrained(__snake_case , return_dict=__snake_case ) # Instantiate optimizer snake_case__ :int = ( AdamW if accelerator.state.deepspeed_plugin is None or "optimizer" not in accelerator.state.deepspeed_plugin.deepspeed_config else DummyOptim ) snake_case__ :Tuple = optimizer_cls(params=model.parameters() , lr=__snake_case ) if accelerator.state.deepspeed_plugin is not None: snake_case__ :List[str] = accelerator.state.deepspeed_plugin.deepspeed_config[ "gradient_accumulation_steps" ] else: snake_case__ :Any = 1 snake_case__ :List[Any] = (len(__snake_case ) * num_epochs) // gradient_accumulation_steps # Instantiate scheduler if ( accelerator.state.deepspeed_plugin is None or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config ): snake_case__ :Optional[Any] = get_linear_schedule_with_warmup( optimizer=__snake_case , num_warmup_steps=0 , num_training_steps=__snake_case , ) else: snake_case__ :Any = DummyScheduler(__snake_case , total_num_steps=__snake_case , warmup_num_steps=0 ) # Prepare everything # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the # prepare method. 
snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ :int = accelerator.prepare( __snake_case , __snake_case , __snake_case , __snake_case , __snake_case ) # We need to keep track of how many total steps we have iterated over snake_case__ :Dict = 0 # We also need to keep track of the stating epoch so files are named properly snake_case__ :Union[str, Any] = 0 snake_case__ :List[str] = evaluate.load("glue" , "mrpc" ) snake_case__ :Optional[Any] = num_epochs if args.partial_train_epoch is not None: snake_case__ :List[Any] = args.partial_train_epoch if args.resume_from_checkpoint: accelerator.load_state(args.resume_from_checkpoint ) snake_case__ :Union[str, Any] = args.resume_from_checkpoint.split("epoch_" )[1] snake_case__ :Dict = "" for char in epoch_string: if char.isdigit(): state_epoch_num += char else: break snake_case__ :str = int(__snake_case ) + 1 snake_case__ :List[Any] = evaluation_loop(__snake_case , __snake_case , __snake_case , __snake_case ) accelerator.print("resumed checkpoint performance:" , __snake_case ) accelerator.print("resumed checkpoint's scheduler's lr:" , lr_scheduler.get_lr()[0] ) accelerator.print("resumed optimizers's lr:" , optimizer.param_groups[0]["lr"] ) with open(os.path.join(args.output_dir , F'state_{starting_epoch-1}.json' ) , "r" ) as f: snake_case__ :Tuple = json.load(__snake_case ) assert resumed_state["accuracy"] == accuracy, "Accuracy mismatch, loading from checkpoint failed" assert ( resumed_state["lr"] == lr_scheduler.get_lr()[0] ), "Scheduler learning rate mismatch, loading from checkpoint failed" assert ( resumed_state["optimizer_lr"] == optimizer.param_groups[0]["lr"] ), "Optimizer learning rate mismatch, loading from checkpoint failed" assert resumed_state["epoch"] == starting_epoch - 1, "Epoch mismatch, loading from checkpoint failed" return # Now we train the model snake_case__ :Optional[int] = {} for epoch in range(__snake_case , __snake_case ): model.train() for step, batch in enumerate(__snake_case ): snake_case__ :str = model(**__snake_case ) snake_case__ :List[str] = outputs.loss snake_case__ :List[Any] = loss / gradient_accumulation_steps accelerator.backward(__snake_case ) if step % gradient_accumulation_steps == 0: optimizer.step() lr_scheduler.step() optimizer.zero_grad() overall_step += 1 snake_case__ :int = F'epoch_{epoch}' snake_case__ :str = os.path.join(args.output_dir , __snake_case ) accelerator.save_state(__snake_case ) snake_case__ :Union[str, Any] = evaluation_loop(__snake_case , __snake_case , __snake_case , __snake_case ) snake_case__ :List[str] = accuracy snake_case__ :List[str] = lr_scheduler.get_lr()[0] snake_case__ :List[Any] = optimizer.param_groups[0]["lr"] snake_case__ :Dict = epoch snake_case__ :List[Any] = overall_step accelerator.print(F'epoch {epoch}:' , __snake_case ) accelerator.wait_for_everyone() if accelerator.is_main_process: with open(os.path.join(args.output_dir , F'state_{epoch}.json' ) , "w" ) as f: json.dump(__snake_case , __snake_case ) def lowercase_ ( ) -> Any: '''simple docstring''' snake_case__ :List[Any] = argparse.ArgumentParser(description="Simple example of training script tracking peak GPU memory usage." ) parser.add_argument( "--model_name_or_path" , type=__snake_case , default="bert-base-cased" , help="Path to pretrained model or model identifier from huggingface.co/models." , required=__snake_case , ) parser.add_argument( "--output_dir" , type=__snake_case , default="." , help="Optional save directory where all checkpoint folders will be stored. 
Default is the current working directory." , ) parser.add_argument( "--resume_from_checkpoint" , type=__snake_case , default=__snake_case , help="If the training should continue from a checkpoint folder." , ) parser.add_argument( "--partial_train_epoch" , type=__snake_case , default=__snake_case , help="If passed, the training will stop after this number of epochs." , ) parser.add_argument( "--num_epochs" , type=__snake_case , default=2 , help="Number of train epochs." , ) snake_case__ :Any = parser.parse_args() snake_case__ :int = {"lr": 2e-5, "num_epochs": args.num_epochs, "seed": 42, "batch_size": 16} training_function(__snake_case , __snake_case ) if __name__ == "__main__": main()
57
0
from typing import List, Union from ..utils import ( add_end_docstrings, is_tf_available, is_torch_available, is_vision_available, logging, requires_backends, ) from .base import PIPELINE_INIT_ARGS, Pipeline if is_vision_available(): from PIL import Image from ..image_utils import load_image if is_tf_available(): from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_VISION_2_SEQ_MAPPING if is_torch_available(): import torch from ..models.auto.modeling_auto import MODEL_FOR_VISION_2_SEQ_MAPPING snake_case__ : Union[str, Any] = logging.get_logger(__name__) @add_end_docstrings(UpperCAmelCase__ ) class _a ( UpperCAmelCase__ ): """simple docstring""" def __init__( self , *_UpperCAmelCase , **_UpperCAmelCase ) -> Any: super().__init__(*_UpperCAmelCase , **_UpperCAmelCase ) requires_backends(self , 'vision' ) self.check_model_type( TF_MODEL_FOR_VISION_2_SEQ_MAPPING if self.framework == 'tf' else MODEL_FOR_VISION_2_SEQ_MAPPING ) def _UpperCAmelCase ( self , _UpperCAmelCase=None , _UpperCAmelCase=None , _UpperCAmelCase=None ) -> List[str]: UpperCamelCase_ = {} UpperCamelCase_ = {} if prompt is not None: UpperCamelCase_ = prompt if generate_kwargs is not None: UpperCamelCase_ = generate_kwargs if max_new_tokens is not None: if "generate_kwargs" not in forward_kwargs: UpperCamelCase_ = {} if "max_new_tokens" in forward_kwargs["generate_kwargs"]: raise ValueError( '\'max_new_tokens\' is defined twice, once in \'generate_kwargs\' and once as a direct parameter,' ' please use only one' ) UpperCamelCase_ = max_new_tokens return preprocess_params, forward_kwargs, {} def __call__( self , _UpperCAmelCase , **_UpperCAmelCase ) -> int: return super().__call__(_UpperCAmelCase , **_UpperCAmelCase ) def _UpperCAmelCase ( self , _UpperCAmelCase , _UpperCAmelCase=None ) -> str: UpperCamelCase_ = load_image(_UpperCAmelCase ) if prompt is not None: if not isinstance(_UpperCAmelCase , _UpperCAmelCase ): raise ValueError( f"""Received an invalid text input, got - {type(_UpperCAmelCase )} - but expected a single string. """ 'Note also that one single text can be provided for conditional image to text generation.' ) UpperCamelCase_ = self.model.config.model_type if model_type == "git": UpperCamelCase_ = self.image_processor(images=_UpperCAmelCase , return_tensors=self.framework ) UpperCamelCase_ = self.tokenizer(text=_UpperCAmelCase , add_special_tokens=_UpperCAmelCase ).input_ids UpperCamelCase_ = [self.tokenizer.cls_token_id] + input_ids UpperCamelCase_ = torch.tensor(_UpperCAmelCase ).unsqueeze(0 ) model_inputs.update({'input_ids': input_ids} ) elif model_type == "pix2struct": UpperCamelCase_ = self.image_processor(images=_UpperCAmelCase , header_text=_UpperCAmelCase , return_tensors=self.framework ) elif model_type != "vision-encoder-decoder": # vision-encoder-decoder does not support conditional generation UpperCamelCase_ = self.image_processor(images=_UpperCAmelCase , return_tensors=self.framework ) UpperCamelCase_ = self.tokenizer(_UpperCAmelCase , return_tensors=self.framework ) model_inputs.update(_UpperCAmelCase ) else: raise ValueError(f"""Model type {model_type} does not support conditional text generation""" ) else: UpperCamelCase_ = self.image_processor(images=_UpperCAmelCase , return_tensors=self.framework ) if self.model.config.model_type == "git" and prompt is None: UpperCamelCase_ = None return model_inputs def _UpperCAmelCase ( self , _UpperCAmelCase , _UpperCAmelCase=None ) -> Any: # Git model sets `model_inputs["input_ids"] = None` in `preprocess` (when `prompt=None`). 
In batch model, the # pipeline will group them into a list of `None`, which fail `_forward`. Avoid this by checking it first. if ( "input_ids" in model_inputs and isinstance(model_inputs['input_ids'] , _UpperCAmelCase ) and all(x is None for x in model_inputs['input_ids'] ) ): UpperCamelCase_ = None if generate_kwargs is None: UpperCamelCase_ = {} # FIXME: We need to pop here due to a difference in how `generation.py` and `generation.tf_utils.py` # parse inputs. In the Tensorflow version, `generate` raises an error if we don't use `input_ids` whereas # the PyTorch version matches it with `self.model.main_input_name` or `self.model.encoder.main_input_name` # in the `_prepare_model_inputs` method. UpperCamelCase_ = model_inputs.pop(self.model.main_input_name ) UpperCamelCase_ = self.model.generate(_UpperCAmelCase , **_UpperCAmelCase , **_UpperCAmelCase ) return model_outputs def _UpperCAmelCase ( self , _UpperCAmelCase ) -> Any: UpperCamelCase_ = [] for output_ids in model_outputs: UpperCamelCase_ = { 'generated_text': self.tokenizer.decode( _UpperCAmelCase , skip_special_tokens=_UpperCAmelCase , ) } records.append(_UpperCAmelCase ) return records
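End to end, the class above is normally driven through transformers' pipeline factory. A hedged sketch; the model ID and example caption are illustrative, not guaranteed outputs:

from transformers import pipeline

captioner = pipeline("image-to-text", model="ydshieh/vit-gpt2-coco-en")
out = captioner(
    "https://huggingface.co/datasets/Narsil/image_dummy/raw/main/parrots.png",
    max_new_tokens=20,  # forwarded into generate_kwargs by the parameter plumbing above
)
# e.g. [{'generated_text': 'two birds are standing next to each other'}]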
23
import argparse
import os

import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed

from accelerate import Accelerator, DistributedType

########################################################################
# This is a fully working simple example to use Accelerate
# and perform gradient accumulation
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
#   - single CPU or single GPU
#   - multi GPUs (using PyTorch distributed mode)
#   - (multi) TPUs
#   - fp16 (mixed-precision) or fp32 (normal precision)
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################

MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32


def get_dataloaders(accelerator: Accelerator, batch_size: int = 16):
    tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
    datasets = load_dataset("glue", "mrpc")

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    # starting with the main process first:
    with accelerator.main_process_first():
        tokenized_datasets = datasets.map(
            tokenize_function,
            batched=True,
            remove_columns=["idx", "sentence1", "sentence2"],
        )

    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16
        if accelerator.mixed_precision == "fp8":
            pad_to_multiple_of = 16
        elif accelerator.mixed_precision != "no":
            pad_to_multiple_of = 8
        else:
            pad_to_multiple_of = None

        return tokenizer.pad(
            examples,
            padding="longest",
            max_length=max_length,
            pad_to_multiple_of=pad_to_multiple_of,
            return_tensors="pt",
        )

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size
    )
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE
    )

    return train_dataloader, eval_dataloader


# For testing only
if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
    from accelerate.test_utils.training import mocked_dataloaders

    get_dataloaders = mocked_dataloaders  # noqa: F811


def training_function(config, args):
    # For testing only
    if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
        config["num_epochs"] = 2
    # New Code #
    gradient_accumulation_steps = int(args.gradient_accumulation_steps)
    # Initialize accelerator
    accelerator = Accelerator(
        cpu=args.cpu, mixed_precision=args.mixed_precision, gradient_accumulation_steps=gradient_accumulation_steps
    )
    if accelerator.distributed_type == DistributedType.TPU and gradient_accumulation_steps > 1:
        raise NotImplementedError(
            "Gradient accumulation on TPUs is currently not supported. Pass `gradient_accumulation_steps=1`"
        )
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])

    metric = evaluate.load("glue", "mrpc")

    set_seed(seed)
    train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size)

    # Instantiate the model (we build the model here so that the seed also control new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained("bert-base-cased", return_dict=True)

    # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
    # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
    # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
    model = model.to(accelerator.device)

    # Instantiate optimizer
    optimizer = AdamW(params=model.parameters(), lr=lr)

    # Instantiate scheduler
    lr_scheduler = get_linear_schedule_with_warmup(
        optimizer=optimizer,
        num_warmup_steps=100,
        num_training_steps=(len(train_dataloader) * num_epochs),
    )

    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
    )

    # Now we train the model
    for epoch in range(num_epochs):
        model.train()
        for step, batch in enumerate(train_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            # New code #
            # We use the new `accumulate` context manager to perform gradient accumulation
            # We also currently do not support TPUs nor advise it as bugs were found on the XLA side when running our tests.
            with accelerator.accumulate(model):
                output = model(**batch)
                loss = output.loss
                accelerator.backward(loss)
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()

        model.eval()
        for step, batch in enumerate(eval_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            with torch.no_grad():
                outputs = model(**batch)
            predictions = outputs.logits.argmax(dim=-1)
            predictions, references = accelerator.gather_for_metrics((predictions, batch["labels"]))
            metric.add_batch(
                predictions=predictions,
                references=references,
            )

        eval_metric = metric.compute()
        # Use accelerator.print to print only on the main process.
        accelerator.print(f"epoch {epoch}:", eval_metric)


def main():
    parser = argparse.ArgumentParser(description="Simple example of training script.")
    parser.add_argument(
        "--mixed_precision",
        type=str,
        default=None,
        choices=["no", "fp16", "bf16", "fp8"],
        help="Whether to use mixed precision. Choose "
        "between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10 "
        "and an Nvidia Ampere GPU.",
    )
    # New Code #
    parser.add_argument(
        "--gradient_accumulation_steps",
        type=int,
        default=1,
        help="The number of minibatches to be run before gradients are accumulated.",
    )
    parser.add_argument("--cpu", action="store_true", help="If passed, will train on the CPU.")
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": 3, "seed": 42, "batch_size": 16}
    training_function(config, args)


if __name__ == "__main__":
    main()
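The heart of the script above is the `accumulate` context manager, which defers gradient synchronization and the optimizer step until `gradient_accumulation_steps` batches have been processed. A minimal, self-contained sketch of that pattern follows; the linear model and random data are illustrative stand-ins, not part of the example above.

import torch
from torch.utils.data import DataLoader, TensorDataset

from accelerate import Accelerator

accelerator = Accelerator(gradient_accumulation_steps=4)

# Toy stand-ins for the real model and data.
model = torch.nn.Linear(10, 2)
optimizer = torch.optim.AdamW(model.parameters(), lr=1e-3)
dataset = TensorDataset(torch.randn(64, 10), torch.randint(0, 2, (64,)))
dataloader = DataLoader(dataset, batch_size=8)

model, optimizer, dataloader = accelerator.prepare(model, optimizer, dataloader)

for inputs, targets in dataloader:
    # Inside `accumulate`, backward() only accumulates local gradients; the
    # optimizer step and the gradient sync fire once every 4 batches.
    with accelerator.accumulate(model):
        loss = torch.nn.functional.cross_entropy(model(inputs), targets)
        accelerator.backward(loss)
        optimizer.step()
        optimizer.zero_grad()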
23
1
import re


def dna(dna: str) -> str:
    """
    Return the complementary strand of a DNA sequence.

    >>> dna("GCTA")
    'CGAT'
    >>> dna("GCTAX")
    Traceback (most recent call last):
        ...
    ValueError: Invalid Strand
    """
    if len(re.findall("[ATCG]", dna)) != len(dna):
        raise ValueError("Invalid Strand")

    return dna.translate(dna.maketrans("ATCG", "TAGC"))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
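The same translation-table idiom extends naturally to the reverse complement, which reads the complementary strand in the opposite direction; a small sketch (the helper name is illustrative, not from the file above):

def reverse_complement(strand: str) -> str:
    # Complement each base, then reverse the strand.
    return strand.translate(str.maketrans("ATCG", "TAGC"))[::-1]


assert reverse_complement("GCTA") == "TAGC"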
718
from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available lowerCamelCase__ = { """configuration_luke""": ["""LUKE_PRETRAINED_CONFIG_ARCHIVE_MAP""", """LukeConfig"""], """tokenization_luke""": ["""LukeTokenizer"""], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCamelCase__ = [ """LUKE_PRETRAINED_MODEL_ARCHIVE_LIST""", """LukeForEntityClassification""", """LukeForEntityPairClassification""", """LukeForEntitySpanClassification""", """LukeForMultipleChoice""", """LukeForQuestionAnswering""", """LukeForSequenceClassification""", """LukeForTokenClassification""", """LukeForMaskedLM""", """LukeModel""", """LukePreTrainedModel""", ] if TYPE_CHECKING: from .configuration_luke import LUKE_PRETRAINED_CONFIG_ARCHIVE_MAP, LukeConfig from .tokenization_luke import LukeTokenizer try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_luke import ( LUKE_PRETRAINED_MODEL_ARCHIVE_LIST, LukeForEntityClassification, LukeForEntityPairClassification, LukeForEntitySpanClassification, LukeForMaskedLM, LukeForMultipleChoice, LukeForQuestionAnswering, LukeForSequenceClassification, LukeForTokenClassification, LukeModel, LukePreTrainedModel, ) else: import sys lowerCamelCase__ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
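Nothing in the block above imports the heavy modeling code eagerly; `_LazyModule` resolves attributes on first access. A sketch of what that enables at the call site, assuming a standard `transformers` install with PyTorch and the public `studio-ousia/luke-base` checkpoint:

from transformers import LukeModel, LukeTokenizer

# The torch-backed modeling module is only imported here, not at package import time.
tokenizer = LukeTokenizer.from_pretrained("studio-ousia/luke-base")
model = LukeModel.from_pretrained("studio-ousia/luke-base")

# entity_spans are character offsets of entity mentions in the text.
text = "Beyoncé lives in Los Angeles."
inputs = tokenizer(text, entity_spans=[(0, 7)], return_tensors="pt")
outputs = model(**inputs)
print(outputs.last_hidden_state.shape)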
69
0
import tempfile import unittest import numpy as np from diffusers import ( DDIMScheduler, DPMSolverMultistepScheduler, EulerAncestralDiscreteScheduler, EulerDiscreteScheduler, LMSDiscreteScheduler, OnnxStableDiffusionPipeline, PNDMScheduler, ) from diffusers.utils.testing_utils import is_onnx_available, nightly, require_onnxruntime, require_torch_gpu from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin if is_onnx_available(): import onnxruntime as ort class _UpperCamelCase ( A,unittest.TestCase ): '''simple docstring''' a_ : str = "hf-internal-testing/tiny-random-OnnxStableDiffusionPipeline" def _snake_case ( self : str , _lowerCamelCase : Optional[Any]=0 ): '''simple docstring''' __lowerCamelCase : str = np.random.RandomState(_lowerCamelCase ) __lowerCamelCase : str = { """prompt""": """A painting of a squirrel eating a burger""", """generator""": generator, """num_inference_steps""": 2, """guidance_scale""": 7.5, """output_type""": """numpy""", } return inputs def _snake_case ( self : Union[str, Any] ): '''simple docstring''' __lowerCamelCase : Optional[Any] = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint , provider="""CPUExecutionProvider""" ) pipe.set_progress_bar_config(disable=_lowerCamelCase ) __lowerCamelCase : Optional[Any] = self.get_dummy_inputs() __lowerCamelCase : int = pipe(**_lowerCamelCase ).images __lowerCamelCase : Any = image[0, -3:, -3:, -1] assert image.shape == (1, 1_2_8, 1_2_8, 3) __lowerCamelCase : Union[str, Any] = np.array([0.65_072, 0.58_492, 0.48_219, 0.55_521, 0.53_180, 0.55_939, 0.50_697, 0.39_800, 0.46_455] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 def _snake_case ( self : Optional[Any] ): '''simple docstring''' __lowerCamelCase : Any = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint , provider="""CPUExecutionProvider""" ) __lowerCamelCase : Dict = PNDMScheduler.from_config(pipe.scheduler.config , skip_prk_steps=_lowerCamelCase ) pipe.set_progress_bar_config(disable=_lowerCamelCase ) __lowerCamelCase : List[str] = self.get_dummy_inputs() __lowerCamelCase : Any = pipe(**_lowerCamelCase ).images __lowerCamelCase : Optional[int] = image[0, -3:, -3:, -1] assert image.shape == (1, 1_2_8, 1_2_8, 3) __lowerCamelCase : List[str] = np.array([0.65_863, 0.59_425, 0.49_326, 0.56_313, 0.53_875, 0.56_627, 0.51_065, 0.39_777, 0.46_330] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 def _snake_case ( self : Optional[int] ): '''simple docstring''' __lowerCamelCase : Any = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint , provider="""CPUExecutionProvider""" ) __lowerCamelCase : Tuple = LMSDiscreteScheduler.from_config(pipe.scheduler.config ) pipe.set_progress_bar_config(disable=_lowerCamelCase ) __lowerCamelCase : Any = self.get_dummy_inputs() __lowerCamelCase : List[Any] = pipe(**_lowerCamelCase ).images __lowerCamelCase : Union[str, Any] = image[0, -3:, -3:, -1] assert image.shape == (1, 1_2_8, 1_2_8, 3) __lowerCamelCase : Optional[int] = np.array([0.53_755, 0.60_786, 0.47_402, 0.49_488, 0.51_869, 0.49_819, 0.47_985, 0.38_957, 0.44_279] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 def _snake_case ( self : Union[str, Any] ): '''simple docstring''' __lowerCamelCase : List[str] = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint , provider="""CPUExecutionProvider""" ) __lowerCamelCase : Any = EulerDiscreteScheduler.from_config(pipe.scheduler.config ) pipe.set_progress_bar_config(disable=_lowerCamelCase ) __lowerCamelCase : 
List[str] = self.get_dummy_inputs() __lowerCamelCase : Dict = pipe(**_lowerCamelCase ).images __lowerCamelCase : List[str] = image[0, -3:, -3:, -1] assert image.shape == (1, 1_2_8, 1_2_8, 3) __lowerCamelCase : List[str] = np.array([0.53_755, 0.60_786, 0.47_402, 0.49_488, 0.51_869, 0.49_819, 0.47_985, 0.38_957, 0.44_279] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 def _snake_case ( self : Any ): '''simple docstring''' __lowerCamelCase : Optional[Any] = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint , provider="""CPUExecutionProvider""" ) __lowerCamelCase : Optional[Any] = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config ) pipe.set_progress_bar_config(disable=_lowerCamelCase ) __lowerCamelCase : Dict = self.get_dummy_inputs() __lowerCamelCase : Union[str, Any] = pipe(**_lowerCamelCase ).images __lowerCamelCase : Union[str, Any] = image[0, -3:, -3:, -1] assert image.shape == (1, 1_2_8, 1_2_8, 3) __lowerCamelCase : Dict = np.array([0.53_817, 0.60_812, 0.47_384, 0.49_530, 0.51_894, 0.49_814, 0.47_984, 0.38_958, 0.44_271] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 def _snake_case ( self : int ): '''simple docstring''' __lowerCamelCase : Any = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint , provider="""CPUExecutionProvider""" ) __lowerCamelCase : List[str] = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config ) pipe.set_progress_bar_config(disable=_lowerCamelCase ) __lowerCamelCase : List[str] = self.get_dummy_inputs() __lowerCamelCase : str = pipe(**_lowerCamelCase ).images __lowerCamelCase : Dict = image[0, -3:, -3:, -1] assert image.shape == (1, 1_2_8, 1_2_8, 3) __lowerCamelCase : int = np.array([0.53_895, 0.60_808, 0.47_933, 0.49_608, 0.51_886, 0.49_950, 0.48_053, 0.38_957, 0.44_200] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 def _snake_case ( self : Any ): '''simple docstring''' __lowerCamelCase : Optional[int] = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint , provider="""CPUExecutionProvider""" ) pipe.set_progress_bar_config(disable=_lowerCamelCase ) __lowerCamelCase : List[Any] = self.get_dummy_inputs() __lowerCamelCase : int = 3 * [inputs["""prompt"""]] # forward __lowerCamelCase : Optional[Any] = pipe(**_lowerCamelCase ) __lowerCamelCase : List[Any] = output.images[0, -3:, -3:, -1] __lowerCamelCase : str = self.get_dummy_inputs() __lowerCamelCase : List[str] = 3 * [inputs.pop("""prompt""" )] __lowerCamelCase : Optional[Any] = pipe.tokenizer( _lowerCamelCase , padding="""max_length""" , max_length=pipe.tokenizer.model_max_length , truncation=_lowerCamelCase , return_tensors="""np""" , ) __lowerCamelCase : List[Any] = text_inputs["""input_ids"""] __lowerCamelCase : List[Any] = pipe.text_encoder(input_ids=text_inputs.astype(np.intaa ) )[0] __lowerCamelCase : Optional[int] = prompt_embeds # forward __lowerCamelCase : str = pipe(**_lowerCamelCase ) __lowerCamelCase : Any = output.images[0, -3:, -3:, -1] assert np.abs(image_slice_a.flatten() - image_slice_a.flatten() ).max() < 1E-4 def _snake_case ( self : Tuple ): '''simple docstring''' __lowerCamelCase : Union[str, Any] = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint , provider="""CPUExecutionProvider""" ) pipe.set_progress_bar_config(disable=_lowerCamelCase ) __lowerCamelCase : List[str] = self.get_dummy_inputs() __lowerCamelCase : List[Any] = 3 * ["""this is a negative prompt"""] __lowerCamelCase : Optional[Any] = negative_prompt __lowerCamelCase : int = 3 * 
[inputs["""prompt"""]] # forward __lowerCamelCase : Tuple = pipe(**_lowerCamelCase ) __lowerCamelCase : Optional[Any] = output.images[0, -3:, -3:, -1] __lowerCamelCase : int = self.get_dummy_inputs() __lowerCamelCase : int = 3 * [inputs.pop("""prompt""" )] __lowerCamelCase : str = [] for p in [prompt, negative_prompt]: __lowerCamelCase : Tuple = pipe.tokenizer( _lowerCamelCase , padding="""max_length""" , max_length=pipe.tokenizer.model_max_length , truncation=_lowerCamelCase , return_tensors="""np""" , ) __lowerCamelCase : int = text_inputs["""input_ids"""] embeds.append(pipe.text_encoder(input_ids=text_inputs.astype(np.intaa ) )[0] ) __lowerCamelCase , __lowerCamelCase : int = embeds # forward __lowerCamelCase : Tuple = pipe(**_lowerCamelCase ) __lowerCamelCase : Union[str, Any] = output.images[0, -3:, -3:, -1] assert np.abs(image_slice_a.flatten() - image_slice_a.flatten() ).max() < 1E-4 @nightly @require_onnxruntime @require_torch_gpu class _UpperCamelCase ( unittest.TestCase ): '''simple docstring''' @property def _snake_case ( self : Any ): '''simple docstring''' return ( "CUDAExecutionProvider", { "gpu_mem_limit": "15000000000", # 15GB "arena_extend_strategy": "kSameAsRequested", }, ) @property def _snake_case ( self : str ): '''simple docstring''' __lowerCamelCase : Union[str, Any] = ort.SessionOptions() __lowerCamelCase : Optional[Any] = False return options def _snake_case ( self : List[Any] ): '''simple docstring''' __lowerCamelCase : Union[str, Any] = OnnxStableDiffusionPipeline.from_pretrained( """CompVis/stable-diffusion-v1-4""" , revision="""onnx""" , safety_checker=_lowerCamelCase , feature_extractor=_lowerCamelCase , provider=self.gpu_provider , sess_options=self.gpu_options , ) sd_pipe.set_progress_bar_config(disable=_lowerCamelCase ) __lowerCamelCase : List[str] = """A painting of a squirrel eating a burger""" np.random.seed(0 ) __lowerCamelCase : Tuple = sd_pipe([prompt] , guidance_scale=6.0 , num_inference_steps=1_0 , output_type="""np""" ) __lowerCamelCase : Dict = output.images __lowerCamelCase : Optional[Any] = image[0, -3:, -3:, -1] assert image.shape == (1, 5_1_2, 5_1_2, 3) __lowerCamelCase : List[Any] = np.array([0.0_452, 0.0_390, 0.0_087, 0.0_350, 0.0_617, 0.0_364, 0.0_544, 0.0_523, 0.0_720] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3 def _snake_case ( self : Dict ): '''simple docstring''' __lowerCamelCase : Dict = DDIMScheduler.from_pretrained( """runwayml/stable-diffusion-v1-5""" , subfolder="""scheduler""" , revision="""onnx""" ) __lowerCamelCase : List[str] = OnnxStableDiffusionPipeline.from_pretrained( """runwayml/stable-diffusion-v1-5""" , revision="""onnx""" , scheduler=_lowerCamelCase , safety_checker=_lowerCamelCase , feature_extractor=_lowerCamelCase , provider=self.gpu_provider , sess_options=self.gpu_options , ) sd_pipe.set_progress_bar_config(disable=_lowerCamelCase ) __lowerCamelCase : Dict = """open neural network exchange""" __lowerCamelCase : List[Any] = np.random.RandomState(0 ) __lowerCamelCase : Union[str, Any] = sd_pipe([prompt] , guidance_scale=7.5 , num_inference_steps=1_0 , generator=_lowerCamelCase , output_type="""np""" ) __lowerCamelCase : str = output.images __lowerCamelCase : List[Any] = image[0, -3:, -3:, -1] assert image.shape == (1, 5_1_2, 5_1_2, 3) __lowerCamelCase : List[Any] = np.array([0.2_867, 0.1_974, 0.1_481, 0.7_294, 0.7_251, 0.6_667, 0.4_194, 0.5_642, 0.6_486] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3 def _snake_case ( self : Optional[int] ): '''simple docstring''' 
__lowerCamelCase : Any = LMSDiscreteScheduler.from_pretrained( """runwayml/stable-diffusion-v1-5""" , subfolder="""scheduler""" , revision="""onnx""" ) __lowerCamelCase : Optional[Any] = OnnxStableDiffusionPipeline.from_pretrained( """runwayml/stable-diffusion-v1-5""" , revision="""onnx""" , scheduler=_lowerCamelCase , safety_checker=_lowerCamelCase , feature_extractor=_lowerCamelCase , provider=self.gpu_provider , sess_options=self.gpu_options , ) sd_pipe.set_progress_bar_config(disable=_lowerCamelCase ) __lowerCamelCase : Optional[int] = """open neural network exchange""" __lowerCamelCase : str = np.random.RandomState(0 ) __lowerCamelCase : Dict = sd_pipe([prompt] , guidance_scale=7.5 , num_inference_steps=1_0 , generator=_lowerCamelCase , output_type="""np""" ) __lowerCamelCase : List[Any] = output.images __lowerCamelCase : Optional[Any] = image[0, -3:, -3:, -1] assert image.shape == (1, 5_1_2, 5_1_2, 3) __lowerCamelCase : List[str] = np.array([0.2_306, 0.1_959, 0.1_593, 0.6_549, 0.6_394, 0.5_408, 0.5_065, 0.6_010, 0.6_161] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3 def _snake_case ( self : List[Any] ): '''simple docstring''' __lowerCamelCase : int = 0 def test_callback_fn(_lowerCamelCase : int , _lowerCamelCase : int , _lowerCamelCase : np.ndarray ) -> None: __lowerCamelCase : str = True nonlocal number_of_steps number_of_steps += 1 if step == 0: assert latents.shape == (1, 4, 6_4, 6_4) __lowerCamelCase : Tuple = latents[0, -3:, -3:, -1] __lowerCamelCase : Tuple = np.array( [-0.6_772, -0.3_835, -1.2_456, 0.1_905, -1.0_974, 0.6_967, -1.9_353, 0.0_178, 1.0_167] ) assert np.abs(latents_slice.flatten() - expected_slice ).max() < 1E-3 elif step == 5: assert latents.shape == (1, 4, 6_4, 6_4) __lowerCamelCase : Optional[Any] = latents[0, -3:, -3:, -1] __lowerCamelCase : str = np.array( [-0.3_351, 0.2_241, -0.1_837, -0.2_325, -0.6_577, 0.3_393, -0.0_241, 0.5_899, 1.3_875] ) assert np.abs(latents_slice.flatten() - expected_slice ).max() < 1E-3 __lowerCamelCase : Union[str, Any] = False __lowerCamelCase : int = OnnxStableDiffusionPipeline.from_pretrained( """runwayml/stable-diffusion-v1-5""" , revision="""onnx""" , safety_checker=_lowerCamelCase , feature_extractor=_lowerCamelCase , provider=self.gpu_provider , sess_options=self.gpu_options , ) pipe.set_progress_bar_config(disable=_lowerCamelCase ) __lowerCamelCase : str = """Andromeda galaxy in a bottle""" __lowerCamelCase : Optional[Any] = np.random.RandomState(0 ) pipe( prompt=_lowerCamelCase , num_inference_steps=5 , guidance_scale=7.5 , generator=_lowerCamelCase , callback=_lowerCamelCase , callback_steps=1 , ) assert test_callback_fn.has_been_called assert number_of_steps == 6 def _snake_case ( self : List[str] ): '''simple docstring''' __lowerCamelCase : Dict = OnnxStableDiffusionPipeline.from_pretrained( """runwayml/stable-diffusion-v1-5""" , revision="""onnx""" , safety_checker=_lowerCamelCase , feature_extractor=_lowerCamelCase , provider=self.gpu_provider , sess_options=self.gpu_options , ) assert isinstance(_lowerCamelCase , _lowerCamelCase ) assert pipe.safety_checker is None __lowerCamelCase : str = pipe("""example prompt""" , num_inference_steps=2 ).images[0] assert image is not None # check that there's no error when saving a pipeline with one of the models being None with tempfile.TemporaryDirectory() as tmpdirname: pipe.save_pretrained(_lowerCamelCase ) __lowerCamelCase : str = OnnxStableDiffusionPipeline.from_pretrained(_lowerCamelCase ) # sanity check that the pipeline still works assert 
pipe.safety_checker is None __lowerCamelCase : Optional[int] = pipe("""example prompt""" , num_inference_steps=2 ).images[0] assert image is not None
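Outside the test harness, the pipeline exercised above is driven the same way; a minimal CPU sketch using the same checkpoint and revision as the tests:

import numpy as np
from diffusers import OnnxStableDiffusionPipeline

pipe = OnnxStableDiffusionPipeline.from_pretrained(
    "CompVis/stable-diffusion-v1-4", revision="onnx", provider="CPUExecutionProvider"
)
# ONNX pipelines are seeded with a NumPy RandomState instead of a torch.Generator.
generator = np.random.RandomState(0)
image = pipe(
    "A painting of a squirrel eating a burger",
    num_inference_steps=10,
    generator=generator,
).images[0]
image.save("squirrel.png")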
519
from math import atan, cos, radians, sin, tan

from .haversine_distance import haversine_distance

AXIS_A = 6_378_137.0
AXIS_B = 6_356_752.314245
EQUATORIAL_RADIUS = 6378137


def lamberts_ellipsoidal_distance(
    lat1: float, lon1: float, lat2: float, lon2: float
) -> float:
    # Equation parameters
    flattening = (AXIS_A - AXIS_B) / AXIS_A

    # Parametric latitudes
    # https://en.wikipedia.org/wiki/Latitude#Parametric_(or_reduced)_latitude
    b_lat1 = atan((1 - flattening) * tan(radians(lat1)))
    b_lat2 = atan((1 - flattening) * tan(radians(lat2)))

    # Compute central angle between two points
    # using haversine theta. sigma = haversine_distance / equatorial radius
    sigma = haversine_distance(lat1, lon1, lat2, lon2) / EQUATORIAL_RADIUS

    # Intermediate P and Q values
    p_value = (b_lat1 + b_lat2) / 2
    q_value = (b_lat2 - b_lat1) / 2

    # Intermediate X value
    # X = (sigma - sin(sigma)) * sin^2Pcos^2Q / cos^2(sigma/2)
    x_numerator = (sin(p_value) ** 2) * (cos(q_value) ** 2)
    x_denominator = cos(sigma / 2) ** 2
    x_value = (sigma - sin(sigma)) * (x_numerator / x_denominator)

    # Intermediate Y value
    # Y = (sigma + sin(sigma)) * cos^2Psin^2Q / sin^2(sigma/2)
    y_numerator = (cos(p_value) ** 2) * (sin(q_value) ** 2)
    y_denominator = sin(sigma / 2) ** 2
    y_value = (sigma + sin(sigma)) * (y_numerator / y_denominator)

    return EQUATORIAL_RADIUS * (sigma - ((flattening / 2) * (x_value + y_value)))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
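A quick sanity check of the function, run from inside the package so the relative `haversine_distance` import resolves; the coordinates below are approximate and the printed figure is only a ballpark:

# Illustrative run; coordinates are (latitude, longitude) in degrees.
SAN_FRANCISCO = (37.774856, -122.424227)
NEW_YORK = (40.713019, -74.012647)

meters = lamberts_ellipsoidal_distance(*SAN_FRANCISCO, *NEW_YORK)
print(f"{meters / 1000:.0f} km")  # on the order of 4,100 km for this pair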
519
1
import unittest from transformers import PegasusTokenizer, PegasusTokenizerFast from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, require_torch, slow from transformers.utils import cached_property from ...test_tokenization_common import TokenizerTesterMixin lowerCAmelCase : List[Any] =get_tests_dir("fixtures/test_sentencepiece_no_bos.model") @require_sentencepiece @require_tokenizers class __snake_case ( __lowerCAmelCase , unittest.TestCase ): '''simple docstring''' _snake_case = PegasusTokenizer _snake_case = PegasusTokenizerFast _snake_case = True _snake_case = True def _SCREAMING_SNAKE_CASE ( self : List[Any]) ->List[str]: """simple docstring""" super().setUp() # We have a SentencePiece fixture for testing _lowerCamelCase : List[Any] = PegasusTokenizer(_UpperCamelCase) tokenizer.save_pretrained(self.tmpdirname) @cached_property def _SCREAMING_SNAKE_CASE ( self : Any) ->Union[str, Any]: """simple docstring""" return PegasusTokenizer.from_pretrained("""google/pegasus-large""") def _SCREAMING_SNAKE_CASE ( self : Tuple , **_UpperCamelCase : Tuple) ->PegasusTokenizer: """simple docstring""" return PegasusTokenizer.from_pretrained(self.tmpdirname , **_UpperCamelCase) def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] , _UpperCamelCase : List[Any]) ->Dict: """simple docstring""" return ("This is a test", "This is a test") def _SCREAMING_SNAKE_CASE ( self : List[Any]) ->Any: """simple docstring""" _lowerCamelCase : Optional[int] = """</s>""" _lowerCamelCase : Dict = 1 self.assertEqual(self.get_tokenizer()._convert_token_to_id(_UpperCamelCase) , _UpperCamelCase) self.assertEqual(self.get_tokenizer()._convert_id_to_token(_UpperCamelCase) , _UpperCamelCase) def _SCREAMING_SNAKE_CASE ( self : Tuple) ->int: """simple docstring""" _lowerCamelCase : Dict = list(self.get_tokenizer().get_vocab().keys()) self.assertEqual(vocab_keys[0] , """<pad>""") self.assertEqual(vocab_keys[1] , """</s>""") self.assertEqual(vocab_keys[-1] , """v""") self.assertEqual(len(_UpperCamelCase) , 1103) def _SCREAMING_SNAKE_CASE ( self : List[str]) ->List[str]: """simple docstring""" self.assertEqual(self.get_tokenizer().vocab_size , 1103) def _SCREAMING_SNAKE_CASE ( self : Optional[Any]) ->Dict: """simple docstring""" _lowerCamelCase : str = self.rust_tokenizer_class.from_pretrained(self.tmpdirname) _lowerCamelCase : Tuple = self.tokenizer_class.from_pretrained(self.tmpdirname) _lowerCamelCase : Optional[int] = ( """Let's see which <unk> is the better <unk_token_11> one <mask_1> It seems like this <mask_2> was important""" """ </s> <pad> <pad> <pad>""" ) _lowerCamelCase : List[Any] = rust_tokenizer([raw_input_str] , return_tensors=_UpperCamelCase , add_special_tokens=_UpperCamelCase).input_ids[0] _lowerCamelCase : List[Any] = py_tokenizer([raw_input_str] , return_tensors=_UpperCamelCase , add_special_tokens=_UpperCamelCase).input_ids[0] self.assertListEqual(_UpperCamelCase , _UpperCamelCase) def _SCREAMING_SNAKE_CASE ( self : Any) ->Optional[Any]: """simple docstring""" _lowerCamelCase : List[Any] = self._large_tokenizer # <mask_1> masks whole sentence while <mask_2> masks single word _lowerCamelCase : str = """<mask_1> To ensure a <mask_2> flow of bank resolutions.""" _lowerCamelCase : Optional[int] = [2, 413, 615, 114, 3, 1971, 113, 1679, 1_0710, 107, 1] _lowerCamelCase : List[Any] = tokenizer([raw_input_str] , return_tensors=_UpperCamelCase).input_ids[0] self.assertListEqual(_UpperCamelCase , _UpperCamelCase) def _SCREAMING_SNAKE_CASE ( self : Tuple) ->Any: """simple 
docstring""" _lowerCamelCase : int = self._large_tokenizer # The tracebacks for the following asserts are **better** without messages or self.assertEqual assert tokenizer.vocab_size == 9_6103 assert tokenizer.pad_token_id == 0 assert tokenizer.eos_token_id == 1 assert tokenizer.offset == 103 assert tokenizer.unk_token_id == tokenizer.offset + 2 == 105 assert tokenizer.unk_token == "<unk>" assert tokenizer.model_max_length == 1024 _lowerCamelCase : Any = """To ensure a smooth flow of bank resolutions.""" _lowerCamelCase : Optional[int] = [413, 615, 114, 2291, 1971, 113, 1679, 1_0710, 107, 1] _lowerCamelCase : Any = tokenizer([raw_input_str] , return_tensors=_UpperCamelCase).input_ids[0] self.assertListEqual(_UpperCamelCase , _UpperCamelCase) assert tokenizer.convert_ids_to_tokens([0, 1, 2, 3]) == ["<pad>", "</s>", "<mask_1>", "<mask_2>"] @require_torch def _SCREAMING_SNAKE_CASE ( self : Dict) ->int: """simple docstring""" _lowerCamelCase : Optional[int] = ["""This is going to be way too long.""" * 150, """short example"""] _lowerCamelCase : Dict = ["""not super long but more than 5 tokens""", """tiny"""] _lowerCamelCase : List[Any] = self._large_tokenizer(_UpperCamelCase , padding=_UpperCamelCase , truncation=_UpperCamelCase , return_tensors="""pt""") _lowerCamelCase : int = self._large_tokenizer( text_target=_UpperCamelCase , max_length=5 , padding=_UpperCamelCase , truncation=_UpperCamelCase , return_tensors="""pt""") assert batch.input_ids.shape == (2, 1024) assert batch.attention_mask.shape == (2, 1024) assert targets["input_ids"].shape == (2, 5) assert len(_UpperCamelCase) == 2 # input_ids, attention_mask. @slow def _SCREAMING_SNAKE_CASE ( self : Any) ->str: """simple docstring""" _lowerCamelCase : List[str] = {"""input_ids""": [[3_8979, 143, 1_8485, 606, 130, 2_6669, 8_7686, 121, 5_4189, 1129, 111, 2_6669, 8_7686, 121, 9114, 1_4787, 121, 1_3249, 158, 592, 956, 121, 1_4621, 3_1576, 143, 6_2613, 108, 9688, 930, 4_3430, 1_1562, 6_2613, 304, 108, 1_1443, 897, 108, 9314, 1_7415, 6_3399, 108, 1_1443, 7614, 1_8316, 118, 4284, 7148, 1_2430, 143, 1400, 2_5703, 158, 111, 4284, 7148, 1_1772, 143, 2_1297, 1064, 158, 122, 204, 3506, 1754, 1133, 1_4787, 1581, 115, 3_3224, 4482, 111, 1355, 110, 2_9173, 317, 5_0833, 108, 2_0147, 9_4665, 111, 7_7198, 107, 1], [110, 6_2613, 117, 638, 112, 1133, 121, 2_0098, 1355, 7_9050, 1_3872, 135, 1596, 5_3541, 1352, 141, 1_3039, 5542, 124, 302, 518, 111, 268, 2956, 115, 149, 4427, 107, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [139, 1235, 2799, 1_8289, 1_7780, 204, 109, 9474, 1296, 107, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501 # fmt: on self.tokenizer_integration_test_util( expected_encoding=_UpperCamelCase , model_name="""google/bigbird-pegasus-large-arxiv""" , revision="""ba85d0851d708441f91440d509690f1ab6353415""" , ) @require_sentencepiece @require_tokenizers class __snake_case ( __lowerCAmelCase , unittest.TestCase ): '''simple docstring''' _snake_case = PegasusTokenizer _snake_case = PegasusTokenizerFast _snake_case = True _snake_case = True def _SCREAMING_SNAKE_CASE ( self : Tuple) ->Tuple: """simple docstring""" super().setUp() # We have a SentencePiece fixture for testing _lowerCamelCase : Union[str, Any] = PegasusTokenizer(_UpperCamelCase , offset=0 , mask_token_sent=_UpperCamelCase , mask_token="""[MASK]""") tokenizer.save_pretrained(self.tmpdirname) @cached_property def _SCREAMING_SNAKE_CASE ( self : Any) ->Optional[Any]: """simple docstring""" return PegasusTokenizer.from_pretrained("""google/bigbird-pegasus-large-arxiv""") def _SCREAMING_SNAKE_CASE ( self : List[str] , **_UpperCamelCase : List[Any]) ->PegasusTokenizer: """simple docstring""" return PegasusTokenizer.from_pretrained(self.tmpdirname , **_UpperCamelCase) def _SCREAMING_SNAKE_CASE ( self : int , _UpperCamelCase : Dict) ->List[Any]: """simple docstring""" return ("This is a test", "This is a test") def _SCREAMING_SNAKE_CASE ( self : Dict) ->Dict: """simple docstring""" _lowerCamelCase : Any = self.rust_tokenizer_class.from_pretrained(self.tmpdirname) _lowerCamelCase : List[str] = self.tokenizer_class.from_pretrained(self.tmpdirname) _lowerCamelCase : int = ( """Let's see which <unk> is the better <unk_token> one [MASK] It seems like this [MASK] was important </s>""" """ <pad> <pad> <pad>""" ) _lowerCamelCase : Union[str, Any] = rust_tokenizer([raw_input_str] , return_tensors=_UpperCamelCase , add_special_tokens=_UpperCamelCase).input_ids[0] _lowerCamelCase : str = py_tokenizer([raw_input_str] , return_tensors=_UpperCamelCase , add_special_tokens=_UpperCamelCase).input_ids[0] self.assertListEqual(_UpperCamelCase , _UpperCamelCase) @require_torch def _SCREAMING_SNAKE_CASE ( self : Optional[Any]) ->int: """simple docstring""" _lowerCamelCase : List[Any] = ["""This is going to be way too long.""" * 1000, """short example"""] _lowerCamelCase : Tuple = ["""not super long but more than 5 tokens""", """tiny"""] _lowerCamelCase : Optional[int] = self._large_tokenizer(_UpperCamelCase , padding=_UpperCamelCase , truncation=_UpperCamelCase , return_tensors="""pt""") _lowerCamelCase : List[str] = self._large_tokenizer( text_target=_UpperCamelCase , max_length=5 , padding=_UpperCamelCase , truncation=_UpperCamelCase , return_tensors="""pt""") assert batch.input_ids.shape == (2, 4096) assert batch.attention_mask.shape == (2, 4096) assert targets["input_ids"].shape == (2, 5) assert len(_UpperCamelCase) == 2 # input_ids, attention_mask. def _SCREAMING_SNAKE_CASE ( self : Tuple) ->str: """simple docstring""" _lowerCamelCase : Tuple = ( """This is an example string that is used to test the original TF implementation against the HF""" """ implementation""" ) _lowerCamelCase : int = self._large_tokenizer(_UpperCamelCase).input_ids self.assertListEqual( _UpperCamelCase , [182, 117, 142, 587, 4211, 120, 117, 263, 112, 804, 109, 856, 2_5016, 3137, 464, 109, 2_6955, 3137, 1] , )
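The behaviour these tests pin down (the offset vocabulary, the mask tokens, and `text_target` tokenization) is what users reach through the normal API; a brief sketch assuming the public `google/pegasus-xsum` checkpoint:

from transformers import PegasusTokenizer

tokenizer = PegasusTokenizer.from_pretrained("google/pegasus-xsum")

batch = tokenizer(
    ["PEGASUS was pretrained with gap-sentence generation."],
    padding=True,
    truncation=True,
    return_tensors="pt",
)
targets = tokenizer(text_target=["A short summary."], return_tensors="pt")
print(batch.input_ids.shape, targets.input_ids.shape)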
15
import torch from diffusers import EulerDiscreteScheduler from diffusers.utils import torch_device from .test_schedulers import SchedulerCommonTest class __snake_case ( __lowerCAmelCase ): '''simple docstring''' _snake_case = (EulerDiscreteScheduler,) _snake_case = 10 def _SCREAMING_SNAKE_CASE ( self : Tuple , **_UpperCamelCase : Optional[Any]) ->Optional[Any]: """simple docstring""" _lowerCamelCase : Optional[int] = { """num_train_timesteps""": 1100, """beta_start""": 0.0_0_0_1, """beta_end""": 0.0_2, """beta_schedule""": """linear""", } config.update(**_UpperCamelCase) return config def _SCREAMING_SNAKE_CASE ( self : List[str]) ->List[str]: """simple docstring""" for timesteps in [10, 50, 100, 1000]: self.check_over_configs(num_train_timesteps=_UpperCamelCase) def _SCREAMING_SNAKE_CASE ( self : Union[str, Any]) ->Dict: """simple docstring""" for beta_start, beta_end in zip([0.0_0_0_0_1, 0.0_0_0_1, 0.0_0_1] , [0.0_0_0_2, 0.0_0_2, 0.0_2]): self.check_over_configs(beta_start=_UpperCamelCase , beta_end=_UpperCamelCase) def _SCREAMING_SNAKE_CASE ( self : Any) ->Dict: """simple docstring""" for schedule in ["linear", "scaled_linear"]: self.check_over_configs(beta_schedule=_UpperCamelCase) def _SCREAMING_SNAKE_CASE ( self : Optional[int]) ->Union[str, Any]: """simple docstring""" for prediction_type in ["epsilon", "v_prediction"]: self.check_over_configs(prediction_type=_UpperCamelCase) def _SCREAMING_SNAKE_CASE ( self : List[Any]) ->Union[str, Any]: """simple docstring""" _lowerCamelCase : List[Any] = self.scheduler_classes[0] _lowerCamelCase : str = self.get_scheduler_config() _lowerCamelCase : Any = scheduler_class(**_UpperCamelCase) scheduler.set_timesteps(self.num_inference_steps) _lowerCamelCase : str = torch.manual_seed(0) _lowerCamelCase : str = self.dummy_model() _lowerCamelCase : List[Any] = self.dummy_sample_deter * scheduler.init_noise_sigma _lowerCamelCase : int = sample.to(_UpperCamelCase) for i, t in enumerate(scheduler.timesteps): _lowerCamelCase : Optional[int] = scheduler.scale_model_input(_UpperCamelCase , _UpperCamelCase) _lowerCamelCase : List[str] = model(_UpperCamelCase , _UpperCamelCase) _lowerCamelCase : str = scheduler.step(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , generator=_UpperCamelCase) _lowerCamelCase : Dict = output.prev_sample _lowerCamelCase : Any = torch.sum(torch.abs(_UpperCamelCase)) _lowerCamelCase : Any = torch.mean(torch.abs(_UpperCamelCase)) assert abs(result_sum.item() - 1_0.0_8_0_7) < 1E-2 assert abs(result_mean.item() - 0.0_1_3_1) < 1E-3 def _SCREAMING_SNAKE_CASE ( self : Union[str, Any]) ->Any: """simple docstring""" _lowerCamelCase : int = self.scheduler_classes[0] _lowerCamelCase : Optional[Any] = self.get_scheduler_config(prediction_type="""v_prediction""") _lowerCamelCase : int = scheduler_class(**_UpperCamelCase) scheduler.set_timesteps(self.num_inference_steps) _lowerCamelCase : Any = torch.manual_seed(0) _lowerCamelCase : int = self.dummy_model() _lowerCamelCase : int = self.dummy_sample_deter * scheduler.init_noise_sigma _lowerCamelCase : Dict = sample.to(_UpperCamelCase) for i, t in enumerate(scheduler.timesteps): _lowerCamelCase : Optional[int] = scheduler.scale_model_input(_UpperCamelCase , _UpperCamelCase) _lowerCamelCase : str = model(_UpperCamelCase , _UpperCamelCase) _lowerCamelCase : List[Any] = scheduler.step(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , generator=_UpperCamelCase) _lowerCamelCase : Tuple = output.prev_sample _lowerCamelCase : Union[str, Any] = torch.sum(torch.abs(_UpperCamelCase)) _lowerCamelCase 
: Optional[int] = torch.mean(torch.abs(_UpperCamelCase)) assert abs(result_sum.item() - 0.0_0_0_2) < 1E-2 assert abs(result_mean.item() - 2.2_6_7_6E-0_6) < 1E-3 def _SCREAMING_SNAKE_CASE ( self : List[Any]) ->List[Any]: """simple docstring""" _lowerCamelCase : Union[str, Any] = self.scheduler_classes[0] _lowerCamelCase : int = self.get_scheduler_config() _lowerCamelCase : List[Any] = scheduler_class(**_UpperCamelCase) scheduler.set_timesteps(self.num_inference_steps , device=_UpperCamelCase) _lowerCamelCase : Optional[Any] = torch.manual_seed(0) _lowerCamelCase : Tuple = self.dummy_model() _lowerCamelCase : Optional[Any] = self.dummy_sample_deter * scheduler.init_noise_sigma.cpu() _lowerCamelCase : Tuple = sample.to(_UpperCamelCase) for t in scheduler.timesteps: _lowerCamelCase : List[Any] = scheduler.scale_model_input(_UpperCamelCase , _UpperCamelCase) _lowerCamelCase : List[str] = model(_UpperCamelCase , _UpperCamelCase) _lowerCamelCase : Any = scheduler.step(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , generator=_UpperCamelCase) _lowerCamelCase : List[Any] = output.prev_sample _lowerCamelCase : Any = torch.sum(torch.abs(_UpperCamelCase)) _lowerCamelCase : List[Any] = torch.mean(torch.abs(_UpperCamelCase)) assert abs(result_sum.item() - 1_0.0_8_0_7) < 1E-2 assert abs(result_mean.item() - 0.0_1_3_1) < 1E-3 def _SCREAMING_SNAKE_CASE ( self : int) ->Tuple: """simple docstring""" _lowerCamelCase : List[str] = self.scheduler_classes[0] _lowerCamelCase : Optional[int] = self.get_scheduler_config() _lowerCamelCase : int = scheduler_class(**_UpperCamelCase , use_karras_sigmas=_UpperCamelCase) scheduler.set_timesteps(self.num_inference_steps , device=_UpperCamelCase) _lowerCamelCase : int = torch.manual_seed(0) _lowerCamelCase : Tuple = self.dummy_model() _lowerCamelCase : str = self.dummy_sample_deter * scheduler.init_noise_sigma.cpu() _lowerCamelCase : Optional[int] = sample.to(_UpperCamelCase) for t in scheduler.timesteps: _lowerCamelCase : Tuple = scheduler.scale_model_input(_UpperCamelCase , _UpperCamelCase) _lowerCamelCase : Any = model(_UpperCamelCase , _UpperCamelCase) _lowerCamelCase : List[str] = scheduler.step(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , generator=_UpperCamelCase) _lowerCamelCase : int = output.prev_sample _lowerCamelCase : Tuple = torch.sum(torch.abs(_UpperCamelCase)) _lowerCamelCase : List[str] = torch.mean(torch.abs(_UpperCamelCase)) assert abs(result_sum.item() - 1_2_4.5_2_2_9_9_4_9_9_5_1_1_7_1_9) < 1E-2 assert abs(result_mean.item() - 0.1_6_2_1_3_9_3_2_6_3_3_3_9_9_9_6_3) < 1E-3
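In application code, the scheduler stepped by hand above is usually swapped into a diffusion pipeline instead; a sketch assuming a CUDA machine and the public `runwayml/stable-diffusion-v1-5` checkpoint:

import torch
from diffusers import EulerDiscreteScheduler, StableDiffusionPipeline

pipe = StableDiffusionPipeline.from_pretrained(
    "runwayml/stable-diffusion-v1-5", torch_dtype=torch.float16
)
# Rebuild the scheduler from the pipeline's own config so the beta schedule matches.
pipe.scheduler = EulerDiscreteScheduler.from_config(pipe.scheduler.config)
pipe = pipe.to("cuda")
image = pipe("an astronaut riding a horse", num_inference_steps=30).images[0]
image.save("astronaut.png")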
15
1
'''simple docstring''' import numpy as np from nltk.translate import meteor_score import datasets from datasets.config import importlib_metadata, version _SCREAMING_SNAKE_CASE = version.parse(importlib_metadata.version("nltk")) if NLTK_VERSION >= version.Version("3.6.4"): from nltk import word_tokenize _SCREAMING_SNAKE_CASE = "\\n@inproceedings{banarjee2005,\n title = {{METEOR}: An Automatic Metric for {MT} Evaluation with Improved Correlation with Human Judgments},\n author = {Banerjee, Satanjeev and Lavie, Alon},\n booktitle = {Proceedings of the {ACL} Workshop on Intrinsic and Extrinsic Evaluation Measures for Machine Translation and/or Summarization},\n month = jun,\n year = {2005},\n address = {Ann Arbor, Michigan},\n publisher = {Association for Computational Linguistics},\n url = {https://www.aclweb.org/anthology/W05-0909},\n pages = {65--72},\n}\n" _SCREAMING_SNAKE_CASE = "\\nMETEOR, an automatic metric for machine translation evaluation\nthat is based on a generalized concept of unigram matching between the\nmachine-produced translation and human-produced reference translations.\nUnigrams can be matched based on their surface forms, stemmed forms,\nand meanings; furthermore, METEOR can be easily extended to include more\nadvanced matching strategies. Once all generalized unigram matches\nbetween the two strings have been found, METEOR computes a score for\nthis matching using a combination of unigram-precision, unigram-recall, and\na measure of fragmentation that is designed to directly capture how\nwell-ordered the matched words in the machine translation are in relation\nto the reference.\n\nMETEOR gets an R correlation value of 0.347 with human evaluation on the Arabic\ndata and 0.331 on the Chinese data. This is shown to be an improvement on\nusing simply unigram-precision, unigram-recall and their harmonic F1\ncombination.\n" _SCREAMING_SNAKE_CASE = "\nComputes METEOR score of translated segments against one or more references.\nArgs:\n predictions: list of predictions to score. Each prediction\n should be a string with tokens separated by spaces.\n references: list of reference for each prediction. Each\n reference should be a string with tokens separated by spaces.\n alpha: Parameter for controlling relative weights of precision and recall. default: 0.9\n beta: Parameter for controlling shape of penalty as a function of fragmentation. default: 3\n gamma: Relative weight assigned to fragmentation penalty. 
default: 0.5\nReturns:\n 'meteor': meteor score.\nExamples:\n\n >>> meteor = datasets.load_metric('meteor')\n >>> predictions = [\"It is a guide to action which ensures that the military always obeys the commands of the party\"]\n >>> references = [\"It is a guide to action that ensures that the military will forever heed Party commands\"]\n >>> results = meteor.compute(predictions=predictions, references=references)\n >>> print(round(results[\"meteor\"], 4))\n 0.6944\n" @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION ,_KWARGS_DESCRIPTION ) class lowerCAmelCase_ ( datasets.Metric ): def _snake_case ( self ) -> Optional[int]: return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { "predictions": datasets.Value("string" , id="sequence" ), "references": datasets.Value("string" , id="sequence" ), } ) , codebase_urls=["https://github.com/nltk/nltk/blob/develop/nltk/translate/meteor_score.py"] , reference_urls=[ "https://www.nltk.org/api/nltk.translate.html#module-nltk.translate.meteor_score", "https://en.wikipedia.org/wiki/METEOR", ] , ) def _snake_case ( self , _lowerCAmelCase ) -> Optional[int]: import nltk nltk.download("wordnet" ) if NLTK_VERSION >= version.Version("3.6.5" ): nltk.download("punkt" ) if NLTK_VERSION >= version.Version("3.6.6" ): nltk.download("omw-1.4" ) def _snake_case ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase=0.9 , _lowerCAmelCase=3 , _lowerCAmelCase=0.5 ) -> int: if NLTK_VERSION >= version.Version("3.6.5" ): _lowerCAmelCase = [ meteor_score.single_meteor_score( word_tokenize(_lowerCAmelCase ) , word_tokenize(_lowerCAmelCase ) , alpha=_lowerCAmelCase , beta=_lowerCAmelCase , gamma=_lowerCAmelCase ) for ref, pred in zip(_lowerCAmelCase , _lowerCAmelCase ) ] else: _lowerCAmelCase = [ meteor_score.single_meteor_score(_lowerCAmelCase , _lowerCAmelCase , alpha=_lowerCAmelCase , beta=_lowerCAmelCase , gamma=_lowerCAmelCase ) for ref, pred in zip(_lowerCAmelCase , _lowerCAmelCase ) ] return {"meteor": np.mean(_lowerCAmelCase )}
18
'''simple docstring''' import re import string import numpy as np import datasets _SCREAMING_SNAKE_CASE = "\nReturns the rate at which the input predicted strings exactly match their references, ignoring any strings input as part of the regexes_to_ignore list.\n" _SCREAMING_SNAKE_CASE = "\nArgs:\n predictions: List of predicted texts.\n references: List of reference texts.\n regexes_to_ignore: List, defaults to None. Regex expressions of characters to\n ignore when calculating the exact matches. Note: these regexes are removed\n from the input data before the changes based on the options below (e.g. ignore_case,\n ignore_punctuation, ignore_numbers) are applied.\n ignore_case: Boolean, defaults to False. If true, turns everything\n to lowercase so that capitalization differences are ignored.\n ignore_punctuation: Boolean, defaults to False. If true, removes all punctuation before\n comparing predictions and references.\n ignore_numbers: Boolean, defaults to False. If true, removes all punctuation before\n comparing predictions and references.\nReturns:\n exact_match: Dictionary containing exact_match rate. Possible values are between 0.0 and 100.0, inclusive.\nExamples:\n >>> exact_match = datasets.load_metric(\"exact_match\")\n >>> refs = [\"the cat\", \"theater\", \"YELLING\", \"agent007\"]\n >>> preds = [\"cat?\", \"theater\", \"yelling\", \"agent\"]\n >>> results = exact_match.compute(references=refs, predictions=preds)\n >>> print(round(results[\"exact_match\"], 1))\n 25.0\n\n >>> exact_match = datasets.load_metric(\"exact_match\")\n >>> refs = [\"the cat\", \"theater\", \"YELLING\", \"agent007\"]\n >>> preds = [\"cat?\", \"theater\", \"yelling\", \"agent\"]\n >>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=[\"the \", \"yell\"], ignore_case=True, ignore_punctuation=True)\n >>> print(round(results[\"exact_match\"], 1))\n 50.0\n\n\n >>> exact_match = datasets.load_metric(\"exact_match\")\n >>> refs = [\"the cat\", \"theater\", \"YELLING\", \"agent007\"]\n >>> preds = [\"cat?\", \"theater\", \"yelling\", \"agent\"]\n >>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=[\"the \", \"yell\", \"YELL\"], ignore_case=True, ignore_punctuation=True)\n >>> print(round(results[\"exact_match\"], 1))\n 75.0\n\n >>> exact_match = datasets.load_metric(\"exact_match\")\n >>> refs = [\"the cat\", \"theater\", \"YELLING\", \"agent007\"]\n >>> preds = [\"cat?\", \"theater\", \"yelling\", \"agent\"]\n >>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=[\"the \", \"yell\", \"YELL\"], ignore_case=True, ignore_punctuation=True, ignore_numbers=True)\n >>> print(round(results[\"exact_match\"], 1))\n 100.0\n\n >>> exact_match = datasets.load_metric(\"exact_match\")\n >>> refs = [\"The cat sat on the mat.\", \"Theaters are great.\", \"It's like comparing oranges and apples.\"]\n >>> preds = [\"The cat sat on the mat?\", \"Theaters are great.\", \"It's like comparing apples and oranges.\"]\n >>> results = exact_match.compute(references=refs, predictions=preds)\n >>> print(round(results[\"exact_match\"], 1))\n 33.3\n\n" _SCREAMING_SNAKE_CASE = "\n" @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION ,_KWARGS_DESCRIPTION ) class lowerCAmelCase_ ( datasets.Metric ): def _snake_case ( self ) -> List[str]: return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { "predictions": datasets.Value("string" , 
id="sequence" ), "references": datasets.Value("string" , id="sequence" ), } ) , reference_urls=[] , ) def _snake_case ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase=None , _lowerCAmelCase=False , _lowerCAmelCase=False , _lowerCAmelCase=False , ) -> str: if regexes_to_ignore is not None: for s in regexes_to_ignore: _lowerCAmelCase = np.array([re.sub(_lowerCAmelCase , "" , _lowerCAmelCase ) for x in predictions] ) _lowerCAmelCase = np.array([re.sub(_lowerCAmelCase , "" , _lowerCAmelCase ) for x in references] ) else: _lowerCAmelCase = np.asarray(_lowerCAmelCase ) _lowerCAmelCase = np.asarray(_lowerCAmelCase ) if ignore_case: _lowerCAmelCase = np.char.lower(_lowerCAmelCase ) _lowerCAmelCase = np.char.lower(_lowerCAmelCase ) if ignore_punctuation: _lowerCAmelCase = string.punctuation.maketrans("" , "" , string.punctuation ) _lowerCAmelCase = np.char.translate(_lowerCAmelCase , table=_lowerCAmelCase ) _lowerCAmelCase = np.char.translate(_lowerCAmelCase , table=_lowerCAmelCase ) if ignore_numbers: _lowerCAmelCase = string.digits.maketrans("" , "" , string.digits ) _lowerCAmelCase = np.char.translate(_lowerCAmelCase , table=_lowerCAmelCase ) _lowerCAmelCase = np.char.translate(_lowerCAmelCase , table=_lowerCAmelCase ) _lowerCAmelCase = predictions == references return {"exact_match": np.mean(_lowerCAmelCase ) * 100}
18
1
from operator import delitem, getitem, setitem

import pytest

from data_structures.hashing.hash_map import HashMap


def _get(k):
    return getitem, k


def _set(k, v):
    return setitem, k, v


def _del(k):
    return delitem, k


def _run_operation(obj, fun, *args):
    try:
        return fun(obj, *args), None
    except Exception as e:
        return None, e


_add_items = (
    _set("key_a", "val_a"),
    _set("key_b", "val_b"),
)

_overwrite_items = [
    _set("key_a", "val_a"),
    _set("key_a", "val_b"),
]

_delete_items = [
    _set("key_a", "val_a"),
    _set("key_b", "val_b"),
    _del("key_a"),
    _del("key_b"),
    _set("key_a", "val_a"),
    _del("key_a"),
]

_access_absent_items = [
    _get("key_a"),
    _del("key_a"),
    _set("key_a", "val_a"),
    _del("key_a"),
    _del("key_a"),
    _get("key_a"),
]

_add_with_resize_up = [
    *[_set(x, x) for x in range(5)],  # guaranteed upsize
]

_add_with_resize_down = [
    *[_set(x, x) for x in range(5)],  # guaranteed upsize
    *[_del(x) for x in range(5)],
    _set("key_a", "val_b"),
]


@pytest.mark.parametrize(
    "operations",
    (
        pytest.param(_add_items, id="add items"),
        pytest.param(_overwrite_items, id="overwrite items"),
        pytest.param(_delete_items, id="delete items"),
        pytest.param(_access_absent_items, id="access absent items"),
        pytest.param(_add_with_resize_up, id="add with resize up"),
        pytest.param(_add_with_resize_down, id="add with resize down"),
    ),
)
def test_hash_map_is_the_same_as_dict(operations):
    my = HashMap(initial_block_size=4)
    py = {}
    for _, (fun, *args) in enumerate(operations):
        my_res = _run_operation(my, fun, *args)
        py_res = _run_operation(py, fun, *args)
        assert my_res == py_res
        assert str(my) == str(py)
        assert set(my) == set(py)
        assert len(my) == len(py)
        assert set(my.items()) == set(py.items())


def test_no_new_methods_was_added_to_api():
    def is_public(name: str) -> bool:
        return not name.startswith("_")

    dict_public_names = {name for name in dir({}) if is_public(name)}
    hash_public_names = {name for name in dir(HashMap()) if is_public(name)}

    assert dict_public_names > hash_public_names
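The `(fun, *args)` tuples above are a compact way to replay one operation log against two mapping implementations and compare outcomes, including raised exceptions; the same dispatch trick in isolation:

from operator import delitem, getitem, setitem

ops = [(setitem, "a", 1), (getitem, "a"), (delitem, "a"), (getitem, "a")]
store: dict = {}
for fun, *args in ops:
    try:
        print(fun.__name__, args, "->", fun(store, *args))
    except KeyError as err:
        print(fun.__name__, args, "raised KeyError", err)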
706
from __future__ import annotations

import string
from itertools import cycle, product
from pathlib import Path

VALID_CHARS = (
    string.ascii_letters + string.digits + string.punctuation + string.whitespace
)
LOWERCASE_INTS = [ord(letter) for letter in string.ascii_lowercase]
VALID_INTS = {ord(char) for char in VALID_CHARS}
COMMON_WORDS = ["the", "be", "to", "of", "and", "in", "that", "have"]


def try_key(ciphertext: list[int], key: tuple[int, ...]) -> str | None:
    decoded = ""
    keychar: int
    cipherchar: int
    decodedchar: int

    for keychar, cipherchar in zip(cycle(key), ciphertext):
        decodedchar = cipherchar ^ keychar
        if decodedchar not in VALID_INTS:
            return None
        decoded += chr(decodedchar)

    return decoded


def filter_valid_chars(ciphertext: list[int]) -> list[str]:
    possibles: list[str] = []
    for key in product(LOWERCASE_INTS, repeat=3):
        encoded = try_key(ciphertext, key)
        if encoded is not None:
            possibles.append(encoded)
    return possibles


def filter_common_word(possibles: list[str], common_word: str) -> list[str]:
    return [possible for possible in possibles if common_word in possible.lower()]


def solution(filename: str = "p059_cipher.txt") -> int:
    ciphertext: list[int]
    possibles: list[str]
    common_word: str
    decoded_text: str
    data: str = Path(__file__).parent.joinpath(filename).read_text(encoding="utf-8")

    ciphertext = [int(number) for number in data.strip().split(",")]

    possibles = filter_valid_chars(ciphertext)
    for common_word in COMMON_WORDS:
        possibles = filter_common_word(possibles, common_word)
        if len(possibles) == 1:
            break

    decoded_text = possibles[0]
    return sum(ord(char) for char in decoded_text)


if __name__ == "__main__":
    print(f"{solution() = }")
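The brute-force attack above works because XOR with a repeating key is its own inverse, so every candidate key can be tested by decrypting directly; a small round-trip demonstration (key and message are arbitrary):

from itertools import cycle

message = b"three lowercase letters"
key = b"god"

ciphertext = bytes(m ^ k for m, k in zip(message, cycle(key)))
recovered = bytes(c ^ k for c, k in zip(ciphertext, cycle(key)))
assert recovered == message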
152
0
import torch from diffusers import CMStochasticIterativeScheduler from .test_schedulers import SchedulerCommonTest class _UpperCAmelCase ( _A ): '''simple docstring''' SCREAMING_SNAKE_CASE : Tuple = (CMStochasticIterativeScheduler,) SCREAMING_SNAKE_CASE : List[str] = 10 def UpperCamelCase ( self : Optional[int] , **UpperCamelCase__ : List[str] ): A = { """num_train_timesteps""": 201, """sigma_min""": 0.002, """sigma_max""": 80.0, } config.update(**snake_case_ ) return config def UpperCamelCase ( self : Tuple ): A = 10 A = self.get_scheduler_config() A = self.scheduler_classes[0](**snake_case_ ) scheduler.set_timesteps(snake_case_ ) A = scheduler.timesteps[0] A = scheduler.timesteps[1] A = self.dummy_sample A = 0.1 * sample A = scheduler.step(snake_case_ , snake_case_ , snake_case_ ).prev_sample A = scheduler.step(snake_case_ , snake_case_ , snake_case_ ).prev_sample self.assertEqual(output_a.shape , sample.shape ) self.assertEqual(output_a.shape , output_a.shape ) def UpperCamelCase ( self : Optional[Any] ): for timesteps in [10, 50, 100, 1000]: self.check_over_configs(num_train_timesteps=snake_case_ ) def UpperCamelCase ( self : Optional[Any] ): for clip_denoised in [True, False]: self.check_over_configs(clip_denoised=snake_case_ ) def UpperCamelCase ( self : Any ): A = self.scheduler_classes[0] A = self.get_scheduler_config() A = scheduler_class(**snake_case_ ) A = 1 scheduler.set_timesteps(snake_case_ ) A = scheduler.timesteps A = torch.manual_seed(0 ) A = self.dummy_model() A = self.dummy_sample_deter * scheduler.init_noise_sigma for i, t in enumerate(snake_case_ ): # 1. scale model input A = scheduler.scale_model_input(snake_case_ , snake_case_ ) # 2. predict noise residual A = model(snake_case_ , snake_case_ ) # 3. predict previous sample x_t-1 A = scheduler.step(snake_case_ , snake_case_ , snake_case_ , generator=snake_case_ ).prev_sample A = pred_prev_sample A = torch.sum(torch.abs(snake_case_ ) ) A = torch.mean(torch.abs(snake_case_ ) ) assert abs(result_sum.item() - 192.7_614 ) < 1e-2 assert abs(result_mean.item() - 0.2_510 ) < 1e-3 def UpperCamelCase ( self : str ): A = self.scheduler_classes[0] A = self.get_scheduler_config() A = scheduler_class(**snake_case_ ) A = [106, 0] scheduler.set_timesteps(timesteps=snake_case_ ) A = scheduler.timesteps A = torch.manual_seed(0 ) A = self.dummy_model() A = self.dummy_sample_deter * scheduler.init_noise_sigma for t in timesteps: # 1. scale model input A = scheduler.scale_model_input(snake_case_ , snake_case_ ) # 2. predict noise residual A = model(snake_case_ , snake_case_ ) # 3. predict previous sample x_t-1 A = scheduler.step(snake_case_ , snake_case_ , snake_case_ , generator=snake_case_ ).prev_sample A = pred_prev_sample A = torch.sum(torch.abs(snake_case_ ) ) A = torch.mean(torch.abs(snake_case_ ) ) assert abs(result_sum.item() - 347.6_357 ) < 1e-2 assert abs(result_mean.item() - 0.4_527 ) < 1e-3 def UpperCamelCase ( self : str ): A = self.scheduler_classes[0] A = self.get_scheduler_config() A = scheduler_class(**snake_case_ ) A = [39, 30, 12, 15, 0] with self.assertRaises(snake_case_ , msg='`timesteps` must be in descending order.' ): scheduler.set_timesteps(timesteps=snake_case_ ) def UpperCamelCase ( self : str ): A = self.scheduler_classes[0] A = self.get_scheduler_config() A = scheduler_class(**snake_case_ ) A = [39, 30, 12, 1, 0] A = len(snake_case_ ) with self.assertRaises(snake_case_ , msg='Can only pass one of `num_inference_steps` or `timesteps`.' 
): scheduler.set_timesteps(num_inference_steps=snake_case_ , timesteps=snake_case_ ) def UpperCamelCase ( self : int ): A = self.scheduler_classes[0] A = self.get_scheduler_config() A = scheduler_class(**snake_case_ ) A = [scheduler.config.num_train_timesteps] with self.assertRaises( snake_case_ , msg='`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}}' , ): scheduler.set_timesteps(timesteps=snake_case_ )
699
from __future__ import annotations

import time

Path = list[tuple[int, int]]

grid = [
    [0, 0, 0, 0, 0, 0, 0],
    [0, 1, 0, 0, 0, 0, 0],  # 0 are free path whereas 1's are obstacles
    [0, 0, 0, 0, 0, 0, 0],
    [0, 0, 1, 0, 0, 0, 0],
    [1, 0, 1, 0, 0, 0, 0],
    [0, 0, 0, 0, 0, 0, 0],
    [0, 0, 0, 0, 1, 0, 0],
]

delta = [[-1, 0], [0, -1], [1, 0], [0, 1]]  # up, left, down, right


class Node:
    def __init__(self, pos_x: int, pos_y: int, goal_x: int, goal_y: int, parent: Node | None):
        self.pos_x = pos_x
        self.pos_y = pos_y
        self.pos = (pos_y, pos_x)
        self.goal_x = goal_x
        self.goal_y = goal_y
        self.parent = parent


class BreadthFirstSearch:
    def __init__(self, start: tuple[int, int], goal: tuple[int, int]):
        self.start = Node(start[1], start[0], goal[1], goal[0], None)
        self.target = Node(goal[1], goal[0], goal[1], goal[0], None)
        self.node_queue = [self.start]
        self.reached = False

    def search(self) -> Path | None:
        while self.node_queue:
            current_node = self.node_queue.pop(0)
            if current_node.pos == self.target.pos:
                self.reached = True
                return self.retrace_path(current_node)
            successors = self.get_successors(current_node)
            for node in successors:
                self.node_queue.append(node)
        if not self.reached:
            return [self.start.pos]
        return None

    def get_successors(self, parent: Node) -> list[Node]:
        successors = []
        for action in delta:
            pos_x = parent.pos_x + action[1]
            pos_y = parent.pos_y + action[0]
            if not (0 <= pos_x <= len(grid[0]) - 1 and 0 <= pos_y <= len(grid) - 1):
                continue
            if grid[pos_y][pos_x] != 0:
                continue
            successors.append(
                Node(pos_x, pos_y, self.target.pos_y, self.target.pos_x, parent)
            )
        return successors

    def retrace_path(self, node: Node | None) -> Path:
        current_node = node
        path = []
        while current_node is not None:
            path.append((current_node.pos_y, current_node.pos_x))
            current_node = current_node.parent
        path.reverse()
        return path


class BidirectionalBreadthFirstSearch:
    def __init__(self, start: tuple[int, int], goal: tuple[int, int]):
        self.fwd_bfs = BreadthFirstSearch(start, goal)
        self.bwd_bfs = BreadthFirstSearch(goal, start)
        self.reached = False

    def search(self) -> Path | None:
        while self.fwd_bfs.node_queue or self.bwd_bfs.node_queue:
            current_fwd_node = self.fwd_bfs.node_queue.pop(0)
            current_bwd_node = self.bwd_bfs.node_queue.pop(0)
            if current_bwd_node.pos == current_fwd_node.pos:
                self.reached = True
                return self.retrace_bidirectional_path(current_fwd_node, current_bwd_node)
            self.fwd_bfs.target = current_bwd_node
            self.bwd_bfs.target = current_fwd_node
            successors = {
                self.fwd_bfs: self.fwd_bfs.get_successors(current_fwd_node),
                self.bwd_bfs: self.bwd_bfs.get_successors(current_bwd_node),
            }
            for bfs in [self.fwd_bfs, self.bwd_bfs]:
                for node in successors[bfs]:
                    bfs.node_queue.append(node)
        if not self.reached:
            return [self.fwd_bfs.start.pos]
        return None

    def retrace_bidirectional_path(self, fwd_node: Node, bwd_node: Node) -> Path:
        fwd_path = self.fwd_bfs.retrace_path(fwd_node)
        bwd_path = self.bwd_bfs.retrace_path(bwd_node)
        bwd_path.pop()
        bwd_path.reverse()
        path = fwd_path + bwd_path
        return path


if __name__ == "__main__":
    # all coordinates are given in format [y,x]
    import doctest

    doctest.testmod()
    init = (0, 0)
    goal = (len(grid) - 1, len(grid[0]) - 1)
    for elem in grid:
        print(elem)
    start_bfs_time = time.time()
    bfs = BreadthFirstSearch(init, goal)
    path = bfs.search()
    bfs_time = time.time() - start_bfs_time
    print("Unidirectional BFS computation time : ", bfs_time)
    start_bd_bfs_time = time.time()
    bd_bfs = BidirectionalBreadthFirstSearch(init, goal)
    bd_path = bd_bfs.search()
    bd_bfs_time = time.time() - start_bd_bfs_time
    print("Bidirectional BFS computation time : ", bd_bfs_time)
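A minimal usage sketch for the two searchers above, assuming the module-level grid defined in this sample; both return a list of (y, x) coordinates from start to goal.

start, goal = (0, 0), (len(grid) - 1, len(grid[0]) - 1)

uni_path = BreadthFirstSearch(start, goal).search()
bi_path = BidirectionalBreadthFirstSearch(start, goal).search()

# On this solvable grid, both paths begin at `start` and end at `goal`.
assert uni_path[0] == start and uni_path[-1] == goal
assert bi_path[0] == start and bi_path[-1] == goal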
548
0
def sylvester(number: int) -> int:
    assert isinstance(number, int), f"The input value of [n={number}] is not an integer"

    if number == 1:
        return 2
    elif number < 1:
        msg = f"The input value of [n={number}] has to be > 0"
        raise ValueError(msg)
    else:
        num = sylvester(number - 1)
        lower = num - 1
        upper = num
        return lower * upper + 1


if __name__ == "__main__":
    print(f"""The 8th number in Sylvester's sequence: {sylvester(8)}""")
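A quick sanity check of the recurrence used above: since lower * upper + 1 = (a - 1) * a + 1, the function implements a(1) = 2 and a(n) = a(n-1)**2 - a(n-1) + 1.

expected = [2, 3, 7, 43, 1807, 3263443]
assert [sylvester(n) for n in range(1, 7)] == expected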
647
from __future__ import annotations

import math


def is_prime(number: int) -> bool:
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False

    # All primes number are in format of 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True


odd_composites = [num for num in range(3, 100001, 2) if not is_prime(num)]


def compute_nums(n: int) -> list[int]:
    if not isinstance(n, int):
        raise ValueError("n must be an integer")
    if n <= 0:
        raise ValueError("n must be >= 0")

    list_nums = []
    for num in range(len(odd_composites)):
        i = 0
        while 2 * i * i <= odd_composites[num]:
            rem = odd_composites[num] - 2 * i * i
            if is_prime(rem):
                break
            i += 1
        else:
            list_nums.append(odd_composites[num])
            if len(list_nums) == n:
                return list_nums
    return []


def solution() -> int:
    return compute_nums(1)[0]


if __name__ == "__main__":
    print(f"""{solution() = }""")
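A direct check of the logic above, as a sketch: the smallest odd composite that cannot be written as a prime plus twice a square is known to be 5777, while 33 can be decomposed that way.

# 5777 is exactly what solution() returns.
assert solution() == 5777

# Conversely, 33 = 31 + 2 * 1**2, so the inner while loop breaks for it.
assert is_prime(33 - 2 * 1 * 1)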
647
1
import copy from typing import Any, Dict, List, Optional, Union import numpy as np import torch from ...audio_utils import mel_filter_bank, spectrogram, window_function from ...feature_extraction_sequence_utils import SequenceFeatureExtractor from ...feature_extraction_utils import BatchFeature from ...utils import TensorType, logging lowerCamelCase : Optional[Any] = logging.get_logger(__name__) class snake_case__ ( UpperCamelCase_ ): _lowerCAmelCase =['input_features', 'is_longer'] def __init__( self : int , _lowerCamelCase : Union[str, Any]=6_4 , _lowerCamelCase : Any=4_8_0_0_0 , _lowerCamelCase : Optional[int]=4_8_0 , _lowerCamelCase : Optional[Any]=1_0 , _lowerCamelCase : List[str]=1_0_2_4 , _lowerCamelCase : List[Any]=0.0 , _lowerCamelCase : Union[str, Any]=False , _lowerCamelCase : float = 0 , _lowerCamelCase : float = 1_4_0_0_0 , _lowerCamelCase : int = None , _lowerCamelCase : str = "fusion" , _lowerCamelCase : str = "repeatpad" , **_lowerCamelCase : Any , ): super().__init__( feature_size=_lowerCamelCase , sampling_rate=_lowerCamelCase , padding_value=_lowerCamelCase , return_attention_mask=_lowerCamelCase , **_lowerCamelCase , ) snake_case__ : Optional[int] = top_db snake_case__ : Any = truncation snake_case__ : List[Any] = padding snake_case__ : str = fft_window_size snake_case__ : Union[str, Any] = (fft_window_size >> 1) + 1 snake_case__ : List[str] = hop_length snake_case__ : Union[str, Any] = max_length_s snake_case__ : int = max_length_s * sampling_rate snake_case__ : str = sampling_rate snake_case__ : List[Any] = frequency_min snake_case__ : Optional[Any] = frequency_max snake_case__ : Optional[int] = mel_filter_bank( num_frequency_bins=self.nb_frequency_bins , num_mel_filters=_lowerCamelCase , min_frequency=_lowerCamelCase , max_frequency=_lowerCamelCase , sampling_rate=_lowerCamelCase , norm=_lowerCamelCase , mel_scale='htk' , ) snake_case__ : List[Any] = mel_filter_bank( num_frequency_bins=self.nb_frequency_bins , num_mel_filters=_lowerCamelCase , min_frequency=_lowerCamelCase , max_frequency=_lowerCamelCase , sampling_rate=_lowerCamelCase , norm='slaney' , mel_scale='slaney' , ) def UpperCAmelCase__ ( self : Optional[Any] ): snake_case__ : Union[str, Any] = copy.deepcopy(self.__dict__ ) snake_case__ : str = self.__class__.__name__ if "mel_filters" in output: del output["mel_filters"] if "mel_filters_slaney" in output: del output["mel_filters_slaney"] return output def UpperCAmelCase__ ( self : int , _lowerCamelCase : np.array , _lowerCamelCase : Optional[np.array] = None ): snake_case__ : Optional[int] = spectrogram( _lowerCamelCase , window_function(self.fft_window_size , 'hann' ) , frame_length=self.fft_window_size , hop_length=self.hop_length , power=2.0 , mel_filters=_lowerCamelCase , log_mel='dB' , ) return log_mel_spectrogram.T def UpperCAmelCase__ ( self : List[str] , _lowerCamelCase : int , _lowerCamelCase : List[str] , _lowerCamelCase : Tuple ): snake_case__ : str = np.array_split(list(range(0 , total_frames - chunk_frames + 1 ) ) , 3 ) if len(ranges[1] ) == 0: # if the audio is too short, we just use the first chunk snake_case__ : Any = [0] if len(ranges[2] ) == 0: # if the audio is too short, we just use the first chunk snake_case__ : List[Any] = [0] # randomly choose index for each part snake_case__ : Any = np.random.choice(ranges[0] ) snake_case__ : Optional[Any] = np.random.choice(ranges[1] ) snake_case__ : int = np.random.choice(ranges[2] ) snake_case__ : List[Any] = mel[idx_front : idx_front + chunk_frames, :] snake_case__ : Union[str, Any] = 
mel[idx_middle : idx_middle + chunk_frames, :] snake_case__ : Dict = mel[idx_back : idx_back + chunk_frames, :] snake_case__ : str = torch.tensor(mel[None, None, :] ) snake_case__ : List[Any] = torch.nn.functional.interpolate( _lowerCamelCase , size=[chunk_frames, 6_4] , mode='bilinear' , align_corners=_lowerCamelCase ) snake_case__ : Dict = mel_shrink[0][0].numpy() snake_case__ : str = np.stack([mel_shrink, mel_chunk_front, mel_chunk_middle, mel_chunk_back] , axis=0 ) return mel_fusion def UpperCAmelCase__ ( self : Dict , _lowerCamelCase : np.array , _lowerCamelCase : Union[str, Any] , _lowerCamelCase : Union[str, Any] , _lowerCamelCase : List[str] ): if waveform.shape[0] > max_length: if truncation == "rand_trunc": snake_case__ : str = True # random crop to max_length (for compatibility) -> this should be handled by self.pad snake_case__ : List[Any] = len(_lowerCamelCase ) - max_length snake_case__ : Dict = np.random.randint(0 , overflow + 1 ) snake_case__ : Dict = waveform[idx : idx + max_length] snake_case__ : Dict = self._np_extract_fbank_features(_lowerCamelCase , self.mel_filters_slaney )[None, :] elif truncation == "fusion": snake_case__ : Dict = self._np_extract_fbank_features(_lowerCamelCase , self.mel_filters ) snake_case__ : Optional[Any] = max_length // self.hop_length + 1 # the +1 related to how the spectrogram is computed snake_case__ : Optional[int] = mel.shape[0] if chunk_frames == total_frames: # there is a corner case where the audio length is larger than max_length but smaller than max_length+hop_length. # In this case, we just use the whole audio. snake_case__ : Any = np.stack([mel, mel, mel, mel] , axis=0 ) snake_case__ : Union[str, Any] = False else: snake_case__ : Optional[int] = self._random_mel_fusion(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) snake_case__ : Tuple = True else: raise NotImplementedError(F'''data_truncating {truncation} not implemented''' ) else: snake_case__ : List[Any] = False # only use repeat as a new possible value for padding. 
you repeat the audio before applying the usual max_length padding if waveform.shape[0] < max_length: if padding == "repeat": snake_case__ : Tuple = int(max_length / len(_lowerCamelCase ) ) snake_case__ : int = np.stack(np.tile(_lowerCamelCase , n_repeat + 1 ) )[:max_length] if padding == "repeatpad": snake_case__ : int = int(max_length / len(_lowerCamelCase ) ) snake_case__ : List[str] = np.stack(np.tile(_lowerCamelCase , _lowerCamelCase ) ) snake_case__ : str = np.pad(_lowerCamelCase , (0, max_length - waveform.shape[0]) , mode='constant' , constant_values=0 ) if truncation == "fusion": snake_case__ : List[str] = self._np_extract_fbank_features(_lowerCamelCase , self.mel_filters ) snake_case__ : Optional[Any] = np.stack([input_mel, input_mel, input_mel, input_mel] , axis=0 ) else: snake_case__ : Union[str, Any] = self._np_extract_fbank_features(_lowerCamelCase , self.mel_filters_slaney )[None, :] return input_mel, longer def __call__( self : List[str] , _lowerCamelCase : Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]] , _lowerCamelCase : str = None , _lowerCamelCase : Optional[str] = None , _lowerCamelCase : Optional[int] = None , _lowerCamelCase : Optional[int] = None , _lowerCamelCase : Optional[Union[str, TensorType]] = None , **_lowerCamelCase : Optional[int] , ): snake_case__ : int = truncation if truncation is not None else self.truncation snake_case__ : int = padding if padding else self.padding if sampling_rate is not None: if sampling_rate != self.sampling_rate: raise ValueError( F'''The model corresponding to this feature extractor: {self.__class__.__name__} was trained using a''' F''' sampling rate of {self.sampling_rate}. Please make sure that the provided `raw_speech` input''' F''' was sampled with {self.sampling_rate} and not {sampling_rate}.''' ) else: logger.warning( 'It is strongly recommended to pass the `sampling_rate` argument to this function. ' 'Failing to do so can result in silent errors that might be hard to debug.' ) snake_case__ : int = isinstance(_lowerCamelCase , np.ndarray ) and len(raw_speech.shape ) > 1 if is_batched_numpy and len(raw_speech.shape ) > 2: raise ValueError(F'''Only mono-channel audio is supported for input to {self}''' ) snake_case__ : str = is_batched_numpy or ( isinstance(_lowerCamelCase , (list, tuple) ) and (isinstance(raw_speech[0] , (np.ndarray, tuple, list) )) ) if is_batched: snake_case__ : Union[str, Any] = [np.asarray(_lowerCamelCase , dtype=np.floataa ) for speech in raw_speech] elif not is_batched and not isinstance(_lowerCamelCase , np.ndarray ): snake_case__ : Optional[int] = np.asarray(_lowerCamelCase , dtype=np.floataa ) elif isinstance(_lowerCamelCase , np.ndarray ) and raw_speech.dtype is np.dtype(np.floataa ): snake_case__ : str = raw_speech.astype(np.floataa ) # always return batch if not is_batched: snake_case__ : Union[str, Any] = [np.asarray(_lowerCamelCase )] # convert to mel spectrogram, truncate and pad if needed. 
snake_case__ : List[str] = [ self._get_input_mel(_lowerCamelCase , max_length if max_length else self.nb_max_samples , _lowerCamelCase , _lowerCamelCase ) for waveform in raw_speech ] snake_case__ : Any = [] snake_case__ : Optional[int] = [] for mel, longer in padded_inputs: input_mel.append(_lowerCamelCase ) is_longer.append(_lowerCamelCase ) if truncation == "fusion" and sum(_lowerCamelCase ) == 0: # if no audio is longer than 10s, then randomly select one audio to be longer snake_case__ : Dict = np.random.randint(0 , len(_lowerCamelCase ) ) snake_case__ : Union[str, Any] = True if isinstance(input_mel[0] , _lowerCamelCase ): snake_case__ : Tuple = [np.asarray(_lowerCamelCase , dtype=np.floataa ) for feature in input_mel] # is_longer is a list of bool snake_case__ : Any = [[longer] for longer in is_longer] snake_case__ : Union[str, Any] = {'input_features': input_mel, 'is_longer': is_longer} snake_case__ : Any = BatchFeature(_lowerCamelCase ) if return_tensors is not None: snake_case__ : Optional[int] = input_features.convert_to_tensors(_lowerCamelCase ) return input_features
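The "repeatpad" branch above tiles a short waveform and then zero-pads it to max_length. The same logic on a toy array, as a minimal sketch (numpy only, names are illustrative):

import numpy as np

waveform = np.array([1.0, 2.0, 3.0])
max_length = 8
n_repeat = int(max_length / len(waveform))  # 2
tiled = np.tile(waveform, n_repeat)         # [1. 2. 3. 1. 2. 3.]
padded = np.pad(tiled, (0, max_length - tiled.shape[0]), mode="constant", constant_values=0)
print(padded)  # [1. 2. 3. 1. 2. 3. 0. 0.]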
170
# Copyright (c) 2021-, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. #################################################################################################### # # Note: If when running this conversion script you're getting an exception: # ModuleNotFoundError: No module named 'megatron.model.enums' # you need to tell python where to find the clone of Megatron-LM, e.g.: # # cd /tmp # git clone https://github.com/NVIDIA/Megatron-LM # PYTHONPATH=/tmp/Megatron-LM python src/transformers/models/megatron_gpt2/convert_megatron_gpt2_checkpoint.py ... # # if you already have it cloned elsewhere, simply adjust the path to the existing path # # If the training was done using a Megatron-LM fork, e.g., # https://github.com/microsoft/Megatron-DeepSpeed/ then chances are that you need to have that one # in your path, i.e., /path/to/Megatron-DeepSpeed/ # import argparse import os import re import zipfile import torch from transformers import AutoTokenizer, GPTaConfig def lowercase__( A , A , A=0 ): # Format the message. if name is None: snake_case__ : Dict = None else: snake_case__ : Optional[int] = '.' * max(0 , spaces - 2 ) + '# {:' + str(5_0 - spaces ) + 's}' snake_case__ : str = fmt.format(A ) # Print and recurse (if needed). if isinstance(A , A ): if msg is not None: print(A ) for k in val.keys(): recursive_print(A , val[k] , spaces + 2 ) elif isinstance(A , torch.Tensor ): print(A , ':' , val.size() ) else: print(A , ':' , A ) def lowercase__( A , A , A , A , A ): # Permutes layout of param tensor to [num_splits * num_heads * hidden_size, :] # for compatibility with later versions of NVIDIA Megatron-LM. # The inverse operation is performed inside Megatron-LM to read checkpoints: # https://github.com/NVIDIA/Megatron-LM/blob/v2.4/megatron/checkpointing.py#L209 # If param is the weight tensor of the self-attention block, the returned tensor # will have to be transposed one more time to be read by HuggingFace GPT2. snake_case__ : Any = param.size() if checkpoint_version == 1.0: # version 1.0 stores [num_heads * hidden_size * num_splits, :] snake_case__ : Union[str, Any] = (num_heads, hidden_size, num_splits) + input_shape[1:] snake_case__ : int = param.view(*A ) snake_case__ : Tuple = param.transpose(0 , 2 ) snake_case__ : Any = param.transpose(1 , 2 ).contiguous() elif checkpoint_version >= 2.0: # other versions store [num_heads * num_splits * hidden_size, :] snake_case__ : Optional[Any] = (num_heads, num_splits, hidden_size) + input_shape[1:] snake_case__ : Any = param.view(*A ) snake_case__ : Optional[int] = param.transpose(0 , 1 ).contiguous() snake_case__ : List[Any] = param.view(*A ) return param def lowercase__( A , A , A ): # The converted output model. 
snake_case__ : Optional[Any] = {} # old versions did not store training args snake_case__ : Any = input_state_dict.get('args' , A ) if ds_args is not None: # do not make the user write a config file when the exact dimensions/sizes are already in the checkpoint # from pprint import pprint # pprint(vars(ds_args)) snake_case__ : str = ds_args.padded_vocab_size snake_case__ : Any = ds_args.max_position_embeddings snake_case__ : Optional[int] = ds_args.hidden_size snake_case__ : str = ds_args.num_layers snake_case__ : List[Any] = ds_args.num_attention_heads snake_case__ : int = ds_args.ffn_hidden_size # pprint(config) # The number of heads. snake_case__ : int = config.n_head # The hidden_size per head. snake_case__ : Any = config.n_embd // config.n_head # Megatron-LM checkpoint version if "checkpoint_version" in input_state_dict.keys(): snake_case__ : Optional[Any] = input_state_dict['checkpoint_version'] else: snake_case__ : Tuple = 0.0 # The model. snake_case__ : Dict = input_state_dict['model'] # The language model. snake_case__ : int = model['language_model'] # The embeddings. snake_case__ : Tuple = lm['embedding'] # The word embeddings. snake_case__ : Tuple = embeddings['word_embeddings']['weight'] # Truncate the embedding table to vocab_size rows. snake_case__ : int = word_embeddings[: config.vocab_size, :] snake_case__ : List[str] = word_embeddings # The position embeddings. snake_case__ : Union[str, Any] = embeddings['position_embeddings']['weight'] # Read the causal mask dimension (seqlen). [max_sequence_length, hidden_size] snake_case__ : Any = pos_embeddings.size(0 ) if n_positions != config.n_positions: raise ValueError( f'''pos_embeddings.max_sequence_length={n_positions} and config.n_positions={config.n_positions} don\'t match''' ) # Store the position embeddings. snake_case__ : int = pos_embeddings # The transformer. snake_case__ : Optional[Any] = lm['transformer'] if 'transformer' in lm.keys() else lm['encoder'] # The regex to extract layer names. snake_case__ : Union[str, Any] = re.compile(R'layers\.(\d+)\.([a-z0-9_.]+)\.([a-z]+)' ) # The simple map of names for "automated" rules. snake_case__ : Any = { 'attention.dense': '.attn.c_proj.', 'self_attention.dense': '.attn.c_proj.', 'mlp.dense_h_to_4h': '.mlp.c_fc.', 'mlp.dense_4h_to_h': '.mlp.c_proj.', } # Extract the layers. for key, val in transformer.items(): # Match the name. snake_case__ : Dict = layer_re.match(A ) # Stop if that's not a layer if m is None: break # The index of the layer. snake_case__ : Dict = int(m.group(1 ) ) # The name of the operation. snake_case__ : List[Any] = m.group(2 ) # Is it a weight or a bias? snake_case__ : Tuple = m.group(3 ) # The name of the layer. snake_case__ : int = f'''transformer.h.{layer_idx}''' # For layernorm(s), simply store the layer norm. if op_name.endswith('layernorm' ): snake_case__ : Union[str, Any] = 'ln_1' if op_name.startswith('input' ) else 'ln_2' snake_case__ : List[Any] = val # Transpose the QKV matrix. elif ( op_name == "attention.query_key_value" or op_name == "self_attention.query_key_value" ) and weight_or_bias == "weight": # Insert a tensor of 1x1xDxD bias. snake_case__ : List[Any] = torch.tril(torch.ones((n_positions, n_positions) , dtype=torch.floataa ) ).view( 1 , 1 , A , A ) snake_case__ : List[Any] = causal_mask # Insert a "dummy" tensor for masked_bias. 
snake_case__ : Optional[int] = torch.tensor(-1e4 , dtype=torch.floataa ) snake_case__ : int = masked_bias snake_case__ : List[Any] = fix_query_key_value_ordering(A , A , 3 , A , A ) # Megatron stores (3*D) x D but transformers-GPT2 expects D x 3*D. snake_case__ : List[Any] = out_val.transpose(0 , 1 ).contiguous() # Store. snake_case__ : Optional[Any] = out_val # Transpose the bias. elif ( op_name == "attention.query_key_value" or op_name == "self_attention.query_key_value" ) and weight_or_bias == "bias": snake_case__ : int = fix_query_key_value_ordering(A , A , 3 , A , A ) # Store. No change of shape. snake_case__ : List[Any] = out_val # Transpose the weights. elif weight_or_bias == "weight": snake_case__ : List[Any] = megatron_to_transformers[op_name] snake_case__ : Union[str, Any] = val.transpose(0 , 1 ) # Copy the bias. elif weight_or_bias == "bias": snake_case__ : List[Any] = megatron_to_transformers[op_name] snake_case__ : str = val # DEBUG. assert config.n_layer == layer_idx + 1 # The final layernorm. snake_case__ : Any = transformer['final_layernorm.weight'] snake_case__ : Optional[int] = transformer['final_layernorm.bias'] # For LM head, transformers' wants the matrix to weight embeddings. snake_case__ : Optional[Any] = word_embeddings # It should be done! return output_state_dict def lowercase__( ): # Create the argument parser. snake_case__ : str = argparse.ArgumentParser() parser.add_argument('--print-checkpoint-structure' , action='store_true' ) parser.add_argument( 'path_to_checkpoint' , type=A , help='Path to the checkpoint file (.zip archive or direct .pt file)' , ) parser.add_argument( '--config_file' , default='' , type=A , help='An optional config json file describing the pre-trained model.' , ) snake_case__ : Dict = parser.parse_args() # Extract the basename. snake_case__ : str = os.path.dirname(args.path_to_checkpoint ) # Load the model. # the .zip is very optional, let's keep it for backward compatibility print(f'''Extracting PyTorch state dictionary from {args.path_to_checkpoint}''' ) if args.path_to_checkpoint.endswith('.zip' ): with zipfile.ZipFile(args.path_to_checkpoint , 'r' ) as checkpoint: with checkpoint.open('release/mp_rank_00/model_optim_rng.pt' ) as pytorch_dict: snake_case__ : Any = torch.load(A , map_location='cpu' ) else: snake_case__ : Optional[int] = torch.load(args.path_to_checkpoint , map_location='cpu' ) snake_case__ : List[Any] = input_state_dict.get('args' , A ) # Read the config, or default to the model released by NVIDIA. if args.config_file == "": if ds_args is not None: if ds_args.bias_gelu_fusion: snake_case__ : Optional[int] = 'gelu_fast' elif ds_args.openai_gelu: snake_case__ : Optional[Any] = 'gelu_new' else: snake_case__ : str = 'gelu' else: # in the very early days this used to be "gelu_new" snake_case__ : List[str] = 'gelu_new' # Spell out all parameters in case the defaults change. snake_case__ : str = GPTaConfig( vocab_size=5_0_2_5_7 , n_positions=1_0_2_4 , n_embd=1_0_2_4 , n_layer=2_4 , n_head=1_6 , n_inner=4_0_9_6 , activation_function=A , resid_pdrop=0.1 , embd_pdrop=0.1 , attn_pdrop=0.1 , layer_norm_epsilon=1e-5 , initializer_range=0.02 , summary_type='cls_index' , summary_use_proj=A , summary_activation=A , summary_proj_to_labels=A , summary_first_dropout=0.1 , scale_attn_weights=A , use_cache=A , bos_token_id=5_0_2_5_6 , eos_token_id=5_0_2_5_6 , ) else: snake_case__ : str = GPTaConfig.from_json_file(args.config_file ) snake_case__ : Optional[Any] = ['GPT2LMHeadModel'] # Convert. 
print('Converting' ) snake_case__ : Dict = convert_megatron_checkpoint(A , A , A ) # Print the structure of converted state dict. if args.print_checkpoint_structure: recursive_print(A , A ) # Add tokenizer class info to config # see https://github.com/huggingface/transformers/issues/13906) if ds_args is not None: snake_case__ : Union[str, Any] = ds_args.tokenizer_type if tokenizer_type == "GPT2BPETokenizer": snake_case__ : Optional[int] = 'gpt2' elif tokenizer_type == "PretrainedFromHF": snake_case__ : Optional[Any] = ds_args.tokenizer_name_or_path else: raise ValueError(f'''Unrecognized tokenizer_type {tokenizer_type}''' ) else: snake_case__ : str = 'gpt2' snake_case__ : int = AutoTokenizer.from_pretrained(A ) snake_case__ : Optional[int] = type(A ).__name__ snake_case__ : Tuple = tokenizer_class # Store the config to file. print('Saving config' ) config.save_pretrained(A ) # Save tokenizer based on args print(f'''Adding {tokenizer_class} tokenizer files''' ) tokenizer.save_pretrained(A ) # Store the state_dict to file. snake_case__ : Optional[Any] = os.path.join(A , 'pytorch_model.bin' ) print(f'''Saving checkpoint to "{output_checkpoint_file}"''' ) torch.save(A , A ) #################################################################################################### if __name__ == "__main__": main() ####################################################################################################
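A shape-only sketch of the QKV reordering performed above for checkpoint_version >= 2.0, assuming the function name used at the call sites (fix_query_key_value_ordering) is in scope: Megatron stores [num_heads * num_splits * hidden_size, :] and the permutation yields [num_splits * num_heads * hidden_size, :], so the total size is unchanged while rows are reordered.

import torch

num_splits, num_heads, head_dim, cols = 3, 2, 4, 5
param = torch.arange(num_heads * num_splits * head_dim * cols, dtype=torch.float32)
param = param.view(num_heads * num_splits * head_dim, cols)

out = fix_query_key_value_ordering(param, 2.0, num_splits, num_heads, head_dim)
assert out.shape == param.shape  # same storage size, reordered rows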
170
1
"""simple docstring""" import importlib.util import json import os import warnings from dataclasses import dataclass, field import torch from ..training_args import TrainingArguments from ..utils import cached_property, is_sagemaker_dp_enabled, logging __UpperCAmelCase = logging.get_logger(__name__) def _snake_case ( ) -> Optional[Any]: '''simple docstring''' lowerCAmelCase_ :int = os.getenv("""SM_HP_MP_PARAMETERS""" , """{}""" ) try: # Parse it and check the field "partitions" is included, it is required for model parallel. lowerCAmelCase_ :Tuple = json.loads(__snake_case ) if "partitions" not in smp_options: return False except json.JSONDecodeError: return False # Get the sagemaker specific framework parameters from mpi_options variable. lowerCAmelCase_ :Dict = os.getenv("""SM_FRAMEWORK_PARAMS""" , """{}""" ) try: # Parse it and check the field "sagemaker_distributed_dataparallel_enabled". lowerCAmelCase_ :List[str] = json.loads(__snake_case ) if not mpi_options.get("""sagemaker_mpi_enabled""" , __snake_case ): return False except json.JSONDecodeError: return False # Lastly, check if the `smdistributed` module is present. return importlib.util.find_spec("""smdistributed""" ) is not None if is_sagemaker_model_parallel_available(): import smdistributed.modelparallel.torch as smp smp.init() @dataclass class _SCREAMING_SNAKE_CASE ( _A ): UpperCAmelCase_ :List[Any] = field( default="" , metadata={"help": "Used by the SageMaker launcher to send mp-specific args. Ignored in SageMakerTrainer"} , ) def __lowerCAmelCase ( self ) -> Any: super().__post_init__() warnings.warn( """`SageMakerTrainingArguments` is deprecated and will be removed in v5 of Transformers. You can use """ """`TrainingArguments` instead.""" , __A , ) @cached_property def __lowerCAmelCase ( self ) -> "torch.device": logger.info("""PyTorch: setting up devices""" ) if torch.distributed.is_available() and torch.distributed.is_initialized() and self.local_rank == -1: logger.warning( """torch.distributed process group is initialized, but local_rank == -1. """ """In order to use Torch DDP, launch your script with `python -m torch.distributed.launch""" ) if self.no_cuda: lowerCAmelCase_ :int = torch.device("""cpu""" ) lowerCAmelCase_ :Any = 0 elif is_sagemaker_model_parallel_available(): lowerCAmelCase_ :str = smp.local_rank() lowerCAmelCase_ :Dict = torch.device("""cuda""" , __A ) lowerCAmelCase_ :str = 1 elif is_sagemaker_dp_enabled(): import smdistributed.dataparallel.torch.torch_smddp # noqa: F401 torch.distributed.init_process_group(backend="""smddp""" , timeout=self.ddp_timeout_delta ) lowerCAmelCase_ :str = int(os.getenv("""SMDATAPARALLEL_LOCAL_RANK""" ) ) lowerCAmelCase_ :Dict = torch.device("""cuda""" , self.local_rank ) lowerCAmelCase_ :Tuple = 1 elif self.local_rank == -1: # if n_gpu is > 1 we'll use nn.DataParallel. # If you only want to use a specific subset of GPUs use `CUDA_VISIBLE_DEVICES=0` # Explicitly set CUDA to the first (index 0) CUDA device, otherwise `set_device` will # trigger an error that a device index is missing. Index 0 takes into account the # GPUs available in the environment, so `CUDA_VISIBLE_DEVICES=1,2` with `cuda:0` # will use the first GPU in that env, i.e. GPU#1 lowerCAmelCase_ :Dict = torch.device("""cuda:0""" if torch.cuda.is_available() else """cpu""" ) # Sometimes the line in the postinit has not been run before we end up here, so just checking we're not at # the default value. lowerCAmelCase_ :Tuple = torch.cuda.device_count() else: # Here, we'll use torch.distributed. 
# Initializes the distributed backend which will take care of synchronizing nodes/GPUs if not torch.distributed.is_initialized(): torch.distributed.init_process_group(backend="""nccl""" , timeout=self.ddp_timeout_delta ) lowerCAmelCase_ :Optional[int] = torch.device("""cuda""" , self.local_rank ) lowerCAmelCase_ :Optional[Any] = 1 if device.type == "cuda": torch.cuda.set_device(__A ) return device @property def __lowerCAmelCase ( self ) -> List[str]: if is_sagemaker_model_parallel_available(): return smp.dp_size() return super().world_size @property def __lowerCAmelCase ( self ) -> Tuple: return not is_sagemaker_model_parallel_available() @property def __lowerCAmelCase ( self ) -> Optional[int]: return False
713
"""simple docstring""" from ...configuration_utils import PretrainedConfig class _SCREAMING_SNAKE_CASE ( A__ ): UpperCAmelCase_ :Tuple = "bert-generation" def __init__( self , __A=5_0358 , __A=1024 , __A=24 , __A=16 , __A=4096 , __A="gelu" , __A=0.1 , __A=0.1 , __A=512 , __A=0.0_2 , __A=1E-12 , __A=0 , __A=2 , __A=1 , __A="absolute" , __A=True , **__A , ) -> Union[str, Any]: super().__init__(pad_token_id=__A , bos_token_id=__A , eos_token_id=__A , **__A ) lowerCAmelCase_ :List[Any] = vocab_size lowerCAmelCase_ :int = hidden_size lowerCAmelCase_ :Union[str, Any] = num_hidden_layers lowerCAmelCase_ :str = num_attention_heads lowerCAmelCase_ :str = hidden_act lowerCAmelCase_ :List[str] = intermediate_size lowerCAmelCase_ :Optional[Any] = hidden_dropout_prob lowerCAmelCase_ :List[str] = attention_probs_dropout_prob lowerCAmelCase_ :Dict = max_position_embeddings lowerCAmelCase_ :int = initializer_range lowerCAmelCase_ :Optional[int] = layer_norm_eps lowerCAmelCase_ :str = position_embedding_type lowerCAmelCase_ :Dict = use_cache
256
0
from __future__ import annotations

import csv

import requests
from bs4 import BeautifulSoup


def get_imdb_top_250_movies(url: str = "") -> dict[str, float]:
    url = url or "https://www.imdb.com/chart/top/?ref_=nv_mv_250"
    soup = BeautifulSoup(requests.get(url).text, "html.parser")
    titles = soup.find_all("td", attrs="titleColumn")
    ratings = soup.find_all("td", class_="ratingColumn imdbRating")
    return {
        title.a.text: float(rating.strong.text)
        for title, rating in zip(titles, ratings)
    }


def write_movies(filename: str = "IMDb_Top_250_Movies.csv") -> None:
    movies = get_imdb_top_250_movies()
    with open(filename, "w", newline="") as out_file:
        writer = csv.writer(out_file)
        writer.writerow(["Movie title", "IMDb rating"])
        for title, rating in movies.items():
            writer.writerow([title, rating])


if __name__ == "__main__":
    write_movies()
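The scraping logic above reduces to two find_all calls plus a dict comprehension. The same extraction run against a static HTML snippet, as a sketch (no network needed, bs4 assumed installed):

from bs4 import BeautifulSoup

html = """
<table>
  <tr>
    <td class="titleColumn"><a>The Shawshank Redemption</a></td>
    <td class="ratingColumn imdbRating"><strong>9.2</strong></td>
  </tr>
</table>
"""
soup = BeautifulSoup(html, "html.parser")
titles = soup.find_all("td", attrs="titleColumn")
ratings = soup.find_all("td", class_="ratingColumn imdbRating")
movies = {t.a.text: float(r.strong.text) for t, r in zip(titles, ratings)}
print(movies)  # {'The Shawshank Redemption': 9.2}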
340
from dataclasses import dataclass
from typing import Optional, Tuple

import flax
import jax.numpy as jnp
from jax import random

from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .scheduling_utils_flax import FlaxSchedulerMixin


@flax.struct.dataclass
class KarrasVeSchedulerState:
    num_inference_steps: Optional[int] = None
    timesteps: Optional[jnp.ndarray] = None
    schedule: Optional[jnp.ndarray] = None  # sigma(t_i)

    @classmethod
    def create(cls):
        return cls()


@dataclass
class FlaxKarrasVeOutput(BaseOutput):
    prev_sample: jnp.ndarray
    derivative: jnp.ndarray
    state: KarrasVeSchedulerState


class FlaxKarrasVeScheduler(FlaxSchedulerMixin, ConfigMixin):
    @property
    def has_state(self):
        return True

    @register_to_config
    def __init__(
        self,
        sigma_min: float = 0.02,
        sigma_max: float = 100,
        s_noise: float = 1.007,
        s_churn: float = 80,
        s_min: float = 0.05,
        s_max: float = 50,
    ):
        pass

    def create_state(self):
        return KarrasVeSchedulerState.create()

    def set_timesteps(
        self, state: KarrasVeSchedulerState, num_inference_steps: int, shape: Tuple = ()
    ) -> KarrasVeSchedulerState:
        timesteps = jnp.arange(0, num_inference_steps)[::-1].copy()
        schedule = [
            (
                self.config.sigma_max**2
                * (self.config.sigma_min**2 / self.config.sigma_max**2) ** (i / (num_inference_steps - 1))
            )
            for i in timesteps
        ]
        return state.replace(
            num_inference_steps=num_inference_steps,
            schedule=jnp.array(schedule, dtype=jnp.float32),
            timesteps=timesteps,
        )

    def add_noise_to_input(self, state, sample, sigma, key):
        if self.config.s_min <= sigma <= self.config.s_max:
            gamma = min(self.config.s_churn / state.num_inference_steps, 2**0.5 - 1)
        else:
            gamma = 0

        # sample eps ~ N(0, S_noise^2 * I)
        key = random.split(key, num=1)
        eps = self.config.s_noise * random.normal(key=key, shape=sample.shape)
        sigma_hat = sigma + gamma * sigma
        sample_hat = sample + ((sigma_hat**2 - sigma**2) ** 0.5 * eps)
        return sample_hat, sigma_hat

    def step(self, state, model_output, sigma_hat, sigma_prev, sample_hat, return_dict=True):
        pred_original_sample = sample_hat + sigma_hat * model_output
        derivative = (sample_hat - pred_original_sample) / sigma_hat
        sample_prev = sample_hat + (sigma_prev - sigma_hat) * derivative

        if not return_dict:
            return (sample_prev, derivative, state)
        return FlaxKarrasVeOutput(prev_sample=sample_prev, derivative=derivative, state=state)

    def step_correct(
        self, state, model_output, sigma_hat, sigma_prev, sample_hat, sample_prev, derivative, return_dict=True
    ):
        pred_original_sample = sample_prev + sigma_prev * model_output
        derivative_corr = (sample_prev - pred_original_sample) / sigma_prev
        sample_prev = sample_hat + (sigma_prev - sigma_hat) * (0.5 * derivative + 0.5 * derivative_corr)

        if not return_dict:
            return (sample_prev, derivative, state)
        return FlaxKarrasVeOutput(prev_sample=sample_prev, derivative=derivative, state=state)

    def add_noise(self, state, original_samples, noise, timesteps):
        raise NotImplementedError()
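The schedule built in set_timesteps above is a geometric interpolation between sigma_max**2 and sigma_min**2. Computed directly with numpy for a 5-step run, as a sketch with the default sigmas:

import numpy as np

sigma_min, sigma_max, steps = 0.02, 100.0, 5
i = np.arange(steps)[::-1]  # timesteps [4 3 2 1 0]
schedule = sigma_max**2 * (sigma_min**2 / sigma_max**2) ** (i / (steps - 1))
# Array runs from sigma_min**2 up to sigma_max**2; the sampling loop indexes
# it by timestep value, so sampling still starts at the largest sigma.
print(schedule)  # [4.e-04 ... 1.e+04]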
340
1
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

GPT_NEOX_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "EleutherAI/gpt-neox-20b": "https://huggingface.co/EleutherAI/gpt-neox-20b/resolve/main/config.json",
    # See all GPTNeoX models at https://huggingface.co/models?filter=gpt_neox
}


class GPTNeoXConfig(PretrainedConfig):
    model_type = "gpt_neox"

    def __init__(
        self,
        vocab_size=50432,
        hidden_size=6144,
        num_hidden_layers=44,
        num_attention_heads=64,
        intermediate_size=24576,
        hidden_act="gelu",
        rotary_pct=0.25,
        rotary_emb_base=10000,
        attention_dropout=0.0,
        hidden_dropout=0.0,
        classifier_dropout=0.1,
        max_position_embeddings=2048,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        use_cache=True,
        bos_token_id=0,
        eos_token_id=2,
        tie_word_embeddings=False,
        use_parallel_residual=True,
        rope_scaling=None,
        **kwargs,
    ):
        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.rotary_pct = rotary_pct
        self.rotary_emb_base = rotary_emb_base
        self.attention_dropout = attention_dropout
        self.hidden_dropout = hidden_dropout
        self.classifier_dropout = classifier_dropout
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.use_cache = use_cache
        self.tie_word_embeddings = tie_word_embeddings
        self.use_parallel_residual = use_parallel_residual
        self.rope_scaling = rope_scaling
        self._rope_scaling_validation()

        if self.hidden_size % self.num_attention_heads != 0:
            raise ValueError(
                "The hidden size is not divisble by the number of attention heads! Make sure to update them!"
            )

    def _rope_scaling_validation(self):
        if self.rope_scaling is None:
            return

        if not isinstance(self.rope_scaling, dict) or len(self.rope_scaling) != 2:
            raise ValueError(
                "`rope_scaling` must be a dictionary with with two fields, `name` and `factor`, "
                f"got {self.rope_scaling}"
            )
        rope_scaling_type = self.rope_scaling.get("type", None)
        rope_scaling_factor = self.rope_scaling.get("factor", None)
        if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]:
            raise ValueError(
                f"`rope_scaling`'s name field must be one of ['linear', 'dynamic'], got {rope_scaling_type}"
            )
        if rope_scaling_factor is None or not isinstance(rope_scaling_factor, float) or rope_scaling_factor <= 1.0:
            raise ValueError(f"`rope_scaling`'s factor field must be an float > 1, got {rope_scaling_factor}")
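The _rope_scaling_validation hook above rejects malformed dicts at construction time; a quick sketch of both sides of that check:

# Valid: linear scaling with a float factor > 1.
config = GPTNeoXConfig(rope_scaling={"type": "linear", "factor": 2.0})

# Invalid: an unknown type raises at __init__ time.
try:
    GPTNeoXConfig(rope_scaling={"type": "cubic", "factor": 2.0})
except ValueError as err:
    print(err)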
712
import os from pickle import UnpicklingError from typing import Dict, Tuple import jax import jax.numpy as jnp import numpy as np from flax.serialization import from_bytes from flax.traverse_util import flatten_dict, unflatten_dict import transformers from .utils import logging _snake_case = logging.get_logger(__name__) def __lowerCamelCase ( _lowercase , _lowercase , _lowercase , _lowercase=False ) -> Optional[int]: try: import torch # noqa: F401 except ImportError: logger.error( 'Loading a PyTorch model in Flax, requires both PyTorch and Flax to be installed. Please see' ' https://pytorch.org/ and https://flax.readthedocs.io/en/latest/installation.html for installation' ' instructions.' ) raise if not is_sharded: UpperCamelCase = os.path.abspath(_lowercase ) logger.info(F'Loading PyTorch weights from {pt_path}' ) UpperCamelCase = torch.load(_lowercase , map_location='cpu' ) logger.info(F'PyTorch checkpoint contains {sum(t.numel() for t in pt_state_dict.values() ):,} parameters.' ) UpperCamelCase = convert_pytorch_state_dict_to_flax(_lowercase , _lowercase ) else: # model is sharded and pytorch_checkpoint_path already contains the list of .pt shard files UpperCamelCase = convert_pytorch_sharded_state_dict_to_flax(_lowercase , _lowercase ) return flax_state_dict def __lowerCamelCase ( _lowercase , _lowercase , _lowercase , _lowercase , ) -> (Tuple[str], np.ndarray): def is_key_or_prefix_key_in_dict(_lowercase ) -> bool: return len(set(_lowercase ) & {key, (model_prefix,) + key} ) > 0 # layer norm UpperCamelCase = pt_tuple_key[:-1] + ('scale',) if pt_tuple_key[-1] in ["weight", "gamma"] and is_key_or_prefix_key_in_dict(_lowercase ): return renamed_pt_tuple_key, pt_tensor # batch norm layer mean UpperCamelCase = pt_tuple_key[:-1] + ('mean',) if pt_tuple_key[-1] == "running_mean" and not is_key_or_prefix_key_in_dict(_lowercase ): return renamed_pt_tuple_key, pt_tensor # batch norm layer var UpperCamelCase = pt_tuple_key[:-1] + ('var',) if pt_tuple_key[-1] == "running_var" and not is_key_or_prefix_key_in_dict(_lowercase ): return renamed_pt_tuple_key, pt_tensor # embedding UpperCamelCase = pt_tuple_key[:-1] + ('embedding',) if pt_tuple_key[-1] == "weight" and is_key_or_prefix_key_in_dict(_lowercase ): return renamed_pt_tuple_key, pt_tensor # conv layer UpperCamelCase = pt_tuple_key[:-1] + ('kernel',) if pt_tuple_key[-1] == "weight" and pt_tensor.ndim == 4 and not is_key_or_prefix_key_in_dict(_lowercase ): UpperCamelCase = pt_tensor.transpose(2 , 3 , 1 , 0 ) return renamed_pt_tuple_key, pt_tensor # linear layer UpperCamelCase = pt_tuple_key[:-1] + ('kernel',) if pt_tuple_key[-1] == "weight" and not is_key_or_prefix_key_in_dict(_lowercase ): UpperCamelCase = pt_tensor.T return renamed_pt_tuple_key, pt_tensor # old PyTorch layer norm weight UpperCamelCase = pt_tuple_key[:-1] + ('weight',) if pt_tuple_key[-1] == "gamma": return renamed_pt_tuple_key, pt_tensor # old PyTorch layer norm bias UpperCamelCase = pt_tuple_key[:-1] + ('bias',) if pt_tuple_key[-1] == "beta": return renamed_pt_tuple_key, pt_tensor # New `weight_norm` from https://github.com/huggingface/transformers/pull/24030 UpperCamelCase = None if pt_tuple_key[-3::2] == ("parametrizations", "original0"): UpperCamelCase = pt_tuple_key[-2] + '_g' elif pt_tuple_key[-3::2] == ("parametrizations", "original1"): UpperCamelCase = pt_tuple_key[-2] + '_v' if name is not None: UpperCamelCase = pt_tuple_key[:-3] + (name,) return renamed_pt_tuple_key, pt_tensor return pt_tuple_key, pt_tensor def __lowerCamelCase ( _lowercase , _lowercase ) -> 
Optional[int]: # convert pytorch tensor to numpy UpperCamelCase = {k: v.numpy() for k, v in pt_state_dict.items()} UpperCamelCase = flax_model.base_model_prefix # use params dict if the model contains batch norm layers if "params" in flax_model.params: UpperCamelCase = flax_model.params['params'] else: UpperCamelCase = flax_model.params UpperCamelCase = flatten_dict(_lowercase ) # add batch_stats keys,values to dict if "batch_stats" in flax_model.params: UpperCamelCase = flatten_dict(flax_model.params['batch_stats'] ) random_flax_state_dict.update(_lowercase ) UpperCamelCase = {} UpperCamelCase = (model_prefix not in flax_model_params) and ( model_prefix in {k.split('.' )[0] for k in pt_state_dict.keys()} ) UpperCamelCase = (model_prefix in flax_model_params) and ( model_prefix not in {k.split('.' )[0] for k in pt_state_dict.keys()} ) # Need to change some parameters name to match Flax names for pt_key, pt_tensor in pt_state_dict.items(): UpperCamelCase = tuple(pt_key.split('.' ) ) # remove base model prefix if necessary UpperCamelCase = pt_tuple_key[0] == model_prefix if load_model_with_head_into_base_model and has_base_model_prefix: UpperCamelCase = pt_tuple_key[1:] # Correctly rename weight parameters UpperCamelCase , UpperCamelCase = rename_key_and_reshape_tensor( _lowercase , _lowercase , _lowercase , _lowercase ) # add model prefix if necessary UpperCamelCase = (model_prefix,) + flax_key in random_flax_state_dict if load_base_model_into_model_with_head and require_base_model_prefix: UpperCamelCase = (model_prefix,) + flax_key if flax_key in random_flax_state_dict: if flax_tensor.shape != random_flax_state_dict[flax_key].shape: raise ValueError( F'PyTorch checkpoint seems to be incorrect. Weight {pt_key} was expected to be of shape ' F'{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}.' ) # add batch stats if the model contains batchnorm layers if "batch_stats" in flax_model.params: if "mean" in flax_key[-1] or "var" in flax_key[-1]: UpperCamelCase = jnp.asarray(_lowercase ) continue # remove num_batches_tracked key if "num_batches_tracked" in flax_key[-1]: flax_state_dict.pop(_lowercase , _lowercase ) continue # also add unexpected weight so that warning is thrown UpperCamelCase = jnp.asarray(_lowercase ) else: # also add unexpected weight so that warning is thrown UpperCamelCase = jnp.asarray(_lowercase ) return unflatten_dict(_lowercase ) def __lowerCamelCase ( _lowercase , _lowercase ) -> Dict: import torch # Load the index UpperCamelCase = {} for shard_file in shard_filenames: # load using msgpack utils UpperCamelCase = torch.load(_lowercase ) UpperCamelCase = {k: v.numpy() for k, v in pt_state_dict.items()} UpperCamelCase = flax_model.base_model_prefix # use params dict if the model contains batch norm layers and then add batch_stats keys,values to dict if "batch_stats" in flax_model.params: UpperCamelCase = flax_model.params['params'] UpperCamelCase = flatten_dict(_lowercase ) random_flax_state_dict.update(flatten_dict(flax_model.params['batch_stats'] ) ) else: UpperCamelCase = flax_model.params UpperCamelCase = flatten_dict(_lowercase ) UpperCamelCase = (model_prefix not in flax_model_params) and ( model_prefix in {k.split('.' )[0] for k in pt_state_dict.keys()} ) UpperCamelCase = (model_prefix in flax_model_params) and ( model_prefix not in {k.split('.' )[0] for k in pt_state_dict.keys()} ) # Need to change some parameters name to match Flax names for pt_key, pt_tensor in pt_state_dict.items(): UpperCamelCase = tuple(pt_key.split('.' 
) ) # remove base model prefix if necessary UpperCamelCase = pt_tuple_key[0] == model_prefix if load_model_with_head_into_base_model and has_base_model_prefix: UpperCamelCase = pt_tuple_key[1:] # Correctly rename weight parameters UpperCamelCase , UpperCamelCase = rename_key_and_reshape_tensor( _lowercase , _lowercase , _lowercase , _lowercase ) # add model prefix if necessary UpperCamelCase = (model_prefix,) + flax_key in random_flax_state_dict if load_base_model_into_model_with_head and require_base_model_prefix: UpperCamelCase = (model_prefix,) + flax_key if flax_key in random_flax_state_dict: if flax_tensor.shape != random_flax_state_dict[flax_key].shape: raise ValueError( F'PyTorch checkpoint seems to be incorrect. Weight {pt_key} was expected to be of shape ' F'{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}.' ) # add batch stats if the model contains batchnorm layers if "batch_stats" in flax_model.params: if "mean" in flax_key[-1]: UpperCamelCase = jnp.asarray(_lowercase ) continue if "var" in flax_key[-1]: UpperCamelCase = jnp.asarray(_lowercase ) continue # remove num_batches_tracked key if "num_batches_tracked" in flax_key[-1]: flax_state_dict.pop(_lowercase , _lowercase ) continue # also add unexpected weight so that warning is thrown UpperCamelCase = jnp.asarray(_lowercase ) else: # also add unexpected weight so that warning is thrown UpperCamelCase = jnp.asarray(_lowercase ) return unflatten_dict(_lowercase ) def __lowerCamelCase ( _lowercase , _lowercase ) -> str: UpperCamelCase = os.path.abspath(_lowercase ) logger.info(F'Loading Flax weights from {flax_checkpoint_path}' ) # import correct flax class UpperCamelCase = getattr(_lowercase , 'Flax' + model.__class__.__name__ ) # load flax weight dict with open(_lowercase , 'rb' ) as state_f: try: UpperCamelCase = from_bytes(_lowercase , state_f.read() ) except UnpicklingError: raise EnvironmentError(F'Unable to convert {flax_checkpoint_path} to Flax deserializable object. ' ) return load_flax_weights_in_pytorch_model(_lowercase , _lowercase ) def __lowerCamelCase ( _lowercase , _lowercase ) -> List[Any]: try: import torch # noqa: F401 except ImportError: logger.error( 'Loading a Flax weights in PyTorch, requires both PyTorch and Flax to be installed. Please see' ' https://pytorch.org/ and https://flax.readthedocs.io/en/latest/installation.html for installation' ' instructions.' ) raise # check if we have bf16 weights UpperCamelCase = flatten_dict(jax.tree_util.tree_map(lambda _lowercase : x.dtype == jnp.bfloataa , _lowercase ) ).values() if any(_lowercase ): # convert all weights to fp32 if the are bf16 since torch.from_numpy can-not handle bf16 # and bf16 is not fully supported in PT yet. logger.warning( 'Found ``bfloat16`` weights in Flax model. Casting all ``bfloat16`` weights to ``float32`` ' 'before loading those in PyTorch model.' ) UpperCamelCase = jax.tree_util.tree_map( lambda _lowercase : params.astype(np.floataa ) if params.dtype == jnp.bfloataa else params , _lowercase ) UpperCamelCase = flatten_dict(_lowercase ) UpperCamelCase = pt_model.state_dict() UpperCamelCase = (pt_model.base_model_prefix in flax_state) and ( pt_model.base_model_prefix not in {k.split('.' )[0] for k in pt_model_dict.keys()} ) UpperCamelCase = (pt_model.base_model_prefix not in flax_state) and ( pt_model.base_model_prefix in {k.split('.' 
)[0] for k in pt_model_dict.keys()} ) # keep track of unexpected & missing keys UpperCamelCase = [] UpperCamelCase = set(pt_model_dict.keys() ) for flax_key_tuple, flax_tensor in flax_state_dict.items(): UpperCamelCase = flax_key_tuple[0] == pt_model.base_model_prefix UpperCamelCase = '.'.join((pt_model.base_model_prefix,) + flax_key_tuple ) in pt_model_dict # adapt flax_key to prepare for loading from/to base model only if load_model_with_head_into_base_model and has_base_model_prefix: UpperCamelCase = flax_key_tuple[1:] elif load_base_model_into_model_with_head and require_base_model_prefix: UpperCamelCase = (pt_model.base_model_prefix,) + flax_key_tuple # rename flax weights to PyTorch format if flax_key_tuple[-1] == "kernel" and flax_tensor.ndim == 4 and ".".join(_lowercase ) not in pt_model_dict: # conv layer UpperCamelCase = flax_key_tuple[:-1] + ('weight',) UpperCamelCase = jnp.transpose(_lowercase , (3, 2, 0, 1) ) elif flax_key_tuple[-1] == "kernel" and ".".join(_lowercase ) not in pt_model_dict: # linear layer UpperCamelCase = flax_key_tuple[:-1] + ('weight',) UpperCamelCase = flax_tensor.T elif flax_key_tuple[-1] in ["scale", "embedding"]: UpperCamelCase = flax_key_tuple[:-1] + ('weight',) # adding batch stats from flax batch norm to pt elif "mean" in flax_key_tuple[-1]: UpperCamelCase = flax_key_tuple[:-1] + ('running_mean',) elif "var" in flax_key_tuple[-1]: UpperCamelCase = flax_key_tuple[:-1] + ('running_var',) if "batch_stats" in flax_state: UpperCamelCase = '.'.join(flax_key_tuple[1:] ) # Remove the params/batch_stats header else: UpperCamelCase = '.'.join(_lowercase ) # We also need to look at `pt_model_dict` and see if there are keys requiring further transformation. UpperCamelCase = {} # New `weight_norm` from https://github.com/huggingface/transformers/pull/24030 for key in pt_model_dict: UpperCamelCase = key.split('.' ) UpperCamelCase = None if key_components[-3::2] == ["parametrizations", "original0"]: UpperCamelCase = key_components[-2] + '_g' elif key_components[-3::2] == ["parametrizations", "original1"]: UpperCamelCase = key_components[-2] + '_v' if name is not None: UpperCamelCase = key_components[:-3] + [name] UpperCamelCase = '.'.join(_lowercase ) UpperCamelCase = key if flax_key in special_pt_names: UpperCamelCase = special_pt_names[flax_key] if flax_key in pt_model_dict: if flax_tensor.shape != pt_model_dict[flax_key].shape: raise ValueError( F'Flax checkpoint seems to be incorrect. Weight {flax_key_tuple} was expected ' F'to be of shape {pt_model_dict[flax_key].shape}, but is {flax_tensor.shape}.' ) else: # add weight to pytorch dict UpperCamelCase = np.asarray(_lowercase ) if not isinstance(_lowercase , np.ndarray ) else flax_tensor UpperCamelCase = torch.from_numpy(_lowercase ) # remove from missing keys missing_keys.remove(_lowercase ) else: # weight is not expected by PyTorch model unexpected_keys.append(_lowercase ) pt_model.load_state_dict(_lowercase ) # re-transform missing_keys to list UpperCamelCase = list(_lowercase ) if len(_lowercase ) > 0: logger.warning( 'Some weights of the Flax model were not used when initializing the PyTorch model' F' {pt_model.__class__.__name__}: {unexpected_keys}\n- This IS expected if you are initializing' F' {pt_model.__class__.__name__} from a Flax model trained on another task or with another architecture' ' (e.g. 
initializing a BertForSequenceClassification model from a FlaxBertForPreTraining model).\n- This' F' IS NOT expected if you are initializing {pt_model.__class__.__name__} from a Flax model that you expect' ' to be exactly identical (e.g. initializing a BertForSequenceClassification model from a' ' FlaxBertForSequenceClassification model).' ) else: logger.warning(F'All Flax model weights were used when initializing {pt_model.__class__.__name__}.\n' ) if len(_lowercase ) > 0: logger.warning( F'Some weights of {pt_model.__class__.__name__} were not initialized from the Flax model and are newly' F' initialized: {missing_keys}\nYou should probably TRAIN this model on a down-stream task to be able to' ' use it for predictions and inference.' ) else: logger.warning( F'All the weights of {pt_model.__class__.__name__} were initialized from the Flax model.\n' 'If your task is similar to the task the model of the checkpoint was trained on, ' F'you can already use {pt_model.__class__.__name__} for predictions without further training.' ) return pt_model
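The weight-layout conventions handled above boil down to two rules: PyTorch nn.Linear stores (out_features, in_features) while Flax Dense stores (in_features, out_features), and PyTorch Conv2d stores (out, in, kh, kw) while Flax stores (kh, kw, in, out). A numpy sketch of both transposes:

import numpy as np

# Linear: PyTorch (out_features, in_features) <-> Flax (in_features, out_features)
pt_linear = np.zeros((8, 4))
flax_kernel = pt_linear.T
assert flax_kernel.shape == (4, 8)

# Conv2d: PyTorch (out, in, kh, kw) <-> Flax (kh, kw, in, out),
# matching the (2, 3, 1, 0) / (3, 2, 0, 1) transposes used above.
pt_conv = np.zeros((16, 3, 5, 5))
flax_conv = pt_conv.transpose(2, 3, 1, 0)
assert flax_conv.shape == (5, 5, 3, 16)
back = flax_conv.transpose(3, 2, 0, 1)
assert back.shape == pt_conv.shape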
170
0
"""simple docstring""" import gc import random import unittest import numpy as np import torch from transformers import CLIPTextConfig, CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer from diffusers import ( AutoencoderKL, DiffusionPipeline, EulerDiscreteScheduler, StableDiffusionXLImgaImgPipeline, UNetaDConditionModel, ) from diffusers.utils import floats_tensor, slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu from ..pipeline_params import ( IMAGE_TO_IMAGE_IMAGE_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS, ) from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin enable_full_determinism() class _UpperCAmelCase ( lowerCAmelCase__ , lowerCAmelCase__ , unittest.TestCase): _lowerCAmelCase : str = StableDiffusionXLImgaImgPipeline _lowerCAmelCase : Union[str, Any] = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"""height""", """width"""} _lowerCAmelCase : Union[str, Any] = PipelineTesterMixin.required_optional_params - {"""latents"""} _lowerCAmelCase : int = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS _lowerCAmelCase : List[Any] = IMAGE_TO_IMAGE_IMAGE_PARAMS _lowerCAmelCase : Optional[int] = IMAGE_TO_IMAGE_IMAGE_PARAMS def _snake_case ( self : Optional[Any] ): torch.manual_seed(0 ) snake_case_ : List[Any] = UNetaDConditionModel( block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''') , up_block_types=('''CrossAttnUpBlock2D''', '''UpBlock2D''') , attention_head_dim=(2, 4) , use_linear_projection=lowercase_ , addition_embed_type='''text_time''' , addition_time_embed_dim=8 , transformer_layers_per_block=(1, 2) , projection_class_embeddings_input_dim=80 , cross_attention_dim=64 , ) snake_case_ : int = EulerDiscreteScheduler( beta_start=0.0_00_85 , beta_end=0.0_12 , steps_offset=1 , beta_schedule='''scaled_linear''' , timestep_spacing='''leading''' , ) torch.manual_seed(0 ) snake_case_ : Union[str, Any] = AutoencoderKL( block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''] , up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] , latent_channels=4 , sample_size=128 , ) torch.manual_seed(0 ) snake_case_ : Tuple = CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , hidden_act='''gelu''' , projection_dim=32 , ) snake_case_ : Optional[Any] = CLIPTextModel(lowercase_ ) snake_case_ : int = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' , local_files_only=lowercase_ ) snake_case_ : Tuple = CLIPTextModelWithProjection(lowercase_ ) snake_case_ : str = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' , local_files_only=lowercase_ ) snake_case_ : str = { '''unet''': unet, '''scheduler''': scheduler, '''vae''': vae, '''text_encoder''': text_encoder, '''tokenizer''': tokenizer, '''text_encoder_2''': text_encoder_a, '''tokenizer_2''': tokenizer_a, # "safety_checker": None, # "feature_extractor": None, } return components def _snake_case ( self : Union[str, Any] , lowercase_ : Optional[Any] , lowercase_ : Tuple=0 ): snake_case_ : Tuple = floats_tensor((1, 3, 32, 32) , rng=random.Random(lowercase_ ) ).to(lowercase_ ) snake_case_ : str = image / 2 + 0.5 if str(lowercase_ ).startswith('''mps''' ): snake_case_ : 
Union[str, Any] = torch.manual_seed(lowercase_ ) else: snake_case_ : Any = torch.Generator(device=lowercase_ ).manual_seed(lowercase_ ) snake_case_ : Optional[int] = { '''prompt''': '''A painting of a squirrel eating a burger''', '''image''': image, '''generator''': generator, '''num_inference_steps''': 2, '''guidance_scale''': 5.0, '''output_type''': '''numpy''', '''strength''': 0.75, } return inputs def _snake_case ( self : Optional[Any] ): snake_case_ : int = '''cpu''' # ensure determinism for the device-dependent torch.Generator snake_case_ : List[str] = self.get_dummy_components() snake_case_ : List[Any] = StableDiffusionXLImgaImgPipeline(**lowercase_ ) snake_case_ : List[Any] = sd_pipe.to(lowercase_ ) sd_pipe.set_progress_bar_config(disable=lowercase_ ) snake_case_ : List[Any] = self.get_dummy_inputs(lowercase_ ) snake_case_ : List[str] = sd_pipe(**lowercase_ ).images snake_case_ : int = image[0, -3:, -3:, -1] assert image.shape == (1, 32, 32, 3) snake_case_ : Dict = np.array([0.46_56, 0.48_40, 0.44_39, 0.66_98, 0.55_74, 0.45_24, 0.57_99, 0.59_43, 0.51_65] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 def _snake_case ( self : Any ): super().test_attention_slicing_forward_pass(expected_max_diff=3E-3 ) def _snake_case ( self : str ): super().test_inference_batch_single_identical(expected_max_diff=3E-3 ) def _snake_case ( self : Dict ): pass def _snake_case ( self : Optional[int] ): snake_case_ : Union[str, Any] = self.get_dummy_components() snake_case_ : Tuple = StableDiffusionXLImgaImgPipeline(**lowercase_ ) snake_case_ : Union[str, Any] = sd_pipe.to(lowercase_ ) snake_case_ : Union[str, Any] = sd_pipe.to(lowercase_ ) sd_pipe.set_progress_bar_config(disable=lowercase_ ) # forward without prompt embeds snake_case_ : List[str] = self.get_dummy_inputs(lowercase_ ) snake_case_ : Tuple = 3 * ['''this is a negative prompt'''] snake_case_ : Any = negative_prompt snake_case_ : Tuple = 3 * [inputs['''prompt''']] snake_case_ : Dict = sd_pipe(**lowercase_ ) snake_case_ : Tuple = output.images[0, -3:, -3:, -1] # forward with prompt embeds snake_case_ : Tuple = self.get_dummy_inputs(lowercase_ ) snake_case_ : str = 3 * ['''this is a negative prompt'''] snake_case_ : Optional[Any] = 3 * [inputs.pop('''prompt''' )] ( ( snake_case_ ), ( snake_case_ ), ( snake_case_ ), ( snake_case_ ), ) : str = sd_pipe.encode_prompt(lowercase_ , negative_prompt=lowercase_ ) snake_case_ : List[str] = sd_pipe( **lowercase_ , prompt_embeds=lowercase_ , negative_prompt_embeds=lowercase_ , pooled_prompt_embeds=lowercase_ , negative_pooled_prompt_embeds=lowercase_ , ) snake_case_ : Union[str, Any] = output.images[0, -3:, -3:, -1] # make sure that it's equal assert np.abs(image_slice_a.flatten() - image_slice_a.flatten() ).max() < 1E-4 @slow @require_torch_gpu class _UpperCAmelCase ( unittest.TestCase): def _snake_case ( self : Optional[int] ): super().tearDown() gc.collect() torch.cuda.empty_cache() def _snake_case ( self : Any , lowercase_ : List[str] , lowercase_ : int="cpu" , lowercase_ : int=torch.floataa , lowercase_ : Optional[int]=0 ): snake_case_ : Dict = torch.Generator(device=lowercase_ ).manual_seed(lowercase_ ) snake_case_ : Optional[int] = np.random.RandomState(lowercase_ ).standard_normal((1, 4, 64, 64) ) snake_case_ : List[Any] = torch.from_numpy(lowercase_ ).to(device=lowercase_ , dtype=lowercase_ ) snake_case_ : List[str] = { '''prompt''': '''a photograph of an astronaut riding a horse''', '''latents''': latents, '''generator''': generator, '''num_inference_steps''': 3, 
'''guidance_scale''': 7.5, '''output_type''': '''numpy''', } return inputs def _snake_case ( self : str ): snake_case_ : Optional[int] = DiffusionPipeline.from_pretrained('''stabilityai/stable-diffusion-2-base''' ) pipe.to(lowercase_ ) pipe.set_progress_bar_config(disable=lowercase_ ) snake_case_ : Union[str, Any] = self.get_inputs(lowercase_ ) snake_case_ : int = pipe(**lowercase_ ).images snake_case_ : int = image[0, -3:, -3:, -1].flatten() assert image.shape == (1, 512, 512, 3) snake_case_ : Optional[Any] = np.array([0.4_94_93, 0.4_78_96, 0.4_07_98, 0.5_42_14, 0.5_32_12, 0.4_82_02, 0.4_76_56, 0.4_63_29, 0.4_85_06] ) assert np.abs(image_slice - expected_slice ).max() < 7E-3
123
"""simple docstring""" import itertools import random import unittest import numpy as np from transformers import is_speech_available from transformers.testing_utils import require_torch, require_torchaudio from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin if is_speech_available(): from transformers import SpeechaTextFeatureExtractor lowercase__ : str = random.Random() def __lowercase ( _a , _a=1.0 , _a=None , _a=None ): if rng is None: snake_case_ : Tuple = global_rng snake_case_ : str = [] for batch_idx in range(shape[0] ): values.append([] ) for _ in range(shape[1] ): values[-1].append(rng.random() * scale ) return values @require_torch @require_torchaudio class _UpperCAmelCase ( unittest.TestCase): def __init__( self : Union[str, Any] , lowercase_ : Optional[int] , lowercase_ : Tuple=7 , lowercase_ : Union[str, Any]=400 , lowercase_ : Tuple=2000 , lowercase_ : str=24 , lowercase_ : Any=24 , lowercase_ : str=0.0 , lowercase_ : str=16000 , lowercase_ : Any=True , lowercase_ : Tuple=True , ): snake_case_ : Any = parent snake_case_ : Dict = batch_size snake_case_ : Tuple = min_seq_length snake_case_ : List[str] = max_seq_length snake_case_ : int = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1) snake_case_ : Optional[Any] = feature_size snake_case_ : Union[str, Any] = num_mel_bins snake_case_ : List[str] = padding_value snake_case_ : List[str] = sampling_rate snake_case_ : str = return_attention_mask snake_case_ : str = do_normalize def _snake_case ( self : Any ): return { "feature_size": self.feature_size, "num_mel_bins": self.num_mel_bins, "padding_value": self.padding_value, "sampling_rate": self.sampling_rate, "return_attention_mask": self.return_attention_mask, "do_normalize": self.do_normalize, } def _snake_case ( self : Tuple , lowercase_ : Tuple=False , lowercase_ : Optional[int]=False ): def _flatten(lowercase_ : Union[str, Any] ): return list(itertools.chain(*lowercase_ ) ) if equal_length: snake_case_ : Optional[Any] = [floats_list((self.max_seq_length, self.feature_size) ) for _ in range(self.batch_size )] else: # make sure that inputs increase in size snake_case_ : Optional[int] = [ floats_list((x, self.feature_size) ) for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff ) ] if numpify: snake_case_ : Dict = [np.asarray(lowercase_ ) for x in speech_inputs] return speech_inputs @require_torch @require_torchaudio class _UpperCAmelCase ( lowerCAmelCase__ , unittest.TestCase): _lowerCAmelCase : Any = SpeechaTextFeatureExtractor if is_speech_available() else None def _snake_case ( self : str ): snake_case_ : Union[str, Any] = SpeechaTextFeatureExtractionTester(self ) def _snake_case ( self : List[str] , lowercase_ : Union[str, Any] ): self.assertTrue(np.all(np.mean(lowercase_ , axis=0 ) < 1E-3 ) ) self.assertTrue(np.all(np.abs(np.var(lowercase_ , axis=0 ) - 1 ) < 1E-3 ) ) def _snake_case ( self : Optional[Any] ): # Tests that all call wrap to encode_plus and batch_encode_plus snake_case_ : List[Any] = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() ) # create three inputs of length 800, 1000, and 1200 snake_case_ : Any = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )] snake_case_ : Any = [np.asarray(lowercase_ ) for speech_input in speech_inputs] # Test feature size snake_case_ : Tuple = feature_extractor(lowercase_ , padding=lowercase_ , return_tensors='''np''' ).input_features self.assertTrue(input_features.ndim == 3 ) 
self.assertTrue(input_features.shape[-1] == feature_extractor.feature_size ) # Test not batched input snake_case_ : List[str] = feature_extractor(speech_inputs[0] , return_tensors='''np''' ).input_features snake_case_ : Optional[Any] = feature_extractor(np_speech_inputs[0] , return_tensors='''np''' ).input_features self.assertTrue(np.allclose(lowercase_ , lowercase_ , atol=1E-3 ) ) # Test batched snake_case_ : List[Any] = feature_extractor(lowercase_ , return_tensors='''np''' ).input_features snake_case_ : List[Any] = feature_extractor(lowercase_ , return_tensors='''np''' ).input_features for enc_seq_a, enc_seq_a in zip(lowercase_ , lowercase_ ): self.assertTrue(np.allclose(lowercase_ , lowercase_ , atol=1E-3 ) ) # Test 2-D numpy arrays are batched. snake_case_ : Union[str, Any] = [floats_list((1, x) )[0] for x in (800, 800, 800)] snake_case_ : Dict = np.asarray(lowercase_ ) snake_case_ : Optional[int] = feature_extractor(lowercase_ , return_tensors='''np''' ).input_features snake_case_ : Optional[int] = feature_extractor(lowercase_ , return_tensors='''np''' ).input_features for enc_seq_a, enc_seq_a in zip(lowercase_ , lowercase_ ): self.assertTrue(np.allclose(lowercase_ , lowercase_ , atol=1E-3 ) ) def _snake_case ( self : Optional[Any] ): snake_case_ : List[str] = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() ) snake_case_ : int = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )] snake_case_ : int = ['''longest''', '''max_length''', '''do_not_pad'''] snake_case_ : List[Any] = [None, 16, None] for max_length, padding in zip(lowercase_ , lowercase_ ): snake_case_ : Union[str, Any] = feature_extractor( lowercase_ , padding=lowercase_ , max_length=lowercase_ , return_attention_mask=lowercase_ ) snake_case_ : Optional[int] = inputs.input_features snake_case_ : List[Any] = inputs.attention_mask snake_case_ : Any = [np.sum(lowercase_ ) for x in attention_mask] self._check_zero_mean_unit_variance(input_features[0][: fbank_feat_lengths[0]] ) self._check_zero_mean_unit_variance(input_features[1][: fbank_feat_lengths[1]] ) self._check_zero_mean_unit_variance(input_features[2][: fbank_feat_lengths[2]] ) def _snake_case ( self : List[Any] ): snake_case_ : Dict = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() ) snake_case_ : Union[str, Any] = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )] snake_case_ : List[Any] = ['''longest''', '''max_length''', '''do_not_pad'''] snake_case_ : Dict = [None, 16, None] for max_length, padding in zip(lowercase_ , lowercase_ ): snake_case_ : Dict = feature_extractor( lowercase_ , max_length=lowercase_ , padding=lowercase_ , return_tensors='''np''' , return_attention_mask=lowercase_ ) snake_case_ : Any = inputs.input_features snake_case_ : Optional[Any] = inputs.attention_mask snake_case_ : List[str] = [np.sum(lowercase_ ) for x in attention_mask] self._check_zero_mean_unit_variance(input_features[0][: fbank_feat_lengths[0]] ) self.assertTrue(input_features[0][fbank_feat_lengths[0] :].sum() < 1E-6 ) self._check_zero_mean_unit_variance(input_features[1][: fbank_feat_lengths[1]] ) self.assertTrue(input_features[0][fbank_feat_lengths[1] :].sum() < 1E-6 ) self._check_zero_mean_unit_variance(input_features[2][: fbank_feat_lengths[2]] ) def _snake_case ( self : Optional[Any] ): snake_case_ : Any = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() ) snake_case_ : Any = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )] snake_case_ : 
Dict = feature_extractor( lowercase_ , padding='''max_length''' , max_length=4 , truncation=lowercase_ , return_tensors='''np''' , return_attention_mask=lowercase_ , ) snake_case_ : int = inputs.input_features snake_case_ : Any = inputs.attention_mask snake_case_ : List[Any] = np.sum(attention_mask == 1 , axis=1 ) self._check_zero_mean_unit_variance(input_features[0, : fbank_feat_lengths[0]] ) self._check_zero_mean_unit_variance(input_features[1] ) self._check_zero_mean_unit_variance(input_features[2] ) def _snake_case ( self : int ): snake_case_ : Union[str, Any] = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() ) snake_case_ : Optional[Any] = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )] snake_case_ : List[str] = feature_extractor( lowercase_ , padding='''longest''' , max_length=4 , truncation=lowercase_ , return_tensors='''np''' , return_attention_mask=lowercase_ , ) snake_case_ : Union[str, Any] = inputs.input_features snake_case_ : Any = inputs.attention_mask snake_case_ : Optional[int] = np.sum(attention_mask == 1 , axis=1 ) self._check_zero_mean_unit_variance(input_features[0, : fbank_feat_lengths[0]] ) self._check_zero_mean_unit_variance(input_features[1, : fbank_feat_lengths[1]] ) self._check_zero_mean_unit_variance(input_features[2] ) # make sure that if max_length < longest -> then pad to max_length self.assertEqual(input_features.shape , (3, 4, 24) ) snake_case_ : Union[str, Any] = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )] snake_case_ : str = feature_extractor( lowercase_ , padding='''longest''' , max_length=16 , truncation=lowercase_ , return_tensors='''np''' , return_attention_mask=lowercase_ , ) snake_case_ : Optional[int] = inputs.input_features snake_case_ : Optional[int] = inputs.attention_mask snake_case_ : Union[str, Any] = np.sum(attention_mask == 1 , axis=1 ) self._check_zero_mean_unit_variance(input_features[0, : fbank_feat_lengths[0]] ) self._check_zero_mean_unit_variance(input_features[1, : fbank_feat_lengths[1]] ) self._check_zero_mean_unit_variance(input_features[2] ) # make sure that if max_length < longest -> then pad to max_length self.assertEqual(input_features.shape , (3, 6, 24) ) def _snake_case ( self : Tuple ): import torch snake_case_ : Dict = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() ) snake_case_ : Union[str, Any] = np.random.rand(100 , 32 ).astype(np.floataa ) snake_case_ : Optional[Any] = np_speech_inputs.tolist() for inputs in [py_speech_inputs, np_speech_inputs]: snake_case_ : str = feature_extractor.pad([{'''input_features''': inputs}] , return_tensors='''np''' ) self.assertTrue(np_processed.input_features.dtype == np.floataa ) snake_case_ : List[str] = feature_extractor.pad([{'''input_features''': inputs}] , return_tensors='''pt''' ) self.assertTrue(pt_processed.input_features.dtype == torch.floataa ) def _snake_case ( self : List[str] , lowercase_ : List[str] ): from datasets import load_dataset snake_case_ : str = load_dataset('''hf-internal-testing/librispeech_asr_dummy''' , '''clean''' , split='''validation''' ) # automatic decoding with librispeech snake_case_ : int = ds.sort('''id''' ).select(range(lowercase_ ) )[:num_samples]['''audio'''] return [x["array"] for x in speech_samples] def _snake_case ( self : str ): # fmt: off snake_case_ : List[Any] = np.array([ -1.57_45, -1.77_13, -1.70_20, -1.60_69, -1.22_50, -1.11_05, -0.90_72, -0.82_41, -1.23_10, -0.80_98, -0.33_20, -0.41_01, -0.79_85, -0.49_96, -0.82_13, -0.91_28, -1.04_20, 
-1.12_86, -1.04_40, -0.79_99, -0.84_05, -1.22_75, -1.54_43, -1.46_25, ] ) # fmt: on snake_case_ : Tuple = self._load_datasamples(1 ) snake_case_ : Optional[int] = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() ) snake_case_ : Tuple = feature_extractor(lowercase_ , return_tensors='''pt''' ).input_features self.assertEqual(input_features.shape , (1, 584, 24) ) self.assertTrue(np.allclose(input_features[0, 0, :30] , lowercase_ , atol=1E-4 ) )
123
1
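Both dummy-input helpers in the record above branch on the device before seeding; a minimal standalone sketch of that generator pattern (device string and seed are illustrative):

import torch

def make_generator(device: str, seed: int = 0) -> torch.Generator:
    # MPS does not support device-bound generators, so diffusers-style tests
    # fall back to the global CPU generator returned by torch.manual_seed.
    if str(device).startswith("mps"):
        return torch.manual_seed(seed)
    return torch.Generator(device=device).manual_seed(seed)

# Same seed, same noise: the property the reproducibility tests rely on.
ga, gb = make_generator("cpu"), make_generator("cpu")
assert torch.equal(torch.randn(2, 2, generator=ga), torch.randn(2, 2, generator=gb))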
'''simple docstring''' from __future__ import annotations import unittest from transformers import is_tf_available from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow if is_tf_available(): import tensorflow as tf from transformers import AutoTokenizer, TFAutoModelForSeqaSeqLM @require_tf @require_sentencepiece @require_tokenizers class _lowerCAmelCase ( unittest.TestCase ): '''simple docstring''' @slow def __lowercase ( self : Optional[int] ) -> Tuple: '''simple docstring''' _lowercase : Optional[Any] = TFAutoModelForSeqaSeqLM.from_pretrained('''google/mt5-small''' ) _lowercase : str = AutoTokenizer.from_pretrained('''google/mt5-small''' ) _lowercase : Union[str, Any] = tokenizer('''Hello there''' , return_tensors='''tf''' ).input_ids _lowercase : Any = tokenizer('''Hi I am''' , return_tensors='''tf''' ).input_ids _lowercase : Optional[int] = model(UpperCamelCase_ , labels=UpperCamelCase_ ).loss _lowercase : Optional[Any] = -tf.math.reduce_mean(UpperCamelCase_ ).numpy() _lowercase : List[str] = -21.228_168 self.assertTrue(abs(mtf_score - EXPECTED_SCORE ) < 2e-4 )
704
'''simple docstring''' import os import random import sys from . import cryptomath_module as cryptomath from . import rabin_miller lowerCamelCase__ = 3 def _SCREAMING_SNAKE_CASE( snake_case_ : int ) ->int: '''simple docstring''' print('''Generating primitive root of p''' ) while True: _lowercase : Optional[int] = random.randrange(3 , snake_case_ ) if pow(snake_case_ , 2 , snake_case_ ) == 1: continue if pow(snake_case_ , snake_case_ , snake_case_ ) == 1: continue return g def _SCREAMING_SNAKE_CASE( snake_case_ : int ) ->tuple[tuple[int, int, int, int], tuple[int, int]]: '''simple docstring''' print('''Generating prime p...''' ) _lowercase : str = rabin_miller.generate_large_prime(snake_case_ ) # select large prime number. _lowercase : Union[str, Any] = primitive_root(snake_case_ ) # one primitive root on modulo p. _lowercase : Optional[int] = random.randrange(3 , snake_case_ ) # private_key -> have to be greater than 2 for safety. _lowercase : int = cryptomath.find_mod_inverse(pow(snake_case_ , snake_case_ , snake_case_ ) , snake_case_ ) _lowercase : Any = (key_size, e_a, e_a, p) _lowercase : str = (key_size, d) return public_key, private_key def _SCREAMING_SNAKE_CASE( snake_case_ : str , snake_case_ : int ) ->None: '''simple docstring''' if os.path.exists(F"{name}_pubkey.txt" ) or os.path.exists(F"{name}_privkey.txt" ): print('''\nWARNING:''' ) print( F"\"{name}_pubkey.txt\" or \"{name}_privkey.txt\" already exists. \n" '''Use a different name or delete these files and re-run this program.''' ) sys.exit() _lowercase , _lowercase : Dict = generate_key(snake_case_ ) print(F"\nWriting public key to file {name}_pubkey.txt..." ) with open(F"{name}_pubkey.txt" , '''w''' ) as fo: fo.write(F"{public_key[0]},{public_key[1]},{public_key[2]},{public_key[3]}" ) print(F"Writing private key to file {name}_privkey.txt..." ) with open(F"{name}_privkey.txt" , '''w''' ) as fo: fo.write(F"{private_key[0]},{private_key[1]}" ) def _SCREAMING_SNAKE_CASE( ) ->None: '''simple docstring''' print('''Making key files...''' ) make_key_files('''elgamal''' , 20_48 ) print('''Key files generation successful''' ) if __name__ == "__main__": main()
411
0
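The ElGamal script above only produces and stores the key pair; a toy round trip showing how such keys would be used (tiny, insecure numbers chosen purely for illustration):

# ElGamal round trip with toy parameters -- never use values this small.
p, g = 467, 2          # public prime and generator
x = 127                # private key
y = pow(g, x, p)       # public key component g^x mod p

k, m = 197, 123        # ephemeral key and plaintext
c1, c2 = pow(g, k, p), (m * pow(y, k, p)) % p   # encrypt
recovered = (c2 * pow(c1, p - 1 - x, p)) % p    # decrypt via Fermat's little theorem
assert recovered == m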
import random import torch from huggingface_hub import HfApi from diffusers import UNetaDModel UpperCamelCase__ = HfApi() UpperCamelCase__ = {} # fmt: off UpperCamelCase__ = torch.tensor([ -0.7515, -1.6883, 0.2420, 0.0300, 0.6347, 1.3433, -1.1743, -3.7467, 1.2342, -2.2485, 0.4636, 0.8076, -0.7991, 0.3969, 0.8498, 0.9189, -1.8887, -3.3522, 0.7639, 0.2040, 0.6271, -2.7148, -1.6316, 3.0839, 0.3186, 0.2721, -0.9759, -1.2461, 2.6257, 1.3557 ]) UpperCamelCase__ = torch.tensor([ -2.3639, -2.5344, 0.0054, -0.6674, 1.5990, 1.0158, 0.3124, -2.1436, 1.8795, -2.5429, -0.1566, -0.3973, 1.2490, 2.6447, 1.2283, -0.5208, -2.8154, -3.5119, 2.3838, 1.2033, 1.7201, -2.1256, -1.4576, 2.7948, 2.4204, -0.9752, -1.2546, 0.8027, 3.2758, 3.1365 ]) UpperCamelCase__ = torch.tensor([ -0.6531, -0.6891, -0.3172, -0.5375, -0.9140, -0.5367, -0.1175, -0.7869, -0.3808, -0.4513, -0.2098, -0.0083, 0.3183, 0.5140, 0.2247, -0.1304, -0.1302, -0.2802, -0.2084, -0.2025, -0.4967, -0.4873, -0.0861, 0.6925, 0.0250, 0.1290, -0.1543, 0.6316, 1.0460, 1.4943 ]) UpperCamelCase__ = torch.tensor([ 0.0911, 0.1107, 0.0182, 0.0435, -0.0805, -0.0608, 0.0381, 0.2172, -0.0280, 0.1327, -0.0299, -0.0255, -0.0050, -0.1170, -0.1046, 0.0309, 0.1367, 0.1728, -0.0533, -0.0748, -0.0534, 0.1624, 0.0384, -0.1805, -0.0707, 0.0642, 0.0220, -0.0134, -0.1333, -0.1505 ]) UpperCamelCase__ = torch.tensor([ 0.1321, 0.1337, 0.0440, 0.0622, -0.0591, -0.0370, 0.0503, 0.2133, -0.0177, 0.1415, -0.0116, -0.0112, 0.0044, -0.0980, -0.0789, 0.0395, 0.1502, 0.1785, -0.0488, -0.0514, -0.0404, 0.1539, 0.0454, -0.1559, -0.0665, 0.0659, 0.0383, -0.0005, -0.1266, -0.1386 ]) UpperCamelCase__ = torch.tensor([ 0.1154, 0.1218, 0.0307, 0.0526, -0.0711, -0.0541, 0.0366, 0.2078, -0.0267, 0.1317, -0.0226, -0.0193, -0.0014, -0.1055, -0.0902, 0.0330, 0.1391, 0.1709, -0.0562, -0.0693, -0.0560, 0.1482, 0.0381, -0.1683, -0.0681, 0.0661, 0.0331, -0.0046, -0.1268, -0.1431 ]) UpperCamelCase__ = torch.tensor([ 0.1192, 0.1240, 0.0414, 0.0606, -0.0557, -0.0412, 0.0430, 0.2042, -0.0200, 0.1385, -0.0115, -0.0132, 0.0017, -0.0965, -0.0802, 0.0398, 0.1433, 0.1747, -0.0458, -0.0533, -0.0407, 0.1545, 0.0419, -0.1574, -0.0645, 0.0626, 0.0341, -0.0010, -0.1199, -0.1390 ]) UpperCamelCase__ = torch.tensor([ 0.1075, 0.1074, 0.0205, 0.0431, -0.0774, -0.0607, 0.0298, 0.2042, -0.0320, 0.1267, -0.0281, -0.0250, -0.0064, -0.1091, -0.0946, 0.0290, 0.1328, 0.1650, -0.0580, -0.0738, -0.0586, 0.1440, 0.0337, -0.1746, -0.0712, 0.0605, 0.0250, -0.0099, -0.1316, -0.1473 ]) UpperCamelCase__ = torch.tensor([ -1.4572, -2.0481, -0.0414, -0.6005, 1.4136, 0.5848, 0.4028, -2.7330, 1.2212, -2.1228, 0.2155, 0.4039, 0.7662, 2.0535, 0.7477, -0.3243, -2.1758, -2.7648, 1.6947, 0.7026, 1.2338, -1.6078, -0.8682, 2.2810, 1.8574, -0.5718, -0.5586, -0.0186, 2.3415, 2.1251]) UpperCamelCase__ = torch.tensor([ -1.3690, -1.9720, -0.4090, -0.6966, 1.4660, 0.9938, -0.1385, -2.7324, 0.7736, -1.8917, 0.2923, 0.4293, 0.1693, 1.4112, 1.1887, -0.3181, -2.2160, -2.6381, 1.3170, 0.8163, 0.9240, -1.6544, -0.6099, 2.5259, 1.6430, -0.9090, -0.9392, -0.0126, 2.4268, 2.3266 ]) UpperCamelCase__ = torch.tensor([ -1.3525, -1.9628, -0.3956, -0.6860, 1.4664, 1.0014, -0.1259, -2.7212, 0.7772, -1.8811, 0.2996, 0.4388, 0.1704, 1.4029, 1.1701, -0.3027, -2.2053, -2.6287, 1.3350, 0.8131, 0.9274, -1.6292, -0.6098, 2.5131, 1.6505, -0.8958, -0.9298, -0.0151, 2.4257, 2.3355 ]) UpperCamelCase__ = torch.tensor([ -2.0585, -2.7897, -0.2850, -0.8940, 1.9052, 0.5702, 0.6345, -3.8959, 1.5932, -3.2319, 0.1974, 0.0287, 1.7566, 2.6543, 0.8387, -0.5351, -3.2736, -4.3375, 2.9029, 
1.6390, 1.4640, -2.1701, -1.9013, 2.9341, 3.4981, -0.6255, -1.1644, -0.1591, 3.7097, 3.2066 ]) UpperCamelCase__ = torch.tensor([ -2.3139, -2.5594, -0.0197, -0.6785, 1.7001, 1.1606, 0.3075, -2.1740, 1.8071, -2.5630, -0.0926, -0.3811, 1.2116, 2.6246, 1.2731, -0.5398, -2.8153, -3.6140, 2.3893, 1.3262, 1.6258, -2.1856, -1.3267, 2.8395, 2.3779, -1.0623, -1.2468, 0.8959, 3.3367, 3.2243 ]) UpperCamelCase__ = torch.tensor([ -2.0628, -2.7667, -0.2089, -0.8263, 2.0539, 0.5992, 0.6495, -3.8336, 1.6025, -3.2817, 0.1721, -0.0633, 1.7516, 2.7039, 0.8100, -0.5908, -3.2113, -4.4343, 2.9257, 1.3632, 1.5562, -2.1489, -1.9894, 3.0560, 3.3396, -0.7328, -1.0417, 0.0383, 3.7093, 3.2343 ]) UpperCamelCase__ = torch.tensor([ -1.4574, -2.0569, -0.0473, -0.6117, 1.4018, 0.5769, 0.4129, -2.7344, 1.2241, -2.1397, 0.2000, 0.3937, 0.7616, 2.0453, 0.7324, -0.3391, -2.1746, -2.7744, 1.6963, 0.6921, 1.2187, -1.6172, -0.8877, 2.2439, 1.8471, -0.5839, -0.5605, -0.0464, 2.3250, 2.1219 ]) # fmt: on UpperCamelCase__ = api.list_models(filter='diffusers') for mod in models: if "google" in mod.author or mod.modelId == "CompVis/ldm-celebahq-256": UpperCamelCase__ = '/home/patrick/google_checkpoints/' + mod.modelId.split('/')[-1] print(F"""Started running {mod.modelId}!!!""") if mod.modelId.startswith('CompVis'): UpperCamelCase__ = UNetaDModel.from_pretrained(local_checkpoint, subfolder='unet') else: UpperCamelCase__ = UNetaDModel.from_pretrained(local_checkpoint) torch.manual_seed(0) random.seed(0) UpperCamelCase__ = torch.randn(1, model.config.in_channels, model.config.sample_size, model.config.sample_size) UpperCamelCase__ = torch.tensor([10] * noise.shape[0]) with torch.no_grad(): UpperCamelCase__ = model(noise, time_step).sample assert torch.allclose( logits[0, 0, 0, :30], results['_'.join('_'.join(mod.modelId.split('/')).split('-'))], atol=1e-3 ) print(F"""{mod.modelId} has passed successfully!!!""")
322
import warnings from ...utils import logging from .image_processing_clip import CLIPImageProcessor UpperCamelCase__ = logging.get_logger(__name__) class UpperCAmelCase__ ( A_ ): '''simple docstring''' def __init__( self : int , *UpperCamelCase : Optional[Any] , **UpperCamelCase : int ): """simple docstring""" warnings.warn( '''The class CLIPFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please''' ''' use CLIPImageProcessor instead.''' , FutureWarning , ) super().__init__(*UpperCamelCase , **UpperCamelCase )
322
1
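The style-context file above is the standard deprecation-shim pattern; a self-contained sketch with hypothetical class names:

import warnings

class NewProcessor:
    def __init__(self, size: int = 224) -> None:
        self.size = size

class OldProcessor(NewProcessor):
    """Backward-compatibility alias that warns and delegates."""
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "OldProcessor is deprecated; use NewProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)

OldProcessor(size=128)  # emits FutureWarning, then behaves like NewProcessor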
'''simple docstring''' from ...configuration_utils import PretrainedConfig from ...utils import logging UpperCamelCase_ = logging.get_logger(__name__) UpperCamelCase_ = { '''google/fnet-base''': '''https://huggingface.co/google/fnet-base/resolve/main/config.json''', '''google/fnet-large''': '''https://huggingface.co/google/fnet-large/resolve/main/config.json''' # See all FNet models at https://huggingface.co/models?filter=fnet } class _SCREAMING_SNAKE_CASE( _SCREAMING_SNAKE_CASE ): A_ : Union[str, Any] = 'fnet' def __init__( self : Dict , UpperCamelCase_ : str=3_20_00 , UpperCamelCase_ : Tuple=7_68 , UpperCamelCase_ : int=12 , UpperCamelCase_ : Union[str, Any]=30_72 , UpperCamelCase_ : List[Any]="gelu_new" , UpperCamelCase_ : List[str]=0.1 , UpperCamelCase_ : str=5_12 , UpperCamelCase_ : Union[str, Any]=4 , UpperCamelCase_ : Any=0.02 , UpperCamelCase_ : List[str]=1e-12 , UpperCamelCase_ : Union[str, Any]=False , UpperCamelCase_ : Any=5_12 , UpperCamelCase_ : int=3 , UpperCamelCase_ : str=1 , UpperCamelCase_ : List[str]=2 , **UpperCamelCase_ : Optional[int] , ) -> Optional[Any]: super().__init__(pad_token_id=UpperCamelCase_ , bos_token_id=UpperCamelCase_ , eos_token_id=UpperCamelCase_ , **UpperCamelCase_ ) SCREAMING_SNAKE_CASE__ :int = vocab_size SCREAMING_SNAKE_CASE__ :str = max_position_embeddings SCREAMING_SNAKE_CASE__ :Optional[Any] = hidden_size SCREAMING_SNAKE_CASE__ :List[str] = num_hidden_layers SCREAMING_SNAKE_CASE__ :Optional[int] = intermediate_size SCREAMING_SNAKE_CASE__ :Tuple = hidden_act SCREAMING_SNAKE_CASE__ :Tuple = hidden_dropout_prob SCREAMING_SNAKE_CASE__ :Optional[Any] = initializer_range SCREAMING_SNAKE_CASE__ :str = type_vocab_size SCREAMING_SNAKE_CASE__ :Dict = layer_norm_eps SCREAMING_SNAKE_CASE__ :Dict = use_tpu_fourier_optimizations SCREAMING_SNAKE_CASE__ :Any = tpu_short_seq_length
702
'''simple docstring''' import os from typing import Any, Callable, Dict, List, Optional, Tuple, Union import torch from torch import nn from ...models.controlnet import ControlNetModel, ControlNetOutput from ...models.modeling_utils import ModelMixin from ...utils import logging UpperCamelCase_ = logging.get_logger(__name__) class _SCREAMING_SNAKE_CASE( _SCREAMING_SNAKE_CASE ): def __init__( self : Any , UpperCamelCase_ : Union[List[ControlNetModel], Tuple[ControlNetModel]] ) -> Optional[Any]: super().__init__() SCREAMING_SNAKE_CASE__ :Any = nn.ModuleList(UpperCamelCase_ ) def __lowerCamelCase ( self : Union[str, Any] , UpperCamelCase_ : torch.FloatTensor , UpperCamelCase_ : Union[torch.Tensor, float, int] , UpperCamelCase_ : torch.Tensor , UpperCamelCase_ : List[torch.tensor] , UpperCamelCase_ : List[float] , UpperCamelCase_ : Optional[torch.Tensor] = None , UpperCamelCase_ : Optional[torch.Tensor] = None , UpperCamelCase_ : Optional[torch.Tensor] = None , UpperCamelCase_ : Optional[Dict[str, Any]] = None , UpperCamelCase_ : bool = False , UpperCamelCase_ : bool = True , ) -> Union[ControlNetOutput, Tuple]: for i, (image, scale, controlnet) in enumerate(zip(UpperCamelCase_ , UpperCamelCase_ , self.nets ) ): SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ :Any = controlnet( UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , ) # merge samples if i == 0: SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ :Dict = down_samples, mid_sample else: SCREAMING_SNAKE_CASE__ :int = [ samples_prev + samples_curr for samples_prev, samples_curr in zip(UpperCamelCase_ , UpperCamelCase_ ) ] mid_block_res_sample += mid_sample return down_block_res_samples, mid_block_res_sample def __lowerCamelCase ( self : List[str] , UpperCamelCase_ : Union[str, os.PathLike] , UpperCamelCase_ : bool = True , UpperCamelCase_ : Callable = None , UpperCamelCase_ : bool = False , UpperCamelCase_ : Optional[str] = None , ) -> List[Any]: SCREAMING_SNAKE_CASE__ :Any = 0 SCREAMING_SNAKE_CASE__ :List[str] = save_directory for controlnet in self.nets: controlnet.save_pretrained( UpperCamelCase_ , is_main_process=UpperCamelCase_ , save_function=UpperCamelCase_ , safe_serialization=UpperCamelCase_ , variant=UpperCamelCase_ , ) idx += 1 SCREAMING_SNAKE_CASE__ :str = model_path_to_save + f'''_{idx}''' @classmethod def __lowerCamelCase ( cls : str , UpperCamelCase_ : Optional[Union[str, os.PathLike]] , **UpperCamelCase_ : Dict ) -> Dict: SCREAMING_SNAKE_CASE__ :Optional[int] = 0 SCREAMING_SNAKE_CASE__ :Tuple = [] # load controlnet and append to list until no controlnet directory exists anymore # first controlnet has to be saved under `./mydirectory/controlnet` to be compliant with `DiffusionPipeline.from_prertained` # second, third, ... controlnets have to be saved under `./mydirectory/controlnet_1`, `./mydirectory/controlnet_2`, ... SCREAMING_SNAKE_CASE__ :Dict = pretrained_model_path while os.path.isdir(UpperCamelCase_ ): SCREAMING_SNAKE_CASE__ :Optional[Any] = ControlNetModel.from_pretrained(UpperCamelCase_ , **UpperCamelCase_ ) controlnets.append(UpperCamelCase_ ) idx += 1 SCREAMING_SNAKE_CASE__ :List[Any] = pretrained_model_path + f'''_{idx}''' logger.info(f'''{len(UpperCamelCase_ )} controlnets loaded from {pretrained_model_path}.''' ) if len(UpperCamelCase_ ) == 0: raise ValueError( f'''No ControlNets found under {os.path.dirname(UpperCamelCase_ )}. 
Expected at least {pretrained_model_path + "_0"}.''' ) return cls(UpperCamelCase_ )
320
0
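The MultiControlNet forward above accumulates residuals across nets; a minimal sketch of that merge step (shapes and the helper name are illustrative):

import torch

def merge_controlnet_residuals(per_model_down, per_model_mid):
    # per_model_down: one list of down-block tensors per controlnet;
    # per_model_mid: one mid-block tensor per controlnet.
    # Residuals are summed elementwise, as in the loop above.
    down = [sum(level[1:], level[0]) for level in zip(*per_model_down)]
    mid = sum(per_model_mid[1:], per_model_mid[0])
    return down, mid

d = [torch.ones(1, 4, 8, 8)] * 3
m = torch.ones(1, 4, 4, 4)
down, mid = merge_controlnet_residuals([d, d], [m, m])
assert torch.allclose(down[0], torch.full((1, 4, 8, 8), 2.0))
assert torch.allclose(mid, torch.full((1, 4, 4, 4), 2.0))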
"""simple docstring""" from typing import Dict, List, Optional, Union import numpy as np from transformers.utils import is_vision_available from transformers.utils.generic import TensorType from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from ...image_transforms import ( center_crop, get_resize_output_image_size, normalize, rescale, resize, to_channel_dimension_format, ) from ...image_utils import ( IMAGENET_STANDARD_MEAN, IMAGENET_STANDARD_STD, ChannelDimension, ImageInput, PILImageResampling, is_valid_image, to_numpy_array, valid_images, ) from ...utils import logging if is_vision_available(): import PIL lowercase__ = logging.get_logger(__name__) def _snake_case ( lowercase__ ): if isinstance(lowercase__ , (list, tuple) ) and isinstance(videos[0] , (list, tuple) ) and is_valid_image(videos[0][0] ): return videos elif isinstance(lowercase__ , (list, tuple) ) and is_valid_image(videos[0] ): return [videos] elif is_valid_image(lowercase__ ): return [[videos]] raise ValueError(f'''Could not make batched video from {videos}''' ) class lowerCAmelCase__ ( lowercase ): '''simple docstring''' lowerCamelCase__ = ["""pixel_values"""] def __init__( self , lowercase = True , lowercase = None , lowercase = PILImageResampling.BILINEAR , lowercase = True , lowercase = None , lowercase = True , lowercase = 1 / 255 , lowercase = True , lowercase = True , lowercase = None , lowercase = None , **lowercase , ): super().__init__(**lowercase ) _lowerCamelCase : Union[str, Any] = size if size is not None else {'shortest_edge': 256} _lowerCamelCase : str = get_size_dict(lowercase , default_to_square=lowercase ) _lowerCamelCase : Any = crop_size if crop_size is not None else {'height': 224, 'width': 224} _lowerCamelCase : Dict = get_size_dict(lowercase , param_name='crop_size' ) _lowerCamelCase : int = do_resize _lowerCamelCase : str = size _lowerCamelCase : Optional[Any] = do_center_crop _lowerCamelCase : List[Any] = crop_size _lowerCamelCase : str = resample _lowerCamelCase : str = do_rescale _lowerCamelCase : Optional[int] = rescale_factor _lowerCamelCase : Optional[int] = offset _lowerCamelCase : Optional[Any] = do_normalize _lowerCamelCase : Union[str, Any] = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN _lowerCamelCase : str = image_std if image_std is not None else IMAGENET_STANDARD_STD def A_ ( self , lowercase , lowercase , lowercase = PILImageResampling.BILINEAR , lowercase = None , **lowercase , ): _lowerCamelCase : List[str] = get_size_dict(lowercase , default_to_square=lowercase ) if "shortest_edge" in size: _lowerCamelCase : Union[str, Any] = get_resize_output_image_size(lowercase , size['shortest_edge'] , default_to_square=lowercase ) elif "height" in size and "width" in size: _lowerCamelCase : Union[str, Any] = (size['height'], size['width']) else: raise ValueError(F'''Size must have \'height\' and \'width\' or \'shortest_edge\' as keys. Got {size.keys()}''' ) return resize(lowercase , size=lowercase , resample=lowercase , data_format=lowercase , **lowercase ) def A_ ( self , lowercase , lowercase , lowercase = None , **lowercase , ): _lowerCamelCase : Dict = get_size_dict(lowercase ) if "height" not in size or "width" not in size: raise ValueError(F'''Size must have \'height\' and \'width\' as keys. 
Got {size.keys()}''' ) return center_crop(lowercase , size=(size['height'], size['width']) , data_format=lowercase , **lowercase ) def A_ ( self , lowercase , lowercase , lowercase = True , lowercase = None , **lowercase , ): _lowerCamelCase : Tuple = image.astype(np.floataa ) if offset: _lowerCamelCase : Optional[Any] = image - (scale / 2) return rescale(lowercase , scale=lowercase , data_format=lowercase , **lowercase ) def A_ ( self , lowercase , lowercase , lowercase , lowercase = None , **lowercase , ): return normalize(lowercase , mean=lowercase , std=lowercase , data_format=lowercase , **lowercase ) def A_ ( self , lowercase , lowercase = None , lowercase = None , lowercase = None , lowercase = None , lowercase = None , lowercase = None , lowercase = None , lowercase = None , lowercase = None , lowercase = None , lowercase = None , lowercase = ChannelDimension.FIRST , ): if do_resize and size is None or resample is None: raise ValueError('Size and resample must be specified if do_resize is True.' ) if do_center_crop and crop_size is None: raise ValueError('Crop size must be specified if do_center_crop is True.' ) if do_rescale and rescale_factor is None: raise ValueError('Rescale factor must be specified if do_rescale is True.' ) if do_normalize and (image_mean is None or image_std is None): raise ValueError('Image mean and std must be specified if do_normalize is True.' ) if offset and not do_rescale: raise ValueError('For offset, do_rescale must also be set to True.' ) # All transformations expect numpy arrays. _lowerCamelCase : List[Any] = to_numpy_array(lowercase ) if do_resize: _lowerCamelCase : int = self.resize(image=lowercase , size=lowercase , resample=lowercase ) if do_center_crop: _lowerCamelCase : List[str] = self.center_crop(lowercase , size=lowercase ) if do_rescale: _lowerCamelCase : int = self.rescale(image=lowercase , scale=lowercase , offset=lowercase ) if do_normalize: _lowerCamelCase : Union[str, Any] = self.normalize(image=lowercase , mean=lowercase , std=lowercase ) _lowerCamelCase : Optional[int] = to_channel_dimension_format(lowercase , lowercase ) return image def A_ ( self , lowercase , lowercase = None , lowercase = None , lowercase = None , lowercase = None , lowercase = None , lowercase = None , lowercase = None , lowercase = None , lowercase = None , lowercase = None , lowercase = None , lowercase = None , lowercase = ChannelDimension.FIRST , **lowercase , ): _lowerCamelCase : Tuple = do_resize if do_resize is not None else self.do_resize _lowerCamelCase : Any = resample if resample is not None else self.resample _lowerCamelCase : Union[str, Any] = do_center_crop if do_center_crop is not None else self.do_center_crop _lowerCamelCase : List[Any] = do_rescale if do_rescale is not None else self.do_rescale _lowerCamelCase : Optional[Any] = rescale_factor if rescale_factor is not None else self.rescale_factor _lowerCamelCase : List[str] = offset if offset is not None else self.offset _lowerCamelCase : Union[str, Any] = do_normalize if do_normalize is not None else self.do_normalize _lowerCamelCase : Optional[Any] = image_mean if image_mean is not None else self.image_mean _lowerCamelCase : str = image_std if image_std is not None else self.image_std _lowerCamelCase : str = size if size is not None else self.size _lowerCamelCase : Dict = get_size_dict(lowercase , default_to_square=lowercase ) _lowerCamelCase : Dict = crop_size if crop_size is not None else self.crop_size _lowerCamelCase : str = get_size_dict(lowercase , param_name='crop_size' ) if not 
valid_images(lowercase ): raise ValueError( 'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, ' 'torch.Tensor, tf.Tensor or jax.ndarray.' ) _lowerCamelCase : Optional[Any] = make_batched(lowercase ) _lowerCamelCase : List[str] = [ [ self._preprocess_image( image=lowercase , do_resize=lowercase , size=lowercase , resample=lowercase , do_center_crop=lowercase , crop_size=lowercase , do_rescale=lowercase , rescale_factor=lowercase , offset=lowercase , do_normalize=lowercase , image_mean=lowercase , image_std=lowercase , data_format=lowercase , ) for img in video ] for video in videos ] _lowerCamelCase : Union[str, Any] = {'pixel_values': videos} return BatchFeature(data=lowercase , tensor_type=lowercase )
630
"""simple docstring""" import gc import unittest import numpy as np import torch from diffusers import ( AudioDiffusionPipeline, AutoencoderKL, DDIMScheduler, DDPMScheduler, DiffusionPipeline, Mel, UNetaDConditionModel, UNetaDModel, ) from diffusers.utils import slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu enable_full_determinism() class lowerCAmelCase__ ( unittest.TestCase ): '''simple docstring''' def A_ ( self ): # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() @property def A_ ( self ): torch.manual_seed(0 ) _lowerCamelCase : Union[str, Any] = UNetaDModel( sample_size=(32, 64) , in_channels=1 , out_channels=1 , layers_per_block=2 , block_out_channels=(128, 128) , down_block_types=('AttnDownBlock2D', 'DownBlock2D') , up_block_types=('UpBlock2D', 'AttnUpBlock2D') , ) return model @property def A_ ( self ): torch.manual_seed(0 ) _lowerCamelCase : Dict = UNetaDConditionModel( sample_size=(64, 32) , in_channels=1 , out_channels=1 , layers_per_block=2 , block_out_channels=(128, 128) , down_block_types=('CrossAttnDownBlock2D', 'DownBlock2D') , up_block_types=('UpBlock2D', 'CrossAttnUpBlock2D') , cross_attention_dim=10 , ) return model @property def A_ ( self ): torch.manual_seed(0 ) _lowerCamelCase : List[str] = AutoencoderKL( sample_size=(128, 64) , in_channels=1 , out_channels=1 , latent_channels=1 , layers_per_block=2 , block_out_channels=(128, 128) , down_block_types=('DownEncoderBlock2D', 'DownEncoderBlock2D') , up_block_types=('UpDecoderBlock2D', 'UpDecoderBlock2D') , ) _lowerCamelCase : int = UNetaDModel( sample_size=(64, 32) , in_channels=1 , out_channels=1 , layers_per_block=2 , block_out_channels=(128, 128) , down_block_types=('AttnDownBlock2D', 'DownBlock2D') , up_block_types=('UpBlock2D', 'AttnUpBlock2D') , ) return vqvae, unet @slow def A_ ( self ): _lowerCamelCase : str = 'cpu' # ensure determinism for the device-dependent torch.Generator _lowerCamelCase : Optional[int] = Mel( x_res=self.dummy_unet.config.sample_size[1] , y_res=self.dummy_unet.config.sample_size[0] , ) _lowerCamelCase : List[str] = DDPMScheduler() _lowerCamelCase : List[Any] = AudioDiffusionPipeline(vqvae=lowercase , unet=self.dummy_unet , mel=lowercase , scheduler=lowercase ) _lowerCamelCase : List[Any] = pipe.to(lowercase ) pipe.set_progress_bar_config(disable=lowercase ) _lowerCamelCase : Union[str, Any] = torch.Generator(device=lowercase ).manual_seed(42 ) _lowerCamelCase : Union[str, Any] = pipe(generator=lowercase , steps=4 ) _lowerCamelCase : Optional[Any] = output.audios[0] _lowerCamelCase : int = output.images[0] _lowerCamelCase : Dict = torch.Generator(device=lowercase ).manual_seed(42 ) _lowerCamelCase : Dict = pipe(generator=lowercase , steps=4 , return_dict=lowercase ) _lowerCamelCase : List[str] = output[0][0] assert audio.shape == (1, (self.dummy_unet.config.sample_size[1] - 1) * mel.hop_length) assert ( image.height == self.dummy_unet.config.sample_size[0] and image.width == self.dummy_unet.config.sample_size[1] ) _lowerCamelCase : Optional[Any] = np.frombuffer(image.tobytes() , dtype='uint8' )[:10] _lowerCamelCase : Dict = np.frombuffer(image_from_tuple.tobytes() , dtype='uint8' )[:10] _lowerCamelCase : List[str] = np.array([69, 255, 255, 255, 0, 0, 77, 181, 12, 127] ) assert np.abs(image_slice.flatten() - expected_slice ).max() == 0 assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() == 0 _lowerCamelCase : List[Any] = Mel( 
x_res=self.dummy_vqvae_and_unet[0].config.sample_size[1] , y_res=self.dummy_vqvae_and_unet[0].config.sample_size[0] , ) _lowerCamelCase : Dict = DDIMScheduler() _lowerCamelCase : Tuple = self.dummy_vqvae_and_unet _lowerCamelCase : List[str] = AudioDiffusionPipeline( vqvae=self.dummy_vqvae_and_unet[0] , unet=dummy_vqvae_and_unet[1] , mel=lowercase , scheduler=lowercase ) _lowerCamelCase : Tuple = pipe.to(lowercase ) pipe.set_progress_bar_config(disable=lowercase ) np.random.seed(0 ) _lowerCamelCase : Any = np.random.uniform(-1 , 1 , ((dummy_vqvae_and_unet[0].config.sample_size[1] - 1) * mel.hop_length,) ) _lowerCamelCase : Optional[int] = torch.Generator(device=lowercase ).manual_seed(42 ) _lowerCamelCase : Dict = pipe(raw_audio=lowercase , generator=lowercase , start_step=5 , steps=10 ) _lowerCamelCase : str = output.images[0] assert ( image.height == self.dummy_vqvae_and_unet[0].config.sample_size[0] and image.width == self.dummy_vqvae_and_unet[0].config.sample_size[1] ) _lowerCamelCase : Dict = np.frombuffer(image.tobytes() , dtype='uint8' )[:10] _lowerCamelCase : Union[str, Any] = np.array([120, 117, 110, 109, 138, 167, 138, 148, 132, 121] ) assert np.abs(image_slice.flatten() - expected_slice ).max() == 0 _lowerCamelCase : Dict = self.dummy_unet_condition _lowerCamelCase : Optional[int] = AudioDiffusionPipeline( vqvae=self.dummy_vqvae_and_unet[0] , unet=lowercase , mel=lowercase , scheduler=lowercase ) _lowerCamelCase : Dict = pipe.to(lowercase ) pipe.set_progress_bar_config(disable=lowercase ) np.random.seed(0 ) _lowerCamelCase : Optional[int] = torch.rand((1, 1, 10) ) _lowerCamelCase : Optional[Any] = pipe(generator=lowercase , encoding=lowercase ) _lowerCamelCase : Dict = output.images[0] _lowerCamelCase : Tuple = np.frombuffer(image.tobytes() , dtype='uint8' )[:10] _lowerCamelCase : List[str] = np.array([107, 103, 120, 127, 142, 122, 113, 122, 97, 111] ) assert np.abs(image_slice.flatten() - expected_slice ).max() == 0 @slow @require_torch_gpu class lowerCAmelCase__ ( unittest.TestCase ): '''simple docstring''' def A_ ( self ): # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() def A_ ( self ): _lowerCamelCase : Optional[Any] = torch_device _lowerCamelCase : Dict = DiffusionPipeline.from_pretrained('teticio/audio-diffusion-ddim-256' ) _lowerCamelCase : List[str] = pipe.to(lowercase ) pipe.set_progress_bar_config(disable=lowercase ) _lowerCamelCase : int = torch.Generator(device=lowercase ).manual_seed(42 ) _lowerCamelCase : Tuple = pipe(generator=lowercase ) _lowerCamelCase : Dict = output.audios[0] _lowerCamelCase : Dict = output.images[0] assert audio.shape == (1, (pipe.unet.config.sample_size[1] - 1) * pipe.mel.hop_length) assert image.height == pipe.unet.config.sample_size[0] and image.width == pipe.unet.config.sample_size[1] _lowerCamelCase : Optional[int] = np.frombuffer(image.tobytes() , dtype='uint8' )[:10] _lowerCamelCase : int = np.array([151, 167, 154, 144, 122, 134, 121, 105, 70, 26] ) assert np.abs(image_slice.flatten() - expected_slice ).max() == 0
630
1
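The video processor's make_batched helper above dispatches on three input shapes; a standalone numpy re-sketch of that logic:

import numpy as np

def make_batched(videos):
    # Accept a frame, a video (list of frames), or a batch (list of videos)
    # and always return the batched form, mirroring the helper above.
    def is_frame(x):
        return isinstance(x, np.ndarray) and x.ndim == 3
    if isinstance(videos, (list, tuple)) and isinstance(videos[0], (list, tuple)) and is_frame(videos[0][0]):
        return videos
    if isinstance(videos, (list, tuple)) and is_frame(videos[0]):
        return [videos]
    if is_frame(videos):
        return [[videos]]
    raise ValueError(f"Could not make batched video from {videos!r}")

frame = np.zeros((224, 224, 3), dtype=np.uint8)
batch = make_batched(frame)
assert len(batch) == 1 and len(batch[0]) == 1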
def ugly_numbers(n: int) -> int:
    '''Return the n-th ugly number (a number whose only prime factors are 2, 3 and 5).'''
    ugly_nums = [1]
    ia, ib, ic = 0, 0, 0
    next_a = ugly_nums[ia] * 2
    next_b = ugly_nums[ib] * 3
    next_c = ugly_nums[ic] * 5
    for _ in range(1, n):
        next_num = min(next_a, next_b, next_c)
        ugly_nums.append(next_num)
        if next_num == next_a:
            ia += 1
            next_a = ugly_nums[ia] * 2
        if next_num == next_b:
            ib += 1
            next_b = ugly_nums[ib] * 3
        if next_num == next_c:
            ic += 1
            next_c = ugly_nums[ic] * 5
    return ugly_nums[-1]

if __name__ == "__main__":
    from doctest import testmod
    testmod(verbose=True)
    print(F'{ugly_numbers(200) = }')
712
from dataclasses import asdict, dataclass from typing import Optional from ...configuration_utils import PretrainedConfig from ...utils import logging __SCREAMING_SNAKE_CASE : Optional[int] = logging.get_logger(__name__) # TODO Update this __SCREAMING_SNAKE_CASE : int = { '''facebook/esm-1b''': '''https://huggingface.co/facebook/esm-1b/resolve/main/config.json''', # See all ESM models at https://huggingface.co/models?filter=esm } class __lowerCamelCase ( lowerCamelCase_ ): """simple docstring""" a_: Any = """esm""" def __init__( self : Dict , lowerCamelCase_ : Any=None , lowerCamelCase_ : List[Any]=None , lowerCamelCase_ : Dict=None , lowerCamelCase_ : Tuple=768 , lowerCamelCase_ : List[str]=12 , lowerCamelCase_ : List[Any]=12 , lowerCamelCase_ : Optional[Any]=3072 , lowerCamelCase_ : str=0.1 , lowerCamelCase_ : int=0.1 , lowerCamelCase_ : List[Any]=1026 , lowerCamelCase_ : List[str]=0.02 , lowerCamelCase_ : str=1e-12 , lowerCamelCase_ : int="absolute" , lowerCamelCase_ : Dict=True , lowerCamelCase_ : Optional[int]=None , lowerCamelCase_ : Any=False , lowerCamelCase_ : Dict=False , lowerCamelCase_ : Any=None , lowerCamelCase_ : Union[str, Any]=None , **lowerCamelCase_ : Union[str, Any] , ): super().__init__(pad_token_id=lowerCamelCase_ , mask_token_id=lowerCamelCase_ , **lowerCamelCase_ ) _lowerCAmelCase =vocab_size _lowerCAmelCase =hidden_size _lowerCAmelCase =num_hidden_layers _lowerCAmelCase =num_attention_heads _lowerCAmelCase =intermediate_size _lowerCAmelCase =hidden_dropout_prob _lowerCAmelCase =attention_probs_dropout_prob _lowerCAmelCase =max_position_embeddings _lowerCAmelCase =initializer_range _lowerCAmelCase =layer_norm_eps _lowerCAmelCase =position_embedding_type _lowerCAmelCase =use_cache _lowerCAmelCase =emb_layer_norm_before _lowerCAmelCase =token_dropout _lowerCAmelCase =is_folding_model if is_folding_model: if esmfold_config is None: logger.info("""No esmfold_config supplied for folding model, using default values.""" ) _lowerCAmelCase =EsmFoldConfig() elif isinstance(lowerCamelCase_ , lowerCamelCase_ ): _lowerCAmelCase =EsmFoldConfig(**lowerCamelCase_ ) _lowerCAmelCase =esmfold_config if vocab_list is None: logger.warning("""No vocab_list supplied for folding model, assuming the ESM-2 vocabulary!""" ) _lowerCAmelCase =get_default_vocab_list() else: _lowerCAmelCase =vocab_list else: _lowerCAmelCase =None _lowerCAmelCase =None if self.esmfold_config is not None and getattr(self.esmfold_config , """use_esm_attn_map""" , lowerCamelCase_ ): raise ValueError("""The HuggingFace port of ESMFold does not support use_esm_attn_map at this time!""" ) def lowerCAmelCase__ ( self : Tuple ): _lowerCAmelCase =super().to_dict() if isinstance(self.esmfold_config , lowerCamelCase_ ): _lowerCAmelCase =self.esmfold_config.to_dict() return output @dataclass class __lowerCamelCase : """simple docstring""" a_: str = None a_: bool = True a_: bool = False a_: bool = False a_: bool = False a_: float = 0 a_: bool = True a_: bool = False a_: int = 1_28 a_: "TrunkConfig" = None def lowerCAmelCase__ ( self : str ): if self.trunk is None: _lowerCAmelCase =TrunkConfig() elif isinstance(self.trunk , lowerCamelCase_ ): _lowerCAmelCase =TrunkConfig(**self.trunk ) def lowerCAmelCase__ ( self : str ): _lowerCAmelCase =asdict(self ) _lowerCAmelCase =self.trunk.to_dict() return output @dataclass class __lowerCamelCase : """simple docstring""" a_: int = 48 a_: int = 10_24 a_: int = 1_28 a_: int = 32 a_: int = 32 a_: int = 32 a_: float = 0 a_: float = 0 a_: bool = False a_: int = 4 a_: Optional[int] = 1_28 a_: 
"StructureModuleConfig" = None def lowerCAmelCase__ ( self : Optional[Any] ): if self.structure_module is None: _lowerCAmelCase =StructureModuleConfig() elif isinstance(self.structure_module , lowerCamelCase_ ): _lowerCAmelCase =StructureModuleConfig(**self.structure_module ) if self.max_recycles <= 0: raise ValueError(F"`max_recycles` should be positive, got {self.max_recycles}." ) if self.sequence_state_dim % self.sequence_state_dim != 0: raise ValueError( """`sequence_state_dim` should be a round multiple of `sequence_state_dim`, got""" F" {self.sequence_state_dim} and {self.sequence_state_dim}." ) if self.pairwise_state_dim % self.pairwise_state_dim != 0: raise ValueError( """`pairwise_state_dim` should be a round multiple of `pairwise_state_dim`, got""" F" {self.pairwise_state_dim} and {self.pairwise_state_dim}." ) _lowerCAmelCase =self.sequence_state_dim // self.sequence_head_width _lowerCAmelCase =self.pairwise_state_dim // self.pairwise_head_width if self.sequence_state_dim != sequence_num_heads * self.sequence_head_width: raise ValueError( """`sequence_state_dim` should be equal to `sequence_num_heads * sequence_head_width, got""" F" {self.sequence_state_dim} != {sequence_num_heads} * {self.sequence_head_width}." ) if self.pairwise_state_dim != pairwise_num_heads * self.pairwise_head_width: raise ValueError( """`pairwise_state_dim` should be equal to `pairwise_num_heads * pairwise_head_width, got""" F" {self.pairwise_state_dim} != {pairwise_num_heads} * {self.pairwise_head_width}." ) if self.pairwise_state_dim % 2 != 0: raise ValueError(F"`pairwise_state_dim` should be even, got {self.pairwise_state_dim}." ) if self.dropout >= 0.4: raise ValueError(F"`dropout` should not be greater than 0.4, got {self.dropout}." ) def lowerCAmelCase__ ( self : Any ): _lowerCAmelCase =asdict(self ) _lowerCAmelCase =self.structure_module.to_dict() return output @dataclass class __lowerCamelCase : """simple docstring""" a_: int = 3_84 a_: int = 1_28 a_: int = 16 a_: int = 1_28 a_: int = 12 a_: int = 4 a_: int = 8 a_: float = 0.1 a_: int = 8 a_: int = 1 a_: int = 2 a_: int = 7 a_: int = 10 a_: float = 1e-8 a_: float = 1e5 def lowerCAmelCase__ ( self : int ): return asdict(self ) def snake_case_ ( ): '''simple docstring''' return ( "<cls>", "<pad>", "<eos>", "<unk>", "L", "A", "G", "V", "S", "E", "R", "T", "I", "D", "P", "K", "Q", "N", "F", "Y", "M", "H", "W", "C", "X", "B", "U", "Z", "O", ".", "-", "<null_1>", "<mask>", )
149
0
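As a cross-check on the three-pointer ugly-number routine above, an equivalent heap-based formulation:

import heapq

def ugly_numbers_heap(n: int) -> int:
    # Pop the smallest candidate, push its 2x/3x/5x successors,
    # and deduplicate with a set.
    heap, seen, value = [1], {1}, 1
    for _ in range(n):
        value = heapq.heappop(heap)
        for factor in (2, 3, 5):
            nxt = value * factor
            if nxt not in seen:
                seen.add(nxt)
                heapq.heappush(heap, nxt)
    return value

assert ugly_numbers_heap(10) == 12  # 1, 2, 3, 4, 5, 6, 8, 9, 10, 12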
import argparse import struct import unittest class snake_case_ : '''simple docstring''' def __init__( self : Union[str, Any] , _UpperCamelCase : bytes ) ->None: snake_case_ = data # Initialize hash values snake_case_ = [ 0x6_a_0_9_e_6_6_7, 0xb_b_6_7_a_e_8_5, 0x3_c_6_e_f_3_7_2, 0xa_5_4_f_f_5_3_a, 0x5_1_0_e_5_2_7_f, 0x9_b_0_5_6_8_8_c, 0x1_f_8_3_d_9_a_b, 0x5_b_e_0_c_d_1_9, ] # Initialize round constants snake_case_ = [ 0x4_2_8_a_2_f_9_8, 0x7_1_3_7_4_4_9_1, 0xb_5_c_0_f_b_c_f, 0xe_9_b_5_d_b_a_5, 0x3_9_5_6_c_2_5_b, 0x5_9_f_1_1_1_f_1, 0x9_2_3_f_8_2_a_4, 0xa_b_1_c_5_e_d_5, 0xd_8_0_7_a_a_9_8, 0x1_2_8_3_5_b_0_1, 0x2_4_3_1_8_5_b_e, 0x5_5_0_c_7_d_c_3, 0x7_2_b_e_5_d_7_4, 0x8_0_d_e_b_1_f_e, 0x9_b_d_c_0_6_a_7, 0xc_1_9_b_f_1_7_4, 0xe_4_9_b_6_9_c_1, 0xe_f_b_e_4_7_8_6, 0x0_f_c_1_9_d_c_6, 0x2_4_0_c_a_1_c_c, 0x2_d_e_9_2_c_6_f, 0x4_a_7_4_8_4_a_a, 0x5_c_b_0_a_9_d_c, 0x7_6_f_9_8_8_d_a, 0x9_8_3_e_5_1_5_2, 0xa_8_3_1_c_6_6_d, 0xb_0_0_3_2_7_c_8, 0xb_f_5_9_7_f_c_7, 0xc_6_e_0_0_b_f_3, 0xd_5_a_7_9_1_4_7, 0x0_6_c_a_6_3_5_1, 0x1_4_2_9_2_9_6_7, 0x2_7_b_7_0_a_8_5, 0x2_e_1_b_2_1_3_8, 0x4_d_2_c_6_d_f_c, 0x5_3_3_8_0_d_1_3, 0x6_5_0_a_7_3_5_4, 0x7_6_6_a_0_a_b_b, 0x8_1_c_2_c_9_2_e, 0x9_2_7_2_2_c_8_5, 0xa_2_b_f_e_8_a_1, 0xa_8_1_a_6_6_4_b, 0xc_2_4_b_8_b_7_0, 0xc_7_6_c_5_1_a_3, 0xd_1_9_2_e_8_1_9, 0xd_6_9_9_0_6_2_4, 0xf_4_0_e_3_5_8_5, 0x1_0_6_a_a_0_7_0, 0x1_9_a_4_c_1_1_6, 0x1_e_3_7_6_c_0_8, 0x2_7_4_8_7_7_4_c, 0x3_4_b_0_b_c_b_5, 0x3_9_1_c_0_c_b_3, 0x4_e_d_8_a_a_4_a, 0x5_b_9_c_c_a_4_f, 0x6_8_2_e_6_f_f_3, 0x7_4_8_f_8_2_e_e, 0x7_8_a_5_6_3_6_f, 0x8_4_c_8_7_8_1_4, 0x8_c_c_7_0_2_0_8, 0x9_0_b_e_f_f_f_a, 0xa_4_5_0_6_c_e_b, 0xb_e_f_9_a_3_f_7, 0xc_6_7_1_7_8_f_2, ] snake_case_ = self.preprocessing(self.data ) self.final_hash() @staticmethod def snake_case__( _UpperCamelCase : bytes ) ->bytes: snake_case_ = B'''\x80''' + (B'''\x00''' * (6_3 - (len(_UpperCamelCase ) + 8) % 6_4)) snake_case_ = struct.pack('''>Q''' , (len(_UpperCamelCase ) * 8) ) return data + padding + big_endian_integer def snake_case__( self : Optional[Any] ) ->None: # Convert into blocks of 64 bytes snake_case_ = [ self.preprocessed_data[x : x + 6_4] for x in range(0 , len(self.preprocessed_data ) , 6_4 ) ] for block in self.blocks: # Convert the given block into a list of 4 byte integers snake_case_ = list(struct.unpack('''>16L''' , _UpperCamelCase ) ) # add 48 0-ed integers words += [0] * 4_8 snake_case_, snake_case_, snake_case_, snake_case_, snake_case_, snake_case_, snake_case_, snake_case_ = self.hashes for index in range(0 , 6_4 ): if index > 1_5: # modify the zero-ed indexes at the end of the array snake_case_ = ( self.ror(words[index - 1_5] , 7 ) ^ self.ror(words[index - 1_5] , 1_8 ) ^ (words[index - 1_5] >> 3) ) snake_case_ = ( self.ror(words[index - 2] , 1_7 ) ^ self.ror(words[index - 2] , 1_9 ) ^ (words[index - 2] >> 1_0) ) snake_case_ = ( words[index - 1_6] + sa + words[index - 7] + sa ) % 0x1_0_0_0_0_0_0_0_0 # Compression snake_case_ = self.ror(_UpperCamelCase , 6 ) ^ self.ror(_UpperCamelCase , 1_1 ) ^ self.ror(_UpperCamelCase , 2_5 ) snake_case_ = (e & f) ^ ((~e & 0xf_f_f_f_f_f_f_f) & g) snake_case_ = ( h + sa + ch + self.round_constants[index] + words[index] ) % 0x1_0_0_0_0_0_0_0_0 snake_case_ = self.ror(_UpperCamelCase , 2 ) ^ self.ror(_UpperCamelCase , 1_3 ) ^ self.ror(_UpperCamelCase , 2_2 ) snake_case_ = (a & b) ^ (a & c) ^ (b & c) snake_case_ = (sa + maj) % 0x1_0_0_0_0_0_0_0_0 snake_case_, snake_case_, snake_case_, snake_case_, snake_case_, snake_case_, snake_case_, snake_case_ = ( g, f, e, ((d + tempa) % 0x1_0_0_0_0_0_0_0_0), c, b, a, ((tempa + tempa) % 
0x1_0_0_0_0_0_0_0_0), ) snake_case_ = [a, b, c, d, e, f, g, h] # Modify final values snake_case_ = [ ((element + mutated_hash_values[index]) % 0x1_0_0_0_0_0_0_0_0) for index, element in enumerate(self.hashes ) ] snake_case_ = ''''''.join([hex(_UpperCamelCase )[2:].zfill(8 ) for value in self.hashes] ) def snake_case__( self : Union[str, Any] , _UpperCamelCase : int , _UpperCamelCase : int ) ->int: return 0xf_f_f_f_f_f_f_f & (value << (3_2 - rotations)) | (value >> rotations) class snake_case_ ( unittest.TestCase ): '''simple docstring''' def snake_case__( self : Any ) ->None: import hashlib snake_case_ = bytes('''Test String''' , '''utf-8''' ) self.assertEqual(SHAaaa(_UpperCamelCase ).hash , hashlib.shaaaa(_UpperCamelCase ).hexdigest() ) def __SCREAMING_SNAKE_CASE (): import doctest doctest.testmod() snake_case_ = argparse.ArgumentParser() parser.add_argument( '''-s''' , '''--string''' , dest='''input_string''' , default='''Hello World!! Welcome to Cryptography''' , help='''Hash the string''' , ) parser.add_argument( '''-f''' , '''--file''' , dest='''input_file''' , help='''Hash contents of a file''' ) snake_case_ = parser.parse_args() snake_case_ = args.input_string # hash input should be a bytestring if args.input_file: with open(args.input_file , '''rb''' ) as f: snake_case_ = f.read() else: snake_case_ = bytes(SCREAMING_SNAKE_CASE__ , '''utf-8''' ) print(SHAaaa(SCREAMING_SNAKE_CASE__ ).hash ) if __name__ == "__main__": main()
39
'''simple docstring''' import gc import math import unittest import torch from diffusers import UNetaDModel from diffusers.utils import floats_tensor, logging, slow, torch_all_close, torch_device from diffusers.utils.testing_utils import enable_full_determinism from .test_modeling_common import ModelTesterMixin, UNetTesterMixin SCREAMING_SNAKE_CASE__ : List[Any] = logging.get_logger(__name__) enable_full_determinism() class a__( snake_case__ , snake_case__ , unittest.TestCase ): a_ : Dict = UNetaDModel a_ : List[Any] = '''sample''' @property def _lowercase ( self ) -> Tuple: snake_case__ =4 snake_case__ =3 snake_case__ =(32, 32) snake_case__ =floats_tensor((batch_size, num_channels) + sizes ).to(_UpperCAmelCase ) snake_case__ =torch.tensor([10] ).to(_UpperCAmelCase ) return {"sample": noise, "timestep": time_step} @property def _lowercase ( self ) -> Optional[int]: return (3, 32, 32) @property def _lowercase ( self ) -> Optional[int]: return (3, 32, 32) def _lowercase ( self ) -> Union[str, Any]: snake_case__ ={ 'block_out_channels': (32, 64), 'down_block_types': ('DownBlock2D', 'AttnDownBlock2D'), 'up_block_types': ('AttnUpBlock2D', 'UpBlock2D'), 'attention_head_dim': 3, 'out_channels': 3, 'in_channels': 3, 'layers_per_block': 2, 'sample_size': 32, } snake_case__ =self.dummy_input return init_dict, inputs_dict class a__( snake_case__ , snake_case__ , unittest.TestCase ): a_ : Union[str, Any] = UNetaDModel a_ : Optional[Any] = '''sample''' @property def _lowercase ( self ) -> Union[str, Any]: snake_case__ =4 snake_case__ =4 snake_case__ =(32, 32) snake_case__ =floats_tensor((batch_size, num_channels) + sizes ).to(_UpperCAmelCase ) snake_case__ =torch.tensor([10] ).to(_UpperCAmelCase ) return {"sample": noise, "timestep": time_step} @property def _lowercase ( self ) -> Optional[int]: return (4, 32, 32) @property def _lowercase ( self ) -> Dict: return (4, 32, 32) def _lowercase ( self ) -> str: snake_case__ ={ 'sample_size': 32, 'in_channels': 4, 'out_channels': 4, 'layers_per_block': 2, 'block_out_channels': (32, 64), 'attention_head_dim': 32, 'down_block_types': ('DownBlock2D', 'DownBlock2D'), 'up_block_types': ('UpBlock2D', 'UpBlock2D'), } snake_case__ =self.dummy_input return init_dict, inputs_dict def _lowercase ( self ) -> Dict: snake_case__ , snake_case__ =UNetaDModel.from_pretrained('fusing/unet-ldm-dummy-update' , output_loading_info=_UpperCAmelCase ) self.assertIsNotNone(_UpperCAmelCase ) self.assertEqual(len(loading_info['missing_keys'] ) , 0 ) model.to(_UpperCAmelCase ) snake_case__ =model(**self.dummy_input ).sample assert image is not None, "Make sure output is not None" @unittest.skipIf(torch_device != 'cuda' , 'This test is supposed to run on GPU' ) def _lowercase ( self ) -> Optional[Any]: snake_case__ , snake_case__ =UNetaDModel.from_pretrained('fusing/unet-ldm-dummy-update' , output_loading_info=_UpperCAmelCase ) model.to(_UpperCAmelCase ) snake_case__ =model(**self.dummy_input ).sample assert image is not None, "Make sure output is not None" @unittest.skipIf(torch_device != 'cuda' , 'This test is supposed to run on GPU' ) def _lowercase ( self ) -> Optional[Any]: # by defautl model loading will use accelerate as `low_cpu_mem_usage=True` snake_case__ , snake_case__ =UNetaDModel.from_pretrained('fusing/unet-ldm-dummy-update' , output_loading_info=_UpperCAmelCase ) model_accelerate.to(_UpperCAmelCase ) model_accelerate.eval() snake_case__ =torch.randn( 1 , model_accelerate.config.in_channels , model_accelerate.config.sample_size , model_accelerate.config.sample_size , 
generator=torch.manual_seed(0 ) , ) snake_case__ =noise.to(_UpperCAmelCase ) snake_case__ =torch.tensor([10] * noise.shape[0] ).to(_UpperCAmelCase ) snake_case__ =model_accelerate(_UpperCAmelCase , _UpperCAmelCase )['sample'] # two models don't need to stay in the device at the same time del model_accelerate torch.cuda.empty_cache() gc.collect() snake_case__ , snake_case__ =UNetaDModel.from_pretrained( 'fusing/unet-ldm-dummy-update' , output_loading_info=_UpperCAmelCase , low_cpu_mem_usage=_UpperCAmelCase ) model_normal_load.to(_UpperCAmelCase ) model_normal_load.eval() snake_case__ =model_normal_load(_UpperCAmelCase , _UpperCAmelCase )['sample'] assert torch_all_close(_UpperCAmelCase , _UpperCAmelCase , rtol=1E-3 ) def _lowercase ( self ) -> Optional[Any]: snake_case__ =UNetaDModel.from_pretrained('fusing/unet-ldm-dummy-update' ) model.eval() model.to(_UpperCAmelCase ) snake_case__ =torch.randn( 1 , model.config.in_channels , model.config.sample_size , model.config.sample_size , generator=torch.manual_seed(0 ) , ) snake_case__ =noise.to(_UpperCAmelCase ) snake_case__ =torch.tensor([10] * noise.shape[0] ).to(_UpperCAmelCase ) with torch.no_grad(): snake_case__ =model(_UpperCAmelCase , _UpperCAmelCase ).sample snake_case__ =output[0, -1, -3:, -3:].flatten().cpu() # fmt: off snake_case__ =torch.tensor([-13.3_258, -20.1_100, -15.9_873, -17.6_617, -23.0_596, -17.9_419, -13.3_675, -16.1_889, -12.3_800] ) # fmt: on self.assertTrue(torch_all_close(_UpperCAmelCase , _UpperCAmelCase , rtol=1E-3 ) ) class a__( snake_case__ , snake_case__ , unittest.TestCase ): a_ : List[str] = UNetaDModel a_ : Optional[int] = '''sample''' @property def _lowercase ( self , _UpperCAmelCase=(32, 32) ) -> Tuple: snake_case__ =4 snake_case__ =3 snake_case__ =floats_tensor((batch_size, num_channels) + sizes ).to(_UpperCAmelCase ) snake_case__ =torch.tensor(batch_size * [10] ).to(dtype=torch.intaa , device=_UpperCAmelCase ) return {"sample": noise, "timestep": time_step} @property def _lowercase ( self ) -> Union[str, Any]: return (3, 32, 32) @property def _lowercase ( self ) -> Optional[Any]: return (3, 32, 32) def _lowercase ( self ) -> str: snake_case__ ={ 'block_out_channels': [32, 64, 64, 64], 'in_channels': 3, 'layers_per_block': 1, 'out_channels': 3, 'time_embedding_type': 'fourier', 'norm_eps': 1E-6, 'mid_block_scale_factor': math.sqrt(2.0 ), 'norm_num_groups': None, 'down_block_types': [ 'SkipDownBlock2D', 'AttnSkipDownBlock2D', 'SkipDownBlock2D', 'SkipDownBlock2D', ], 'up_block_types': [ 'SkipUpBlock2D', 'SkipUpBlock2D', 'AttnSkipUpBlock2D', 'SkipUpBlock2D', ], } snake_case__ =self.dummy_input return init_dict, inputs_dict @slow def _lowercase ( self ) -> List[Any]: snake_case__ , snake_case__ =UNetaDModel.from_pretrained('google/ncsnpp-celebahq-256' , output_loading_info=_UpperCAmelCase ) self.assertIsNotNone(_UpperCAmelCase ) self.assertEqual(len(loading_info['missing_keys'] ) , 0 ) model.to(_UpperCAmelCase ) snake_case__ =self.dummy_input snake_case__ =floats_tensor((4, 3) + (256, 256) ).to(_UpperCAmelCase ) snake_case__ =noise snake_case__ =model(**_UpperCAmelCase ) assert image is not None, "Make sure output is not None" @slow def _lowercase ( self ) -> Union[str, Any]: snake_case__ =UNetaDModel.from_pretrained('google/ncsnpp-celebahq-256' ) model.to(_UpperCAmelCase ) snake_case__ =4 snake_case__ =3 snake_case__ =(256, 256) snake_case__ =torch.ones((batch_size, num_channels) + sizes ).to(_UpperCAmelCase ) snake_case__ =torch.tensor(batch_size * [1E-4] ).to(_UpperCAmelCase ) with torch.no_grad(): snake_case__ 
=model(_UpperCAmelCase , _UpperCAmelCase ).sample snake_case__ =output[0, -3:, -3:, -1].flatten().cpu() # fmt: off snake_case__ =torch.tensor([-4_842.8_691, -6_499.6_631, -3_800.1_953, -7_978.2_686, -10_980.7_129, -20_028.8_535, 8_148.2_822, 2_342.2_905, 567.7_608] ) # fmt: on self.assertTrue(torch_all_close(_UpperCAmelCase , _UpperCAmelCase , rtol=1E-2 ) ) def _lowercase ( self ) -> List[Any]: snake_case__ =UNetaDModel.from_pretrained('fusing/ncsnpp-ffhq-ve-dummy-update' ) model.to(_UpperCAmelCase ) snake_case__ =4 snake_case__ =3 snake_case__ =(32, 32) snake_case__ =torch.ones((batch_size, num_channels) + sizes ).to(_UpperCAmelCase ) snake_case__ =torch.tensor(batch_size * [1E-4] ).to(_UpperCAmelCase ) with torch.no_grad(): snake_case__ =model(_UpperCAmelCase , _UpperCAmelCase ).sample snake_case__ =output[0, -3:, -3:, -1].flatten().cpu() # fmt: off snake_case__ =torch.tensor([-0.0_325, -0.0_900, -0.0_869, -0.0_332, -0.0_725, -0.0_270, -0.0_101, 0.0_227, 0.0_256] ) # fmt: on self.assertTrue(torch_all_close(_UpperCAmelCase , _UpperCAmelCase , rtol=1E-2 ) ) def _lowercase ( self ) -> Optional[Any]: # not required for this model pass
538
0
"""simple docstring""" from ...configuration_utils import PretrainedConfig from ...utils import logging from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices A_ : Union[str, Any] = logging.get_logger(__name__) A_ : str = { "microsoft/focalnet-tiny": "https://huggingface.co/microsoft/focalnet-tiny/resolve/main/config.json", } class __UpperCamelCase (A__ ,A__ ): lowerCamelCase__ : Optional[int] = 'focalnet' def __init__( self : List[Any] , __UpperCAmelCase : Any=2_2_4 , __UpperCAmelCase : List[Any]=4 , __UpperCAmelCase : Union[str, Any]=3 , __UpperCAmelCase : Optional[Any]=9_6 , __UpperCAmelCase : Any=False , __UpperCAmelCase : int=[1_9_2, 3_8_4, 7_6_8, 7_6_8] , __UpperCAmelCase : Optional[Any]=[2, 2, 6, 2] , __UpperCAmelCase : Dict=[2, 2, 2, 2] , __UpperCAmelCase : Dict=[3, 3, 3, 3] , __UpperCAmelCase : Dict="gelu" , __UpperCAmelCase : List[Any]=4.0 , __UpperCAmelCase : int=0.0 , __UpperCAmelCase : Any=0.1 , __UpperCAmelCase : Dict=False , __UpperCAmelCase : Any=1e-4 , __UpperCAmelCase : List[Any]=False , __UpperCAmelCase : str=False , __UpperCAmelCase : Any=False , __UpperCAmelCase : Dict=0.02 , __UpperCAmelCase : Dict=1e-5 , __UpperCAmelCase : str=3_2 , __UpperCAmelCase : Tuple=None , __UpperCAmelCase : Optional[Any]=None , **__UpperCAmelCase : List[Any] , ) -> List[Any]: super().__init__(**__UpperCAmelCase ) SCREAMING_SNAKE_CASE__ = image_size SCREAMING_SNAKE_CASE__ = patch_size SCREAMING_SNAKE_CASE__ = num_channels SCREAMING_SNAKE_CASE__ = embed_dim SCREAMING_SNAKE_CASE__ = use_conv_embed SCREAMING_SNAKE_CASE__ = hidden_sizes SCREAMING_SNAKE_CASE__ = depths SCREAMING_SNAKE_CASE__ = focal_levels SCREAMING_SNAKE_CASE__ = focal_windows SCREAMING_SNAKE_CASE__ = hidden_act SCREAMING_SNAKE_CASE__ = mlp_ratio SCREAMING_SNAKE_CASE__ = hidden_dropout_prob SCREAMING_SNAKE_CASE__ = drop_path_rate SCREAMING_SNAKE_CASE__ = use_layerscale SCREAMING_SNAKE_CASE__ = layerscale_value SCREAMING_SNAKE_CASE__ = use_post_layernorm SCREAMING_SNAKE_CASE__ = use_post_layernorm_in_modulation SCREAMING_SNAKE_CASE__ = normalize_modulator SCREAMING_SNAKE_CASE__ = initializer_range SCREAMING_SNAKE_CASE__ = layer_norm_eps SCREAMING_SNAKE_CASE__ = encoder_stride SCREAMING_SNAKE_CASE__ = ["""stem"""] + [F"""stage{idx}""" for idx in range(1 , len(self.depths ) + 1 )] SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = get_aligned_output_features_output_indices( out_features=__UpperCAmelCase , out_indices=__UpperCAmelCase , stage_names=self.stage_names )
719
"""simple docstring""" import json import os from pathlib import Path from shutil import copyfile from typing import Any, Dict, List, Optional, Tuple, Union import sentencepiece from ...tokenization_utils import BatchEncoding, PreTrainedTokenizer from ...utils import logging A_ : Union[str, Any] = logging.get_logger(__name__) A_ : Tuple = "▁" A_ : int = { "vocab_file": "vocab.json", "spm_file": "sentencepiece.bpe.model", "tokenizer_config_file": "tokenizer_config.json", } A_ : Dict = { "vocab_file": { "facebook/m2m100_418M": "https://huggingface.co/facebook/m2m100_418M/resolve/main/vocab.json", "facebook/m2m100_1.2B": "https://huggingface.co/facebook/m2m100_1.2B/resolve/main/vocab.json", }, "spm_file": { "facebook/m2m100_418M": "https://huggingface.co/facebook/m2m100_418M/resolve/main/sentencepiece.bpe.model", "facebook/m2m100_1.2B": "https://huggingface.co/facebook/m2m100_1.2B/resolve/main/sentencepiece.bpe.model", }, "tokenizer_config_file": { "facebook/m2m100_418M": "https://huggingface.co/facebook/m2m100_418M/resolve/main/tokenizer_config.json", "facebook/m2m100_1.2B": "https://huggingface.co/facebook/m2m100_1.2B/resolve/main/tokenizer_config.json", }, } A_ : Dict = { "facebook/m2m100_418M": 1_024, } # fmt: off A_ : Any = { "m2m100": ["af", "am", "ar", "ast", "az", "ba", "be", "bg", "bn", "br", "bs", "ca", "ceb", "cs", "cy", "da", "de", "el", "en", "es", "et", "fa", "ff", "fi", "fr", "fy", "ga", "gd", "gl", "gu", "ha", "he", "hi", "hr", "ht", "hu", "hy", "id", "ig", "ilo", "is", "it", "ja", "jv", "ka", "kk", "km", "kn", "ko", "lb", "lg", "ln", "lo", "lt", "lv", "mg", "mk", "ml", "mn", "mr", "ms", "my", "ne", "nl", "no", "ns", "oc", "or", "pa", "pl", "ps", "pt", "ro", "ru", "sd", "si", "sk", "sl", "so", "sq", "sr", "ss", "su", "sv", "sw", "ta", "th", "tl", "tn", "tr", "uk", "ur", "uz", "vi", "wo", "xh", "yi", "yo", "zh", "zu"], "wmt21": ["en", "ha", "is", "ja", "cs", "ru", "zh", "de"] } class lowerCamelCase (A__ ): lowerCamelCase__ : Optional[int] = VOCAB_FILES_NAMES lowerCamelCase__ : Any = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES lowerCamelCase__ : List[Any] = PRETRAINED_VOCAB_FILES_MAP lowerCamelCase__ : Tuple = ['input_ids', 'attention_mask'] lowerCamelCase__ : List[int] = [] lowerCamelCase__ : List[int] = [] def __init__( self : List[str] , __UpperCAmelCase : int , __UpperCAmelCase : Union[str, Any] , __UpperCAmelCase : Dict=None , __UpperCAmelCase : Optional[Any]=None , __UpperCAmelCase : int="<s>" , __UpperCAmelCase : Optional[int]="</s>" , __UpperCAmelCase : Any="</s>" , __UpperCAmelCase : Any="<pad>" , __UpperCAmelCase : str="<unk>" , __UpperCAmelCase : Dict="m2m100" , __UpperCAmelCase : Optional[Dict[str, Any]] = None , __UpperCAmelCase : str=8 , **__UpperCAmelCase : str , ) -> None: SCREAMING_SNAKE_CASE__ = {} if sp_model_kwargs is None else sp_model_kwargs SCREAMING_SNAKE_CASE__ = language_codes SCREAMING_SNAKE_CASE__ = FAIRSEQ_LANGUAGE_CODES[language_codes] SCREAMING_SNAKE_CASE__ = {lang_code: F"""__{lang_code}__""" for lang_code in fairseq_language_code} SCREAMING_SNAKE_CASE__ = kwargs.get("""additional_special_tokens""" , [] ) kwargs["additional_special_tokens"] += [ self.get_lang_token(__UpperCAmelCase ) for lang_code in fairseq_language_code if self.get_lang_token(__UpperCAmelCase ) not in kwargs["additional_special_tokens"] ] super().__init__( src_lang=__UpperCAmelCase , tgt_lang=__UpperCAmelCase , bos_token=__UpperCAmelCase , eos_token=__UpperCAmelCase , sep_token=__UpperCAmelCase , unk_token=__UpperCAmelCase , pad_token=__UpperCAmelCase , 
language_codes=__UpperCAmelCase , sp_model_kwargs=self.sp_model_kwargs , num_madeup_words=__UpperCAmelCase , **__UpperCAmelCase , ) SCREAMING_SNAKE_CASE__ = vocab_file SCREAMING_SNAKE_CASE__ = load_json(__UpperCAmelCase ) SCREAMING_SNAKE_CASE__ = {v: k for k, v in self.encoder.items()} SCREAMING_SNAKE_CASE__ = spm_file SCREAMING_SNAKE_CASE__ = load_spm(__UpperCAmelCase , self.sp_model_kwargs ) SCREAMING_SNAKE_CASE__ = len(self.encoder ) SCREAMING_SNAKE_CASE__ = { self.get_lang_token(__UpperCAmelCase ): self.encoder_size + i for i, lang_code in enumerate(__UpperCAmelCase ) } SCREAMING_SNAKE_CASE__ = {lang_code: self.encoder_size + i for i, lang_code in enumerate(__UpperCAmelCase )} SCREAMING_SNAKE_CASE__ = {v: k for k, v in self.lang_token_to_id.items()} SCREAMING_SNAKE_CASE__ = src_lang if src_lang is not None else """en""" SCREAMING_SNAKE_CASE__ = tgt_lang SCREAMING_SNAKE_CASE__ = self.get_lang_id(self._src_lang ) self.set_src_lang_special_tokens(self._src_lang ) SCREAMING_SNAKE_CASE__ = num_madeup_words @property def SCREAMING_SNAKE_CASE ( self : int ) -> int: return len(self.encoder ) + len(self.lang_token_to_id ) @property def SCREAMING_SNAKE_CASE ( self : Any ) -> str: return self._src_lang @src_lang.setter def SCREAMING_SNAKE_CASE ( self : str , __UpperCAmelCase : str ) -> None: SCREAMING_SNAKE_CASE__ = new_src_lang self.set_src_lang_special_tokens(self._src_lang ) def SCREAMING_SNAKE_CASE ( self : int , __UpperCAmelCase : str ) -> List[str]: return self.sp_model.encode(__UpperCAmelCase , out_type=__UpperCAmelCase ) def SCREAMING_SNAKE_CASE ( self : str , __UpperCAmelCase : Tuple ) -> Tuple: if token in self.lang_token_to_id: return self.lang_token_to_id[token] return self.encoder.get(__UpperCAmelCase , self.encoder[self.unk_token] ) def SCREAMING_SNAKE_CASE ( self : Dict , __UpperCAmelCase : int ) -> str: if index in self.id_to_lang_token: return self.id_to_lang_token[index] return self.decoder.get(__UpperCAmelCase , self.unk_token ) def SCREAMING_SNAKE_CASE ( self : Optional[Any] , __UpperCAmelCase : Optional[Any] ) -> Dict: SCREAMING_SNAKE_CASE__ = [] SCREAMING_SNAKE_CASE__ = """""" for token in tokens: # make sure that special tokens are not decoded using sentencepiece model if token in self.all_special_tokens: out_string += self.sp_model.decode(__UpperCAmelCase ) + token SCREAMING_SNAKE_CASE__ = [] else: current_sub_tokens.append(__UpperCAmelCase ) out_string += self.sp_model.decode(__UpperCAmelCase ) return out_string.strip() def SCREAMING_SNAKE_CASE ( self : int , __UpperCAmelCase : List[int] , __UpperCAmelCase : Optional[List[int]] = None , __UpperCAmelCase : bool = False ) -> List[int]: if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=__UpperCAmelCase , token_ids_a=__UpperCAmelCase , already_has_special_tokens=__UpperCAmelCase ) SCREAMING_SNAKE_CASE__ = [1] * len(self.prefix_tokens ) SCREAMING_SNAKE_CASE__ = [1] * len(self.suffix_tokens ) if token_ids_a is None: return prefix_ones + ([0] * len(__UpperCAmelCase )) + suffix_ones return prefix_ones + ([0] * len(__UpperCAmelCase )) + ([0] * len(__UpperCAmelCase )) + suffix_ones def SCREAMING_SNAKE_CASE ( self : List[str] , __UpperCAmelCase : List[int] , __UpperCAmelCase : Optional[List[int]] = None ) -> List[int]: if token_ids_a is None: return self.prefix_tokens + token_ids_a + self.suffix_tokens # We don't expect to process pairs, but leave the pair logic for API consistency return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens def SCREAMING_SNAKE_CASE ( self : Dict ) -> 
Dict: SCREAMING_SNAKE_CASE__ = {self.convert_ids_to_tokens(__UpperCAmelCase ): i for i in range(self.vocab_size )} vocab.update(self.added_tokens_encoder ) return vocab def __getstate__( self : Union[str, Any] ) -> Dict: SCREAMING_SNAKE_CASE__ = self.__dict__.copy() SCREAMING_SNAKE_CASE__ = None return state def __setstate__( self : Union[str, Any] , __UpperCAmelCase : Dict ) -> None: SCREAMING_SNAKE_CASE__ = d # for backward compatibility if not hasattr(self , """sp_model_kwargs""" ): SCREAMING_SNAKE_CASE__ = {} SCREAMING_SNAKE_CASE__ = load_spm(self.spm_file , self.sp_model_kwargs ) def SCREAMING_SNAKE_CASE ( self : Tuple , __UpperCAmelCase : str , __UpperCAmelCase : Optional[str] = None ) -> Tuple[str]: SCREAMING_SNAKE_CASE__ = Path(__UpperCAmelCase ) if not save_dir.is_dir(): raise OSError(F"""{save_directory} should be a directory""" ) SCREAMING_SNAKE_CASE__ = save_dir / ( (filename_prefix + """-""" if filename_prefix else """""") + self.vocab_files_names["""vocab_file"""] ) SCREAMING_SNAKE_CASE__ = save_dir / ( (filename_prefix + """-""" if filename_prefix else """""") + self.vocab_files_names["""spm_file"""] ) save_json(self.encoder , __UpperCAmelCase ) if os.path.abspath(self.spm_file ) != os.path.abspath(__UpperCAmelCase ) and os.path.isfile(self.spm_file ): copyfile(self.spm_file , __UpperCAmelCase ) elif not os.path.isfile(self.spm_file ): with open(__UpperCAmelCase , """wb""" ) as fi: SCREAMING_SNAKE_CASE__ = self.sp_model.serialized_model_proto() fi.write(__UpperCAmelCase ) return (str(__UpperCAmelCase ), str(__UpperCAmelCase )) def SCREAMING_SNAKE_CASE ( self : Optional[Any] , __UpperCAmelCase : List[str] , __UpperCAmelCase : str = "en" , __UpperCAmelCase : Optional[List[str]] = None , __UpperCAmelCase : str = "ro" , **__UpperCAmelCase : str , ) -> BatchEncoding: SCREAMING_SNAKE_CASE__ = src_lang SCREAMING_SNAKE_CASE__ = tgt_lang self.set_src_lang_special_tokens(self.src_lang ) return super().prepare_seqaseq_batch(__UpperCAmelCase , __UpperCAmelCase , **__UpperCAmelCase ) def SCREAMING_SNAKE_CASE ( self : Dict , __UpperCAmelCase : int , __UpperCAmelCase : Optional[str] , __UpperCAmelCase : Optional[str] , **__UpperCAmelCase : Tuple ) -> str: if src_lang is None or tgt_lang is None: raise ValueError("""Translation requires a `src_lang` and a `tgt_lang` for this model""" ) SCREAMING_SNAKE_CASE__ = src_lang SCREAMING_SNAKE_CASE__ = self(__UpperCAmelCase , add_special_tokens=__UpperCAmelCase , **__UpperCAmelCase ) SCREAMING_SNAKE_CASE__ = self.get_lang_id(__UpperCAmelCase ) SCREAMING_SNAKE_CASE__ = tgt_lang_id return inputs def SCREAMING_SNAKE_CASE ( self : List[str] ) -> int: self.set_src_lang_special_tokens(self.src_lang ) def SCREAMING_SNAKE_CASE ( self : str ) -> str: self.set_tgt_lang_special_tokens(self.tgt_lang ) def SCREAMING_SNAKE_CASE ( self : List[Any] , __UpperCAmelCase : str ) -> None: SCREAMING_SNAKE_CASE__ = self.get_lang_token(__UpperCAmelCase ) SCREAMING_SNAKE_CASE__ = self.lang_token_to_id[lang_token] SCREAMING_SNAKE_CASE__ = [self.cur_lang_id] SCREAMING_SNAKE_CASE__ = [self.eos_token_id] def SCREAMING_SNAKE_CASE ( self : Dict , __UpperCAmelCase : str ) -> None: SCREAMING_SNAKE_CASE__ = self.get_lang_token(__UpperCAmelCase ) SCREAMING_SNAKE_CASE__ = self.lang_token_to_id[lang_token] SCREAMING_SNAKE_CASE__ = [self.cur_lang_id] SCREAMING_SNAKE_CASE__ = [self.eos_token_id] def SCREAMING_SNAKE_CASE ( self : Dict , __UpperCAmelCase : str ) -> str: return self.lang_code_to_token[lang] def SCREAMING_SNAKE_CASE ( self : int , __UpperCAmelCase : str ) -> int: 
SCREAMING_SNAKE_CASE__ = self.get_lang_token(__UpperCAmelCase ) return self.lang_token_to_id[lang_token] def load_spm( path , sp_model_kwargs ): '''simple docstring''' SCREAMING_SNAKE_CASE__ = sentencepiece.SentencePieceProcessor(**sp_model_kwargs ) SCREAMING_SNAKE_CASE__.Load(str(path ) ) return SCREAMING_SNAKE_CASE__ def load_json( path ): '''simple docstring''' with open(path , """r""" ) as f: return json.load(f ) def save_json( data , path ): '''simple docstring''' with open(path , """w""" ) as f: json.dump(data , f , indent=2 )
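A usage sketch for the tokenizer above (model id taken from the pretrained map; the input sentence is arbitrary):

from transformers import M2M100Tokenizer

tokenizer = M2M100Tokenizer.from_pretrained(
    "facebook/m2m100_418M", src_lang="en", tgt_lang="ro"
)
# Encoding prepends the source-language token and appends </s>.
inputs = tokenizer("Hello world", return_tensors="pt")
# Token id to force as the first generated token on the target side.
forced_bos_token_id = tokenizer.get_lang_id("ro")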
616
0
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available __lowerCAmelCase : int ={ "configuration_chinese_clip": [ "CHINESE_CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP", "ChineseCLIPConfig", "ChineseCLIPOnnxConfig", "ChineseCLIPTextConfig", "ChineseCLIPVisionConfig", ], "processing_chinese_clip": ["ChineseCLIPProcessor"], } try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __lowerCAmelCase : str =["ChineseCLIPFeatureExtractor"] __lowerCAmelCase : str =["ChineseCLIPImageProcessor"] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __lowerCAmelCase : Any =[ "CHINESE_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST", "ChineseCLIPModel", "ChineseCLIPPreTrainedModel", "ChineseCLIPTextModel", "ChineseCLIPVisionModel", ] if TYPE_CHECKING: from .configuration_chinese_clip import ( CHINESE_CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP, ChineseCLIPConfig, ChineseCLIPOnnxConfig, ChineseCLIPTextConfig, ChineseCLIPVisionConfig, ) from .processing_chinese_clip import ChineseCLIPProcessor try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .feature_extraction_chinese_clip import ChineseCLIPFeatureExtractor, ChineseCLIPImageProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_chinese_clip import ( CHINESE_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST, ChineseCLIPModel, ChineseCLIPPreTrainedModel, ChineseCLIPTextModel, ChineseCLIPVisionModel, ) else: import sys __lowerCAmelCase : Dict =_LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
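The structure above defers the heavy imports until first attribute access; a sketch of the consumer side (the checkpoint id is an assumption, not part of this file):

# Both names resolve lazily through the _LazyModule machinery above.
from transformers import ChineseCLIPModel, ChineseCLIPProcessor

checkpoint = "OFA-Sys/chinese-clip-vit-base-patch16"  # assumed checkpoint id
model = ChineseCLIPModel.from_pretrained(checkpoint)
processor = ChineseCLIPProcessor.from_pretrained(checkpoint)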
440
import logging import os import sys from dataclasses import dataclass, field from importlib import import_module from typing import Dict, List, Optional, Tuple import numpy as np from seqeval.metrics import accuracy_score, fa_score, precision_score, recall_score from torch import nn from utils_ner import Split, TokenClassificationDataset, TokenClassificationTask import transformers from transformers import ( AutoConfig, AutoModelForTokenClassification, AutoTokenizer, DataCollatorWithPadding, EvalPrediction, HfArgumentParser, Trainer, TrainingArguments, set_seed, ) from transformers.trainer_utils import is_main_process _SCREAMING_SNAKE_CASE = logging.getLogger(__name__) @dataclass class SCREAMING_SNAKE_CASE_ : __lowerCAmelCase = field( metadata={"""help""": """Path to pretrained model or model identifier from huggingface.co/models"""} ) __lowerCAmelCase = field( default=__lowerCAmelCase , metadata={"""help""": """Pretrained config name or path if not the same as model_name"""} ) __lowerCAmelCase = field( default="""NER""" , metadata={"""help""": """Task type to fine tune in training (e.g. NER, POS, etc)"""} ) __lowerCAmelCase = field( default=__lowerCAmelCase , metadata={"""help""": """Pretrained tokenizer name or path if not the same as model_name"""} ) __lowerCAmelCase = field(default=__lowerCAmelCase , metadata={"""help""": """Set this flag to use fast tokenization."""} ) # If you want to tweak more attributes on your tokenizer, you should do it in a distinct script, # or just modify its tokenizer_config.json. __lowerCAmelCase = field( default=__lowerCAmelCase , metadata={"""help""": """Where do you want to store the pretrained models downloaded from huggingface.co"""} , ) @dataclass class SCREAMING_SNAKE_CASE_ : __lowerCAmelCase = field( metadata={"""help""": """The input data dir. Should contain the .txt files for a CoNLL-2003-formatted task."""} ) __lowerCAmelCase = field( default=__lowerCAmelCase , metadata={"""help""": """Path to a file containing all labels. If not specified, CoNLL-2003 labels are used."""} , ) __lowerCAmelCase = field( default=128 , metadata={ """help""": ( """The maximum total input sequence length after tokenization. Sequences longer """ """than this will be truncated, sequences shorter will be padded.""" ) } , ) __lowerCAmelCase = field( default=__lowerCAmelCase , metadata={"""help""": """Overwrite the cached training and evaluation sets"""} ) def lowercase( ) -> Optional[Any]: '''simple docstring''' # See all possible arguments in src/transformers/training_args.py # or by passing the --help flag to this script. # We now keep distinct sets of args, for a cleaner separation of concerns. UpperCamelCase = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) ) if len(sys.argv ) == 2 and sys.argv[1].endswith(""".json""" ): # If we pass only one argument to the script and it's the path to a json file, # let's parse it to get our arguments. UpperCamelCase , UpperCamelCase , UpperCamelCase = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) ) else: UpperCamelCase , UpperCamelCase , UpperCamelCase = parser.parse_args_into_dataclasses() if ( os.path.exists(training_args.output_dir ) and os.listdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir ): raise ValueError( f"""Output directory ({training_args.output_dir}) already exists and is not empty. 
Use""" """ --overwrite_output_dir to overcome.""" ) UpperCamelCase = import_module("""tasks""" ) try: UpperCamelCase = getattr(UpperCamelCase_ , model_args.task_type ) UpperCamelCase = token_classification_task_clazz() except AttributeError: raise ValueError( f"""Task {model_args.task_type} needs to be defined as a TokenClassificationTask subclass in {module}. """ f"""Available tasks classes are: {TokenClassificationTask.__subclasses__()}""" ) # Setup logging logging.basicConfig( format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""" , datefmt="""%m/%d/%Y %H:%M:%S""" , level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN , ) logger.warning( """Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s""" , training_args.local_rank , training_args.device , training_args.n_gpu , bool(training_args.local_rank != -1 ) , training_args.fpaa , ) # Set the verbosity to info of the Transformers logger (on main process only): if is_main_process(training_args.local_rank ): transformers.utils.logging.set_verbosity_info() transformers.utils.logging.enable_default_handler() transformers.utils.logging.enable_explicit_format() logger.info("""Training/evaluation parameters %s""" , UpperCamelCase_ ) # Set seed set_seed(training_args.seed ) # Prepare CONLL-2003 task UpperCamelCase = token_classification_task.get_labels(data_args.labels ) UpperCamelCase = dict(enumerate(UpperCamelCase_ ) ) UpperCamelCase = len(UpperCamelCase_ ) # Load pretrained model and tokenizer # # Distributed training: # The .from_pretrained methods guarantee that only one local process can concurrently # download model & vocab. UpperCamelCase = AutoConfig.from_pretrained( model_args.config_name if model_args.config_name else model_args.model_name_or_path , num_labels=UpperCamelCase_ , idalabel=UpperCamelCase_ , labelaid={label: i for i, label in enumerate(UpperCamelCase_ )} , cache_dir=model_args.cache_dir , ) UpperCamelCase = AutoTokenizer.from_pretrained( model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , use_fast=model_args.use_fast , ) UpperCamelCase = AutoModelForTokenClassification.from_pretrained( model_args.model_name_or_path , from_tf=bool(""".ckpt""" in model_args.model_name_or_path ) , config=UpperCamelCase_ , cache_dir=model_args.cache_dir , ) # Get datasets UpperCamelCase = ( TokenClassificationDataset( token_classification_task=UpperCamelCase_ , data_dir=data_args.data_dir , tokenizer=UpperCamelCase_ , labels=UpperCamelCase_ , model_type=config.model_type , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.train , ) if training_args.do_train else None ) UpperCamelCase = ( TokenClassificationDataset( token_classification_task=UpperCamelCase_ , data_dir=data_args.data_dir , tokenizer=UpperCamelCase_ , labels=UpperCamelCase_ , model_type=config.model_type , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.dev , ) if training_args.do_eval else None ) def align_predictions(UpperCamelCase_ , UpperCamelCase_ ) -> Tuple[List[int], List[int]]: UpperCamelCase = np.argmax(UpperCamelCase_ , axis=2 ) UpperCamelCase , UpperCamelCase = preds.shape UpperCamelCase = [[] for _ in range(UpperCamelCase_ )] UpperCamelCase = [[] for _ in range(UpperCamelCase_ )] for i in range(UpperCamelCase_ ): for j in range(UpperCamelCase_ ): if label_ids[i, j] != nn.CrossEntropyLoss().ignore_index: 
out_label_list[i].append(label_map[label_ids[i][j]] ) preds_list[i].append(label_map[preds[i][j]] ) return preds_list, out_label_list def compute_metrics(UpperCamelCase_ ) -> Dict: UpperCamelCase , UpperCamelCase = align_predictions(p.predictions , p.label_ids ) return { "accuracy_score": accuracy_score(UpperCamelCase_ , UpperCamelCase_ ), "precision": precision_score(UpperCamelCase_ , UpperCamelCase_ ), "recall": recall_score(UpperCamelCase_ , UpperCamelCase_ ), "f1": fa_score(UpperCamelCase_ , UpperCamelCase_ ), } # Data collator UpperCamelCase = DataCollatorWithPadding(UpperCamelCase_ , pad_to_multiple_of=8 ) if training_args.fpaa else None # Initialize our Trainer UpperCamelCase = Trainer( model=UpperCamelCase_ , args=UpperCamelCase_ , train_dataset=UpperCamelCase_ , eval_dataset=UpperCamelCase_ , compute_metrics=UpperCamelCase_ , data_collator=UpperCamelCase_ , ) # Training if training_args.do_train: trainer.train( model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path ) else None ) trainer.save_model() # For convenience, we also re-save the tokenizer to the same directory, # so that you can share your model easily on huggingface.co/models =) if trainer.is_world_process_zero(): tokenizer.save_pretrained(training_args.output_dir ) # Evaluation UpperCamelCase = {} if training_args.do_eval: logger.info("""*** Evaluate ***""" ) UpperCamelCase = trainer.evaluate() UpperCamelCase = os.path.join(training_args.output_dir , """eval_results.txt""" ) if trainer.is_world_process_zero(): with open(UpperCamelCase_ , """w""" ) as writer: logger.info("""***** Eval results *****""" ) for key, value in result.items(): logger.info(""" %s = %s""" , UpperCamelCase_ , UpperCamelCase_ ) writer.write("""%s = %s\n""" % (key, value) ) results.update(UpperCamelCase_ ) # Predict if training_args.do_predict: UpperCamelCase = TokenClassificationDataset( token_classification_task=UpperCamelCase_ , data_dir=data_args.data_dir , tokenizer=UpperCamelCase_ , labels=UpperCamelCase_ , model_type=config.model_type , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.test , ) UpperCamelCase , UpperCamelCase , UpperCamelCase = trainer.predict(UpperCamelCase_ ) UpperCamelCase , UpperCamelCase = align_predictions(UpperCamelCase_ , UpperCamelCase_ ) UpperCamelCase = os.path.join(training_args.output_dir , """test_results.txt""" ) if trainer.is_world_process_zero(): with open(UpperCamelCase_ , """w""" ) as writer: for key, value in metrics.items(): logger.info(""" %s = %s""" , UpperCamelCase_ , UpperCamelCase_ ) writer.write("""%s = %s\n""" % (key, value) ) # Save predictions UpperCamelCase = os.path.join(training_args.output_dir , """test_predictions.txt""" ) if trainer.is_world_process_zero(): with open(UpperCamelCase_ , """w""" ) as writer: with open(os.path.join(data_args.data_dir , """test.txt""" ) , """r""" ) as f: token_classification_task.write_predictions_to_file(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ) return results def lowercase( UpperCamelCase_ ) -> Dict: '''simple docstring''' # For xla_spawn (TPUs) main() if __name__ == "__main__": main()
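Because the parser above accepts a single JSON file as the sole command-line argument, a sketch of preparing such a file (model name and paths are placeholders):

import json

args = {
    "model_name_or_path": "bert-base-cased",  # placeholder
    "task_type": "NER",
    "data_dir": "./conll2003",                # placeholder
    "output_dir": "./ner-out",                # placeholder
    "max_seq_length": 128,
    "do_train": True,
    "do_eval": True,
}
with open("ner_args.json", "w") as f:
    json.dump(args, f, indent=2)
# Then launch: python run_ner.py ner_args.json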
537
0
# limitations under the License. from typing import Optional, Tuple, Union import torch from diffusers import DiffusionPipeline, ImagePipelineOutput class _lowercase ( A__ ): '''simple docstring''' def __init__( self :List[Any] , lowerCAmelCase__ :Dict , lowerCAmelCase__ :List[str] ) -> Tuple: super().__init__() self.register_modules(unet=lowerCAmelCase__ , scheduler=lowerCAmelCase__ ) @torch.no_grad() def __call__( self :List[str] , lowerCAmelCase__ :int = 1 , lowerCAmelCase__ :Optional[torch.Generator] = None , lowerCAmelCase__ :int = 50 , lowerCAmelCase__ :Optional[str] = "pil" , lowerCAmelCase__ :bool = True , **lowerCAmelCase__ :Any , ) -> Union[ImagePipelineOutput, Tuple]: __SCREAMING_SNAKE_CASE : int = torch.randn( (batch_size, self.unet.config.in_channels, self.unet.config.sample_size, self.unet.config.sample_size) , generator=lowerCAmelCase__ , ) __SCREAMING_SNAKE_CASE : int = image.to(self.device ) # set step values self.scheduler.set_timesteps(lowerCAmelCase__ ) for t in self.progress_bar(self.scheduler.timesteps ): # 1. predict noise model_output __SCREAMING_SNAKE_CASE : Any = self.unet(lowerCAmelCase__ , lowerCAmelCase__ ).sample # 2. predict previous mean of image x_t-1 and add variance depending on eta # eta corresponds to η in paper and should be between [0, 1] # do x_t -> x_t-1 __SCREAMING_SNAKE_CASE : Union[str, Any] = self.scheduler.step(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ).prev_sample __SCREAMING_SNAKE_CASE : int = (image / 2 + 0.5).clamp(0 , 1 ) __SCREAMING_SNAKE_CASE : Optional[Any] = image.cpu().permute(0 , 2 , 3 , 1 ).numpy() if output_type == "pil": __SCREAMING_SNAKE_CASE : Optional[int] = self.numpy_to_pil(lowerCAmelCase__ ) if not return_dict: return (image,), "This is a local test" return ImagePipelineOutput(images=lowerCAmelCase__ ), "This is a local test"
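A sketch of driving the pipeline class above, assuming its two constructor arguments are the usual unet and scheduler (as the register_modules call suggests); the checkpoint id is also an assumption:

import torch
from diffusers import DDIMScheduler, UNet2DModel

unet = UNet2DModel.from_pretrained("google/ddpm-cifar10-32")  # assumed checkpoint
scheduler = DDIMScheduler()
pipe = _lowercase(unet=unet, scheduler=scheduler)  # the pipeline class above
output, note = pipe(batch_size=1, num_inference_steps=20, generator=torch.manual_seed(0))
image = output.images[0]  # `note` is the extra "This is a local test" string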
260
import torch from diffusers import EulerDiscreteScheduler from diffusers.utils import torch_device from .test_schedulers import SchedulerCommonTest class _lowercase ( A__ ): '''simple docstring''' SCREAMING_SNAKE_CASE__ : Optional[Any] = (EulerDiscreteScheduler,) SCREAMING_SNAKE_CASE__ : Union[str, Any] = 10 def __magic_name__( self :Dict , **lowerCAmelCase__ :Any ) -> int: __SCREAMING_SNAKE_CASE : List[str] = { '''num_train_timesteps''': 1_100, '''beta_start''': 0.0001, '''beta_end''': 0.02, '''beta_schedule''': '''linear''', } config.update(**lowerCAmelCase__ ) return config def __magic_name__( self :str ) -> Optional[Any]: for timesteps in [10, 50, 100, 1_000]: self.check_over_configs(num_train_timesteps=lowerCAmelCase__ ) def __magic_name__( self :str ) -> List[str]: for beta_start, beta_end in zip([0.0_0001, 0.0001, 0.001] , [0.0002, 0.002, 0.02] ): self.check_over_configs(beta_start=lowerCAmelCase__ , beta_end=lowerCAmelCase__ ) def __magic_name__( self :Dict ) -> Any: for schedule in ["linear", "scaled_linear"]: self.check_over_configs(beta_schedule=lowerCAmelCase__ ) def __magic_name__( self :List[Any] ) -> List[str]: for prediction_type in ["epsilon", "v_prediction"]: self.check_over_configs(prediction_type=lowerCAmelCase__ ) def __magic_name__( self :Dict ) -> int: __SCREAMING_SNAKE_CASE : Dict = self.scheduler_classes[0] __SCREAMING_SNAKE_CASE : List[str] = self.get_scheduler_config() __SCREAMING_SNAKE_CASE : Dict = scheduler_class(**lowerCAmelCase__ ) scheduler.set_timesteps(self.num_inference_steps ) __SCREAMING_SNAKE_CASE : Union[str, Any] = torch.manual_seed(0 ) __SCREAMING_SNAKE_CASE : List[Any] = self.dummy_model() __SCREAMING_SNAKE_CASE : Optional[Any] = self.dummy_sample_deter * scheduler.init_noise_sigma __SCREAMING_SNAKE_CASE : Any = sample.to(lowerCAmelCase__ ) for i, t in enumerate(scheduler.timesteps ): __SCREAMING_SNAKE_CASE : List[Any] = scheduler.scale_model_input(lowerCAmelCase__ , lowerCAmelCase__ ) __SCREAMING_SNAKE_CASE : Optional[Any] = model(lowerCAmelCase__ , lowerCAmelCase__ ) __SCREAMING_SNAKE_CASE : str = scheduler.step(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , generator=lowerCAmelCase__ ) __SCREAMING_SNAKE_CASE : Optional[int] = output.prev_sample __SCREAMING_SNAKE_CASE : Optional[Any] = torch.sum(torch.abs(lowerCAmelCase__ ) ) __SCREAMING_SNAKE_CASE : Union[str, Any] = torch.mean(torch.abs(lowerCAmelCase__ ) ) assert abs(result_sum.item() - 10.0807 ) < 1E-2 assert abs(result_mean.item() - 0.0131 ) < 1E-3 def __magic_name__( self :Union[str, Any] ) -> int: __SCREAMING_SNAKE_CASE : Tuple = self.scheduler_classes[0] __SCREAMING_SNAKE_CASE : int = self.get_scheduler_config(prediction_type='''v_prediction''' ) __SCREAMING_SNAKE_CASE : Optional[int] = scheduler_class(**lowerCAmelCase__ ) scheduler.set_timesteps(self.num_inference_steps ) __SCREAMING_SNAKE_CASE : Dict = torch.manual_seed(0 ) __SCREAMING_SNAKE_CASE : Optional[Any] = self.dummy_model() __SCREAMING_SNAKE_CASE : Union[str, Any] = self.dummy_sample_deter * scheduler.init_noise_sigma __SCREAMING_SNAKE_CASE : Dict = sample.to(lowerCAmelCase__ ) for i, t in enumerate(scheduler.timesteps ): __SCREAMING_SNAKE_CASE : str = scheduler.scale_model_input(lowerCAmelCase__ , lowerCAmelCase__ ) __SCREAMING_SNAKE_CASE : List[Any] = model(lowerCAmelCase__ , lowerCAmelCase__ ) __SCREAMING_SNAKE_CASE : Optional[Any] = scheduler.step(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , generator=lowerCAmelCase__ ) __SCREAMING_SNAKE_CASE : List[str] = output.prev_sample __SCREAMING_SNAKE_CASE : 
Tuple = torch.sum(torch.abs(lowerCAmelCase__ ) ) __SCREAMING_SNAKE_CASE : Tuple = torch.mean(torch.abs(lowerCAmelCase__ ) ) assert abs(result_sum.item() - 0.0002 ) < 1E-2 assert abs(result_mean.item() - 2.2_6_7_6E-0_6 ) < 1E-3 def __magic_name__( self :Optional[int] ) -> List[str]: __SCREAMING_SNAKE_CASE : Any = self.scheduler_classes[0] __SCREAMING_SNAKE_CASE : Optional[int] = self.get_scheduler_config() __SCREAMING_SNAKE_CASE : List[str] = scheduler_class(**lowerCAmelCase__ ) scheduler.set_timesteps(self.num_inference_steps , device=lowerCAmelCase__ ) __SCREAMING_SNAKE_CASE : Dict = torch.manual_seed(0 ) __SCREAMING_SNAKE_CASE : int = self.dummy_model() __SCREAMING_SNAKE_CASE : int = self.dummy_sample_deter * scheduler.init_noise_sigma.cpu() __SCREAMING_SNAKE_CASE : Optional[Any] = sample.to(lowerCAmelCase__ ) for t in scheduler.timesteps: __SCREAMING_SNAKE_CASE : Optional[Any] = scheduler.scale_model_input(lowerCAmelCase__ , lowerCAmelCase__ ) __SCREAMING_SNAKE_CASE : str = model(lowerCAmelCase__ , lowerCAmelCase__ ) __SCREAMING_SNAKE_CASE : List[str] = scheduler.step(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , generator=lowerCAmelCase__ ) __SCREAMING_SNAKE_CASE : Optional[int] = output.prev_sample __SCREAMING_SNAKE_CASE : Dict = torch.sum(torch.abs(lowerCAmelCase__ ) ) __SCREAMING_SNAKE_CASE : int = torch.mean(torch.abs(lowerCAmelCase__ ) ) assert abs(result_sum.item() - 10.0807 ) < 1E-2 assert abs(result_mean.item() - 0.0131 ) < 1E-3 def __magic_name__( self :List[Any] ) -> int: __SCREAMING_SNAKE_CASE : str = self.scheduler_classes[0] __SCREAMING_SNAKE_CASE : Tuple = self.get_scheduler_config() __SCREAMING_SNAKE_CASE : Any = scheduler_class(**lowerCAmelCase__ , use_karras_sigmas=lowerCAmelCase__ ) scheduler.set_timesteps(self.num_inference_steps , device=lowerCAmelCase__ ) __SCREAMING_SNAKE_CASE : Any = torch.manual_seed(0 ) __SCREAMING_SNAKE_CASE : Optional[int] = self.dummy_model() __SCREAMING_SNAKE_CASE : List[Any] = self.dummy_sample_deter * scheduler.init_noise_sigma.cpu() __SCREAMING_SNAKE_CASE : List[str] = sample.to(lowerCAmelCase__ ) for t in scheduler.timesteps: __SCREAMING_SNAKE_CASE : Any = scheduler.scale_model_input(lowerCAmelCase__ , lowerCAmelCase__ ) __SCREAMING_SNAKE_CASE : int = model(lowerCAmelCase__ , lowerCAmelCase__ ) __SCREAMING_SNAKE_CASE : List[str] = scheduler.step(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , generator=lowerCAmelCase__ ) __SCREAMING_SNAKE_CASE : str = output.prev_sample __SCREAMING_SNAKE_CASE : Any = torch.sum(torch.abs(lowerCAmelCase__ ) ) __SCREAMING_SNAKE_CASE : Optional[Any] = torch.mean(torch.abs(lowerCAmelCase__ ) ) assert abs(result_sum.item() - 124.52_2994_9951_1719 ) < 1E-2 assert abs(result_mean.item() - 0.1_6213_9326_3339_9963 ) < 1E-3
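Outside the test harness, the sampling loop exercised above reduces to the following sketch; the model here is a stand-in callable, not a trained network:

import torch
from diffusers import EulerDiscreteScheduler

scheduler = EulerDiscreteScheduler()
scheduler.set_timesteps(10)

sample = torch.randn(1, 3, 32, 32) * scheduler.init_noise_sigma
model = lambda x, t: torch.zeros_like(x)  # stand-in for a UNet

for t in scheduler.timesteps:
    model_input = scheduler.scale_model_input(sample, t)
    noise_pred = model(model_input, t)
    sample = scheduler.step(noise_pred, t, sample).prev_sample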
260
1
import inspect import jax import jax.lax as lax import jax.numpy as jnp from ..utils import add_start_docstrings from ..utils.logging import get_logger _a: Dict = get_logger(__name__) _a: int = r"""\n Args:\n input_ids (`jnp.ndarray` of shape `(batch_size, sequence_length)`):\n Indices of input sequence tokens in the vocabulary.\n\n Indices can be obtained using [`PreTrainedTokenizer`]. See [`PreTrainedTokenizer.encode`] and\n [`PreTrainedTokenizer.__call__`] for details.\n\n [What are input IDs?](../glossary#input-ids)\n scores (`jnp.ndarray` of shape `(batch_size, config.vocab_size)`):\n Prediction scores of a language modeling head. These can be logits for each vocabulary when not using beam\n search or log softmax for each vocabulary token when using beam search\n kwargs (`Dict[str, Any]`, *optional*):\n Additional logits processor specific kwargs.\n\n Return:\n `jnp.ndarray` of shape `(batch_size, config.vocab_size)`: The processed prediction scores.\n\n""" class __UpperCamelCase : @add_start_docstrings(__lowerCAmelCase ) def __call__( self : List[Any] , lowerCAmelCase : str , lowerCAmelCase : Union[str, Any] ): '''simple docstring''' raise NotImplementedError( F"{self.__class__} is an abstract class. Only classes inheriting this class can be called." ) class __UpperCamelCase : @add_start_docstrings(__lowerCAmelCase ) def __call__( self : Dict , lowerCAmelCase : Any , lowerCAmelCase : int ): '''simple docstring''' raise NotImplementedError( F"{self.__class__} is an abstract class. Only classes inheriting this class can be called." ) class __UpperCamelCase ( SCREAMING_SNAKE_CASE__ ): @add_start_docstrings(__lowerCAmelCase ) def __call__( self : List[str] , lowerCAmelCase : List[Any] , lowerCAmelCase : Any , lowerCAmelCase : Optional[Any] , **lowerCAmelCase : Optional[Any] ): '''simple docstring''' for processor in self: UpperCAmelCase_ = inspect.signature(processor.__call__ ).parameters if len(__lowerCAmelCase ) > 3: if not all(arg in kwargs for arg in list(function_args.keys() )[2:] ): raise ValueError( F"Make sure that all the required parameters: {list(function_args.keys() )} for " F"{processor.__class__} are passed to the logits processor." 
) UpperCAmelCase_ = processor(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , **__lowerCAmelCase ) else: UpperCAmelCase_ = processor(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) return scores class __UpperCamelCase ( SCREAMING_SNAKE_CASE__ ): def __init__( self : Dict , lowerCAmelCase : Tuple ): '''simple docstring''' if not isinstance(__lowerCAmelCase , __lowerCAmelCase ) or not (temperature > 0): raise ValueError(F"`temperature` has to be a strictly positive float, but is {temperature}" ) UpperCAmelCase_ = temperature def __call__( self : int , lowerCAmelCase : Any , lowerCAmelCase : List[Any] , lowerCAmelCase : Any ): '''simple docstring''' UpperCAmelCase_ = scores / self.temperature return scores class __UpperCamelCase ( SCREAMING_SNAKE_CASE__ ): def __init__( self : int , lowerCAmelCase : List[Any] , lowerCAmelCase : Union[str, Any] = -float("Inf" ) , lowerCAmelCase : List[Any] = 1 ): '''simple docstring''' if not isinstance(__lowerCAmelCase , __lowerCAmelCase ) or (top_p < 0 or top_p > 1.0): raise ValueError(F"`top_p` has to be a float > 0 and < 1, but is {top_p}" ) if not isinstance(__lowerCAmelCase , __lowerCAmelCase ) or (min_tokens_to_keep < 1): raise ValueError(F"`min_tokens_to_keep` has to be a positive integer, but is {min_tokens_to_keep}" ) UpperCAmelCase_ = top_p UpperCAmelCase_ = filter_value UpperCAmelCase_ = min_tokens_to_keep def __call__( self : int , lowerCAmelCase : str , lowerCAmelCase : str , lowerCAmelCase : Dict ): '''simple docstring''' UpperCAmelCase_ = lax.top_k(__lowerCAmelCase , scores.shape[-1] ) UpperCAmelCase_ = jnp.full_like(__lowerCAmelCase , self.filter_value ) UpperCAmelCase_ = jax.nn.softmax(__lowerCAmelCase , axis=-1 ).cumsum(axis=-1 ) UpperCAmelCase_ = cumulative_probs < self.top_p # include the token that is higher than top_p as well UpperCAmelCase_ = jnp.roll(__lowerCAmelCase , 1 ) score_mask |= score_mask.at[:, 0].set(__lowerCAmelCase ) # min tokens to keep UpperCAmelCase_ = score_mask.at[:, : self.min_tokens_to_keep].set(__lowerCAmelCase ) UpperCAmelCase_ = jnp.where(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) UpperCAmelCase_ = jax.lax.sort_key_val(__lowerCAmelCase , __lowerCAmelCase )[-1] return next_scores class __UpperCamelCase ( SCREAMING_SNAKE_CASE__ ): def __init__( self : Union[str, Any] , lowerCAmelCase : str , lowerCAmelCase : int = -float("Inf" ) , lowerCAmelCase : Optional[int] = 1 ): '''simple docstring''' if not isinstance(__lowerCAmelCase , __lowerCAmelCase ) or top_k <= 0: raise ValueError(F"`top_k` has to be a strictly positive integer, but is {top_k}" ) UpperCAmelCase_ = max(__lowerCAmelCase , __lowerCAmelCase ) UpperCAmelCase_ = filter_value def __call__( self : Optional[Any] , lowerCAmelCase : List[Any] , lowerCAmelCase : Tuple , lowerCAmelCase : str ): '''simple docstring''' UpperCAmelCase_ = scores.shape UpperCAmelCase_ = jnp.full(batch_size * vocab_size , self.filter_value ) UpperCAmelCase_ = min(self.top_k , scores.shape[-1] ) # Safety check UpperCAmelCase_ = lax.top_k(__lowerCAmelCase , __lowerCAmelCase ) UpperCAmelCase_ = jnp.broadcast_to((jnp.arange(__lowerCAmelCase ) * vocab_size)[:, None] , (batch_size, topk) ).flatten() UpperCAmelCase_ = topk_scores.flatten() UpperCAmelCase_ = topk_indices.flatten() + shift UpperCAmelCase_ = next_scores_flat.at[topk_indices_flat].set(__lowerCAmelCase ) UpperCAmelCase_ = next_scores_flat.reshape(__lowerCAmelCase , __lowerCAmelCase ) return next_scores class __UpperCamelCase ( SCREAMING_SNAKE_CASE__ ): def __init__( self : Dict , lowerCAmelCase : 
Optional[Any] ): '''simple docstring''' UpperCAmelCase_ = bos_token_id def __call__( self : List[Any] , lowerCAmelCase : List[Any] , lowerCAmelCase : Dict , lowerCAmelCase : Optional[Any] ): '''simple docstring''' UpperCAmelCase_ = jnp.full(scores.shape , -float("inf" ) ) UpperCAmelCase_ = 1 - jnp.bool_(cur_len - 1 ) UpperCAmelCase_ = jnp.where(__lowerCAmelCase , new_scores.at[:, self.bos_token_id].set(0 ) , __lowerCAmelCase ) return scores class __UpperCamelCase ( SCREAMING_SNAKE_CASE__ ): def __init__( self : Optional[Any] , lowerCAmelCase : Dict , lowerCAmelCase : Dict ): '''simple docstring''' UpperCAmelCase_ = max_length UpperCAmelCase_ = eos_token_id def __call__( self : Tuple , lowerCAmelCase : List[Any] , lowerCAmelCase : Union[str, Any] , lowerCAmelCase : int ): '''simple docstring''' UpperCAmelCase_ = jnp.full(scores.shape , -float("inf" ) ) UpperCAmelCase_ = 1 - jnp.bool_(cur_len - self.max_length + 1 ) UpperCAmelCase_ = jnp.where(__lowerCAmelCase , new_scores.at[:, self.eos_token_id].set(0 ) , __lowerCAmelCase ) return scores class __UpperCamelCase ( SCREAMING_SNAKE_CASE__ ): def __init__( self : List[str] , lowerCAmelCase : Any , lowerCAmelCase : Any ): '''simple docstring''' if not isinstance(__lowerCAmelCase , __lowerCAmelCase ) or min_length < 0: raise ValueError(F"`min_length` has to be a positive integer, but is {min_length}" ) if not isinstance(__lowerCAmelCase , __lowerCAmelCase ) or eos_token_id < 0: raise ValueError(F"`eos_token_id` has to be a positive integer, but is {eos_token_id}" ) UpperCAmelCase_ = min_length UpperCAmelCase_ = eos_token_id def __call__( self : Any , lowerCAmelCase : Optional[Any] , lowerCAmelCase : List[str] , lowerCAmelCase : str ): '''simple docstring''' UpperCAmelCase_ = 1 - jnp.clip(cur_len - self.min_length , 0 , 1 ) UpperCAmelCase_ = jnp.where(__lowerCAmelCase , scores.at[:, self.eos_token_id].set(-float("inf" ) ) , __lowerCAmelCase ) return scores class __UpperCamelCase ( SCREAMING_SNAKE_CASE__ ): def __init__( self : Union[str, Any] , lowerCAmelCase : Dict , lowerCAmelCase : Union[str, Any] ): '''simple docstring''' UpperCAmelCase_ = list(__lowerCAmelCase ) UpperCAmelCase_ = begin_index def __call__( self : Optional[Any] , lowerCAmelCase : List[str] , lowerCAmelCase : int , lowerCAmelCase : Union[str, Any] ): '''simple docstring''' UpperCAmelCase_ = 1 - jnp.bool_(cur_len - self.begin_index ) UpperCAmelCase_ = jnp.where(__lowerCAmelCase , scores.at[:, self.begin_suppress_tokens].set(-float("inf" ) ) , __lowerCAmelCase ) return scores class __UpperCamelCase ( SCREAMING_SNAKE_CASE__ ): def __init__( self : Optional[Any] , lowerCAmelCase : Optional[Any] ): '''simple docstring''' UpperCAmelCase_ = list(__lowerCAmelCase ) def __call__( self : Dict , lowerCAmelCase : Union[str, Any] , lowerCAmelCase : List[Any] , lowerCAmelCase : List[str] ): '''simple docstring''' UpperCAmelCase_ = scores.at[..., self.suppress_tokens].set(-float("inf" ) ) return scores class __UpperCamelCase ( SCREAMING_SNAKE_CASE__ ): def __init__( self : int , lowerCAmelCase : Tuple ): '''simple docstring''' UpperCAmelCase_ = dict(__lowerCAmelCase ) # Converts the dictionary of format {index: token} containing the tokens to be forced to an array, where the # index of the array corresponds to the index of the token to be forced, for XLA compatibility. # Indexes without forced tokens will have a negative value. 
UpperCAmelCase_ = jnp.ones((max(force_token_map.keys() ) + 1) , dtype=jnp.intaa ) * -1 for index, token in force_token_map.items(): if token is not None: UpperCAmelCase_ = force_token_array.at[index].set(__lowerCAmelCase ) UpperCAmelCase_ = jnp.intaa(__lowerCAmelCase ) def __call__( self : Any , lowerCAmelCase : Any , lowerCAmelCase : Dict , lowerCAmelCase : Dict ): '''simple docstring''' def _force_token(lowerCAmelCase : List[Any] ): UpperCAmelCase_ = scores.shape[0] UpperCAmelCase_ = self.force_token_array[generation_idx] UpperCAmelCase_ = jnp.ones_like(__lowerCAmelCase , dtype=scores.dtype ) * -float("inf" ) UpperCAmelCase_ = jnp.zeros((batch_size, 1) , dtype=scores.dtype ) UpperCAmelCase_ = lax.dynamic_update_slice(__lowerCAmelCase , __lowerCAmelCase , (0, current_token) ) return new_scores UpperCAmelCase_ = lax.cond( cur_len >= self.force_token_array.shape[0] , lambda: scores , lambda: lax.cond( self.force_token_array[cur_len] >= 0 , lambda: _force_token(__lowerCAmelCase ) , lambda: scores , ) , ) return scores class __UpperCamelCase ( SCREAMING_SNAKE_CASE__ ): def __init__( self : Union[str, Any] , lowerCAmelCase : Any , lowerCAmelCase : Dict , lowerCAmelCase : Tuple ): '''simple docstring''' UpperCAmelCase_ = generate_config.eos_token_id UpperCAmelCase_ = generate_config.no_timestamps_token_id UpperCAmelCase_ = generate_config.no_timestamps_token_id + 1 UpperCAmelCase_ = decoder_input_length + 1 if generate_config.is_multilingual: # room for language token and task token self.begin_index += 2 if hasattr(__lowerCAmelCase , "max_initial_timestamp_index" ): UpperCAmelCase_ = generate_config.max_initial_timestamp_index else: UpperCAmelCase_ = model_config.vocab_size if self.max_initial_timestamp_index is None: UpperCAmelCase_ = model_config.vocab_size def __call__( self : int , lowerCAmelCase : Dict , lowerCAmelCase : Optional[Any] , lowerCAmelCase : Tuple ): '''simple docstring''' UpperCAmelCase_ = scores.at[:, self.no_timestamps_token_id].set(-float("inf" ) ) def handle_pairs(lowerCAmelCase : Union[str, Any] , lowerCAmelCase : Dict ): UpperCAmelCase_ = jnp.where((cur_len - self.begin_index) >= 1 , __lowerCAmelCase , __lowerCAmelCase ) UpperCAmelCase_ = jnp.where( input_ids_k[cur_len - 1] >= self.timestamp_begin , True and last_was_timestamp , __lowerCAmelCase , ) UpperCAmelCase_ = jnp.where((cur_len - self.begin_index) < 2 , __lowerCAmelCase , __lowerCAmelCase ) UpperCAmelCase_ = jnp.where( input_ids_k[cur_len - 2] >= self.timestamp_begin , __lowerCAmelCase , __lowerCAmelCase , ) return jnp.where( __lowerCAmelCase , jnp.where( penultimate_was_timestamp > 0 , scores_k.at[self.timestamp_begin :].set(-float("inf" ) ) , scores_k.at[: self.eos_token_id].set(-float("inf" ) ) , ) , __lowerCAmelCase , ) UpperCAmelCase_ = jax.vmap(__lowerCAmelCase )(__lowerCAmelCase , __lowerCAmelCase ) UpperCAmelCase_ = jnp.where(cur_len == self.begin_index , __lowerCAmelCase , __lowerCAmelCase ) UpperCAmelCase_ = jnp.where( self.max_initial_timestamp_index is not None , True and apply_max_initial_timestamp , __lowerCAmelCase , ) UpperCAmelCase_ = self.timestamp_begin + self.max_initial_timestamp_index UpperCAmelCase_ = jnp.where( __lowerCAmelCase , scores.at[:, last_allowed + 1 :].set(-float("inf" ) ) , __lowerCAmelCase , ) # if sum of probability over timestamps is above any other token, sample timestamp UpperCAmelCase_ = jax.nn.log_softmax(__lowerCAmelCase , axis=-1 ) def handle_cumulative_probs(lowerCAmelCase : Dict , lowerCAmelCase : List[Any] ): UpperCAmelCase_ = 
jax.nn.logsumexp(logprobs_k[self.timestamp_begin :] , axis=-1 ) UpperCAmelCase_ = jnp.max(logprobs_k[: self.timestamp_begin] ) return jnp.where( timestamp_logprob > max_text_token_logprob , scores_k.at[: self.timestamp_begin].set(-float("inf" ) ) , __lowerCAmelCase , ) UpperCAmelCase_ = jax.vmap(__lowerCAmelCase )(__lowerCAmelCase , __lowerCAmelCase ) return scores
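An end-to-end sketch of chaining two of the warpers defined above through the processor list; batch size and vocabulary size are illustrative:

import jax.numpy as jnp
from transformers import (
    FlaxLogitsProcessorList,
    FlaxTemperatureLogitsWarper,
    FlaxTopKLogitsWarper,
)

processors = FlaxLogitsProcessorList(
    [FlaxTemperatureLogitsWarper(temperature=0.7), FlaxTopKLogitsWarper(top_k=50)]
)
input_ids = jnp.ones((2, 4), dtype=jnp.int32)
scores = jnp.zeros((2, 32_000))  # (batch_size, vocab_size) logits
scores = processors(input_ids, scores, cur_len=4)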
162
'''simple docstring''' from typing import List, Optional, Union import numpy as np import torch import torchaudio.compliance.kaldi as ta_kaldi from ...feature_extraction_sequence_utils import SequenceFeatureExtractor from ...feature_extraction_utils import BatchFeature from ...utils import PaddingStrategy, TensorType, logging UpperCamelCase =logging.get_logger(__name__) class A ( SCREAMING_SNAKE_CASE__ ): """simple docstring""" __a : List[Any] = ['''input_features''', '''attention_mask'''] def __init__( self , __lowerCAmelCase=80 , __lowerCAmelCase=1_60_00 , __lowerCAmelCase=80 , __lowerCAmelCase=0.0 , __lowerCAmelCase=True , __lowerCAmelCase=True , __lowerCAmelCase=True , **__lowerCAmelCase , ): super().__init__(feature_size=__lowerCAmelCase , sampling_rate=__lowerCAmelCase , padding_value=__lowerCAmelCase , **__lowerCAmelCase ) UpperCamelCase_ : int = num_mel_bins UpperCamelCase_ : Union[str, Any] = do_ceptral_normalize UpperCamelCase_ : Any = normalize_means UpperCamelCase_ : int = normalize_vars UpperCamelCase_ : int = True def _UpperCAmelCase ( self , __lowerCAmelCase , ): UpperCamelCase_ : Optional[Any] = waveform * (2**15) # Kaldi compliance: 16-bit signed integers UpperCamelCase_ : Optional[int] = torch.from_numpy(__lowerCAmelCase ).unsqueeze(0 ) UpperCamelCase_ : Tuple = ta_kaldi.fbank(__lowerCAmelCase , num_mel_bins=self.num_mel_bins , sample_frequency=self.sampling_rate ) return features.numpy() @staticmethod def _UpperCAmelCase ( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = True , __lowerCAmelCase = True , __lowerCAmelCase = 0.0 , ): # make sure we normalize float32 arrays if normalize_means: UpperCamelCase_ : int = x[:input_length].mean(axis=0 ) UpperCamelCase_ : str = np.subtract(__lowerCAmelCase , __lowerCAmelCase ) if normalize_vars: UpperCamelCase_ : Optional[int] = x[:input_length].std(axis=0 ) UpperCamelCase_ : Optional[Any] = np.divide(__lowerCAmelCase , __lowerCAmelCase ) if input_length < x.shape[0]: UpperCamelCase_ : Any = padding_value # make sure array is in float32 UpperCamelCase_ : str = x.astype(np.floataa ) return x def _UpperCAmelCase ( self , __lowerCAmelCase , __lowerCAmelCase = None ): UpperCamelCase_ : List[str] = attention_mask.sum(-1 ) if attention_mask is not None else [x.shape[0] for x in input_features] return [ self.utterance_cmvn(__lowerCAmelCase , __lowerCAmelCase , self.normalize_means , self.normalize_vars , self.padding_value ) for x, n in zip(__lowerCAmelCase , __lowerCAmelCase ) ] def __call__( self , __lowerCAmelCase , __lowerCAmelCase = False , __lowerCAmelCase = None , __lowerCAmelCase = False , __lowerCAmelCase = None , __lowerCAmelCase = None , __lowerCAmelCase = None , __lowerCAmelCase = None , **__lowerCAmelCase , ): if sampling_rate is not None: if sampling_rate != self.sampling_rate: raise ValueError( F"The model corresponding to this feature extractor: {self} was trained using a sampling rate of" F" {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled with" F" {self.sampling_rate} and not {sampling_rate}." ) else: logger.warning( """It is strongly recommended to pass the `sampling_rate` argument to this function. 
""" """Failing to do so can result in silent errors that might be hard to debug.""" ) UpperCamelCase_ : Optional[int] = isinstance(__lowerCAmelCase , np.ndarray ) and len(raw_speech.shape ) > 1 if is_batched_numpy and len(raw_speech.shape ) > 2: raise ValueError(F"Only mono-channel audio is supported for input to {self}" ) UpperCamelCase_ : List[str] = is_batched_numpy or ( isinstance(__lowerCAmelCase , (list, tuple) ) and (isinstance(raw_speech[0] , (np.ndarray, tuple, list) )) ) if is_batched: UpperCamelCase_ : str = [np.asarray(__lowerCAmelCase , dtype=np.floataa ) for speech in raw_speech] elif not is_batched and not isinstance(__lowerCAmelCase , np.ndarray ): UpperCamelCase_ : Optional[Any] = np.asarray(__lowerCAmelCase , dtype=np.floataa ) elif isinstance(__lowerCAmelCase , np.ndarray ) and raw_speech.dtype is np.dtype(np.floataa ): UpperCamelCase_ : int = raw_speech.astype(np.floataa ) # always return batch if not is_batched: UpperCamelCase_ : Tuple = [raw_speech] # extract fbank features UpperCamelCase_ : Any = [self._extract_fbank_features(__lowerCAmelCase ) for waveform in raw_speech] # convert into correct format for padding UpperCamelCase_ : Tuple = BatchFeature({"""input_features""": features} ) UpperCamelCase_ : Tuple = self.pad( __lowerCAmelCase , padding=__lowerCAmelCase , max_length=__lowerCAmelCase , truncation=__lowerCAmelCase , pad_to_multiple_of=__lowerCAmelCase , return_attention_mask=__lowerCAmelCase , **__lowerCAmelCase , ) # make sure list is in array format UpperCamelCase_ : str = padded_inputs.get("""input_features""" ) if isinstance(input_features[0] , __lowerCAmelCase ): UpperCamelCase_ : Any = [np.asarray(__lowerCAmelCase , dtype=np.floataa ) for feature in input_features] UpperCamelCase_ : Union[str, Any] = padded_inputs.get("""attention_mask""" ) if attention_mask is not None: UpperCamelCase_ : Union[str, Any] = [np.asarray(__lowerCAmelCase , dtype=np.intaa ) for array in attention_mask] # Utterance-level cepstral mean and variance normalization if self.do_ceptral_normalize: UpperCamelCase_ : List[str] = ( np.array(__lowerCAmelCase , dtype=np.intaa ) if self._get_padding_strategies(__lowerCAmelCase , max_length=__lowerCAmelCase ) is not PaddingStrategy.DO_NOT_PAD else None ) UpperCamelCase_ : Optional[int] = self.normalize( padded_inputs["""input_features"""] , attention_mask=__lowerCAmelCase ) if return_tensors is not None: UpperCamelCase_ : Optional[Any] = padded_inputs.convert_to_tensors(__lowerCAmelCase ) return padded_inputs
208
0
'''simple docstring''' from dataclasses import dataclass from typing import Optional, Tuple, Union import torch import torch.nn as nn from ..configuration_utils import ConfigMixin, register_to_config from ..utils import BaseOutput, apply_forward_hook from .modeling_utils import ModelMixin from .vae import Decoder, DecoderOutput, Encoder, VectorQuantizer @dataclass class __lowercase (__lowerCamelCase ): _lowerCamelCase = 42 class __lowercase (__lowerCamelCase , __lowerCamelCase ): @register_to_config def __init__( self : List[Any] , UpperCAmelCase_ : int = 3 , UpperCAmelCase_ : int = 3 , UpperCAmelCase_ : Tuple[str] = ("DownEncoderBlock2D",) , UpperCAmelCase_ : Tuple[str] = ("UpDecoderBlock2D",) , UpperCAmelCase_ : Tuple[int] = (64,) , UpperCAmelCase_ : int = 1 , UpperCAmelCase_ : str = "silu" , UpperCAmelCase_ : int = 3 , UpperCAmelCase_ : int = 32 , UpperCAmelCase_ : int = 256 , UpperCAmelCase_ : int = 32 , UpperCAmelCase_ : Optional[int] = None , UpperCAmelCase_ : float = 0.1_82_15 , UpperCAmelCase_ : str = "group" , ): super().__init__() # pass init params to Encoder UpperCamelCase__ : List[Any] = Encoder( in_channels=UpperCAmelCase_ , out_channels=UpperCAmelCase_ , down_block_types=UpperCAmelCase_ , block_out_channels=UpperCAmelCase_ , layers_per_block=UpperCAmelCase_ , act_fn=UpperCAmelCase_ , norm_num_groups=UpperCAmelCase_ , double_z=UpperCAmelCase_ , ) UpperCamelCase__ : Tuple = vq_embed_dim if vq_embed_dim is not None else latent_channels UpperCamelCase__ : str = nn.Convad(UpperCAmelCase_ , UpperCAmelCase_ , 1) UpperCamelCase__ : Tuple = VectorQuantizer(UpperCAmelCase_ , UpperCAmelCase_ , beta=0.25 , remap=UpperCAmelCase_ , sane_index_shape=UpperCAmelCase_) UpperCamelCase__ : Any = nn.Convad(UpperCAmelCase_ , UpperCAmelCase_ , 1) # pass init params to Decoder UpperCamelCase__ : List[str] = Decoder( in_channels=UpperCAmelCase_ , out_channels=UpperCAmelCase_ , up_block_types=UpperCAmelCase_ , block_out_channels=UpperCAmelCase_ , layers_per_block=UpperCAmelCase_ , act_fn=UpperCAmelCase_ , norm_num_groups=UpperCAmelCase_ , norm_type=UpperCAmelCase_ , ) @apply_forward_hook def __UpperCamelCase ( self : Optional[Any] , UpperCAmelCase_ : torch.FloatTensor , UpperCAmelCase_ : bool = True): UpperCamelCase__ : Dict = self.encoder(UpperCAmelCase_) UpperCamelCase__ : Dict = self.quant_conv(UpperCAmelCase_) if not return_dict: return (h,) return VQEncoderOutput(latents=UpperCAmelCase_) @apply_forward_hook def __UpperCamelCase ( self : Union[str, Any] , UpperCAmelCase_ : torch.FloatTensor , UpperCAmelCase_ : bool = False , UpperCAmelCase_ : bool = True): # also go through quantization layer if not force_not_quantize: UpperCamelCase__ : List[str] = self.quantize(UpperCAmelCase_) else: UpperCamelCase__ : Tuple = h UpperCamelCase__ : Union[str, Any] = self.post_quant_conv(UpperCAmelCase_) UpperCamelCase__ : Tuple = self.decoder(UpperCAmelCase_ , quant if self.config.norm_type == 'spatial' else None) if not return_dict: return (dec,) return DecoderOutput(sample=UpperCAmelCase_) def __UpperCamelCase ( self : Optional[int] , UpperCAmelCase_ : torch.FloatTensor , UpperCAmelCase_ : bool = True): UpperCamelCase__ : str = sample UpperCamelCase__ : int = self.encode(UpperCAmelCase_).latents UpperCamelCase__ : Union[str, Any] = self.decode(UpperCAmelCase_).sample if not return_dict: return (dec,) return DecoderOutput(sample=UpperCAmelCase_)
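A round-trip sketch for the VQ autoencoder above, using a tiny randomly initialized instance (all sizes are illustrative):

import torch
from diffusers import VQModel

model = VQModel(
    in_channels=3,
    out_channels=3,
    latent_channels=3,
    num_vq_embeddings=64,
    block_out_channels=(32,),
)
x = torch.randn(1, 3, 32, 32)
latents = model.encode(x).latents
reconstruction = model.decode(latents).sample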
700
from typing import Union

import fire
import torch
from tqdm import tqdm


def convert(src_path: str, map_location: str = "cpu", save_path: Union[str, None] = None) -> None:
    """Cast every tensor in a saved state dict to FP16 and write it back to disk."""
    state_dict = torch.load(src_path, map_location=map_location)
    for k, v in tqdm(state_dict.items()):
        if not isinstance(v, torch.Tensor):
            raise TypeError("FP16 conversion only works on paths that are saved state dicts, like pytorch_model.bin")
        state_dict[k] = v.half()
    if save_path is None:  # overwrite src_path
        save_path = src_path
    torch.save(state_dict, save_path)


if __name__ == "__main__":
    fire.Fire(convert)
6
0
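A minimal usage sketch for the FP16 conversion script above; the module name `fp16_convert` and the file paths are illustrative assumptions.

import torch
from fp16_convert import convert  # assuming the script above is saved as fp16_convert.py

torch.save({"weight": torch.randn(2, 2)}, "toy_model.bin")  # a toy FP32 state dict
convert("toy_model.bin", save_path="toy_model_fp16.bin")
assert torch.load("toy_model_fp16.bin")["weight"].dtype == torch.float16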
# Lint as: python3
import sys
from collections.abc import Mapping
from typing import TYPE_CHECKING, Dict, Optional

import numpy as np
import pyarrow as pa

from .. import config
from ..utils.logging import get_logger
from ..utils.py_utils import map_nested
from .formatting import TensorFormatter


if TYPE_CHECKING:
    import jax
    import jaxlib

logger = get_logger()

DEVICE_MAPPING: Optional[dict] = None


class JaxFormatter(TensorFormatter[Mapping, "jax.Array", Mapping]):
    def __init__(self, features=None, device=None, **jnp_array_kwargs):
        super().__init__(features=features)
        import jax
        from jaxlib.xla_client import Device

        if isinstance(device, Device):
            raise ValueError(
                f"Expected {device} to be a `str` not {type(device)}, as `jaxlib.xla_extension.Device` "
                "is not serializable neither with `pickle` nor with `dill`. Instead you can surround "
                "the device with `str()` to get its string identifier that will be internally mapped "
                "to the actual `jaxlib.xla_extension.Device`."
            )
        self.device = device if isinstance(device, str) else str(jax.devices()[0])
        # using global variable since `jaxlib.xla_extension.Device` is not serializable neither
        # with `pickle` nor with `dill`, so we need to use a global variable instead
        global DEVICE_MAPPING
        if DEVICE_MAPPING is None:
            DEVICE_MAPPING = self._map_devices_to_str()
        if self.device not in list(DEVICE_MAPPING.keys()):
            logger.warning(
                f"Device with string identifier {self.device} not listed among the available "
                f"devices: {list(DEVICE_MAPPING.keys())}, so falling back to the default "
                f"device: {str(jax.devices()[0])}."
            )
            self.device = str(jax.devices()[0])
        self.jnp_array_kwargs = jnp_array_kwargs

    @staticmethod
    def _map_devices_to_str() -> Dict[str, "jaxlib.xla_extension.Device"]:
        import jax

        return {str(device): device for device in jax.devices()}

    def _consolidate(self, column):
        import jax
        import jax.numpy as jnp

        if isinstance(column, list) and column:
            if all(
                isinstance(x, jax.Array) and x.shape == column[0].shape and x.dtype == column[0].dtype
                for x in column
            ):
                return jnp.stack(column, axis=0)
        return column

    def _tensorize(self, value):
        import jax
        import jax.numpy as jnp

        if isinstance(value, (str, bytes, type(None))):
            return value
        elif isinstance(value, (np.character, np.ndarray)) and np.issubdtype(value.dtype, np.character):
            return value.tolist()

        default_dtype = {}

        if isinstance(value, (np.number, np.ndarray)) and np.issubdtype(value.dtype, np.integer):
            # the default int precision depends on the jax config
            # see https://jax.readthedocs.io/en/latest/notebooks/Common_Gotchas_in_JAX.html#double-64bit-precision
            if jax.config.jax_enable_x64:
                default_dtype = {"dtype": jnp.int64}
            else:
                default_dtype = {"dtype": jnp.int32}
        elif isinstance(value, (np.number, np.ndarray)) and np.issubdtype(value.dtype, np.floating):
            default_dtype = {"dtype": jnp.float32}
        elif config.PIL_AVAILABLE and "PIL" in sys.modules:
            import PIL.Image

            if isinstance(value, PIL.Image.Image):
                value = np.asarray(value)

        # using global variable since `jaxlib.xla_extension.Device` is not serializable neither
        # with `pickle` nor with `dill`, so we need to use a global variable instead
        global DEVICE_MAPPING
        if DEVICE_MAPPING is None:
            DEVICE_MAPPING = self._map_devices_to_str()

        with jax.default_device(DEVICE_MAPPING[self.device]):
            # calling jnp.array on a np.ndarray does copy the data
            # see https://github.com/google/jax/issues/4486
            return jnp.array(value, **{**default_dtype, **self.jnp_array_kwargs})

    def _recursive_tensorize(self, data_struct):
        import jax

        # support for torch, tf, jax etc.
        if config.TORCH_AVAILABLE and "torch" in sys.modules:
            import torch

            if isinstance(data_struct, torch.Tensor):
                return self._tensorize(data_struct.detach().cpu().numpy()[()])
        if hasattr(data_struct, "__array__") and not isinstance(data_struct, jax.Array):
            data_struct = data_struct.__array__()
        # support for nested types like struct of list of struct
        if isinstance(data_struct, np.ndarray):
            if data_struct.dtype == object:  # jax arrays cannot be instantiated from an array of objects
                return self._consolidate([self.recursive_tensorize(substruct) for substruct in data_struct])
        elif isinstance(data_struct, (list, tuple)):
            return self._consolidate([self.recursive_tensorize(substruct) for substruct in data_struct])
        return self._tensorize(data_struct)

    def recursive_tensorize(self, data_struct: dict):
        return map_nested(self._recursive_tensorize, data_struct, map_list=False)

    def format_row(self, pa_table: pa.Table) -> Mapping:
        row = self.numpy_arrow_extractor().extract_row(pa_table)
        row = self.python_features_decoder.decode_row(row)
        return self.recursive_tensorize(row)

    def format_column(self, pa_table: pa.Table) -> "jax.Array":
        column = self.numpy_arrow_extractor().extract_column(pa_table)
        column = self.python_features_decoder.decode_column(column, pa_table.column_names[0])
        column = self.recursive_tensorize(column)
        column = self._consolidate(column)
        return column

    def format_batch(self, pa_table: pa.Table) -> Mapping:
        batch = self.numpy_arrow_extractor().extract_batch(pa_table)
        batch = self.python_features_decoder.decode_batch(batch)
        batch = self.recursive_tensorize(batch)
        for column_name in batch:
            batch[column_name] = self._consolidate(batch[column_name])
        return batch
40
import warnings from typing import Dict import numpy as np from ..utils import ExplicitEnum, add_end_docstrings, is_tf_available, is_torch_available from .base import PIPELINE_INIT_ARGS, GenericTensor, Pipeline if is_tf_available(): from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING if is_torch_available(): from ..models.auto.modeling_auto import MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING def __A ( __lowerCamelCase ) -> List[str]: return 1.0 / (1.0 + np.exp(-_outputs )) def __A ( __lowerCamelCase ) -> List[str]: a = np.max(_outputs , axis=-1 , keepdims=__lowerCamelCase ) a = np.exp(_outputs - maxes ) return shifted_exp / shifted_exp.sum(axis=-1 , keepdims=__lowerCamelCase ) class __lowerCAmelCase ( __magic_name__ ): UpperCamelCase__ = '''sigmoid''' UpperCamelCase__ = '''softmax''' UpperCamelCase__ = '''none''' @add_end_docstrings( __magic_name__ , r''' return_all_scores (`bool`, *optional*, defaults to `False`): Whether to return all prediction scores or just the one of the predicted class. function_to_apply (`str`, *optional*, defaults to `"default"`): The function to apply to the model outputs in order to retrieve the scores. Accepts four different values: - `"default"`: if the model has a single label, will apply the sigmoid function on the output. If the model has several labels, will apply the softmax function on the output. - `"sigmoid"`: Applies the sigmoid function on the output. - `"softmax"`: Applies the softmax function on the output. - `"none"`: Does not apply any function on the output. ''' , ) class __lowerCAmelCase ( __magic_name__ ): UpperCamelCase__ = False UpperCamelCase__ = ClassificationFunction.NONE def __init__( self :List[str] , **__magic_name__ :List[Any] ): '''simple docstring''' super().__init__(**__magic_name__ ) self.check_model_type( TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING if self.framework == """tf""" else MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING ) def lowerCamelCase__ ( self :Any , __magic_name__ :int=None , __magic_name__ :Any=None , __magic_name__ :Union[str, Any]="" , **__magic_name__ :Tuple ): '''simple docstring''' a = tokenizer_kwargs a = {} if hasattr(self.model.config , """return_all_scores""" ) and return_all_scores is None: a = self.model.config.return_all_scores if isinstance(__magic_name__ , __magic_name__ ) or top_k is None: a = top_k a = False elif return_all_scores is not None: warnings.warn( """`return_all_scores` is now deprecated, if want a similar functionality use `top_k=None` instead of""" """ `return_all_scores=True` or `top_k=1` instead of `return_all_scores=False`.""" , __magic_name__ , ) if return_all_scores: a = None else: a = 1 if isinstance(__magic_name__ , __magic_name__ ): a = ClassificationFunction[function_to_apply.upper()] if function_to_apply is not None: a = function_to_apply return preprocess_params, {}, postprocess_params def __call__( self :Dict , *__magic_name__ :Optional[int] , **__magic_name__ :Optional[Any] ): '''simple docstring''' a = super().__call__(*__magic_name__ , **__magic_name__ ) # TODO try and retrieve it in a nicer way from _sanitize_parameters. 
a = """top_k""" not in kwargs if isinstance(args[0] , __magic_name__ ) and _legacy: # This pipeline is odd, and return a list when single item is run return [result] else: return result def lowerCamelCase__ ( self :Union[str, Any] , __magic_name__ :Optional[Any] , **__magic_name__ :Optional[Any] ): '''simple docstring''' a = self.framework if isinstance(__magic_name__ , __magic_name__ ): return self.tokenizer(**__magic_name__ , return_tensors=__magic_name__ , **__magic_name__ ) elif isinstance(__magic_name__ , __magic_name__ ) and len(__magic_name__ ) == 1 and isinstance(inputs[0] , __magic_name__ ) and len(inputs[0] ) == 2: # It used to be valid to use a list of list of list for text pairs, keeping this path for BC return self.tokenizer( text=inputs[0][0] , text_pair=inputs[0][1] , return_tensors=__magic_name__ , **__magic_name__ ) elif isinstance(__magic_name__ , __magic_name__ ): # This is likely an invalid usage of the pipeline attempting to pass text pairs. raise ValueError( """The pipeline received invalid inputs, if you are trying to send text pairs, you can try to send a""" """ dictionary `{\"text\": \"My text\", \"text_pair\": \"My pair\"}` in order to send a text pair.""" ) return self.tokenizer(__magic_name__ , return_tensors=__magic_name__ , **__magic_name__ ) def lowerCamelCase__ ( self :List[str] , __magic_name__ :Tuple ): '''simple docstring''' return self.model(**__magic_name__ ) def lowerCamelCase__ ( self :Dict , __magic_name__ :Union[str, Any] , __magic_name__ :int=None , __magic_name__ :Union[str, Any]=1 , __magic_name__ :Tuple=True ): '''simple docstring''' if function_to_apply is None: if self.model.config.problem_type == "multi_label_classification" or self.model.config.num_labels == 1: a = ClassificationFunction.SIGMOID elif self.model.config.problem_type == "single_label_classification" or self.model.config.num_labels > 1: a = ClassificationFunction.SOFTMAX elif hasattr(self.model.config , """function_to_apply""" ) and function_to_apply is None: a = self.model.config.function_to_apply else: a = ClassificationFunction.NONE a = model_outputs["""logits"""][0] a = outputs.numpy() if function_to_apply == ClassificationFunction.SIGMOID: a = sigmoid(__magic_name__ ) elif function_to_apply == ClassificationFunction.SOFTMAX: a = softmax(__magic_name__ ) elif function_to_apply == ClassificationFunction.NONE: a = outputs else: raise ValueError(F'Unrecognized `function_to_apply` argument: {function_to_apply}' ) if top_k == 1 and _legacy: return {"label": self.model.config.idalabel[scores.argmax().item()], "score": scores.max().item()} a = [ {"""label""": self.model.config.idalabel[i], """score""": score.item()} for i, score in enumerate(__magic_name__ ) ] if not _legacy: dict_scores.sort(key=lambda __magic_name__ : x["score"] , reverse=__magic_name__ ) if top_k is not None: a = dict_scores[:top_k] return dict_scores
468
0
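A self-contained sketch of the two score functions the text-classification pipeline above chooses between; the logits are made-up values.

import numpy as np

def sigmoid(x):
    return 1.0 / (1.0 + np.exp(-x))

def softmax(x):
    maxes = np.max(x, axis=-1, keepdims=True)  # subtract the max for numerical stability
    shifted = np.exp(x - maxes)
    return shifted / shifted.sum(axis=-1, keepdims=True)

logits = np.array([[2.0, -1.0, 0.5]])
print(softmax(logits))  # multi-class: each row sums to 1
print(sigmoid(logits))  # multi-label: each score lies independently in (0, 1)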
"""simple docstring""" from collections import OrderedDict from typing import Mapping from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging __A = logging.get_logger(__name__) __A = { '''distilbert-base-uncased''': '''https://huggingface.co/distilbert-base-uncased/resolve/main/config.json''', '''distilbert-base-uncased-distilled-squad''': ( '''https://huggingface.co/distilbert-base-uncased-distilled-squad/resolve/main/config.json''' ), '''distilbert-base-cased''': '''https://huggingface.co/distilbert-base-cased/resolve/main/config.json''', '''distilbert-base-cased-distilled-squad''': ( '''https://huggingface.co/distilbert-base-cased-distilled-squad/resolve/main/config.json''' ), '''distilbert-base-german-cased''': '''https://huggingface.co/distilbert-base-german-cased/resolve/main/config.json''', '''distilbert-base-multilingual-cased''': ( '''https://huggingface.co/distilbert-base-multilingual-cased/resolve/main/config.json''' ), '''distilbert-base-uncased-finetuned-sst-2-english''': ( '''https://huggingface.co/distilbert-base-uncased-finetuned-sst-2-english/resolve/main/config.json''' ), } class _UpperCAmelCase ( _A ): SCREAMING_SNAKE_CASE_ : Optional[Any] = "distilbert" SCREAMING_SNAKE_CASE_ : List[str] = { "hidden_size": "dim", "num_attention_heads": "n_heads", "num_hidden_layers": "n_layers", } def __init__( self : int , A : str=3_05_22 , A : Union[str, Any]=5_12 , A : Tuple=False , A : str=6 , A : Union[str, Any]=12 , A : Any=7_68 , A : List[Any]=4 * 7_68 , A : Any=0.1 , A : Optional[Any]=0.1 , A : Optional[int]="gelu" , A : int=0.02 , A : Tuple=0.1 , A : List[Any]=0.2 , A : int=0 , **A : Optional[Any] , ) -> Optional[int]: lowercase_ : List[Any] = vocab_size lowercase_ : Union[str, Any] = max_position_embeddings lowercase_ : Dict = sinusoidal_pos_embds lowercase_ : str = n_layers lowercase_ : Optional[int] = n_heads lowercase_ : int = dim lowercase_ : Dict = hidden_dim lowercase_ : Dict = dropout lowercase_ : List[Any] = attention_dropout lowercase_ : Dict = activation lowercase_ : List[str] = initializer_range lowercase_ : Dict = qa_dropout lowercase_ : Optional[int] = seq_classif_dropout super().__init__(**A , pad_token_id=A ) class _UpperCAmelCase ( _A ): @property def A ( self : List[Any] ) -> Mapping[str, Mapping[int, str]]: if self.task == "multiple-choice": lowercase_ : Dict = {0: '''batch''', 1: '''choice''', 2: '''sequence'''} else: lowercase_ : List[str] = {0: '''batch''', 1: '''sequence'''} return OrderedDict( [ ('''input_ids''', dynamic_axis), ('''attention_mask''', dynamic_axis), ] )
702
"""simple docstring""" from math import ceil, sqrt def lowercase ( __snake_case : int = 1_0_0_0_0_0_0 ): lowercase_ : Tuple = 0 for outer_width in range(3 , (limit // 4) + 2 ): if outer_width**2 > limit: lowercase_ : int = max(ceil(sqrt(outer_width**2 - limit ) ) , 1 ) else: lowercase_ : int = 1 if (outer_width - hole_width_lower_bound) % 2: hole_width_lower_bound += 1 answer += (outer_width - hole_width_lower_bound - 2) // 2 + 1 return answer if __name__ == "__main__": print(F"""{solution() = }""")
141
0
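A brute-force cross-check for the lamina-counting solution above, kept to a small limit so it runs quickly; it assumes `solution()` is in scope, and the helper name is illustrative.

def brute_force(limit: int) -> int:
    count = 0
    outer = 3
    while 4 * (outer - 1) <= limit:  # the thinnest lamina of width `outer` uses 4*(outer-1) tiles
        for hole in range(outer - 2, 0, -2):  # hole width shares the outer square's parity
            if outer**2 - hole**2 <= limit:
                count += 1
        outer += 1
    return count

assert brute_force(100) == solution(100)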
"""simple docstring""" import gc import random import unittest import numpy as np import torch from PIL import Image from transformers import XLMRobertaTokenizerFast from diffusers import DDIMScheduler, KandinskyInpaintPipeline, KandinskyPriorPipeline, UNetaDConditionModel, VQModel from diffusers.pipelines.kandinsky.text_encoder import MCLIPConfig, MultilingualCLIP from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference enable_full_determinism() class __UpperCamelCase ( lowerCamelCase__ , unittest.TestCase ): lowerCamelCase : Any =KandinskyInpaintPipeline lowerCamelCase : Tuple =["""prompt""", """image_embeds""", """negative_image_embeds""", """image""", """mask_image"""] lowerCamelCase : List[Any] =[ """prompt""", """negative_prompt""", """image_embeds""", """negative_image_embeds""", """image""", """mask_image""", ] lowerCamelCase : str =[ """generator""", """height""", """width""", """latents""", """guidance_scale""", """negative_prompt""", """num_inference_steps""", """return_dict""", """guidance_scale""", """num_images_per_prompt""", """output_type""", """return_dict""", ] lowerCamelCase : Any =False @property def __a ( self ) -> str: return 32 @property def __a ( self ) -> List[Any]: return 32 @property def __a ( self ) -> List[str]: return self.time_input_dim @property def __a ( self ) -> Union[str, Any]: return self.time_input_dim * 4 @property def __a ( self ) -> Any: return 100 @property def __a ( self ) -> Tuple: a : Optional[int] = XLMRobertaTokenizerFast.from_pretrained("YiYiXu/tiny-random-mclip-base" ) return tokenizer @property def __a ( self ) -> Optional[int]: torch.manual_seed(0 ) a : Union[str, Any] = MCLIPConfig( numDims=self.cross_attention_dim , transformerDimensions=self.text_embedder_hidden_size , hidden_size=self.text_embedder_hidden_size , intermediate_size=37 , num_attention_heads=4 , num_hidden_layers=5 , vocab_size=1005 , ) a : Optional[Any] = MultilingualCLIP(SCREAMING_SNAKE_CASE__ ) a : List[Any] = text_encoder.eval() return text_encoder @property def __a ( self ) -> int: torch.manual_seed(0 ) a : Any = { '''in_channels''': 9, # Out channels is double in channels because predicts mean and variance '''out_channels''': 8, '''addition_embed_type''': '''text_image''', '''down_block_types''': ('''ResnetDownsampleBlock2D''', '''SimpleCrossAttnDownBlock2D'''), '''up_block_types''': ('''SimpleCrossAttnUpBlock2D''', '''ResnetUpsampleBlock2D'''), '''mid_block_type''': '''UNetMidBlock2DSimpleCrossAttn''', '''block_out_channels''': (self.block_out_channels_a, self.block_out_channels_a * 2), '''layers_per_block''': 1, '''encoder_hid_dim''': self.text_embedder_hidden_size, '''encoder_hid_dim_type''': '''text_image_proj''', '''cross_attention_dim''': self.cross_attention_dim, '''attention_head_dim''': 4, '''resnet_time_scale_shift''': '''scale_shift''', '''class_embed_type''': None, } a : Dict = UNetaDConditionModel(**SCREAMING_SNAKE_CASE__ ) return model @property def __a ( self ) -> str: return { "block_out_channels": [32, 64], "down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"], "in_channels": 3, "latent_channels": 4, "layers_per_block": 1, "norm_num_groups": 8, "norm_type": "spatial", "num_vq_embeddings": 12, "out_channels": 3, "up_block_types": [ "AttnUpDecoderBlock2D", "UpDecoderBlock2D", ], "vq_embed_dim": 4, } @property def __a ( self ) -> 
Optional[Any]: torch.manual_seed(0 ) a : List[Any] = VQModel(**self.dummy_movq_kwargs ) return model def __a ( self ) -> Any: a : int = self.dummy_text_encoder a : str = self.dummy_tokenizer a : Dict = self.dummy_unet a : Dict = self.dummy_movq a : Optional[Any] = DDIMScheduler( num_train_timesteps=1000 , beta_schedule="linear" , beta_start=0.00_085 , beta_end=0.012 , clip_sample=SCREAMING_SNAKE_CASE__ , set_alpha_to_one=SCREAMING_SNAKE_CASE__ , steps_offset=1 , prediction_type="epsilon" , thresholding=SCREAMING_SNAKE_CASE__ , ) a : int = { '''text_encoder''': text_encoder, '''tokenizer''': tokenizer, '''unet''': unet, '''scheduler''': scheduler, '''movq''': movq, } return components def __a ( self , lowerCAmelCase__ , lowerCAmelCase__=0 ) -> Any: a : List[Any] = floats_tensor((1, self.cross_attention_dim) , rng=random.Random(SCREAMING_SNAKE_CASE__ ) ).to(SCREAMING_SNAKE_CASE__ ) a : int = floats_tensor((1, self.cross_attention_dim) , rng=random.Random(seed + 1 ) ).to(SCREAMING_SNAKE_CASE__ ) # create init_image a : Optional[Any] = floats_tensor((1, 3, 64, 64) , rng=random.Random(SCREAMING_SNAKE_CASE__ ) ).to(SCREAMING_SNAKE_CASE__ ) a : Any = image.cpu().permute(0 , 2 , 3 , 1 )[0] a : List[Any] = Image.fromarray(np.uinta(SCREAMING_SNAKE_CASE__ ) ).convert("RGB" ).resize((256, 256) ) # create mask a : Tuple = np.ones((64, 64) , dtype=np.floataa ) a : List[str] = 0 if str(SCREAMING_SNAKE_CASE__ ).startswith("mps" ): a : Dict = torch.manual_seed(SCREAMING_SNAKE_CASE__ ) else: a : Any = torch.Generator(device=SCREAMING_SNAKE_CASE__ ).manual_seed(SCREAMING_SNAKE_CASE__ ) a : Any = { '''prompt''': '''horse''', '''image''': init_image, '''mask_image''': mask, '''image_embeds''': image_embeds, '''negative_image_embeds''': negative_image_embeds, '''generator''': generator, '''height''': 64, '''width''': 64, '''num_inference_steps''': 2, '''guidance_scale''': 4.0, '''output_type''': '''np''', } return inputs def __a ( self ) -> int: a : int = '''cpu''' a : Union[str, Any] = self.get_dummy_components() a : Tuple = self.pipeline_class(**SCREAMING_SNAKE_CASE__ ) a : Union[str, Any] = pipe.to(SCREAMING_SNAKE_CASE__ ) pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE__ ) a : int = pipe(**self.get_dummy_inputs(SCREAMING_SNAKE_CASE__ ) ) a : Optional[int] = output.images a : Optional[int] = pipe( **self.get_dummy_inputs(SCREAMING_SNAKE_CASE__ ) , return_dict=SCREAMING_SNAKE_CASE__ , )[0] a : Optional[int] = image[0, -3:, -3:, -1] a : Optional[Any] = image_from_tuple[0, -3:, -3:, -1] print(f"""image.shape {image.shape}""" ) assert image.shape == (1, 64, 64, 3) a : Any = np.array( [0.8_326_919, 0.73_790_467, 0.20_918_581, 0.9_309_612, 0.5_511_791, 0.43_713_328, 0.5_513_321, 0.49_922_934, 0.59_497_786] ) assert ( np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 ), f""" expected_slice {expected_slice}, but got {image_slice.flatten()}""" assert ( np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2 ), f""" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}""" def __a ( self ) -> List[str]: super().test_inference_batch_single_identical(expected_max_diff=3E-3 ) @slow @require_torch_gpu class __UpperCamelCase ( unittest.TestCase ): def __a ( self ) -> str: super().tearDown() gc.collect() torch.cuda.empty_cache() def __a ( self ) -> List[str]: a : List[Any] = load_numpy( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/kandinsky/kandinsky_inpaint_cat_with_hat_fp16.npy" ) a : str = load_image( 
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/kandinsky/cat.png" ) a : str = np.ones((768, 768) , dtype=np.floataa ) a : List[Any] = 0 a : Optional[Any] = '''a hat''' a : Tuple = KandinskyPriorPipeline.from_pretrained( "kandinsky-community/kandinsky-2-1-prior" , torch_dtype=torch.floataa ) pipe_prior.to(SCREAMING_SNAKE_CASE__ ) a : Union[str, Any] = KandinskyInpaintPipeline.from_pretrained( "kandinsky-community/kandinsky-2-1-inpaint" , torch_dtype=torch.floataa ) a : List[Any] = pipeline.to(SCREAMING_SNAKE_CASE__ ) pipeline.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE__ ) a : Dict = torch.Generator(device="cpu" ).manual_seed(0 ) a : Any = pipe_prior( SCREAMING_SNAKE_CASE__ , generator=SCREAMING_SNAKE_CASE__ , num_inference_steps=5 , negative_prompt="" , ).to_tuple() a : Union[str, Any] = pipeline( SCREAMING_SNAKE_CASE__ , image=SCREAMING_SNAKE_CASE__ , mask_image=SCREAMING_SNAKE_CASE__ , image_embeds=SCREAMING_SNAKE_CASE__ , negative_image_embeds=SCREAMING_SNAKE_CASE__ , generator=SCREAMING_SNAKE_CASE__ , num_inference_steps=100 , height=768 , width=768 , output_type="np" , ) a : Optional[Any] = output.images[0] assert image.shape == (768, 768, 3) assert_mean_pixel_difference(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
633
"""simple docstring""" # Logistic Regression from scratch # In[62]: # In[63]: # importing all the required libraries import numpy as np from matplotlib import pyplot as plt from sklearn import datasets def A_ ( snake_case__ ) -> str: return 1 / (1 + np.exp(-z )) def A_ ( snake_case__ , snake_case__ ) -> str: return (-y * np.log(snake_case__ ) - (1 - y) * np.log(1 - h )).mean() def A_ ( snake_case__ , snake_case__ , snake_case__ ) -> Union[str, Any]: _UpperCamelCase :Tuple = np.dot(snake_case__ , snake_case__ ) return np.sum(y * scores - np.log(1 + np.exp(snake_case__ ) ) ) def A_ ( snake_case__ , snake_case__ , snake_case__ , snake_case__=7_00_00 ) -> Optional[int]: _UpperCamelCase :Union[str, Any] = np.zeros(x.shape[1] ) for iterations in range(snake_case__ ): _UpperCamelCase :Optional[int] = np.dot(snake_case__ , snake_case__ ) _UpperCamelCase :Tuple = sigmoid_function(snake_case__ ) _UpperCamelCase :Optional[Any] = np.dot(x.T , h - y ) / y.size _UpperCamelCase :int = theta - alpha * gradient # updating the weights _UpperCamelCase :Union[str, Any] = np.dot(snake_case__ , snake_case__ ) _UpperCamelCase :Dict = sigmoid_function(snake_case__ ) _UpperCamelCase :Optional[int] = cost_function(snake_case__ , snake_case__ ) if iterations % 1_00 == 0: print(f"loss: {j} \t" ) # printing the loss after every 100 iterations return theta # In[68]: if __name__ == "__main__": UpperCamelCase__ :Union[str, Any] = datasets.load_iris() UpperCamelCase__ :Dict = iris.data[:, :2] UpperCamelCase__ :Any = (iris.target != 0) * 1 UpperCamelCase__ :Tuple = 0.1 UpperCamelCase__ :List[str] = logistic_reg(alpha, x, y, max_iterations=70_000) print("""theta: """, theta) # printing the theta i.e our weights vector def A_ ( snake_case__ ) -> Optional[Any]: return sigmoid_function( np.dot(snake_case__ , snake_case__ ) ) # predicting the value of probability from the logistic regression algorithm plt.figure(figsize=(10, 6)) plt.scatter(x[y == 0][:, 0], x[y == 0][:, 1], color="""b""", label="""0""") plt.scatter(x[y == 1][:, 0], x[y == 1][:, 1], color="""r""", label="""1""") ((UpperCamelCase__) , (UpperCamelCase__)) :Union[str, Any] = (x[:, 0].min(), x[:, 0].max()) ((UpperCamelCase__) , (UpperCamelCase__)) :Tuple = (x[:, 1].min(), x[:, 1].max()) ((UpperCamelCase__) , (UpperCamelCase__)) :Tuple = np.meshgrid(np.linspace(xa_min, xa_max), np.linspace(xa_min, xa_max)) UpperCamelCase__ :List[Any] = np.c_[xxa.ravel(), xxa.ravel()] UpperCamelCase__ :List[Any] = predict_prob(grid).reshape(xxa.shape) plt.contour(xxa, xxa, probs, [0.5], linewidths=1, colors="""black""") plt.legend() plt.show()
355
0
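A toy run of the trainer above on linearly separable synthetic data; the sample data, seed, and hyperparameters are illustrative, and `logistic_reg` and `sigmoid_function` are assumed to be in scope.

import numpy as np

rng = np.random.default_rng(0)
x = np.vstack([rng.normal(0, 1, (50, 2)), rng.normal(3, 1, (50, 2))])  # two well-separated clusters
y = np.array([0] * 50 + [1] * 50)
theta = logistic_reg(alpha=0.1, x=x, y=y, max_iterations=1_000)
preds = sigmoid_function(x @ theta) > 0.5
print("training accuracy:", (preds == y).mean())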
"""simple docstring""" import unittest from parameterized import parameterized from transformers import OpenLlamaConfig, is_torch_available, set_seed from transformers.testing_utils import require_torch, torch_device from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import OpenLlamaForCausalLM, OpenLlamaForSequenceClassification, OpenLlamaModel class __A : '''simple docstring''' def __init__( self : Tuple ,_snake_case : Tuple ,_snake_case : Dict=13 ,_snake_case : Optional[int]=7 ,_snake_case : List[str]=True ,_snake_case : Optional[Any]=True ,_snake_case : str=False ,_snake_case : Optional[int]=True ,_snake_case : int=99 ,_snake_case : int=32 ,_snake_case : str=5 ,_snake_case : Any=4 ,_snake_case : str=37 ,_snake_case : str="gelu" ,_snake_case : Optional[Any]=0.1 ,_snake_case : Union[str, Any]=0.1 ,_snake_case : str=512 ,_snake_case : Dict=16 ,_snake_case : Dict=2 ,_snake_case : Tuple=0.02 ,_snake_case : int=3 ,_snake_case : Optional[int]=4 ,_snake_case : int=None ,) -> Tuple: """simple docstring""" lowercase__ : Optional[Any] = parent lowercase__ : List[str] = batch_size lowercase__ : str = seq_length lowercase__ : Tuple = is_training lowercase__ : List[str] = use_input_mask lowercase__ : Optional[Any] = use_token_type_ids lowercase__ : str = use_labels lowercase__ : Any = vocab_size lowercase__ : str = hidden_size lowercase__ : int = num_hidden_layers lowercase__ : int = num_attention_heads lowercase__ : Optional[int] = intermediate_size lowercase__ : Dict = hidden_act lowercase__ : Optional[int] = hidden_dropout_prob lowercase__ : Optional[int] = attention_probs_dropout_prob lowercase__ : List[str] = max_position_embeddings lowercase__ : Union[str, Any] = type_vocab_size lowercase__ : Any = type_sequence_label_size lowercase__ : List[str] = initializer_range lowercase__ : Tuple = num_labels lowercase__ : int = num_choices lowercase__ : Optional[int] = scope def UpperCAmelCase ( self : str ) -> Tuple: """simple docstring""" lowercase__ : int = ids_tensor([self.batch_size, self.seq_length] ,self.vocab_size ) lowercase__ : Optional[int] = None if self.use_input_mask: lowercase__ : Optional[int] = random_attention_mask([self.batch_size, self.seq_length] ) lowercase__ : Tuple = None if self.use_token_type_ids: lowercase__ : Optional[int] = ids_tensor([self.batch_size, self.seq_length] ,self.type_vocab_size ) lowercase__ : Optional[Any] = None lowercase__ : Any = None lowercase__ : List[Any] = None if self.use_labels: lowercase__ : List[str] = ids_tensor([self.batch_size] ,self.type_sequence_label_size ) lowercase__ : str = ids_tensor([self.batch_size, self.seq_length] ,self.num_labels ) lowercase__ : List[Any] = ids_tensor([self.batch_size] ,self.num_choices ) lowercase__ : str = self.get_config() return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def UpperCAmelCase ( self : int ) -> List[str]: """simple docstring""" return OpenLlamaConfig( vocab_size=self.vocab_size ,hidden_size=self.hidden_size ,num_hidden_layers=self.num_hidden_layers ,num_attention_heads=self.num_attention_heads ,intermediate_size=self.intermediate_size ,hidden_act=self.hidden_act ,hidden_dropout_prob=self.hidden_dropout_prob ,attention_probs_dropout_prob=self.attention_probs_dropout_prob 
,max_position_embeddings=self.max_position_embeddings ,type_vocab_size=self.type_vocab_size ,is_decoder=_snake_case ,initializer_range=self.initializer_range ,use_stable_embedding=_snake_case ,) def UpperCAmelCase ( self : Optional[Any] ,_snake_case : List[Any] ,_snake_case : Tuple ,_snake_case : int ,_snake_case : Optional[Any] ,_snake_case : Optional[int] ,_snake_case : Tuple ,_snake_case : Tuple ) -> Optional[int]: """simple docstring""" lowercase__ : Optional[int] = OpenLlamaModel(config=_snake_case ) model.to(_snake_case ) model.eval() lowercase__ : Optional[Any] = model(_snake_case ,attention_mask=_snake_case ) lowercase__ : Dict = model(_snake_case ) self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) ) def UpperCAmelCase ( self : List[str] ,_snake_case : Optional[int] ,_snake_case : Tuple ,_snake_case : int ,_snake_case : Tuple ,_snake_case : Union[str, Any] ,_snake_case : str ,_snake_case : List[str] ,_snake_case : List[str] ,_snake_case : Optional[Any] ,) -> Optional[Any]: """simple docstring""" lowercase__ : List[str] = True lowercase__ : Tuple = OpenLlamaModel(_snake_case ) model.to(_snake_case ) model.eval() lowercase__ : Dict = model( _snake_case ,attention_mask=_snake_case ,encoder_hidden_states=_snake_case ,encoder_attention_mask=_snake_case ,) lowercase__ : str = model( _snake_case ,attention_mask=_snake_case ,encoder_hidden_states=_snake_case ,) lowercase__ : Optional[Any] = model(_snake_case ,attention_mask=_snake_case ) self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) ) def UpperCAmelCase ( self : List[Any] ,_snake_case : Optional[Any] ,_snake_case : Union[str, Any] ,_snake_case : List[str] ,_snake_case : List[Any] ,_snake_case : Any ,_snake_case : List[Any] ,_snake_case : List[str] ,_snake_case : List[Any] ,_snake_case : Union[str, Any] ,) -> Dict: """simple docstring""" lowercase__ : int = OpenLlamaForCausalLM(config=_snake_case ) model.to(_snake_case ) model.eval() lowercase__ : List[str] = model(_snake_case ,attention_mask=_snake_case ,labels=_snake_case ) self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.vocab_size) ) def UpperCAmelCase ( self : Union[str, Any] ,_snake_case : Optional[int] ,_snake_case : Dict ,_snake_case : Union[str, Any] ,_snake_case : Tuple ,_snake_case : str ,_snake_case : int ,_snake_case : List[Any] ,_snake_case : Optional[Any] ,_snake_case : Union[str, Any] ,) -> int: """simple docstring""" lowercase__ : List[Any] = True lowercase__ : Tuple = True lowercase__ : Optional[int] = OpenLlamaForCausalLM(config=_snake_case ) model.to(_snake_case ) model.eval() # first forward pass lowercase__ : List[str] = model( _snake_case ,attention_mask=_snake_case ,encoder_hidden_states=_snake_case ,encoder_attention_mask=_snake_case ,use_cache=_snake_case ,) lowercase__ : Union[str, Any] = outputs.past_key_values # create hypothetical multiple next token and extent to next_input_ids lowercase__ : List[Any] = ids_tensor((self.batch_size, 3) ,config.vocab_size ) lowercase__ : Tuple = ids_tensor((self.batch_size, 3) ,vocab_size=2 ) # append to next input_ids and lowercase__ : str = torch.cat([input_ids, next_tokens] ,dim=-1 ) lowercase__ : Tuple = torch.cat([input_mask, next_mask] ,dim=-1 ) lowercase__ : Tuple = model( _snake_case ,attention_mask=_snake_case ,encoder_hidden_states=_snake_case ,encoder_attention_mask=_snake_case ,output_hidden_states=_snake_case ,)['''hidden_states'''][0] lowercase__ : Union[str, 
Any] = model( _snake_case ,attention_mask=_snake_case ,encoder_hidden_states=_snake_case ,encoder_attention_mask=_snake_case ,past_key_values=_snake_case ,output_hidden_states=_snake_case ,)['''hidden_states'''][0] # select random slice lowercase__ : Optional[int] = ids_tensor((1,) ,output_from_past.shape[-1] ).item() lowercase__ : List[str] = output_from_no_past[:, -3:, random_slice_idx].detach() lowercase__ : int = output_from_past[:, :, random_slice_idx].detach() self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] ) # test that outputs are equal for slice self.parent.assertTrue(torch.allclose(_snake_case ,_snake_case ,atol=1e-3 ) ) def UpperCAmelCase ( self : List[Any] ) -> List[Any]: """simple docstring""" lowercase__ : Optional[Any] = self.prepare_config_and_inputs() ( ( lowercase__ ) , ( lowercase__ ) , ( lowercase__ ) , ( lowercase__ ) , ( lowercase__ ) , ( lowercase__ ) , ( lowercase__ ) , ) : str = config_and_inputs lowercase__ : Dict = {'''input_ids''': input_ids, '''attention_mask''': input_mask} return config, inputs_dict @require_torch class __A ( A_ ,A_ ,A_ ,unittest.TestCase ): '''simple docstring''' lowerCAmelCase : Optional[Any] = ( (OpenLlamaModel, OpenLlamaForCausalLM, OpenLlamaForSequenceClassification) if is_torch_available() else () ) lowerCAmelCase : List[str] = (OpenLlamaForCausalLM,) if is_torch_available() else () lowerCAmelCase : int = ( { "feature-extraction": OpenLlamaModel, "text-classification": OpenLlamaForSequenceClassification, "text-generation": OpenLlamaForCausalLM, "zero-shot": OpenLlamaForSequenceClassification, } if is_torch_available() else {} ) lowerCAmelCase : Union[str, Any] = False lowerCAmelCase : Any = False def UpperCAmelCase ( self : Dict ) -> int: """simple docstring""" lowercase__ : Tuple = OpenLlamaModelTester(self ) lowercase__ : Tuple = ConfigTester(self ,config_class=_snake_case ,hidden_size=37 ) def UpperCAmelCase ( self : List[Any] ) -> Optional[int]: """simple docstring""" self.config_tester.run_common_tests() def UpperCAmelCase ( self : Tuple ) -> Union[str, Any]: """simple docstring""" lowercase__ : List[str] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*_snake_case ) def UpperCAmelCase ( self : Any ) -> Dict: """simple docstring""" lowercase__ : Tuple = self.model_tester.prepare_config_and_inputs() for type in ["absolute", "relative_key", "relative_key_query"]: lowercase__ : int = type self.model_tester.create_and_check_model(*_snake_case ) def UpperCAmelCase ( self : Union[str, Any] ) -> Union[str, Any]: """simple docstring""" lowercase__ , lowercase__ : int = self.model_tester.prepare_config_and_inputs_for_common() lowercase__ : Optional[Any] = 3 lowercase__ : List[str] = input_dict['''input_ids'''] lowercase__ : Optional[Any] = input_ids.ne(1 ).to(_snake_case ) lowercase__ : Optional[int] = ids_tensor([self.model_tester.batch_size] ,self.model_tester.type_sequence_label_size ) lowercase__ : Dict = OpenLlamaForSequenceClassification(_snake_case ) model.to(_snake_case ) model.eval() lowercase__ : Any = model(_snake_case ,attention_mask=_snake_case ,labels=_snake_case ) self.assertEqual(result.logits.shape ,(self.model_tester.batch_size, self.model_tester.num_labels) ) def UpperCAmelCase ( self : str ) -> Optional[int]: """simple docstring""" lowercase__ , lowercase__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common() lowercase__ : Optional[int] = 3 lowercase__ : str = '''single_label_classification''' lowercase__ : Optional[Any] = 
input_dict['''input_ids'''] lowercase__ : Any = input_ids.ne(1 ).to(_snake_case ) lowercase__ : List[Any] = ids_tensor([self.model_tester.batch_size] ,self.model_tester.type_sequence_label_size ) lowercase__ : Any = OpenLlamaForSequenceClassification(_snake_case ) model.to(_snake_case ) model.eval() lowercase__ : Union[str, Any] = model(_snake_case ,attention_mask=_snake_case ,labels=_snake_case ) self.assertEqual(result.logits.shape ,(self.model_tester.batch_size, self.model_tester.num_labels) ) def UpperCAmelCase ( self : Optional[Any] ) -> Dict: """simple docstring""" lowercase__ , lowercase__ : Tuple = self.model_tester.prepare_config_and_inputs_for_common() lowercase__ : Optional[int] = 3 lowercase__ : Tuple = '''multi_label_classification''' lowercase__ : str = input_dict['''input_ids'''] lowercase__ : Union[str, Any] = input_ids.ne(1 ).to(_snake_case ) lowercase__ : List[Any] = ids_tensor( [self.model_tester.batch_size, config.num_labels] ,self.model_tester.type_sequence_label_size ).to(torch.float ) lowercase__ : Optional[Any] = OpenLlamaForSequenceClassification(_snake_case ) model.to(_snake_case ) model.eval() lowercase__ : List[str] = model(_snake_case ,attention_mask=_snake_case ,labels=_snake_case ) self.assertEqual(result.logits.shape ,(self.model_tester.batch_size, self.model_tester.num_labels) ) @unittest.skip('''Open-Llama buffers include complex numbers, which breaks this test''' ) def UpperCAmelCase ( self : List[str] ) -> Optional[int]: """simple docstring""" pass @parameterized.expand([('''linear''',), ('''dynamic''',)] ) def UpperCAmelCase ( self : Optional[Any] ,_snake_case : List[Any] ) -> Optional[int]: """simple docstring""" lowercase__ , lowercase__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common() lowercase__ : Optional[int] = ids_tensor([1, 10] ,config.vocab_size ) lowercase__ : int = ids_tensor([1, int(config.max_position_embeddings * 1.5 )] ,config.vocab_size ) set_seed(42 ) # Fixed seed at init time so the two models get the same random weights lowercase__ : Optional[int] = OpenLlamaModel(_snake_case ) original_model.to(_snake_case ) original_model.eval() lowercase__ : List[Any] = original_model(_snake_case ).last_hidden_state lowercase__ : Optional[Any] = original_model(_snake_case ).last_hidden_state set_seed(42 ) # Fixed seed at init time so the two models get the same random weights lowercase__ : Optional[int] = {'''type''': scaling_type, '''factor''': 10.0} lowercase__ : Optional[int] = OpenLlamaModel(_snake_case ) scaled_model.to(_snake_case ) scaled_model.eval() lowercase__ : Optional[int] = scaled_model(_snake_case ).last_hidden_state lowercase__ : Optional[Any] = scaled_model(_snake_case ).last_hidden_state # Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original # maximum sequence length, so the outputs for the short input should match. if scaling_type == "dynamic": self.assertTrue(torch.allclose(_snake_case ,_snake_case ,atol=1e-5 ) ) else: self.assertFalse(torch.allclose(_snake_case ,_snake_case ,atol=1e-5 ) ) # The output should be different for long inputs self.assertFalse(torch.allclose(_snake_case ,_snake_case ,atol=1e-5 ) )
122
"""simple docstring""" import inspect import unittest from transformers import ViTHybridConfig from transformers.testing_utils import require_accelerate, require_torch, require_vision, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import ViTHybridForImageClassification, ViTHybridImageProcessor, ViTHybridModel from transformers.models.vit_hybrid.modeling_vit_hybrid import VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image class __A : '''simple docstring''' def __init__( self : Optional[Any] ,_snake_case : Any ,_snake_case : str=13 ,_snake_case : int=64 ,_snake_case : Dict=2 ,_snake_case : int=3 ,_snake_case : Optional[Any]=True ,_snake_case : List[str]=True ,_snake_case : Dict=32 ,_snake_case : int=5 ,_snake_case : Any=4 ,_snake_case : Optional[int]=37 ,_snake_case : Dict="gelu" ,_snake_case : Union[str, Any]=0.1 ,_snake_case : List[Any]=0.1 ,_snake_case : int=10 ,_snake_case : Any=0.02 ,_snake_case : List[str]=[1, 16, 4, 4] ,_snake_case : str=None ,) -> List[str]: """simple docstring""" lowercase__ : Optional[int] = parent lowercase__ : Tuple = batch_size lowercase__ : Union[str, Any] = image_size lowercase__ : Dict = patch_size lowercase__ : Dict = num_channels lowercase__ : str = is_training lowercase__ : Optional[int] = use_labels lowercase__ : Dict = hidden_size lowercase__ : List[Any] = num_hidden_layers lowercase__ : List[str] = num_attention_heads lowercase__ : List[str] = intermediate_size lowercase__ : List[Any] = hidden_act lowercase__ : Tuple = hidden_dropout_prob lowercase__ : Union[str, Any] = attention_probs_dropout_prob lowercase__ : str = type_sequence_label_size lowercase__ : Tuple = initializer_range lowercase__ : Union[str, Any] = scope lowercase__ : Optional[Any] = backbone_featmap_shape # in ViT hybrid, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token) # the number of patches is based on the feature map of the backbone, which by default uses an output stride # of 32, which means that the feature map has a spatial resolution of 1/32 of the input image size lowercase__ : List[str] = (self.image_size // 32) ** 2 lowercase__ : List[str] = num_patches + 1 def UpperCAmelCase ( self : str ) -> Union[str, Any]: """simple docstring""" lowercase__ : Any = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) lowercase__ : int = None if self.use_labels: lowercase__ : Dict = ids_tensor([self.batch_size] ,self.type_sequence_label_size ) lowercase__ : List[str] = self.get_config() return config, pixel_values, labels def UpperCAmelCase ( self : int ) -> Tuple: """simple docstring""" lowercase__ : str = { '''global_padding''': '''same''', '''layer_type''': '''bottleneck''', '''depths''': [3, 4, 9], '''out_features''': ['''stage1''', '''stage2''', '''stage3'''], '''embedding_dynamic_padding''': True, '''hidden_sizes''': [4, 8, 16, 32], '''num_groups''': 2, } return ViTHybridConfig( image_size=self.image_size ,patch_size=self.patch_size ,num_channels=self.num_channels ,hidden_size=self.hidden_size ,num_hidden_layers=self.num_hidden_layers ,num_attention_heads=self.num_attention_heads ,intermediate_size=self.intermediate_size 
,hidden_act=self.hidden_act ,hidden_dropout_prob=self.hidden_dropout_prob ,attention_probs_dropout_prob=self.attention_probs_dropout_prob ,is_decoder=_snake_case ,initializer_range=self.initializer_range ,backbone_featmap_shape=self.backbone_featmap_shape ,backbone_config=_snake_case ,) def UpperCAmelCase ( self : int ,_snake_case : Dict ,_snake_case : str ,_snake_case : Optional[Any] ) -> List[Any]: """simple docstring""" lowercase__ : int = ViTHybridModel(config=_snake_case ) model.to(_snake_case ) model.eval() lowercase__ : str = model(_snake_case ) self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) ) def UpperCAmelCase ( self : Optional[Any] ,_snake_case : Dict ,_snake_case : Dict ,_snake_case : Dict ) -> List[str]: """simple docstring""" lowercase__ : List[str] = self.type_sequence_label_size lowercase__ : str = ViTHybridForImageClassification(_snake_case ) model.to(_snake_case ) model.eval() lowercase__ : List[str] = model(_snake_case ,labels=_snake_case ) self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.type_sequence_label_size) ) def UpperCAmelCase ( self : Optional[int] ) -> Optional[int]: """simple docstring""" lowercase__ : Any = self.prepare_config_and_inputs() lowercase__ , lowercase__ , lowercase__ : List[Any] = config_and_inputs lowercase__ : Optional[int] = {'''pixel_values''': pixel_values} return config, inputs_dict @require_torch class __A ( A_ ,A_ ,unittest.TestCase ): '''simple docstring''' lowerCAmelCase : Optional[Any] = (ViTHybridModel, ViTHybridForImageClassification) if is_torch_available() else () lowerCAmelCase : Optional[int] = ( {"feature-extraction": ViTHybridModel, "image-classification": ViTHybridForImageClassification} if is_torch_available() else {} ) lowerCAmelCase : Optional[Any] = False lowerCAmelCase : Any = False lowerCAmelCase : Optional[int] = False def UpperCAmelCase ( self : int ) -> Tuple: """simple docstring""" lowercase__ : str = ViTHybridModelTester(self ) lowercase__ : int = ConfigTester(self ,config_class=_snake_case ,has_text_modality=_snake_case ,hidden_size=37 ) def UpperCAmelCase ( self : str ) -> Tuple: """simple docstring""" self.config_tester.run_common_tests() @unittest.skip(reason='''ViT does not use inputs_embeds''' ) def UpperCAmelCase ( self : Tuple ) -> Any: """simple docstring""" pass def UpperCAmelCase ( self : List[Any] ) -> Optional[int]: """simple docstring""" lowercase__ , lowercase__ : Dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: lowercase__ : Union[str, Any] = model_class(_snake_case ) self.assertIsInstance(model.get_input_embeddings() ,(nn.Module) ) lowercase__ : Tuple = model.get_output_embeddings() self.assertTrue(x is None or isinstance(_snake_case ,nn.Linear ) ) def UpperCAmelCase ( self : List[str] ) -> Optional[Any]: """simple docstring""" lowercase__ , lowercase__ : Tuple = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: lowercase__ : Any = model_class(_snake_case ) lowercase__ : List[str] = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic lowercase__ : str = [*signature.parameters.keys()] lowercase__ : int = ['''pixel_values'''] self.assertListEqual(arg_names[:1] ,_snake_case ) def UpperCAmelCase ( self : Any ) -> str: """simple docstring""" lowercase__ : Optional[Any] = self.model_tester.prepare_config_and_inputs() 
self.model_tester.create_and_check_model(*_snake_case ) def UpperCAmelCase ( self : Dict ) -> Any: """simple docstring""" lowercase__ : str = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*_snake_case ) def UpperCAmelCase ( self : List[Any] ) -> Any: """simple docstring""" lowercase__ , lowercase__ : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common() lowercase__ : Optional[Any] = _config_zero_init(_snake_case ) for model_class in self.all_model_classes: lowercase__ : Dict = model_class(config=_snake_case ) # Skip the check for the backbone for name, module in model.named_modules(): if module.__class__.__name__ == "ViTHybridPatchEmbeddings": lowercase__ : Optional[int] = [f"""{name}.{key}""" for key in module.state_dict().keys()] break for name, param in model.named_parameters(): if param.requires_grad: if name in backbone_params: continue self.assertIn( ((param.data.mean() * 1e9).round() / 1e9).item() ,[0.0, 1.0] ,msg=f"""Parameter {name} of model {model_class} seems not properly initialized""" ,) @slow def UpperCAmelCase ( self : Optional[Any] ) -> Optional[int]: """simple docstring""" for model_name in VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: lowercase__ : Optional[int] = ViTHybridModel.from_pretrained(_snake_case ) self.assertIsNotNone(_snake_case ) def __UpperCAmelCase ( ) -> Dict: lowercase__ : Tuple = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' ) return image @require_torch @require_vision class __A ( unittest.TestCase ): '''simple docstring''' @cached_property def UpperCAmelCase ( self : str ) -> Tuple: """simple docstring""" return ( ViTHybridImageProcessor.from_pretrained(VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST[0] ) if is_vision_available() else None ) @slow def UpperCAmelCase ( self : Union[str, Any] ) -> Tuple: """simple docstring""" lowercase__ : List[str] = ViTHybridForImageClassification.from_pretrained(VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST[0] ).to( _snake_case ) lowercase__ : Union[str, Any] = self.default_image_processor lowercase__ : Any = prepare_img() lowercase__ : Optional[Any] = image_processor(images=_snake_case ,return_tensors='''pt''' ).to(_snake_case ) # forward pass with torch.no_grad(): lowercase__ : Optional[int] = model(**_snake_case ) # verify the logits lowercase__ : Any = torch.Size((1, 1_000) ) self.assertEqual(outputs.logits.shape ,_snake_case ) lowercase__ : str = torch.tensor([-1.9090, -0.4993, -0.2389] ).to(_snake_case ) self.assertTrue(torch.allclose(outputs.logits[0, :3] ,_snake_case ,atol=1e-4 ) ) @slow @require_accelerate def UpperCAmelCase ( self : Optional[Any] ) -> Optional[Any]: """simple docstring""" lowercase__ : Dict = ViTHybridImageProcessor.from_pretrained('''google/vit-hybrid-base-bit-384''' ) lowercase__ : Dict = ViTHybridForImageClassification.from_pretrained('''google/vit-hybrid-base-bit-384''' ,device_map='''auto''' ) lowercase__ : Optional[int] = prepare_img() lowercase__ : List[str] = image_processor(images=_snake_case ,return_tensors='''pt''' ) lowercase__ : Union[str, Any] = model(**_snake_case ) lowercase__ : List[str] = outputs.logits # model predicts one of the 1000 ImageNet classes lowercase__ : List[str] = logits.argmax(-1 ).item() self.assertTrue(model.config.idalabel[predicted_class_idx] ,'''tabby, tabby cat''' )
122
1
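A sketch of the RoPE-scaling settings exercised by the parameterized test above ("linear" and "dynamic"); the factor value is illustrative, and this assumes a transformers version whose LlamaConfig accepts a `rope_scaling` argument.

from transformers import LlamaConfig

config = LlamaConfig(rope_scaling={"type": "linear", "factor": 10.0})  # stretch the usable context
print(config.rope_scaling)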
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_flax_available,
    is_tf_available,
    is_tokenizers_available,
    is_torch_available,
)


_import_structure = {
    "configuration_distilbert": [
        "DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "DistilBertConfig",
        "DistilBertOnnxConfig",
    ],
    "tokenization_distilbert": ["DistilBertTokenizer"],
}

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_distilbert_fast"] = ["DistilBertTokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_distilbert"] = [
        "DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "DistilBertForMaskedLM",
        "DistilBertForMultipleChoice",
        "DistilBertForQuestionAnswering",
        "DistilBertForSequenceClassification",
        "DistilBertForTokenClassification",
        "DistilBertModel",
        "DistilBertPreTrainedModel",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_distilbert"] = [
        "TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFDistilBertForMaskedLM",
        "TFDistilBertForMultipleChoice",
        "TFDistilBertForQuestionAnswering",
        "TFDistilBertForSequenceClassification",
        "TFDistilBertForTokenClassification",
        "TFDistilBertMainLayer",
        "TFDistilBertModel",
        "TFDistilBertPreTrainedModel",
    ]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_distilbert"] = [
        "FlaxDistilBertForMaskedLM",
        "FlaxDistilBertForMultipleChoice",
        "FlaxDistilBertForQuestionAnswering",
        "FlaxDistilBertForSequenceClassification",
        "FlaxDistilBertForTokenClassification",
        "FlaxDistilBertModel",
        "FlaxDistilBertPreTrainedModel",
    ]


if TYPE_CHECKING:
    from .configuration_distilbert import (
        DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
        DistilBertConfig,
        DistilBertOnnxConfig,
    )
    from .tokenization_distilbert import DistilBertTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_distilbert_fast import DistilBertTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_distilbert import (
            DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
            DistilBertForMaskedLM,
            DistilBertForMultipleChoice,
            DistilBertForQuestionAnswering,
            DistilBertForSequenceClassification,
            DistilBertForTokenClassification,
            DistilBertModel,
            DistilBertPreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_distilbert import (
            TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFDistilBertForMaskedLM,
            TFDistilBertForMultipleChoice,
            TFDistilBertForQuestionAnswering,
            TFDistilBertForSequenceClassification,
            TFDistilBertForTokenClassification,
            TFDistilBertMainLayer,
            TFDistilBertModel,
            TFDistilBertPreTrainedModel,
        )

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_distilbert import (
            FlaxDistilBertForMaskedLM,
            FlaxDistilBertForMultipleChoice,
            FlaxDistilBertForQuestionAnswering,
            FlaxDistilBertForSequenceClassification,
            FlaxDistilBertForTokenClassification,
            FlaxDistilBertModel,
            FlaxDistilBertPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
467
class TrieNode:
    def __init__(self) -> None:
        self.nodes: dict[str, TrieNode] = {}  # Mapping from char to TrieNode
        self.is_leaf = False

    def insert_many(self, words: list[str]) -> None:
        """Insert a list of words into the Trie."""
        for word in words:
            self.insert(word)

    def insert(self, word: str) -> None:
        """Insert a word into the Trie."""
        curr = self
        for char in word:
            if char not in curr.nodes:
                curr.nodes[char] = TrieNode()
            curr = curr.nodes[char]
        curr.is_leaf = True

    def find(self, word: str) -> bool:
        """Return True if the exact word was inserted into the Trie."""
        curr = self
        for char in word:
            if char not in curr.nodes:
                return False
            curr = curr.nodes[char]
        return curr.is_leaf

    def delete(self, word: str) -> None:
        """Delete a word from the Trie, pruning nodes that become unused."""

        def _delete(curr: TrieNode, word: str, index: int) -> bool:
            if index == len(word):
                # If word does not exist
                if not curr.is_leaf:
                    return False
                curr.is_leaf = False
                return len(curr.nodes) == 0
            char = word[index]
            char_node = curr.nodes.get(char)
            # If char not in current trie node
            if not char_node:
                return False
            # Flag to check if node can be deleted
            delete_curr = _delete(char_node, word, index + 1)
            if delete_curr:
                del curr.nodes[char]
                return len(curr.nodes) == 0
            return delete_curr

        _delete(self, word, 0)


def print_words(node: TrieNode, word: str) -> None:
    if node.is_leaf:
        print(word, end=" ")
    for key, value in node.nodes.items():
        print_words(value, word + key)


def test_trie() -> bool:
    words = "banana bananas bandana band apple all beast".split()
    root = TrieNode()
    root.insert_many(words)
    # print_words(root, "")
    assert all(root.find(word) for word in words)
    assert root.find("banana")
    assert not root.find("bandanas")
    assert not root.find("apps")
    assert root.find("apple")
    assert root.find("all")
    root.delete("all")
    assert not root.find("all")
    root.delete("banana")
    assert not root.find("banana")
    assert root.find("bananas")
    return True


def print_results(msg: str, passes: bool) -> None:
    print(str(msg), "works!" if passes else "doesn't work :(")


def pytests() -> None:
    assert test_trie()


def main() -> None:
    print_results("Testing trie functionality", test_trie())


if __name__ == "__main__":
    main()
467
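A minimal usage sketch for the trie sample above, assuming the reconstructed TrieNode class and method names; the prefix-collection helper is illustrative and not part of the original sample.

# Illustrative prefix lookup built on the TrieNode class above.
root = TrieNode()
root.insert_many(["band", "bandana", "banana"])

def words_with_prefix(node: TrieNode, prefix: str) -> list[str]:
    # Walk down to the node reached by `prefix`, then collect all leaf words below it.
    curr = node
    for char in prefix:
        if char not in curr.nodes:
            return []
        curr = curr.nodes[char]
    found: list[str] = []

    def collect(n: TrieNode, word: str) -> None:
        if n.is_leaf:
            found.append(word)
        for key, child in n.nodes.items():
            collect(child, word + key)

    collect(curr, prefix)
    return found

print(words_with_prefix(root, "ban"))  # ['band', 'bandana', 'banana']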
1
from __future__ import annotations


def resistor_parallel(resistors: list[float]) -> float:
    """Return the equivalent resistance of resistors combined in parallel.

    Req = 1 / (1/R1 + 1/R2 + ... + 1/Rn)
    """
    first_sum = 0.00
    index = 0
    for resistor in resistors:
        if resistor <= 0:
            msg = f"Resistor at index {index} has a negative or zero value!"
            raise ValueError(msg)
        first_sum += 1 / float(resistor)
        index += 1
    return 1 / first_sum


def resistor_series(resistors: list[float]) -> float:
    """Return the equivalent resistance of resistors combined in series.

    Req = R1 + R2 + ... + Rn
    """
    sum_r = 0.00
    index = 0
    for resistor in resistors:
        sum_r += resistor
        if resistor < 0:
            msg = f"Resistor at index {index} has a negative value!"
            raise ValueError(msg)
        index += 1
    return sum_r


if __name__ == "__main__":
    import doctest

    doctest.testmod()
297
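A short worked check of the series/parallel formulas above, assuming the reconstructed function names resistor_series and resistor_parallel.

# Two 10-ohm resistors: 20 ohms in series, 5 ohms in parallel.
assert resistor_series([10.0, 10.0]) == 20.0
assert resistor_parallel([10.0, 10.0]) == 5.0
# 1 / (1/4 + 1/6 + 1/12) = 1 / 0.5 = 2.0
assert abs(resistor_parallel([4.0, 6.0, 12.0]) - 2.0) < 1e-9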
from maths.prime_factors import prime_factors


def liouville_lambda(number: int) -> int:
    """Return 1 if `number` has an even number of prime factors, -1 otherwise."""
    if not isinstance(number, int):
        msg = f"Input value of [number={number}] must be an integer"
        raise TypeError(msg)
    if number < 1:
        raise ValueError("Input must be a positive integer")
    return -1 if len(prime_factors(number)) % 2 else 1


if __name__ == "__main__":
    import doctest

    doctest.testmod()
297
1
from typing import TYPE_CHECKING

from ..utils import _LazyModule


_import_structure = {
    "config": [
        "EXTERNAL_DATA_FORMAT_SIZE_LIMIT",
        "OnnxConfig",
        "OnnxConfigWithPast",
        "OnnxSeq2SeqConfigWithPast",
        "PatchingSpec",
    ],
    "convert": ["export", "validate_model_outputs"],
    "features": ["FeaturesManager"],
    "utils": ["ParameterFormat", "compute_serialized_parameters_size"],
}


if TYPE_CHECKING:
    from .config import (
        EXTERNAL_DATA_FORMAT_SIZE_LIMIT,
        OnnxConfig,
        OnnxConfigWithPast,
        OnnxSeq2SeqConfigWithPast,
        PatchingSpec,
    )
    from .convert import export, validate_model_outputs
    from .features import FeaturesManager
    from .utils import ParameterFormat, compute_serialized_parameters_size

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
14
import argparse import torch from transformers import YosoConfig, YosoForMaskedLM def lowercase__( A ): if "model" in orig_key: snake_case__ : Any = orig_key.replace('model.' , '' ) if "norm1" in orig_key: snake_case__ : Optional[int] = orig_key.replace('norm1' , 'attention.output.LayerNorm' ) if "norm2" in orig_key: snake_case__ : Tuple = orig_key.replace('norm2' , 'output.LayerNorm' ) if "norm" in orig_key: snake_case__ : List[Any] = orig_key.replace('norm' , 'LayerNorm' ) if "transformer" in orig_key: snake_case__ : Tuple = orig_key.split('.' )[0].split('_' )[-1] snake_case__ : Optional[Any] = orig_key.replace(f'''transformer_{layer_num}''' , f'''encoder.layer.{layer_num}''' ) if "mha.attn" in orig_key: snake_case__ : Union[str, Any] = orig_key.replace('mha.attn' , 'attention.self' ) if "mha" in orig_key: snake_case__ : Optional[Any] = orig_key.replace('mha' , 'attention' ) if "W_q" in orig_key: snake_case__ : Optional[int] = orig_key.replace('W_q' , 'self.query' ) if "W_k" in orig_key: snake_case__ : List[Any] = orig_key.replace('W_k' , 'self.key' ) if "W_v" in orig_key: snake_case__ : str = orig_key.replace('W_v' , 'self.value' ) if "ff1" in orig_key: snake_case__ : int = orig_key.replace('ff1' , 'intermediate.dense' ) if "ff2" in orig_key: snake_case__ : str = orig_key.replace('ff2' , 'output.dense' ) if "ff" in orig_key: snake_case__ : Union[str, Any] = orig_key.replace('ff' , 'output.dense' ) if "mlm_class" in orig_key: snake_case__ : int = orig_key.replace('mlm.mlm_class' , 'cls.predictions.decoder' ) if "mlm" in orig_key: snake_case__ : Optional[int] = orig_key.replace('mlm' , 'cls.predictions.transform' ) if "cls" not in orig_key: snake_case__ : Optional[int] = 'yoso.' + orig_key return orig_key def lowercase__( A , A ): for key in orig_state_dict.copy().keys(): snake_case__ : Optional[Any] = orig_state_dict.pop(A ) if ("pooler" in key) or ("sen_class" in key): continue else: snake_case__ : Optional[Any] = val snake_case__ : Tuple = orig_state_dict['cls.predictions.decoder.bias'] snake_case__ : Optional[Any] = torch.arange(A ).expand((1, -1) ) + 2 return orig_state_dict def lowercase__( A , A , A ): snake_case__ : Tuple = torch.load(A , map_location='cpu' )['model_state_dict'] snake_case__ : Union[str, Any] = YosoConfig.from_json_file(A ) snake_case__ : Optional[int] = YosoForMaskedLM(A ) snake_case__ : str = convert_checkpoint_helper(config.max_position_embeddings , A ) print(model.load_state_dict(A ) ) model.eval() model.save_pretrained(A ) print(f'''Checkpoint successfuly converted. Model saved at {pytorch_dump_path}''' ) if __name__ == "__main__": lowerCamelCase : Union[str, Any] = argparse.ArgumentParser() # Required parameters parser.add_argument( '--pytorch_model_path', default=None, type=str, required=True, help='Path to YOSO pytorch checkpoint.' ) parser.add_argument( '--config_file', default=None, type=str, required=True, help='The json file for YOSO model config.', ) parser.add_argument( '--pytorch_dump_path', default=None, type=str, required=True, help='Path to the output PyTorch model.' ) lowerCamelCase : Optional[Any] = parser.parse_args() convert_yoso_checkpoint(args.pytorch_model_path, args.config_file, args.pytorch_dump_path)
170
0
'''simple docstring''' from ...configuration_utils import PretrainedConfig from ...utils import logging from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices __snake_case =logging.get_logger(__name__) __snake_case ={ """microsoft/focalnet-tiny""": """https://huggingface.co/microsoft/focalnet-tiny/resolve/main/config.json""", } class UpperCAmelCase_ ( __lowercase , __lowercase ): lowerCamelCase : Optional[int] = '''focalnet''' def __init__( self : Any , UpperCAmelCase__ : Optional[Any]=2_2_4 , UpperCAmelCase__ : Union[str, Any]=4 , UpperCAmelCase__ : str=3 , UpperCAmelCase__ : Dict=9_6 , UpperCAmelCase__ : Dict=False , UpperCAmelCase__ : Dict=[1_9_2, 3_8_4, 7_6_8, 7_6_8] , UpperCAmelCase__ : Any=[2, 2, 6, 2] , UpperCAmelCase__ : int=[2, 2, 2, 2] , UpperCAmelCase__ : Union[str, Any]=[3, 3, 3, 3] , UpperCAmelCase__ : Dict="gelu" , UpperCAmelCase__ : Optional[Any]=4.0 , UpperCAmelCase__ : int=0.0 , UpperCAmelCase__ : Union[str, Any]=0.1 , UpperCAmelCase__ : Union[str, Any]=False , UpperCAmelCase__ : Any=1E-4 , UpperCAmelCase__ : Dict=False , UpperCAmelCase__ : List[Any]=False , UpperCAmelCase__ : List[Any]=False , UpperCAmelCase__ : Any=0.02 , UpperCAmelCase__ : Dict=1E-5 , UpperCAmelCase__ : Optional[int]=3_2 , UpperCAmelCase__ : Tuple=None , UpperCAmelCase__ : Optional[Any]=None , **UpperCAmelCase__ : Any , ) -> Any: super().__init__(**UpperCAmelCase__ ) lowerCAmelCase = image_size lowerCAmelCase = patch_size lowerCAmelCase = num_channels lowerCAmelCase = embed_dim lowerCAmelCase = use_conv_embed lowerCAmelCase = hidden_sizes lowerCAmelCase = depths lowerCAmelCase = focal_levels lowerCAmelCase = focal_windows lowerCAmelCase = hidden_act lowerCAmelCase = mlp_ratio lowerCAmelCase = hidden_dropout_prob lowerCAmelCase = drop_path_rate lowerCAmelCase = use_layerscale lowerCAmelCase = layerscale_value lowerCAmelCase = use_post_layernorm lowerCAmelCase = use_post_layernorm_in_modulation lowerCAmelCase = normalize_modulator lowerCAmelCase = initializer_range lowerCAmelCase = layer_norm_eps lowerCAmelCase = encoder_stride lowerCAmelCase = ['stem'] + [F'''stage{idx}''' for idx in range(1 , len(self.depths ) + 1 )] lowerCAmelCase , lowerCAmelCase = get_aligned_output_features_output_indices( out_features=UpperCAmelCase__ , out_indices=UpperCAmelCase__ , stage_names=self.stage_names )
513
'''simple docstring''' def a_ ( lowerCamelCase : Tuple , lowerCamelCase : Tuple ): # "extended trapezoidal rule" # int(f) = dx/2 * (f1 + 2f2 + ... + fn) lowerCAmelCase = (boundary[1] - boundary[0]) / steps lowerCAmelCase = boundary[0] lowerCAmelCase = boundary[1] lowerCAmelCase = make_points(lowerCamelCase , lowerCamelCase , lowerCamelCase ) lowerCAmelCase = 0.0 y += (h / 2.0) * f(lowerCamelCase ) for i in x_i: # print(i) y += h * f(lowerCamelCase ) y += (h / 2.0) * f(lowerCamelCase ) return y def a_ ( lowerCamelCase : Dict , lowerCamelCase : Union[str, Any] , lowerCamelCase : Any ): lowerCAmelCase = a + h while x < (b - h): yield x lowerCAmelCase = x + h def a_ ( lowerCamelCase : Optional[Any] ): # enter your function here lowerCAmelCase = (x - 0) * (x - 0) return y def a_ ( ): lowerCAmelCase = 0.0 # Lower bound of integration lowerCAmelCase = 1.0 # Upper bound of integration lowerCAmelCase = 10.0 # define number of steps or resolution lowerCAmelCase = [a, b] # define boundary of integration lowerCAmelCase = method_a(lowerCamelCase , lowerCamelCase ) print(f'''y = {y}''' ) if __name__ == "__main__": main()
513
1
import argparse import collections import torch from flax import traverse_util from tax import checkpoints from transformers import TaConfig, TaEncoderModel, TaForConditionalGeneration from transformers.utils import logging logging.set_verbosity_info() def __UpperCAmelCase ( lowerCamelCase_ : Dict , lowerCamelCase_ : List[Any] , lowerCamelCase_ : Optional[int] , lowerCamelCase_ : str="attention" ) -> List[Any]: """simple docstring""" SCREAMING_SNAKE_CASE_ : Any = params[F'{prefix}/layers_{i}/{layer_name}/key/kernel'] SCREAMING_SNAKE_CASE_ : str = params[F'{prefix}/layers_{i}/{layer_name}/out/kernel'] SCREAMING_SNAKE_CASE_ : Optional[int] = params[F'{prefix}/layers_{i}/{layer_name}/query/kernel'] SCREAMING_SNAKE_CASE_ : Union[str, Any] = params[F'{prefix}/layers_{i}/{layer_name}/value/kernel'] return k, o, q, v def __UpperCAmelCase ( lowerCamelCase_ : Optional[int] , lowerCamelCase_ : List[str] , lowerCamelCase_ : Any , lowerCamelCase_ : Tuple=False ) -> Dict: """simple docstring""" if split_mlp_wi: SCREAMING_SNAKE_CASE_ : Union[str, Any] = params[F'{prefix}/layers_{i}/mlp/wi_0/kernel'] SCREAMING_SNAKE_CASE_ : int = params[F'{prefix}/layers_{i}/mlp/wi_1/kernel'] SCREAMING_SNAKE_CASE_ : Any = (wi_a, wi_a) else: SCREAMING_SNAKE_CASE_ : int = params[F'{prefix}/layers_{i}/mlp/wi/kernel'] SCREAMING_SNAKE_CASE_ : str = params[F'{prefix}/layers_{i}/mlp/wo/kernel'] return wi, wo def __UpperCAmelCase ( lowerCamelCase_ : Any , lowerCamelCase_ : Union[str, Any] , lowerCamelCase_ : Union[str, Any] , lowerCamelCase_ : Dict ) -> Any: """simple docstring""" return params[F'{prefix}/layers_{i}/{layer_name}/scale'] def __UpperCAmelCase ( lowerCamelCase_ : dict , *, lowerCamelCase_ : int , lowerCamelCase_ : bool ) -> str: """simple docstring""" SCREAMING_SNAKE_CASE_ : Union[str, Any] = traverse_util.flatten_dict(variables['target'] ) SCREAMING_SNAKE_CASE_ : str = {'/'.join(lowerCamelCase_ ): v for k, v in old.items()} # v1.1 models have a gated GeLU with wi_0 and wi_1 instead of wi SCREAMING_SNAKE_CASE_ : Tuple = 'encoder/layers_0/mlp/wi_0/kernel' in old print('Split MLP:' , lowerCamelCase_ ) SCREAMING_SNAKE_CASE_ : int = collections.OrderedDict() # Shared embeddings. SCREAMING_SNAKE_CASE_ : Union[str, Any] = old['token_embedder/embedding'] # Encoder. for i in range(lowerCamelCase_ ): # Block i, layer 0 (Self Attention). SCREAMING_SNAKE_CASE_ : Optional[Any] = tax_layer_norm_lookup(lowerCamelCase_ , lowerCamelCase_ , 'encoder' , 'pre_attention_layer_norm' ) SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Tuple = tax_attention_lookup(lowerCamelCase_ , lowerCamelCase_ , 'encoder' , 'attention' ) SCREAMING_SNAKE_CASE_ : List[str] = layer_norm SCREAMING_SNAKE_CASE_ : List[Any] = k.T SCREAMING_SNAKE_CASE_ : Any = o.T SCREAMING_SNAKE_CASE_ : List[str] = q.T SCREAMING_SNAKE_CASE_ : List[Any] = v.T # Block i, layer 1 (MLP). 
SCREAMING_SNAKE_CASE_ : Dict = tax_layer_norm_lookup(lowerCamelCase_ , lowerCamelCase_ , 'encoder' , 'pre_mlp_layer_norm' ) SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Union[str, Any] = tax_mlp_lookup(lowerCamelCase_ , lowerCamelCase_ , 'encoder' , lowerCamelCase_ ) SCREAMING_SNAKE_CASE_ : Optional[int] = layer_norm if split_mlp_wi: SCREAMING_SNAKE_CASE_ : Optional[Any] = wi[0].T SCREAMING_SNAKE_CASE_ : str = wi[1].T else: SCREAMING_SNAKE_CASE_ : List[str] = wi.T SCREAMING_SNAKE_CASE_ : Tuple = wo.T SCREAMING_SNAKE_CASE_ : str = old[ 'encoder/relpos_bias/rel_embedding' ].T SCREAMING_SNAKE_CASE_ : Any = old['encoder/encoder_norm/scale'] if not is_encoder_only: # Decoder. for i in range(lowerCamelCase_ ): # Block i, layer 0 (Self Attention). SCREAMING_SNAKE_CASE_ : str = tax_layer_norm_lookup(lowerCamelCase_ , lowerCamelCase_ , 'decoder' , 'pre_self_attention_layer_norm' ) SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : int = tax_attention_lookup(lowerCamelCase_ , lowerCamelCase_ , 'decoder' , 'self_attention' ) SCREAMING_SNAKE_CASE_ : int = layer_norm SCREAMING_SNAKE_CASE_ : Any = k.T SCREAMING_SNAKE_CASE_ : Tuple = o.T SCREAMING_SNAKE_CASE_ : Optional[Any] = q.T SCREAMING_SNAKE_CASE_ : Any = v.T # Block i, layer 1 (Cross Attention). SCREAMING_SNAKE_CASE_ : Dict = tax_layer_norm_lookup(lowerCamelCase_ , lowerCamelCase_ , 'decoder' , 'pre_cross_attention_layer_norm' ) SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Optional[Any] = tax_attention_lookup(lowerCamelCase_ , lowerCamelCase_ , 'decoder' , 'encoder_decoder_attention' ) SCREAMING_SNAKE_CASE_ : Union[str, Any] = layer_norm SCREAMING_SNAKE_CASE_ : str = k.T SCREAMING_SNAKE_CASE_ : List[str] = o.T SCREAMING_SNAKE_CASE_ : Union[str, Any] = q.T SCREAMING_SNAKE_CASE_ : List[str] = v.T # Block i, layer 2 (MLP). SCREAMING_SNAKE_CASE_ : str = tax_layer_norm_lookup(lowerCamelCase_ , lowerCamelCase_ , 'decoder' , 'pre_mlp_layer_norm' ) SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Optional[Any] = tax_mlp_lookup(lowerCamelCase_ , lowerCamelCase_ , 'decoder' , lowerCamelCase_ ) SCREAMING_SNAKE_CASE_ : Optional[Any] = layer_norm if split_mlp_wi: SCREAMING_SNAKE_CASE_ : Optional[int] = wi[0].T SCREAMING_SNAKE_CASE_ : Dict = wi[1].T else: SCREAMING_SNAKE_CASE_ : Optional[Any] = wi.T SCREAMING_SNAKE_CASE_ : Any = wo.T SCREAMING_SNAKE_CASE_ : Tuple = old['decoder/decoder_norm/scale'] SCREAMING_SNAKE_CASE_ : Optional[int] = old[ 'decoder/relpos_bias/rel_embedding' ].T # LM Head (only in v1.1 checkpoints, in v1.0 embeddings are used instead) if "decoder/logits_dense/kernel" in old: SCREAMING_SNAKE_CASE_ : Any = old['decoder/logits_dense/kernel'].T return new def __UpperCAmelCase ( lowerCamelCase_ : Optional[int] , lowerCamelCase_ : bool ) -> Any: """simple docstring""" SCREAMING_SNAKE_CASE_ : Union[str, Any] = collections.OrderedDict([(k, torch.from_numpy(v.copy() )) for (k, v) in converted_params.items()] ) # Add what is missing. if "encoder.embed_tokens.weight" not in state_dict: SCREAMING_SNAKE_CASE_ : Optional[int] = state_dict['shared.weight'] if not is_encoder_only: if "decoder.embed_tokens.weight" not in state_dict: SCREAMING_SNAKE_CASE_ : str = state_dict['shared.weight'] if "lm_head.weight" not in state_dict: # For old 1.0 models. print('Using shared word embeddings as lm_head.' 
) SCREAMING_SNAKE_CASE_ : Dict = state_dict['shared.weight'] return state_dict def __UpperCAmelCase ( lowerCamelCase_ : List[str] , lowerCamelCase_ : Optional[int] , lowerCamelCase_ : int , lowerCamelCase_ : str ) -> Union[str, Any]: """simple docstring""" SCREAMING_SNAKE_CASE_ : List[str] = checkpoints.load_tax_checkpoint(lowerCamelCase_ ) SCREAMING_SNAKE_CASE_ : Dict = convert_tax_to_pytorch(lowerCamelCase_ , num_layers=config.num_layers , is_encoder_only=lowerCamelCase_ ) SCREAMING_SNAKE_CASE_ : int = make_state_dict(lowerCamelCase_ , lowerCamelCase_ ) model.load_state_dict(lowerCamelCase_ , strict=lowerCamelCase_ ) def __UpperCAmelCase ( lowerCamelCase_ : str , lowerCamelCase_ : List[Any] , lowerCamelCase_ : List[str] , lowerCamelCase_ : bool = False ) -> Optional[int]: """simple docstring""" SCREAMING_SNAKE_CASE_ : List[Any] = TaConfig.from_json_file(lowerCamelCase_ ) print(F'Building PyTorch model from configuration: {config}' ) # Non-v1.1 checkpoints could also use T5Model, but this works for all. # The v1.0 checkpoints will simply have an LM head that is the word embeddings. if is_encoder_only: SCREAMING_SNAKE_CASE_ : Optional[Any] = TaEncoderModel(lowerCamelCase_ ) else: SCREAMING_SNAKE_CASE_ : Tuple = TaForConditionalGeneration(lowerCamelCase_ ) # Load weights from tf checkpoint load_tax_weights_in_ta(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ) # Save pytorch-model print(F'Save PyTorch model to {pytorch_dump_path}' ) model.save_pretrained(lowerCamelCase_ ) # Verify that we can load the checkpoint. model.from_pretrained(lowerCamelCase_ ) print('Done' ) if __name__ == "__main__": UpperCamelCase__ : Union[str, Any] = argparse.ArgumentParser(description='''Converts a native T5X checkpoint into a PyTorch checkpoint.''') # Required parameters parser.add_argument( '''--t5x_checkpoint_path''', default=None, type=str, required=True, help='''Path to the T5X checkpoint.''' ) parser.add_argument( '''--config_file''', default=None, type=str, required=True, help='''The config json file corresponding to the pre-trained T5 model.\nThis specifies the model architecture.''', ) parser.add_argument( '''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.''' ) parser.add_argument( '''--is_encoder_only''', action='''store_true''', help='''Check if the model is encoder-decoder model''', default=False ) UpperCamelCase__ : Dict = parser.parse_args() convert_tax_checkpoint_to_pytorch( args.tax_checkpoint_path, args.config_file, args.pytorch_dump_path, args.is_encoder_only )
105
from typing import List, Optional import numpy as np from ...processing_utils import ProcessorMixin from ...utils import to_numpy class _a ( A__ ): """simple docstring""" snake_case ="""EncodecFeatureExtractor""" snake_case =("""T5Tokenizer""", """T5TokenizerFast""") def __init__( self , _snake_case , _snake_case ): super().__init__(_snake_case , _snake_case ) _UpperCAmelCase =self.feature_extractor _UpperCAmelCase =False def SCREAMING_SNAKE_CASE ( self , _snake_case=None , _snake_case=None , _snake_case=True ): return self.tokenizer.get_decoder_prompt_ids(task=_snake_case , language=_snake_case , no_timestamps=_snake_case ) def __call__( self , *_snake_case , **_snake_case ): # For backward compatibility if self._in_target_context_manager: return self.current_processor(*_snake_case , **_snake_case ) _UpperCAmelCase =kwargs.pop("audio" , _snake_case ) _UpperCAmelCase =kwargs.pop("sampling_rate" , _snake_case ) _UpperCAmelCase =kwargs.pop("text" , _snake_case ) if len(_snake_case ) > 0: _UpperCAmelCase =args[0] _UpperCAmelCase =args[1:] if audio is None and text is None: raise ValueError("You need to specify either an `audio` or `text` input to process." ) if text is not None: _UpperCAmelCase =self.tokenizer(_snake_case , **_snake_case ) if audio is not None: _UpperCAmelCase =self.feature_extractor(_snake_case , *_snake_case , sampling_rate=_snake_case , **_snake_case ) if audio is None: return inputs elif text is None: return audio_inputs else: _UpperCAmelCase =audio_inputs["input_values"] if "padding_mask" in audio_inputs: _UpperCAmelCase =audio_inputs["padding_mask"] return inputs def SCREAMING_SNAKE_CASE ( self , *_snake_case , **_snake_case ): _UpperCAmelCase =kwargs.pop("audio" , _snake_case ) _UpperCAmelCase =kwargs.pop("padding_mask" , _snake_case ) if len(_snake_case ) > 0: _UpperCAmelCase =args[0] _UpperCAmelCase =args[1:] if audio_values is not None: return self._decode_audio(_snake_case , padding_mask=_snake_case ) else: return self.tokenizer.batch_decode(*_snake_case , **_snake_case ) def SCREAMING_SNAKE_CASE ( self , *_snake_case , **_snake_case ): return self.tokenizer.decode(*_snake_case , **_snake_case ) def SCREAMING_SNAKE_CASE ( self , _snake_case , _snake_case = None ): _UpperCAmelCase =to_numpy(_snake_case ) _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase =audio_values.shape if padding_mask is None: return list(_snake_case ) _UpperCAmelCase =to_numpy(_snake_case ) # match the sequence length of the padding mask to the generated audio arrays by padding with the **non-padding** # token (so that the generated audio values are **not** treated as padded tokens) _UpperCAmelCase =seq_len - padding_mask.shape[-1] _UpperCAmelCase =1 - self.feature_extractor.padding_value _UpperCAmelCase =np.pad(_snake_case , ((0, 0), (0, difference)) , "constant" , constant_values=_snake_case ) _UpperCAmelCase =audio_values.tolist() for i in range(_snake_case ): _UpperCAmelCase =np.asarray(audio_values[i] )[ padding_mask[i][None, :] != self.feature_extractor.padding_value ] _UpperCAmelCase =sliced_audio.reshape(_snake_case , -1 ) return audio_values
408
0
"""simple docstring""" from __future__ import annotations # This is the precision for this function which can be altered. # It is recommended for users to keep this number greater than or equal to 10. a :Union[str, Any] = 10 def _lowercase ( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) -> int: for i in range(__lowerCAmelCase , __lowerCAmelCase ): if array[i] == target: return i return -1 def _lowercase ( __lowerCAmelCase , __lowerCAmelCase ) -> int: SCREAMING_SNAKE_CASE__ : Optional[int] = 0 SCREAMING_SNAKE_CASE__ : Any = len(__lowerCAmelCase ) while left <= right: if right - left < precision: return lin_search(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) SCREAMING_SNAKE_CASE__ : Dict = (left + right) // 3 + 1 SCREAMING_SNAKE_CASE__ : int = 2 * (left + right) // 3 + 1 if array[one_third] == target: return one_third elif array[two_third] == target: return two_third elif target < array[one_third]: SCREAMING_SNAKE_CASE__ : List[Any] = one_third - 1 elif array[two_third] < target: SCREAMING_SNAKE_CASE__ : str = two_third + 1 else: SCREAMING_SNAKE_CASE__ : List[Any] = one_third + 1 SCREAMING_SNAKE_CASE__ : Optional[int] = two_third - 1 else: return -1 def _lowercase ( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) -> int: if left < right: if right - left < precision: return lin_search(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) SCREAMING_SNAKE_CASE__ : Any = (left + right) // 3 + 1 SCREAMING_SNAKE_CASE__ : Optional[int] = 2 * (left + right) // 3 + 1 if array[one_third] == target: return one_third elif array[two_third] == target: return two_third elif target < array[one_third]: return rec_ternary_search(__lowerCAmelCase , one_third - 1 , __lowerCAmelCase , __lowerCAmelCase ) elif array[two_third] < target: return rec_ternary_search(two_third + 1 , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) else: return rec_ternary_search(one_third + 1 , two_third - 1 , __lowerCAmelCase , __lowerCAmelCase ) else: return -1 if __name__ == "__main__": import doctest doctest.testmod() a :Union[str, Any] = input("Enter numbers separated by comma:\n").strip() a :Tuple = [int(item.strip()) for item in user_input.split(",")] assert collection == sorted(collection), f"List must be ordered.\n{collection}." a :Union[str, Any] = int(input("Enter the number to be found in the list:\n").strip()) a :Dict = ite_ternary_search(collection, target) a :str = rec_ternary_search(0, len(collection) - 1, collection, target) if resulta != -1: print(f'Iterative search: {target} found at positions: {resulta}') print(f'Recursive search: {target} found at positions: {resulta}') else: print("Not found")
709
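A quick self-test for the ternary-search sample above, assuming the reconstructed names ite_ternary_search and rec_ternary_search.

# Search a sorted list of multiples of 3; both variants must agree with list.index.
data = list(range(0, 100, 3))
assert ite_ternary_search(data, 42) == data.index(42)
assert rec_ternary_search(0, len(data) - 1, data, 42) == data.index(42)
assert ite_ternary_search(data, 43) == -1  # 43 is not a multiple of 3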
"""simple docstring""" from typing import Mapping from ...configuration_utils import PretrainedConfig from ...onnx import OnnxSeqaSeqConfigWithPast from ...utils import logging a :Optional[Any] = logging.get_logger(__name__) a :Union[str, Any] = { "t5-small": "https://huggingface.co/t5-small/resolve/main/config.json", "t5-base": "https://huggingface.co/t5-base/resolve/main/config.json", "t5-large": "https://huggingface.co/t5-large/resolve/main/config.json", "t5-3b": "https://huggingface.co/t5-3b/resolve/main/config.json", "t5-11b": "https://huggingface.co/t5-11b/resolve/main/config.json", } class __a (UpperCamelCase_): '''simple docstring''' _SCREAMING_SNAKE_CASE :List[Any] = """t5""" _SCREAMING_SNAKE_CASE :List[str] = ["""past_key_values"""] _SCREAMING_SNAKE_CASE :Any = {"""hidden_size""": """d_model""", """num_attention_heads""": """num_heads""", """num_hidden_layers""": """num_layers"""} def __init__( self , _a=32_128 , _a=512 , _a=64 , _a=2_048 , _a=6 , _a=None , _a=8 , _a=32 , _a=128 , _a=0.1 , _a=1E-6 , _a=1.0 , _a="relu" , _a=True , _a=True , _a=0 , _a=1 , **_a , ) -> Optional[int]: """simple docstring""" SCREAMING_SNAKE_CASE__ : Optional[Any] = vocab_size SCREAMING_SNAKE_CASE__ : Tuple = d_model SCREAMING_SNAKE_CASE__ : int = d_kv SCREAMING_SNAKE_CASE__ : Union[str, Any] = d_ff SCREAMING_SNAKE_CASE__ : Union[str, Any] = num_layers SCREAMING_SNAKE_CASE__ : int = ( num_decoder_layers if num_decoder_layers is not None else self.num_layers ) # default = symmetry SCREAMING_SNAKE_CASE__ : Tuple = num_heads SCREAMING_SNAKE_CASE__ : Dict = relative_attention_num_buckets SCREAMING_SNAKE_CASE__ : str = relative_attention_max_distance SCREAMING_SNAKE_CASE__ : Union[str, Any] = dropout_rate SCREAMING_SNAKE_CASE__ : Union[str, Any] = layer_norm_epsilon SCREAMING_SNAKE_CASE__ : Optional[Any] = initializer_factor SCREAMING_SNAKE_CASE__ : Tuple = feed_forward_proj SCREAMING_SNAKE_CASE__ : str = use_cache SCREAMING_SNAKE_CASE__ : List[str] = self.feed_forward_proj.split("""-""" ) SCREAMING_SNAKE_CASE__ : Dict = act_info[-1] SCREAMING_SNAKE_CASE__ : str = act_info[0] == """gated""" if len(_a ) > 1 and act_info[0] != "gated" or len(_a ) > 2: raise ValueError( f'''`feed_forward_proj`: {feed_forward_proj} is not a valid activation function of the dense layer.''' """Please make sure `feed_forward_proj` is of the format `gated-{ACT_FN}` or `{ACT_FN}`, e.g. """ """'gated-gelu' or 'relu'""" ) # for backwards compatibility if feed_forward_proj == "gated-gelu": SCREAMING_SNAKE_CASE__ : List[Any] = """gelu_new""" super().__init__( pad_token_id=_a , eos_token_id=_a , is_encoder_decoder=_a , **_a , ) class __a (UpperCamelCase_): '''simple docstring''' @property def _a ( self ) -> Mapping[str, Mapping[int, str]]: """simple docstring""" SCREAMING_SNAKE_CASE__ : int = { """input_ids""": {0: """batch""", 1: """encoder_sequence"""}, """attention_mask""": {0: """batch""", 1: """encoder_sequence"""}, } if self.use_past: SCREAMING_SNAKE_CASE__ : Tuple = """past_encoder_sequence + sequence""" SCREAMING_SNAKE_CASE__ : Optional[int] = {0: """batch"""} SCREAMING_SNAKE_CASE__ : Tuple = {0: """batch""", 1: """past_decoder_sequence + sequence"""} else: SCREAMING_SNAKE_CASE__ : str = {0: """batch""", 1: """decoder_sequence"""} SCREAMING_SNAKE_CASE__ : Dict = {0: """batch""", 1: """decoder_sequence"""} if self.use_past: self.fill_with_past_key_values_(_a , direction="""inputs""" ) return common_inputs @property def _a ( self ) -> int: """simple docstring""" return 13
12
0
import copy from dataclasses import dataclass from pathlib import Path from typing import Dict, Optional, Union @dataclass class snake_case_ : '''simple docstring''' __UpperCamelCase = None __UpperCamelCase = False __UpperCamelCase = False __UpperCamelCase = False __UpperCamelCase = None __UpperCamelCase = None __UpperCamelCase = False __UpperCamelCase = False __UpperCamelCase = False __UpperCamelCase = True __UpperCamelCase = None __UpperCamelCase = 1 __UpperCamelCase = None __UpperCamelCase = False __UpperCamelCase = None __UpperCamelCase = None def UpperCAmelCase ( self : int ) -> "DownloadConfig": '''simple docstring''' return self.__class__(**{k: copy.deepcopy(__lowerCamelCase ) for k, v in self.__dict__.items()} )
375
def bfs(graph: list[list[int]], source: int, sink: int, parent: list[int]) -> bool:
    # Standard BFS; returns True if the sink is reachable through edges
    # that still have residual capacity.
    visited = [False] * len(graph)
    queue = [source]
    visited[source] = True
    while queue:
        u = queue.pop(0)
        for ind in range(len(graph[u])):
            if visited[ind] is False and graph[u][ind] > 0:
                queue.append(ind)
                visited[ind] = True
                parent[ind] = u
    return visited[sink]


def ford_fulkerson(graph: list[list[int]], source: int, sink: int) -> int:
    # `parent` is filled by BFS and used to reconstruct augmenting paths.
    parent = [-1] * len(graph)
    max_flow = 0
    while bfs(graph, source, sink, parent):
        path_flow = float("Inf")
        s = sink
        while s != source:
            # Find the minimum residual capacity along the path.
            path_flow = min(path_flow, graph[parent[s]][s])
            s = parent[s]
        max_flow += path_flow
        v = sink
        while v != source:
            u = parent[v]
            graph[u][v] -= path_flow
            graph[v][u] += path_flow
            v = parent[v]
    return max_flow


graph = [
    [0, 16, 13, 0, 0, 0],
    [0, 0, 10, 12, 0, 0],
    [0, 4, 0, 0, 14, 0],
    [0, 0, 9, 0, 0, 20],
    [0, 0, 0, 7, 0, 4],
    [0, 0, 0, 0, 0, 0],
]
source, sink = 0, 5
print(ford_fulkerson(graph, source, sink))
375
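A sanity check for the Ford-Fulkerson sample above, assuming the reconstructed names; the 6-node capacity matrix is the classic textbook example whose maximum flow is 23.

import copy

# ford_fulkerson mutates its capacity matrix into a residual graph, so build a
# fresh copy of the textbook graph rather than reusing the one printed above.
capacities = [
    [0, 16, 13, 0, 0, 0],
    [0, 0, 10, 12, 0, 0],
    [0, 4, 0, 0, 14, 0],
    [0, 0, 9, 0, 0, 20],
    [0, 0, 0, 7, 0, 4],
    [0, 0, 0, 0, 0, 0],
]
assert ford_fulkerson(copy.deepcopy(capacities), 0, 5) == 23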
1
from collections import OrderedDict from typing import Mapping from packaging import version from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging lowercase_ = logging.get_logger(__name__) lowercase_ = { """facebook/deit-base-distilled-patch16-224""": ( """https://huggingface.co/facebook/deit-base-patch16-224/resolve/main/config.json""" ), # See all DeiT models at https://huggingface.co/models?filter=deit } class SCREAMING_SNAKE_CASE (UpperCAmelCase ): _UpperCamelCase : Any = 'deit' def __init__( self : Any , a : Union[str, Any]=768 , a : Optional[Any]=12 , a : Union[str, Any]=12 , a : Optional[int]=3_072 , a : Optional[int]="gelu" , a : Optional[Any]=0.0 , a : List[Any]=0.0 , a : int=0.02 , a : List[str]=1E-1_2 , a : Optional[int]=224 , a : Tuple=16 , a : List[Any]=3 , a : List[str]=True , a : Any=16 , **a : Union[str, Any] , )-> int: """simple docstring""" super().__init__(**a ) lowercase__ = hidden_size lowercase__ = num_hidden_layers lowercase__ = num_attention_heads lowercase__ = intermediate_size lowercase__ = hidden_act lowercase__ = hidden_dropout_prob lowercase__ = attention_probs_dropout_prob lowercase__ = initializer_range lowercase__ = layer_norm_eps lowercase__ = image_size lowercase__ = patch_size lowercase__ = num_channels lowercase__ = qkv_bias lowercase__ = encoder_stride class SCREAMING_SNAKE_CASE (UpperCAmelCase ): _UpperCamelCase : List[Any] = version.parse('1.11' ) @property def SCREAMING_SNAKE_CASE_ ( self : int )-> Mapping[str, Mapping[int, str]]: """simple docstring""" return OrderedDict( [ ('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}), ] ) @property def SCREAMING_SNAKE_CASE_ ( self : Any )-> float: """simple docstring""" return 1E-4
45
from PIL import Image


def change_brightness(img: Image, level: float) -> Image:
    """Change the brightness of a PIL Image by a given level."""

    def brightness(c: int) -> float:
        # Shift every pixel by `level`, anchored at mid-gray 128.
        return 128 + level + (c - 128)

    if not -255.0 <= level <= 255.0:
        raise ValueError("level must be between -255.0 (black) and 255.0 (white)")
    return img.point(brightness)


if __name__ == "__main__":
    # Load image
    with Image.open("image_data/lena.jpg") as img:
        # Change brightness to 100
        brigt_img = change_brightness(img, 100)
        brigt_img.save("image_data/lena_brightness.png", format="png")
45
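A self-contained usage sketch for change_brightness that avoids the external lena.jpg, assuming the reconstructed function above.

from PIL import Image

# A 4x4 mid-gray test image; raising brightness by 50 shifts every pixel to 178.
gray = Image.new("L", (4, 4), color=128)
brighter = change_brightness(gray, 50)
assert list(brighter.getdata()) == [178] * 16  # 128 + 50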
1
from ..utils import ( OptionalDependencyNotAvailable, is_flax_available, is_scipy_available, is_torch_available, is_torchsde_available, ) try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ..utils.dummy_pt_objects import * # noqa F403 else: from .scheduling_consistency_models import CMStochasticIterativeScheduler from .scheduling_ddim import DDIMScheduler from .scheduling_ddim_inverse import DDIMInverseScheduler from .scheduling_ddim_parallel import DDIMParallelScheduler from .scheduling_ddpm import DDPMScheduler from .scheduling_ddpm_parallel import DDPMParallelScheduler from .scheduling_deis_multistep import DEISMultistepScheduler from .scheduling_dpmsolver_multistep import DPMSolverMultistepScheduler from .scheduling_dpmsolver_multistep_inverse import DPMSolverMultistepInverseScheduler from .scheduling_dpmsolver_singlestep import DPMSolverSinglestepScheduler from .scheduling_euler_ancestral_discrete import EulerAncestralDiscreteScheduler from .scheduling_euler_discrete import EulerDiscreteScheduler from .scheduling_heun_discrete import HeunDiscreteScheduler from .scheduling_ipndm import IPNDMScheduler from .scheduling_k_dpm_2_ancestral_discrete import KDPMaAncestralDiscreteScheduler from .scheduling_k_dpm_2_discrete import KDPMaDiscreteScheduler from .scheduling_karras_ve import KarrasVeScheduler from .scheduling_pndm import PNDMScheduler from .scheduling_repaint import RePaintScheduler from .scheduling_sde_ve import ScoreSdeVeScheduler from .scheduling_sde_vp import ScoreSdeVpScheduler from .scheduling_unclip import UnCLIPScheduler from .scheduling_unipc_multistep import UniPCMultistepScheduler from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin from .scheduling_vq_diffusion import VQDiffusionScheduler try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ..utils.dummy_flax_objects import * # noqa F403 else: from .scheduling_ddim_flax import FlaxDDIMScheduler from .scheduling_ddpm_flax import FlaxDDPMScheduler from .scheduling_dpmsolver_multistep_flax import FlaxDPMSolverMultistepScheduler from .scheduling_karras_ve_flax import FlaxKarrasVeScheduler from .scheduling_lms_discrete_flax import FlaxLMSDiscreteScheduler from .scheduling_pndm_flax import FlaxPNDMScheduler from .scheduling_sde_ve_flax import FlaxScoreSdeVeScheduler from .scheduling_utils_flax import ( FlaxKarrasDiffusionSchedulers, FlaxSchedulerMixin, FlaxSchedulerOutput, broadcast_to_shape_from_left, ) try: if not (is_torch_available() and is_scipy_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ..utils.dummy_torch_and_scipy_objects import * # noqa F403 else: from .scheduling_lms_discrete import LMSDiscreteScheduler try: if not (is_torch_available() and is_torchsde_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ..utils.dummy_torch_and_torchsde_objects import * # noqa F403 else: from .scheduling_dpmsolver_sde import DPMSolverSDEScheduler
339
import itertools import json import os import unittest from transformers import AddedToken, LongformerTokenizer, LongformerTokenizerFast from transformers.models.longformer.tokenization_longformer import VOCAB_FILES_NAMES from transformers.testing_utils import require_tokenizers, slow from ...test_tokenization_common import TokenizerTesterMixin @require_tokenizers class lowercase__ ( _UpperCAmelCase, unittest.TestCase ): a_ =LongformerTokenizer a_ =True a_ =LongformerTokenizerFast a_ =True def UpperCAmelCase ( self )-> int: '''simple docstring''' super().setUp() # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt lowerCAmelCase__ = [ "l", "o", "w", "e", "r", "s", "t", "i", "d", "n", "\u0120", "\u0120l", "\u0120n", "\u0120lo", "\u0120low", "er", "\u0120lowest", "\u0120newer", "\u0120wider", "<unk>", ] lowerCAmelCase__ = dict(zip(__UpperCAmelCase , range(len(__UpperCAmelCase ) ) ) ) lowerCAmelCase__ = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""] lowerCAmelCase__ = {"unk_token": "<unk>"} lowerCAmelCase__ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] ) lowerCAmelCase__ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["merges_file"] ) with open(self.vocab_file , "w" , encoding="utf-8" ) as fp: fp.write(json.dumps(__UpperCAmelCase ) + "\n" ) with open(self.merges_file , "w" , encoding="utf-8" ) as fp: fp.write("\n".join(__UpperCAmelCase ) ) def UpperCAmelCase ( self , **__UpperCAmelCase )-> Tuple: '''simple docstring''' kwargs.update(self.special_tokens_map ) return self.tokenizer_class.from_pretrained(self.tmpdirname , **__UpperCAmelCase ) def UpperCAmelCase ( self , **__UpperCAmelCase )-> List[str]: '''simple docstring''' kwargs.update(self.special_tokens_map ) return self.rust_tokenizer_class.from_pretrained(self.tmpdirname , **__UpperCAmelCase ) def UpperCAmelCase ( self , __UpperCAmelCase )-> List[str]: '''simple docstring''' lowerCAmelCase__ = "lower newer" lowerCAmelCase__ = "lower newer" return input_text, output_text def UpperCAmelCase ( self )-> int: '''simple docstring''' lowerCAmelCase__ = self.tokenizer_class(self.vocab_file , self.merges_file , **self.special_tokens_map ) lowerCAmelCase__ = "lower newer" lowerCAmelCase__ = ["l", "o", "w", "er", "\u0120", "n", "e", "w", "er"] lowerCAmelCase__ = tokenizer.tokenize(__UpperCAmelCase ) # , add_prefix_space=True) self.assertListEqual(__UpperCAmelCase , __UpperCAmelCase ) lowerCAmelCase__ = tokens + [tokenizer.unk_token] lowerCAmelCase__ = [0, 1, 2, 15, 10, 9, 3, 2, 15, 19] self.assertListEqual(tokenizer.convert_tokens_to_ids(__UpperCAmelCase ) , __UpperCAmelCase ) def UpperCAmelCase ( self )-> int: '''simple docstring''' lowerCAmelCase__ = self.get_tokenizer() self.assertListEqual(tokenizer.encode("Hello world!" , add_special_tokens=__UpperCAmelCase ) , [0, 31414, 232, 328, 2] ) self.assertListEqual( tokenizer.encode("Hello world! 
cécé herlolip 418" , add_special_tokens=__UpperCAmelCase ) , [0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2] , ) @slow def UpperCAmelCase ( self )-> List[Any]: '''simple docstring''' lowerCAmelCase__ = self.tokenizer_class.from_pretrained("allenai/longformer-base-4096" ) lowerCAmelCase__ = tokenizer.encode("sequence builders" , add_special_tokens=__UpperCAmelCase ) lowerCAmelCase__ = tokenizer.encode("multi-sequence build" , add_special_tokens=__UpperCAmelCase ) lowerCAmelCase__ = tokenizer.encode( "sequence builders" , add_special_tokens=__UpperCAmelCase , add_prefix_space=__UpperCAmelCase ) lowerCAmelCase__ = tokenizer.encode( "sequence builders" , "multi-sequence build" , add_special_tokens=__UpperCAmelCase , add_prefix_space=__UpperCAmelCase ) lowerCAmelCase__ = tokenizer.build_inputs_with_special_tokens(__UpperCAmelCase ) lowerCAmelCase__ = tokenizer.build_inputs_with_special_tokens(__UpperCAmelCase , __UpperCAmelCase ) assert encoded_sentence == encoded_text_from_decode assert encoded_pair == encoded_pair_from_decode def UpperCAmelCase ( self )-> Optional[int]: '''simple docstring''' lowerCAmelCase__ = self.get_tokenizer() lowerCAmelCase__ = "Encode this sequence." lowerCAmelCase__ = tokenizer.byte_encoder[" ".encode("utf-8" )[0]] # Testing encoder arguments lowerCAmelCase__ = tokenizer.encode(__UpperCAmelCase , add_special_tokens=__UpperCAmelCase , add_prefix_space=__UpperCAmelCase ) lowerCAmelCase__ = tokenizer.convert_ids_to_tokens(encoded[0] )[0] self.assertNotEqual(__UpperCAmelCase , __UpperCAmelCase ) lowerCAmelCase__ = tokenizer.encode(__UpperCAmelCase , add_special_tokens=__UpperCAmelCase , add_prefix_space=__UpperCAmelCase ) lowerCAmelCase__ = tokenizer.convert_ids_to_tokens(encoded[0] )[0] self.assertEqual(__UpperCAmelCase , __UpperCAmelCase ) tokenizer.add_special_tokens({"bos_token": "<s>"} ) lowerCAmelCase__ = tokenizer.encode(__UpperCAmelCase , add_special_tokens=__UpperCAmelCase ) lowerCAmelCase__ = tokenizer.convert_ids_to_tokens(encoded[1] )[0] self.assertNotEqual(__UpperCAmelCase , __UpperCAmelCase ) # Testing spaces after special tokens lowerCAmelCase__ = "<mask>" tokenizer.add_special_tokens( {"mask_token": AddedToken(__UpperCAmelCase , lstrip=__UpperCAmelCase , rstrip=__UpperCAmelCase )} ) # mask token has a left space lowerCAmelCase__ = tokenizer.convert_tokens_to_ids(__UpperCAmelCase ) lowerCAmelCase__ = "Encode <mask> sequence" lowerCAmelCase__ = "Encode <mask>sequence" lowerCAmelCase__ = tokenizer.encode(__UpperCAmelCase ) lowerCAmelCase__ = encoded.index(__UpperCAmelCase ) lowerCAmelCase__ = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1] )[0] self.assertEqual(__UpperCAmelCase , __UpperCAmelCase ) lowerCAmelCase__ = tokenizer.encode(__UpperCAmelCase ) lowerCAmelCase__ = encoded.index(__UpperCAmelCase ) lowerCAmelCase__ = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1] )[0] self.assertNotEqual(__UpperCAmelCase , __UpperCAmelCase ) def UpperCAmelCase ( self )-> Union[str, Any]: '''simple docstring''' pass def UpperCAmelCase ( self )-> Optional[int]: '''simple docstring''' for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(F"{tokenizer.__class__.__name__} ({pretrained_name})" ): lowerCAmelCase__ = self.rust_tokenizer_class.from_pretrained(__UpperCAmelCase , **__UpperCAmelCase ) lowerCAmelCase__ = self.tokenizer_class.from_pretrained(__UpperCAmelCase , **__UpperCAmelCase ) lowerCAmelCase__ = "A, <mask> AllenNLP sentence." 
lowerCAmelCase__ = tokenizer_r.encode_plus(__UpperCAmelCase , add_special_tokens=__UpperCAmelCase , return_token_type_ids=__UpperCAmelCase ) lowerCAmelCase__ = tokenizer_p.encode_plus(__UpperCAmelCase , add_special_tokens=__UpperCAmelCase , return_token_type_ids=__UpperCAmelCase ) # token_type_ids should put 0 everywhere self.assertEqual(sum(tokens_r["token_type_ids"] ) , sum(tokens_p["token_type_ids"] ) ) # attention_mask should put 1 everywhere, so sum over length should be 1 self.assertEqual( sum(tokens_r["attention_mask"] ) / len(tokens_r["attention_mask"] ) , sum(tokens_p["attention_mask"] ) / len(tokens_p["attention_mask"] ) , ) lowerCAmelCase__ = tokenizer_r.convert_ids_to_tokens(tokens_r["input_ids"] ) lowerCAmelCase__ = tokenizer_p.convert_ids_to_tokens(tokens_p["input_ids"] ) # Rust correctly handles the space before the mask while python doesnt self.assertSequenceEqual(tokens_p["input_ids"] , [0, 250, 6, 50264, 3823, 487, 21992, 3645, 4, 2] ) self.assertSequenceEqual(tokens_r["input_ids"] , [0, 250, 6, 50264, 3823, 487, 21992, 3645, 4, 2] ) self.assertSequenceEqual( __UpperCAmelCase , ["<s>", "A", ",", "<mask>", "ĠAllen", "N", "LP", "Ġsentence", ".", "</s>"] ) self.assertSequenceEqual( __UpperCAmelCase , ["<s>", "A", ",", "<mask>", "ĠAllen", "N", "LP", "Ġsentence", ".", "</s>"] ) def UpperCAmelCase ( self )-> Any: '''simple docstring''' for trim_offsets, add_prefix_space in itertools.product([True, False] , repeat=2 ): lowerCAmelCase__ = self.rust_tokenizer_class.from_pretrained( self.tmpdirname , use_fast=__UpperCAmelCase , add_prefix_space=__UpperCAmelCase , trim_offsets=__UpperCAmelCase ) lowerCAmelCase__ = json.loads(tokenizer_r.backend_tokenizer.pre_tokenizer.__getstate__() ) lowerCAmelCase__ = json.loads(tokenizer_r.backend_tokenizer.post_processor.__getstate__() ) self.assertEqual(pre_tokenizer_state["add_prefix_space"] , __UpperCAmelCase ) self.assertEqual(post_processor_state["add_prefix_space"] , __UpperCAmelCase ) self.assertEqual(post_processor_state["trim_offsets"] , __UpperCAmelCase ) def UpperCAmelCase ( self )-> List[Any]: '''simple docstring''' for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(F"{tokenizer.__class__.__name__} ({pretrained_name})" ): lowerCAmelCase__ = "hello" # `hello` is a token in the vocabulary of `pretrained_name` lowerCAmelCase__ = F"{text_of_1_token} {text_of_1_token}" lowerCAmelCase__ = self.rust_tokenizer_class.from_pretrained( __UpperCAmelCase , use_fast=__UpperCAmelCase , add_prefix_space=__UpperCAmelCase , trim_offsets=__UpperCAmelCase ) lowerCAmelCase__ = tokenizer_r(__UpperCAmelCase , return_offsets_mapping=__UpperCAmelCase , add_special_tokens=__UpperCAmelCase ) self.assertEqual(encoding.offset_mapping[0] , (0, len(__UpperCAmelCase )) ) self.assertEqual( encoding.offset_mapping[1] , (len(__UpperCAmelCase ) + 1, len(__UpperCAmelCase ) + 1 + len(__UpperCAmelCase )) , ) lowerCAmelCase__ = self.rust_tokenizer_class.from_pretrained( __UpperCAmelCase , use_fast=__UpperCAmelCase , add_prefix_space=__UpperCAmelCase , trim_offsets=__UpperCAmelCase ) lowerCAmelCase__ = tokenizer_r(__UpperCAmelCase , return_offsets_mapping=__UpperCAmelCase , add_special_tokens=__UpperCAmelCase ) self.assertEqual(encoding.offset_mapping[0] , (0, len(__UpperCAmelCase )) ) self.assertEqual( encoding.offset_mapping[1] , (len(__UpperCAmelCase ) + 1, len(__UpperCAmelCase ) + 1 + len(__UpperCAmelCase )) , ) lowerCAmelCase__ = self.rust_tokenizer_class.from_pretrained( __UpperCAmelCase , use_fast=__UpperCAmelCase , 
add_prefix_space=__UpperCAmelCase , trim_offsets=__UpperCAmelCase ) lowerCAmelCase__ = tokenizer_r(__UpperCAmelCase , return_offsets_mapping=__UpperCAmelCase , add_special_tokens=__UpperCAmelCase ) self.assertEqual(encoding.offset_mapping[0] , (0, len(__UpperCAmelCase )) ) self.assertEqual( encoding.offset_mapping[1] , (len(__UpperCAmelCase ), len(__UpperCAmelCase ) + 1 + len(__UpperCAmelCase )) , ) lowerCAmelCase__ = self.rust_tokenizer_class.from_pretrained( __UpperCAmelCase , use_fast=__UpperCAmelCase , add_prefix_space=__UpperCAmelCase , trim_offsets=__UpperCAmelCase ) lowerCAmelCase__ = tokenizer_r(__UpperCAmelCase , return_offsets_mapping=__UpperCAmelCase , add_special_tokens=__UpperCAmelCase ) self.assertEqual(encoding.offset_mapping[0] , (0, len(__UpperCAmelCase )) ) self.assertEqual( encoding.offset_mapping[1] , (len(__UpperCAmelCase ), len(__UpperCAmelCase ) + 1 + len(__UpperCAmelCase )) , ) lowerCAmelCase__ = F" {text}" # tokenizer_r = self.rust_tokenizer_class.from_pretrained( # pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=True # ) # encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False) # self.assertEqual(encoding.offset_mapping[0], (1, 1 + len(text_of_1_token))) # self.assertEqual( # encoding.offset_mapping[1], # (1 + len(text_of_1_token) + 1, 1 + len(text_of_1_token) + 1 + len(text_of_1_token)), # ) lowerCAmelCase__ = self.rust_tokenizer_class.from_pretrained( __UpperCAmelCase , use_fast=__UpperCAmelCase , add_prefix_space=__UpperCAmelCase , trim_offsets=__UpperCAmelCase ) lowerCAmelCase__ = tokenizer_r(__UpperCAmelCase , return_offsets_mapping=__UpperCAmelCase , add_special_tokens=__UpperCAmelCase ) self.assertEqual(encoding.offset_mapping[0] , (1, 1 + len(__UpperCAmelCase )) ) self.assertEqual( encoding.offset_mapping[1] , (1 + len(__UpperCAmelCase ) + 1, 1 + len(__UpperCAmelCase ) + 1 + len(__UpperCAmelCase )) , ) lowerCAmelCase__ = self.rust_tokenizer_class.from_pretrained( __UpperCAmelCase , use_fast=__UpperCAmelCase , add_prefix_space=__UpperCAmelCase , trim_offsets=__UpperCAmelCase ) lowerCAmelCase__ = tokenizer_r(__UpperCAmelCase , return_offsets_mapping=__UpperCAmelCase , add_special_tokens=__UpperCAmelCase ) self.assertEqual(encoding.offset_mapping[0] , (0, 1 + len(__UpperCAmelCase )) ) self.assertEqual( encoding.offset_mapping[1] , (1 + len(__UpperCAmelCase ), 1 + len(__UpperCAmelCase ) + 1 + len(__UpperCAmelCase )) , ) lowerCAmelCase__ = self.rust_tokenizer_class.from_pretrained( __UpperCAmelCase , use_fast=__UpperCAmelCase , add_prefix_space=__UpperCAmelCase , trim_offsets=__UpperCAmelCase ) lowerCAmelCase__ = tokenizer_r(__UpperCAmelCase , return_offsets_mapping=__UpperCAmelCase , add_special_tokens=__UpperCAmelCase ) self.assertEqual(encoding.offset_mapping[0] , (0, 1 + len(__UpperCAmelCase )) ) self.assertEqual( encoding.offset_mapping[1] , (1 + len(__UpperCAmelCase ), 1 + len(__UpperCAmelCase ) + 1 + len(__UpperCAmelCase )) , )
339
1
'''simple docstring''' # This is the module that test_patching.py uses to test patch_submodule() import os # noqa: this is just for tests import os as renamed_os # noqa: this is just for tests from os import path # noqa: this is just for tests from os import path as renamed_path # noqa: this is just for tests from os.path import join # noqa: this is just for tests from os.path import join as renamed_join # noqa: this is just for tests __A =open # noqa: we just need to have a builtin inside this module to test it properly
113
'''simple docstring''' import argparse import logging import sys from unittest.mock import patch import run_glue_deebert from transformers.testing_utils import TestCasePlus, get_gpu_count, require_torch_non_multi_gpu, slow logging.basicConfig(level=logging.DEBUG) __A =logging.getLogger() def _UpperCamelCase ( ): UpperCAmelCase__ : List[Any] = argparse.ArgumentParser() parser.add_argument("""-f""" ) UpperCAmelCase__ : Optional[Any] = parser.parse_args() return args.f class _snake_case ( a__ ): def snake_case__ ( self): UpperCAmelCase__ : Tuple = logging.StreamHandler(sys.stdout) logger.addHandler(_lowerCamelCase) def snake_case__ ( self , _lowerCamelCase): UpperCAmelCase__ : Any = get_gpu_count() if n_gpu > 1: pass # XXX: doesn't quite work with n_gpu > 1 https://github.com/huggingface/transformers/issues/10560 # script = f"{self.examples_dir_str}/research_projects/deebert/run_glue_deebert.py" # distributed_args = f"-m torch.distributed.launch --nproc_per_node={n_gpu} {script}".split() # cmd = [sys.executable] + distributed_args + args # execute_subprocess_async(cmd, env=self.get_env()) # XXX: test the results - need to save them first into .json file else: args.insert(0 , """run_glue_deebert.py""") with patch.object(_lowerCamelCase , """argv""" , _lowerCamelCase): UpperCAmelCase__ : Union[str, Any] = run_glue_deebert.main() for value in result.values(): self.assertGreaterEqual(_lowerCamelCase , 0.666) @slow @require_torch_non_multi_gpu def snake_case__ ( self): UpperCAmelCase__ : List[str] = """ --model_type roberta --model_name_or_path roberta-base --task_name MRPC --do_train --do_eval --do_lower_case --data_dir ./tests/fixtures/tests_samples/MRPC/ --max_seq_length 128 --per_gpu_eval_batch_size=1 --per_gpu_train_batch_size=8 --learning_rate 2e-4 --num_train_epochs 3 --overwrite_output_dir --seed 42 --output_dir ./examples/deebert/saved_models/roberta-base/MRPC/two_stage --plot_data_dir ./examples/deebert/results/ --save_steps 0 --overwrite_cache --eval_after_first_stage """.split() self.run_and_check(_lowerCamelCase) UpperCAmelCase__ : Dict = """ --model_type roberta --model_name_or_path ./examples/deebert/saved_models/roberta-base/MRPC/two_stage --task_name MRPC --do_eval --do_lower_case --data_dir ./tests/fixtures/tests_samples/MRPC/ --output_dir ./examples/deebert/saved_models/roberta-base/MRPC/two_stage --plot_data_dir ./examples/deebert/results/ --max_seq_length 128 --eval_each_highway --eval_highway --overwrite_cache --per_gpu_eval_batch_size=1 """.split() self.run_and_check(_lowerCamelCase) UpperCAmelCase__ : Any = """ --model_type roberta --model_name_or_path ./examples/deebert/saved_models/roberta-base/MRPC/two_stage --task_name MRPC --do_eval --do_lower_case --data_dir ./tests/fixtures/tests_samples/MRPC/ --output_dir ./examples/deebert/saved_models/roberta-base/MRPC/two_stage --plot_data_dir ./examples/deebert/results/ --max_seq_length 128 --early_exit_entropy 0.1 --eval_highway --overwrite_cache --per_gpu_eval_batch_size=1 """.split() self.run_and_check(_lowerCamelCase)
113
1
def manhattan_distance(point_a: list, point_b: list) -> float:
    """Return the Manhattan (L1) distance between two n-dimensional points."""
    _validate_point(point_a)
    _validate_point(point_b)
    if len(point_a) != len(point_b):
        raise ValueError("Both points must be in the same n-dimensional space")
    return float(sum(abs(a - b) for a, b in zip(point_a, point_b)))


def _validate_point(point: list[float]) -> None:
    """Raise TypeError/ValueError unless `point` is a non-empty list of numbers."""
    if point:
        if isinstance(point, list):
            for item in point:
                if not isinstance(item, (int, float)):
                    msg = (
                        "Expected a list of numbers as input, found "
                        f"{type(item).__name__}"
                    )
                    raise TypeError(msg)
        else:
            msg = f"Expected a list of numbers as input, found {type(point).__name__}"
            raise TypeError(msg)
    else:
        raise ValueError("Missing an input")


def manhattan_distance_one_liner(point_a: list, point_b: list) -> float:
    """One-liner variant of manhattan_distance."""
    _validate_point(point_a)
    _validate_point(point_b)
    if len(point_a) != len(point_b):
        raise ValueError("Both points must be in the same n-dimensional space")
    return float(sum(abs(x - y) for x, y in zip(point_a, point_b)))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
136
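A quick check of the Manhattan-distance helpers above, assuming the reconstructed names.

assert manhattan_distance([1, 1], [2, 2]) == 2.0
assert manhattan_distance([1.5, 1.5], [0.5, -0.5]) == 3.0  # |1.5-0.5| + |1.5+0.5|
assert manhattan_distance_one_liner([1.5, 1.5], [0.5, -0.5]) == 3.0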
import unittest from transformers.testing_utils import require_bsa from transformers.utils import is_bsa_available from ...test_feature_extraction_common import FeatureExtractionSavingTestMixin if is_bsa_available(): from transformers import MarkupLMFeatureExtractor class __A( unittest.TestCase ): def __init__( self , _snake_case ) -> Dict: '''simple docstring''' __a = parent def SCREAMING_SNAKE_CASE_ ( self ) -> Union[str, Any]: '''simple docstring''' return {} def __lowerCAmelCase ( ) -> Dict: __a = '''<HTML> <HEAD> <TITLE>sample document</TITLE> </HEAD> <BODY BGCOLOR="FFFFFF"> <HR> <a href="http://google.com">Goog</a> <H1>This is one header</H1> <H2>This is a another Header</H2> <P>Travel from <P> <B>SFO to JFK</B> <BR> <B><I>on May 2, 2015 at 2:00 pm. For details go to confirm.com </I></B> <HR> <div style="color:#0000FF"> <h3>Traveler <b> name </b> is <p> John Doe </p> </div>''' __a = ''' <!DOCTYPE html> <html> <body> <h1>My First Heading</h1> <p>My first paragraph.</p> </body> </html> ''' return [html_string_a, html_string_a] @require_bsa class __A( a , unittest.TestCase ): snake_case_ = MarkupLMFeatureExtractor if is_bsa_available() else None def SCREAMING_SNAKE_CASE_ ( self ) -> Any: '''simple docstring''' __a = MarkupLMFeatureExtractionTester(self ) @property def SCREAMING_SNAKE_CASE_ ( self ) -> Optional[Any]: '''simple docstring''' return self.feature_extract_tester.prepare_feat_extract_dict() def SCREAMING_SNAKE_CASE_ ( self ) -> List[str]: '''simple docstring''' __a = self.feature_extraction_class() # Test not batched input __a = get_html_strings()[0] __a = feature_extractor(_snake_case ) # fmt: off __a = [['''sample document''', '''Goog''', '''This is one header''', '''This is a another Header''', '''Travel from''', '''SFO to JFK''', '''on May 2, 2015 at 2:00 pm. For details go to confirm.com''', '''Traveler''', '''name''', '''is''', '''John Doe''']] __a = [['''/html/head/title''', '''/html/body/a''', '''/html/body/h1''', '''/html/body/h2''', '''/html/body/p''', '''/html/body/p/p/b[1]''', '''/html/body/p/p/b[2]/i''', '''/html/body/p/p/div/h3''', '''/html/body/p/p/div/h3/b''', '''/html/body/p/p/div/h3''', '''/html/body/p/p/div/h3/p''']] # fmt: on self.assertEqual(encoding.nodes , _snake_case ) self.assertEqual(encoding.xpaths , _snake_case ) # Test batched __a = get_html_strings() __a = feature_extractor(_snake_case ) # fmt: off __a = expected_nodes + [['''My First Heading''', '''My first paragraph.''']] __a = expected_xpaths + [['''/html/body/h1''', '''/html/body/p''']] self.assertEqual(len(encoding.nodes ) , 2 ) self.assertEqual(len(encoding.xpaths ) , 2 ) self.assertEqual(encoding.nodes , _snake_case ) self.assertEqual(encoding.xpaths , _snake_case )
219
0
from __future__ import annotations

from collections.abc import Generator


def sieve() -> Generator[int, None, None]:
    """Yield the primes in increasing order via an incremental sieve."""
    factor_map: dict[int, int] = {}
    prime = 2
    while True:
        factor = factor_map.pop(prime, None)
        if factor:
            # `prime` is composite: slide its known factor to the next multiple.
            x = factor + prime
            while x in factor_map:
                x += factor
            factor_map[x] = factor
        else:
            # `prime` is prime: mark its square as the first composite to skip.
            factor_map[prime * prime] = prime
            yield prime
        prime += 1


def solution(limit: float = 1e10) -> int:
    primes = sieve()
    n = 1
    while True:
        prime = next(primes)
        if (2 * prime * n) > limit:
            return n
        # Ignore the next prime as the remainder will be 2.
        next(primes)
        n += 2


if __name__ == "__main__":
    print(solution())
711
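A small demonstration of the incremental sieve generator above, assuming the reconstructed sieve().

from itertools import islice

# The generator is infinite; take just the first ten primes.
assert list(islice(sieve(), 10)) == [2, 3, 5, 7, 11, 13, 17, 19, 23, 29]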
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


_import_structure = {
    "configuration_xmod": [
        "XMOD_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "XmodConfig",
        "XmodOnnxConfig",
    ],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_xmod"] = [
        "XMOD_PRETRAINED_MODEL_ARCHIVE_LIST",
        "XmodForCausalLM",
        "XmodForMaskedLM",
        "XmodForMultipleChoice",
        "XmodForQuestionAnswering",
        "XmodForSequenceClassification",
        "XmodForTokenClassification",
        "XmodModel",
        "XmodPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_xmod import XMOD_PRETRAINED_CONFIG_ARCHIVE_MAP, XmodConfig, XmodOnnxConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_xmod import (
            XMOD_PRETRAINED_MODEL_ARCHIVE_LIST,
            XmodForCausalLM,
            XmodForMaskedLM,
            XmodForMultipleChoice,
            XmodForQuestionAnswering,
            XmodForSequenceClassification,
            XmodForTokenClassification,
            XmodModel,
            XmodPreTrainedModel,
        )
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
from json import JSONDecodeError  # Workaround for requests.exceptions.JSONDecodeError

import requests


def get_openlibrary_data(olid: str = "isbn/0140328726") -> dict:
    """Fetch a JSON record for an Open Library id of the form ``<kind>/<identifier>``."""
    new_olid = olid.strip().strip("/")  # Remove leading/trailing whitespace & slashes
    if new_olid.count("/") != 1:
        msg = f"{olid} is not a valid Open Library olid"
        raise ValueError(msg)
    return requests.get(f"https://openlibrary.org/{new_olid}.json").json()


def summarize_book(ol_book_data: dict) -> dict:
    """Given Open Library book data, return a human-readable summary dict."""
    desired_keys = {
        "title": "Title",
        "publish_date": "Publish date",
        "authors": "Authors",
        "number_of_pages": "Number of pages:",
        "first_sentence": "First sentence",
        "isbn_10": "ISBN (10)",
        "isbn_13": "ISBN (13)",
    }
    data = {better_key: ol_book_data[key] for key, better_key in desired_keys.items()}
    data["Authors"] = [
        get_openlibrary_data(author["key"])["name"] for author in data["Authors"]
    ]
    data["First sentence"] = data["First sentence"]["value"]
    for key, value in data.items():
        if isinstance(value, list):
            data[key] = ", ".join(value)
    return data


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    while True:
        isbn = input("\nEnter the ISBN code to search (or 'quit' to stop): ").strip()
        if isbn.lower() in ("", "q", "quit", "exit", "stop"):
            break
        if len(isbn) not in (10, 13) or not isbn.isdigit():
            print(f"Sorry, {isbn} is not a valid ISBN. Please, input a valid ISBN.")
            continue
        print(f"\nSearching Open Library for ISBN: {isbn}...\n")
        try:
            book_summary = summarize_book(get_openlibrary_data(f"isbn/{isbn}"))
            print("\n".join(f"{key}: {value}" for key, value in book_summary.items()))
        except JSONDecodeError:  # Workaround for requests.exceptions.RequestException
            print(f"Sorry, there are no results for ISBN: {isbn}.")
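# Usage sketch (hits openlibrary.org, so it needs network access). The sample
# olid is the default used above; the helper name `_demo` is illustrative.
def _demo() -> None:
    book = summarize_book(get_openlibrary_data("isbn/0140328726"))
    for key, value in book.items():
        print(f"{key}: {value}")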
import warnings

from ..trainer import Trainer
from ..utils import logging


logger = logging.get_logger(__name__)


class SageMakerTrainer(Trainer):
    def __init__(self, args=None, **kwargs):
        warnings.warn(
            "`SageMakerTrainer` is deprecated and will be removed in v5 of Transformers. You can use `Trainer` "
            "instead.",
            FutureWarning,
        )
        super().__init__(args=args, **kwargs)
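# Migration sketch for the deprecation above: SageMakerTrainer is a thin shim,
# so call sites can switch to Trainer directly. The model/dataset names below
# are placeholders.
#
# from transformers import Trainer, TrainingArguments
#
# trainer = Trainer(
#     model=model,  # any PreTrainedModel
#     args=TrainingArguments(output_dir="out"),
#     train_dataset=train_dataset,
# )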
import numpy as np import torch from torch.utils.data import Dataset, IterableDataset from ..utils.generic import ModelOutput class _A ( snake_case ): '''simple docstring''' def __init__( self ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ): '''simple docstring''' snake_case : Any = dataset snake_case : str = process snake_case : Dict = params def __len__( self ): '''simple docstring''' return len(self.dataset ) def __getitem__( self ,SCREAMING_SNAKE_CASE_ ): '''simple docstring''' snake_case : List[Any] = self.dataset[i] snake_case : int = self.process(SCREAMING_SNAKE_CASE_ ,**self.params ) return processed class _A ( snake_case ): '''simple docstring''' def __init__( self ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_=None ): '''simple docstring''' snake_case : List[str] = loader snake_case : Union[str, Any] = infer snake_case : List[Any] = params if loader_batch_size == 1: # Let's spare some time by deactivating altogether snake_case : Tuple = None snake_case : Tuple = loader_batch_size # Internal bookkeeping snake_case : List[str] = None snake_case : Dict = None def __len__( self ): '''simple docstring''' return len(self.loader ) def __iter__( self ): '''simple docstring''' snake_case : Optional[Any] = iter(self.loader ) return self def snake_case_ ( self ): '''simple docstring''' if isinstance(self._loader_batch_data ,torch.Tensor ): # Batch data is simple tensor, just fetch the slice snake_case : Optional[Any] = self._loader_batch_data[self._loader_batch_index] else: # Batch data is assumed to be BaseModelOutput (or dict) snake_case : str = {} for k, element in self._loader_batch_data.items(): if isinstance(SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ): # Convert ModelOutput to tuple first snake_case : List[str] = element.to_tuple() if isinstance(element[0] ,torch.Tensor ): snake_case : List[Any] = tuple(el[self._loader_batch_index].unsqueeze(0 ) for el in element ) elif isinstance(element[0] ,np.ndarray ): snake_case : Tuple = tuple(np.expand_dims(el[self._loader_batch_index] ,0 ) for el in element ) continue if k in {"hidden_states", "past_key_values", "attentions"} and isinstance(SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ): # Those are stored as lists of tensors so need specific unbatching. if isinstance(element[0] ,torch.Tensor ): snake_case : Optional[Any] = tuple(el[self._loader_batch_index].unsqueeze(0 ) for el in element ) elif isinstance(element[0] ,np.ndarray ): snake_case : Dict = tuple(np.expand_dims(el[self._loader_batch_index] ,0 ) for el in element ) continue if element is None: # This can happen for optional data that get passed around snake_case : str = None elif isinstance(element[self._loader_batch_index] ,torch.Tensor ): # Take correct batch data, but make it looked like batch_size=1 # For compatibility with other methods within transformers snake_case : Dict = element[self._loader_batch_index].unsqueeze(0 ) elif isinstance(element[self._loader_batch_index] ,np.ndarray ): # Take correct batch data, but make it looked like batch_size=1 # For compatibility with other methods within transformers snake_case : Tuple = np.expand_dims(element[self._loader_batch_index] ,0 ) else: # This is typically a list, so no need to `unsqueeze`. 
snake_case : str = element[self._loader_batch_index] # Recreate the element by reusing the original class to make it look # batch_size=1 snake_case : int = self._loader_batch_data.__class__(SCREAMING_SNAKE_CASE_ ) self._loader_batch_index += 1 return result def snake_case_ ( self ): '''simple docstring''' if self._loader_batch_index is not None and self._loader_batch_index < self.loader_batch_size: # We are currently unrolling a batch so we just need to return # the current item within a batch return self.loader_batch_item() # We're out of items within a batch snake_case : int = next(self.iterator ) snake_case : Optional[int] = self.infer(SCREAMING_SNAKE_CASE_ ,**self.params ) # We now have a batch of "inferred things". if self.loader_batch_size is not None: # Try to infer the size of the batch if isinstance(SCREAMING_SNAKE_CASE_ ,torch.Tensor ): snake_case : int = processed else: snake_case : Optional[Any] = list(processed.keys() )[0] snake_case : List[str] = processed[key] if isinstance(SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ): snake_case : Tuple = len(SCREAMING_SNAKE_CASE_ ) else: snake_case : Any = first_tensor.shape[0] if 0 < observed_batch_size < self.loader_batch_size: # could be last batch so we can't unroll as many # elements. snake_case : str = observed_batch_size # Setting internal index to unwrap the batch snake_case : str = processed snake_case : Tuple = 0 return self.loader_batch_item() else: # We're not unrolling batches return processed class _A ( snake_case ): '''simple docstring''' def __init__( self ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_=None ): '''simple docstring''' super().__init__(SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ) def __iter__( self ): '''simple docstring''' snake_case : Any = iter(self.loader ) snake_case : Optional[Any] = None return self def snake_case_ ( self ): '''simple docstring''' if self.subiterator is None: snake_case : Union[str, Any] = self.infer(next(self.iterator ) ,**self.params ) try: # Try to return next item snake_case : Any = next(self.subiterator ) except StopIteration: # When a preprocess iterator ends, we can start lookig at the next item # ChunkIterator will keep feeding until ALL elements of iterator # all have created their subiterator and have been iterating against. 
# # Another way to look at it, is we're basically flattening lists of lists # into a single list, but with generators snake_case : List[str] = self.infer(next(self.iterator ) ,**self.params ) snake_case : Any = next(self.subiterator ) return processed class _A ( snake_case ): '''simple docstring''' def __iter__( self ): '''simple docstring''' snake_case : str = iter(self.loader ) return self def snake_case_ ( self ): '''simple docstring''' snake_case : Optional[int] = False snake_case : str = [] if self._loader_batch_index is not None and self._loader_batch_index < self.loader_batch_size: while self._loader_batch_index < self.loader_batch_size: snake_case : Any = self.loader_batch_item() snake_case : str = item.pop("""is_last""" ) accumulator.append(SCREAMING_SNAKE_CASE_ ) if is_last: return accumulator while not is_last: snake_case : Union[str, Any] = self.infer(next(self.iterator ) ,**self.params ) if self.loader_batch_size is not None: if isinstance(SCREAMING_SNAKE_CASE_ ,torch.Tensor ): snake_case : Optional[int] = processed else: snake_case : Any = list(processed.keys() )[0] snake_case : str = processed[key] if isinstance(SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ): snake_case : Union[str, Any] = len(SCREAMING_SNAKE_CASE_ ) else: snake_case : Dict = first_tensor.shape[0] if 0 < observed_batch_size < self.loader_batch_size: # could be last batch so we can't unroll as many # elements. snake_case : int = observed_batch_size snake_case : List[Any] = processed snake_case : Union[str, Any] = 0 while self._loader_batch_index < self.loader_batch_size: snake_case : Union[str, Any] = self.loader_batch_item() snake_case : str = item.pop("""is_last""" ) accumulator.append(SCREAMING_SNAKE_CASE_ ) if is_last: return accumulator else: snake_case : Dict = processed snake_case : Union[str, Any] = item.pop("""is_last""" ) accumulator.append(SCREAMING_SNAKE_CASE_ ) return accumulator class _A ( snake_case ): '''simple docstring''' def __init__( self ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ): '''simple docstring''' snake_case : Any = dataset snake_case : Optional[Any] = key def __len__( self ): '''simple docstring''' return len(self.dataset ) def __getitem__( self ,SCREAMING_SNAKE_CASE_ ): '''simple docstring''' return self.dataset[i][self.key] class _A ( snake_case ): '''simple docstring''' def __init__( self ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ): '''simple docstring''' snake_case : int = dataset snake_case : Any = keya snake_case : Any = keya def __len__( self ): '''simple docstring''' return len(self.dataset ) def __getitem__( self ,SCREAMING_SNAKE_CASE_ ): '''simple docstring''' return {"text": self.dataset[i][self.keya], "text_pair": self.dataset[i][self.keya]}
def longest_distance(graph: dict[int, list[int]]) -> None:
    """Print the length of the longest path in a DAG, via Kahn's topological sort."""
    indegree = [0] * len(graph)
    queue = []
    long_dist = [1] * len(graph)

    for values in graph.values():
        for i in values:
            indegree[i] += 1

    for i in range(len(graph)):
        if indegree[i] == 0:
            queue.append(i)

    while queue:
        vertex = queue.pop(0)
        for x in graph[vertex]:
            indegree[x] -= 1
            if long_dist[vertex] + 1 > long_dist[x]:
                long_dist[x] = long_dist[vertex] + 1
            if indegree[x] == 0:
                queue.append(x)

    print(max(long_dist))


# Adjacency list of Graph
graph = {0: [2, 3, 4], 1: [2, 7], 2: [5], 3: [5, 7], 4: [7], 5: [6], 6: [7], 7: []}
longest_distance(graph)
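# Illustrative check: in the chain 0 -> 1 -> 2 -> 3 the longest path visits all
# four vertices, so this prints 4.
chain = {0: [1], 1: [2], 2: [3], 3: []}
longest_distance(chain)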
from math import pi, sqrt


def gamma(num: float) -> float:
    """Gamma function for positive integers and half-integers, via the recurrence
    gamma(x) = (x - 1) * gamma(x - 1) with gamma(1) = 1 and gamma(0.5) = sqrt(pi)."""
    if num <= 0:
        raise ValueError("math domain error")
    if num > 171.5:
        raise OverflowError("math range error")
    elif num - int(num) not in (0, 0.5):
        raise NotImplementedError("num must be an integer or a half-integer")
    elif num == 0.5:
        return sqrt(pi)
    else:
        return 1.0 if num == 1 else (num - 1) * gamma(num - 1)


def test_gamma() -> None:
    assert gamma(0.5) == sqrt(pi)
    assert gamma(1) == 1.0
    assert gamma(2) == 1.0


if __name__ == "__main__":
    from doctest import testmod

    testmod()
    num = 1.0
    while num:
        num = float(input("Gamma of: "))
        if not num:
            break  # gamma(0) is undefined, so 0 exits cleanly
        print(f"gamma({num}) = {gamma(num)}")
        print("\nEnter 0 to exit...")
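# Cross-check of the recurrence above against math.gamma, which handles
# arbitrary reals; the two should agree to floating-point accuracy.
import math

assert math.isclose(gamma(3.5), math.gamma(3.5))  # 1.875 * sqrt(pi) ~= 3.3234
assert math.isclose(gamma(6), math.gamma(6))  # 5! = 120.0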
'''simple docstring''' import gc import random import unittest import numpy as np import torch from PIL import Image from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer from diffusers import AutoencoderKL, DDIMScheduler, DDPMScheduler, StableDiffusionUpscalePipeline, UNetaDConditionModel from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu enable_full_determinism() class A ( unittest.TestCase ): def __lowerCAmelCase ( self ) -> Tuple: # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() @property def __lowerCAmelCase ( self ) -> List[Any]: _a = 1 _a = 3 _a = (3_2, 3_2) _a = floats_tensor((batch_size, num_channels) + sizes , rng=random.Random(0 ) ).to(snake_case_ ) return image @property def __lowerCAmelCase ( self ) -> int: torch.manual_seed(0 ) _a = UNetaDConditionModel( block_out_channels=(3_2, 3_2, 6_4) , layers_per_block=2 , sample_size=3_2 , in_channels=7 , out_channels=4 , down_block_types=("DownBlock2D", "CrossAttnDownBlock2D", "CrossAttnDownBlock2D") , up_block_types=("CrossAttnUpBlock2D", "CrossAttnUpBlock2D", "UpBlock2D") , cross_attention_dim=3_2 , attention_head_dim=8 , use_linear_projection=snake_case_ , only_cross_attention=(True, True, False) , num_class_embeds=1_0_0 , ) return model @property def __lowerCAmelCase ( self ) -> List[Any]: torch.manual_seed(0 ) _a = AutoencoderKL( block_out_channels=[3_2, 3_2, 6_4] , in_channels=3 , out_channels=3 , down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D", "DownEncoderBlock2D"] , up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D"] , latent_channels=4 , ) return model @property def __lowerCAmelCase ( self ) -> Optional[int]: torch.manual_seed(0 ) _a = CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=3_2 , intermediate_size=3_7 , layer_norm_eps=1E-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_0_0_0 , hidden_act="gelu" , projection_dim=5_1_2 , ) return CLIPTextModel(snake_case_ ) def __lowerCAmelCase ( self ) -> Optional[int]: _a = "cpu" # ensure determinism for the device-dependent torch.Generator _a = self.dummy_cond_unet_upscale _a = DDPMScheduler() _a = DDIMScheduler(prediction_type="v_prediction" ) _a = self.dummy_vae _a = self.dummy_text_encoder _a = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" ) _a = self.dummy_image.cpu().permute(0 , 2 , 3 , 1 )[0] _a = Image.fromarray(np.uinta(snake_case_ ) ).convert("RGB" ).resize((6_4, 6_4) ) # make sure here that pndm scheduler skips prk _a = StableDiffusionUpscalePipeline( unet=snake_case_ , low_res_scheduler=snake_case_ , scheduler=snake_case_ , vae=snake_case_ , text_encoder=snake_case_ , tokenizer=snake_case_ , max_noise_level=3_5_0 , ) _a = sd_pipe.to(snake_case_ ) sd_pipe.set_progress_bar_config(disable=snake_case_ ) _a = "A painting of a squirrel eating a burger" _a = torch.Generator(device=snake_case_ ).manual_seed(0 ) _a = sd_pipe( [prompt] , image=snake_case_ , generator=snake_case_ , guidance_scale=6.0 , noise_level=2_0 , num_inference_steps=2 , output_type="np" , ) _a = output.images _a = torch.Generator(device=snake_case_ ).manual_seed(0 ) _a = sd_pipe( [prompt] , image=snake_case_ , generator=snake_case_ , guidance_scale=6.0 , noise_level=2_0 , num_inference_steps=2 , output_type="np" , return_dict=snake_case_ , )[0] _a = image[0, -3:, -3:, -1] _a = image_from_tuple[0, -3:, -3:, -1] _a = 
low_res_image.size[0] * 4 assert image.shape == (1, expected_height_width, expected_height_width, 3) _a = np.array([0.3_113, 0.3_910, 0.4_272, 0.4_859, 0.5_061, 0.4_652, 0.5_362, 0.5_715, 0.5_661] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2 def __lowerCAmelCase ( self ) -> int: _a = "cpu" # ensure determinism for the device-dependent torch.Generator _a = self.dummy_cond_unet_upscale _a = DDPMScheduler() _a = DDIMScheduler(prediction_type="v_prediction" ) _a = self.dummy_vae _a = self.dummy_text_encoder _a = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" ) _a = self.dummy_image.cpu().permute(0 , 2 , 3 , 1 )[0] _a = Image.fromarray(np.uinta(snake_case_ ) ).convert("RGB" ).resize((6_4, 6_4) ) # make sure here that pndm scheduler skips prk _a = StableDiffusionUpscalePipeline( unet=snake_case_ , low_res_scheduler=snake_case_ , scheduler=snake_case_ , vae=snake_case_ , text_encoder=snake_case_ , tokenizer=snake_case_ , max_noise_level=3_5_0 , ) _a = sd_pipe.to(snake_case_ ) sd_pipe.set_progress_bar_config(disable=snake_case_ ) _a = "A painting of a squirrel eating a burger" _a = sd_pipe( 2 * [prompt] , image=2 * [low_res_image] , guidance_scale=6.0 , noise_level=2_0 , num_inference_steps=2 , output_type="np" , ) _a = output.images assert image.shape[0] == 2 _a = torch.Generator(device=snake_case_ ).manual_seed(0 ) _a = sd_pipe( [prompt] , image=snake_case_ , generator=snake_case_ , num_images_per_prompt=2 , guidance_scale=6.0 , noise_level=2_0 , num_inference_steps=2 , output_type="np" , ) _a = output.images assert image.shape[0] == 2 @unittest.skipIf(torch_device != "cuda" , "This test requires a GPU" ) def __lowerCAmelCase ( self ) -> Any: _a = self.dummy_cond_unet_upscale _a = DDPMScheduler() _a = DDIMScheduler(prediction_type="v_prediction" ) _a = self.dummy_vae _a = self.dummy_text_encoder _a = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" ) _a = self.dummy_image.cpu().permute(0 , 2 , 3 , 1 )[0] _a = Image.fromarray(np.uinta(snake_case_ ) ).convert("RGB" ).resize((6_4, 6_4) ) # put models in fp16, except vae as it overflows in fp16 _a = unet.half() _a = text_encoder.half() # make sure here that pndm scheduler skips prk _a = StableDiffusionUpscalePipeline( unet=snake_case_ , low_res_scheduler=snake_case_ , scheduler=snake_case_ , vae=snake_case_ , text_encoder=snake_case_ , tokenizer=snake_case_ , max_noise_level=3_5_0 , ) _a = sd_pipe.to(snake_case_ ) sd_pipe.set_progress_bar_config(disable=snake_case_ ) _a = "A painting of a squirrel eating a burger" _a = torch.manual_seed(0 ) _a = sd_pipe( [prompt] , image=snake_case_ , generator=snake_case_ , num_inference_steps=2 , output_type="np" , ).images _a = low_res_image.size[0] * 4 assert image.shape == (1, expected_height_width, expected_height_width, 3) @slow @require_torch_gpu class A ( unittest.TestCase ): def __lowerCAmelCase ( self ) -> Optional[Any]: # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() def __lowerCAmelCase ( self ) -> Optional[int]: _a = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/sd2-upscale/low_res_cat.png" ) _a = load_numpy( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-upscale" "/upsampled_cat.npy" ) _a = "stabilityai/stable-diffusion-x4-upscaler" _a = StableDiffusionUpscalePipeline.from_pretrained(snake_case_ ) pipe.to(snake_case_ ) 
pipe.set_progress_bar_config(disable=snake_case_ ) pipe.enable_attention_slicing() _a = "a cat sitting on a park bench" _a = torch.manual_seed(0 ) _a = pipe( prompt=snake_case_ , image=snake_case_ , generator=snake_case_ , output_type="np" , ) _a = output.images[0] assert image.shape == (5_1_2, 5_1_2, 3) assert np.abs(expected_image - image ).max() < 1E-3 def __lowerCAmelCase ( self ) -> Optional[Any]: _a = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/sd2-upscale/low_res_cat.png" ) _a = load_numpy( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-upscale" "/upsampled_cat_fp16.npy" ) _a = "stabilityai/stable-diffusion-x4-upscaler" _a = StableDiffusionUpscalePipeline.from_pretrained( snake_case_ , torch_dtype=torch.floataa , ) pipe.to(snake_case_ ) pipe.set_progress_bar_config(disable=snake_case_ ) pipe.enable_attention_slicing() _a = "a cat sitting on a park bench" _a = torch.manual_seed(0 ) _a = pipe( prompt=snake_case_ , image=snake_case_ , generator=snake_case_ , output_type="np" , ) _a = output.images[0] assert image.shape == (5_1_2, 5_1_2, 3) assert np.abs(expected_image - image ).max() < 5E-1 def __lowerCAmelCase ( self ) -> Optional[Any]: torch.cuda.empty_cache() torch.cuda.reset_max_memory_allocated() torch.cuda.reset_peak_memory_stats() _a = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/sd2-upscale/low_res_cat.png" ) _a = "stabilityai/stable-diffusion-x4-upscaler" _a = StableDiffusionUpscalePipeline.from_pretrained( snake_case_ , torch_dtype=torch.floataa , ) pipe.to(snake_case_ ) pipe.set_progress_bar_config(disable=snake_case_ ) pipe.enable_attention_slicing(1 ) pipe.enable_sequential_cpu_offload() _a = "a cat sitting on a park bench" _a = torch.manual_seed(0 ) _a = pipe( prompt=snake_case_ , image=snake_case_ , generator=snake_case_ , num_inference_steps=5 , output_type="np" , ) _a = torch.cuda.max_memory_allocated() # make sure that less than 2.9 GB is allocated assert mem_bytes < 2.9 * 1_0**9
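# End-to-end sketch of the pipeline exercised above: upscale a low-resolution
# image 4x with the public x4 upscaler checkpoint. Needs a GPU and a network
# download; the prompt and output path are illustrative.
#
# import torch
# from diffusers import StableDiffusionUpscalePipeline
# from diffusers.utils import load_image
#
# pipe = StableDiffusionUpscalePipeline.from_pretrained(
#     "stabilityai/stable-diffusion-x4-upscaler", torch_dtype=torch.float16
# ).to("cuda")
# low_res = load_image(
#     "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
#     "/sd2-upscale/low_res_cat.png"
# )
# image = pipe(prompt="a cat sitting on a park bench", image=low_res).images[0]
# image.save("upsampled_cat.png")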
'''simple docstring''' import io import itertools import json from dataclasses import dataclass from typing import Optional import pyarrow as pa import pyarrow.json as paj import datasets from datasets.table import table_cast from datasets.utils.file_utils import readline __A = datasets.utils.logging.get_logger(__name__) @dataclass class a_ ( datasets.BuilderConfig ): _snake_case = None _snake_case = "utf-8" _snake_case = None _snake_case = None _snake_case = True # deprecated _snake_case = None # deprecated _snake_case = 10 << 20 # 10MB _snake_case = None class a_ ( datasets.ArrowBasedBuilder ): _snake_case = JsonConfig def SCREAMING_SNAKE_CASE__ (self) -> List[Any]: """simple docstring""" if self.config.block_size is not None: logger.warning('The JSON loader parameter `block_size` is deprecated. Please use `chunksize` instead') __snake_case : Tuple = self.config.block_size if self.config.use_threads is not True: logger.warning( 'The JSON loader parameter `use_threads` is deprecated and doesn\'t have any effect anymore.') if self.config.newlines_in_values is not None: raise ValueError('The JSON loader parameter `newlines_in_values` is no longer supported') return datasets.DatasetInfo(features=self.config.features) def SCREAMING_SNAKE_CASE__ (self , __a) -> List[str]: """simple docstring""" if not self.config.data_files: raise ValueError(F"""At least one data file must be specified, but got data_files={self.config.data_files}""") __snake_case : Optional[int] = dl_manager.download_and_extract(self.config.data_files) if isinstance(__a , (str, list, tuple)): __snake_case : Tuple = data_files if isinstance(__a , __a): __snake_case : Tuple = [files] __snake_case : Optional[int] = [dl_manager.iter_files(__a) for file in files] return [datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={'files': files})] __snake_case : Dict = [] for split_name, files in data_files.items(): if isinstance(__a , __a): __snake_case : List[str] = [files] __snake_case : Any = [dl_manager.iter_files(__a) for file in files] splits.append(datasets.SplitGenerator(name=__a , gen_kwargs={'files': files})) return splits def SCREAMING_SNAKE_CASE__ (self , __a) -> pa.Table: """simple docstring""" if self.config.features is not None: # adding missing columns for column_name in set(self.config.features) - set(pa_table.column_names): __snake_case : Any = self.config.features.arrow_schema.field(__a).type __snake_case : Any = pa_table.append_column(__a , pa.array([None] * len(__a) , type=__a)) # more expensive cast to support nested structures with keys in a different order # allows str <-> int/float or str to Audio for example __snake_case : Optional[Any] = table_cast(__a , self.config.features.arrow_schema) return pa_table def SCREAMING_SNAKE_CASE__ (self , __a) -> Tuple: """simple docstring""" for file_idx, file in enumerate(itertools.chain.from_iterable(__a)): # If the file is one json object and if we need to look at the list of items in one specific field if self.config.field is not None: with open(__a , encoding=self.config.encoding , errors=self.config.encoding_errors) as f: __snake_case : Union[str, Any] = json.load(__a) # We keep only the field we are interested in __snake_case : Tuple = dataset[self.config.field] # We accept two format: a list of dicts or a dict of lists if isinstance(__a , (list, tuple)): __snake_case : Optional[Any] = set().union(*[row.keys() for row in dataset]) __snake_case : List[str] = {col: [row.get(__a) for row in dataset] for col in keys} else: __snake_case : Tuple = dataset 
__snake_case : Optional[int] = pa.Table.from_pydict(__a) yield file_idx, self._cast_table(__a) # If the file has one json object per line else: with open(__a , 'rb') as f: __snake_case : Any = 0 # Use block_size equal to the chunk size divided by 32 to leverage multithreading # Set a default minimum value of 16kB if the chunk size is really small __snake_case : List[Any] = max(self.config.chunksize // 3_2 , 1_6 << 1_0) __snake_case : Union[str, Any] = ( self.config.encoding_errors if self.config.encoding_errors is not None else 'strict' ) while True: __snake_case : str = f.read(self.config.chunksize) if not batch: break # Finish current line try: batch += f.readline() except (AttributeError, io.UnsupportedOperation): batch += readline(__a) # PyArrow only accepts utf-8 encoded bytes if self.config.encoding != "utf-8": __snake_case : int = batch.decode(self.config.encoding , errors=__a).encode('utf-8') try: while True: try: __snake_case : Tuple = paj.read_json( io.BytesIO(__a) , read_options=paj.ReadOptions(block_size=__a)) break except (pa.ArrowInvalid, pa.ArrowNotImplementedError) as e: if ( isinstance(__a , pa.ArrowInvalid) and "straddling" not in str(__a) or block_size > len(__a) ): raise else: # Increase the block size in case it was too small. # The block size will be reset for the next file. logger.debug( F"""Batch of {len(__a)} bytes couldn't be parsed with block_size={block_size}. Retrying with block_size={block_size * 2}.""") block_size *= 2 except pa.ArrowInvalid as e: try: with open( __a , encoding=self.config.encoding , errors=self.config.encoding_errors) as f: __snake_case : str = json.load(__a) except json.JSONDecodeError: logger.error(F"""Failed to read file '{file}' with error {type(__a)}: {e}""") raise e # If possible, parse the file as a list of json objects and exit the loop if isinstance(__a , __a): # list is the only sequence type supported in JSON try: __snake_case : List[Any] = set().union(*[row.keys() for row in dataset]) __snake_case : Union[str, Any] = {col: [row.get(__a) for row in dataset] for col in keys} __snake_case : List[Any] = pa.Table.from_pydict(__a) except (pa.ArrowInvalid, AttributeError) as e: logger.error(F"""Failed to read file '{file}' with error {type(__a)}: {e}""") raise ValueError(F"""Not able to read records in the JSON file at {file}.""") from None yield file_idx, self._cast_table(__a) break else: logger.error(F"""Failed to read file '{file}' with error {type(__a)}: {e}""") raise ValueError( F"""Not able to read records in the JSON file at {file}. """ F"""You should probably indicate the field of the JSON file containing your records. """ F"""This JSON file contain the following fields: {str(list(dataset.keys()))}. """ F"""Select the correct one and provide it as `field='XXX'` to the dataset loading method. """) from None # Uncomment for debugging (will print the Arrow table size and elements) # logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}") # logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows))) yield (file_idx, batch_idx), self._cast_table(__a) batch_idx += 1
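# The builder above backs the user-facing "json" loader; a sketch of the two
# supported layouts (file names are placeholders):
#
# from datasets import load_dataset
#
# ds = load_dataset("json", data_files="records.jsonl")            # one object per line
# ds = load_dataset("json", data_files="dump.json", field="data")  # list under a key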
'''simple docstring''' from __future__ import annotations import math import random from collections.abc import Collection from typing import overload class a_ : def __init__(self , __a = None) -> None: """simple docstring""" if components is None: __snake_case : List[str] = [] __snake_case : Optional[int] = list(__a) def __len__(self) -> int: """simple docstring""" return len(self.__components) def __str__(self) -> str: """simple docstring""" return "(" + ",".join(map(__a , self.__components)) + ")" def __add__(self , __a) -> Vector: """simple docstring""" __snake_case : Optional[Any] = len(self) if size == len(__a): __snake_case : Optional[int] = [self.__components[i] + other.component(__a) for i in range(__a)] return Vector(__a) else: raise Exception('must have the same size') def __sub__(self , __a) -> Vector: """simple docstring""" __snake_case : Optional[Any] = len(self) if size == len(__a): __snake_case : Optional[int] = [self.__components[i] - other.component(__a) for i in range(__a)] return Vector(__a) else: # error case raise Exception('must have the same size') @overload def __mul__(self , __a) -> Vector: """simple docstring""" ... @overload def __mul__(self , __a) -> float: """simple docstring""" ... def __mul__(self , __a) -> float | Vector: """simple docstring""" if isinstance(__a , (float, int)): __snake_case : str = [c * other for c in self.__components] return Vector(__a) elif isinstance(__a , __a) and len(self) == len(__a): __snake_case : List[Any] = len(self) __snake_case : Dict = [self.__components[i] * other.component(__a) for i in range(__a)] return sum(__a) else: # error case raise Exception('invalid operand!') def SCREAMING_SNAKE_CASE__ (self) -> Vector: """simple docstring""" return Vector(self.__components) def SCREAMING_SNAKE_CASE__ (self , __a) -> float: """simple docstring""" if isinstance(__a , __a) and -len(self.__components) <= i < len(self.__components): return self.__components[i] else: raise Exception('index out of range') def SCREAMING_SNAKE_CASE__ (self , __a , __a) -> None: """simple docstring""" assert -len(self.__components) <= pos < len(self.__components) __snake_case : int = value def SCREAMING_SNAKE_CASE__ (self) -> float: """simple docstring""" if len(self.__components) == 0: raise Exception('Vector is empty') __snake_case : Tuple = [c**2 for c in self.__components] return math.sqrt(sum(__a)) def SCREAMING_SNAKE_CASE__ (self , __a , __a = False) -> float: """simple docstring""" __snake_case : Tuple = self * other __snake_case : Optional[int] = self.euclidean_length() * other.euclidean_length() if deg: return math.degrees(math.acos(num / den)) else: return math.acos(num / den) def _SCREAMING_SNAKE_CASE ( A : int ) -> Vector: """simple docstring""" assert isinstance(A , A ) return Vector([0] * dimension ) def _SCREAMING_SNAKE_CASE ( A : int , A : int ) -> Vector: """simple docstring""" assert isinstance(A , A ) and (isinstance(A , A )) __snake_case : Any = [0] * dimension __snake_case : int = 1 return Vector(A ) def _SCREAMING_SNAKE_CASE ( A : float , A : Vector , A : Vector ) -> Vector: """simple docstring""" assert ( isinstance(A , A ) and isinstance(A , A ) and (isinstance(A , (int, float) )) ) return x * scalar + y def _SCREAMING_SNAKE_CASE ( A : int , A : int , A : int ) -> Vector: """simple docstring""" random.seed(A ) __snake_case : List[Any] = [random.randint(A , A ) for _ in range(A )] return Vector(A ) class a_ : def __init__(self , __a , __a , __a) -> None: """simple docstring""" __snake_case : Union[str, Any] = matrix __snake_case : int 
= w __snake_case : str = h def __str__(self) -> str: """simple docstring""" __snake_case : Dict = '' for i in range(self.__height): ans += "|" for j in range(self.__width): if j < self.__width - 1: ans += str(self.__matrix[i][j]) + "," else: ans += str(self.__matrix[i][j]) + "|\n" return ans def __add__(self , __a) -> Matrix: """simple docstring""" if self.__width == other.width() and self.__height == other.height(): __snake_case : Tuple = [] for i in range(self.__height): __snake_case : List[Any] = [ self.__matrix[i][j] + other.component(__a , __a) for j in range(self.__width) ] matrix.append(__a) return Matrix(__a , self.__width , self.__height) else: raise Exception('matrix must have the same dimension!') def __sub__(self , __a) -> Matrix: """simple docstring""" if self.__width == other.width() and self.__height == other.height(): __snake_case : str = [] for i in range(self.__height): __snake_case : List[str] = [ self.__matrix[i][j] - other.component(__a , __a) for j in range(self.__width) ] matrix.append(__a) return Matrix(__a , self.__width , self.__height) else: raise Exception('matrices must have the same dimension!') @overload def __mul__(self , __a) -> Matrix: """simple docstring""" ... @overload def __mul__(self , __a) -> Vector: """simple docstring""" ... def __mul__(self , __a) -> Vector | Matrix: """simple docstring""" if isinstance(__a , __a): # matrix-vector if len(__a) == self.__width: __snake_case : Tuple = zero_vector(self.__height) for i in range(self.__height): __snake_case : Union[str, Any] = [ self.__matrix[i][j] * other.component(__a) for j in range(self.__width) ] ans.change_component(__a , sum(__a)) return ans else: raise Exception( 'vector must have the same size as the ' 'number of columns of the matrix!') elif isinstance(__a , (int, float)): # matrix-scalar __snake_case : str = [ [self.__matrix[i][j] * other for j in range(self.__width)] for i in range(self.__height) ] return Matrix(__a , self.__width , self.__height) return None def SCREAMING_SNAKE_CASE__ (self) -> int: """simple docstring""" return self.__height def SCREAMING_SNAKE_CASE__ (self) -> int: """simple docstring""" return self.__width def SCREAMING_SNAKE_CASE__ (self , __a , __a) -> float: """simple docstring""" if 0 <= x < self.__height and 0 <= y < self.__width: return self.__matrix[x][y] else: raise Exception('change_component: indices out of bounds') def SCREAMING_SNAKE_CASE__ (self , __a , __a , __a) -> None: """simple docstring""" if 0 <= x < self.__height and 0 <= y < self.__width: __snake_case : List[Any] = value else: raise Exception('change_component: indices out of bounds') def SCREAMING_SNAKE_CASE__ (self , __a , __a) -> float: """simple docstring""" if self.__height != self.__width: raise Exception('Matrix is not square') __snake_case : List[Any] = self.__matrix[:x] + self.__matrix[x + 1 :] for i in range(len(__a)): __snake_case : Tuple = minor[i][:y] + minor[i][y + 1 :] return Matrix(__a , self.__width - 1 , self.__height - 1).determinant() def SCREAMING_SNAKE_CASE__ (self , __a , __a) -> float: """simple docstring""" if self.__height != self.__width: raise Exception('Matrix is not square') if 0 <= x < self.__height and 0 <= y < self.__width: return (-1) ** (x + y) * self.minor(__a , __a) else: raise Exception('Indices out of bounds') def SCREAMING_SNAKE_CASE__ (self) -> float: """simple docstring""" if self.__height != self.__width: raise Exception('Matrix is not square') if self.__height < 1: raise Exception('Matrix has no element') elif self.__height == 1: return self.__matrix[0][0] 
elif self.__height == 2: return ( self.__matrix[0][0] * self.__matrix[1][1] - self.__matrix[0][1] * self.__matrix[1][0] ) else: __snake_case : Any = [ self.__matrix[0][y] * self.cofactor(0 , __a) for y in range(self.__width) ] return sum(__a) def _SCREAMING_SNAKE_CASE ( A : int ) -> Matrix: """simple docstring""" __snake_case : list[list[float]] = [[0] * n for _ in range(A )] return Matrix(A , A , A ) def _SCREAMING_SNAKE_CASE ( A : int , A : int , A : int , A : int ) -> Matrix: """simple docstring""" random.seed(A ) __snake_case : list[list[float]] = [ [random.randint(A , A ) for _ in range(A )] for _ in range(A ) ] return Matrix(A , A , A )
def bfs(graph, source, sink, parent):
    """Return True if BFS finds an augmenting path from source to sink."""
    visited = [False] * len(graph)
    queue = []
    queue.append(source)
    visited[source] = True
    while queue:
        u = queue.pop(0)
        for ind in range(len(graph[u])):
            if visited[ind] is False and graph[u][ind] > 0:
                queue.append(ind)
                visited[ind] = True
                parent[ind] = u
    return visited[sink]


def ford_fulkerson(graph, source, sink):
    """Compute the maximum flow from source to sink in a capacity matrix."""
    parent = [-1] * len(graph)
    max_flow = 0
    while bfs(graph, source, sink, parent):
        path_flow = float("Inf")
        s = sink
        while s != source:
            # Find the minimum residual capacity along the augmenting path
            path_flow = min(path_flow, graph[parent[s]][s])
            s = parent[s]
        max_flow += path_flow
        v = sink
        while v != source:
            u = parent[v]
            graph[u][v] -= path_flow
            graph[v][u] += path_flow
            v = parent[v]
    return max_flow


graph = [
    [0, 16, 13, 0, 0, 0],
    [0, 0, 10, 12, 0, 0],
    [0, 4, 0, 0, 14, 0],
    [0, 0, 9, 0, 0, 20],
    [0, 0, 0, 7, 0, 4],
    [0, 0, 0, 0, 0, 0],
]
source, sink = 0, 5
print(ford_fulkerson(graph, source, sink))
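# Tiny cross-check: a direct 0 -> 1 edge of capacity 7 plus a detour through 2
# capped at 3 should give a max flow of 10. Note that ford_fulkerson mutates
# the capacity matrix in place, turning it into the residual graph.
tiny = [
    [0, 7, 3],  # 0 -> 1 (cap 7), 0 -> 2 (cap 3)
    [0, 0, 0],
    [0, 4, 0],  # 2 -> 1 (cap 4)
]
print(ford_fulkerson(tiny, 0, 1))  # 10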
from ..utils import DummyObject, requires_backends class snake_case__ ( metaclass=__A ): UpperCAmelCase : Optional[int] = ["""sentencepiece"""] def __init__( self , *UpperCamelCase_ , **UpperCamelCase_ ) -> List[Any]: """simple docstring""" requires_backends(self , ["""sentencepiece"""] ) class snake_case__ ( metaclass=__A ): UpperCAmelCase : Dict = ["""sentencepiece"""] def __init__( self , *UpperCamelCase_ , **UpperCamelCase_ ) -> Union[str, Any]: """simple docstring""" requires_backends(self , ["""sentencepiece"""] ) class snake_case__ ( metaclass=__A ): UpperCAmelCase : Union[str, Any] = ["""sentencepiece"""] def __init__( self , *UpperCamelCase_ , **UpperCamelCase_ ) -> Optional[int]: """simple docstring""" requires_backends(self , ["""sentencepiece"""] ) class snake_case__ ( metaclass=__A ): UpperCAmelCase : List[str] = ["""sentencepiece"""] def __init__( self , *UpperCamelCase_ , **UpperCamelCase_ ) -> str: """simple docstring""" requires_backends(self , ["""sentencepiece"""] ) class snake_case__ ( metaclass=__A ): UpperCAmelCase : Dict = ["""sentencepiece"""] def __init__( self , *UpperCamelCase_ , **UpperCamelCase_ ) -> Optional[int]: """simple docstring""" requires_backends(self , ["""sentencepiece"""] ) class snake_case__ ( metaclass=__A ): UpperCAmelCase : int = ["""sentencepiece"""] def __init__( self , *UpperCamelCase_ , **UpperCamelCase_ ) -> Optional[Any]: """simple docstring""" requires_backends(self , ["""sentencepiece"""] ) class snake_case__ ( metaclass=__A ): UpperCAmelCase : Dict = ["""sentencepiece"""] def __init__( self , *UpperCamelCase_ , **UpperCamelCase_ ) -> Dict: """simple docstring""" requires_backends(self , ["""sentencepiece"""] ) class snake_case__ ( metaclass=__A ): UpperCAmelCase : Optional[int] = ["""sentencepiece"""] def __init__( self , *UpperCamelCase_ , **UpperCamelCase_ ) -> List[Any]: """simple docstring""" requires_backends(self , ["""sentencepiece"""] ) class snake_case__ ( metaclass=__A ): UpperCAmelCase : Optional[Any] = ["""sentencepiece"""] def __init__( self , *UpperCamelCase_ , **UpperCamelCase_ ) -> Any: """simple docstring""" requires_backends(self , ["""sentencepiece"""] ) class snake_case__ ( metaclass=__A ): UpperCAmelCase : Optional[int] = ["""sentencepiece"""] def __init__( self , *UpperCamelCase_ , **UpperCamelCase_ ) -> List[str]: """simple docstring""" requires_backends(self , ["""sentencepiece"""] ) class snake_case__ ( metaclass=__A ): UpperCAmelCase : Optional[int] = ["""sentencepiece"""] def __init__( self , *UpperCamelCase_ , **UpperCamelCase_ ) -> Dict: """simple docstring""" requires_backends(self , ["""sentencepiece"""] ) class snake_case__ ( metaclass=__A ): UpperCAmelCase : Dict = ["""sentencepiece"""] def __init__( self , *UpperCamelCase_ , **UpperCamelCase_ ) -> Optional[int]: """simple docstring""" requires_backends(self , ["""sentencepiece"""] ) class snake_case__ ( metaclass=__A ): UpperCAmelCase : Tuple = ["""sentencepiece"""] def __init__( self , *UpperCamelCase_ , **UpperCamelCase_ ) -> List[str]: """simple docstring""" requires_backends(self , ["""sentencepiece"""] ) class snake_case__ ( metaclass=__A ): UpperCAmelCase : List[str] = ["""sentencepiece"""] def __init__( self , *UpperCamelCase_ , **UpperCamelCase_ ) -> Any: """simple docstring""" requires_backends(self , ["""sentencepiece"""] ) class snake_case__ ( metaclass=__A ): UpperCAmelCase : Optional[Any] = ["""sentencepiece"""] def __init__( self , *UpperCamelCase_ , **UpperCamelCase_ ) -> Optional[int]: """simple docstring""" requires_backends(self , 
["""sentencepiece"""] ) class snake_case__ ( metaclass=__A ): UpperCAmelCase : Tuple = ["""sentencepiece"""] def __init__( self , *UpperCamelCase_ , **UpperCamelCase_ ) -> str: """simple docstring""" requires_backends(self , ["""sentencepiece"""] ) class snake_case__ ( metaclass=__A ): UpperCAmelCase : int = ["""sentencepiece"""] def __init__( self , *UpperCamelCase_ , **UpperCamelCase_ ) -> Tuple: """simple docstring""" requires_backends(self , ["""sentencepiece"""] ) class snake_case__ ( metaclass=__A ): UpperCAmelCase : int = ["""sentencepiece"""] def __init__( self , *UpperCamelCase_ , **UpperCamelCase_ ) -> Dict: """simple docstring""" requires_backends(self , ["""sentencepiece"""] ) class snake_case__ ( metaclass=__A ): UpperCAmelCase : Dict = ["""sentencepiece"""] def __init__( self , *UpperCamelCase_ , **UpperCamelCase_ ) -> Optional[int]: """simple docstring""" requires_backends(self , ["""sentencepiece"""] ) class snake_case__ ( metaclass=__A ): UpperCAmelCase : Tuple = ["""sentencepiece"""] def __init__( self , *UpperCamelCase_ , **UpperCamelCase_ ) -> Optional[int]: """simple docstring""" requires_backends(self , ["""sentencepiece"""] ) class snake_case__ ( metaclass=__A ): UpperCAmelCase : int = ["""sentencepiece"""] def __init__( self , *UpperCamelCase_ , **UpperCamelCase_ ) -> Dict: """simple docstring""" requires_backends(self , ["""sentencepiece"""] ) class snake_case__ ( metaclass=__A ): UpperCAmelCase : str = ["""sentencepiece"""] def __init__( self , *UpperCamelCase_ , **UpperCamelCase_ ) -> List[Any]: """simple docstring""" requires_backends(self , ["""sentencepiece"""] ) class snake_case__ ( metaclass=__A ): UpperCAmelCase : Tuple = ["""sentencepiece"""] def __init__( self , *UpperCamelCase_ , **UpperCamelCase_ ) -> int: """simple docstring""" requires_backends(self , ["""sentencepiece"""] ) class snake_case__ ( metaclass=__A ): UpperCAmelCase : List[str] = ["""sentencepiece"""] def __init__( self , *UpperCamelCase_ , **UpperCamelCase_ ) -> Optional[int]: """simple docstring""" requires_backends(self , ["""sentencepiece"""] ) class snake_case__ ( metaclass=__A ): UpperCAmelCase : List[str] = ["""sentencepiece"""] def __init__( self , *UpperCamelCase_ , **UpperCamelCase_ ) -> Dict: """simple docstring""" requires_backends(self , ["""sentencepiece"""] ) class snake_case__ ( metaclass=__A ): UpperCAmelCase : int = ["""sentencepiece"""] def __init__( self , *UpperCamelCase_ , **UpperCamelCase_ ) -> Optional[int]: """simple docstring""" requires_backends(self , ["""sentencepiece"""] ) class snake_case__ ( metaclass=__A ): UpperCAmelCase : Dict = ["""sentencepiece"""] def __init__( self , *UpperCamelCase_ , **UpperCamelCase_ ) -> int: """simple docstring""" requires_backends(self , ["""sentencepiece"""] ) class snake_case__ ( metaclass=__A ): UpperCAmelCase : Optional[Any] = ["""sentencepiece"""] def __init__( self , *UpperCamelCase_ , **UpperCamelCase_ ) -> str: """simple docstring""" requires_backends(self , ["""sentencepiece"""] ) class snake_case__ ( metaclass=__A ): UpperCAmelCase : Any = ["""sentencepiece"""] def __init__( self , *UpperCamelCase_ , **UpperCamelCase_ ) -> str: """simple docstring""" requires_backends(self , ["""sentencepiece"""] ) class snake_case__ ( metaclass=__A ): UpperCAmelCase : int = ["""sentencepiece"""] def __init__( self , *UpperCamelCase_ , **UpperCamelCase_ ) -> Any: """simple docstring""" requires_backends(self , ["""sentencepiece"""] ) class snake_case__ ( metaclass=__A ): UpperCAmelCase : Any = ["""sentencepiece"""] def __init__( self , 
*UpperCamelCase_ , **UpperCamelCase_ ) -> Optional[Any]: """simple docstring""" requires_backends(self , ["""sentencepiece"""] )
"""simple docstring""" from ... import PretrainedConfig __A = { """sijunhe/nezha-cn-base""": """https://huggingface.co/sijunhe/nezha-cn-base/resolve/main/config.json""", } class a ( A_ ): A_ : int = NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP A_ : Optional[int] = '''nezha''' def __init__( self : Tuple , lowerCamelCase_ : List[str]=2_11_28 , lowerCamelCase_ : str=7_68 , lowerCamelCase_ : Any=12 , lowerCamelCase_ : Optional[int]=12 , lowerCamelCase_ : Dict=30_72 , lowerCamelCase_ : int="gelu" , lowerCamelCase_ : Any=0.1 , lowerCamelCase_ : Tuple=0.1 , lowerCamelCase_ : str=5_12 , lowerCamelCase_ : List[str]=64 , lowerCamelCase_ : List[Any]=2 , lowerCamelCase_ : Optional[Any]=0.02 , lowerCamelCase_ : Dict=1E-12 , lowerCamelCase_ : List[str]=0.1 , lowerCamelCase_ : List[Any]=0 , lowerCamelCase_ : List[Any]=2 , lowerCamelCase_ : Tuple=3 , lowerCamelCase_ : Any=True , **lowerCamelCase_ : Tuple , ) -> str: super().__init__(pad_token_id=lowerCamelCase_ , bos_token_id=lowerCamelCase_ , eos_token_id=lowerCamelCase_ , **lowerCamelCase_ ) __a = vocab_size __a = hidden_size __a = num_hidden_layers __a = num_attention_heads __a = hidden_act __a = intermediate_size __a = hidden_dropout_prob __a = attention_probs_dropout_prob __a = max_position_embeddings __a = max_relative_position __a = type_vocab_size __a = initializer_range __a = layer_norm_eps __a = classifier_dropout __a = use_cache
"""simple docstring""" def UpperCamelCase ( _lowerCAmelCase : int = 1000 ): return sum(2 * a * ((a - 1) // 2) for a in range(3 , n + 1 ) ) if __name__ == "__main__": print(solution())
'''simple docstring''' import torch from diffusers import DDPMScheduler from .test_schedulers import SchedulerCommonTest class __UpperCamelCase (SCREAMING_SNAKE_CASE_ ): __A = (DDPMScheduler,) def _a ( self , **_lowerCAmelCase ) -> Any: '''simple docstring''' lowercase = { """num_train_timesteps""": 1000, """beta_start""": 0.0001, """beta_end""": 0.02, """beta_schedule""": """linear""", """variance_type""": """fixed_small""", """clip_sample""": True, } config.update(**snake_case__ ) return config def _a ( self ) -> Tuple: '''simple docstring''' for timesteps in [1, 5, 100, 1000]: self.check_over_configs(num_train_timesteps=snake_case__ ) def _a ( self ) -> int: '''simple docstring''' for beta_start, beta_end in zip([0.0001, 0.001, 0.01, 0.1] , [0.002, 0.02, 0.2, 2] ): self.check_over_configs(beta_start=snake_case__ , beta_end=snake_case__ ) def _a ( self ) -> Union[str, Any]: '''simple docstring''' for schedule in ["linear", "squaredcos_cap_v2"]: self.check_over_configs(beta_schedule=snake_case__ ) def _a ( self ) -> Optional[int]: '''simple docstring''' for variance in ["fixed_small", "fixed_large", "other"]: self.check_over_configs(variance_type=snake_case__ ) def _a ( self ) -> int: '''simple docstring''' for clip_sample in [True, False]: self.check_over_configs(clip_sample=snake_case__ ) def _a ( self ) -> List[Any]: '''simple docstring''' self.check_over_configs(thresholding=snake_case__ ) for threshold in [0.5, 1.0, 2.0]: for prediction_type in ["epsilon", "sample", "v_prediction"]: self.check_over_configs( thresholding=snake_case__ , prediction_type=snake_case__ , sample_max_value=snake_case__ , ) def _a ( self ) -> Any: '''simple docstring''' for prediction_type in ["epsilon", "sample", "v_prediction"]: self.check_over_configs(prediction_type=snake_case__ ) def _a ( self ) -> List[str]: '''simple docstring''' for t in [0, 500, 999]: self.check_over_forward(time_step=snake_case__ ) def _a ( self ) -> Dict: '''simple docstring''' lowercase = self.scheduler_classes[0] lowercase = self.get_scheduler_config() lowercase = scheduler_class(**snake_case__ ) assert torch.sum(torch.abs(scheduler._get_variance(0 ) - 0.0 ) ) < 1E-5 assert torch.sum(torch.abs(scheduler._get_variance(487 ) - 0.0_0979 ) ) < 1E-5 assert torch.sum(torch.abs(scheduler._get_variance(999 ) - 0.02 ) ) < 1E-5 def _a ( self ) -> Dict: '''simple docstring''' lowercase = self.scheduler_classes[0] lowercase = self.get_scheduler_config() lowercase = scheduler_class(**snake_case__ ) lowercase = len(snake_case__ ) lowercase = self.dummy_model() lowercase = self.dummy_sample_deter lowercase = torch.manual_seed(0 ) for t in reversed(range(snake_case__ ) ): # 1. predict noise residual lowercase = model(snake_case__ , snake_case__ ) # 2. 
predict previous mean of sample x_t-1 lowercase = scheduler.step(snake_case__ , snake_case__ , snake_case__ , generator=snake_case__ ).prev_sample # if t > 0: # noise = self.dummy_sample_deter # variance = scheduler.get_variance(t) ** (0.5) * noise # # sample = pred_prev_sample + variance lowercase = pred_prev_sample lowercase = torch.sum(torch.abs(snake_case__ ) ) lowercase = torch.mean(torch.abs(snake_case__ ) ) assert abs(result_sum.item() - 258.9606 ) < 1E-2 assert abs(result_mean.item() - 0.3372 ) < 1E-3 def _a ( self ) -> List[str]: '''simple docstring''' lowercase = self.scheduler_classes[0] lowercase = self.get_scheduler_config(prediction_type="""v_prediction""" ) lowercase = scheduler_class(**snake_case__ ) lowercase = len(snake_case__ ) lowercase = self.dummy_model() lowercase = self.dummy_sample_deter lowercase = torch.manual_seed(0 ) for t in reversed(range(snake_case__ ) ): # 1. predict noise residual lowercase = model(snake_case__ , snake_case__ ) # 2. predict previous mean of sample x_t-1 lowercase = scheduler.step(snake_case__ , snake_case__ , snake_case__ , generator=snake_case__ ).prev_sample # if t > 0: # noise = self.dummy_sample_deter # variance = scheduler.get_variance(t) ** (0.5) * noise # # sample = pred_prev_sample + variance lowercase = pred_prev_sample lowercase = torch.sum(torch.abs(snake_case__ ) ) lowercase = torch.mean(torch.abs(snake_case__ ) ) assert abs(result_sum.item() - 202.0296 ) < 1E-2 assert abs(result_mean.item() - 0.2631 ) < 1E-3 def _a ( self ) -> Optional[Any]: '''simple docstring''' lowercase = self.scheduler_classes[0] lowercase = self.get_scheduler_config() lowercase = scheduler_class(**snake_case__ ) lowercase = [100, 87, 50, 1, 0] scheduler.set_timesteps(timesteps=snake_case__ ) lowercase = scheduler.timesteps for i, timestep in enumerate(snake_case__ ): if i == len(snake_case__ ) - 1: lowercase = -1 else: lowercase = timesteps[i + 1] lowercase = scheduler.previous_timestep(snake_case__ ) lowercase = prev_t.item() self.assertEqual(snake_case__ , snake_case__ ) def _a ( self ) -> int: '''simple docstring''' lowercase = self.scheduler_classes[0] lowercase = self.get_scheduler_config() lowercase = scheduler_class(**snake_case__ ) lowercase = [100, 87, 50, 51, 0] with self.assertRaises(snake_case__ , msg="""`custom_timesteps` must be in descending order.""" ): scheduler.set_timesteps(timesteps=snake_case__ ) def _a ( self ) -> Dict: '''simple docstring''' lowercase = self.scheduler_classes[0] lowercase = self.get_scheduler_config() lowercase = scheduler_class(**snake_case__ ) lowercase = [100, 87, 50, 1, 0] lowercase = len(snake_case__ ) with self.assertRaises(snake_case__ , msg="""Can only pass one of `num_inference_steps` or `custom_timesteps`.""" ): scheduler.set_timesteps(num_inference_steps=snake_case__ , timesteps=snake_case__ ) def _a ( self ) -> Union[str, Any]: '''simple docstring''' lowercase = self.scheduler_classes[0] lowercase = self.get_scheduler_config() lowercase = scheduler_class(**snake_case__ ) lowercase = [scheduler.config.num_train_timesteps] with self.assertRaises( snake_case__ , msg="""`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}}""" , ): scheduler.set_timesteps(timesteps=snake_case__ )
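# Minimal denoising-loop sketch with the scheduler under test; random tensors
# stand in for a real UNet's noise prediction, so the output is noise too.
import torch
from diffusers import DDPMScheduler

scheduler = DDPMScheduler(num_train_timesteps=1000)
scheduler.set_timesteps(num_inference_steps=10)
sample = torch.randn(1, 3, 8, 8)
for t in scheduler.timesteps:
    noise_pred = torch.randn_like(sample)  # placeholder for model(sample, t)
    sample = scheduler.step(noise_pred, t, sample).prev_sample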
from __future__ import annotations

from collections import deque
from collections.abc import Iterator
from dataclasses import dataclass


@dataclass
class Edge:
    destination_vertex: int
    weight: int


class AdjacencyList:
    """Directed graph with 0/1 edge weights, queried via 0-1 BFS."""

    def __init__(self, size: int):
        self._graph: list[list[Edge]] = [[] for _ in range(size)]
        self._size = size

    def __getitem__(self, vertex: int) -> Iterator[Edge]:
        return iter(self._graph[vertex])

    @property
    def size(self) -> int:
        return self._size

    def add_edge(self, from_vertex: int, to_vertex: int, weight: int) -> None:
        if weight not in (0, 1):
            raise ValueError("Edge weight must be either 0 or 1.")
        if to_vertex < 0 or to_vertex >= self.size:
            raise ValueError("Vertex indexes must be in [0; size).")
        self._graph[from_vertex].append(Edge(to_vertex, weight))

    def get_shortest_path(self, start_vertex: int, finish_vertex: int) -> int | None:
        queue = deque([start_vertex])
        distances: list[int | None] = [None] * self.size
        distances[start_vertex] = 0

        while queue:
            current_vertex = queue.popleft()
            current_distance = distances[current_vertex]
            if current_distance is None:
                continue

            for edge in self[current_vertex]:
                new_distance = current_distance + edge.weight
                dest_vertex_distance = distances[edge.destination_vertex]
                if (
                    isinstance(dest_vertex_distance, int)
                    and new_distance >= dest_vertex_distance
                ):
                    continue
                distances[edge.destination_vertex] = new_distance
                # 0-1 BFS: zero-weight edges go to the front, unit-weight to the back
                if edge.weight == 0:
                    queue.appendleft(edge.destination_vertex)
                else:
                    queue.append(edge.destination_vertex)

        if distances[finish_vertex] is None:
            raise ValueError("No path from start_vertex to finish_vertex.")
        return distances[finish_vertex]


if __name__ == "__main__":
    import doctest

    doctest.testmod()
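# Usage sketch (class and method names as reconstructed above): a 4-vertex
# graph where the all-zero-weight detour 0 -> 2 -> 3 beats the direct
# unit-weight edges 0 -> 1 -> 3.
g = AdjacencyList(4)
g.add_edge(0, 1, 1)
g.add_edge(1, 3, 1)
g.add_edge(0, 2, 0)
g.add_edge(2, 3, 0)
print(g.get_shortest_path(0, 3))  # 0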
from google.protobuf import descriptor as _descriptor from google.protobuf import descriptor_pool as _descriptor_pool from google.protobuf import symbol_database as _symbol_database from google.protobuf.internal import builder as _builder # @@protoc_insertion_point(imports) _lowercase : Any = _symbol_database.Default() _lowercase : Tuple = _descriptor_pool.Default().AddSerializedFile( b"\n\x19sentencepiece_model.proto\x12\rsentencepiece\"\x80\x0c\n\x0bTrainerSpec\x12\r\n\x05input\x18\x01 \x03(\t\x12\x14\n\x0cinput_format\x18\x07 \x01(\t\x12\x14\n\x0cmodel_prefix\x18\x02 \x01(\t\x12\x41\n\nmodel_type\x18\x03 \x01(\x0e\x32$.sentencepiece.TrainerSpec.ModelType:\x07UNIGRAM\x12\x18\n\nvocab_size\x18\x04 \x01(\x05:\x04\x38\x30\x30\x30\x12\x17\n\x0f\x61\x63\x63\x65pt_language\x18\x05 \x03(\t\x12 \n\x15self_test_sample_size\x18\x06 \x01(\x05:\x01\x30\x12*\n\x1b\x65nable_differential_privacy\x18\x32 \x01(\x08:\x05\x66\x61lse\x12+\n differential_privacy_noise_level\x18\x33 \x01(\x02:\x01\x30\x12\x32\n\'differential_privacy_clipping_threshold\x18\x34 \x01(\x04:\x01\x30\x12\"\n\x12\x63haracter_coverage\x18\n \x01(\x02:\x06\x30.9995\x12\x1e\n\x13input_sentence_size\x18\x0b \x01(\x04:\x01\x30\x12$\n\x16shuffle_input_sentence\x18\x13 \x01(\x08:\x04true\x12 \n\x14mining_sentence_size\x18\x0c \x01(\x05\x42\x02\x18\x01\x12\"\n\x16training_sentence_size\x18\r \x01(\x05\x42\x02\x18\x01\x12(\n\x17seed_sentencepiece_size\x18\x0e \x01(\x05:\x07\x31\x30\x30\x30\x30\x30\x30\x12\x1e\n\x10shrinking_factor\x18\x0f \x01(\x02:\x04\x30.75\x12!\n\x13max_sentence_length\x18\x12 \x01(\x05:\x04\x34\x31\x39\x32\x12\x17\n\x0bnum_threads\x18\x10 \x01(\x05:\x02\x31\x36\x12\x1d\n\x12num_sub_iterations\x18\x11 \x01(\x05:\x01\x32\x12$\n\x18max_sentencepiece_length\x18\x14 \x01(\x05:\x02\x31\x36\x12%\n\x17split_by_unicode_script\x18\x15 \x01(\x08:\x04true\x12\x1d\n\x0fsplit_by_number\x18\x17 \x01(\x08:\x04true\x12!\n\x13split_by_whitespace\x18\x16 \x01(\x08:\x04true\x12)\n\x1atreat_whitespace_as_suffix\x18\x18 \x01(\x08:\x05\x66\x61lse\x12+\n\x1c\x61llow_whitespace_only_pieces\x18\x1a \x01(\x08:\x05\x66\x61lse\x12\x1b\n\x0csplit_digits\x18\x19 \x01(\x08:\x05\x66\x61lse\x12#\n\x19pretokenization_delimiter\x18\x35 \x01(\t:\x00\x12\x17\n\x0f\x63ontrol_symbols\x18\x1e \x03(\t\x12\x1c\n\x14user_defined_symbols\x18\x1f \x03(\t\x12\x16\n\x0erequired_chars\x18$ \x01(\t\x12\x1c\n\rbyte_fallback\x18# \x01(\x08:\x05\x66\x61lse\x12+\n\x1dvocabulary_output_piece_score\x18 \x01(\x08:\x04true\x12\x1e\n\x10hard_vocab_limit\x18! \x01(\x08:\x04true\x12\x1c\n\ruse_all_vocab\x18\" \x01(\x08:\x05\x66\x61lse\x12\x11\n\x06unk_id\x18( \x01(\x05:\x01\x30\x12\x11\n\x06\x62os_id\x18) \x01(\x05:\x01\x31\x12\x11\n\x06\x65os_id\x18* \x01(\x05:\x01\x32\x12\x12\n\x06pad_id\x18+ \x01(\x05:\x02-1\x12\x18\n\tunk_piece\x18- \x01(\t:\x05<unk>\x12\x16\n\tbos_piece\x18. 
\x01(\t:\x03<s>\x12\x17\n\teos_piece\x18/ \x01(\t:\x04</s>\x12\x18\n\tpad_piece\x18\x30 \x01(\t:\x05<pad>\x12\x1a\n\x0bunk_surface\x18, \x01(\t:\x05 \xe2\x81\x87 \x12+\n\x1ctrain_extremely_large_corpus\x18\x31 \x01(\x08:\x05\x66\x61lse\"5\n\tModelType\x12\x0b\n\x07UNIGRAM\x10\x01\x12\x07\n\x03\x42PE\x10\x02\x12\x08\n\x04WORD\x10\x03\x12\x08\n\x04\x43HAR\x10\x04*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02\"\xd1\x01\n\x0eNormalizerSpec\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x1c\n\x14precompiled_charsmap\x18\x02 \x01(\x0c\x12\x1e\n\x10\x61\x64\x64_dummy_prefix\x18\x03 \x01(\x08:\x04true\x12&\n\x18remove_extra_whitespaces\x18\x04 \x01(\x08:\x04true\x12 \n\x12\x65scape_whitespaces\x18\x05 \x01(\x08:\x04true\x12\x1e\n\x16normalization_rule_tsv\x18\x06 \x01(\t*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02\"y\n\x0cSelfTestData\x12\x33\n\x07samples\x18\x01 \x03(\x0b\x32\".sentencepiece.SelfTestData.Sample\x1a)\n\x06Sample\x12\r\n\x05input\x18\x01 \x01(\t\x12\x10\n\x08\x65xpected\x18\x02 \x01(\t*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02\"\xfe\x03\n\nModelProto\x12\x37\n\x06pieces\x18\x01 \x03(\x0b\x32\'.sentencepiece.ModelProto.SentencePiece\x12\x30\n\x0ctrainer_spec\x18\x02 \x01(\x0b\x32\x1a.sentencepiece.TrainerSpec\x12\x36\n\x0fnormalizer_spec\x18\x03 \x01(\x0b\x32\x1d.sentencepiece.NormalizerSpec\x12\x33\n\x0eself_test_data\x18\x04 \x01(\x0b\x32\x1b.sentencepiece.SelfTestData\x12\x38\n\x11\x64\x65normalizer_spec\x18\x05 \x01(\x0b\x32\x1d.sentencepiece.NormalizerSpec\x1a\xd2\x01\n\rSentencePiece\x12\r\n\x05piece\x18\x01 \x01(\t\x12\r\n\x05score\x18\x02 \x01(\x02\x12\x42\n\x04type\x18\x03 \x01(\x0e\x32,.sentencepiece.ModelProto.SentencePiece.Type:\x06NORMAL\"T\n\x04Type\x12\n\n\x06NORMAL\x10\x01\x12\x0b\n\x07UNKNOWN\x10\x02\x12\x0b\n\x07\x43ONTROL\x10\x03\x12\x10\n\x0cUSER_DEFINED\x10\x04\x12\x08\n\x04\x42YTE\x10\x06\x12\n\n\x06UNUSED\x10\x05*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02\x42\x02H\x03" ) _lowercase : Tuple = globals() _builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, _globals) _builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, "sentencepiece_model_pb2", _globals) if _descriptor._USE_C_DESCRIPTORS is False: _lowercase : Tuple = None _lowercase : Tuple = b"H\003" # (generated by protobuf compiler, but `_TRAINERSPEC` is not defined) # _TRAINERSPEC.fields_by_name["mining_sentence_size"]._options = None # _TRAINERSPEC.fields_by_name["mining_sentence_size"]._serialized_options = b"\030\001" # _TRAINERSPEC.fields_by_name["training_sentence_size"]._options = None # _TRAINERSPEC.fields_by_name["training_sentence_size"]._serialized_options = b"\030\001" _lowercase : Union[str, Any] = 45 _lowercase : Optional[Any] = 1581 _lowercase : Optional[int] = 1517 _lowercase : List[str] = 1570 _lowercase : int = 1584 _lowercase : Optional[Any] = 1793 _lowercase : Union[str, Any] = 1795 _lowercase : str = 1916 _lowercase : Optional[Any] = 1864 _lowercase : str = 1905 _lowercase : Any = 1919 _lowercase : str = 2429 _lowercase : Dict = 2208 _lowercase : Optional[Any] = 2418 _lowercase : List[str] = 2323 _lowercase : Any = 2407 # @@protoc_insertion_point(module_scope)
import argparse
import os

import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed

from accelerate import Accelerator, DistributedType


########################################################################
# This is a fully working simple example to use Accelerate,
# specifically showcasing how to properly calculate the metrics on the
# validation dataset when in a distributed system, and builds off the
# `nlp_example.py` script.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
#   - single CPU or single GPU
#   - multi GPUS (using PyTorch distributed mode)
#   - (multi) TPUs
#   - fp16 (mixed-precision) or fp32 (normal precision)
#
# To help focus on the differences in the code, building `DataLoaders`
# was refactored into its own function.
# New additions from the base script can be found quickly by
# looking for the # New Code # tags
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################

MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32


def get_dataloaders(accelerator: Accelerator, batch_size: int = 16):
    tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
    datasets = load_dataset("glue", "mrpc")

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    # starting with the main process first:
    with accelerator.main_process_first():
        tokenized_datasets = datasets.map(
            tokenize_function,
            batched=True,
            remove_columns=["idx", "sentence1", "sentence2"],
        )

    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16
        if accelerator.mixed_precision == "fp8":
            pad_to_multiple_of = 16
        elif accelerator.mixed_precision != "no":
            pad_to_multiple_of = 8
        else:
            pad_to_multiple_of = None

        return tokenizer.pad(
            examples,
            padding="longest",
            max_length=max_length,
            pad_to_multiple_of=pad_to_multiple_of,
            return_tensors="pt",
        )

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size
    )
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE
    )

    return train_dataloader, eval_dataloader


# For testing only
if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
    from accelerate.test_utils.training import mocked_dataloaders

    get_dataloaders = mocked_dataloaders  # noqa: F811


def training_function(config, args):
    # For testing only
    if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
        config["num_epochs"] = 2
    # Initialize accelerator
    accelerator = Accelerator(cpu=args.cpu, mixed_precision=args.mixed_precision)
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])

    metric = evaluate.load("glue", "mrpc")

    # If the batch size is too big we use gradient accumulation
    gradient_accumulation_steps = 1
    if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU:
        gradient_accumulation_steps = batch_size // MAX_GPU_BATCH_SIZE
        batch_size = MAX_GPU_BATCH_SIZE

    set_seed(seed)
    train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size)

    # Instantiate the model (we build the model here so that the seed also control new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained("bert-base-cased", return_dict=True)

    # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
    # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
    # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
    model = model.to(accelerator.device)

    # Instantiate optimizer
    optimizer = AdamW(params=model.parameters(), lr=lr)

    # Instantiate scheduler
    lr_scheduler = get_linear_schedule_with_warmup(
        optimizer=optimizer,
        num_warmup_steps=100,
        num_training_steps=(len(train_dataloader) * num_epochs) // gradient_accumulation_steps,
    )

    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
    )

    # Now we train the model
    for epoch in range(num_epochs):
        model.train()
        for step, batch in enumerate(train_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            outputs = model(**batch)
            loss = outputs.loss
            loss = loss / gradient_accumulation_steps
            accelerator.backward(loss)
            if step % gradient_accumulation_steps == 0:
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()

        model.eval()
        samples_seen = 0
        for step, batch in enumerate(eval_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            with torch.no_grad():
                outputs = model(**batch)
            predictions = outputs.logits.argmax(dim=-1)
            predictions, references = accelerator.gather((predictions, batch["labels"]))
            # New Code #
            # First we check if it's a distributed system
            if accelerator.use_distributed:
                # Then see if we're on the last batch of our eval dataloader
                if step == len(eval_dataloader) - 1:
                    # Last batch needs to be truncated on distributed systems as it contains additional samples
                    predictions = predictions[: len(eval_dataloader.dataset) - samples_seen]
                    references = references[: len(eval_dataloader.dataset) - samples_seen]
                else:
                    # Otherwise we add the number of samples seen
                    samples_seen += references.shape[0]
            # All of this can be avoided if you use `Accelerator.gather_for_metrics` instead of `Accelerator.gather`:
            # accelerator.gather_for_metrics((predictions, batch["labels"]))
            metric.add_batch(
                predictions=predictions,
                references=references,
            )

        eval_metric = metric.compute()
        # Use accelerator.print to print only on the main process.
        accelerator.print(f"epoch {epoch}:", eval_metric)


def main():
    parser = argparse.ArgumentParser(description="Simple example of training script.")
    parser.add_argument(
        "--mixed_precision",
        type=str,
        default=None,
        choices=["no", "fp16", "bf16", "fp8"],
        help="Whether to use mixed precision. Choose"
        "between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."
        "and an Nvidia Ampere GPU.",
    )
    parser.add_argument("--cpu", action="store_true", help="If passed, will train on the CPU.")
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": 3, "seed": 42, "batch_size": 16}
    training_function(config, args)


if __name__ == "__main__":
    main()
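
# A minimal sketch of the alternative mentioned in the eval-loop comments above
# (assumes a recent accelerate release, where `Accelerator.gather_for_metrics`
# is available): it drops the duplicated samples of the final distributed batch
# itself, so the manual `samples_seen` bookkeeping disappears.
# `evaluate_with_gather_for_metrics` is a hypothetical helper, not part of the
# original example.
def evaluate_with_gather_for_metrics(accelerator, model, eval_dataloader, metric):
    model.eval()
    for batch in eval_dataloader:
        with torch.no_grad():
            outputs = model(**batch)
        predictions = outputs.logits.argmax(dim=-1)
        # gather_for_metrics truncates the padded/duplicated samples automatically
        predictions, references = accelerator.gather_for_metrics((predictions, batch["labels"]))
        metric.add_batch(predictions=predictions, references=references)
    return metric.compute()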
import numpy as np

# Importing the Keras libraries and packages
import tensorflow as tf
from tensorflow.keras import layers, models

if __name__ == "__main__":
    # Initialising the CNN
    # (Sequential- Building the model layer by layer)
    classifier = models.Sequential()

    # Step 1 - Convolution
    # Here 64,64 is the length & breadth of dataset images and 3 is for the RGB channel
    # (3,3) is the kernel size (filter matrix)
    classifier.add(
        layers.Conv2D(32, (3, 3), input_shape=(64, 64, 3), activation="relu")
    )

    # Step 2 - Pooling
    classifier.add(layers.MaxPooling2D(pool_size=(2, 2)))

    # Adding a second convolutional layer
    classifier.add(layers.Conv2D(32, (3, 3), activation="relu"))
    classifier.add(layers.MaxPooling2D(pool_size=(2, 2)))

    # Step 3 - Flattening
    classifier.add(layers.Flatten())

    # Step 4 - Full connection
    classifier.add(layers.Dense(units=128, activation="relu"))
    classifier.add(layers.Dense(units=1, activation="sigmoid"))

    # Compiling the CNN
    classifier.compile(
        optimizer="adam", loss="binary_crossentropy", metrics=["accuracy"]
    )

    # Part 2 - Fitting the CNN to the images

    # Load Trained model weights

    # from keras.models import load_model
    # regressor=load_model('cnn.h5')

    train_datagen = tf.keras.preprocessing.image.ImageDataGenerator(
        rescale=1.0 / 255, shear_range=0.2, zoom_range=0.2, horizontal_flip=True
    )

    test_datagen = tf.keras.preprocessing.image.ImageDataGenerator(rescale=1.0 / 255)

    training_set = train_datagen.flow_from_directory(
        "dataset/training_set", target_size=(64, 64), batch_size=32, class_mode="binary"
    )

    test_set = test_datagen.flow_from_directory(
        "dataset/test_set", target_size=(64, 64), batch_size=32, class_mode="binary"
    )

    # `fit_generator` is deprecated in modern TensorFlow; `fit` accepts generators directly.
    classifier.fit(
        training_set, steps_per_epoch=5, epochs=30, validation_data=test_set
    )

    classifier.save("cnn.h5")

    # Part 3 - Making new predictions

    test_image = tf.keras.preprocessing.image.load_img(
        "dataset/single_prediction/image.png", target_size=(64, 64)
    )
    test_image = tf.keras.preprocessing.image.img_to_array(test_image)
    test_image = np.expand_dims(test_image, axis=0)
    result = classifier.predict(test_image)
    # training_set.class_indices
    if result[0][0] == 0:
        prediction = "Normal"
    if result[0][0] == 1:
        prediction = "Abnormality detected"
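
# A minimal sketch (assuming the trained "cnn.h5" saved above and the same
# 64x64 RGB preprocessing): reloading the saved model and classifying an
# arbitrary image path. `classify_image` is a hypothetical helper, not part of
# the original script.
def classify_image(model_path: str, image_path: str) -> str:
    model = tf.keras.models.load_model(model_path)
    img = tf.keras.preprocessing.image.load_img(image_path, target_size=(64, 64))
    arr = np.expand_dims(tf.keras.preprocessing.image.img_to_array(img), axis=0)
    # the sigmoid output is a probability; threshold at 0.5
    prob = float(model.predict(arr)[0][0])
    return "Abnormality detected" if prob >= 0.5 else "Normal"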
import argparse

import torch

from transformers import BertConfig, BertForPreTraining, load_tf_weights_in_bert
from transformers.utils import logging


logging.set_verbosity_info()


def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, bert_config_file, pytorch_dump_path):
    # Initialise PyTorch model
    config = BertConfig.from_json_file(bert_config_file)
    print(f"Building PyTorch model from configuration: {config}")
    model = BertForPreTraining(config)

    # Load weights from tf checkpoint
    load_tf_weights_in_bert(model, config, tf_checkpoint_path)

    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    torch.save(model.state_dict(), pytorch_dump_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
    )
    parser.add_argument(
        "--bert_config_file",
        default=None,
        type=str,
        required=True,
        help=(
            "The config json file corresponding to the pre-trained BERT model. \n"
            "This specifies the model architecture."
        ),
    )
    parser.add_argument(
        "--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    args = parser.parse_args()
    convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.bert_config_file, args.pytorch_dump_path)
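
# A minimal usage sketch (the checkpoint paths are assumptions for
# illustration): the converter can also be driven from Python rather than
# through the CLI flags above.
def convert_google_bert_base(output_path: str = "pytorch_model.bin") -> None:
    convert_tf_checkpoint_to_pytorch(
        tf_checkpoint_path="uncased_L-12_H-768_A-12/bert_model.ckpt",
        bert_config_file="uncased_L-12_H-768_A-12/bert_config.json",
        pytorch_dump_path=output_path,
    )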
import copy from typing import Any, Dict, List, Optional, Union import numpy as np from ...audio_utils import mel_filter_bank, spectrogram, window_function from ...feature_extraction_sequence_utils import SequenceFeatureExtractor from ...feature_extraction_utils import BatchFeature from ...utils import TensorType, logging lowercase = logging.get_logger(__name__) class UpperCamelCase_ ( snake_case_ ): '''simple docstring''' lowerCAmelCase = ['''input_features'''] def __init__( self , a=80 , a=1_60_00 , a=1_60 , a=30 , a=4_00 , a=0.0 , a=False , **a , ) -> Dict: super().__init__( feature_size=a , sampling_rate=a , padding_value=a , return_attention_mask=a , **a , ) snake_case_ = n_fft snake_case_ = hop_length snake_case_ = chunk_length snake_case_ = chunk_length * sampling_rate snake_case_ = self.n_samples // hop_length snake_case_ = sampling_rate snake_case_ = mel_filter_bank( num_frequency_bins=1 + n_fft // 2 , num_mel_filters=a , min_frequency=0.0 , max_frequency=8_000.0 , sampling_rate=a , norm='slaney' , mel_scale='slaney' , ) def _UpperCamelCase ( self , a ) -> np.ndarray: snake_case_ = spectrogram( a , window_function(self.n_fft , 'hann' ) , frame_length=self.n_fft , hop_length=self.hop_length , power=2.0 , mel_filters=self.mel_filters , log_mel='log10' , ) snake_case_ = log_spec[:, :-1] snake_case_ = np.maximum(a , log_spec.max() - 8.0 ) snake_case_ = (log_spec + 4.0) / 4.0 return log_spec @staticmethod # Copied from transformers.models.wav2vec2.feature_extraction_wav2vec2.Wav2Vec2FeatureExtractor.zero_mean_unit_var_norm def _UpperCamelCase ( a , a , a = 0.0 ) -> List[np.ndarray]: if attention_mask is not None: snake_case_ = np.array(a , np.intaa ) snake_case_ = [] for vector, length in zip(a , attention_mask.sum(-1 ) ): snake_case_ = (vector - vector[:length].mean()) / np.sqrt(vector[:length].var() + 1E-7 ) if length < normed_slice.shape[0]: snake_case_ = padding_value normed_input_values.append(a ) else: snake_case_ = [(x - x.mean()) / np.sqrt(x.var() + 1E-7 ) for x in input_values] return normed_input_values def __call__( self , a , a = True , a = None , a = None , a = None , a = "max_length" , a = None , a = None , a = None , **a , ) -> BatchFeature: if sampling_rate is not None: if sampling_rate != self.sampling_rate: raise ValueError( F'''The model corresponding to this feature extractor: {self.__class__.__name__} was trained using a''' F''' sampling rate of {self.sampling_rate}. Please make sure that the provided `raw_speech` input''' F''' was sampled with {self.sampling_rate} and not {sampling_rate}.''' ) else: logger.warning( 'It is strongly recommended to pass the `sampling_rate` argument to this function. ' 'Failing to do so can result in silent errors that might be hard to debug.' 
) snake_case_ = isinstance(a , np.ndarray ) and len(raw_speech.shape ) > 1 if is_batched_numpy and len(raw_speech.shape ) > 2: raise ValueError(F'''Only mono-channel audio is supported for input to {self}''' ) snake_case_ = is_batched_numpy or ( isinstance(a , (list, tuple) ) and (isinstance(raw_speech[0] , (np.ndarray, tuple, list) )) ) if is_batched: snake_case_ = [np.asarray([speech] , dtype=np.floataa ).T for speech in raw_speech] elif not is_batched and not isinstance(a , np.ndarray ): snake_case_ = np.asarray(a , dtype=np.floataa ) elif isinstance(a , np.ndarray ) and raw_speech.dtype is np.dtype(np.floataa ): snake_case_ = raw_speech.astype(np.floataa ) # always return batch if not is_batched: snake_case_ = [np.asarray([raw_speech] ).T] snake_case_ = BatchFeature({'input_features': raw_speech} ) # convert into correct format for padding snake_case_ = self.pad( a , padding=a , max_length=max_length if max_length else self.n_samples , truncation=a , pad_to_multiple_of=a , return_attention_mask=return_attention_mask or do_normalize , ) # zero-mean and unit-variance normalization if do_normalize: snake_case_ = self.zero_mean_unit_var_norm( padded_inputs['input_features'] , attention_mask=padded_inputs['attention_mask'] , padding_value=self.padding_value , ) snake_case_ = np.stack(padded_inputs['input_features'] , axis=0 ) # make sure list is in array format snake_case_ = padded_inputs.get('input_features' ).transpose(2 , 0 , 1 ) snake_case_ = [self._np_extract_fbank_features(a ) for waveform in input_features[0]] if isinstance(input_features[0] , a ): snake_case_ = [np.asarray(a , dtype=np.floataa ) for feature in input_features] else: snake_case_ = input_features if return_attention_mask: # rescale from sample (48000) to feature (3000) snake_case_ = padded_inputs['attention_mask'][:, :: self.hop_length] if return_tensors is not None: snake_case_ = padded_inputs.convert_to_tensors(a ) return padded_inputs def _UpperCamelCase ( self ) -> Dict[str, Any]: snake_case_ = copy.deepcopy(self.__dict__ ) snake_case_ = self.__class__.__name__ if "mel_filters" in output: del output["mel_filters"] return output
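
# A minimal usage sketch: the class above mirrors the upstream
# `WhisperFeatureExtractor`, which maps a raw waveform to (80, 3000) log-mel
# `input_features` (audio is padded/truncated to 30 s). The random waveform is
# a stand-in for real 16 kHz mono audio; imported from transformers so the
# sketch is self-contained.
if __name__ == "__main__":
    from transformers import WhisperFeatureExtractor

    extractor = WhisperFeatureExtractor()
    waveform = np.random.randn(16000 * 5).astype(np.float32)  # 5 s of fake audio
    features = extractor(waveform, sampling_rate=16000, return_tensors="np")
    print(features["input_features"].shape)  # (1, 80, 3000)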
import unittest import numpy as np from transformers import RoFormerConfig, is_flax_available from transformers.testing_utils import require_flax, slow from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask if is_flax_available(): import jax.numpy as jnp from transformers.models.roformer.modeling_flax_roformer import ( FlaxRoFormerForMaskedLM, FlaxRoFormerForMultipleChoice, FlaxRoFormerForQuestionAnswering, FlaxRoFormerForSequenceClassification, FlaxRoFormerForTokenClassification, FlaxRoFormerModel, ) class UpperCamelCase_ ( unittest.TestCase ): '''simple docstring''' def __init__( self , a , a=13 , a=7 , a=True , a=True , a=True , a=True , a=99 , a=32 , a=5 , a=4 , a=37 , a="gelu" , a=0.1 , a=0.1 , a=5_12 , a=16 , a=2 , a=0.02 , a=4 , ) -> Optional[int]: snake_case_ = parent snake_case_ = batch_size snake_case_ = seq_length snake_case_ = is_training snake_case_ = use_attention_mask snake_case_ = use_token_type_ids snake_case_ = use_labels snake_case_ = vocab_size snake_case_ = hidden_size snake_case_ = num_hidden_layers snake_case_ = num_attention_heads snake_case_ = intermediate_size snake_case_ = hidden_act snake_case_ = hidden_dropout_prob snake_case_ = attention_probs_dropout_prob snake_case_ = max_position_embeddings snake_case_ = type_vocab_size snake_case_ = type_sequence_label_size snake_case_ = initializer_range snake_case_ = num_choices def _UpperCamelCase ( self ) -> List[Any]: snake_case_ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) snake_case_ = None if self.use_attention_mask: snake_case_ = random_attention_mask([self.batch_size, self.seq_length] ) snake_case_ = None if self.use_token_type_ids: snake_case_ = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) snake_case_ = RoFormerConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=a , initializer_range=self.initializer_range , ) return config, input_ids, token_type_ids, attention_mask def _UpperCamelCase ( self ) -> int: snake_case_ = self.prepare_config_and_inputs() snake_case_ , snake_case_ , snake_case_ , snake_case_ = config_and_inputs snake_case_ = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': attention_mask} return config, inputs_dict @require_flax class UpperCamelCase_ ( snake_case_ , unittest.TestCase ): '''simple docstring''' lowerCAmelCase = True lowerCAmelCase = ( ( FlaxRoFormerModel, FlaxRoFormerForMaskedLM, FlaxRoFormerForSequenceClassification, FlaxRoFormerForTokenClassification, FlaxRoFormerForMultipleChoice, FlaxRoFormerForQuestionAnswering, ) if is_flax_available() else () ) def _UpperCamelCase ( self ) -> Union[str, Any]: snake_case_ = FlaxRoFormerModelTester(self ) @slow def _UpperCamelCase ( self ) -> Any: for model_class_name in self.all_model_classes: snake_case_ = model_class_name.from_pretrained('junnyu/roformer_chinese_small' , from_pt=a ) snake_case_ = model(np.ones((1, 1) ) ) self.assertIsNotNone(a ) @require_flax class UpperCamelCase_ ( unittest.TestCase ): '''simple docstring''' @slow def _UpperCamelCase ( self ) -> str: snake_case_ = 
FlaxRoFormerForMaskedLM.from_pretrained("junnyu/roformer_chinese_base")
        model = snake_case_  # alias for the obfuscated name assigned on the line above
        input_ids = jnp.array([[0, 1, 2, 3, 4, 5]])
        output = model(input_ids)[0]
        vocab_size = 50000
        expected_shape = (1, 6, vocab_size)
        self.assertEqual(output.shape, expected_shape)
        expected_slice = jnp.array(
            [[[-0.1205, -1.0265, 0.2922], [-1.5134, 0.1974, 0.1519], [-5.0135, -3.9003, -0.8404]]]
        )
        self.assertTrue(jnp.allclose(output[:, :3, :3], expected_slice, atol=1e-4))
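
# A minimal usage sketch (Hub download assumed; this test module itself is
# normally run under pytest because of its relative imports): pairing the Flax
# masked-LM model exercised above with its tokenizer for a real forward pass.
if __name__ == "__main__":
    from transformers import AutoTokenizer

    tokenizer = AutoTokenizer.from_pretrained("junnyu/roformer_chinese_base")
    model = FlaxRoFormerForMaskedLM.from_pretrained("junnyu/roformer_chinese_base")
    inputs = tokenizer("今天天气非常好。", return_tensors="np")
    logits = model(**inputs).logits
    print(logits.shape)  # (1, sequence_length, 50000)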
from ...configuration_utils import PretrainedConfig


TAPAS_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/tapas-base-finetuned-sqa": (
        "https://huggingface.co/google/tapas-base-finetuned-sqa/resolve/main/config.json"
    ),
    "google/tapas-base-finetuned-wtq": (
        "https://huggingface.co/google/tapas-base-finetuned-wtq/resolve/main/config.json"
    ),
    "google/tapas-base-finetuned-wikisql-supervised": (
        "https://huggingface.co/google/tapas-base-finetuned-wikisql-supervised/resolve/main/config.json"
    ),
    "google/tapas-base-finetuned-tabfact": (
        "https://huggingface.co/google/tapas-base-finetuned-tabfact/resolve/main/config.json"
    ),
}


class TapasConfig(PretrainedConfig):
    model_type = "tapas"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=1024,
        type_vocab_sizes=[3, 256, 256, 2, 256, 256, 10],
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        positive_label_weight=10.0,
        num_aggregation_labels=0,
        aggregation_loss_weight=1.0,
        use_answer_as_supervision=None,
        answer_loss_importance=1.0,
        use_normalized_answer_loss=False,
        huber_loss_delta=None,
        temperature=1.0,
        aggregation_temperature=1.0,
        use_gumbel_for_cells=False,
        use_gumbel_for_aggregation=False,
        average_approximation_function="ratio",
        cell_selection_preference=None,
        answer_loss_cutoff=None,
        max_num_rows=64,
        max_num_columns=32,
        average_logits_per_cell=False,
        select_one_column=True,
        allow_empty_column_selection=False,
        init_cell_selection_weights_to_zero=False,
        reset_position_index_per_cell=True,
        disable_per_token_loss=False,
        aggregation_labels=None,
        no_aggregation_label_index=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)

        # BERT hyperparameters (with updated max_position_embeddings and type_vocab_sizes)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_sizes = type_vocab_sizes
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps

        # Fine-tuning task hyperparameters
        self.positive_label_weight = positive_label_weight
        self.num_aggregation_labels = num_aggregation_labels
        self.aggregation_loss_weight = aggregation_loss_weight
        self.use_answer_as_supervision = use_answer_as_supervision
        self.answer_loss_importance = answer_loss_importance
        self.use_normalized_answer_loss = use_normalized_answer_loss
        self.huber_loss_delta = huber_loss_delta
        self.temperature = temperature
        self.aggregation_temperature = aggregation_temperature
        self.use_gumbel_for_cells = use_gumbel_for_cells
        self.use_gumbel_for_aggregation = use_gumbel_for_aggregation
        self.average_approximation_function = average_approximation_function
        self.cell_selection_preference = cell_selection_preference
        self.answer_loss_cutoff = answer_loss_cutoff
        self.max_num_rows = max_num_rows
        self.max_num_columns = max_num_columns
        self.average_logits_per_cell = average_logits_per_cell
        self.select_one_column = select_one_column
        self.allow_empty_column_selection = allow_empty_column_selection
        self.init_cell_selection_weights_to_zero = init_cell_selection_weights_to_zero
        self.reset_position_index_per_cell = reset_position_index_per_cell
        self.disable_per_token_loss = disable_per_token_loss

        # Aggregation hyperparameters
        self.aggregation_labels = aggregation_labels
        self.no_aggregation_label_index = no_aggregation_label_index

        if isinstance(self.aggregation_labels, dict):
            self.aggregation_labels = {int(k): v for k, v in aggregation_labels.items()}
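
# A minimal usage sketch: TapasConfig behaves like any PretrainedConfig
# subclass - instantiate with the BERT-sized defaults, or override the
# fine-tuning head hyperparameters.
if __name__ == "__main__":
    config = TapasConfig()
    print(config.model_type, config.hidden_size, config.num_aggregation_labels)  # tapas 768 0
    wtq_like = TapasConfig(num_aggregation_labels=4, use_answer_as_supervision=True)
    print(wtq_like.num_aggregation_labels)  # 4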
'''simple docstring''' import argparse import requests import torch # pip3 install salesforce-lavis # I'm actually installing a slightly modified version: pip3 install git+https://github.com/nielsrogge/LAVIS.git@fix_lavis_float32 (there's also the fix_lavis branch) # also note: to convert Vicuna checkpoints, we had to include /home/niels/python_projects/checkpoints/FastChat/vicuna-7b in lavis/configs/models/blip2/blip2_instruct_vicuna7b.yaml # same for Vicuna-13b from lavis.models import load_model_and_preprocess from PIL import Image from transformers import ( AutoTokenizer, BlipImageProcessor, InstructBlipConfig, InstructBlipForConditionalGeneration, InstructBlipProcessor, InstructBlipQFormerConfig, InstructBlipVisionConfig, LlamaConfig, LlamaTokenizerFast, TaConfig, TaTokenizerFast, ) from transformers.utils.constants import OPENAI_CLIP_MEAN, OPENAI_CLIP_STD def UpperCAmelCase_ ( ): '''simple docstring''' _a : str = 'https://raw.githubusercontent.com/salesforce/LAVIS/main/docs/_static/Confusing-Pictures.jpg' _a : Any = Image.open(requests.get(A , stream=A ).raw ).convert('RGB' ) return image def UpperCAmelCase_ ( A ): '''simple docstring''' _a : Optional[int] = [] # fmt: off # vision encoder rename_keys.append(('visual_encoder.cls_token', 'vision_model.embeddings.class_embedding') ) rename_keys.append(('visual_encoder.pos_embed', 'vision_model.embeddings.position_embedding') ) rename_keys.append(('visual_encoder.patch_embed.proj.weight', 'vision_model.embeddings.patch_embedding.weight') ) rename_keys.append(('visual_encoder.patch_embed.proj.bias', 'vision_model.embeddings.patch_embedding.bias') ) rename_keys.append(('ln_vision.weight', 'vision_model.post_layernorm.weight') ) rename_keys.append(('ln_vision.bias', 'vision_model.post_layernorm.bias') ) for i in range(config.vision_config.num_hidden_layers ): rename_keys.append((f'''visual_encoder.blocks.{i}.norm1.weight''', f'''vision_model.encoder.layers.{i}.layer_norm1.weight''') ) rename_keys.append((f'''visual_encoder.blocks.{i}.norm1.bias''', f'''vision_model.encoder.layers.{i}.layer_norm1.bias''') ) rename_keys.append((f'''visual_encoder.blocks.{i}.norm2.weight''', f'''vision_model.encoder.layers.{i}.layer_norm2.weight''') ) rename_keys.append((f'''visual_encoder.blocks.{i}.norm2.bias''', f'''vision_model.encoder.layers.{i}.layer_norm2.bias''') ) rename_keys.append((f'''visual_encoder.blocks.{i}.attn.qkv.weight''', f'''vision_model.encoder.layers.{i}.self_attn.qkv.weight''') ) rename_keys.append((f'''visual_encoder.blocks.{i}.attn.proj.weight''', f'''vision_model.encoder.layers.{i}.self_attn.projection.weight''',) ) rename_keys.append((f'''visual_encoder.blocks.{i}.attn.proj.bias''', f'''vision_model.encoder.layers.{i}.self_attn.projection.bias''') ) rename_keys.append((f'''visual_encoder.blocks.{i}.mlp.fc1.weight''', f'''vision_model.encoder.layers.{i}.mlp.fc1.weight''') ) rename_keys.append((f'''visual_encoder.blocks.{i}.mlp.fc1.bias''', f'''vision_model.encoder.layers.{i}.mlp.fc1.bias''') ) rename_keys.append((f'''visual_encoder.blocks.{i}.mlp.fc2.weight''', f'''vision_model.encoder.layers.{i}.mlp.fc2.weight''') ) rename_keys.append((f'''visual_encoder.blocks.{i}.mlp.fc2.bias''', f'''vision_model.encoder.layers.{i}.mlp.fc2.bias''') ) # QFormer rename_keys.append(('Qformer.bert.embeddings.LayerNorm.weight', 'qformer.embeddings.layernorm.weight') ) rename_keys.append(('Qformer.bert.embeddings.LayerNorm.bias', 'qformer.embeddings.layernorm.bias') ) # fmt: on return rename_keys def UpperCAmelCase_ ( A , A , A ): '''simple docstring''' 
_a : Dict = dct.pop(A ) _a : Tuple = val def UpperCAmelCase_ ( A , A ): '''simple docstring''' for i in range(config.vision_config.num_hidden_layers ): # read in original q and v biases _a : Optional[Any] = state_dict.pop(f'''visual_encoder.blocks.{i}.attn.q_bias''' ) _a : str = state_dict.pop(f'''visual_encoder.blocks.{i}.attn.v_bias''' ) # next, set bias in the state dict _a : Any = torch.cat((q_bias, torch.zeros_like(A , requires_grad=A ), v_bias) ) _a : Tuple = qkv_bias def UpperCAmelCase_ ( A ): '''simple docstring''' _a : int = 3_6_4 if 'coco' in model_name else 2_2_4 _a : List[Any] = InstructBlipVisionConfig(image_size=A ).to_dict() # make sure the models have proper bos_token_id and eos_token_id set (important for generation) # seems like flan-T5 models don't have bos_token_id properly set? if "t5-xl" in model_name: _a : Optional[Any] = TaConfig.from_pretrained('google/flan-t5-xl' , dense_act_fn='gelu' , bos_token_id=1 ).to_dict() elif "t5-xxl" in model_name: _a : Tuple = TaConfig.from_pretrained('google/flan-t5-xxl' , dense_act_fn='gelu' , bos_token_id=1 ).to_dict() elif "vicuna-7b" in model_name: _a : int = LlamaConfig.from_pretrained('decapoda-research/llama-7b-hf' , vocab_size=3_2_0_0_1 ).to_dict() elif "vicuna-13b" in model_name: _a : int = LlamaConfig.from_pretrained('decapoda-research/llama-13b-hf' , vocab_size=3_2_0_0_1 ).to_dict() else: raise ValueError('Model name not supported' ) # the authors add one special "[DEC]" token to the vocab of Q-Former, hence vocab size = 30522 + 1 _a : Tuple = InstructBlipQFormerConfig(vocab_size=3_0_5_2_3 ).to_dict() _a : Optional[Any] = InstructBlipConfig(vision_config=A , text_config=A , qformer_config=A ) return config, image_size @torch.no_grad() def UpperCAmelCase_ ( A , A=None , A=False ): '''simple docstring''' _a : Tuple = AutoTokenizer.from_pretrained('bert-base-uncased' , truncation_side='left' ) qformer_tokenizer.add_special_tokens({'bos_token': '[DEC]'} ) if "t5" in model_name: _a : int = TaTokenizerFast.from_pretrained('google/flan-t5-xl' , truncation_side='left' ) elif "vicuna" in model_name: # the following was used in the original implementation: # tokenizer = LlamaTokenizer.from_pretrained("huggyllama/llama-7b", use_fast=False, truncation_side="left") # tokenizer.add_special_tokens({"pad_token": "[PAD]"}) # tokenizer.add_special_tokens({"bos_token": "</s>"}) # tokenizer.add_special_tokens({"eos_token": "</s>"}) # tokenizer.add_special_tokens({"unk_token": "</s>"}) _a : Optional[Any] = LlamaTokenizerFast.from_pretrained( 'huggyllama/llama-7b' , truncation_side='left' , bos_token='</s>' , unk_token='</s>' ) tokenizer.add_special_tokens({'pad_token': '[PAD]'} ) _a , _a : Tuple = get_blipa_config(A ) _a : Optional[Any] = InstructBlipForConditionalGeneration(A ).eval() _a : Union[str, Any] = { 'instructblip-vicuna-7b': ('blip2_vicuna_instruct', 'vicuna7b'), 'instructblip-vicuna-13b': ('blip2_vicuna_instruct', 'vicuna13b'), 'instructblip-flan-t5-xl': ('blip2_t5_instruct', 'flant5xl'), 'instructblip-flan-t5-xxl': ('blip2_t5_instruct', 'flant5xxl'), } _a , _a : Dict = model_name_to_original[model_name] # load original model print('Loading original model...' ) _a : int = 'cuda:1' if torch.cuda.is_available() else 'cpu' _a : Any = 'cuda:2' if torch.cuda.is_available() else 'cpu' _a , _a , _a : Optional[int] = load_model_and_preprocess( name=A , model_type=A , is_eval=A , device=A ) original_model.eval() print('Done!' 
) # update state dict keys _a : Tuple = original_model.state_dict() _a : List[Any] = create_rename_keys(A ) for src, dest in rename_keys: rename_key(A , A , A ) # some keys can be renamed efficiently for key, val in state_dict.copy().items(): _a : Union[str, Any] = state_dict.pop(A ) if key.startswith('Qformer.bert' ): _a : Union[str, Any] = key.replace('Qformer.bert' , 'qformer' ) if "attention.self" in key: _a : List[Any] = key.replace('self' , 'attention' ) if "llm_proj" in key: _a : int = key.replace('llm_proj' , 'language_projection' ) if "t5_proj" in key: _a : str = key.replace('t5_proj' , 'language_projection' ) if key.startswith('llm_model' ): _a : int = key.replace('llm_model' , 'language_model' ) if key.startswith('t5' ): _a : Tuple = key.replace('t5' , 'language' ) _a : Optional[Any] = val # read in qv biases read_in_q_v_bias(A , A ) # note: weights get loaded in torch.float32 by default hf_model.load_state_dict(A , strict=A ) _a : Dict = load_demo_image() _a : Any = 'What is unusual about this image?' # create processor _a : Any = BlipImageProcessor( size={'height': image_size, 'width': image_size} , image_mean=A , image_std=A ) _a : Dict = InstructBlipProcessor( image_processor=A , tokenizer=A , qformer_tokenizer=A , ) _a : str = processor(images=A , text=A , return_tensors='pt' ).to(A ) # make sure processor creates exact same pixel values _a : str = vis_processors['eval'](A ).unsqueeze(0 ).to(A ) _a : Optional[int] = inputs.pixel_values assert torch.allclose(original_pixel_values.to(pixel_values.device ) , A ) original_model.to(A ) hf_model.to(A ) with torch.no_grad(): if "vicuna" in model_name: _a : Any = original_model({'image': original_pixel_values, 'text_input': [prompt]} ).logits _a : str = hf_model(**A ).logits else: _a : Optional[Any] = original_model( {'image': original_pixel_values, 'text_input': [prompt], 'text_output': ['\n']} ).logits _a : List[Any] = tokenizer('\n' , return_tensors='pt' ).input_ids.to(A ) _a : str = label_input_ids.masked_fill(label_input_ids == tokenizer.pad_token_id , -1_0_0 ) _a : List[str] = hf_model(**A , labels=A ).logits print('First values of original logits:' , original_logits[0, :3, :3] ) print('First values of HF logits:' , logits[0, :3, :3] ) # assert values assert original_logits.shape == logits.shape _a : str = 1E-4 if 'vicuna' in model_name else 1E-5 assert torch.allclose(original_logits.to(logits.device ) , A , atol=A ) print('Looks ok!' ) print('Generating with original model...' ) _a : Optional[int] = original_model.generate({'image': original_pixel_values, 'prompt': prompt} , num_beams=5 ) # important: we need to cast the weights of the HF model to the appropriate type print('Generating with HF model...' ) _a : Dict = hf_model.generate( **A , do_sample=A , num_beams=5 , max_length=2_5_6 , min_length=1 , top_p=0.9 , repetition_penalty=1.5 , length_penalty=1.0 , temperature=1 , ) if "vicuna" in model_name: # convert output id 0 to 2 (eos_token_id) # TODO add this in the generate method? 
_a : Tuple = 2 print('Original generation:' , A ) _a : Dict = processor.batch_decode(A , skip_special_tokens=A ) _a : Tuple = [text.strip() for text in output_text] print('HF generation:' , A ) if pytorch_dump_folder_path is not None: processor.save_pretrained(A ) hf_model.save_pretrained(A ) if push_to_hub: processor.push_to_hub(f'''Salesforce/{model_name}''' ) hf_model.push_to_hub(f'''Salesforce/{model_name}''' ) if __name__ == "__main__": UpperCAmelCase_ : Optional[int] = argparse.ArgumentParser() UpperCAmelCase_ : List[str] = [ "instructblip-vicuna-7b", "instructblip-vicuna-13b", "instructblip-flan-t5-xl", "instructblip-flan-t5-xxl", ] parser.add_argument( "--model_name", default="instructblip-flan-t5-xl", choices=choices, type=str, help="Path to hf config.json of model to convert", ) parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.") parser.add_argument( "--push_to_hub", action="store_true", help="Whether to push the model and processor to the hub after converting", ) UpperCAmelCase_ : Optional[int] = parser.parse_args() convert_blipa_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
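
# A minimal usage sketch (the script filename below is an assumption;
# upstream, this converter lives among transformers' conversion scripts, and
# the modified LAVIS install noted at the top of the file is required):
#
#   python convert_instructblip_original_to_pytorch.py \
#       --model_name instructblip-flan-t5-xl \
#       --pytorch_dump_folder_path ./instructblip-flan-t5-xl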
'''simple docstring''' import copy import os from typing import TYPE_CHECKING, List, Union if TYPE_CHECKING: pass from ...configuration_utils import PretrainedConfig from ...utils import logging a_ = logging.get_logger(__name__) a_ = { 'kakaobrain/align-base': 'https://huggingface.co/kakaobrain/align-base/resolve/main/config.json', } class _UpperCamelCase ( __A ): '''simple docstring''' lowerCamelCase__ ='align_text_model' def __init__( self : List[str] , a : List[str]=3_0522 , a : Tuple=768 , a : Dict=12 , a : Any=12 , a : Union[str, Any]=3072 , a : Tuple="gelu" , a : int=0.1 , a : Optional[Any]=0.1 , a : str=512 , a : Dict=2 , a : Union[str, Any]=0.02 , a : int=1e-12 , a : Any=0 , a : Union[str, Any]="absolute" , a : Any=True , **a : int , ) -> Tuple: """simple docstring""" super().__init__(**_lowerCAmelCase ) SCREAMING_SNAKE_CASE : List[str] = vocab_size SCREAMING_SNAKE_CASE : str = hidden_size SCREAMING_SNAKE_CASE : int = num_hidden_layers SCREAMING_SNAKE_CASE : List[Any] = num_attention_heads SCREAMING_SNAKE_CASE : Union[str, Any] = hidden_act SCREAMING_SNAKE_CASE : List[Any] = intermediate_size SCREAMING_SNAKE_CASE : Optional[Any] = hidden_dropout_prob SCREAMING_SNAKE_CASE : List[str] = attention_probs_dropout_prob SCREAMING_SNAKE_CASE : Dict = max_position_embeddings SCREAMING_SNAKE_CASE : List[Any] = type_vocab_size SCREAMING_SNAKE_CASE : List[str] = initializer_range SCREAMING_SNAKE_CASE : Any = layer_norm_eps SCREAMING_SNAKE_CASE : Union[str, Any] = position_embedding_type SCREAMING_SNAKE_CASE : str = use_cache SCREAMING_SNAKE_CASE : List[str] = pad_token_id @classmethod def __UpperCamelCase ( cls : Any , a : Tuple , **a : Union[str, Any] ) -> Dict: """simple docstring""" cls._set_token_in_kwargs(_lowerCAmelCase ) SCREAMING_SNAKE_CASE ,SCREAMING_SNAKE_CASE : int = cls.get_config_dict(_lowerCAmelCase , **_lowerCAmelCase ) # get the text config dict if we are loading from AlignConfig if config_dict.get("model_type" ) == "align": SCREAMING_SNAKE_CASE : str = config_dict["text_config"] if "model_type" in config_dict and hasattr(cls , "model_type" ) and config_dict["model_type"] != cls.model_type: logger.warning( F"You are using a model of type {config_dict['model_type']} to instantiate a model of type " F"{cls.model_type}. This is not supported for all configurations of models and can yield errors." 
) return cls.from_dict(_lowerCAmelCase , **_lowerCAmelCase ) class _UpperCamelCase ( __A ): '''simple docstring''' lowerCamelCase__ ='align_vision_model' def __init__( self : Any , a : int = 3 , a : int = 600 , a : List[Any] = 2.0 , a : str = 3.1 , a : Dict = 8 , a : Tuple = [3, 3, 5, 3, 5, 5, 3] , a : str = [32, 16, 24, 40, 80, 112, 192] , a : Union[str, Any] = [16, 24, 40, 80, 112, 192, 320] , a : Optional[Any] = [] , a : str = [1, 2, 2, 2, 1, 2, 1] , a : Dict = [1, 2, 2, 3, 3, 4, 1] , a : str = [1, 6, 6, 6, 6, 6, 6] , a : int = 0.25 , a : Union[str, Any] = "swish" , a : Dict = 2560 , a : Optional[int] = "mean" , a : int = 0.02 , a : str = 0.001 , a : Union[str, Any] = 0.99 , a : Union[str, Any] = 0.2 , **a : str , ) -> List[str]: """simple docstring""" super().__init__(**_lowerCAmelCase ) SCREAMING_SNAKE_CASE : Any = num_channels SCREAMING_SNAKE_CASE : Optional[Any] = image_size SCREAMING_SNAKE_CASE : Dict = width_coefficient SCREAMING_SNAKE_CASE : str = depth_coefficient SCREAMING_SNAKE_CASE : List[str] = depth_divisor SCREAMING_SNAKE_CASE : str = kernel_sizes SCREAMING_SNAKE_CASE : int = in_channels SCREAMING_SNAKE_CASE : int = out_channels SCREAMING_SNAKE_CASE : Tuple = depthwise_padding SCREAMING_SNAKE_CASE : List[Any] = strides SCREAMING_SNAKE_CASE : Optional[int] = num_block_repeats SCREAMING_SNAKE_CASE : Dict = expand_ratios SCREAMING_SNAKE_CASE : str = squeeze_expansion_ratio SCREAMING_SNAKE_CASE : Optional[Any] = hidden_act SCREAMING_SNAKE_CASE : str = hidden_dim SCREAMING_SNAKE_CASE : Any = pooling_type SCREAMING_SNAKE_CASE : Union[str, Any] = initializer_range SCREAMING_SNAKE_CASE : Optional[int] = batch_norm_eps SCREAMING_SNAKE_CASE : int = batch_norm_momentum SCREAMING_SNAKE_CASE : List[str] = drop_connect_rate SCREAMING_SNAKE_CASE : List[Any] = sum(_lowerCAmelCase ) * 4 @classmethod def __UpperCamelCase ( cls : List[str] , a : Dict , **a : Dict ) -> List[Any]: """simple docstring""" cls._set_token_in_kwargs(_lowerCAmelCase ) SCREAMING_SNAKE_CASE ,SCREAMING_SNAKE_CASE : Union[str, Any] = cls.get_config_dict(_lowerCAmelCase , **_lowerCAmelCase ) # get the vision config dict if we are loading from AlignConfig if config_dict.get("model_type" ) == "align": SCREAMING_SNAKE_CASE : Tuple = config_dict["vision_config"] if "model_type" in config_dict and hasattr(cls , "model_type" ) and config_dict["model_type"] != cls.model_type: logger.warning( F"You are using a model of type {config_dict['model_type']} to instantiate a model of type " F"{cls.model_type}. This is not supported for all configurations of models and can yield errors." ) return cls.from_dict(_lowerCAmelCase , **_lowerCAmelCase ) class _UpperCamelCase ( __A ): '''simple docstring''' lowerCamelCase__ ='align' lowerCamelCase__ =True def __init__( self : List[Any] , a : Any=None , a : Optional[Any]=None , a : List[Any]=640 , a : str=1.0 , a : str=0.02 , **a : Optional[Any] , ) -> Any: """simple docstring""" super().__init__(**_lowerCAmelCase ) if text_config is None: SCREAMING_SNAKE_CASE : List[str] = {} logger.info("text_config is None. Initializing the AlignTextConfig with default values." ) if vision_config is None: SCREAMING_SNAKE_CASE : List[str] = {} logger.info("vision_config is None. Initializing the AlignVisionConfig with default values." 
) SCREAMING_SNAKE_CASE : str = AlignTextConfig(**_lowerCAmelCase ) SCREAMING_SNAKE_CASE : Optional[int] = AlignVisionConfig(**_lowerCAmelCase ) SCREAMING_SNAKE_CASE : Optional[int] = projection_dim SCREAMING_SNAKE_CASE : List[str] = temperature_init_value SCREAMING_SNAKE_CASE : List[Any] = initializer_range @classmethod def __UpperCamelCase ( cls : List[Any] , a : Optional[Any] , a : Union[str, Any] , **a : Optional[int] ) -> Any: """simple docstring""" return cls(text_config=text_config.to_dict() , vision_config=vision_config.to_dict() , **_lowerCAmelCase ) def __UpperCamelCase ( self : Tuple ) -> int: """simple docstring""" SCREAMING_SNAKE_CASE : int = copy.deepcopy(self.__dict__ ) SCREAMING_SNAKE_CASE : Dict = self.text_config.to_dict() SCREAMING_SNAKE_CASE : Tuple = self.vision_config.to_dict() SCREAMING_SNAKE_CASE : List[Any] = self.__class__.model_type return output
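
# A minimal usage sketch: the classes above mirror the upstream AlignConfig
# family; here a composite config is built from its text/vision halves via the
# classmethod defined above. Imported from transformers so the example is
# self-contained.
if __name__ == "__main__":
    from transformers import AlignConfig, AlignTextConfig, AlignVisionConfig

    config = AlignConfig.from_text_vision_configs(AlignTextConfig(), AlignVisionConfig())
    print(config.projection_dim)             # 640 by default
    print(config.text_config.hidden_size)    # 768, the BERT-style text tower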
from collections import OrderedDict from typing import Any, List, Mapping, Optional from ... import PreTrainedTokenizer, TensorType, is_torch_available from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfigWithPast, PatchingSpec from ...utils import logging a_ = logging.get_logger(__name__) a_ = { 'EleutherAI/gpt-j-6B': 'https://huggingface.co/EleutherAI/gpt-j-6B/resolve/main/config.json', # See all GPT-J models at https://huggingface.co/models?filter=gpt_j } class _UpperCamelCase ( __A ): '''simple docstring''' lowerCamelCase__ ='gptj' lowerCamelCase__ ={ 'max_position_embeddings': 'n_positions', 'hidden_size': 'n_embd', 'num_attention_heads': 'n_head', 'num_hidden_layers': 'n_layer', } def __init__( self : Dict , a : Optional[Any]=5_0400 , a : List[str]=2048 , a : List[Any]=4096 , a : int=28 , a : Union[str, Any]=16 , a : List[Any]=64 , a : int=None , a : Optional[int]="gelu_new" , a : Optional[Any]=0.0 , a : Any=0.0 , a : Union[str, Any]=0.0 , a : Union[str, Any]=1e-5 , a : Any=0.02 , a : Optional[int]=True , a : Tuple=5_0256 , a : Union[str, Any]=5_0256 , a : List[Any]=False , **a : str , ) -> Optional[Any]: """simple docstring""" SCREAMING_SNAKE_CASE : str = vocab_size SCREAMING_SNAKE_CASE : int = n_positions SCREAMING_SNAKE_CASE : Tuple = n_embd SCREAMING_SNAKE_CASE : Tuple = n_layer SCREAMING_SNAKE_CASE : List[Any] = n_head SCREAMING_SNAKE_CASE : Tuple = n_inner SCREAMING_SNAKE_CASE : Any = rotary_dim SCREAMING_SNAKE_CASE : str = activation_function SCREAMING_SNAKE_CASE : int = resid_pdrop SCREAMING_SNAKE_CASE : Optional[int] = embd_pdrop SCREAMING_SNAKE_CASE : Tuple = attn_pdrop SCREAMING_SNAKE_CASE : List[str] = layer_norm_epsilon SCREAMING_SNAKE_CASE : int = initializer_range SCREAMING_SNAKE_CASE : Tuple = use_cache SCREAMING_SNAKE_CASE : Union[str, Any] = bos_token_id SCREAMING_SNAKE_CASE : List[Any] = eos_token_id super().__init__( bos_token_id=a , eos_token_id=a , tie_word_embeddings=a , **a ) class _UpperCamelCase ( __A ): '''simple docstring''' def __init__( self : Optional[int] , a : PretrainedConfig , a : str = "default" , a : List[PatchingSpec] = None , a : bool = False , ) -> Any: """simple docstring""" super().__init__(a , task=a , patching_specs=a , use_past=a ) if not getattr(self._config , "pad_token_id" , a ): # TODO: how to do that better? 
SCREAMING_SNAKE_CASE : Dict = 0 @property def __UpperCamelCase ( self : Any ) -> Mapping[str, Mapping[int, str]]: """simple docstring""" SCREAMING_SNAKE_CASE : List[Any] = OrderedDict({"input_ids": {0: "batch", 1: "sequence"}} ) if self.use_past: self.fill_with_past_key_values_(a , direction="inputs" ) SCREAMING_SNAKE_CASE : int = {0: "batch", 1: "past_sequence + sequence"} else: SCREAMING_SNAKE_CASE : Any = {0: "batch", 1: "sequence"} return common_inputs @property def __UpperCamelCase ( self : Any ) -> int: """simple docstring""" return self._config.n_layer @property def __UpperCamelCase ( self : str ) -> int: """simple docstring""" return self._config.n_head def __UpperCamelCase ( self : str , a : PreTrainedTokenizer , a : int = -1 , a : int = -1 , a : bool = False , a : Optional[TensorType] = None , ) -> Mapping[str, Any]: """simple docstring""" SCREAMING_SNAKE_CASE : int = super(a , self ).generate_dummy_inputs( a , batch_size=a , seq_length=a , is_pair=a , framework=a ) # We need to order the input in the way they appears in the forward() SCREAMING_SNAKE_CASE : Tuple = OrderedDict({"input_ids": common_inputs["input_ids"]} ) # Need to add the past_keys if self.use_past: if not is_torch_available(): raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed." ) else: import torch SCREAMING_SNAKE_CASE ,SCREAMING_SNAKE_CASE : Optional[Any] = common_inputs["input_ids"].shape # Not using the same length for past_key_values SCREAMING_SNAKE_CASE : Any = seqlen + 2 SCREAMING_SNAKE_CASE : Dict = ( batch, self.num_attention_heads, past_key_values_length, self._config.hidden_size // self.num_attention_heads, ) SCREAMING_SNAKE_CASE : str = [ (torch.zeros(a ), torch.zeros(a )) for _ in range(self.num_layers ) ] SCREAMING_SNAKE_CASE : Optional[int] = common_inputs["attention_mask"] if self.use_past: SCREAMING_SNAKE_CASE : List[str] = ordered_inputs["attention_mask"].dtype SCREAMING_SNAKE_CASE : Any = torch.cat( [ordered_inputs["attention_mask"], torch.ones(a , a , dtype=a )] , dim=1 ) return ordered_inputs @property def __UpperCamelCase ( self : Optional[Any] ) -> int: """simple docstring""" return 13
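
# A minimal usage sketch: the config above mirrors the upstream `GPTJConfig`;
# its `attribute_map` lets the canonical names (`hidden_size`, ...) alias the
# GPT-2-style ones (`n_embd`, ...). Imported from transformers so the example
# is self-contained; the tiny sizes are for illustration only.
if __name__ == "__main__":
    from transformers import GPTJConfig

    config = GPTJConfig(n_layer=2, n_head=4, n_embd=64)
    print(config.model_type, config.n_positions)        # gptj 2048
    print(config.hidden_size == config.n_embd)          # True - via attribute_map
    print(config.num_hidden_layers == config.n_layer)   # True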
"""https://en.wikipedia.org/wiki/Jaro%E2%80%93Winkler_distance"""


def jaro_winkler(str1: str, str2: str) -> float:
    """Compute the Jaro-Winkler similarity of two strings."""

    def get_matched_characters(_str1: str, _str2: str) -> str:
        matched = []
        limit = min(len(_str1), len(_str2)) // 2
        for i, l in enumerate(_str1):
            left = int(max(0, i - limit))
            right = int(min(i + limit + 1, len(_str2)))
            if l in _str2[left:right]:
                matched.append(l)
                # blank out the matched character so it cannot match twice
                _str2 = f"{_str2[0:_str2.index(l)]} {_str2[_str2.index(l) + 1:]}"
        return "".join(matched)

    # matching characters
    matching_1 = get_matched_characters(str1, str2)
    matching_2 = get_matched_characters(str2, str1)
    match_count = len(matching_1)

    # transposition
    transpositions = (
        len([(c1, c2) for c1, c2 in zip(matching_1, matching_2) if c1 != c2]) // 2
    )

    if not match_count:
        jaro = 0.0
    else:
        jaro = (
            1
            / 3
            * (
                match_count / len(str1)
                + match_count / len(str2)
                + (match_count - transpositions) / match_count
            )
        )

    # common prefix up to 4 characters
    prefix_len = 0
    for c1, c2 in zip(str1[:4], str2[:4]):
        if c1 == c2:
            prefix_len += 1
        else:
            break

    return jaro + 0.1 * prefix_len * (1 - jaro)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    print(jaro_winkler("hello", "world"))
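
# Worked example for the formula above: for "hello" vs. "world" the matching
# window is min(5, 5) // 2 = 2, so only the "l" is matched on both sides
# (match_count = 1, no transpositions). Jaro = (1/5 + 1/5 + 1/1) / 3
# = 0.4666..., the common prefix is empty, so Jaro-Winkler stays at
# 0.4666... - which is what the __main__ block prints.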
import os import unittest from transformers.models.transfo_xl.tokenization_transfo_xl import VOCAB_FILES_NAMES, TransfoXLTokenizer from ...test_tokenization_common import TokenizerTesterMixin class UpperCAmelCase__ ( A__ , unittest.TestCase ): """simple docstring""" a = TransfoXLTokenizer a = False a = False def lowercase_ ( self : List[str] ) -> List[Any]: super().setUp() SCREAMING_SNAKE_CASE__ = [ '''<unk>''', '''[CLS]''', '''[SEP]''', '''want''', '''unwanted''', '''wa''', '''un''', '''running''', ''',''', '''low''', '''l''', ] SCREAMING_SNAKE_CASE__ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] ) with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer: vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) ) def lowercase_ ( self : int , **__lowerCamelCase : Any ) -> List[str]: SCREAMING_SNAKE_CASE__ = True return TransfoXLTokenizer.from_pretrained(self.tmpdirname , **__lowerCamelCase ) def lowercase_ ( self : str , __lowerCamelCase : Dict ) -> str: SCREAMING_SNAKE_CASE__ = '''<unk> UNwanted , running''' SCREAMING_SNAKE_CASE__ = '''<unk> unwanted, running''' return input_text, output_text def lowercase_ ( self : int ) -> Optional[int]: SCREAMING_SNAKE_CASE__ = TransfoXLTokenizer(vocab_file=self.vocab_file , lower_case=__lowerCamelCase ) SCREAMING_SNAKE_CASE__ = tokenizer.tokenize('''<unk> UNwanted , running''' ) self.assertListEqual(__lowerCamelCase , ['''<unk>''', '''unwanted''', ''',''', '''running'''] ) self.assertListEqual(tokenizer.convert_tokens_to_ids(__lowerCamelCase ) , [0, 4, 8, 7] ) def lowercase_ ( self : Dict ) -> Dict: SCREAMING_SNAKE_CASE__ = TransfoXLTokenizer(lower_case=__lowerCamelCase ) self.assertListEqual( tokenizer.tokenize(''' \tHeLLo ! how \n Are yoU ? ''' ) , ['''hello''', '''!''', '''how''', '''are''', '''you''', '''?'''] ) def lowercase_ ( self : Tuple ) -> int: SCREAMING_SNAKE_CASE__ = TransfoXLTokenizer(lower_case=__lowerCamelCase ) self.assertListEqual( tokenizer.tokenize(''' \tHeLLo ! how \n Are yoU ? ''' ) , ['''HeLLo''', '''!''', '''how''', '''Are''', '''yoU''', '''?'''] ) def lowercase_ ( self : Tuple ) -> Optional[Any]: SCREAMING_SNAKE_CASE__ = TransfoXLTokenizer(lower_case=__lowerCamelCase ) SCREAMING_SNAKE_CASE__ = '''Hello (bracket) and side-scrolled [and] Henry\'s $5,000 with 3.34 m. What\'s up!?''' SCREAMING_SNAKE_CASE__ = [ '''Hello''', '''(''', '''bracket''', ''')''', '''and''', '''side''', '''@-@''', '''scrolled''', '''[''', '''and''', ''']''', '''Henry''', '''\'s''', '''$''', '''5''', '''@,@''', '''000''', '''with''', '''3''', '''@.@''', '''34''', '''m''', '''.''', '''What''', '''\'s''', '''up''', '''!''', '''?''', ] self.assertListEqual(tokenizer.tokenize(__lowerCamelCase ) , __lowerCamelCase ) self.assertEqual(tokenizer.convert_tokens_to_string(__lowerCamelCase ) , __lowerCamelCase ) def lowercase_ ( self : str ) -> Tuple: SCREAMING_SNAKE_CASE__ = self.get_tokenizer() SCREAMING_SNAKE_CASE__ = len(__lowerCamelCase ) tokenizer.add_tokens(['''new1''', '''new2'''] ) tokenizer.move_added_token('''new1''' , 1 ) # Check that moved token is not copied (duplicate) self.assertEqual(len(__lowerCamelCase ) , original_len + 2 ) # Check that token is moved to specified id self.assertEqual(tokenizer.encode('''new1''' ) , [1] ) self.assertEqual(tokenizer.decode([1] ) , '''new1''' )
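
# A minimal standalone sketch mirroring the lower-casing test above, against
# the same tiny vocabulary built in `setUp` (network-free). The test module
# itself is normally run under pytest because of its relative import; this
# block is for illustration only.
if __name__ == "__main__":
    import tempfile

    vocab = ["<unk>", "[CLS]", "[SEP]", "want", "unwanted", "wa", "un", "running", ",", "low", "l"]
    with tempfile.NamedTemporaryFile("w", suffix=".txt", delete=False) as f:
        f.write("".join(token + "\n" for token in vocab))
    tokenizer = TransfoXLTokenizer(vocab_file=f.name, lower_case=True)
    print(tokenizer.tokenize("<unk> UNwanted , running"))  # ['<unk>', 'unwanted', ',', 'running']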
import unittest from transformers import MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING, AutoTokenizer, is_vision_available from transformers.pipelines import pipeline from transformers.pipelines.document_question_answering import apply_tesseract from transformers.testing_utils import ( is_pipeline_test, nested_simplify, require_detectrona, require_pytesseract, require_tf, require_torch, require_vision, slow, ) from .test_pipelines_common import ANY if is_vision_available(): from PIL import Image from transformers.image_utils import load_image else: class lowercase__ : @staticmethod def A_ ( *UpperCAmelCase_ : List[str] , **UpperCAmelCase_ : Optional[Any] ): pass def _lowercase ( UpperCamelCase_ ) -> List[Any]: '''simple docstring''' return None # This is a pinned image from a specific revision of a document question answering space, hosted by HuggingFace, # so we can expect it to be available. __snake_case = ( """https://huggingface.co/spaces/impira/docquery/resolve/2f6c96314dc84dfda62d40de9da55f2f5165d403/invoice.png""" ) @is_pipeline_test @require_torch @require_vision class lowercase__ ( unittest.TestCase ): A__ : List[str] =MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING @require_pytesseract @require_vision def A_ ( self : List[Any] , UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : Tuple ): SCREAMING_SNAKE_CASE__ = pipeline( 'document-question-answering' , model=UpperCAmelCase_ , tokenizer=UpperCAmelCase_ , image_processor=UpperCAmelCase_ ) SCREAMING_SNAKE_CASE__ = INVOICE_URL SCREAMING_SNAKE_CASE__ = list(zip(*apply_tesseract(load_image(UpperCAmelCase_ ) , UpperCAmelCase_ , '' ) ) ) SCREAMING_SNAKE_CASE__ = 'What is the placebo?' SCREAMING_SNAKE_CASE__ = [ { 'image': load_image(UpperCAmelCase_ ), 'question': question, }, { 'image': image, 'question': question, }, { 'image': image, 'question': question, 'word_boxes': word_boxes, }, ] return dqa_pipeline, examples def A_ ( self : str , UpperCAmelCase_ : int , UpperCAmelCase_ : Dict ): SCREAMING_SNAKE_CASE__ = dqa_pipeline(UpperCAmelCase_ , top_k=2 ) self.assertEqual( UpperCAmelCase_ , [ [ {'score': ANY(UpperCAmelCase_ ), 'answer': ANY(UpperCAmelCase_ ), 'start': ANY(UpperCAmelCase_ ), 'end': ANY(UpperCAmelCase_ )}, {'score': ANY(UpperCAmelCase_ ), 'answer': ANY(UpperCAmelCase_ ), 'start': ANY(UpperCAmelCase_ ), 'end': ANY(UpperCAmelCase_ )}, ] ] * 3 , ) @require_torch @require_detectrona @require_pytesseract def A_ ( self : Tuple ): SCREAMING_SNAKE_CASE__ = pipeline('document-question-answering' , model='hf-internal-testing/tiny-random-layoutlmv2' ) SCREAMING_SNAKE_CASE__ = INVOICE_URL SCREAMING_SNAKE_CASE__ = 'How many cats are there?' SCREAMING_SNAKE_CASE__ = [ {'score': 0.0_001, 'answer': 'oy 2312/2019', 'start': 38, 'end': 39}, {'score': 0.0_001, 'answer': 'oy 2312/2019 DUE', 'start': 38, 'end': 40}, ] SCREAMING_SNAKE_CASE__ = dqa_pipeline(image=UpperCAmelCase_ , question=UpperCAmelCase_ , top_k=2 ) self.assertEqual(nested_simplify(UpperCAmelCase_ , decimals=4 ) , UpperCAmelCase_ ) SCREAMING_SNAKE_CASE__ = dqa_pipeline({'image': image, 'question': question} , top_k=2 ) self.assertEqual(nested_simplify(UpperCAmelCase_ , decimals=4 ) , UpperCAmelCase_ ) # This image does not detect ANY text in it, meaning layoutlmv2 should fail. 
# Empty answer probably SCREAMING_SNAKE_CASE__ = './tests/fixtures/tests_samples/COCO/000000039769.png' SCREAMING_SNAKE_CASE__ = dqa_pipeline(image=UpperCAmelCase_ , question=UpperCAmelCase_ , top_k=2 ) self.assertEqual(UpperCAmelCase_ , [] ) # We can optionnally pass directly the words and bounding boxes SCREAMING_SNAKE_CASE__ = './tests/fixtures/tests_samples/COCO/000000039769.png' SCREAMING_SNAKE_CASE__ = [] SCREAMING_SNAKE_CASE__ = [] SCREAMING_SNAKE_CASE__ = dqa_pipeline(image=UpperCAmelCase_ , question=UpperCAmelCase_ , words=UpperCAmelCase_ , boxes=UpperCAmelCase_ , top_k=2 ) self.assertEqual(UpperCAmelCase_ , [] ) @slow @require_torch @require_detectrona @require_pytesseract def A_ ( self : Tuple ): SCREAMING_SNAKE_CASE__ = pipeline( 'document-question-answering' , model='tiennvcs/layoutlmv2-base-uncased-finetuned-docvqa' , revision='9977165' , ) SCREAMING_SNAKE_CASE__ = INVOICE_URL SCREAMING_SNAKE_CASE__ = 'What is the invoice number?' SCREAMING_SNAKE_CASE__ = dqa_pipeline(image=UpperCAmelCase_ , question=UpperCAmelCase_ , top_k=2 ) self.assertEqual( nested_simplify(UpperCAmelCase_ , decimals=4 ) , [ {'score': 0.9_944, 'answer': 'us-001', 'start': 16, 'end': 16}, {'score': 0.0_009, 'answer': 'us-001', 'start': 16, 'end': 16}, ] , ) SCREAMING_SNAKE_CASE__ = dqa_pipeline({'image': image, 'question': question} , top_k=2 ) self.assertEqual( nested_simplify(UpperCAmelCase_ , decimals=4 ) , [ {'score': 0.9_944, 'answer': 'us-001', 'start': 16, 'end': 16}, {'score': 0.0_009, 'answer': 'us-001', 'start': 16, 'end': 16}, ] , ) SCREAMING_SNAKE_CASE__ = dqa_pipeline( [{'image': image, 'question': question}, {'image': image, 'question': question}] , top_k=2 ) self.assertEqual( nested_simplify(UpperCAmelCase_ , decimals=4 ) , [ [ {'score': 0.9_944, 'answer': 'us-001', 'start': 16, 'end': 16}, {'score': 0.0_009, 'answer': 'us-001', 'start': 16, 'end': 16}, ], ] * 2 , ) @slow @require_torch @require_detectrona @require_pytesseract def A_ ( self : Optional[Any] ): SCREAMING_SNAKE_CASE__ = pipeline( 'document-question-answering' , model='tiennvcs/layoutlmv2-base-uncased-finetuned-docvqa' , revision='9977165' , max_seq_len=50 , ) SCREAMING_SNAKE_CASE__ = INVOICE_URL SCREAMING_SNAKE_CASE__ = 'What is the invoice number?' 
SCREAMING_SNAKE_CASE__ = dqa_pipeline(image=UpperCAmelCase_ , question=UpperCAmelCase_ , top_k=2 ) self.assertEqual( nested_simplify(UpperCAmelCase_ , decimals=4 ) , [ {'score': 0.9_974, 'answer': '1110212019', 'start': 23, 'end': 23}, {'score': 0.9_948, 'answer': 'us-001', 'start': 16, 'end': 16}, ] , ) SCREAMING_SNAKE_CASE__ = dqa_pipeline({'image': image, 'question': question} , top_k=2 ) self.assertEqual( nested_simplify(UpperCAmelCase_ , decimals=4 ) , [ {'score': 0.9_974, 'answer': '1110212019', 'start': 23, 'end': 23}, {'score': 0.9_948, 'answer': 'us-001', 'start': 16, 'end': 16}, ] , ) SCREAMING_SNAKE_CASE__ = dqa_pipeline( [{'image': image, 'question': question}, {'image': image, 'question': question}] , top_k=2 ) self.assertEqual( nested_simplify(UpperCAmelCase_ , decimals=4 ) , [ [ {'score': 0.9_974, 'answer': '1110212019', 'start': 23, 'end': 23}, {'score': 0.9_948, 'answer': 'us-001', 'start': 16, 'end': 16}, ] ] * 2 , ) @slow @require_torch @require_pytesseract @require_vision def A_ ( self : Dict ): SCREAMING_SNAKE_CASE__ = AutoTokenizer.from_pretrained( 'impira/layoutlm-document-qa' , revision='3dc6de3' , add_prefix_space=UpperCAmelCase_ ) SCREAMING_SNAKE_CASE__ = pipeline( 'document-question-answering' , model='impira/layoutlm-document-qa' , tokenizer=UpperCAmelCase_ , revision='3dc6de3' , ) SCREAMING_SNAKE_CASE__ = INVOICE_URL SCREAMING_SNAKE_CASE__ = 'What is the invoice number?' SCREAMING_SNAKE_CASE__ = dqa_pipeline(image=UpperCAmelCase_ , question=UpperCAmelCase_ , top_k=2 ) self.assertEqual( nested_simplify(UpperCAmelCase_ , decimals=4 ) , [ {'score': 0.4_251, 'answer': 'us-001', 'start': 16, 'end': 16}, {'score': 0.0_819, 'answer': '1110212019', 'start': 23, 'end': 23}, ] , ) SCREAMING_SNAKE_CASE__ = dqa_pipeline({'image': image, 'question': question} , top_k=2 ) self.assertEqual( nested_simplify(UpperCAmelCase_ , decimals=4 ) , [ {'score': 0.4_251, 'answer': 'us-001', 'start': 16, 'end': 16}, {'score': 0.0_819, 'answer': '1110212019', 'start': 23, 'end': 23}, ] , ) SCREAMING_SNAKE_CASE__ = dqa_pipeline( [{'image': image, 'question': question}, {'image': image, 'question': question}] , top_k=2 ) self.assertEqual( nested_simplify(UpperCAmelCase_ , decimals=4 ) , [ [ {'score': 0.4_251, 'answer': 'us-001', 'start': 16, 'end': 16}, {'score': 0.0_819, 'answer': '1110212019', 'start': 23, 'end': 23}, ] ] * 2 , ) SCREAMING_SNAKE_CASE__ = list(zip(*apply_tesseract(load_image(UpperCAmelCase_ ) , UpperCAmelCase_ , '' ) ) ) # This model should also work if `image` is set to None SCREAMING_SNAKE_CASE__ = dqa_pipeline({'image': None, 'word_boxes': word_boxes, 'question': question} , top_k=2 ) self.assertEqual( nested_simplify(UpperCAmelCase_ , decimals=4 ) , [ {'score': 0.4_251, 'answer': 'us-001', 'start': 16, 'end': 16}, {'score': 0.0_819, 'answer': '1110212019', 'start': 23, 'end': 23}, ] , ) @slow @require_torch @require_pytesseract @require_vision def A_ ( self : List[Any] ): SCREAMING_SNAKE_CASE__ = AutoTokenizer.from_pretrained( 'impira/layoutlm-document-qa' , revision='3dc6de3' , add_prefix_space=UpperCAmelCase_ ) SCREAMING_SNAKE_CASE__ = pipeline( 'document-question-answering' , model='impira/layoutlm-document-qa' , tokenizer=UpperCAmelCase_ , revision='3dc6de3' , max_seq_len=50 , ) SCREAMING_SNAKE_CASE__ = INVOICE_URL SCREAMING_SNAKE_CASE__ = 'What is the invoice number?' 
SCREAMING_SNAKE_CASE__ = dqa_pipeline(image=UpperCAmelCase_ , question=UpperCAmelCase_ , top_k=2 ) self.assertEqual( nested_simplify(UpperCAmelCase_ , decimals=4 ) , [ {'score': 0.9_999, 'answer': 'us-001', 'start': 16, 'end': 16}, {'score': 0.9_998, 'answer': 'us-001', 'start': 16, 'end': 16}, ] , ) SCREAMING_SNAKE_CASE__ = dqa_pipeline( [{'image': image, 'question': question}, {'image': image, 'question': question}] , top_k=2 ) self.assertEqual( nested_simplify(UpperCAmelCase_ , decimals=4 ) , [ [ {'score': 0.9_999, 'answer': 'us-001', 'start': 16, 'end': 16}, {'score': 0.9_998, 'answer': 'us-001', 'start': 16, 'end': 16}, ] ] * 2 , ) SCREAMING_SNAKE_CASE__ = list(zip(*apply_tesseract(load_image(UpperCAmelCase_ ) , UpperCAmelCase_ , '' ) ) ) # This model should also work if `image` is set to None SCREAMING_SNAKE_CASE__ = dqa_pipeline({'image': None, 'word_boxes': word_boxes, 'question': question} , top_k=2 ) self.assertEqual( nested_simplify(UpperCAmelCase_ , decimals=4 ) , [ {'score': 0.9_999, 'answer': 'us-001', 'start': 16, 'end': 16}, {'score': 0.9_998, 'answer': 'us-001', 'start': 16, 'end': 16}, ] , ) @slow @require_torch def A_ ( self : int ): SCREAMING_SNAKE_CASE__ = pipeline( 'document-question-answering' , model='naver-clova-ix/donut-base-finetuned-docvqa' , tokenizer=AutoTokenizer.from_pretrained('naver-clova-ix/donut-base-finetuned-docvqa' ) , feature_extractor='naver-clova-ix/donut-base-finetuned-docvqa' , ) SCREAMING_SNAKE_CASE__ = INVOICE_URL SCREAMING_SNAKE_CASE__ = 'What is the invoice number?' SCREAMING_SNAKE_CASE__ = dqa_pipeline(image=UpperCAmelCase_ , question=UpperCAmelCase_ , top_k=2 ) self.assertEqual(nested_simplify(UpperCAmelCase_ , decimals=4 ) , [{'answer': 'us-001'}] ) @require_tf @unittest.skip('Document question answering not implemented in TF' ) def A_ ( self : Any ): pass
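# A minimal usage sketch of the "document-question-answering" pipeline that the
# tests above exercise. The checkpoint and image path here are illustrative
# assumptions, not fixtures from this suite; only the call shape mirrors the tests.
from transformers import pipeline

dqa_pipeline = pipeline("document-question-answering", model="impira/layoutlm-document-qa")
answers = dqa_pipeline(image="invoice.png", question="What is the invoice number?", top_k=2)
# Each answer is a dict with "score", "answer", and "start"/"end" word indices,
# matching the structures asserted via nested_simplify above.
print(answers)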
400
import copy from typing import Any, Dict, List, Optional, Union import numpy as np import torch from ...audio_utils import mel_filter_bank, spectrogram, window_function from ...feature_extraction_sequence_utils import SequenceFeatureExtractor from ...feature_extraction_utils import BatchFeature from ...utils import TensorType, logging __snake_case = logging.get_logger(__name__) class lowercase__ ( _UpperCAmelCase ): A__ : Dict =["""input_features""", """is_longer"""] def __init__( self : Any , UpperCAmelCase_ : int=64 , UpperCAmelCase_ : List[str]=48000 , UpperCAmelCase_ : List[Any]=480 , UpperCAmelCase_ : Dict=10 , UpperCAmelCase_ : int=1024 , UpperCAmelCase_ : List[str]=0.0 , UpperCAmelCase_ : List[Any]=False , UpperCAmelCase_ : float = 0 , UpperCAmelCase_ : float = 14000 , UpperCAmelCase_ : int = None , UpperCAmelCase_ : str = "fusion" , UpperCAmelCase_ : str = "repeatpad" , **UpperCAmelCase_ : List[Any] , ): super().__init__( feature_size=UpperCAmelCase_ , sampling_rate=UpperCAmelCase_ , padding_value=UpperCAmelCase_ , return_attention_mask=UpperCAmelCase_ , **UpperCAmelCase_ , ) SCREAMING_SNAKE_CASE__ = top_db SCREAMING_SNAKE_CASE__ = truncation SCREAMING_SNAKE_CASE__ = padding SCREAMING_SNAKE_CASE__ = fft_window_size SCREAMING_SNAKE_CASE__ = (fft_window_size >> 1) + 1 SCREAMING_SNAKE_CASE__ = hop_length SCREAMING_SNAKE_CASE__ = max_length_s SCREAMING_SNAKE_CASE__ = max_length_s * sampling_rate SCREAMING_SNAKE_CASE__ = sampling_rate SCREAMING_SNAKE_CASE__ = frequency_min SCREAMING_SNAKE_CASE__ = frequency_max SCREAMING_SNAKE_CASE__ = mel_filter_bank( num_frequency_bins=self.nb_frequency_bins , num_mel_filters=UpperCAmelCase_ , min_frequency=UpperCAmelCase_ , max_frequency=UpperCAmelCase_ , sampling_rate=UpperCAmelCase_ , norm=UpperCAmelCase_ , mel_scale='htk' , ) SCREAMING_SNAKE_CASE__ = mel_filter_bank( num_frequency_bins=self.nb_frequency_bins , num_mel_filters=UpperCAmelCase_ , min_frequency=UpperCAmelCase_ , max_frequency=UpperCAmelCase_ , sampling_rate=UpperCAmelCase_ , norm='slaney' , mel_scale='slaney' , ) def A_ ( self : Optional[Any] ): SCREAMING_SNAKE_CASE__ = copy.deepcopy(self.__dict__ ) SCREAMING_SNAKE_CASE__ = self.__class__.__name__ if "mel_filters" in output: del output["mel_filters"] if "mel_filters_slaney" in output: del output["mel_filters_slaney"] return output def A_ ( self : Union[str, Any] , UpperCAmelCase_ : np.array , UpperCAmelCase_ : Optional[np.array] = None ): SCREAMING_SNAKE_CASE__ = spectrogram( UpperCAmelCase_ , window_function(self.fft_window_size , 'hann' ) , frame_length=self.fft_window_size , hop_length=self.hop_length , power=2.0 , mel_filters=UpperCAmelCase_ , log_mel='dB' , ) return log_mel_spectrogram.T def A_ ( self : str , UpperCAmelCase_ : List[str] , UpperCAmelCase_ : List[str] , UpperCAmelCase_ : List[str] ): SCREAMING_SNAKE_CASE__ = np.array_split(list(range(0 , total_frames - chunk_frames + 1 ) ) , 3 ) if len(ranges[1] ) == 0: # if the audio is too short, we just use the first chunk SCREAMING_SNAKE_CASE__ = [0] if len(ranges[2] ) == 0: # if the audio is too short, we just use the first chunk SCREAMING_SNAKE_CASE__ = [0] # randomly choose index for each part SCREAMING_SNAKE_CASE__ = np.random.choice(ranges[0] ) SCREAMING_SNAKE_CASE__ = np.random.choice(ranges[1] ) SCREAMING_SNAKE_CASE__ = np.random.choice(ranges[2] ) SCREAMING_SNAKE_CASE__ = mel[idx_front : idx_front + chunk_frames, :] SCREAMING_SNAKE_CASE__ = mel[idx_middle : idx_middle + chunk_frames, :] SCREAMING_SNAKE_CASE__ = mel[idx_back : idx_back + chunk_frames, :] 
SCREAMING_SNAKE_CASE__ = torch.tensor(mel[None, None, :] ) SCREAMING_SNAKE_CASE__ = torch.nn.functional.interpolate( UpperCAmelCase_ , size=[chunk_frames, 64] , mode='bilinear' , align_corners=UpperCAmelCase_ ) SCREAMING_SNAKE_CASE__ = mel_shrink[0][0].numpy() SCREAMING_SNAKE_CASE__ = np.stack([mel_shrink, mel_chunk_front, mel_chunk_middle, mel_chunk_back] , axis=0 ) return mel_fusion def A_ ( self : Union[str, Any] , UpperCAmelCase_ : np.array , UpperCAmelCase_ : List[str] , UpperCAmelCase_ : Optional[Any] , UpperCAmelCase_ : str ): if waveform.shape[0] > max_length: if truncation == "rand_trunc": SCREAMING_SNAKE_CASE__ = True # random crop to max_length (for compatibility) -> this should be handled by self.pad SCREAMING_SNAKE_CASE__ = len(UpperCAmelCase_ ) - max_length SCREAMING_SNAKE_CASE__ = np.random.randint(0 , overflow + 1 ) SCREAMING_SNAKE_CASE__ = waveform[idx : idx + max_length] SCREAMING_SNAKE_CASE__ = self._np_extract_fbank_features(UpperCAmelCase_ , self.mel_filters_slaney )[None, :] elif truncation == "fusion": SCREAMING_SNAKE_CASE__ = self._np_extract_fbank_features(UpperCAmelCase_ , self.mel_filters ) SCREAMING_SNAKE_CASE__ = max_length // self.hop_length + 1 # the +1 related to how the spectrogram is computed SCREAMING_SNAKE_CASE__ = mel.shape[0] if chunk_frames == total_frames: # there is a corner case where the audio length is larger than max_length but smaller than max_length+hop_length. # In this case, we just use the whole audio. SCREAMING_SNAKE_CASE__ = np.stack([mel, mel, mel, mel] , axis=0 ) SCREAMING_SNAKE_CASE__ = False else: SCREAMING_SNAKE_CASE__ = self._random_mel_fusion(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ ) SCREAMING_SNAKE_CASE__ = True else: raise NotImplementedError(F'data_truncating {truncation} not implemented' ) else: SCREAMING_SNAKE_CASE__ = False # only use repeat as a new possible value for padding. 
you repeat the audio before applying the usual max_length padding if waveform.shape[0] < max_length: if padding == "repeat": SCREAMING_SNAKE_CASE__ = int(max_length / len(UpperCAmelCase_ ) ) SCREAMING_SNAKE_CASE__ = np.stack(np.tile(UpperCAmelCase_ , n_repeat + 1 ) )[:max_length] if padding == "repeatpad": SCREAMING_SNAKE_CASE__ = int(max_length / len(UpperCAmelCase_ ) ) SCREAMING_SNAKE_CASE__ = np.stack(np.tile(UpperCAmelCase_ , UpperCAmelCase_ ) ) SCREAMING_SNAKE_CASE__ = np.pad(UpperCAmelCase_ , (0, max_length - waveform.shape[0]) , mode='constant' , constant_values=0 ) if truncation == "fusion": SCREAMING_SNAKE_CASE__ = self._np_extract_fbank_features(UpperCAmelCase_ , self.mel_filters ) SCREAMING_SNAKE_CASE__ = np.stack([input_mel, input_mel, input_mel, input_mel] , axis=0 ) else: SCREAMING_SNAKE_CASE__ = self._np_extract_fbank_features(UpperCAmelCase_ , self.mel_filters_slaney )[None, :] return input_mel, longer def __call__( self : List[str] , UpperCAmelCase_ : Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]] , UpperCAmelCase_ : str = None , UpperCAmelCase_ : Optional[str] = None , UpperCAmelCase_ : Optional[int] = None , UpperCAmelCase_ : Optional[int] = None , UpperCAmelCase_ : Optional[Union[str, TensorType]] = None , **UpperCAmelCase_ : Optional[int] , ): SCREAMING_SNAKE_CASE__ = truncation if truncation is not None else self.truncation SCREAMING_SNAKE_CASE__ = padding if padding else self.padding if sampling_rate is not None: if sampling_rate != self.sampling_rate: raise ValueError( F'The model corresponding to this feature extractor: {self.__class__.__name__} was trained using a' F' sampling rate of {self.sampling_rate}. Please make sure that the provided `raw_speech` input' F' was sampled with {self.sampling_rate} and not {sampling_rate}.' ) else: logger.warning( 'It is strongly recommended to pass the `sampling_rate` argument to this function. ' 'Failing to do so can result in silent errors that might be hard to debug.' ) SCREAMING_SNAKE_CASE__ = isinstance(UpperCAmelCase_ , np.ndarray ) and len(raw_speech.shape ) > 1 if is_batched_numpy and len(raw_speech.shape ) > 2: raise ValueError(F'Only mono-channel audio is supported for input to {self}' ) SCREAMING_SNAKE_CASE__ = is_batched_numpy or ( isinstance(UpperCAmelCase_ , (list, tuple) ) and (isinstance(raw_speech[0] , (np.ndarray, tuple, list) )) ) if is_batched: SCREAMING_SNAKE_CASE__ = [np.asarray(UpperCAmelCase_ , dtype=np.floataa ) for speech in raw_speech] elif not is_batched and not isinstance(UpperCAmelCase_ , np.ndarray ): SCREAMING_SNAKE_CASE__ = np.asarray(UpperCAmelCase_ , dtype=np.floataa ) elif isinstance(UpperCAmelCase_ , np.ndarray ) and raw_speech.dtype is np.dtype(np.floataa ): SCREAMING_SNAKE_CASE__ = raw_speech.astype(np.floataa ) # always return batch if not is_batched: SCREAMING_SNAKE_CASE__ = [np.asarray(UpperCAmelCase_ )] # convert to mel spectrogram, truncate and pad if needed. 
SCREAMING_SNAKE_CASE__ = [ self._get_input_mel(UpperCAmelCase_ , max_length if max_length else self.nb_max_samples , UpperCAmelCase_ , UpperCAmelCase_ ) for waveform in raw_speech ] SCREAMING_SNAKE_CASE__ = [] SCREAMING_SNAKE_CASE__ = [] for mel, longer in padded_inputs: input_mel.append(UpperCAmelCase_ ) is_longer.append(UpperCAmelCase_ ) if truncation == "fusion" and sum(UpperCAmelCase_ ) == 0: # if no audio is longer than 10s, then randomly select one audio to be longer SCREAMING_SNAKE_CASE__ = np.random.randint(0 , len(UpperCAmelCase_ ) ) SCREAMING_SNAKE_CASE__ = True if isinstance(input_mel[0] , UpperCAmelCase_ ): SCREAMING_SNAKE_CASE__ = [np.asarray(UpperCAmelCase_ , dtype=np.floataa ) for feature in input_mel] # is_longer is a list of bool SCREAMING_SNAKE_CASE__ = [[longer] for longer in is_longer] SCREAMING_SNAKE_CASE__ = {'input_features': input_mel, 'is_longer': is_longer} SCREAMING_SNAKE_CASE__ = BatchFeature(UpperCAmelCase_ ) if return_tensors is not None: SCREAMING_SNAKE_CASE__ = input_features.convert_to_tensors(UpperCAmelCase_ ) return input_features
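# Standalone sketch of the "repeatpad" branch implemented above: tile a short
# waveform until it nearly fills max_length, then zero-pad the remainder.
# Names and values are illustrative; the real logic lives in _get_input_mel.
import numpy as np

def repeat_pad(waveform: np.ndarray, max_length: int) -> np.ndarray:
    n_repeat = int(max_length / len(waveform))
    tiled = np.tile(waveform, n_repeat)
    # pad the tail with zeros up to max_length
    return np.pad(tiled, (0, max_length - tiled.shape[0]), mode="constant", constant_values=0)

print(repeat_pad(np.ones(3, dtype=np.float32), 10))
# -> [1. 1. 1. 1. 1. 1. 1. 1. 1. 0.]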
400
1
'''simple docstring''' import numpy as np import torch from torch.utils.data import DataLoader from accelerate.utils.dataclasses import DistributedType class __SCREAMING_SNAKE_CASE : def __init__( self : Dict , UpperCAmelCase__ : Union[str, Any]=2 , UpperCAmelCase__ : str=3 , UpperCAmelCase__ : Optional[int]=64 , UpperCAmelCase__ : Union[str, Any]=None ): '''simple docstring''' lowercase : Optional[Any] =np.random.default_rng(UpperCAmelCase__ ) lowercase : Union[str, Any] =length lowercase : List[Any] =rng.normal(size=(length,) ).astype(np.floataa ) lowercase : Dict =a * self.x + b + rng.normal(scale=0.1 , size=(length,) ).astype(np.floataa ) def __len__( self : Tuple ): '''simple docstring''' return self.length def __getitem__( self : Union[str, Any] , UpperCAmelCase__ : Any ): '''simple docstring''' return {"x": self.x[i], "y": self.y[i]} class __SCREAMING_SNAKE_CASE ( torch.nn.Module ): def __init__( self : Union[str, Any] , UpperCAmelCase__ : List[Any]=0 , UpperCAmelCase__ : List[Any]=0 , UpperCAmelCase__ : str=False ): '''simple docstring''' super().__init__() lowercase : Any =torch.nn.Parameter(torch.tensor([2, 3] ).float() ) lowercase : List[str] =torch.nn.Parameter(torch.tensor([2, 3] ).float() ) lowercase : List[Any] =True def lowerCamelCase_ ( self : str , UpperCAmelCase__ : Any=None ): '''simple docstring''' if self.first_batch: print(F'''Model dtype: {self.a.dtype}, {self.b.dtype}. Input dtype: {x.dtype}''' ) lowercase : Any =False return x * self.a[0] + self.b[0] class __SCREAMING_SNAKE_CASE ( torch.nn.Module ): def __init__( self : List[Any] , UpperCAmelCase__ : Optional[Any]=0 , UpperCAmelCase__ : int=0 , UpperCAmelCase__ : Optional[Any]=False ): '''simple docstring''' super().__init__() lowercase : Optional[int] =torch.nn.Parameter(torch.tensor(UpperCAmelCase__ ).float() ) lowercase : int =torch.nn.Parameter(torch.tensor(UpperCAmelCase__ ).float() ) lowercase : Tuple =True def lowerCamelCase_ ( self : List[Any] , UpperCAmelCase__ : Optional[Any]=None ): '''simple docstring''' if self.first_batch: print(F'''Model dtype: {self.a.dtype}, {self.b.dtype}. Input dtype: {x.dtype}''' ) lowercase : List[Any] =False return x * self.a + self.b def _lowerCAmelCase ( __magic_name__ : Tuple , __magic_name__ : int = 16 ) -> Union[str, Any]: from datasets import load_dataset from transformers import AutoTokenizer lowercase : Dict =AutoTokenizer.from_pretrained('''bert-base-cased''' ) lowercase : Optional[int] ={'''train''': '''tests/test_samples/MRPC/train.csv''', '''validation''': '''tests/test_samples/MRPC/dev.csv'''} lowercase : Dict =load_dataset('''csv''' , data_files=__magic_name__ ) lowercase : int =datasets['''train'''].unique('''label''' ) lowercase : List[str] ={v: i for i, v in enumerate(__magic_name__ )} def tokenize_function(__magic_name__ : Dict ): # max_length=None => use the model max length (it's actually the default) lowercase : Dict =tokenizer( examples['''sentence1'''] , examples['''sentence2'''] , truncation=__magic_name__ , max_length=__magic_name__ , padding='''max_length''' ) if "label" in examples: lowercase : List[Any] =[label_to_id[l] for l in examples['''label''']] return outputs # Apply the method we just defined to all the examples in all the splits of the dataset lowercase : Optional[int] =datasets.map( __magic_name__ , batched=__magic_name__ , remove_columns=['''sentence1''', '''sentence2''', '''label'''] , ) def collate_fn(__magic_name__ : Any ): # On TPU it's best to pad everything to the same length or training will be very slow. 
if accelerator.distributed_type == DistributedType.TPU: return tokenizer.pad(__magic_name__ , padding='''max_length''' , max_length=128 , return_tensors='''pt''' ) return tokenizer.pad(__magic_name__ , padding='''longest''' , return_tensors='''pt''' ) # Instantiate dataloaders. lowercase : Union[str, Any] =DataLoader(tokenized_datasets['''train'''] , shuffle=__magic_name__ , collate_fn=__magic_name__ , batch_size=2 ) lowercase : Tuple =DataLoader(tokenized_datasets['''validation'''] , shuffle=__magic_name__ , collate_fn=__magic_name__ , batch_size=1 ) return train_dataloader, eval_dataloader
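# A small illustration of the two padding strategies used by collate_fn above:
# fixed-length padding (TPU-friendly, every batch has the same shape) versus
# dynamic "longest" padding. The sentences are illustrative; tokenizer.pad is
# the same API the collate function calls.
from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("bert-base-cased")
features = [tok("a short sentence"), tok("a somewhat longer example sentence")]

fixed = tok.pad(features, padding="max_length", max_length=128, return_tensors="pt")
dynamic = tok.pad(features, padding="longest", return_tensors="pt")
print(fixed["input_ids"].shape, dynamic["input_ids"].shape)
# e.g. torch.Size([2, 128]) torch.Size([2, 7])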
92
'''simple docstring'''
from __future__ import annotations

from collections.abc import Generator

import requests
from bs4 import BeautifulSoup

url = "https://www.indeed.co.in/jobs?q=mobile+app+development&l="


def fetch_jobs(location: str = "mumbai") -> Generator[tuple[str, str], None, None]:
    """simple docstring"""
    soup = BeautifulSoup(requests.get(url + location).content, "html.parser")
    # This attribute finds out all the specifics listed in a job
    for job in soup.find_all("div", attrs={"data-tn-component": "organicJob"}):
        job_title = job.find("a", attrs={"data-tn-element": "jobTitle"}).text.strip()
        company_name = job.find("span", {"class": "company"}).text.strip()
        yield job_title, company_name


if __name__ == "__main__":
    for i, job in enumerate(fetch_jobs("Bangalore"), 1):
        print(f"Job {i:>2} is {job[0]} at {job[1]}")
565
0
'''simple docstring'''


def jaccard_similarity(set_a, set_b, alternative_union=False):
    '''simple docstring'''
    if isinstance(set_a, set) and isinstance(set_b, set):
        intersection = len(set_a.intersection(set_b))
        if alternative_union:
            union = len(set_a) + len(set_b)
        else:
            union = len(set_a.union(set_b))
        return intersection / union
    if isinstance(set_a, (list, tuple)) and isinstance(set_b, (list, tuple)):
        intersection = [element for element in set_a if element in set_b]
        if alternative_union:
            union = len(set_a) + len(set_b)
            return len(intersection) / union
        else:
            union = set_a + [element for element in set_b if element not in set_a]
            return len(intersection) / len(union)
    return None


if __name__ == "__main__":
    set_a = {"a", "b", "c", "d", "e"}
    set_b = {"c", "d", "e", "f", "h", "i"}
    print(jaccard_similarity(set_a, set_b))
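# Worked check for the demo values above: the sets share {"c", "d", "e"}, so
# |A ∩ B| = 3 and |A ∪ B| = 8, giving a printed similarity of 3 / 8 = 0.375.
assert jaccard_similarity({"a", "b", "c", "d", "e"}, {"c", "d", "e", "f", "h", "i"}) == 0.375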
123
'''simple docstring''' from unittest import TestCase from datasets import Dataset from minhash_deduplication import deduplicate_dataset, make_duplicate_clusters def A ( ) -> Tuple: '''simple docstring''' __lowerCAmelCase : int = { 'repo_name': ['test_repo1', 'test_repo2', 'test_repo3'], 'path': ['test_1.py', 'test_2.py', 'unit_test.py'], 'content': ['a ' * 2_0, 'a ' * 3_0, 'b ' * 7], } __lowerCAmelCase : Dict = Dataset.from_dict(_UpperCAmelCase ) return dataset class UpperCamelCase__ ( a ): '''simple docstring''' def snake_case ( self ) -> Union[str, Any]: __lowerCAmelCase : Dict = get_dataset() __lowerCAmelCase : Union[str, Any] = make_duplicate_clusters(SCREAMING_SNAKE_CASE , 0.8_5 ) self.assertEqual(len(duplicate_clusters[0] ) , 2 ) def snake_case ( self ) -> Any: __lowerCAmelCase : List[Any] = get_dataset() __lowerCAmelCase , __lowerCAmelCase : List[Any] = deduplicate_dataset(SCREAMING_SNAKE_CASE ) self.assertEqual(len(SCREAMING_SNAKE_CASE ) , 2 ) print(SCREAMING_SNAKE_CASE ) self.assertEqual(duplicate_clusters[0][0]['copies'] , 2 ) self.assertEqual(duplicate_clusters[0][0]['is_extreme'] , SCREAMING_SNAKE_CASE )
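# A rough intuition for why the first two fixture documents form one duplicate
# cluster at threshold 0.85: "a " * 20 and "a " * 30 yield (near-)identical
# token sets, so their MinHash-estimated Jaccard similarity is ~1.0, while
# "b " * 7 shares nothing with them. This is a simplification of the real
# shingle-based computation in minhash_deduplication.
tokens_small = set(("a " * 20).split())
tokens_large = set(("a " * 30).split())
print(tokens_small == tokens_large)  # True -> estimated similarity ~1.0 >= 0.85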
123
1
'''simple docstring''' from typing import TYPE_CHECKING from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available SCREAMING_SNAKE_CASE_ = { 'configuration_trajectory_transformer': [ 'TRAJECTORY_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP', 'TrajectoryTransformerConfig', ], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: SCREAMING_SNAKE_CASE_ = [ 'TRAJECTORY_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST', 'TrajectoryTransformerModel', 'TrajectoryTransformerPreTrainedModel', 'load_tf_weights_in_trajectory_transformer', ] if TYPE_CHECKING: from .configuration_trajectory_transformer import ( TRAJECTORY_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, TrajectoryTransformerConfig, ) try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_trajectory_transformer import ( TRAJECTORY_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, TrajectoryTransformerModel, TrajectoryTransformerPreTrainedModel, load_tf_weights_in_trajectory_transformer, ) else: import sys SCREAMING_SNAKE_CASE_ = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
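# A simplified stand-in showing how the lazy-import pattern above behaves:
# a submodule is imported only when one of its attributes is first accessed.
# This is an illustrative sketch, not the actual transformers._LazyModule.
import importlib
import types


class SimpleLazyModule(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        self._attr_to_module = {
            attr: module for module, attrs in import_structure.items() for attr in attrs
        }

    def __getattr__(self, attr):
        if attr not in self._attr_to_module:
            raise AttributeError(attr)
        # Resolve the owning submodule lazily, then fetch the attribute from it.
        module = importlib.import_module("." + self._attr_to_module[attr], self.__name__)
        return getattr(module, attr)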
523
'''simple docstring'''
import numpy as np


def sigmoid(vector: np.array) -> np.array:
    return 1 / (1 + np.exp(-vector))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
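# Quick numerical check of the sigmoid above (self-contained): sigmoid(0) = 0.5
# and the function saturates towards 0 and 1 for large |x|.
import numpy as np

print(1 / (1 + np.exp(-np.array([-10.0, 0.0, 10.0]))))
# -> [4.53978687e-05 5.00000000e-01 9.99954602e-01]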
523
1
"""simple docstring""" import json import os from typing import Dict, List, Optional, Tuple from ...tokenization_utils import PreTrainedTokenizer from ...utils import logging _A = logging.get_logger(__name__) _A = { """vocab_file""": """vocab.json""", """tokenizer_config_file""": """tokenizer_config.json""", """merges_file""": """merges.txt""", } _A = { """vocab_file""": { """facebook/s2t-wav2vec2-large-en-de""": ( """https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/vocab.json""" ), }, """tokenizer_config_file""": { """facebook/s2t-wav2vec2-large-en-de""": ( """https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/tokenizer_config.json""" ), }, """merges_file""": { """facebook/s2t-wav2vec2-large-en-de""": ( """https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/merges.txt""" ), }, } _A = """</w>""" _A = """@@ """ def lowercase_ ( __UpperCAmelCase ) -> List[str]: lowerCAmelCase__ : Union[str, Any] = set() lowerCAmelCase__ : Tuple = word[0] for char in word[1:]: pairs.add((prev_char, char) ) lowerCAmelCase__ : Dict = char return pairs # Speech2Text2 has no max input length _A = {"""facebook/s2t-wav2vec2-large-en-de""": 1_0_2_4} class _lowerCamelCase ( a_ ): _lowerCamelCase :str = VOCAB_FILES_NAMES _lowerCamelCase :Optional[Any] = PRETRAINED_VOCAB_FILES_MAP _lowerCamelCase :str = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES _lowerCamelCase :str = ["input_ids", "attention_mask"] def __init__( self : Optional[int] , UpperCamelCase : str , UpperCamelCase : str="<s>" , UpperCamelCase : Dict="<pad>" , UpperCamelCase : Dict="</s>" , UpperCamelCase : List[str]="<unk>" , UpperCamelCase : Tuple=False , UpperCamelCase : str=None , **UpperCamelCase : Dict , ) -> Tuple: """simple docstring""" super().__init__( unk_token=UpperCamelCase , bos_token=UpperCamelCase , eos_token=UpperCamelCase , pad_token=UpperCamelCase , do_lower_case=UpperCamelCase , **UpperCamelCase , ) lowerCAmelCase__ : Optional[Any] = do_lower_case with open(UpperCamelCase , encoding="""utf-8""" ) as vocab_handle: lowerCAmelCase__ : Tuple = json.load(UpperCamelCase ) lowerCAmelCase__ : Optional[int] = {v: k for k, v in self.encoder.items()} if merges_file is None: logger.info(f"""No merges files provided. 
{self.__class__.__name__} can only be used for decoding.""" ) lowerCAmelCase__ : str = None lowerCAmelCase__ : Optional[Any] = None else: with open(UpperCamelCase , encoding="""utf-8""" ) as merges_handle: lowerCAmelCase__ : int = merges_handle.read().split("""\n""" )[:-1] lowerCAmelCase__ : List[str] = [tuple(merge.split()[:2] ) for merge in merges] lowerCAmelCase__ : int = dict(zip(UpperCamelCase , range(len(UpperCamelCase ) ) ) ) lowerCAmelCase__ : str = {} @property def _lowerCAmelCase ( self : Optional[Any] ) -> int: """simple docstring""" return len(self.decoder ) def _lowerCAmelCase ( self : int ) -> Dict: """simple docstring""" return dict(self.encoder , **self.added_tokens_encoder ) def _lowerCAmelCase ( self : Tuple , UpperCamelCase : Tuple ) -> List[str]: """simple docstring""" lowerCAmelCase__ : List[str] = tuple(token[:-1] ) + (token[-1] + BPE_TOKEN_MERGES,) if token in self.cache: return self.cache[token] lowerCAmelCase__ : List[Any] = get_pairs(UpperCamelCase ) if not pairs: return token while True: lowerCAmelCase__ : Any = min(UpperCamelCase , key=lambda UpperCamelCase : self.bpe_ranks.get(UpperCamelCase , float("""inf""" ) ) ) if bigram not in self.bpe_ranks: break lowerCAmelCase__ : Dict = bigram lowerCAmelCase__ : Optional[Any] = [] lowerCAmelCase__ : str = 0 while i < len(UpperCamelCase ): try: lowerCAmelCase__ : Union[str, Any] = word.index(UpperCamelCase , UpperCamelCase ) except ValueError: new_word.extend(word[i:] ) break else: new_word.extend(word[i:j] ) lowerCAmelCase__ : Union[str, Any] = j if word[i] == first and i < len(UpperCamelCase ) - 1 and word[i + 1] == second: new_word.append(first + second ) i += 2 else: new_word.append(word[i] ) i += 1 lowerCAmelCase__ : Optional[int] = tuple(UpperCamelCase ) lowerCAmelCase__ : Union[str, Any] = new_word if len(UpperCamelCase ) == 1: break else: lowerCAmelCase__ : Any = get_pairs(UpperCamelCase ) lowerCAmelCase__ : Tuple = """ """.join(UpperCamelCase ) if word == "\n " + BPE_TOKEN_MERGES: lowerCAmelCase__ : List[Any] = """\n""" + BPE_TOKEN_MERGES if word.endswith(UpperCamelCase ): lowerCAmelCase__ : Tuple = word.replace(UpperCamelCase , """""" ) lowerCAmelCase__ : Optional[Any] = word.replace(""" """ , UpperCamelCase ) lowerCAmelCase__ : Union[str, Any] = word return word def _lowerCAmelCase ( self : Optional[int] , UpperCamelCase : Any ) -> Tuple: """simple docstring""" if self.bpe_ranks is None: raise ValueError( """This tokenizer was instantiated without a `merges.txt` file, so""" """ that it can only be used for decoding, not for encoding.""" """Make sure to provide `merges.txt` file at instantiation to enable """ """encoding.""" ) if self.do_lower_case: lowerCAmelCase__ : List[Any] = text.lower() lowerCAmelCase__ : Tuple = text.split() lowerCAmelCase__ : Dict = [] for token in text: if token: split_tokens.extend(list(self.bpe(UpperCamelCase ).split(""" """ ) ) ) return split_tokens def _lowerCAmelCase ( self : Dict , UpperCamelCase : str ) -> int: """simple docstring""" return self.encoder.get(UpperCamelCase , self.encoder.get(self.unk_token ) ) def _lowerCAmelCase ( self : Optional[Any] , UpperCamelCase : int ) -> str: """simple docstring""" lowerCAmelCase__ : List[Any] = self.decoder.get(UpperCamelCase , self.unk_token ) return result def _lowerCAmelCase ( self : List[str] , UpperCamelCase : List[str] ) -> str: """simple docstring""" lowerCAmelCase__ : Optional[Any] = """ """.join(UpperCamelCase ) # make sure @@ tokens are concatenated lowerCAmelCase__ : int = """""".join(string.split(UpperCamelCase ) ) return 
string def _lowerCAmelCase ( self : Any , UpperCamelCase : str , UpperCamelCase : Optional[str] = None ) -> Tuple[str]: """simple docstring""" if not os.path.isdir(UpperCamelCase ): logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" ) return lowerCAmelCase__ : Dict = os.path.join( UpperCamelCase , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] ) lowerCAmelCase__ : List[str] = os.path.join( UpperCamelCase , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""merges_file"""] ) with open(UpperCamelCase , """w""" , encoding="""utf-8""" ) as f: f.write(json.dumps(self.encoder , indent=2 , sort_keys=UpperCamelCase , ensure_ascii=UpperCamelCase ) + """\n""" ) lowerCAmelCase__ : Union[str, Any] = 0 if self.bpe_ranks is None: return (vocab_file,) with open(UpperCamelCase , """w""" , encoding="""utf-8""" ) as writer: for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda UpperCamelCase : kv[1] ): if index != token_index: logger.warning( f"""Saving vocabulary to {merges_file}: BPE merge indices are not consecutive.""" """ Please check that the tokenizer is not corrupted!""" ) lowerCAmelCase__ : Optional[int] = token_index writer.write(""" """.join(UpperCamelCase ) + """\n""" ) index += 1 return (vocab_file, merges_file)
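# A minimal, self-contained illustration of the get_pairs helper defined at the
# top of this file (named lowercase_ in this dump): it collects the set of
# adjacent symbol pairs that BPE merge ranking is computed over.
word = ("l", "o", "w", "e", "r", "</w>")
pairs = set()
prev_char = word[0]
for char in word[1:]:
    pairs.add((prev_char, char))
    prev_char = char
print(pairs)
# {('l', 'o'), ('o', 'w'), ('w', 'e'), ('e', 'r'), ('r', '</w>')}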
716
"""simple docstring""" import argparse import logging import os from datetime import datetime import numpy as np import torch from torch import nn from torch.utils.data import DataLoader, RandomSampler, TensorDataset from tqdm import tqdm from transformers import GPTaLMHeadModel _A = logging.getLogger(__name__) def lowercase_ ( __UpperCAmelCase , __UpperCAmelCase ) -> Optional[Any]: # save results if os.path.exists(__UpperCAmelCase ): if os.path.exists(os.path.join(__UpperCAmelCase , """config.json""" ) ) and os.path.isfile( os.path.join(__UpperCAmelCase , """config.json""" ) ): os.remove(os.path.join(__UpperCAmelCase , """config.json""" ) ) if os.path.exists(os.path.join(__UpperCAmelCase , """pytorch_model.bin""" ) ) and os.path.isfile( os.path.join(__UpperCAmelCase , """pytorch_model.bin""" ) ): os.remove(os.path.join(__UpperCAmelCase , """pytorch_model.bin""" ) ) else: os.makedirs(__UpperCAmelCase ) model.save_pretrained(__UpperCAmelCase ) def lowercase_ ( __UpperCAmelCase , __UpperCAmelCase=False ) -> str: lowerCAmelCase__ : Dict = 2 if unlogit: lowerCAmelCase__ : Tuple = torch.pow(__UpperCAmelCase , __UpperCAmelCase ) lowerCAmelCase__ : Any = p * torch.log(__UpperCAmelCase ) lowerCAmelCase__ : Optional[int] = 0 return -plogp.sum(dim=-1 ) def lowercase_ ( __UpperCAmelCase ) -> Any: logger.info("""lv, h >\t""" + """\t""".join(f"""{x + 1}""" for x in range(len(__UpperCAmelCase ) ) ) ) for row in range(len(__UpperCAmelCase ) ): if tensor.dtype != torch.long: logger.info(f"""layer {row + 1}:\t""" + """\t""".join(f"""{x:.5f}""" for x in tensor[row].cpu().data ) ) else: logger.info(f"""layer {row + 1}:\t""" + """\t""".join(f"""{x:d}""" for x in tensor[row].cpu().data ) ) def lowercase_ ( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase=True , __UpperCAmelCase=True , __UpperCAmelCase=None , __UpperCAmelCase=False ) -> List[Any]: lowerCAmelCase__ , lowerCAmelCase__ : Tuple = model.config.num_hidden_layers, model.config.num_attention_heads lowerCAmelCase__ : Dict = torch.zeros(__UpperCAmelCase , __UpperCAmelCase ).to(args.device ) lowerCAmelCase__ : str = torch.zeros(__UpperCAmelCase , __UpperCAmelCase ).to(args.device ) if head_mask is None: lowerCAmelCase__ : Dict = torch.ones(__UpperCAmelCase , __UpperCAmelCase ).to(args.device ) head_mask.requires_grad_(requires_grad=__UpperCAmelCase ) # If actually pruned attention multi-head, set head mask to None to avoid shape mismatch if actually_pruned: lowerCAmelCase__ : str = None lowerCAmelCase__ : Tuple = 0.0 lowerCAmelCase__ : List[Any] = 0.0 for step, inputs in enumerate(tqdm(__UpperCAmelCase , desc="""Iteration""" , disable=args.local_rank not in [-1, 0] ) ): lowerCAmelCase__ : Union[str, Any] = tuple(t.to(args.device ) for t in inputs ) ((lowerCAmelCase__) , ) : Any = inputs # Do a forward pass (not with torch.no_grad() since we need gradients for importance score - see below) lowerCAmelCase__ : Any = model(__UpperCAmelCase , labels=__UpperCAmelCase , head_mask=__UpperCAmelCase ) # (loss), lm_logits, presents, (all hidden_states), (attentions) lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ : str = ( outputs[0], outputs[1], outputs[-1], ) # Loss and logits are the first, attention the last loss.backward() # Backpropagate to populate the gradients in the head mask total_loss += loss.detach().cpu().numpy() if compute_entropy: for layer, attn in enumerate(__UpperCAmelCase ): lowerCAmelCase__ : Union[str, Any] = entropy(attn.detach() , __UpperCAmelCase ) attn_entropy[layer] += masked_entropy.sum(-1 ).sum(0 ).sum(0 
).detach() if compute_importance: head_importance += head_mask.grad.abs().detach() tot_tokens += torch.ones_like(__UpperCAmelCase ).float().detach().sum().data # Normalize attn_entropy /= tot_tokens head_importance /= tot_tokens # Layerwise importance normalization if not args.dont_normalize_importance_by_layer: lowerCAmelCase__ : Optional[int] = 2 lowerCAmelCase__ : Optional[Any] = torch.pow(torch.pow(__UpperCAmelCase , __UpperCAmelCase ).sum(-1 ) , 1 / exponent ) head_importance /= norm_by_layer.unsqueeze(-1 ) + 1E-20 if not args.dont_normalize_global_importance: lowerCAmelCase__ : List[str] = (head_importance - head_importance.min()) / (head_importance.max() - head_importance.min()) # Print matrices if compute_entropy: logger.info("""Attention entropies""" ) print_ad_tensor(__UpperCAmelCase ) if compute_importance: logger.info("""Head importance scores""" ) print_ad_tensor(__UpperCAmelCase ) logger.info("""Head ranked by importance scores""" ) lowerCAmelCase__ : Dict = torch.zeros(head_importance.numel() , dtype=torch.long , device=args.device ) lowerCAmelCase__ : Optional[Any] = torch.arange( head_importance.numel() , device=args.device ) lowerCAmelCase__ : Tuple = head_ranks.view_as(__UpperCAmelCase ) print_ad_tensor(__UpperCAmelCase ) return attn_entropy, head_importance, total_loss def lowercase_ ( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) -> Union[str, Any]: lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ : List[str] = compute_heads_importance(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , compute_entropy=__UpperCAmelCase ) lowerCAmelCase__ : Any = 1 / loss # instead of downsteam score use the LM loss logger.info("""Pruning: original score: %f, threshold: %f""" , __UpperCAmelCase , original_score * args.masking_threshold ) lowerCAmelCase__ : Any = torch.ones_like(__UpperCAmelCase ) lowerCAmelCase__ : Union[str, Any] = max(1 , int(new_head_mask.numel() * args.masking_amount ) ) lowerCAmelCase__ : Optional[int] = original_score while current_score >= original_score * args.masking_threshold: lowerCAmelCase__ : Tuple = new_head_mask.clone().detach() # save current head mask # heads from least important to most - keep only not-masked heads lowerCAmelCase__ : Tuple = float("""Inf""" ) lowerCAmelCase__ : str = head_importance.view(-1 ).sort()[1] if len(__UpperCAmelCase ) <= num_to_mask: print("""BREAK BY num_to_mask""" ) break # mask heads lowerCAmelCase__ : List[Any] = current_heads_to_mask[:num_to_mask] logger.info("""Heads to mask: %s""" , str(current_heads_to_mask.tolist() ) ) lowerCAmelCase__ : str = new_head_mask.view(-1 ) lowerCAmelCase__ : str = 0.0 lowerCAmelCase__ : Optional[int] = new_head_mask.view_as(__UpperCAmelCase ) lowerCAmelCase__ : Optional[int] = new_head_mask.clone().detach() print_ad_tensor(__UpperCAmelCase ) # Compute metric and head importance again lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ : Optional[Any] = compute_heads_importance( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , compute_entropy=__UpperCAmelCase , head_mask=__UpperCAmelCase ) lowerCAmelCase__ : int = 1 / loss logger.info( """Masking: current score: %f, remaining heads %d (%.1f percents)""" , __UpperCAmelCase , new_head_mask.sum() , new_head_mask.sum() / new_head_mask.numel() * 100 , ) logger.info("""Final head mask""" ) print_ad_tensor(__UpperCAmelCase ) np.save(os.path.join(args.output_dir , """head_mask.npy""" ) , head_mask.detach().cpu().numpy() ) return head_mask def lowercase_ ( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , 
__UpperCAmelCase ) -> List[Any]: lowerCAmelCase__ : List[str] = datetime.now() lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ : Union[str, Any] = compute_heads_importance( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , compute_entropy=__UpperCAmelCase , compute_importance=__UpperCAmelCase , head_mask=__UpperCAmelCase ) lowerCAmelCase__ : Tuple = 1 / loss lowerCAmelCase__ : List[str] = datetime.now() - before_time lowerCAmelCase__ : Optional[Any] = sum(p.numel() for p in model.parameters() ) lowerCAmelCase__ : List[str] = { layer: (1 - head_mask[layer].long()).nonzero().squeeze().tolist() for layer in range(len(__UpperCAmelCase ) ) } for k, v in heads_to_prune.items(): if isinstance(__UpperCAmelCase , __UpperCAmelCase ): lowerCAmelCase__ : int = [ v, ] assert sum(len(__UpperCAmelCase ) for h in heads_to_prune.values() ) == (1 - head_mask.long()).sum().item() model.prune_heads(__UpperCAmelCase ) lowerCAmelCase__ : List[str] = sum(p.numel() for p in model.parameters() ) lowerCAmelCase__ : Dict = datetime.now() lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ : Tuple = compute_heads_importance( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , compute_entropy=__UpperCAmelCase , compute_importance=__UpperCAmelCase , head_mask=__UpperCAmelCase , actually_pruned=__UpperCAmelCase , ) lowerCAmelCase__ : List[str] = 1 / loss lowerCAmelCase__ : int = datetime.now() - before_time logger.info( """Pruning: original num of params: %.2e, after pruning %.2e (%.1f percents)""" , __UpperCAmelCase , __UpperCAmelCase , pruned_num_params / original_num_params * 100 , ) logger.info("""Pruning: score with masking: %f score with pruning: %f""" , __UpperCAmelCase , __UpperCAmelCase ) logger.info("""Pruning: speed ratio (original timing / new timing): %f percents""" , original_time / new_time * 100 ) save_model(__UpperCAmelCase , args.output_dir ) def lowercase_ ( ) -> int: lowerCAmelCase__ : Dict = argparse.ArgumentParser() # Required parameters parser.add_argument( """--data_dir""" , default=__UpperCAmelCase , type=__UpperCAmelCase , required=__UpperCAmelCase , help="""The input data dir. 
Should contain the .tsv files (or other data files) for the task.""" , ) parser.add_argument( """--model_name_or_path""" , default=__UpperCAmelCase , type=__UpperCAmelCase , required=__UpperCAmelCase , help="""Path to pretrained model or model identifier from huggingface.co/models""" , ) parser.add_argument( """--output_dir""" , default=__UpperCAmelCase , type=__UpperCAmelCase , required=__UpperCAmelCase , help="""The output directory where the model predictions and checkpoints will be written.""" , ) # Other parameters parser.add_argument( """--config_name""" , default="""""" , type=__UpperCAmelCase , help="""Pretrained config name or path if not the same as model_name_or_path""" , ) parser.add_argument( """--tokenizer_name""" , default="""""" , type=__UpperCAmelCase , help="""Pretrained tokenizer name or path if not the same as model_name_or_path""" , ) parser.add_argument( """--cache_dir""" , default=__UpperCAmelCase , type=__UpperCAmelCase , help="""Where do you want to store the pre-trained models downloaded from s3""" , ) parser.add_argument( """--data_subset""" , type=__UpperCAmelCase , default=-1 , help="""If > 0: limit the data to a subset of data_subset instances.""" ) parser.add_argument( """--overwrite_output_dir""" , action="""store_true""" , help="""Whether to overwrite data in output directory""" ) parser.add_argument( """--overwrite_cache""" , action="""store_true""" , help="""Overwrite the cached training and evaluation sets""" ) parser.add_argument( """--dont_normalize_importance_by_layer""" , action="""store_true""" , help="""Don't normalize importance score by layers""" ) parser.add_argument( """--dont_normalize_global_importance""" , action="""store_true""" , help="""Don't normalize all importance scores between 0 and 1""" , ) parser.add_argument( """--try_masking""" , action="""store_true""" , help="""Whether to try to mask head until a threshold of accuracy.""" ) parser.add_argument( """--masking_threshold""" , default=0.9 , type=__UpperCAmelCase , help="""masking threshold in term of metrics (stop masking when metric < threshold * original metric value).""" , ) parser.add_argument( """--masking_amount""" , default=0.1 , type=__UpperCAmelCase , help="""Amount to heads to masking at each masking step.""" ) parser.add_argument("""--metric_name""" , default="""acc""" , type=__UpperCAmelCase , help="""Metric to use for head masking.""" ) parser.add_argument( """--max_seq_length""" , default=128 , type=__UpperCAmelCase , help=( """The maximum total input sequence length after WordPiece tokenization. 
\n""" """Sequences longer than this will be truncated, sequences shorter padded.""" ) , ) parser.add_argument("""--batch_size""" , default=1 , type=__UpperCAmelCase , help="""Batch size.""" ) parser.add_argument("""--seed""" , type=__UpperCAmelCase , default=42 ) parser.add_argument("""--local_rank""" , type=__UpperCAmelCase , default=-1 , help="""local_rank for distributed training on gpus""" ) parser.add_argument("""--no_cuda""" , action="""store_true""" , help="""Whether not to use CUDA when available""" ) parser.add_argument("""--server_ip""" , type=__UpperCAmelCase , default="""""" , help="""Can be used for distant debugging.""" ) parser.add_argument("""--server_port""" , type=__UpperCAmelCase , default="""""" , help="""Can be used for distant debugging.""" ) lowerCAmelCase__ : Dict = parser.parse_args() if args.server_ip and args.server_port: # Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script import ptvsd print("""Waiting for debugger attach""" ) ptvsd.enable_attach(address=(args.server_ip, args.server_port) , redirect_output=__UpperCAmelCase ) ptvsd.wait_for_attach() # Setup devices and distributed training if args.local_rank == -1 or args.no_cuda: lowerCAmelCase__ : Optional[int] = torch.device("""cuda""" if torch.cuda.is_available() and not args.no_cuda else """cpu""" ) lowerCAmelCase__ : List[Any] = 0 if args.no_cuda else torch.cuda.device_count() else: torch.cuda.set_device(args.local_rank ) lowerCAmelCase__ : Union[str, Any] = torch.device("""cuda""" , args.local_rank ) lowerCAmelCase__ : int = 1 torch.distributed.init_process_group(backend="""nccl""" ) # Initializes the distributed backend # Setup logging logging.basicConfig(level=logging.INFO if args.local_rank in [-1, 0] else logging.WARN ) logger.info("""device: {} n_gpu: {}, distributed: {}""".format(args.device , args.n_gpu , bool(args.local_rank != -1 ) ) ) lowerCAmelCase__ : Optional[Any] = GPTaLMHeadModel.from_pretrained(args.model_name_or_path ) # Distributed and parallel training model.to(args.device ) if args.local_rank != -1: lowerCAmelCase__ : Union[str, Any] = nn.parallel.DistributedDataParallel( __UpperCAmelCase , device_ids=[args.local_rank] , output_device=args.local_rank , find_unused_parameters=__UpperCAmelCase ) elif args.n_gpu > 1: lowerCAmelCase__ : int = nn.DataParallel(__UpperCAmelCase ) # Print/save training arguments os.makedirs(args.output_dir , exist_ok=__UpperCAmelCase ) torch.save(__UpperCAmelCase , os.path.join(args.output_dir , """run_args.bin""" ) ) logger.info("""Training/evaluation parameters %s""" , __UpperCAmelCase ) # Prepare dataset lowerCAmelCase__ : Union[str, Any] = np.concatenate( [ np.loadtxt(args.data_dir , dtype=np.intaa ), ] ) lowerCAmelCase__ : Dict = (torch.from_numpy(__UpperCAmelCase ),) lowerCAmelCase__ : Any = TensorDataset(*__UpperCAmelCase ) lowerCAmelCase__ : int = RandomSampler(__UpperCAmelCase ) lowerCAmelCase__ : Any = DataLoader(__UpperCAmelCase , sampler=__UpperCAmelCase , batch_size=args.batch_size ) # Compute head entropy and importance score compute_heads_importance(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) # Try head masking (set heads to zero until the score goes under a threshole) # and head pruning (remove masked heads and see the effect on the network) if args.try_masking and args.masking_threshold > 0.0 and args.masking_threshold < 1.0: lowerCAmelCase__ : Any = mask_heads(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) prune_heads(__UpperCAmelCase , __UpperCAmelCase , 
__UpperCAmelCase , __UpperCAmelCase ) if __name__ == "__main__": main()
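# Standalone sketch of the attention-entropy helper used above (the `entropy`
# function, named lowercase_ in this dump): -sum(p * log p) over the last axis,
# with an optional squaring step when the input is not a distribution. The
# zero-masking line reflects the `plogp = 0` assignment in the source.
import torch

def attention_entropy(p: torch.Tensor, unlogit: bool = False) -> torch.Tensor:
    if unlogit:
        p = torch.pow(p, 2)
    plogp = p * torch.log(p)
    plogp[p == 0] = 0  # define 0 * log(0) := 0
    return -plogp.sum(dim=-1)

attn = torch.softmax(torch.randn(2, 4, 8, 8), dim=-1)  # (batch, heads, query, key)
print(attention_entropy(attn).shape)  # torch.Size([2, 4, 8])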
507
0
from typing import TYPE_CHECKING from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available a = { """configuration_trajectory_transformer""": [ """TRAJECTORY_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP""", """TrajectoryTransformerConfig""", ], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: a = [ """TRAJECTORY_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST""", """TrajectoryTransformerModel""", """TrajectoryTransformerPreTrainedModel""", """load_tf_weights_in_trajectory_transformer""", ] if TYPE_CHECKING: from .configuration_trajectory_transformer import ( TRAJECTORY_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, TrajectoryTransformerConfig, ) try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_trajectory_transformer import ( TRAJECTORY_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, TrajectoryTransformerModel, TrajectoryTransformerPreTrainedModel, load_tf_weights_in_trajectory_transformer, ) else: import sys a = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
687
import unittest import numpy as np import torch from .utils_summarization import build_mask, compute_token_type_ids, process_story, truncate_or_pad class UpperCAmelCase_ (unittest.TestCase ): """simple docstring""" def SCREAMING_SNAKE_CASE__ ( self: int ): _lowerCAmelCase :Optional[int] = 10 def SCREAMING_SNAKE_CASE__ ( self: Optional[Any] ): _lowerCAmelCase :str = [1, 2, 3, 4] _lowerCAmelCase :Union[str, Any] = [1, 2, 3, 4, 0, 0, 0, 0, 0, 0] self.assertEqual(truncate_or_pad(_UpperCAmelCase , self.block_size , 0 ) , _UpperCAmelCase ) def SCREAMING_SNAKE_CASE__ ( self: int ): _lowerCAmelCase :List[Any] = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10] _lowerCAmelCase :List[Any] = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10] self.assertEqual(truncate_or_pad(_UpperCAmelCase , self.block_size , 0 ) , _UpperCAmelCase ) def SCREAMING_SNAKE_CASE__ ( self: Optional[int] ): _lowerCAmelCase :Dict = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13] _lowerCAmelCase :Optional[int] = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10] self.assertEqual(truncate_or_pad(_UpperCAmelCase , self.block_size , 0 ) , _UpperCAmelCase ) def SCREAMING_SNAKE_CASE__ ( self: List[str] ): _lowerCAmelCase :List[str] = 'It was the year of Our Lord one thousand seven hundred and\n seventy-five.\n\nSpiritual revelations were conceded to England at that\n favoured period, as at this.' _lowerCAmelCase , _lowerCAmelCase :Optional[Any] = process_story(_UpperCAmelCase ) self.assertEqual(_UpperCAmelCase , [] ) def SCREAMING_SNAKE_CASE__ ( self: Any ): _lowerCAmelCase :Optional[int] = '' _lowerCAmelCase , _lowerCAmelCase :str = process_story(_UpperCAmelCase ) self.assertEqual(_UpperCAmelCase , [] ) self.assertEqual(_UpperCAmelCase , [] ) def SCREAMING_SNAKE_CASE__ ( self: str ): _lowerCAmelCase :Optional[Any] = ( 'It was the year of Our Lord one thousand seven hundred and ' 'seventy-five\n\nSpiritual revelations were conceded to England ' 'at that favoured period, as at this.\n@highlight\n\nIt was the best of times' ) _lowerCAmelCase , _lowerCAmelCase :Optional[int] = process_story(_UpperCAmelCase ) _lowerCAmelCase :Optional[Any] = [ 'It was the year of Our Lord one thousand seven hundred and seventy-five.', 'Spiritual revelations were conceded to England at that favoured period, as at this.', ] self.assertEqual(_UpperCAmelCase , _UpperCAmelCase ) _lowerCAmelCase :Optional[int] = ['It was the best of times.'] self.assertEqual(_UpperCAmelCase , _UpperCAmelCase ) def SCREAMING_SNAKE_CASE__ ( self: Tuple ): _lowerCAmelCase :Union[str, Any] = torch.tensor([1, 2, 3, 4] ) _lowerCAmelCase :List[Any] = torch.tensor([1, 1, 1, 1] ) np.testing.assert_array_equal(build_mask(_UpperCAmelCase , 0 ).numpy() , expected.numpy() ) def SCREAMING_SNAKE_CASE__ ( self: Optional[int] ): _lowerCAmelCase :List[Any] = torch.tensor([1, 2, 3, 4, 23, 23, 23] ) _lowerCAmelCase :Optional[int] = torch.tensor([1, 1, 1, 1, 0, 0, 0] ) np.testing.assert_array_equal(build_mask(_UpperCAmelCase , 23 ).numpy() , expected.numpy() ) def SCREAMING_SNAKE_CASE__ ( self: Optional[Any] ): _lowerCAmelCase :Tuple = torch.tensor([8, 2, 3, 4, 1, 1, 1] ) _lowerCAmelCase :List[Any] = torch.tensor([1, 1, 1, 1, 0, 0, 0] ) np.testing.assert_array_equal(build_mask(_UpperCAmelCase , 1 ).numpy() , expected.numpy() ) def SCREAMING_SNAKE_CASE__ ( self: str ): _lowerCAmelCase :List[str] = 101 _lowerCAmelCase :Dict = torch.tensor([[1, 2, 3, 4, 5, 6], [1, 2, 3, 101, 5, 6], [1, 101, 3, 4, 101, 6]] ) _lowerCAmelCase :int = torch.tensor([[1, 1, 1, 1, 1, 1], [1, 1, 1, 0, 0, 0], [1, 0, 0, 0, 1, 1]] ) _lowerCAmelCase :List[str] = 
compute_token_type_ids(_UpperCAmelCase , _UpperCAmelCase ) np.testing.assert_array_equal(_UpperCAmelCase , _UpperCAmelCase )
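# A self-contained sketch consistent with the truncate_or_pad assertions above:
# clip a sequence to block_size, or right-pad it with pad_token_id. The real
# helper lives in utils_summarization; this is an inferred equivalent.
def truncate_or_pad(sequence, block_size, pad_token_id):
    if len(sequence) > block_size:
        return sequence[:block_size]
    return sequence + [pad_token_id] * (block_size - len(sequence))

assert truncate_or_pad([1, 2, 3, 4], 10, 0) == [1, 2, 3, 4, 0, 0, 0, 0, 0, 0]
assert truncate_or_pad(list(range(1, 14)), 10, 0) == list(range(1, 11))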
687
1
from __future__ import annotations import os import tempfile import unittest import numpy as np from huggingface_hub import hf_hub_download from transformers import is_tensorflow_text_available, is_tf_available from transformers.testing_utils import require_tensorflow_text, require_tf, slow from ..test_modeling_tf_common import floats_tensor from .test_framework_agnostic import GenerationIntegrationTestsMixin if is_tf_available(): import tensorflow as tf from transformers import ( AutoTokenizer, TFAutoModelForCausalLM, TFAutoModelForSeqaSeqLM, TFAutoModelForSpeechSeqaSeq, TFAutoModelForVisionaSeq, TFBartForConditionalGeneration, TFLogitsProcessorList, TFMinLengthLogitsProcessor, tf_top_k_top_p_filtering, ) if is_tensorflow_text_available(): import tensorflow_text as text @require_tf class _SCREAMING_SNAKE_CASE ( unittest.TestCase ): def A__ (self): '''simple docstring''' __UpperCAmelCase =tf.convert_to_tensor( [ [ 8.222_0991, # 3rd highest value; idx. 0 -0.562_0044, 5.2322_9752, 4.038_6393, -6.879_8378, -0.5478_5802, -3.201_2153, 2.9277_7176, 1.8817_1953, 7.3534_1276, # 5th highest value; idx. 9 8.4320_7833, # 2nd highest value; idx. 10 -9.8571_1836, -5.9620_9236, -1.1303_9161, -7.111_5294, -0.836_9633, -5.318_6408, 7.0642_7407, 0.8136_9344, -0.8202_3817, -5.917_9796, 0.5881_3443, -6.9977_8438, 4.7155_1189, -0.1877_1637, 7.4402_0759, # 4th highest value; idx. 25 9.3845_0987, # 1st highest value; idx. 26 2.1266_2941, -9.3256_2038, 2.3565_2522, ], # cummulative prob of 5 highest values <= 0.6 [ 0.5842_5518, 4.5313_9238, -5.5751_0464, -6.2803_0699, -7.1952_9503, -4.0212_2551, 1.3933_7037, -6.0670_7057, 1.5948_0517, -9.64_3119, 0.0390_7799, 0.6723_1762, -8.8820_6726, 6.2711_5922, # 4th highest value; idx. 13 2.2852_0723, 4.8276_7506, 4.3042_1368, 8.827_5313, # 2nd highest value; idx. 17 5.4402_9958, # 5th highest value; idx. 18 -4.473_5794, 7.3857_9536, # 3rd highest value; idx. 20 -2.9105_1663, 2.6194_6077, -2.567_4762, -9.4895_9302, -4.0292_2645, -1.3541_6918, 9.6770_2323, # 1st highest value; idx. 
27 -5.8947_8553, 1.8537_0467, ], # cummulative prob of 5 highest values <= 0.6 ] , dtype=tf.floataa , ) __UpperCAmelCase =tf.convert_to_tensor( [[0, 0], [0, 9], [0, 1_0], [0, 2_5], [0, 2_6], [1, 1_3], [1, 1_7], [1, 1_8], [1, 2_0], [1, 2_7]] , dtype=tf.intaa , ) # expected non filtered idx as noted above __UpperCAmelCase =tf.convert_to_tensor( [8.22_2099, 7.353_4126, 8.43_2078, 7.440_2075, 9.3_8451, 6.27_1159, 8.82_7531, 5.440_2995, 7.385_7956, 9.67_7023] , dtype=tf.floataa , ) # expected non filtered values as noted above __UpperCAmelCase =tf_top_k_top_p_filtering(UpperCAmelCase , top_k=1_0 , top_p=0.6 , min_tokens_to_keep=4) __UpperCAmelCase =output[output != -float('''inf''')] __UpperCAmelCase =tf.cast( tf.where(tf.not_equal(UpperCAmelCase , tf.constant(-float('''inf''') , dtype=tf.floataa))) , dtype=tf.intaa , ) tf.debugging.assert_near(UpperCAmelCase , UpperCAmelCase , rtol=1e-12) tf.debugging.assert_equal(UpperCAmelCase , UpperCAmelCase) @require_tf class _SCREAMING_SNAKE_CASE ( unittest.TestCase , _lowerCAmelCase ): # setting framework_dependent_parameters needs to be gated, just like its contents' imports if is_tf_available(): a_ : int = { '''AutoModelForCausalLM''': TFAutoModelForCausalLM, '''AutoModelForSpeechSeq2Seq''': TFAutoModelForSpeechSeqaSeq, '''AutoModelForSeq2SeqLM''': TFAutoModelForSeqaSeqLM, '''AutoModelForVision2Seq''': TFAutoModelForVisionaSeq, '''LogitsProcessorList''': TFLogitsProcessorList, '''MinLengthLogitsProcessor''': TFMinLengthLogitsProcessor, '''create_tensor_fn''': tf.convert_to_tensor, '''floats_tensor''': floats_tensor, '''return_tensors''': '''tf''', } @slow def A__ (self): '''simple docstring''' __UpperCAmelCase =TFAutoModelForCausalLM.from_pretrained('''hf-internal-testing/tiny-random-gpt2''') __UpperCAmelCase =2 __UpperCAmelCase =2 class _SCREAMING_SNAKE_CASE ( tf.Module ): def __init__(self , UpperCAmelCase): '''simple docstring''' super(UpperCAmelCase , self).__init__() __UpperCAmelCase =model @tf.function( input_signature=( tf.TensorSpec((None, input_length) , tf.intaa , name='''input_ids'''), tf.TensorSpec((None, input_length) , tf.intaa , name='''attention_mask'''), ) , jit_compile=UpperCAmelCase , ) def A__ (self , UpperCAmelCase , UpperCAmelCase): '''simple docstring''' __UpperCAmelCase =self.model.generate( input_ids=UpperCAmelCase , attention_mask=UpperCAmelCase , max_new_tokens=UpperCAmelCase , return_dict_in_generate=UpperCAmelCase , ) return {"sequences": outputs["sequences"]} __UpperCAmelCase =[[2, 0], [1_0_2, 1_0_3]] __UpperCAmelCase =[[1, 0], [1, 1]] __UpperCAmelCase =DummyModel(model=UpperCAmelCase) with tempfile.TemporaryDirectory() as tmp_dir: tf.saved_model.save(UpperCAmelCase , UpperCAmelCase , signatures={'''serving_default''': dummy_model.serving}) __UpperCAmelCase =tf.saved_model.load(UpperCAmelCase).signatures['''serving_default'''] for batch_size in range(1 , len(UpperCAmelCase) + 1): __UpperCAmelCase ={ '''input_ids''': tf.constant(dummy_input_ids[:batch_size]), '''attention_mask''': tf.constant(dummy_attention_masks[:batch_size]), } __UpperCAmelCase =serving_func(**UpperCAmelCase)['''sequences'''] __UpperCAmelCase =test_model.generate(**UpperCAmelCase , max_new_tokens=UpperCAmelCase) tf.debugging.assert_equal(UpperCAmelCase , UpperCAmelCase) @slow def A__ (self): '''simple docstring''' __UpperCAmelCase =TFAutoModelForCausalLM.from_pretrained('''hf-internal-testing/tiny-random-gpt2''') __UpperCAmelCase =1 __UpperCAmelCase =2 class _SCREAMING_SNAKE_CASE ( tf.Module ): def __init__(self , UpperCAmelCase): '''simple docstring''' 
super(UpperCAmelCase , self).__init__() __UpperCAmelCase =model @tf.function( input_signature=( tf.TensorSpec((batch_size, None) , tf.intaa , name='''input_ids'''), tf.TensorSpec((batch_size, None) , tf.intaa , name='''attention_mask'''), ) , jit_compile=UpperCAmelCase , ) def A__ (self , UpperCAmelCase , UpperCAmelCase): '''simple docstring''' __UpperCAmelCase =self.model.generate( input_ids=UpperCAmelCase , attention_mask=UpperCAmelCase , max_new_tokens=UpperCAmelCase , return_dict_in_generate=UpperCAmelCase , ) return {"sequences": outputs["sequences"]} __UpperCAmelCase =[[2], [1_0_2, 1_0_3]] __UpperCAmelCase =[[1], [1, 1]] __UpperCAmelCase =DummyModel(model=UpperCAmelCase) with tempfile.TemporaryDirectory() as tmp_dir: tf.saved_model.save(UpperCAmelCase , UpperCAmelCase , signatures={'''serving_default''': dummy_model.serving}) __UpperCAmelCase =tf.saved_model.load(UpperCAmelCase).signatures['''serving_default'''] for input_row in range(len(UpperCAmelCase)): __UpperCAmelCase ={ '''input_ids''': tf.constant([dummy_input_ids[input_row]]), '''attention_mask''': tf.constant([dummy_attention_masks[input_row]]), } __UpperCAmelCase =serving_func(**UpperCAmelCase)['''sequences'''] __UpperCAmelCase =test_model.generate(**UpperCAmelCase , max_new_tokens=UpperCAmelCase) tf.debugging.assert_equal(UpperCAmelCase , UpperCAmelCase) @slow @require_tensorflow_text def A__ (self): '''simple docstring''' with tempfile.TemporaryDirectory() as tmp_dir: # file needed to load the TF tokenizer hf_hub_download(repo_id='''google/flan-t5-small''' , filename='''spiece.model''' , local_dir=UpperCAmelCase) class _SCREAMING_SNAKE_CASE ( tf.keras.layers.Layer ): def __init__(self): '''simple docstring''' super().__init__() __UpperCAmelCase =text.SentencepieceTokenizer( model=tf.io.gfile.GFile(os.path.join(UpperCAmelCase , '''spiece.model''') , '''rb''').read()) __UpperCAmelCase =TFAutoModelForSeqaSeqLM.from_pretrained('''hf-internal-testing/tiny-random-t5''') def A__ (self , UpperCAmelCase , *UpperCAmelCase , **UpperCAmelCase): '''simple docstring''' __UpperCAmelCase =self.tokenizer.tokenize(UpperCAmelCase) __UpperCAmelCase , __UpperCAmelCase =text.pad_model_inputs( UpperCAmelCase , max_seq_length=6_4 , pad_value=self.model.config.pad_token_id) __UpperCAmelCase =self.model.generate(input_ids=UpperCAmelCase , attention_mask=UpperCAmelCase) return self.tokenizer.detokenize(UpperCAmelCase) __UpperCAmelCase =CompleteSentenceTransformer() __UpperCAmelCase =tf.keras.layers.Input(shape=(1,) , dtype=tf.string , name='''inputs''') __UpperCAmelCase =complete_model(UpperCAmelCase) __UpperCAmelCase =tf.keras.Model(UpperCAmelCase , UpperCAmelCase) keras_model.save(UpperCAmelCase) def A__ (self): '''simple docstring''' __UpperCAmelCase ={ '''do_sample''': True, '''num_beams''': 1, '''top_p''': 0.7, '''top_k''': 1_0, '''temperature''': 0.7, } __UpperCAmelCase =1_4 __UpperCAmelCase =AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-gpt2''') __UpperCAmelCase ='''Hello, my dog is cute and''' __UpperCAmelCase =tokenizer(UpperCAmelCase , return_tensors='''tf''') __UpperCAmelCase =TFAutoModelForCausalLM.from_pretrained('''hf-internal-testing/tiny-random-gpt2''') __UpperCAmelCase =6_3_8 # forces the generation to happen on CPU, to avoid GPU-related quirks with tf.device(''':/CPU:0'''): tf.random.set_seed(0) __UpperCAmelCase =model.generate(**UpperCAmelCase , eos_token_id=UpperCAmelCase , **UpperCAmelCase) self.assertTrue(expectation == len(generated_tokens[0])) __UpperCAmelCase =[6_3_8, 1_9_8] with tf.device(''':/CPU:0'''): 
tf.random.set_seed(0) __UpperCAmelCase =model.generate(**UpperCAmelCase , eos_token_id=UpperCAmelCase , **UpperCAmelCase) self.assertTrue(expectation == len(generated_tokens[0])) def A__ (self): '''simple docstring''' __UpperCAmelCase =AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-bart''') __UpperCAmelCase ='''Hugging Face is a technology company based in New York and Paris.''' __UpperCAmelCase =bart_tokenizer(UpperCAmelCase , return_tensors='''tf''').input_ids __UpperCAmelCase =TFBartForConditionalGeneration.from_pretrained('''hf-internal-testing/tiny-random-bart''') __UpperCAmelCase =bart_model.generate(UpperCAmelCase).numpy() class _SCREAMING_SNAKE_CASE ( _lowerCAmelCase ): def A__ (self , UpperCAmelCase , UpperCAmelCase=None , **UpperCAmelCase): '''simple docstring''' return super().call(UpperCAmelCase , **UpperCAmelCase) __UpperCAmelCase =FakeBart.from_pretrained('''hf-internal-testing/tiny-random-bart''') __UpperCAmelCase =bart_model.generate(UpperCAmelCase , foo='''bar''').numpy() self.assertTrue(np.array_equal(UpperCAmelCase , UpperCAmelCase)) class _SCREAMING_SNAKE_CASE ( bart_model.model.encoder.__class__ ): def A__ (self , UpperCAmelCase , **UpperCAmelCase): '''simple docstring''' return super().call(UpperCAmelCase , **UpperCAmelCase) __UpperCAmelCase =FakeEncoder(bart_model.config , bart_model.model.shared) __UpperCAmelCase =fake_encoder # Normal generation still works (the output will be different because the encoder weights are different) __UpperCAmelCase =bart_model.generate(UpperCAmelCase).numpy() with self.assertRaises(UpperCAmelCase): # FakeEncoder.call() accepts **kwargs -> no filtering -> value error due to unexpected input "foo" bart_model.generate(UpperCAmelCase , foo='''bar''')
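# A minimal illustration of the top-k / top-p filtering the first test above
# exercises: logits outside the top-k (and outside the top-p nucleus) are set
# to -inf so they can never be sampled. Assumes a transformers version that
# still exports tf_top_k_top_p_filtering, as this test file does.
import tensorflow as tf
from transformers import tf_top_k_top_p_filtering

logits = tf.constant([[1.0, 2.0, 3.0, 4.0, 5.0]])
filtered = tf_top_k_top_p_filtering(logits, top_k=2, top_p=1.0, min_tokens_to_keep=1)
print(filtered.numpy())  # only the two largest logits survive; the rest become -inf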
705
from ..utils import DummyObject, requires_backends class _SCREAMING_SNAKE_CASE ( metaclass=_lowerCAmelCase ): a_ : Dict = ['''torch''', '''transformers''', '''onnx'''] def __init__(self , *UpperCAmelCase , **UpperCAmelCase): '''simple docstring''' requires_backends(self , ['''torch''', '''transformers''', '''onnx''']) @classmethod def A__ (cls , *UpperCAmelCase , **UpperCAmelCase): '''simple docstring''' requires_backends(cls , ['''torch''', '''transformers''', '''onnx''']) @classmethod def A__ (cls , *UpperCAmelCase , **UpperCAmelCase): '''simple docstring''' requires_backends(cls , ['''torch''', '''transformers''', '''onnx''']) class _SCREAMING_SNAKE_CASE ( metaclass=_lowerCAmelCase ): a_ : Dict = ['''torch''', '''transformers''', '''onnx'''] def __init__(self , *UpperCAmelCase , **UpperCAmelCase): '''simple docstring''' requires_backends(self , ['''torch''', '''transformers''', '''onnx''']) @classmethod def A__ (cls , *UpperCAmelCase , **UpperCAmelCase): '''simple docstring''' requires_backends(cls , ['''torch''', '''transformers''', '''onnx''']) @classmethod def A__ (cls , *UpperCAmelCase , **UpperCAmelCase): '''simple docstring''' requires_backends(cls , ['''torch''', '''transformers''', '''onnx''']) class _SCREAMING_SNAKE_CASE ( metaclass=_lowerCAmelCase ): a_ : List[Any] = ['''torch''', '''transformers''', '''onnx'''] def __init__(self , *UpperCAmelCase , **UpperCAmelCase): '''simple docstring''' requires_backends(self , ['''torch''', '''transformers''', '''onnx''']) @classmethod def A__ (cls , *UpperCAmelCase , **UpperCAmelCase): '''simple docstring''' requires_backends(cls , ['''torch''', '''transformers''', '''onnx''']) @classmethod def A__ (cls , *UpperCAmelCase , **UpperCAmelCase): '''simple docstring''' requires_backends(cls , ['''torch''', '''transformers''', '''onnx''']) class _SCREAMING_SNAKE_CASE ( metaclass=_lowerCAmelCase ): a_ : str = ['''torch''', '''transformers''', '''onnx'''] def __init__(self , *UpperCAmelCase , **UpperCAmelCase): '''simple docstring''' requires_backends(self , ['''torch''', '''transformers''', '''onnx''']) @classmethod def A__ (cls , *UpperCAmelCase , **UpperCAmelCase): '''simple docstring''' requires_backends(cls , ['''torch''', '''transformers''', '''onnx''']) @classmethod def A__ (cls , *UpperCAmelCase , **UpperCAmelCase): '''simple docstring''' requires_backends(cls , ['''torch''', '''transformers''', '''onnx''']) class _SCREAMING_SNAKE_CASE ( metaclass=_lowerCAmelCase ): a_ : List[str] = ['''torch''', '''transformers''', '''onnx'''] def __init__(self , *UpperCAmelCase , **UpperCAmelCase): '''simple docstring''' requires_backends(self , ['''torch''', '''transformers''', '''onnx''']) @classmethod def A__ (cls , *UpperCAmelCase , **UpperCAmelCase): '''simple docstring''' requires_backends(cls , ['''torch''', '''transformers''', '''onnx''']) @classmethod def A__ (cls , *UpperCAmelCase , **UpperCAmelCase): '''simple docstring''' requires_backends(cls , ['''torch''', '''transformers''', '''onnx''']) class _SCREAMING_SNAKE_CASE ( metaclass=_lowerCAmelCase ): a_ : List[Any] = ['''torch''', '''transformers''', '''onnx'''] def __init__(self , *UpperCAmelCase , **UpperCAmelCase): '''simple docstring''' requires_backends(self , ['''torch''', '''transformers''', '''onnx''']) @classmethod def A__ (cls , *UpperCAmelCase , **UpperCAmelCase): '''simple docstring''' requires_backends(cls , ['''torch''', '''transformers''', '''onnx''']) @classmethod def A__ (cls , *UpperCAmelCase , **UpperCAmelCase): '''simple docstring''' requires_backends(cls , ['''torch''', 
'''transformers''', '''onnx'''])
142
0
"""simple docstring""" from __future__ import annotations from math import gcd def A ( _A, _A = 2, _A = 1, _A = 3, ): """simple docstring""" # A value less than 2 can cause an infinite loop in the algorithm. if num < 2: raise ValueError("The input value cannot be less than 2" ) # Because of the relationship between ``f(f(x))`` and ``f(x)``, this # algorithm struggles to find factors that are divisible by two. # As a workaround, we specifically check for two and even inputs. # See: https://math.stackexchange.com/a/2856214/165820 if num > 2 and num % 2 == 0: return 2 # Pollard's Rho algorithm requires a function that returns pseudorandom # values between 0 <= X < ``num``. It doesn't need to be random in the # sense that the output value is cryptographically secure or difficult # to calculate, it only needs to be random in the sense that all output # values should be equally likely to appear. # For this reason, Pollard suggested using ``f(x) = (x**2 - 1) % num`` # However, the success of Pollard's algorithm isn't guaranteed and is # determined in part by the initial seed and the chosen random function. # To make retries easier, we will instead use ``f(x) = (x**2 + C) % num`` # where ``C`` is a value that we can modify between each attempt. def rand_fn(_A, _A, _A ) -> int: return (pow(snake_case_, 2 ) + step) % modulus for _ in range(snake_case_ ): # These track the position within the cycle detection logic. snake_case_ :Optional[Any] = seed snake_case_ :str = seed while True: # At each iteration, the tortoise moves one step and the hare moves two. snake_case_ :int = rand_fn(snake_case_, snake_case_, snake_case_ ) snake_case_ :Dict = rand_fn(snake_case_, snake_case_, snake_case_ ) snake_case_ :Union[str, Any] = rand_fn(snake_case_, snake_case_, snake_case_ ) # At some point both the tortoise and the hare will enter a cycle whose # length ``p`` is a divisor of ``num``. Once in that cycle, at some point # the tortoise and hare will end up on the same value modulo ``p``. # We can detect when this happens because the position difference between # the tortoise and the hare will share a common divisor with ``num``. snake_case_ :str = gcd(hare - tortoise, snake_case_ ) if divisor == 1: # No common divisor yet, just keep searching. continue else: # We found a common divisor! if divisor == num: # Unfortunately, the divisor is ``num`` itself and is useless. break else: # The divisor is a nontrivial factor of ``num``! return divisor # If we made it here, then this attempt failed. # We need to pick a new starting seed for the tortoise and hare # in addition to a new step value for the random function. # To keep this example implementation deterministic, the # new values will be generated based on currently available # values instead of using something like ``random.randint``. # We can use the hare's position as the new seed. # This is actually what Richard Brent's the "optimized" variant does. snake_case_ :Optional[int] = hare # The new step value for the random function can just be incremented. # At first the results will be similar to what the old function would # have produced, but the value will quickly diverge after a bit. step += 1 # We haven't found a divisor within the requested number of attempts. # We were unlucky or ``num`` itself is actually prime. 
return None if __name__ == "__main__": import argparse __UpperCAmelCase : Any = argparse.ArgumentParser() parser.add_argument( 'num', type=int, help='The value to find a divisor of', ) parser.add_argument( '--attempts', type=int, default=3, help='The number of attempts before giving up', ) __UpperCAmelCase : Optional[int] = parser.parse_args() __UpperCAmelCase : Optional[int] = pollard_rho(args.num, attempts=args.attempts) if divisor is None: print(F'''{args.num} is probably prime''') else: __UpperCAmelCase : Optional[Any] = args.num // divisor print(F'''{args.num} = {divisor} * {quotient}''')
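A minimal usage sketch for the routine above; the composite 8051 = 83 * 97 is a standard Pollard's Rho demonstration value, not taken from this file:

# Hedged example: factor a small composite with pollard_rho defined above.
composite = 8051  # 83 * 97
factor = pollard_rho(composite)
assert factor in (83, 97)
print(f"{composite} = {factor} * {composite // factor}")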
584
"""simple docstring""" def A_ ( snake_case_ : int = 1_0_0_0_0_0_0 ): '''simple docstring''' UpperCamelCase : List[Any] = [i - 1 for i in range(limit + 1 )] for i in range(2 ,limit + 1 ): if phi[i] == i - 1: for j in range(2 * i ,limit + 1 ,snake_case_ ): phi[j] -= phi[j] // i return sum(phi[2 : limit + 1] ) if __name__ == "__main__": print(solution())
499
0
"""simple docstring""" import numpy as np import torch import tqdm from ...models.unet_ad import UNetaDModel from ...pipelines import DiffusionPipeline from ...utils import randn_tensor from ...utils.dummy_pt_objects import DDPMScheduler class a_ ( __UpperCamelCase ): def __init__( self : Optional[Any] , snake_case__ : UNetaDModel , snake_case__ : UNetaDModel , snake_case__ : DDPMScheduler , snake_case__ : Optional[Any] , ): super().__init__() lowerCAmelCase__ = value_function lowerCAmelCase__ = unet lowerCAmelCase__ = scheduler lowerCAmelCase__ = env lowerCAmelCase__ = env.get_dataset() lowerCAmelCase__ = {} for key in self.data.keys(): try: lowerCAmelCase__ = self.data[key].mean() except: # noqa: E722 pass lowerCAmelCase__ = {} for key in self.data.keys(): try: lowerCAmelCase__ = self.data[key].std() except: # noqa: E722 pass lowerCAmelCase__ = env.observation_space.shape[0] lowerCAmelCase__ = env.action_space.shape[0] def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] , snake_case__ : str , snake_case__ : Tuple ): return (x_in - self.means[key]) / self.stds[key] def _SCREAMING_SNAKE_CASE ( self : Optional[Any] , snake_case__ : Any , snake_case__ : Optional[int] ): return x_in * self.stds[key] + self.means[key] def _SCREAMING_SNAKE_CASE ( self : List[Any] , snake_case__ : Union[str, Any] ): if type(snake_case__ ) is dict: return {k: self.to_torch(snake_case__ ) for k, v in x_in.items()} elif torch.is_tensor(snake_case__ ): return x_in.to(self.unet.device ) return torch.tensor(snake_case__ , device=self.unet.device ) def _SCREAMING_SNAKE_CASE ( self : Dict , snake_case__ : Tuple , snake_case__ : Any , snake_case__ : Any ): for key, val in cond.items(): lowerCAmelCase__ = val.clone() return x_in def _SCREAMING_SNAKE_CASE ( self : str , snake_case__ : str , snake_case__ : str , snake_case__ : Union[str, Any] , snake_case__ : Dict ): lowerCAmelCase__ = x.shape[0] lowerCAmelCase__ = None for i in tqdm.tqdm(self.scheduler.timesteps ): # create batch of timesteps to pass into model lowerCAmelCase__ = torch.full((batch_size,) , snake_case__ , device=self.unet.device , dtype=torch.long ) for _ in range(snake_case__ ): with torch.enable_grad(): x.requires_grad_() # permute to match dimension for pre-trained models lowerCAmelCase__ = self.value_function(x.permute(0 , 2 , 1 ) , snake_case__ ).sample lowerCAmelCase__ = torch.autograd.grad([y.sum()] , [x] )[0] lowerCAmelCase__ = self.scheduler._get_variance(snake_case__ ) lowerCAmelCase__ = torch.exp(0.5 * posterior_variance ) lowerCAmelCase__ = model_std * grad lowerCAmelCase__ = 0 lowerCAmelCase__ = x.detach() lowerCAmelCase__ = x + scale * grad lowerCAmelCase__ = self.reset_xa(snake_case__ , snake_case__ , self.action_dim ) lowerCAmelCase__ = self.unet(x.permute(0 , 2 , 1 ) , snake_case__ ).sample.permute(0 , 2 , 1 ) # TODO: verify deprecation of this kwarg lowerCAmelCase__ = self.scheduler.step(snake_case__ , snake_case__ , snake_case__ , predict_epsilon=snake_case__ )["""prev_sample"""] # apply conditions to the trajectory (set the initial state) lowerCAmelCase__ = self.reset_xa(snake_case__ , snake_case__ , self.action_dim ) lowerCAmelCase__ = self.to_torch(snake_case__ ) return x, y def __call__( self : int , snake_case__ : Optional[int] , snake_case__ : int=64 , snake_case__ : Dict=32 , snake_case__ : Any=2 , snake_case__ : Optional[Any]=0.1 ): # normalize the observations and create batch dimension lowerCAmelCase__ = self.normalize(snake_case__ , """observations""" ) lowerCAmelCase__ = obs[None].repeat(snake_case__ , axis=0 ) lowerCAmelCase__ 
= {0: self.to_torch(snake_case__ )} lowerCAmelCase__ = (batch_size, planning_horizon, self.state_dim + self.action_dim) # generate initial noise and apply our conditions (to make the trajectories start at current state) lowerCAmelCase__ = randn_tensor(snake_case__ , device=self.unet.device ) lowerCAmelCase__ = self.reset_xa(snake_case__ , snake_case__ , self.action_dim ) lowerCAmelCase__ = self.to_torch(snake_case__ ) # run the diffusion process lowerCAmelCase__ , lowerCAmelCase__ = self.run_diffusion(snake_case__ , snake_case__ , snake_case__ , snake_case__ ) # sort output trajectories by value lowerCAmelCase__ = y.argsort(0 , descending=snake_case__ ).squeeze() lowerCAmelCase__ = x[sorted_idx] lowerCAmelCase__ = sorted_values[:, :, : self.action_dim] lowerCAmelCase__ = actions.detach().cpu().numpy() lowerCAmelCase__ = self.de_normalize(snake_case__ , key="""actions""" ) # select the action with the highest value if y is not None: lowerCAmelCase__ = 0 else: # if we didn't run value guiding, select a random action lowerCAmelCase__ = np.random.randint(0 , snake_case__ ) lowerCAmelCase__ = denorm_actions[selected_index, 0] return denorm_actions
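A hedged sketch of driving the planner above inside a control loop; the gym-style `env` API, the loop length, and every keyword name except `batch_size`, `planning_horizon`, and `scale` (which appear in the pipeline body) are assumptions, not taken from this file:

# Assumed gym-style loop around the pipeline's __call__ shown above.
obs = env.reset()
for _ in range(100):
    # n_guide_steps is a hypothetical name for the obscured third argument.
    action = pipeline(obs, batch_size=64, planning_horizon=32, n_guide_steps=2, scale=0.1)
    obs, reward, done, info = env.step(action)
    if done:
        break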
707
"""simple docstring""" import argparse import os import gluonnlp as nlp import mxnet as mx import numpy as np import torch from gluonnlp.base import get_home_dir from gluonnlp.model.bert import BERTEncoder from gluonnlp.model.utils import _load_vocab from gluonnlp.vocab import Vocab from packaging import version from torch import nn from transformers import BertConfig, BertForMaskedLM, BertModel, RobertaTokenizer from transformers.models.bert.modeling_bert import ( BertIntermediate, BertLayer, BertOutput, BertSelfAttention, BertSelfOutput, ) from transformers.utils import logging if version.parse(nlp.__version__) != version.parse("0.8.3"): raise Exception("requires gluonnlp == 0.8.3") if version.parse(mx.__version__) != version.parse("1.5.0"): raise Exception("requires mxnet == 1.5.0") logging.set_verbosity_info() __lowerCAmelCase : Any = logging.get_logger(__name__) __lowerCAmelCase : Any = "The Nymphenburg Palace is a beautiful palace in Munich!" def _UpperCAmelCase ( lowerCamelCase__ , lowerCamelCase__ ): """simple docstring""" lowerCAmelCase__ = { """attention_cell""": """multi_head""", """num_layers""": 4, """units""": 1024, """hidden_size""": 768, """max_length""": 512, """num_heads""": 8, """scaled""": True, """dropout""": 0.1, """use_residual""": True, """embed_size""": 1024, """embed_dropout""": 0.1, """word_embed""": None, """layer_norm_eps""": 1e-5, """token_type_vocab_size""": 2, } lowerCAmelCase__ = bort_4_8_768_1024_hparams # Let's construct the original Bort model here # Taken from official BERT implementation, see: # https://github.com/alexa/bort/blob/master/bort/bort.py lowerCAmelCase__ = BERTEncoder( attention_cell=predefined_args["""attention_cell"""] , num_layers=predefined_args["""num_layers"""] , units=predefined_args["""units"""] , hidden_size=predefined_args["""hidden_size"""] , max_length=predefined_args["""max_length"""] , num_heads=predefined_args["""num_heads"""] , scaled=predefined_args["""scaled"""] , dropout=predefined_args["""dropout"""] , output_attention=lowerCamelCase__ , output_all_encodings=lowerCamelCase__ , use_residual=predefined_args["""use_residual"""] , activation=predefined_args.get("""activation""" , """gelu""" ) , layer_norm_eps=predefined_args.get("""layer_norm_eps""" , lowerCamelCase__ ) , ) # Vocab information needs to be fetched first # It's the same as RoBERTa, so RobertaTokenizer can be used later lowerCAmelCase__ = """openwebtext_ccnews_stories_books_cased""" # Specify download folder to Gluonnlp's vocab lowerCAmelCase__ = os.path.join(get_home_dir() , """models""" ) lowerCAmelCase__ = _load_vocab(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , cls=lowerCamelCase__ ) lowerCAmelCase__ = nlp.model.BERTModel( lowerCamelCase__ , len(lowerCamelCase__ ) , units=predefined_args["""units"""] , embed_size=predefined_args["""embed_size"""] , embed_dropout=predefined_args["""embed_dropout"""] , word_embed=predefined_args["""word_embed"""] , use_pooler=lowerCamelCase__ , use_token_type_embed=lowerCamelCase__ , token_type_vocab_size=predefined_args["""token_type_vocab_size"""] , use_classifier=lowerCamelCase__ , use_decoder=lowerCamelCase__ , ) original_bort.load_parameters(lowerCamelCase__ , cast_dtype=lowerCamelCase__ , ignore_extra=lowerCamelCase__ ) lowerCAmelCase__ = original_bort._collect_params_with_prefix() # Build our config 🤗 lowerCAmelCase__ = { """architectures""": ["""BertForMaskedLM"""], """attention_probs_dropout_prob""": predefined_args["""dropout"""], """hidden_act""": """gelu""", """hidden_dropout_prob""": 
predefined_args["""dropout"""], """hidden_size""": predefined_args["""embed_size"""], """initializer_range""": 0.02, """intermediate_size""": predefined_args["""hidden_size"""], """layer_norm_eps""": predefined_args["""layer_norm_eps"""], """max_position_embeddings""": predefined_args["""max_length"""], """model_type""": """bort""", """num_attention_heads""": predefined_args["""num_heads"""], """num_hidden_layers""": predefined_args["""num_layers"""], """pad_token_id""": 1, # 2 = BERT, 1 = RoBERTa """type_vocab_size""": 1, # 2 = BERT, 1 = RoBERTa """vocab_size""": len(lowerCamelCase__ ), } lowerCAmelCase__ = BertConfig.from_dict(lowerCamelCase__ ) lowerCAmelCase__ = BertForMaskedLM(lowerCamelCase__ ) hf_bort_model.eval() # Parameter mapping table (Gluonnlp to Transformers) # * denotes layer index # # | Gluon Parameter | Transformers Parameter # | -------------------------------------------------------------- | ---------------------- # | `encoder.layer_norm.beta` | `bert.embeddings.LayerNorm.bias` # | `encoder.layer_norm.gamma` | `bert.embeddings.LayerNorm.weight` # | `encoder.position_weight` | `bert.embeddings.position_embeddings.weight` # | `word_embed.0.weight` | `bert.embeddings.word_embeddings.weight` # | `encoder.transformer_cells.*.attention_cell.proj_key.bias` | `bert.encoder.layer.*.attention.self.key.bias` # | `encoder.transformer_cells.*.attention_cell.proj_key.weight` | `bert.encoder.layer.*.attention.self.key.weight` # | `encoder.transformer_cells.*.attention_cell.proj_query.bias` | `bert.encoder.layer.*.attention.self.query.bias` # | `encoder.transformer_cells.*.attention_cell.proj_query.weight` | `bert.encoder.layer.*.attention.self.query.weight` # | `encoder.transformer_cells.*.attention_cell.proj_value.bias` | `bert.encoder.layer.*.attention.self.value.bias` # | `encoder.transformer_cells.*.attention_cell.proj_value.weight` | `bert.encoder.layer.*.attention.self.value.weight` # | `encoder.transformer_cells.*.ffn.ffn_2.bias` | `bert.encoder.layer.*.attention.output.dense.bias` # | `encoder.transformer_cells.*.ffn.ffn_2.weight` | `bert.encoder.layer.*.attention.output.dense.weight` # | `encoder.transformer_cells.*.layer_norm.beta` | `bert.encoder.layer.*.attention.output.LayerNorm.bias` # | `encoder.transformer_cells.*.layer_norm.gamma` | `bert.encoder.layer.*.attention.output.LayerNorm.weight` # | `encoder.transformer_cells.*.ffn.ffn_1.bias` | `bert.encoder.layer.*.intermediate.dense.bias` # | `encoder.transformer_cells.*.ffn.ffn_1.weight` | `bert.encoder.layer.*.intermediate.dense.weight` # | `encoder.transformer_cells.*.ffn.layer_norm.beta` | `bert.encoder.layer.*.output.LayerNorm.bias` # | `encoder.transformer_cells.*.ffn.layer_norm.gamma` | `bert.encoder.layer.*.output.LayerNorm.weight` # | `encoder.transformer_cells.*.proj.bias` | `bert.encoder.layer.*.output.dense.bias` # | `encoder.transformer_cells.*.proj.weight` | `bert.encoder.layer.*.output.dense.weight` # Helper function to convert MXNET Arrays to PyTorch def to_torch(lowerCamelCase__ ) -> nn.Parameter: return nn.Parameter(torch.FloatTensor(mx_array.data().asnumpy() ) ) # Check param shapes and map new HF param back def check_and_map_params(lowerCamelCase__ , lowerCamelCase__ ): lowerCAmelCase__ = hf_param.shape lowerCAmelCase__ = to_torch(params[gluon_param] ) lowerCAmelCase__ = gluon_param.shape assert ( shape_hf == shape_gluon ), f"""The gluon parameter {gluon_param} has shape {shape_gluon}, but expects shape {shape_hf} for Transformers""" return gluon_param lowerCAmelCase__ = check_and_map_params( 
hf_bort_model.bert.embeddings.word_embeddings.weight , """word_embed.0.weight""" ) lowerCAmelCase__ = check_and_map_params( hf_bort_model.bert.embeddings.position_embeddings.weight , """encoder.position_weight""" ) lowerCAmelCase__ = check_and_map_params( hf_bort_model.bert.embeddings.LayerNorm.bias , """encoder.layer_norm.beta""" ) lowerCAmelCase__ = check_and_map_params( hf_bort_model.bert.embeddings.LayerNorm.weight , """encoder.layer_norm.gamma""" ) # Inspired by RoBERTa conversion script, we just zero them out (Bort does not use them) lowerCAmelCase__ = torch.zeros_like( hf_bort_model.bert.embeddings.token_type_embeddings.weight.data ) for i in range(hf_bort_config.num_hidden_layers ): lowerCAmelCase__ = hf_bort_model.bert.encoder.layer[i] # self attention lowerCAmelCase__ = layer.attention.self lowerCAmelCase__ = check_and_map_params( self_attn.key.bias.data , f"""encoder.transformer_cells.{i}.attention_cell.proj_key.bias""" ) lowerCAmelCase__ = check_and_map_params( self_attn.key.weight.data , f"""encoder.transformer_cells.{i}.attention_cell.proj_key.weight""" ) lowerCAmelCase__ = check_and_map_params( self_attn.query.bias.data , f"""encoder.transformer_cells.{i}.attention_cell.proj_query.bias""" ) lowerCAmelCase__ = check_and_map_params( self_attn.query.weight.data , f"""encoder.transformer_cells.{i}.attention_cell.proj_query.weight""" ) lowerCAmelCase__ = check_and_map_params( self_attn.value.bias.data , f"""encoder.transformer_cells.{i}.attention_cell.proj_value.bias""" ) lowerCAmelCase__ = check_and_map_params( self_attn.value.weight.data , f"""encoder.transformer_cells.{i}.attention_cell.proj_value.weight""" ) # self attention output lowerCAmelCase__ = layer.attention.output lowerCAmelCase__ = check_and_map_params( self_output.dense.bias , f"""encoder.transformer_cells.{i}.proj.bias""" ) lowerCAmelCase__ = check_and_map_params( self_output.dense.weight , f"""encoder.transformer_cells.{i}.proj.weight""" ) lowerCAmelCase__ = check_and_map_params( self_output.LayerNorm.bias , f"""encoder.transformer_cells.{i}.layer_norm.beta""" ) lowerCAmelCase__ = check_and_map_params( self_output.LayerNorm.weight , f"""encoder.transformer_cells.{i}.layer_norm.gamma""" ) # intermediate lowerCAmelCase__ = layer.intermediate lowerCAmelCase__ = check_and_map_params( intermediate.dense.bias , f"""encoder.transformer_cells.{i}.ffn.ffn_1.bias""" ) lowerCAmelCase__ = check_and_map_params( intermediate.dense.weight , f"""encoder.transformer_cells.{i}.ffn.ffn_1.weight""" ) # output lowerCAmelCase__ = layer.output lowerCAmelCase__ = check_and_map_params( bert_output.dense.bias , f"""encoder.transformer_cells.{i}.ffn.ffn_2.bias""" ) lowerCAmelCase__ = check_and_map_params( bert_output.dense.weight , f"""encoder.transformer_cells.{i}.ffn.ffn_2.weight""" ) lowerCAmelCase__ = check_and_map_params( bert_output.LayerNorm.bias , f"""encoder.transformer_cells.{i}.ffn.layer_norm.beta""" ) lowerCAmelCase__ = check_and_map_params( bert_output.LayerNorm.weight , f"""encoder.transformer_cells.{i}.ffn.layer_norm.gamma""" ) # Save space and energy 🎄 hf_bort_model.half() # Compare output of both models lowerCAmelCase__ = RobertaTokenizer.from_pretrained("""roberta-base""" ) lowerCAmelCase__ = tokenizer.encode_plus(lowerCamelCase__ )["""input_ids"""] # Get gluon output lowerCAmelCase__ = mx.nd.array([input_ids] ) lowerCAmelCase__ = original_bort(inputs=lowerCamelCase__ , token_types=[] ) # Get Transformer output (save and reload model again) hf_bort_model.save_pretrained(lowerCamelCase__ ) lowerCAmelCase__ = 
BertModel.from_pretrained(lowerCamelCase__ ) hf_bort_model.eval() lowerCAmelCase__ = tokenizer.encode_plus(lowerCamelCase__ , return_tensors="""pt""" ) lowerCAmelCase__ = hf_bort_model(**lowerCamelCase__ )[0] lowerCAmelCase__ = output_gluon[0].asnumpy() lowerCAmelCase__ = output_hf[0].detach().numpy() lowerCAmelCase__ = np.max(np.abs(hf_layer - gluon_layer ) ).item() lowerCAmelCase__ = np.allclose(lowerCamelCase__ , lowerCamelCase__ , atol=1e-3 ) if success: print("""✔️ Both models output the same tensors""" ) else: print("""❌ Both models do **NOT** output the same tensors""" ) print("""Absolute difference is:""" , lowerCamelCase__ ) if __name__ == "__main__": __lowerCAmelCase : Union[str, Any] = argparse.ArgumentParser() # Required parameters parser.add_argument( "--bort_checkpoint_path", default=None, type=str, required=True, help="Path to the official Bort params file." ) parser.add_argument( "--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model." ) __lowerCAmelCase : str = parser.parse_args() convert_bort_checkpoint_to_pytorch(args.bort_checkpoint_path, args.pytorch_dump_folder_path)
674
0
import argparse
import datetime


def zeller(date_input: str) -> str:
    days = {
        "0": "Sunday",
        "1": "Monday",
        "2": "Tuesday",
        "3": "Wednesday",
        "4": "Thursday",
        "5": "Friday",
        "6": "Saturday",
    }

    convert_datetime_days = {0: 1, 1: 2, 2: 3, 3: 4, 4: 5, 5: 6, 6: 0}

    # Validate
    if not 0 < len(date_input) < 11:
        raise ValueError("Must be 10 characters long")

    # Get month
    m = int(date_input[0] + date_input[1])
    # Validate
    if not 0 < m < 13:
        raise ValueError("Month must be between 1 - 12")

    sep_1 = date_input[2]
    # Validate
    if sep_1 not in ["-", "/"]:
        raise ValueError("Date separator must be '-' or '/'")

    # Get day
    d = int(date_input[3] + date_input[4])
    # Validate
    if not 0 < d < 32:
        raise ValueError("Date must be between 1 - 31")

    # Get second separator
    sep_2 = date_input[5]
    # Validate
    if sep_2 not in ["-", "/"]:
        raise ValueError("Date separator must be '-' or '/'")

    # Get year
    y = int(date_input[6] + date_input[7] + date_input[8] + date_input[9])
    # Arbitrary year range
    if not 45 < y < 8500:
        raise ValueError(
            "Year out of range. There has to be some sort of limit...right?"
        )

    # Get datetime obj for validation
    dt_ck = datetime.date(int(y), int(m), int(d))

    # Start math
    if m <= 2:
        y = y - 1
        m = m + 12

    # maths var
    c = int(str(y)[:2])
    k = int(str(y)[2:])
    t = int(2.6 * m - 5.39)
    u = int(c / 4)
    v = int(k / 4)
    x = int(d + k)
    z = int(t + u + v + x)
    w = int(z - (2 * c))
    f = round(w % 7)
    # End math

    # Validate math
    if f != convert_datetime_days[dt_ck.weekday()]:
        raise AssertionError("The date was evaluated incorrectly. Contact developer.")

    # Response
    response = f"Your date {date_input}, is a {days[str(f)]}!"
    return response


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    parser = argparse.ArgumentParser(
        description=(
            "Find out what day of the week nearly any date is or was. Enter "
            "date as a string in the mm-dd-yyyy or mm/dd/yyyy format"
        )
    )
    parser.add_argument(
        "date_input", type=str, help="Date as a string (mm-dd-yyyy or mm/dd/yyyy)"
    )
    args = parser.parse_args()
    zeller(args.date_input)
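A hedged usage example for the checker above; the date is easy to verify independently (31 January 2010 was a Sunday):

assert zeller("01-31-2010") == "Your date 01-31-2010, is a Sunday!"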
436
'''simple docstring''' from multiprocessing import Lock, Pipe, Process # lock used to ensure that two processes do not access a pipe at the same time _SCREAMING_SNAKE_CASE : List[Any] = Lock() def _UpperCamelCase ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ): """simple docstring""" global process_lock # we perform n swaps since after n swaps we know we are sorted # we *could* stop early if we are sorted already, but it takes as long to # find out we are sorted as it does to sort the list with this algorithm for i in range(0 , 10 ): if (i + position) % 2 == 0 and r_send is not None: # send your value to your right neighbor process_lock.acquire() r_send[1].send(UpperCamelCase__ ) process_lock.release() # receive your right neighbor's value process_lock.acquire() __magic_name__ : Dict = rr_cv[0].recv() process_lock.release() # take the lower value since you are on the left __magic_name__ : int = min(UpperCamelCase__ , UpperCamelCase__ ) elif (i + position) % 2 != 0 and l_send is not None: # send your value to your left neighbor process_lock.acquire() l_send[1].send(UpperCamelCase__ ) process_lock.release() # receive your left neighbor's value process_lock.acquire() __magic_name__ : Optional[Any] = lr_cv[0].recv() process_lock.release() # take the higher value since you are on the right __magic_name__ : List[str] = max(UpperCamelCase__ , UpperCamelCase__ ) # after all swaps are performed, send the values back to main result_pipe[1].send(UpperCamelCase__ ) def _UpperCamelCase ( UpperCamelCase__ ): """simple docstring""" __magic_name__ : int = [] __magic_name__ : Union[str, Any] = [] # initialize the list of pipes where the values will be retrieved for _ in arr: result_pipe.append(Pipe() ) # creates the processes # the first and last process only have one neighbor so they are made outside # of the loop __magic_name__ : Union[str, Any] = Pipe() __magic_name__ : List[str] = Pipe() process_array_.append( Process( target=UpperCamelCase__ , args=(0, arr[0], None, temp_rs, None, temp_rr, result_pipe[0]) , ) ) __magic_name__ : int = temp_rs __magic_name__ : List[str] = temp_rr for i in range(1 , len(UpperCamelCase__ ) - 1 ): __magic_name__ : Optional[int] = Pipe() __magic_name__ : Optional[int] = Pipe() process_array_.append( Process( target=UpperCamelCase__ , args=(i, arr[i], temp_ls, temp_rs, temp_lr, temp_rr, result_pipe[i]) , ) ) __magic_name__ : int = temp_rs __magic_name__ : Union[str, Any] = temp_rr process_array_.append( Process( target=UpperCamelCase__ , args=( len(UpperCamelCase__ ) - 1, arr[len(UpperCamelCase__ ) - 1], temp_ls, None, temp_lr, None, result_pipe[len(UpperCamelCase__ ) - 1], ) , ) ) # start the processes for p in process_array_: p.start() # wait for the processes to end and write their values to the list for p in range(0 , len(UpperCamelCase__ ) ): __magic_name__ : str = result_pipe[p][0].recv() process_array_[p].join() return arr def _UpperCamelCase ( ): """simple docstring""" __magic_name__ : int = list(range(10 , 0 , -1 ) ) print("Initial List" ) print(*UpperCamelCase__ ) __magic_name__ : Tuple = odd_even_transposition(UpperCamelCase__ ) print("Sorted List\n" ) print(*UpperCamelCase__ ) if __name__ == "__main__": main()
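For comparison, a minimal single-process sketch of the same odd-even transposition (brick sort) idea; it is an illustration for checking outputs, not part of the original module:

# Sequential odd-even transposition: n alternating phases suffice to sort.
def odd_even_transposition_sequential(arr: list) -> list:
    n = len(arr)
    for phase in range(n):
        for i in range(phase % 2, n - 1, 2):
            if arr[i] > arr[i + 1]:
                arr[i], arr[i + 1] = arr[i + 1], arr[i]
    return arr


assert odd_even_transposition_sequential(list(range(10, 0, -1))) == list(range(1, 11))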
436
1
# Lint as: python3
import dataclasses
import re
from dataclasses import dataclass
from functools import total_ordering
from typing import Optional, Union


_VERSION_REG = re.compile(r"^(?P<major>\d+)" r"\.(?P<minor>\d+)" r"\.(?P<patch>\d+)$")


@total_ordering
@dataclass
class Version:
    """Dataset version MAJOR.MINOR.PATCH."""

    version_str: str
    description: Optional[str] = None
    major: Optional[Union[str, int]] = None
    minor: Optional[Union[str, int]] = None
    patch: Optional[Union[str, int]] = None

    def __post_init__(self):
        self.major, self.minor, self.patch = _str_to_version_tuple(self.version_str)

    def __repr__(self) -> str:
        return f"{self.tuple[0]}.{self.tuple[1]}.{self.tuple[2]}"

    @property
    def tuple(self):
        return self.major, self.minor, self.patch

    def _validate_operand(self, other):
        if isinstance(other, str):
            return Version(other)
        elif isinstance(other, Version):
            return other
        raise TypeError(f"{other} (type {type(other)}) cannot be compared to version.")

    def __eq__(self, other):
        try:
            other = self._validate_operand(other)
        except (TypeError, ValueError):
            return False
        else:
            return self.tuple == other.tuple

    def __lt__(self, other):
        other = self._validate_operand(other)
        return self.tuple < other.tuple

    def __hash__(self):
        return hash(_version_tuple_to_str(self.tuple))

    @classmethod
    def from_dict(cls, dic):
        field_names = {f.name for f in dataclasses.fields(cls)}
        return cls(**{k: v for k, v in dic.items() if k in field_names})

    def _to_yaml_string(self) -> str:
        return self.version_str


def _str_to_version_tuple(version_str):
    """Return the tuple (major, minor, patch) version extracted from the str."""
    res = _VERSION_REG.match(version_str)
    if not res:
        raise ValueError(f"Invalid version '{version_str}'. Format should be x.y.z with {{x,y,z}} being digits.")
    return tuple(int(v) for v in [res.group("major"), res.group("minor"), res.group("patch")])


def _version_tuple_to_str(version_tuple):
    """Version tuple (e.g. (1, 2, 0)) to str (e.g. "1.2.0")."""
    return ".".join(str(v) for v in version_tuple)
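A hedged usage sketch of the version helper above; string operands are coerced through `_validate_operand`:

v = Version("1.0.0")
assert v == "1.0.0"
assert v < Version("1.2.0")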
418
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available, is_tokenizers_available, is_torch_available, ) __lowerCamelCase : Tuple = {"""configuration_fnet""": ["""FNET_PRETRAINED_CONFIG_ARCHIVE_MAP""", """FNetConfig"""]} try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __lowerCamelCase : int = ["""FNetTokenizer"""] try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __lowerCamelCase : Dict = ["""FNetTokenizerFast"""] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __lowerCamelCase : Optional[int] = [ """FNET_PRETRAINED_MODEL_ARCHIVE_LIST""", """FNetForMaskedLM""", """FNetForMultipleChoice""", """FNetForNextSentencePrediction""", """FNetForPreTraining""", """FNetForQuestionAnswering""", """FNetForSequenceClassification""", """FNetForTokenClassification""", """FNetLayer""", """FNetModel""", """FNetPreTrainedModel""", ] if TYPE_CHECKING: from .configuration_fnet import FNET_PRETRAINED_CONFIG_ARCHIVE_MAP, FNetConfig try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_fnet import FNetTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_fnet_fast import FNetTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_fnet import ( FNET_PRETRAINED_MODEL_ARCHIVE_LIST, FNetForMaskedLM, FNetForMultipleChoice, FNetForNextSentencePrediction, FNetForPreTraining, FNetForQuestionAnswering, FNetForSequenceClassification, FNetForTokenClassification, FNetLayer, FNetModel, FNetPreTrainedModel, ) else: import sys __lowerCamelCase : Dict = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
418
1
"""simple docstring""" import hashlib import unittest from transformers import MODEL_FOR_DEPTH_ESTIMATION_MAPPING, is_torch_available, is_vision_available from transformers.pipelines import DepthEstimationPipeline, pipeline from transformers.testing_utils import ( is_pipeline_test, nested_simplify, require_tf, require_timm, require_torch, require_vision, slow, ) from .test_pipelines_common import ANY if is_torch_available(): import torch if is_vision_available(): from PIL import Image else: class SCREAMING_SNAKE_CASE__ : """simple docstring""" @staticmethod def lowercase__ ( *snake_case__ , **snake_case__ ): """simple docstring""" pass def a__ ( SCREAMING_SNAKE_CASE : Image ): '''simple docstring''' lowerCAmelCase : Dict = hashlib.mda(image.tobytes() ) return m.hexdigest() @is_pipeline_test @require_vision @require_timm @require_torch class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ): """simple docstring""" a : Optional[Any] =MODEL_FOR_DEPTH_ESTIMATION_MAPPING def lowercase__ ( self , snake_case__ , snake_case__ , snake_case__ ): """simple docstring""" lowerCAmelCase : int = DepthEstimationPipeline(model=snake_case__ , image_processor=snake_case__ ) return depth_estimator, [ "./tests/fixtures/tests_samples/COCO/000000039769.png", "./tests/fixtures/tests_samples/COCO/000000039769.png", ] def lowercase__ ( self , snake_case__ , snake_case__ ): """simple docstring""" lowerCAmelCase : Optional[Any] = depth_estimator("./tests/fixtures/tests_samples/COCO/000000039769.png" ) self.assertEqual({"predicted_depth": ANY(torch.Tensor ), "depth": ANY(Image.Image )} , snake_case__ ) import datasets lowerCAmelCase : List[Any] = datasets.load_dataset("hf-internal-testing/fixtures_image_utils" , "image" , split="test" ) lowerCAmelCase : List[str] = depth_estimator( [ Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" ), "http://images.cocodataset.org/val2017/000000039769.jpg", # RGBA dataset[0]["file"], # LA dataset[1]["file"], # L dataset[2]["file"], ] ) self.assertEqual( [ {"predicted_depth": ANY(torch.Tensor ), "depth": ANY(Image.Image )}, {"predicted_depth": ANY(torch.Tensor ), "depth": ANY(Image.Image )}, {"predicted_depth": ANY(torch.Tensor ), "depth": ANY(Image.Image )}, {"predicted_depth": ANY(torch.Tensor ), "depth": ANY(Image.Image )}, {"predicted_depth": ANY(torch.Tensor ), "depth": ANY(Image.Image )}, ] , snake_case__ , ) @require_tf @unittest.skip("Depth estimation is not implemented in TF" ) def lowercase__ ( self ): """simple docstring""" pass @slow @require_torch def lowercase__ ( self ): """simple docstring""" lowerCAmelCase : Optional[int] = "Intel/dpt-large" lowerCAmelCase : List[str] = pipeline("depth-estimation" , model=snake_case__ ) lowerCAmelCase : Any = depth_estimator("http://images.cocodataset.org/val2017/000000039769.jpg" ) lowerCAmelCase : Optional[Any] = hashimage(outputs["depth"] ) # This seems flaky. # self.assertEqual(outputs["depth"], "1a39394e282e9f3b0741a90b9f108977") self.assertEqual(nested_simplify(outputs["predicted_depth"].max().item() ) , 29.304 ) self.assertEqual(nested_simplify(outputs["predicted_depth"].min().item() ) , 2.662 ) @require_torch def lowercase__ ( self ): """simple docstring""" self.skipTest("There is not hf-internal-testing tiny model for either GLPN nor DPT" )
645
"""simple docstring""" from collections import OrderedDict from typing import TYPE_CHECKING, Any, Mapping, Optional from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging if TYPE_CHECKING: from ... import FeatureExtractionMixin, TensorType lowerCAmelCase__ = logging.get_logger(__name__) lowerCAmelCase__ = { '''openai/imagegpt-small''': '''''', '''openai/imagegpt-medium''': '''''', '''openai/imagegpt-large''': '''''', } class SCREAMING_SNAKE_CASE__ ( lowercase ): """simple docstring""" a : int ="imagegpt" a : Union[str, Any] =["past_key_values"] a : Optional[Any] ={ "hidden_size": "n_embd", "max_position_embeddings": "n_positions", "num_attention_heads": "n_head", "num_hidden_layers": "n_layer", } def __init__( self , snake_case__=512 + 1 , snake_case__=32 * 32 , snake_case__=512 , snake_case__=24 , snake_case__=8 , snake_case__=None , snake_case__="quick_gelu" , snake_case__=0.1 , snake_case__=0.1 , snake_case__=0.1 , snake_case__=1e-5 , snake_case__=0.02 , snake_case__=True , snake_case__=True , snake_case__=False , snake_case__=False , snake_case__=False , **snake_case__ , ): """simple docstring""" lowerCAmelCase : Tuple = vocab_size lowerCAmelCase : List[Any] = n_positions lowerCAmelCase : Union[str, Any] = n_embd lowerCAmelCase : str = n_layer lowerCAmelCase : Tuple = n_head lowerCAmelCase : Optional[Any] = n_inner lowerCAmelCase : Dict = activation_function lowerCAmelCase : str = resid_pdrop lowerCAmelCase : Optional[int] = embd_pdrop lowerCAmelCase : Optional[int] = attn_pdrop lowerCAmelCase : Union[str, Any] = layer_norm_epsilon lowerCAmelCase : Any = initializer_range lowerCAmelCase : Union[str, Any] = scale_attn_weights lowerCAmelCase : int = use_cache lowerCAmelCase : List[Any] = scale_attn_by_inverse_layer_idx lowerCAmelCase : Optional[int] = reorder_and_upcast_attn lowerCAmelCase : int = tie_word_embeddings super().__init__(tie_word_embeddings=snake_case__ , **snake_case__ ) class SCREAMING_SNAKE_CASE__ ( lowercase ): """simple docstring""" @property def lowercase__ ( self ): """simple docstring""" return OrderedDict( [ ("input_ids", {0: "batch", 1: "sequence"}), ] ) def lowercase__ ( self , snake_case__ , snake_case__ = 1 , snake_case__ = -1 , snake_case__ = False , snake_case__ = None , snake_case__ = 3 , snake_case__ = 32 , snake_case__ = 32 , ): """simple docstring""" lowerCAmelCase : Tuple = self._generate_dummy_images(snake_case__ , snake_case__ , snake_case__ , snake_case__ ) lowerCAmelCase : Union[str, Any] = dict(preprocessor(images=snake_case__ , return_tensors=snake_case__ ) ) return inputs
645
1
INSTALL_CONTENT = "\n# Transformers installation\n! pip install transformers datasets\n# To install from source instead of the last release, comment the command above and uncomment the following one.\n# ! pip install git+https://github.com/huggingface/transformers.git\n"

notebook_first_cells = [{"type": "code", "content": INSTALL_CONTENT}]
black_avoid_patterns = {
    "{processor_class}": "FakeProcessorClass",
    "{model_class}": "FakeModelClass",
    "{object_class}": "FakeObjectClass",
}
692
'''simple docstring''' import os import sys import unittest __UpperCAmelCase = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__)))) sys.path.append(os.path.join(git_repo_path, "utils")) import check_dummies # noqa: E402 from check_dummies import create_dummy_files, create_dummy_object, find_backend, read_init # noqa: E402 # Align TRANSFORMERS_PATH in check_dummies with the current path __UpperCAmelCase = os.path.join(git_repo_path, "src", "transformers") __UpperCAmelCase = "\n{0} = None\n" __UpperCAmelCase = "\nclass {0}(metaclass=DummyObject):\n _backends = {1}\n\n def __init__(self, *args, **kwargs):\n requires_backends(self, {1})\n" __UpperCAmelCase = "\ndef {0}(*args, **kwargs):\n requires_backends({0}, {1})\n" class SCREAMING_SNAKE_CASE ( unittest.TestCase ): '''simple docstring''' def _UpperCamelCase ( self ): '''simple docstring''' snake_case: List[str] = find_backend(' _import_structure["models.albert"].append("AlbertTokenizerFast")' ) self.assertIsNone(SCREAMING_SNAKE_CASE__ ) snake_case: List[str] = find_backend(' if not is_tokenizers_available():' ) self.assertEqual(SCREAMING_SNAKE_CASE__ , 'tokenizers' ) snake_case: List[Any] = find_backend(' if not is_tensorflow_text_available():' ) self.assertEqual(SCREAMING_SNAKE_CASE__ , 'tensorflow_text' ) snake_case: int = find_backend(' if not (is_sentencepiece_available() and is_tokenizers_available()):' ) self.assertEqual(SCREAMING_SNAKE_CASE__ , 'sentencepiece_and_tokenizers' ) snake_case: Optional[Any] = find_backend( ' if not (is_sentencepiece_available() and is_tensorflow_text_available()):' ) self.assertEqual(SCREAMING_SNAKE_CASE__ , 'sentencepiece_and_tensorflow_text' ) snake_case: Dict = find_backend( ' if not (is_sentencepiece_available() and is_tokenizers_available() and is_vision_available()):' ) self.assertEqual(SCREAMING_SNAKE_CASE__ , 'sentencepiece_and_tokenizers_and_vision' ) def _UpperCamelCase ( self ): '''simple docstring''' snake_case: str = read_init() # We don't assert on the exact list of keys to allow for smooth grow of backend-specific objects self.assertIn('torch' , SCREAMING_SNAKE_CASE__ ) self.assertIn('tensorflow_text' , SCREAMING_SNAKE_CASE__ ) self.assertIn('sentencepiece_and_tokenizers' , SCREAMING_SNAKE_CASE__ ) # Likewise, we can't assert on the exact content of a key self.assertIn('BertModel' , objects['torch'] ) self.assertIn('TFBertModel' , objects['tf'] ) self.assertIn('FlaxBertModel' , objects['flax'] ) self.assertIn('BertModel' , objects['torch'] ) self.assertIn('TFBertTokenizer' , objects['tensorflow_text'] ) self.assertIn('convert_slow_tokenizer' , objects['sentencepiece_and_tokenizers'] ) def _UpperCamelCase ( self ): '''simple docstring''' snake_case: Dict = create_dummy_object('CONSTANT' , '\'torch\'' ) self.assertEqual(SCREAMING_SNAKE_CASE__ , '\nCONSTANT = None\n' ) snake_case: Any = create_dummy_object('function' , '\'torch\'' ) self.assertEqual( SCREAMING_SNAKE_CASE__ , '\ndef function(*args, **kwargs):\n requires_backends(function, \'torch\')\n' ) snake_case: Optional[int] = '\nclass FakeClass(metaclass=DummyObject):\n _backends = \'torch\'\n\n def __init__(self, *args, **kwargs):\n requires_backends(self, \'torch\')\n' snake_case: Tuple = create_dummy_object('FakeClass' , '\'torch\'' ) self.assertEqual(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) def _UpperCamelCase ( self ): '''simple docstring''' snake_case: Dict = '# This file is autogenerated by the command `make fix-copies`, do not edit.\nfrom ..utils import DummyObject, requires_backends\n\n\nCONSTANT 
= None\n\n\ndef function(*args, **kwargs):\n requires_backends(function, ["torch"])\n\n\nclass FakeClass(metaclass=DummyObject):\n _backends = ["torch"]\n\n def __init__(self, *args, **kwargs):\n requires_backends(self, ["torch"])\n' snake_case: Optional[int] = create_dummy_files({'torch': ['CONSTANT', 'function', 'FakeClass']} ) self.assertEqual(dummy_files['torch'] , SCREAMING_SNAKE_CASE__ )
692
1
def binary_xor(a: int, b: int) -> str:
    if a < 0 or b < 0:
        raise ValueError("the value of both inputs must be positive")

    a_binary = str(bin(a))[2:]  # remove the leading "0b"
    b_binary = str(bin(b))[2:]  # remove the leading "0b"

    max_len = max(len(a_binary), len(b_binary))

    return "0b" + "".join(
        str(int(char_a != char_b))
        for char_a, char_b in zip(a_binary.zfill(max_len), b_binary.zfill(max_len))
    )


if __name__ == "__main__":
    import doctest

    doctest.testmod()
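Hedged spot checks for the XOR helper above; both values are easy to verify by hand:

assert binary_xor(25, 32) == "0b111001"  # 011001 ^ 100000
assert binary_xor(37, 50) == "0b010111"  # 100101 ^ 110010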
43
def base16_encode(data: bytes) -> str:
    return "".join([hex(byte)[2:].zfill(2).upper() for byte in list(data)])


def base16_decode(data: str) -> bytes:
    if (len(data) % 2) != 0:
        raise ValueError(
            "Base16 encoded data is invalid: Data does not have an even number of hex digits."
        )
    # Check the character set - the standard base16 alphabet
    # is uppercase according to RFC3548 section 6
    if not set(data) <= set("0123456789ABCDEF"):
        raise ValueError(
            "Base16 encoded data is invalid: Data is not uppercase hex or it contains invalid characters."
        )
    # For every two hexadecimal digits (= a byte), turn it into an integer.
    # Then, string the result together into bytes, and return it.
    return bytes(int(data[i] + data[i + 1], 16) for i in range(0, len(data), 2))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
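Hedged round-trip checks for the Base16 helpers above:

assert base16_encode(b"Hello World!") == "48656C6C6F20576F726C6421"
assert base16_decode("48656C6C6F20576F726C6421") == b"Hello World!"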
43
1
"""simple docstring""" import gc import random import unittest import numpy as np import torch from transformers import CLIPImageProcessor, CLIPVisionConfig, CLIPVisionModel from diffusers import HeunDiscreteScheduler, PriorTransformer, ShapEImgaImgPipeline from diffusers.pipelines.shap_e import ShapERenderer from diffusers.utils import floats_tensor, load_image, load_numpy, slow from diffusers.utils.testing_utils import require_torch_gpu, torch_device from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference class A_(SCREAMING_SNAKE_CASE_ , unittest.TestCase ): """simple docstring""" a_ : Optional[int] = ShapEImgaImgPipeline a_ : Union[str, Any] = ["""image"""] a_ : Union[str, Any] = ["""image"""] a_ : Optional[Any] = [ """num_images_per_prompt""", """num_inference_steps""", """generator""", """latents""", """guidance_scale""", """frame_size""", """output_type""", """return_dict""", ] a_ : int = False @property def _lowerCAmelCase ( self ): return 32 @property def _lowerCAmelCase ( self ): return 32 @property def _lowerCAmelCase ( self ): return self.time_input_dim * 4 @property def _lowerCAmelCase ( self ): return 8 @property def _lowerCAmelCase ( self ): torch.manual_seed(0 ) _lowerCamelCase : int = CLIPVisionConfig( hidden_size=self.text_embedder_hidden_size , image_size=64 , projection_dim=self.text_embedder_hidden_size , intermediate_size=37 , num_attention_heads=4 , num_channels=3 , num_hidden_layers=5 , patch_size=1 , ) _lowerCamelCase : int = CLIPVisionModel(A ) return model @property def _lowerCAmelCase ( self ): _lowerCamelCase : str = CLIPImageProcessor( crop_size=224 , do_center_crop=A , do_normalize=A , do_resize=A , image_mean=[0.4_8_1_4_5_4_6_6, 0.4_5_7_8_2_7_5, 0.4_0_8_2_1_0_7_3] , image_std=[0.2_6_8_6_2_9_5_4, 0.2_6_1_3_0_2_5_8, 0.2_7_5_7_7_7_1_1] , resample=3 , size=224 , ) return image_processor @property def _lowerCAmelCase ( self ): torch.manual_seed(0 ) _lowerCamelCase : Union[str, Any] = { 'num_attention_heads': 2, 'attention_head_dim': 16, 'embedding_dim': self.time_input_dim, 'num_embeddings': 32, 'embedding_proj_dim': self.text_embedder_hidden_size, 'time_embed_dim': self.time_embed_dim, 'num_layers': 1, 'clip_embed_dim': self.time_input_dim * 2, 'additional_embeddings': 0, 'time_embed_act_fn': 'gelu', 'norm_in_type': 'layer', 'embedding_proj_norm_type': 'layer', 'encoder_hid_proj_type': None, 'added_emb_type': None, } _lowerCamelCase : Optional[Any] = PriorTransformer(**A ) return model @property def _lowerCAmelCase ( self ): torch.manual_seed(0 ) _lowerCamelCase : Tuple = { 'param_shapes': ( (self.renderer_dim, 93), (self.renderer_dim, 8), (self.renderer_dim, 8), (self.renderer_dim, 8), ), 'd_latent': self.time_input_dim, 'd_hidden': self.renderer_dim, 'n_output': 12, 'background': ( 0.1, 0.1, 0.1, ), } _lowerCamelCase : Any = ShapERenderer(**A ) return model def _lowerCAmelCase ( self ): _lowerCamelCase : Tuple = self.dummy_prior _lowerCamelCase : Union[str, Any] = self.dummy_image_encoder _lowerCamelCase : Optional[int] = self.dummy_image_processor _lowerCamelCase : str = self.dummy_renderer _lowerCamelCase : List[Any] = HeunDiscreteScheduler( beta_schedule='exp' , num_train_timesteps=1024 , prediction_type='sample' , use_karras_sigmas=A , clip_sample=A , clip_sample_range=1.0 , ) _lowerCamelCase : List[str] = { 'prior': prior, 'image_encoder': image_encoder, 'image_processor': image_processor, 'renderer': renderer, 'scheduler': scheduler, } return components def _lowerCAmelCase ( self , A , A=0 ): _lowerCamelCase : Any = 
floats_tensor((1, 3, 64, 64) , rng=random.Random(A ) ).to(A ) if str(A ).startswith('mps' ): _lowerCamelCase : Any = torch.manual_seed(A ) else: _lowerCamelCase : Union[str, Any] = torch.Generator(device=A ).manual_seed(A ) _lowerCamelCase : Optional[int] = { 'image': input_image, 'generator': generator, 'num_inference_steps': 1, 'frame_size': 32, 'output_type': 'np', } return inputs def _lowerCAmelCase ( self ): _lowerCamelCase : int = 'cpu' _lowerCamelCase : Union[str, Any] = self.get_dummy_components() _lowerCamelCase : int = self.pipeline_class(**A ) _lowerCamelCase : Tuple = pipe.to(A ) pipe.set_progress_bar_config(disable=A ) _lowerCamelCase : Optional[Any] = pipe(**self.get_dummy_inputs(A ) ) _lowerCamelCase : Tuple = output.images[0] _lowerCamelCase : Union[str, Any] = image[0, -3:, -3:, -1] assert image.shape == (20, 32, 32, 3) _lowerCamelCase : List[Any] = np.array( [ 0.0_0_0_3_9_2_1_6, 0.0_0_0_3_9_2_1_6, 0.0_0_0_3_9_2_1_6, 0.0_0_0_3_9_2_1_6, 0.0_0_0_3_9_2_1_6, 0.0_0_0_3_9_2_1_6, 0.0_0_0_3_9_2_1_6, 0.0_0_0_3_9_2_1_6, 0.0_0_0_3_9_2_1_6, ] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 def _lowerCAmelCase ( self ): # NOTE: Larger batch sizes cause this test to timeout, only test on smaller batches self._test_inference_batch_consistent(batch_sizes=[1, 2] ) def _lowerCAmelCase ( self ): _lowerCamelCase : List[Any] = torch_device == 'cpu' _lowerCamelCase : Optional[int] = True self._test_inference_batch_single_identical( batch_size=2 , test_max_difference=A , relax_max_difference=A , ) def _lowerCAmelCase ( self ): _lowerCamelCase : str = self.get_dummy_components() _lowerCamelCase : int = self.pipeline_class(**A ) _lowerCamelCase : Union[str, Any] = pipe.to(A ) pipe.set_progress_bar_config(disable=A ) _lowerCamelCase : List[Any] = 1 _lowerCamelCase : Tuple = 2 _lowerCamelCase : Union[str, Any] = self.get_dummy_inputs(A ) for key in inputs.keys(): if key in self.batch_params: _lowerCamelCase : List[Any] = batch_size * [inputs[key]] _lowerCamelCase : int = pipe(**A , num_images_per_prompt=A )[0] assert images.shape[0] == batch_size * num_images_per_prompt @slow @require_torch_gpu class A_(unittest.TestCase ): """simple docstring""" def _lowerCAmelCase ( self ): # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() def _lowerCAmelCase ( self ): _lowerCamelCase : List[str] = load_image( 'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main' '/shap_e/corgi.png' ) _lowerCamelCase : str = load_numpy( 'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main' '/shap_e/test_shap_e_img2img_out.npy' ) _lowerCamelCase : List[str] = ShapEImgaImgPipeline.from_pretrained('openai/shap-e-img2img' ) _lowerCamelCase : Optional[Any] = pipe.to(A ) pipe.set_progress_bar_config(disable=A ) _lowerCamelCase : Dict = torch.Generator(device=A ).manual_seed(0 ) _lowerCamelCase : List[Any] = pipe( A , generator=A , guidance_scale=3.0 , num_inference_steps=64 , frame_size=64 , output_type='np' , ).images[0] assert images.shape == (20, 64, 64, 3) assert_mean_pixel_difference(A , A )
349
"""simple docstring""" import numpy as np def UpperCAmelCase_ ( __a : np.array ): '''simple docstring''' return (2 / (1 + np.exp(-2 * vector ))) - 1 if __name__ == "__main__": import doctest doctest.testmod()
349
1
from __future__ import annotations import os import tempfile import unittest import numpy as np from huggingface_hub import hf_hub_download from transformers import is_tensorflow_text_available, is_tf_available from transformers.testing_utils import require_tensorflow_text, require_tf, slow from ..test_modeling_tf_common import floats_tensor from .test_framework_agnostic import GenerationIntegrationTestsMixin if is_tf_available(): import tensorflow as tf from transformers import ( AutoTokenizer, TFAutoModelForCausalLM, TFAutoModelForSeqaSeqLM, TFAutoModelForSpeechSeqaSeq, TFAutoModelForVisionaSeq, TFBartForConditionalGeneration, TFLogitsProcessorList, TFMinLengthLogitsProcessor, tf_top_k_top_p_filtering, ) if is_tensorflow_text_available(): import tensorflow_text as text @require_tf class UpperCamelCase__ ( unittest.TestCase ): '''simple docstring''' def snake_case__ ( self ) -> List[str]: """simple docstring""" lowercase_ : str = tf.convert_to_tensor( [ [ 8.2220991, # 3rd highest value; idx. 0 -0.5620044, 5.23229752, 4.0386393, -6.8798378, -0.54785802, -3.2012153, 2.92777176, 1.88171953, 7.35341276, # 5th highest value; idx. 9 8.43207833, # 2nd highest value; idx. 10 -9.85711836, -5.96209236, -1.13039161, -7.1115294, -0.8369633, -5.3186408, 7.06427407, 0.81369344, -0.82023817, -5.9179796, 0.58813443, -6.99778438, 4.71551189, -0.18771637, 7.44020759, # 4th highest value; idx. 25 9.38450987, # 1st highest value; idx. 26 2.12662941, -9.32562038, 2.35652522, ], # cummulative prob of 5 highest values <= 0.6 [ 0.58425518, 4.53139238, -5.57510464, -6.28030699, -7.19529503, -4.02122551, 1.39337037, -6.06707057, 1.59480517, -9.643119, 0.03907799, 0.67231762, -8.88206726, 6.27115922, # 4th highest value; idx. 13 2.28520723, 4.82767506, 4.30421368, 8.8275313, # 2nd highest value; idx. 17 5.44029958, # 5th highest value; idx. 18 -4.4735794, 7.38579536, # 3rd highest value; idx. 20 -2.91051663, 2.61946077, -2.5674762, -9.48959302, -4.02922645, -1.35416918, 9.67702323, # 1st highest value; idx. 
27 -5.89478553, 1.85370467, ], # cummulative prob of 5 highest values <= 0.6 ], dtype=tf.floataa, ) lowercase_ : int = tf.convert_to_tensor( [[0, 0], [0, 9], [0, 10], [0, 25], [0, 26], [1, 13], [1, 17], [1, 18], [1, 20], [1, 27]], dtype=tf.intaa, ) # expected non filtered idx as noted above lowercase_ : List[Any] = tf.convert_to_tensor( [8.222099, 7.3534126, 8.432078, 7.4402075, 9.38451, 6.271159, 8.827531, 5.4402995, 7.3857956, 9.677023], dtype=tf.floataa, ) # expected non filtered values as noted above lowercase_ : int = tf_top_k_top_p_filtering(snake_case__, top_k=10, top_p=0.6, min_tokens_to_keep=4 ) lowercase_ : Dict = output[output != -float("""inf""" )] lowercase_ : Tuple = tf.cast( tf.where(tf.not_equal(snake_case__, tf.constant(-float("""inf""" ), dtype=tf.floataa ) ) ), dtype=tf.intaa, ) tf.debugging.assert_near(snake_case__, snake_case__, rtol=1E-12 ) tf.debugging.assert_equal(snake_case__, snake_case__ ) @require_tf class UpperCamelCase__ ( unittest.TestCase , lowerCamelCase__ ): '''simple docstring''' if is_tf_available(): __a : Optional[int] = { """AutoModelForCausalLM""": TFAutoModelForCausalLM, """AutoModelForSpeechSeq2Seq""": TFAutoModelForSpeechSeqaSeq, """AutoModelForSeq2SeqLM""": TFAutoModelForSeqaSeqLM, """AutoModelForVision2Seq""": TFAutoModelForVisionaSeq, """LogitsProcessorList""": TFLogitsProcessorList, """MinLengthLogitsProcessor""": TFMinLengthLogitsProcessor, """create_tensor_fn""": tf.convert_to_tensor, """floats_tensor""": floats_tensor, """return_tensors""": """tf""", } @slow def snake_case__ ( self ) -> str: """simple docstring""" # TF-only test: tf.saved_model export lowercase_ : Optional[Any] = TFAutoModelForCausalLM.from_pretrained("""hf-internal-testing/tiny-random-gpt2""" ) lowercase_ : int = 2 lowercase_ : Optional[Any] = 2 class UpperCamelCase__ ( tf.Module ): '''simple docstring''' def __init__( self, snake_case__ ) -> Any: """simple docstring""" super(snake_case__, self ).__init__() lowercase_ : int = model @tf.function( input_signature=( tf.TensorSpec((None, input_length), tf.intaa, name="""input_ids""" ), tf.TensorSpec((None, input_length), tf.intaa, name="""attention_mask""" ), ), jit_compile=snake_case__, ) def snake_case__ ( self, snake_case__, snake_case__ ) -> int: """simple docstring""" lowercase_ : Tuple = self.model.generate( input_ids=snake_case__, attention_mask=snake_case__, max_new_tokens=snake_case__, return_dict_in_generate=snake_case__, ) return {"sequences": outputs["sequences"]} lowercase_ : Union[str, Any] = [[2, 0], [1_02, 1_03]] lowercase_ : Tuple = [[1, 0], [1, 1]] lowercase_ : int = DummyModel(model=snake_case__ ) with tempfile.TemporaryDirectory() as tmp_dir: tf.saved_model.save(snake_case__, snake_case__, signatures={"""serving_default""": dummy_model.serving} ) lowercase_ : List[Any] = tf.saved_model.load(snake_case__ ).signatures["""serving_default"""] for batch_size in range(1, len(snake_case__ ) + 1 ): lowercase_ : str = { """input_ids""": tf.constant(dummy_input_ids[:batch_size] ), """attention_mask""": tf.constant(dummy_attention_masks[:batch_size] ), } lowercase_ : Optional[int] = serving_func(**snake_case__ )["""sequences"""] lowercase_ : Optional[int] = test_model.generate(**snake_case__, max_new_tokens=snake_case__ ) tf.debugging.assert_equal(snake_case__, snake_case__ ) @slow def snake_case__ ( self ) -> Tuple: """simple docstring""" # TF-only test: tf.saved_model export lowercase_ : int = TFAutoModelForCausalLM.from_pretrained("""hf-internal-testing/tiny-random-gpt2""" ) lowercase_ : Tuple = 1 lowercase_ : Tuple 
= 2 class UpperCamelCase__ ( tf.Module ): '''simple docstring''' def __init__( self, snake_case__ ) -> List[str]: """simple docstring""" super(snake_case__, self ).__init__() lowercase_ : Union[str, Any] = model @tf.function( input_signature=( tf.TensorSpec((batch_size, None), tf.intaa, name="""input_ids""" ), tf.TensorSpec((batch_size, None), tf.intaa, name="""attention_mask""" ), ), jit_compile=snake_case__, ) def snake_case__ ( self, snake_case__, snake_case__ ) -> Optional[Any]: """simple docstring""" lowercase_ : str = self.model.generate( input_ids=snake_case__, attention_mask=snake_case__, max_new_tokens=snake_case__, return_dict_in_generate=snake_case__, ) return {"sequences": outputs["sequences"]} lowercase_ : Union[str, Any] = [[2], [1_02, 1_03]] lowercase_ : List[str] = [[1], [1, 1]] lowercase_ : Union[str, Any] = DummyModel(model=snake_case__ ) with tempfile.TemporaryDirectory() as tmp_dir: tf.saved_model.save(snake_case__, snake_case__, signatures={"""serving_default""": dummy_model.serving} ) lowercase_ : Optional[Any] = tf.saved_model.load(snake_case__ ).signatures["""serving_default"""] for input_row in range(len(snake_case__ ) ): lowercase_ : List[str] = { """input_ids""": tf.constant([dummy_input_ids[input_row]] ), """attention_mask""": tf.constant([dummy_attention_masks[input_row]] ), } lowercase_ : Union[str, Any] = serving_func(**snake_case__ )["""sequences"""] lowercase_ : Optional[Any] = test_model.generate(**snake_case__, max_new_tokens=snake_case__ ) tf.debugging.assert_equal(snake_case__, snake_case__ ) @slow @require_tensorflow_text def snake_case__ ( self ) -> Any: """simple docstring""" # TF-only test: tf.saved_model export with tempfile.TemporaryDirectory() as tmp_dir: # file needed to load the TF tokenizer hf_hub_download(repo_id="""google/flan-t5-small""", filename="""spiece.model""", local_dir=snake_case__ ) class UpperCamelCase__ ( tf.keras.layers.Layer ): '''simple docstring''' def __init__( self ) -> Optional[Any]: """simple docstring""" super().__init__() lowercase_ : Tuple = text.SentencepieceTokenizer( model=tf.io.gfile.GFile(os.path.join(snake_case__, """spiece.model""" ), """rb""" ).read() ) lowercase_ : Dict = TFAutoModelForSeqaSeqLM.from_pretrained("""hf-internal-testing/tiny-random-t5""" ) def snake_case__ ( self, snake_case__, *snake_case__, **snake_case__ ) -> Optional[Any]: """simple docstring""" lowercase_ : Union[str, Any] = self.tokenizer.tokenize(snake_case__ ) lowercase_ , lowercase_ : Union[str, Any] = text.pad_model_inputs( snake_case__, max_seq_length=64, pad_value=self.model.config.pad_token_id ) lowercase_ : str = self.model.generate(input_ids=snake_case__, attention_mask=snake_case__ ) return self.tokenizer.detokenize(snake_case__ ) lowercase_ : Optional[int] = CompleteSentenceTransformer() lowercase_ : Union[str, Any] = tf.keras.layers.Input(shape=(1,), dtype=tf.string, name="""inputs""" ) lowercase_ : Any = complete_model(snake_case__ ) lowercase_ : Optional[Any] = tf.keras.Model(snake_case__, snake_case__ ) keras_model.save(snake_case__ ) def snake_case__ ( self ) -> Optional[int]: """simple docstring""" # Has PT equivalent: this test relies on random sampling lowercase_ : Any = { """do_sample""": True, """num_beams""": 1, """top_p""": 0.7, """top_k""": 10, """temperature""": 0.7, } lowercase_ : List[str] = 14 lowercase_ : Optional[Any] = AutoTokenizer.from_pretrained("""hf-internal-testing/tiny-random-gpt2""" ) lowercase_ : Dict = """Hello, my dog is cute and""" lowercase_ : List[str] = tokenizer(snake_case__, 
return_tensors="""tf""" ) lowercase_ : Tuple = TFAutoModelForCausalLM.from_pretrained("""hf-internal-testing/tiny-random-gpt2""" ) lowercase_ : int = 6_38 # forces the generation to happen on CPU, to avoid GPU-related quirks with tf.device(""":/CPU:0""" ): tf.random.set_seed(0 ) lowercase_ : Optional[int] = model.generate(**snake_case__, eos_token_id=snake_case__, **snake_case__ ) self.assertTrue(expectation == len(generated_tokens[0] ) ) lowercase_ : int = [6_38, 1_98] with tf.device(""":/CPU:0""" ): tf.random.set_seed(0 ) lowercase_ : Optional[int] = model.generate(**snake_case__, eos_token_id=snake_case__, **snake_case__ ) self.assertTrue(expectation == len(generated_tokens[0] ) ) def snake_case__ ( self ) -> str: """simple docstring""" # Has PT equivalent: ample use of framework-specific code lowercase_ : List[Any] = AutoTokenizer.from_pretrained("""hf-internal-testing/tiny-random-bart""" ) lowercase_ : int = """Hugging Face is a technology company based in New York and Paris.""" lowercase_ : int = bart_tokenizer(snake_case__, return_tensors="""tf""" ).input_ids lowercase_ : Tuple = TFBartForConditionalGeneration.from_pretrained("""hf-internal-testing/tiny-random-bart""" ) lowercase_ : Union[str, Any] = bart_model.generate(snake_case__ ).numpy() class UpperCamelCase__ ( lowerCamelCase__ ): '''simple docstring''' def snake_case__ ( self, snake_case__, snake_case__=None, **snake_case__ ) -> str: """simple docstring""" return super().call(snake_case__, **snake_case__ ) lowercase_ : Tuple = FakeBart.from_pretrained("""hf-internal-testing/tiny-random-bart""" ) lowercase_ : str = bart_model.generate(snake_case__, foo="""bar""" ).numpy() self.assertTrue(np.array_equal(snake_case__, snake_case__ ) ) class UpperCamelCase__ ( bart_model.model.encoder.__class__ ): '''simple docstring''' def snake_case__ ( self, snake_case__, **snake_case__ ) -> List[str]: """simple docstring""" return super().call(snake_case__, **snake_case__ ) lowercase_ : Optional[int] = FakeEncoder(bart_model.config, bart_model.model.shared ) lowercase_ : Any = fake_encoder # Normal generation still works (the output will be different because the encoder weights are different) lowercase_ : Any = bart_model.generate(snake_case__ ).numpy() with self.assertRaises(snake_case__ ): # FakeEncoder.call() accepts **kwargs -> no filtering -> value error due to unexpected input "foo" bart_model.generate(snake_case__, foo="""bar""" )
458
import pytest
import requests

from datasets.utils.file_utils import http_head

from .utils import OfflineSimulationMode, RequestWouldHangIndefinitelyError, offline


@pytest.mark.integration
def test_offline_with_timeout():
    with offline(OfflineSimulationMode.CONNECTION_TIMES_OUT):
        with pytest.raises(RequestWouldHangIndefinitelyError):
            requests.request("GET", "https://huggingface.co")
        with pytest.raises(requests.exceptions.ConnectTimeout):
            requests.request("GET", "https://huggingface.co", timeout=1.0)


@pytest.mark.integration
def test_offline_with_connection_error():
    with offline(OfflineSimulationMode.CONNECTION_FAILS):
        with pytest.raises(requests.exceptions.ConnectionError):
            requests.request("GET", "https://huggingface.co")


def test_offline_with_datasets_offline_mode_enabled():
    with offline(OfflineSimulationMode.HF_DATASETS_OFFLINE_SET_TO_1):
        with pytest.raises(ConnectionError):
            http_head("https://huggingface.co")
458
1
"""Sum the only eleven primes that are truncatable both from left to right
and from right to left (Project Euler problem 37)."""
from __future__ import annotations

import math


def is_prime(number: int) -> bool:
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False
    # All prime numbers greater than 3 are of the form 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True


def list_truncated_nums(n: int) -> list[int]:
    """Returns n together with every left and right truncation of n."""
    str_num = str(n)
    list_nums = [n]
    for i in range(1, len(str_num)):
        list_nums.append(int(str_num[i:]))
        list_nums.append(int(str_num[:-i]))
    return list_nums


def validate(n: int) -> bool:
    """Cheap pre-filter: the leading and trailing 3-digit chunks must be prime."""
    if len(str(n)) > 3:
        if not is_prime(int(str(n)[-3:])) or not is_prime(int(str(n)[:3])):
            return False
    return True


def compute_truncated_primes(count: int = 11) -> list[int]:
    list_truncated_primes: list[int] = []
    num = 13
    while len(list_truncated_primes) != count:
        if validate(num):
            list_nums = list_truncated_nums(num)
            if all(is_prime(i) for i in list_nums):
                list_truncated_primes.append(num)
        num += 2
    return list_truncated_primes


def solution() -> int:
    return sum(compute_truncated_primes(11))


if __name__ == "__main__":
    print(f"{sum(compute_truncated_primes(11)) = }")
702
'''simple docstring''' import inspect import unittest from math import floor from transformers import CvtConfig from transformers.file_utils import cached_property, is_torch_available, is_vision_available from transformers.testing_utils import require_torch, require_vision, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import CvtForImageClassification, CvtModel from transformers.models.cvt.modeling_cvt import CVT_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import AutoImageProcessor class lowercase_ (lowerCamelCase__ ): """simple docstring""" def SCREAMING_SNAKE_CASE ( self : Optional[int] ): __lowercase = self.config_class(**self.inputs_dict ) self.parent.assertTrue(hasattr(lowercase__ ,'''embed_dim''' ) ) self.parent.assertTrue(hasattr(lowercase__ ,'''num_heads''' ) ) class lowercase_ : """simple docstring""" def __init__( self : Optional[Any] ,lowercase__ : Optional[int] ,lowercase__ : Optional[int]=1_3 ,lowercase__ : List[Any]=6_4 ,lowercase__ : Optional[int]=3 ,lowercase__ : Dict=[1_6, 4_8, 9_6] ,lowercase__ : Optional[Any]=[1, 3, 6] ,lowercase__ : Tuple=[1, 2, 1_0] ,lowercase__ : Optional[int]=[7, 3, 3] ,lowercase__ : str=[4, 2, 2] ,lowercase__ : Dict=[2, 1, 1] ,lowercase__ : Tuple=[2, 2, 2] ,lowercase__ : Tuple=[False, False, True] ,lowercase__ : int=[0.0, 0.0, 0.0] ,lowercase__ : str=0.0_2 ,lowercase__ : Union[str, Any]=1e-1_2 ,lowercase__ : Optional[int]=True ,lowercase__ : Union[str, Any]=True ,lowercase__ : Optional[Any]=2 ,): __lowercase = parent __lowercase = batch_size __lowercase = image_size __lowercase = patch_sizes __lowercase = patch_stride __lowercase = patch_padding __lowercase = is_training __lowercase = use_labels __lowercase = num_labels __lowercase = num_channels __lowercase = embed_dim __lowercase = num_heads __lowercase = stride_kv __lowercase = depth __lowercase = cls_token __lowercase = attention_drop_rate __lowercase = initializer_range __lowercase = layer_norm_eps def SCREAMING_SNAKE_CASE ( self : List[str] ): __lowercase = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) __lowercase = None if self.use_labels: __lowercase = ids_tensor([self.batch_size] ,self.num_labels ) __lowercase = self.get_config() return config, pixel_values, labels def SCREAMING_SNAKE_CASE ( self : str ): return CvtConfig( image_size=self.image_size ,num_labels=self.num_labels ,num_channels=self.num_channels ,embed_dim=self.embed_dim ,num_heads=self.num_heads ,patch_sizes=self.patch_sizes ,patch_padding=self.patch_padding ,patch_stride=self.patch_stride ,stride_kv=self.stride_kv ,depth=self.depth ,cls_token=self.cls_token ,attention_drop_rate=self.attention_drop_rate ,initializer_range=self.initializer_range ,) def SCREAMING_SNAKE_CASE ( self : Dict ,lowercase__ : List[str] ,lowercase__ : Any ,lowercase__ : List[str] ): __lowercase = CvtModel(config=lowercase__ ) model.to(lowercase__ ) model.eval() __lowercase = model(lowercase__ ) __lowercase = (self.image_size, self.image_size) __lowercase , __lowercase = image_size[0], image_size[1] for i in range(len(self.depth ) ): __lowercase = floor(((height + 2 * self.patch_padding[i] - self.patch_sizes[i]) / self.patch_stride[i]) + 1 ) __lowercase = floor(((width + 2 * self.patch_padding[i] - self.patch_sizes[i]) / self.patch_stride[i]) + 1 ) 
self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.embed_dim[-1], height, width) ) def SCREAMING_SNAKE_CASE ( self : Dict ,lowercase__ : Any ,lowercase__ : List[Any] ,lowercase__ : Dict ): __lowercase = self.num_labels __lowercase = CvtForImageClassification(lowercase__ ) model.to(lowercase__ ) model.eval() __lowercase = model(lowercase__ ,labels=lowercase__ ) self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.num_labels) ) def SCREAMING_SNAKE_CASE ( self : List[str] ): __lowercase = self.prepare_config_and_inputs() __lowercase , __lowercase , __lowercase = config_and_inputs __lowercase = {'''pixel_values''': pixel_values} return config, inputs_dict @require_torch class lowercase_ (lowerCamelCase__ , lowerCamelCase__ , unittest.TestCase ): """simple docstring""" SCREAMING_SNAKE_CASE : Any = (CvtModel, CvtForImageClassification) if is_torch_available() else () SCREAMING_SNAKE_CASE : Optional[int] = ( {'feature-extraction': CvtModel, 'image-classification': CvtForImageClassification} if is_torch_available() else {} ) SCREAMING_SNAKE_CASE : Optional[int] = False SCREAMING_SNAKE_CASE : Union[str, Any] = False SCREAMING_SNAKE_CASE : int = False SCREAMING_SNAKE_CASE : Optional[Any] = False SCREAMING_SNAKE_CASE : str = False def SCREAMING_SNAKE_CASE ( self : List[str] ): __lowercase = CvtModelTester(self ) __lowercase = ConfigTester(self ,config_class=lowercase__ ,has_text_modality=lowercase__ ,hidden_size=3_7 ) def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ): self.create_and_test_config_common_properties() self.config_tester.create_and_test_config_to_json_string() self.config_tester.create_and_test_config_to_json_file() self.config_tester.create_and_test_config_from_and_save_pretrained() self.config_tester.create_and_test_config_with_num_labels() self.config_tester.check_config_can_be_init_without_params() self.config_tester.check_config_arguments_init() def SCREAMING_SNAKE_CASE ( self : str ): return @unittest.skip(reason='''Cvt does not output attentions''' ) def SCREAMING_SNAKE_CASE ( self : Any ): pass @unittest.skip(reason='''Cvt does not use inputs_embeds''' ) def SCREAMING_SNAKE_CASE ( self : Any ): pass @unittest.skip(reason='''Cvt does not support input and output embeddings''' ) def SCREAMING_SNAKE_CASE ( self : str ): pass def SCREAMING_SNAKE_CASE ( self : List[str] ): __lowercase , __lowercase = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: __lowercase = model_class(lowercase__ ) __lowercase = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic __lowercase = [*signature.parameters.keys()] __lowercase = ['''pixel_values'''] self.assertListEqual(arg_names[:1] ,lowercase__ ) def SCREAMING_SNAKE_CASE ( self : Optional[int] ): __lowercase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*lowercase__ ) def SCREAMING_SNAKE_CASE ( self : Tuple ): def check_hidden_states_output(lowercase__ : Union[str, Any] ,lowercase__ : Union[str, Any] ,lowercase__ : str ): __lowercase = model_class(lowercase__ ) model.to(lowercase__ ) model.eval() with torch.no_grad(): __lowercase = model(**self._prepare_for_class(lowercase__ ,lowercase__ ) ) __lowercase = outputs.hidden_states __lowercase = len(self.model_tester.depth ) self.assertEqual(len(lowercase__ ) ,lowercase__ ) # verify the first hidden states (first block) self.assertListEqual( list(hidden_states[0].shape[-3:] ) ,[ 
self.model_tester.embed_dim[0], self.model_tester.image_size // 4, self.model_tester.image_size // 4, ] ,) __lowercase , __lowercase = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: __lowercase = True check_hidden_states_output(lowercase__ ,lowercase__ ,lowercase__ ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] __lowercase = True check_hidden_states_output(lowercase__ ,lowercase__ ,lowercase__ ) def SCREAMING_SNAKE_CASE ( self : Dict ): __lowercase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*lowercase__ ) @unittest.skip('''Will be fixed soon by reducing the size of the model used for common tests.''' ) def SCREAMING_SNAKE_CASE ( self : Any ): pass @slow def SCREAMING_SNAKE_CASE ( self : Optional[int] ): for model_name in CVT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: __lowercase = CvtModel.from_pretrained(lowercase__ ) self.assertIsNotNone(lowercase__ ) def _A ( ): """simple docstring""" __lowercase = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' ) return image @require_torch @require_vision class lowercase_ (unittest.TestCase ): """simple docstring""" @cached_property def SCREAMING_SNAKE_CASE ( self : Optional[Any] ): return AutoImageProcessor.from_pretrained(CVT_PRETRAINED_MODEL_ARCHIVE_LIST[0] ) @slow def SCREAMING_SNAKE_CASE ( self : List[str] ): __lowercase = CvtForImageClassification.from_pretrained(CVT_PRETRAINED_MODEL_ARCHIVE_LIST[0] ).to(lowercase__ ) __lowercase = self.default_image_processor __lowercase = prepare_img() __lowercase = image_processor(images=lowercase__ ,return_tensors='''pt''' ).to(lowercase__ ) # forward pass with torch.no_grad(): __lowercase = model(**lowercase__ ) # verify the logits __lowercase = torch.Size((1, 1_0_0_0) ) self.assertEqual(outputs.logits.shape ,lowercase__ ) __lowercase = torch.tensor([0.9_2_8_5, 0.9_0_1_5, -0.3_1_5_0] ).to(lowercase__ ) self.assertTrue(torch.allclose(outputs.logits[0, :3] ,lowercase__ ,atol=1e-4 ) )
624
0
import warnings

from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding


class CLIPProcessor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "CLIPImageProcessor"
    tokenizer_class = ("CLIPTokenizer", "CLIPTokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)

    def __call__(self, text=None, images=None, return_tensors=None, **kwargs):
        if text is None and images is None:
            raise ValueError("You have to specify either text or images. Both cannot be none.")

        if text is not None:
            encoding = self.tokenizer(text, return_tensors=return_tensors, **kwargs)

        if images is not None:
            image_features = self.image_processor(images, return_tensors=return_tensors, **kwargs)

        if text is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))

    @property
    def feature_extractor_class(self):
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
            FutureWarning,
        )
        return self.image_processor_class

    @property
    def feature_extractor(self):
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.",
            FutureWarning,
        )
        return self.image_processor
74
def binomial_coefficient(n: int, r: int) -> int:
    """Compute C(n, r) iteratively by building one row of Pascal's triangle."""
    c = [0 for i in range(r + 1)]
    # nc0 = 1
    c[0] = 1
    for i in range(1, n + 1):
        # to compute current row from previous row.
        j = min(i, r)
        while j > 0:
            c[j] += c[j - 1]
            j -= 1
    return c[r]


print(binomial_coefficient(n=10, r=5))  # 252
202
0
'''simple docstring''' import doctest import glob import importlib import inspect import os import re from contextlib import contextmanager from functools import wraps from unittest.mock import patch import numpy as np import pytest from absl.testing import parameterized import datasets from datasets import load_metric from .utils import for_all_test_methods, local, slow # mark all tests as integration __lowerCamelCase : Dict = pytest.mark.integration __lowerCamelCase : int = {'comet'} __lowerCamelCase : Dict = importlib.util.find_spec('fairseq') is not None __lowerCamelCase : Tuple = {'code_eval'} __lowerCamelCase : Optional[Any] = os.name == 'nt' __lowerCamelCase : Union[str, Any] = {'bertscore', 'frugalscore', 'perplexity'} __lowerCamelCase : Union[str, Any] = importlib.util.find_spec('transformers') is not None def _a (__SCREAMING_SNAKE_CASE ): """simple docstring""" @wraps(__A ) def wrapper(self , __SCREAMING_SNAKE_CASE ): if not _has_fairseq and metric_name in REQUIRE_FAIRSEQ: self.skipTest('''\"test requires Fairseq\"''' ) else: test_case(self , __A ) return wrapper def _a (__SCREAMING_SNAKE_CASE ): """simple docstring""" @wraps(__A ) def wrapper(self , __SCREAMING_SNAKE_CASE ): if not _has_transformers and metric_name in REQUIRE_TRANSFORMERS: self.skipTest('''\"test requires transformers\"''' ) else: test_case(self , __A ) return wrapper def _a (__SCREAMING_SNAKE_CASE ): """simple docstring""" @wraps(__A ) def wrapper(self , __SCREAMING_SNAKE_CASE ): if _on_windows and metric_name in UNSUPPORTED_ON_WINDOWS: self.skipTest('''\"test not supported on Windows\"''' ) else: test_case(self , __A ) return wrapper def _a (): """simple docstring""" _UpperCamelCase =[metric_dir.split(os.sep )[-2] for metric_dir in glob.glob('''./metrics/*/''' )] return [{"testcase_name": x, "metric_name": x} for x in metrics if x != "gleu"] # gleu is unfinished @parameterized.named_parameters(get_local_metric_names()) @for_all_test_methods( _snake_case , _snake_case , _snake_case) @local class UpperCAmelCase ( parameterized.TestCase): """simple docstring""" lowerCAmelCase_ = {} lowerCAmelCase_ = None @pytest.mark.filterwarnings('''ignore:metric_module_factory is deprecated:FutureWarning''' ) @pytest.mark.filterwarnings('''ignore:load_metric is deprecated:FutureWarning''' ) def UpperCamelCase__ ( self : List[str] , UpperCamelCase__ : Tuple ) -> Dict: _UpperCamelCase ='''[...]''' _UpperCamelCase =importlib.import_module( datasets.load.metric_module_factory(os.path.join('''metrics''' , lowerCAmelCase__ ) ).module_path ) _UpperCamelCase =datasets.load.import_main_class(metric_module.__name__ , dataset=lowerCAmelCase__ ) # check parameters _UpperCamelCase =inspect.signature(metric._compute ).parameters self.assertTrue(all(p.kind != p.VAR_KEYWORD for p in parameters.values() ) ) # no **kwargs # run doctest with self.patch_intensive_calls(lowerCAmelCase__ , metric_module.__name__ ): with self.use_local_metrics(): try: _UpperCamelCase =doctest.testmod(lowerCAmelCase__ , verbose=lowerCAmelCase__ , raise_on_error=lowerCAmelCase__ ) except doctest.UnexpectedException as e: raise e.exc_info[1] # raise the exception that doctest caught self.assertEqual(results.failed , 0 ) self.assertGreater(results.attempted , 1 ) @slow def UpperCamelCase__ ( self : str , UpperCamelCase__ : Optional[int] ) -> str: _UpperCamelCase ='''[...]''' _UpperCamelCase =importlib.import_module( datasets.load.metric_module_factory(os.path.join('''metrics''' , lowerCAmelCase__ ) ).module_path ) # run doctest with self.use_local_metrics(): 
_UpperCamelCase =doctest.testmod(lowerCAmelCase__ , verbose=lowerCAmelCase__ , raise_on_error=lowerCAmelCase__ ) self.assertEqual(results.failed , 0 ) self.assertGreater(results.attempted , 1 ) @contextmanager def UpperCamelCase__ ( self : Optional[int] , UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : Optional[Any] ) -> Dict: if metric_name in self.INTENSIVE_CALLS_PATCHER: with self.INTENSIVE_CALLS_PATCHER[metric_name](lowerCAmelCase__ ): yield else: yield @contextmanager def UpperCamelCase__ ( self : List[str] ) -> int: def load_local_metric(UpperCamelCase__ : Tuple , *UpperCamelCase__ : str , **UpperCamelCase__ : Dict ): return load_metric(os.path.join('''metrics''' , lowerCAmelCase__ ) , *lowerCAmelCase__ , **lowerCAmelCase__ ) with patch('''datasets.load_metric''' ) as mock_load_metric: _UpperCamelCase =load_local_metric yield @classmethod def UpperCamelCase__ ( cls : List[Any] , UpperCamelCase__ : Any ) -> str: def wrapper(UpperCamelCase__ : List[Any] ): _UpperCamelCase =contextmanager(lowerCAmelCase__ ) _UpperCamelCase =patcher return patcher return wrapper @LocalMetricTest.register_intensive_calls_patcher('''bleurt''' ) def _a (__SCREAMING_SNAKE_CASE ): """simple docstring""" import tensorflow.compat.va as tf from bleurt.score import Predictor tf.flags.DEFINE_string('''sv''' , '''''' , '''''' ) # handle pytest cli flags class UpperCAmelCase ( _snake_case): """simple docstring""" def UpperCamelCase__ ( self : Tuple , UpperCamelCase__ : Any ) -> int: assert len(input_dict['''input_ids'''] ) == 2 return np.array([1.03, 1.04] ) # mock predict_fn which is supposed to do a forward pass with a bleurt model with patch('''bleurt.score._create_predictor''' ) as mock_create_predictor: _UpperCamelCase =MockedPredictor() yield @LocalMetricTest.register_intensive_calls_patcher('''bertscore''' ) def _a (__SCREAMING_SNAKE_CASE ): """simple docstring""" import torch def bert_cos_score_idf(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , *__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ): return torch.tensor([[1.0, 1.0, 1.0]] * len(__A ) ) # mock get_model which is supposed to do download a bert model # mock bert_cos_score_idf which is supposed to do a forward pass with a bert model with patch('''bert_score.scorer.get_model''' ), patch( '''bert_score.scorer.bert_cos_score_idf''' ) as mock_bert_cos_score_idf: _UpperCamelCase =bert_cos_score_idf yield @LocalMetricTest.register_intensive_calls_patcher('''comet''' ) def _a (__SCREAMING_SNAKE_CASE ): """simple docstring""" def load_from_checkpoint(__SCREAMING_SNAKE_CASE ): class UpperCAmelCase : """simple docstring""" def UpperCamelCase__ ( self : Optional[Any] , UpperCamelCase__ : Optional[int] , *UpperCamelCase__ : Dict , **UpperCamelCase__ : str ) -> Dict: assert len(lowerCAmelCase__ ) == 2 _UpperCamelCase =[0.19, 0.92] return scores, sum(lowerCAmelCase__ ) / len(lowerCAmelCase__ ) return Model() # mock load_from_checkpoint which is supposed to do download a bert model # mock load_from_checkpoint which is supposed to do download a bert model with patch('''comet.download_model''' ) as mock_download_model: _UpperCamelCase =None with patch('''comet.load_from_checkpoint''' ) as mock_load_from_checkpoint: _UpperCamelCase =load_from_checkpoint yield def _a (): """simple docstring""" _UpperCamelCase =load_metric(os.path.join('''metrics''' , '''seqeval''' ) ) _UpperCamelCase ='''ERROR''' _UpperCamelCase =f'''Scheme should be one of [IOB1, IOB2, IOE1, IOE2, IOBES, BILOU], got {wrong_scheme}''' with pytest.raises(__A , match=re.escape(__A ) ): 
metric.compute(predictions=[] , references=[] , scheme=__A )
701
'''simple docstring''' import unittest import numpy as np from transformers.testing_utils import require_torch, require_vision from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import PoolFormerImageProcessor class UpperCAmelCase ( unittest.TestCase): """simple docstring""" def __init__( self : Optional[int] , UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : Any=7 , UpperCamelCase__ : List[Any]=3 , UpperCamelCase__ : Dict=30 , UpperCamelCase__ : Union[str, Any]=400 , UpperCamelCase__ : List[Any]=True , UpperCamelCase__ : List[str]=None , UpperCamelCase__ : List[str]=0.9 , UpperCamelCase__ : Any=None , UpperCamelCase__ : Any=True , UpperCamelCase__ : Optional[Any]=[0.5, 0.5, 0.5] , UpperCamelCase__ : List[str]=[0.5, 0.5, 0.5] , ) -> Dict: _UpperCamelCase =size if size is not None else {'''shortest_edge''': 30} _UpperCamelCase =crop_size if crop_size is not None else {'''height''': 30, '''width''': 30} _UpperCamelCase =parent _UpperCamelCase =batch_size _UpperCamelCase =num_channels _UpperCamelCase =min_resolution _UpperCamelCase =max_resolution _UpperCamelCase =do_resize_and_center_crop _UpperCamelCase =size _UpperCamelCase =crop_pct _UpperCamelCase =crop_size _UpperCamelCase =do_normalize _UpperCamelCase =image_mean _UpperCamelCase =image_std def UpperCamelCase__ ( self : str ) -> Union[str, Any]: return { "size": self.size, "do_resize_and_center_crop": self.do_resize_and_center_crop, "crop_pct": self.crop_pct, "crop_size": self.crop_size, "do_normalize": self.do_normalize, "image_mean": self.image_mean, "image_std": self.image_std, } @require_torch @require_vision class UpperCAmelCase ( lowercase_ , unittest.TestCase): """simple docstring""" lowerCAmelCase_ = PoolFormerImageProcessor if is_vision_available() else None def UpperCamelCase__ ( self : Optional[int] ) -> int: _UpperCamelCase =PoolFormerImageProcessingTester(self ) @property def UpperCamelCase__ ( self : Tuple ) -> Optional[int]: return self.image_processor_tester.prepare_image_processor_dict() def UpperCamelCase__ ( self : Optional[Any] ) -> int: _UpperCamelCase =self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(UpperCamelCase__ , '''do_resize_and_center_crop''' ) ) self.assertTrue(hasattr(UpperCamelCase__ , '''size''' ) ) self.assertTrue(hasattr(UpperCamelCase__ , '''crop_pct''' ) ) self.assertTrue(hasattr(UpperCamelCase__ , '''do_normalize''' ) ) self.assertTrue(hasattr(UpperCamelCase__ , '''image_mean''' ) ) self.assertTrue(hasattr(UpperCamelCase__ , '''image_std''' ) ) def UpperCamelCase__ ( self : Any ) -> Dict: _UpperCamelCase =self.image_processing_class.from_dict(self.image_processor_dict ) self.assertEqual(image_processor.size , {'''shortest_edge''': 30} ) self.assertEqual(image_processor.crop_size , {'''height''': 30, '''width''': 30} ) _UpperCamelCase =self.image_processing_class.from_dict(self.image_processor_dict , size=42 , crop_size=84 ) self.assertEqual(image_processor.size , {'''shortest_edge''': 42} ) self.assertEqual(image_processor.crop_size , {'''height''': 84, '''width''': 84} ) def UpperCamelCase__ ( self : Dict ) -> Optional[Any]: pass def UpperCamelCase__ ( self : Union[str, Any] ) -> List[Any]: # Initialize image_processing _UpperCamelCase =self.image_processing_class(**self.image_processor_dict ) # create random PIL images _UpperCamelCase 
=prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCamelCase__ ) for image in image_inputs: self.assertIsInstance(UpperCamelCase__ , Image.Image ) # Test not batched input _UpperCamelCase =image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width'''], ) , ) # Test batched _UpperCamelCase =image_processing(UpperCamelCase__ , return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width'''], ) , ) def UpperCamelCase__ ( self : Union[str, Any] ) -> Optional[int]: # Initialize image_processing _UpperCamelCase =self.image_processing_class(**self.image_processor_dict ) # create random numpy tensors _UpperCamelCase =prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCamelCase__ , numpify=UpperCamelCase__ ) for image in image_inputs: self.assertIsInstance(UpperCamelCase__ , np.ndarray ) # Test not batched input _UpperCamelCase =image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width'''], ) , ) # Test batched _UpperCamelCase =image_processing(UpperCamelCase__ , return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width'''], ) , ) def UpperCamelCase__ ( self : str ) -> List[Any]: # Initialize image_processing _UpperCamelCase =self.image_processing_class(**self.image_processor_dict ) # create random PyTorch tensors _UpperCamelCase =prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCamelCase__ , torchify=UpperCamelCase__ ) for image in image_inputs: self.assertIsInstance(UpperCamelCase__ , torch.Tensor ) # Test not batched input _UpperCamelCase =image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width'''], ) , ) # Test batched _UpperCamelCase =image_processing(UpperCamelCase__ , return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width'''], ) , )
271
0
import random import unittest import torch from diffusers import IFImgaImgSuperResolutionPipeline from diffusers.utils import floats_tensor from diffusers.utils.import_utils import is_xformers_available from diffusers.utils.testing_utils import skip_mps, torch_device from ..pipeline_params import TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS from ..test_pipelines_common import PipelineTesterMixin from . import IFPipelineTesterMixin @skip_mps class _A ( UpperCamelCase , UpperCamelCase , unittest.TestCase ): """simple docstring""" lowerCamelCase : Optional[Any] = IFImgaImgSuperResolutionPipeline lowerCamelCase : Tuple = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {'width', 'height'} lowerCamelCase : Optional[Any] = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS.union({'original_image'} ) lowerCamelCase : Tuple = PipelineTesterMixin.required_optional_params - {'latents'} def _a ( self : Any ) -> List[Any]: return self._get_superresolution_dummy_components() def _a ( self : str , __SCREAMING_SNAKE_CASE : Union[str, Any] , __SCREAMING_SNAKE_CASE : Optional[Any]=0 ) -> int: if str(__SCREAMING_SNAKE_CASE ).startswith("""mps""" ): __UpperCAmelCase =torch.manual_seed(__SCREAMING_SNAKE_CASE ) else: __UpperCAmelCase =torch.Generator(device=__SCREAMING_SNAKE_CASE ).manual_seed(__SCREAMING_SNAKE_CASE ) __UpperCAmelCase =floats_tensor((1, 3, 32, 32) , rng=random.Random(__SCREAMING_SNAKE_CASE ) ).to(__SCREAMING_SNAKE_CASE ) __UpperCAmelCase =floats_tensor((1, 3, 16, 16) , rng=random.Random(__SCREAMING_SNAKE_CASE ) ).to(__SCREAMING_SNAKE_CASE ) __UpperCAmelCase ={ """prompt""": """A painting of a squirrel eating a burger""", """image""": image, """original_image""": original_image, """generator""": generator, """num_inference_steps""": 2, """output_type""": """numpy""", } return inputs @unittest.skipIf( torch_device != """cuda""" or not is_xformers_available() , reason="""XFormers attention is only available with CUDA and `xformers` installed""" , ) def _a ( self : Optional[Any] ) -> Dict: self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3 ) def _a ( self : str ) -> str: self._test_save_load_optional_components() @unittest.skipIf(torch_device != """cuda""" , reason="""float16 requires CUDA""" ) def _a ( self : Optional[Any] ) -> str: # Due to non-determinism in save load of the hf-internal-testing/tiny-random-t5 text encoder super().test_save_load_floataa(expected_max_diff=1e-1 ) def _a ( self : Any ) -> Optional[int]: self._test_attention_slicing_forward_pass(expected_max_diff=1e-2 ) def _a ( self : List[Any] ) -> Tuple: self._test_save_load_local() def _a ( self : List[Any] ) -> Union[str, Any]: self._test_inference_batch_single_identical( expected_max_diff=1e-2 , )
68
import unittest from transformers import load_tool from transformers.utils import is_torch_available if is_torch_available(): import torch from transformers.testing_utils import require_torch from .test_tools_common import ToolTesterMixin @require_torch class lowercase__ ( unittest.TestCase , __SCREAMING_SNAKE_CASE ): def _UpperCAmelCase ( self : Tuple ): """simple docstring""" UpperCAmelCase__ = load_tool("text-to-speech" ) self.tool.setup() def _UpperCAmelCase ( self : Dict ): """simple docstring""" torch.manual_seed(0 ) UpperCAmelCase__ = self.tool("hey" ) UpperCAmelCase__ = result.to_raw() self.assertTrue( torch.allclose( resulting_tensor[:3] , torch.tensor([-0.0_0_0_5_9_6_6_6_6_8_8_3_2_1_1_5_8_2_9, -0.0_0_0_3_6_5_7_6_4_0_1_9_0_7_9_5_0_6_4, -0.0_0_0_1_3_4_3_9_5_0_2_7_9_9_8_8_3_4_8_5] ) , ) ) def _UpperCAmelCase ( self : Optional[int] ): """simple docstring""" torch.manual_seed(0 ) UpperCAmelCase__ = self.tool("hey" ) UpperCAmelCase__ = result.to_raw() self.assertTrue( torch.allclose( resulting_tensor[:3] , torch.tensor([-0.0_0_0_5_9_6_6_6_6_8_8_3_2_1_1_5_8_2_9, -0.0_0_0_3_6_5_7_6_4_0_1_9_0_7_9_5_0_6_4, -0.0_0_0_1_3_4_3_9_5_0_2_7_9_9_8_8_3_4_8_5] ) , ) )
475
0
from __future__ import annotations from decimal import Decimal from numpy import array def UpperCAmelCase__ ( lowerCamelCase_ : Union[str, Any] ): __a : str = Decimal # Check if the provided matrix has 2 rows and 2 columns # since this implementation only works for 2x2 matrices if len(lowerCamelCase_ ) == 2 and len(matrix[0] ) == 2 and len(matrix[1] ) == 2: # Calculate the determinant of the matrix __a : Dict = float( d(matrix[0][0] ) * d(matrix[1][1] ) - d(matrix[1][0] ) * d(matrix[0][1] ) ) if determinant == 0: raise ValueError('This matrix has no inverse.' ) # Creates a copy of the matrix with swapped positions of the elements __a : List[Any] = [[0.0, 0.0], [0.0, 0.0]] __a , __a : List[str] = matrix[1][1], matrix[0][0] __a , __a : Tuple = -matrix[1][0], -matrix[0][1] # Calculate the inverse of the matrix return [ [(float(d(lowerCamelCase_ ) ) / determinant) or 0.0 for n in row] for row in swapped_matrix ] elif ( len(lowerCamelCase_ ) == 3 and len(matrix[0] ) == 3 and len(matrix[1] ) == 3 and len(matrix[2] ) == 3 ): # Calculate the determinant of the matrix using Sarrus rule __a : List[Any] = float( ( (d(matrix[0][0] ) * d(matrix[1][1] ) * d(matrix[2][2] )) + (d(matrix[0][1] ) * d(matrix[1][2] ) * d(matrix[2][0] )) + (d(matrix[0][2] ) * d(matrix[1][0] ) * d(matrix[2][1] )) ) - ( (d(matrix[0][2] ) * d(matrix[1][1] ) * d(matrix[2][0] )) + (d(matrix[0][1] ) * d(matrix[1][0] ) * d(matrix[2][2] )) + (d(matrix[0][0] ) * d(matrix[1][2] ) * d(matrix[2][1] )) ) ) if determinant == 0: raise ValueError('This matrix has no inverse.' ) # Creating cofactor matrix __a : Tuple = [ [d(0.0 ), d(0.0 ), d(0.0 )], [d(0.0 ), d(0.0 ), d(0.0 )], [d(0.0 ), d(0.0 ), d(0.0 )], ] __a : Optional[Any] = (d(matrix[1][1] ) * d(matrix[2][2] )) - ( d(matrix[1][2] ) * d(matrix[2][1] ) ) __a : List[Any] = -( (d(matrix[1][0] ) * d(matrix[2][2] )) - (d(matrix[1][2] ) * d(matrix[2][0] )) ) __a : List[Any] = (d(matrix[1][0] ) * d(matrix[2][1] )) - ( d(matrix[1][1] ) * d(matrix[2][0] ) ) __a : Optional[int] = -( (d(matrix[0][1] ) * d(matrix[2][2] )) - (d(matrix[0][2] ) * d(matrix[2][1] )) ) __a : Tuple = (d(matrix[0][0] ) * d(matrix[2][2] )) - ( d(matrix[0][2] ) * d(matrix[2][0] ) ) __a : List[str] = -( (d(matrix[0][0] ) * d(matrix[2][1] )) - (d(matrix[0][1] ) * d(matrix[2][0] )) ) __a : Union[str, Any] = (d(matrix[0][1] ) * d(matrix[1][2] )) - ( d(matrix[0][2] ) * d(matrix[1][1] ) ) __a : List[Any] = -( (d(matrix[0][0] ) * d(matrix[1][2] )) - (d(matrix[0][2] ) * d(matrix[1][0] )) ) __a : List[str] = (d(matrix[0][0] ) * d(matrix[1][1] )) - ( d(matrix[0][1] ) * d(matrix[1][0] ) ) # Transpose the cofactor matrix (Adjoint matrix) __a : Optional[Any] = array(lowerCamelCase_ ) for i in range(3 ): for j in range(3 ): __a : Optional[int] = cofactor_matrix[j][i] # Inverse of the matrix using the formula (1/determinant) * adjoint matrix __a : Union[str, Any] = array(lowerCamelCase_ ) for i in range(3 ): for j in range(3 ): inverse_matrix[i][j] /= d(lowerCamelCase_ ) # Calculate the inverse of the matrix return [[float(d(lowerCamelCase_ ) ) or 0.0 for n in row] for row in inverse_matrix] raise ValueError('Please provide a matrix of size 2x2 or 3x3.' )
720
def cramers_rule_2x2(equation1: list[int], equation2: list[int]) -> tuple[float, float]:
    # Check if the input is valid
    if not len(equation1) == len(equation2) == 3:
        raise ValueError("Please enter a valid equation.")
    if equation1[0] == equation1[1] == equation2[0] == equation2[1] == 0:
        raise ValueError("Both a & b of two equations can't be zero.")

    # Extract the coefficients
    a1, b1, c1 = equation1
    a2, b2, c2 = equation2

    # Calculate the determinants of the matrices
    determinant = a1 * b2 - a2 * b1
    determinant_x = c1 * b2 - c2 * b1
    determinant_y = a1 * c2 - a2 * c1

    # Check if the system of linear equations has a solution (using Cramer's rule)
    if determinant == 0:
        if determinant_x == determinant_y == 0:
            raise ValueError("Infinite solutions. (Consistent system)")
        else:
            raise ValueError("No solution. (Inconsistent system)")
    else:
        if determinant_x == determinant_y == 0:
            # Trivial solution (Inconsistent system)
            return (0.0, 0.0)
        else:
            x = determinant_x / determinant
            y = determinant_y / determinant
            # Non-Trivial Solution (Consistent system)
            return (x, y)
577
0
"""simple docstring""" from __future__ import annotations def __A ( a_ :str) -> list[int]: return [ord(a_) - 96 for elem in plain] def __A ( a_ :list[int]) -> str: return "".join(chr(elem + 96) for elem in encoded) def __A ( ) -> None: __a : Dict = encode(input('''-> ''').strip().lower()) print('''Encoded: ''' , a_) print('''Decoded:''' , decode(a_)) if __name__ == "__main__": main()
52
'''simple docstring''' import unittest from transformers import BarthezTokenizer, BarthezTokenizerFast, BatchEncoding from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow from ...test_tokenization_common import TokenizerTesterMixin @require_tokenizers @require_sentencepiece @slow # see https://github.com/huggingface/transformers/issues/11457 class lowerCAmelCase ( __lowerCAmelCase , unittest.TestCase): __lowercase : int = BarthezTokenizer __lowercase : Any = BarthezTokenizerFast __lowercase : Dict = True __lowercase : Optional[int] = True def lowerCAmelCase ( self ) -> Dict: '''simple docstring''' super().setUp() __snake_case = BarthezTokenizerFast.from_pretrained('''moussaKam/mbarthez''' ) tokenizer.save_pretrained(self.tmpdirname ) tokenizer.save_pretrained(self.tmpdirname , legacy_format=__SCREAMING_SNAKE_CASE ) __snake_case = tokenizer def lowerCAmelCase ( self ) -> Dict: '''simple docstring''' __snake_case = '''<pad>''' __snake_case = 1 self.assertEqual(self.get_tokenizer()._convert_token_to_id(__SCREAMING_SNAKE_CASE ) , __SCREAMING_SNAKE_CASE ) self.assertEqual(self.get_tokenizer()._convert_id_to_token(__SCREAMING_SNAKE_CASE ) , __SCREAMING_SNAKE_CASE ) def lowerCAmelCase ( self ) -> Optional[int]: '''simple docstring''' __snake_case = list(self.get_tokenizer().get_vocab().keys() ) self.assertEqual(vocab_keys[0] , '''<s>''' ) self.assertEqual(vocab_keys[1] , '''<pad>''' ) self.assertEqual(vocab_keys[-1] , '''<mask>''' ) self.assertEqual(len(__SCREAMING_SNAKE_CASE ) , 10_1122 ) def lowerCAmelCase ( self ) -> Any: '''simple docstring''' self.assertEqual(self.get_tokenizer().vocab_size , 10_1122 ) @require_torch def lowerCAmelCase ( self ) -> Tuple: '''simple docstring''' __snake_case = ['''A long paragraph for summarization.''', '''Another paragraph for summarization.'''] __snake_case = [0, 57, 3018, 7_0307, 91, 2] __snake_case = self.tokenizer( __SCREAMING_SNAKE_CASE , max_length=len(__SCREAMING_SNAKE_CASE ) , padding=__SCREAMING_SNAKE_CASE , truncation=__SCREAMING_SNAKE_CASE , return_tensors='''pt''' ) self.assertIsInstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) self.assertEqual((2, 6) , batch.input_ids.shape ) self.assertEqual((2, 6) , batch.attention_mask.shape ) __snake_case = batch.input_ids.tolist()[0] self.assertListEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) def lowerCAmelCase ( self ) -> Optional[Any]: '''simple docstring''' if not self.test_rust_tokenizer: return __snake_case = self.get_tokenizer() __snake_case = self.get_rust_tokenizer() __snake_case = '''I was born in 92000, and this is falsé.''' __snake_case = tokenizer.tokenize(__SCREAMING_SNAKE_CASE ) __snake_case = rust_tokenizer.tokenize(__SCREAMING_SNAKE_CASE ) self.assertListEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) __snake_case = tokenizer.encode(__SCREAMING_SNAKE_CASE , add_special_tokens=__SCREAMING_SNAKE_CASE ) __snake_case = rust_tokenizer.encode(__SCREAMING_SNAKE_CASE , add_special_tokens=__SCREAMING_SNAKE_CASE ) self.assertListEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) __snake_case = self.get_rust_tokenizer() __snake_case = tokenizer.encode(__SCREAMING_SNAKE_CASE ) __snake_case = rust_tokenizer.encode(__SCREAMING_SNAKE_CASE ) self.assertListEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) @slow def lowerCAmelCase ( self ) -> int: '''simple docstring''' __snake_case = {'''input_ids''': [[0, 490, 1_4328, 4507, 354, 47, 4_3669, 95, 25, 7_8117, 2_0215, 1_9779, 190, 22, 400, 4, 3_5343, 8_0310, 603, 86, 2_4937, 
105, 3_3438, 9_4762, 196, 3_9642, 7, 15, 1_5933, 173, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 1_0534, 87, 25, 66, 3358, 196, 5_5289, 8, 8_2961, 81, 2204, 7_5203, 7, 15, 763, 1_2956, 216, 178, 1_4328, 9595, 1377, 6_9693, 7, 448, 7_1021, 196, 1_8106, 1437, 1_3974, 108, 9083, 4, 4_9315, 7, 39, 86, 1326, 2793, 4_6333, 4, 448, 196, 7_4588, 7, 4_9315, 7, 39, 21, 822, 3_8470, 74, 21, 6_6723, 6_2480, 8, 2_2050, 5, 2]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]} # noqa: E501 # fmt: on # moussaKam/mbarthez is a french model. So we also use french texts. __snake_case = [ '''Le transformeur est un modèle d\'apprentissage profond introduit en 2017, ''' '''utilisé principalement dans le domaine du traitement automatique des langues (TAL).''', '''À l\'instar des réseaux de neurones récurrents (RNN), les transformeurs sont conçus ''' '''pour gérer des données séquentielles, telles que le langage naturel, pour des tâches ''' '''telles que la traduction et la synthèse de texte.''', ] self.tokenizer_integration_test_util( expected_encoding=__SCREAMING_SNAKE_CASE , model_name='''moussaKam/mbarthez''' , revision='''c2e4ecbca5e3cd2c37fe1ac285ca4fbdf1366fb6''' , sequences=__SCREAMING_SNAKE_CASE , )
24
0
import argparse
import shlex

import runhouse as rh

if __name__ == "__main__":
    # Refer to https://runhouse-docs.readthedocs-hosted.com/en/latest/api/python/cluster.html#hardware-setup for cloud access
    # setup instructions, if using on-demand hardware
    # If user passes --user <user> --host <host> --key_path <key_path> <example> <args>, fill them in as BYO cluster
    # If user passes --instance <instance> --provider <provider> <example> <args>, fill them in as on-demand cluster
    # Throw an error if user passes both BYO and on-demand cluster args
    # Otherwise, use default values
    parser = argparse.ArgumentParser()
    parser.add_argument("--user", type=str, default="ubuntu")
    parser.add_argument("--host", type=str, default="localhost")
    parser.add_argument("--key_path", type=str, default=None)
    parser.add_argument("--instance", type=str, default="V100:1")
    parser.add_argument("--provider", type=str, default="cheapest")
    parser.add_argument("--use_spot", type=bool, default=False)
    parser.add_argument("--example", type=str, default="pytorch/text-generation/run_generation.py")
    args, unknown = parser.parse_known_args()
    if args.host != "localhost":
        if args.instance != "V100:1" or args.provider != "cheapest":
            raise ValueError("Cannot specify both BYO and on-demand cluster args")
        cluster = rh.cluster(
            name="rh-cluster", ips=[args.host], ssh_creds={"ssh_user": args.user, "ssh_private_key": args.key_path}
        )
    else:
        cluster = rh.cluster(
            name="rh-cluster", instance_type=args.instance, provider=args.provider, use_spot=args.use_spot
        )
    example_dir = args.example.rsplit("/", 1)[0]

    # Set up remote environment
    cluster.install_packages(["pip:./"])  # Installs transformers from local source
    # Note transformers is copied into the home directory on the remote machine, so we can install from there
    cluster.run([f"pip install -r transformers/examples/{example_dir}/requirements.txt"])
    cluster.run(["pip install torch --upgrade --extra-index-url https://download.pytorch.org/whl/cu117"])

    # Run example. You can bypass the CLI wrapper and paste your own code here.
    cluster.run([f"python transformers/examples/{args.example} {' '.join(shlex.quote(arg) for arg in unknown)}"])

    # Alternatively, we can just import and run a training function (especially if there's no wrapper CLI):
    # from my_script... import train
    # reqs = ['pip:./', 'torch', 'datasets', 'accelerate', 'evaluate', 'tqdm', 'scipy', 'scikit-learn', 'tensorboard']
    # launch_train_gpu = rh.function(fn=train,
    #                                system=gpu,
    #                                reqs=reqs,
    #                                name='train_bert_glue')
    #
    # We can pass in arguments just like we would to a function:
    # launch_train_gpu(num_epochs = 3, lr = 2e-5, seed = 42, batch_size = 16
    #                  stream_logs=True)
700
from ...utils import logging
from ..ta.modeling_tf_ta import TFTaEncoderModel, TFTaForConditionalGeneration, TFTaModel
from .configuration_mta import MTaConfig


logger = logging.get_logger(__name__)

_CONFIG_FOR_DOC = "T5Config"


class TFMTaModel(TFTaModel):
    model_type = "mt5"
    config_class = MTaConfig


class TFMTaForConditionalGeneration(TFTaForConditionalGeneration):
    model_type = "mt5"
    config_class = MTaConfig


class TFMTaEncoderModel(TFTaEncoderModel):
    model_type = "mt5"
    config_class = MTaConfig
564
0
def search(list_data: list, key: int, left: int = 0, right: int = 0) -> int:
    """
    Recursively probe the list from both ends at once, returning the index of
    key if found and -1 otherwise.

    >>> search(list(range(0, 11)), 5)
    5
    """
    right = right or len(list_data) - 1
    if left > right:
        return -1
    elif list_data[left] == key:
        return left
    elif list_data[right] == key:
        return right
    else:
        return search(list_data, key, left + 1, right - 1)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
126
from __future__ import annotations


def ohms_law(voltage: float, current: float, resistance: float) -> dict[str, float]:
    """
    Apply Ohm's law (V = I * R) to solve for whichever of the three
    quantities was passed in as zero.

    >>> ohms_law(voltage=10, resistance=5, current=0)
    {'current': 2.0}
    """
    if (voltage, current, resistance).count(0) != 1:
        raise ValueError("One and only one argument must be 0")
    if resistance < 0:
        raise ValueError("Resistance cannot be negative")
    if voltage == 0:
        return {"voltage": float(current * resistance)}
    elif current == 0:
        return {"current": voltage / resistance}
    elif resistance == 0:
        return {"resistance": voltage / current}
    else:
        raise ValueError("Exactly one argument must be 0")


if __name__ == "__main__":
    import doctest

    doctest.testmod()
670
0
def factorial(digit: int) -> int:
    return 1 if digit in (0, 1) else (digit * factorial(digit - 1))


def krishnamurthy(number: int) -> bool:
    """
    A Krishnamurthy number equals the sum of the factorials of its digits,
    e.g. 145 = 1! + 4! + 5!.

    >>> krishnamurthy(145)
    True
    """
    fact_sum = 0
    duplicate = number
    while duplicate > 0:
        duplicate, digit = divmod(duplicate, 10)
        fact_sum += factorial(digit)
    return fact_sum == number


if __name__ == "__main__":
    print("Program to check whether a number is a Krishnamurthy Number or not.")
    number = int(input("Enter number: ").strip())
    print(f"{number} is {'' if krishnamurthy(number) else 'not '}a Krishnamurthy Number.")
306
from __future__ import annotations

arr = [-10, -5, 0, 5, 5.1, 11, 13, 21, 3, 4, -21, -10, -5, -1, 0]
expect = [-5, 0, 5, 5.1, 11, 13, 21, -1, 4, -1, -10, -5, -1, 0, -1]


def next_greatest_element_slow(arr: list[float]) -> list[float]:
    result = []
    arr_size = len(arr)

    for i in range(arr_size):
        next_: float = -1
        for j in range(i + 1, arr_size):
            if arr[i] < arr[j]:
                next_ = arr[j]
                break
        result.append(next_)
    return result


def next_greatest_element_fast(arr: list[float]) -> list[float]:
    result = []
    for i, outer in enumerate(arr):
        next_: float = -1
        for inner in arr[i + 1 :]:
            if outer < inner:
                next_ = inner
                break
        result.append(next_)
    return result


def next_greatest_element(arr: list[float]) -> list[float]:
    arr_size = len(arr)
    stack: list[float] = []
    result: list[float] = [-1] * arr_size

    for index in reversed(range(arr_size)):
        if stack:
            while stack[-1] <= arr[index]:
                stack.pop()
                if not stack:
                    break
        if stack:
            result[index] = stack[-1]
        stack.append(arr[index])
    return result


if __name__ == "__main__":
    from doctest import testmod
    from timeit import timeit

    testmod()
    print(next_greatest_element_slow(arr))
    print(next_greatest_element_fast(arr))
    print(next_greatest_element(arr))

    setup = (
        "from __main__ import arr, next_greatest_element_slow, "
        "next_greatest_element_fast, next_greatest_element"
    )
    print(
        "next_greatest_element_slow():",
        timeit("next_greatest_element_slow(arr)", setup=setup),
    )
    print(
        "next_greatest_element_fast():",
        timeit("next_greatest_element_fast(arr)", setup=setup),
    )
    print(
        "     next_greatest_element():",
        timeit("next_greatest_element(arr)", setup=setup),
    )
306
1
from .imports import is_rich_available if is_rich_available(): from rich.traceback import install install(show_locals=False) else: raise ModuleNotFoundError('To use the rich extension, install rich with `pip install rich`')
521
from __future__ import annotations


def print_distance(distance: list[float], src: int):
    print(f"Vertex\tShortest Distance from vertex {src}")
    for i, d in enumerate(distance):
        print(f"{i}\t\t{d}")


def check_negative_cycle(graph: list[dict[str, int]], distance: list[float], edge_count: int):
    for j in range(edge_count):
        u, v, w = (graph[j][k] for k in ["src", "dst", "weight"])
        if distance[u] != float("inf") and distance[u] + w < distance[v]:
            return True
    return False


def bellman_ford(graph: list[dict[str, int]], vertex_count: int, edge_count: int, src: int) -> list[float]:
    distance = [float("inf")] * vertex_count
    distance[src] = 0.0

    for _ in range(vertex_count - 1):
        for j in range(edge_count):
            u, v, w = (graph[j][k] for k in ["src", "dst", "weight"])
            if distance[u] != float("inf") and distance[u] + w < distance[v]:
                distance[v] = distance[u] + w

    negative_cycle_exists = check_negative_cycle(graph, distance, edge_count)
    if negative_cycle_exists:
        raise Exception("Negative cycle found")

    return distance


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    V = int(input("Enter number of vertices: ").strip())
    E = int(input("Enter number of edges: ").strip())

    graph: list[dict[str, int]] = [{} for _ in range(E)]

    for i in range(E):
        print("Edge ", i + 1)
        src, dest, weight = (
            int(x) for x in input("Enter source, destination, weight: ").strip().split(" ")
        )
        graph[i] = {"src": src, "dst": dest, "weight": weight}

    source = int(input("\nEnter shortest path source:").strip())
    shortest_distance = bellman_ford(graph, V, E, source)
    print_distance(shortest_distance, 0)
521
1
"""Minimal network (Project Euler problem 107): find the maximum saving made by
replacing a fully connected network with its minimum spanning tree."""
from __future__ import annotations

import os
from collections.abc import Mapping

EdgeT = tuple[int, int]


class Graph:
    """An undirected weighted graph with edges normalised to (min, max) order."""

    def __init__(self, vertices: set[int], edges: Mapping[EdgeT, int]) -> None:
        self.vertices: set[int] = vertices
        self.edges: dict[EdgeT, int] = {
            (min(edge), max(edge)): weight for edge, weight in edges.items()
        }

    def add_edge(self, edge: EdgeT, weight: int) -> None:
        self.vertices.add(edge[0])
        self.vertices.add(edge[1])
        self.edges[(min(edge), max(edge))] = weight

    def prims_algorithm(self) -> Graph:
        # Grow a minimum spanning tree from an arbitrary start vertex, always
        # adding the cheapest edge that crosses the current cut.
        subgraph = Graph({min(self.vertices)}, {})

        min_edge: EdgeT
        min_weight: int
        edge: EdgeT
        weight: int

        while len(subgraph.vertices) < len(self.vertices):
            min_weight = max(self.edges.values()) + 1
            for edge, weight in self.edges.items():
                if (edge[0] in subgraph.vertices) ^ (edge[1] in subgraph.vertices):
                    if weight < min_weight:
                        min_edge = edge
                        min_weight = weight
            subgraph.add_edge(min_edge, min_weight)

        return subgraph


def solution(filename: str = "p107_network.txt") -> int:
    script_dir: str = os.path.abspath(os.path.dirname(__file__))
    network_file: str = os.path.join(script_dir, filename)
    edges: dict[EdgeT, int] = {}

    with open(network_file) as f:
        data: list[str] = f.read().strip().split("\n")

    adjacency_matrix = [line.split(",") for line in data]

    for edge1 in range(1, len(adjacency_matrix)):
        for edge2 in range(edge1):
            if adjacency_matrix[edge1][edge2] != "-":
                edges[(edge2, edge1)] = int(adjacency_matrix[edge1][edge2])

    graph: Graph = Graph(set(range(len(adjacency_matrix))), edges)
    subgraph: Graph = graph.prims_algorithm()

    initial_total: int = sum(graph.edges.values())
    optimal_total: int = sum(subgraph.edges.values())

    return initial_total - optimal_total


if __name__ == "__main__":
    print(f"{solution() = }")
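# Sketch of the expected behaviour on a toy graph (hypothetical, not from the file):
# a triangle weighted 1, 2, 3 totals 6; its minimum spanning tree keeps the two
# cheapest edges, so the saving is 6 - (1 + 2) = 3.
#
#     g = Graph({0, 1, 2}, {(0, 1): 1, (1, 2): 2, (0, 2): 3})
#     mst = g.prims_algorithm()
#     sum(g.edges.values()) - sum(mst.edges.values())  # -> 3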
714
"""Convert old-structure ProphetNet/XLMProphetNet checkpoints to the current
transformers implementation."""
import argparse

from torch import nn

# transformers_old should correspond to branch `save_old_prophetnet_model_structure` here
# original prophetnet_checkpoints are saved under `patrickvonplaten/..._old` respectively
from transformers_old.modeling_prophetnet import (
    ProphetNetForConditionalGeneration as ProphetNetForConditionalGenerationOld,
)
from transformers_old.modeling_xlm_prophetnet import (
    XLMProphetNetForConditionalGeneration as XLMProphetNetForConditionalGenerationOld,
)

from transformers import ProphetNetForConditionalGeneration, XLMProphetNetForConditionalGeneration, logging


logger = logging.get_logger(__name__)
logging.set_verbosity_info()


def convert_prophetnet_checkpoint_to_pytorch(prophetnet_checkpoint_path: str, pytorch_dump_folder_path: str):
    if "xprophetnet" in prophetnet_checkpoint_path:
        prophet_old = XLMProphetNetForConditionalGenerationOld.from_pretrained(prophetnet_checkpoint_path)
        prophet, loading_info = XLMProphetNetForConditionalGeneration.from_pretrained(
            prophetnet_checkpoint_path, output_loading_info=True
        )
    else:
        prophet_old = ProphetNetForConditionalGenerationOld.from_pretrained(prophetnet_checkpoint_path)
        prophet, loading_info = ProphetNetForConditionalGeneration.from_pretrained(
            prophetnet_checkpoint_path, output_loading_info=True
        )

    special_keys = ["key_proj", "value_proj", "query_proj"]

    mapping = {
        "self_attn": "ngram_self_attn",
        "cross_attn": "encoder_attn",
        "cross_attn_layer_norm": "encoder_attn_layer_norm",
        "feed_forward_layer_norm": "final_layer_norm",
        "feed_forward": "",
        "intermediate": "fc1",
        "output": "fc2",
        "key_proj": "k_proj",
        "query_proj": "q_proj",
        "value_proj": "v_proj",
        "word_embeddings": "embed_tokens",
        "embeddings_layer_norm": "emb_layer_norm",
        "relative_pos_embeddings": "relative_linear",
        "ngram_embeddings": "ngram_input_embed",
        "position_embeddings": "embed_positions",
    }

    for key in loading_info["missing_keys"]:
        attributes = key.split(".")

        if attributes[0] == "lm_head":
            model = prophet
            old_model = prophet_old
        else:
            model = prophet.prophetnet
            old_model = prophet_old.model

        is_key_init = False
        for attribute in attributes:
            if attribute in mapping:
                old_attribute = mapping[attribute]
                if not hasattr(old_model, old_attribute) and len(old_attribute) > 0:
                    old_attribute = attribute
            elif hasattr(old_model, attribute):
                old_attribute = attribute

            if attribute == "weight":
                assert old_model.weight.shape == model.weight.shape, "Shapes have to match!"
                model.weight = old_model.weight
                logger.info(f"{attribute} is initialized.")
                is_key_init = True
                break
            elif attribute == "bias":
                assert old_model.bias.shape == model.bias.shape, "Shapes have to match!"
                model.bias = old_model.bias
                logger.info(f"{attribute} is initialized.")
                is_key_init = True
                break
            elif attribute in special_keys and hasattr(old_model, "in_proj_weight"):
                # The old model stores q/k/v as one fused in_proj; split it in thirds.
                embed_dim = old_model.in_proj_weight.shape[0] // 3
                param = getattr(model, attribute)
                assert param.weight.shape == old_model.in_proj_weight[:embed_dim, :].shape, "Shapes have to match"
                assert param.bias.shape == old_model.in_proj_bias[:embed_dim].shape, "Shapes have to match"

                if attribute == "query_proj":
                    model.query_proj.weight = nn.Parameter(old_model.in_proj_weight[:embed_dim, :])
                    model.query_proj.bias = nn.Parameter(old_model.in_proj_bias[:embed_dim])
                elif attribute == "key_proj":
                    model.key_proj.weight = nn.Parameter(old_model.in_proj_weight[embed_dim : 2 * embed_dim, :])
                    model.key_proj.bias = nn.Parameter(old_model.in_proj_bias[embed_dim : 2 * embed_dim])
                elif attribute == "value_proj":
                    model.value_proj.weight = nn.Parameter(old_model.in_proj_weight[2 * embed_dim :, :])
                    model.value_proj.bias = nn.Parameter(old_model.in_proj_bias[2 * embed_dim :])
                is_key_init = True
                break
            elif attribute == "position_embeddings":
                assert (
                    model.position_embeddings.weight.shape[-1] == old_model.embed_positions.weight.shape[-1]
                ), "Hidden size has to match"
                assert model.position_embeddings.weight.shape[0] == 512, "We want 512 position_embeddings."
                model.position_embeddings.weight = nn.Parameter(old_model.embed_positions.weight[:512, :])
                is_key_init = True
                break

            # Not a leaf yet: descend one level in both models.
            if attribute.isdigit():
                model = model[int(attribute)]
                old_model = old_model[int(old_attribute)]
            else:
                model = getattr(model, attribute)

                if old_attribute != "":
                    if not hasattr(old_model, old_attribute):
                        raise ValueError(f"{old_model} does not have {old_attribute}")
                    old_model = getattr(old_model, old_attribute)

        if not is_key_init:
            raise ValueError(f"{key} was not correctly initialized!")

    print(f"Saving model to {pytorch_dump_folder_path}")
    prophet.save_pretrained(pytorch_dump_folder_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--prophetnet_checkpoint_path", default=None, type=str, required=True, help="Path the official PyTorch dump."
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    args = parser.parse_args()
    convert_prophetnet_checkpoint_to_pytorch(args.prophetnet_checkpoint_path, args.pytorch_dump_folder_path)
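# Hypothetical invocation (placeholder paths; assumes the script is saved as
# convert_prophetnet_checkpoint.py):
#
#     python convert_prophetnet_checkpoint.py \
#         --prophetnet_checkpoint_path ./old_prophetnet_checkpoint \
#         --pytorch_dump_folder_path ./converted_prophetnet
#
# A checkpoint path containing "xprophetnet" is routed through the XLM variant.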
666
0