Dataset schema (five columns per row):

    code                     string, cell length 82 to 54.1k characters
    code_codestyle           int64, values 0 to 699
    style_context            string, cell length 111 to 35.6k characters
    style_context_codestyle  int64, values 0 to 699
    label                    int64, 0 or 1
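To make the schema concrete, a minimal sketch of loading and inspecting rows follows; it assumes the rows sit in a local Parquet file, and the path is a placeholder rather than anything taken from this dump:

import pandas as pd

# Placeholder path -- point this at wherever the dataset shard actually lives.
df = pd.read_parquet("data/train-00000.parquet")

print(df.columns.tolist())
# ['code', 'code_codestyle', 'style_context', 'style_context_codestyle', 'label']

# Each row pairs a code cell with a style-context cell, each tagged with a
# codestyle id in [0, 699], plus a binary label.
for _, row in df.head(3).iterrows():
    print(row["code_codestyle"], row["style_context_codestyle"], row["label"])
    print(row["code"][:80])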
Row 1

code:

from __future__ import annotations

import math


def minimax(depth: int, node_index: int, is_max: bool, scores: list[int], height: float) -> int:
    if depth < 0:
        raise ValueError("Depth cannot be less than 0")
    if len(scores) == 0:
        raise ValueError("Scores cannot be empty")
    if depth == height:
        return scores[node_index]
    if is_max:
        return max(
            minimax(depth + 1, node_index * 2, False, scores, height),
            minimax(depth + 1, node_index * 2 + 1, False, scores, height),
        )
    return min(
        minimax(depth + 1, node_index * 2, True, scores, height),
        minimax(depth + 1, node_index * 2 + 1, True, scores, height),
    )


def main() -> None:
    scores = [90, 23, 6, 33, 21, 65, 123, 34_423]
    height = math.log(len(scores), 2)
    print("Optimal value : ", end="")
    print(minimax(0, 0, True, scores, height))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    main()
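As a quick sanity check on the snippet above (a hand-worked sketch, not part of the row's cell, and assuming the standard maximizer/minimizer alternation restored in the code), the optimal value for the sample scores falls out level by level:

scores = [90, 23, 6, 33, 21, 65, 123, 34_423]

# depth 2: the maximizer picks the better leaf of each pair
level2 = [max(scores[i], scores[i + 1]) for i in range(0, len(scores), 2)]  # [90, 33, 65, 34423]
# depth 1: the minimizer
level1 = [min(level2[0], level2[1]), min(level2[2], level2[3])]  # [33, 65]
# depth 0: the maximizer -- the "Optimal value" printed by main()
print(max(level1))  # 65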
code_codestyle: 2

style_context:
import os from shutil import copyfile from typing import Any, Dict, List, Optional, Tuple import sentencepiece as spm from ...tokenization_utils import AddedToken, PreTrainedTokenizer from ...utils import logging UpperCAmelCase_ = logging.get_logger(__name__) UpperCAmelCase_ = """▁""" UpperCAmelCase_ = {"""vocab_file""": """sentencepiece.bpe.model""", """monolingual_vocab_file""": """dict.txt"""} UpperCAmelCase_ = { """vocab_file""": { """vinai/bartpho-syllable""": """https://huggingface.co/vinai/bartpho-syllable/resolve/main/sentencepiece.bpe.model""", }, """monolingual_vocab_file""": { """vinai/bartpho-syllable""": """https://huggingface.co/vinai/bartpho-syllable/resolve/main/dict.txt""", }, } UpperCAmelCase_ = {"""vinai/bartpho-syllable""": 1_0_2_4} class lowerCamelCase__ ( _A): """simple docstring""" a__ : int = VOCAB_FILES_NAMES a__ : Tuple = PRETRAINED_VOCAB_FILES_MAP a__ : Optional[int] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES a__ : Tuple = ["input_ids", "attention_mask"] def __init__( self : Union[str, Any] , __lowerCAmelCase : Dict , __lowerCAmelCase : List[str] , __lowerCAmelCase : Union[str, Any]="<s>" , __lowerCAmelCase : Dict="</s>" , __lowerCAmelCase : List[Any]="</s>" , __lowerCAmelCase : Optional[Any]="<s>" , __lowerCAmelCase : Tuple="<unk>" , __lowerCAmelCase : int="<pad>" , __lowerCAmelCase : Optional[Any]="<mask>" , __lowerCAmelCase : Optional[Dict[str, Any]] = None , **__lowerCAmelCase : Tuple , ) -> None: # Mask token behave like a normal word, i.e. include the space before it _A = AddedToken(__lowerCAmelCase , lstrip=__lowerCAmelCase , rstrip=__lowerCAmelCase ) if isinstance(__lowerCAmelCase , __lowerCAmelCase ) else mask_token _A = {} if sp_model_kwargs is None else sp_model_kwargs super().__init__( bos_token=__lowerCAmelCase , eos_token=__lowerCAmelCase , unk_token=__lowerCAmelCase , sep_token=__lowerCAmelCase , cls_token=__lowerCAmelCase , pad_token=__lowerCAmelCase , mask_token=__lowerCAmelCase , sp_model_kwargs=self.sp_model_kwargs , **__lowerCAmelCase , ) _A = vocab_file _A = monolingual_vocab_file _A = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(str(__lowerCAmelCase ) ) # Load the reduced vocab # Keep order of special tokens for backward compatibility _A = {} _A = 0 for token in [bos_token, pad_token, eos_token, unk_token, sep_token, cls_token]: if str(__lowerCAmelCase ) not in self.fairseq_tokens_to_ids: _A = cnt cnt += 1 with open(__lowerCAmelCase , '''r''' , encoding='''utf-8''' ) as f: for line in f.readlines(): _A = line.strip().split()[0] _A = len(self.fairseq_tokens_to_ids ) if str(__lowerCAmelCase ) not in self.fairseq_tokens_to_ids: _A = len(self.fairseq_tokens_to_ids ) _A = {v: k for k, v in self.fairseq_tokens_to_ids.items()} def __getstate__( self : Any ) -> List[Any]: _A = self.__dict__.copy() _A = None _A = self.sp_model.serialized_model_proto() return state def __setstate__( self : Union[str, Any] , __lowerCAmelCase : Dict ) -> List[Any]: _A = d # for backward compatibility if not hasattr(self , '''sp_model_kwargs''' ): _A = {} _A = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.LoadFromSerializedProto(self.sp_model_proto ) def snake_case_ ( self : Optional[Any] , __lowerCAmelCase : List[int] , __lowerCAmelCase : Optional[List[int]] = None ) -> List[int]: if token_ids_a is None: return [self.cls_token_id] + token_ids_a + [self.sep_token_id] _A = [self.cls_token_id] _A = [self.sep_token_id] return cls + token_ids_a + sep + sep + token_ids_a + sep def snake_case_ ( self : List[Any] , __lowerCAmelCase : 
List[int] , __lowerCAmelCase : Optional[List[int]] = None , __lowerCAmelCase : bool = False ) -> List[int]: if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=__lowerCAmelCase , token_ids_a=__lowerCAmelCase , already_has_special_tokens=__lowerCAmelCase ) if token_ids_a is None: return [1] + ([0] * len(__lowerCAmelCase )) + [1] return [1] + ([0] * len(__lowerCAmelCase )) + [1, 1] + ([0] * len(__lowerCAmelCase )) + [1] def snake_case_ ( self : Any , __lowerCAmelCase : List[int] , __lowerCAmelCase : Optional[List[int]] = None ) -> List[int]: _A = [self.sep_token_id] _A = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0] @property def snake_case_ ( self : Optional[int] ) -> Union[str, Any]: return len(self.fairseq_ids_to_tokens ) def snake_case_ ( self : Dict ) -> Optional[Any]: _A = {self.convert_ids_to_tokens(__lowerCAmelCase ): i for i in range(self.vocab_size )} vocab.update(self.added_tokens_encoder ) return vocab def snake_case_ ( self : List[str] , __lowerCAmelCase : str ) -> List[str]: return self.sp_model.encode(__lowerCAmelCase , out_type=__lowerCAmelCase ) def snake_case_ ( self : str , __lowerCAmelCase : Optional[Any] ) -> Dict: if token in self.fairseq_tokens_to_ids: return self.fairseq_tokens_to_ids[token] else: return self.unk_token_id def snake_case_ ( self : int , __lowerCAmelCase : Optional[int] ) -> List[str]: return self.fairseq_ids_to_tokens[index] def snake_case_ ( self : List[str] , __lowerCAmelCase : Union[str, Any] ) -> Tuple: _A = ''''''.join(__lowerCAmelCase ).replace(__lowerCAmelCase , ''' ''' ).strip() return out_string def snake_case_ ( self : str , __lowerCAmelCase : str , __lowerCAmelCase : Optional[str] = None ) -> Tuple[str]: if not os.path.isdir(__lowerCAmelCase ): logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' ) return _A = os.path.join( __lowerCAmelCase , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] ) _A = os.path.join( __lowerCAmelCase , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''monolingual_vocab_file'''] , ) if os.path.abspath(self.vocab_file ) != os.path.abspath(__lowerCAmelCase ) and os.path.isfile(self.vocab_file ): copyfile(self.vocab_file , __lowerCAmelCase ) elif not os.path.isfile(self.vocab_file ): with open(__lowerCAmelCase , '''wb''' ) as fi: _A = self.sp_model.serialized_model_proto() fi.write(__lowerCAmelCase ) if os.path.abspath(self.monolingual_vocab_file ) != os.path.abspath( __lowerCAmelCase ) and os.path.isfile(self.monolingual_vocab_file ): copyfile(self.monolingual_vocab_file , __lowerCAmelCase ) elif not os.path.isfile(self.monolingual_vocab_file ): with open(__lowerCAmelCase , '''w''' , encoding='''utf-8''' ) as fp: for token in self.fairseq_tokens_to_ids: if token not in self.all_special_tokens: fp.write(f'''{str(__lowerCAmelCase )} \n''' ) return out_vocab_file, out_monolingual_vocab_file
style_context_codestyle: 2
label: 1

Row 2

code:
import json import os import unittest from transformers.models.blenderbot_small.tokenization_blenderbot_small import ( VOCAB_FILES_NAMES, BlenderbotSmallTokenizer, ) from ...test_tokenization_common import TokenizerTesterMixin class lowerCamelCase__ ( _A , unittest.TestCase): """simple docstring""" a__ : Optional[Any] = BlenderbotSmallTokenizer a__ : List[str] = False def snake_case_ ( self : Optional[Any] ) -> Optional[int]: super().setUp() _A = ['''__start__''', '''adapt''', '''act''', '''ap@@''', '''te''', '''__end__''', '''__unk__'''] _A = dict(zip(__lowerCAmelCase , range(len(__lowerCAmelCase ) ) ) ) _A = ['''#version: 0.2''', '''a p''', '''t e</w>''', '''ap t</w>''', '''a d''', '''ad apt</w>''', '''a c''', '''ac t</w>''', ''''''] _A = {'''unk_token''': '''__unk__''', '''bos_token''': '''__start__''', '''eos_token''': '''__end__'''} _A = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] ) _A = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''merges_file'''] ) with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp: fp.write(json.dumps(__lowerCAmelCase ) + '''\n''' ) with open(self.merges_file , '''w''' , encoding='''utf-8''' ) as fp: fp.write('''\n'''.join(__lowerCAmelCase ) ) def snake_case_ ( self : List[str] , **__lowerCAmelCase : Any ) -> Any: kwargs.update(self.special_tokens_map ) return BlenderbotSmallTokenizer.from_pretrained(self.tmpdirname , **__lowerCAmelCase ) def snake_case_ ( self : List[str] , __lowerCAmelCase : int ) -> Optional[Any]: _A = '''adapt act apte''' _A = '''adapt act apte''' return input_text, output_text def snake_case_ ( self : List[Any] ) -> int: _A = BlenderbotSmallTokenizer(self.vocab_file , self.merges_file , **self.special_tokens_map ) _A = '''adapt act apte''' _A = ['''adapt''', '''act''', '''ap@@''', '''te'''] _A = tokenizer.tokenize(__lowerCAmelCase ) self.assertListEqual(__lowerCAmelCase , __lowerCAmelCase ) _A = [tokenizer.bos_token] + tokens + [tokenizer.eos_token] _A = [0, 1, 2, 3, 4, 5] self.assertListEqual(tokenizer.convert_tokens_to_ids(__lowerCAmelCase ) , __lowerCAmelCase ) def snake_case_ ( self : Dict ) -> Any: _A = BlenderbotSmallTokenizer.from_pretrained('''facebook/blenderbot-90M''' ) assert tok('''sam''' ).input_ids == [13_84] _A = '''I am a small frog.''' _A = tok([src_text] , padding=__lowerCAmelCase , truncation=__lowerCAmelCase )['''input_ids'''] _A = tok.batch_decode(__lowerCAmelCase , skip_special_tokens=__lowerCAmelCase , clean_up_tokenization_spaces=__lowerCAmelCase )[0] assert src_text != decoded # I wish it did! assert decoded == "i am a small frog ." def snake_case_ ( self : Optional[Any] ) -> Optional[Any]: _A = BlenderbotSmallTokenizer.from_pretrained('''facebook/blenderbot-90M''' ) _A = '''I am a small frog .''' _A = '''.''' _A = tok(__lowerCAmelCase )['''input_ids'''] _A = tok(__lowerCAmelCase )['''input_ids'''] assert encoded[-1] == encoded_dot[0]
code_codestyle: 2

style_context:
from __future__ import annotations


def depth_first_search(graph: dict, start: str) -> set[str]:
    explored, stack = set(start), [start]
    while stack:
        v = stack.pop()
        explored.add(v)
        # Differences from BFS:
        # 1) pop last element instead of first one
        # 2) add adjacent elements to stack without exploring them
        for adj in reversed(graph[v]):
            if adj not in explored:
                stack.append(adj)
    return explored


G = {
    "A": ["B", "C", "D"],
    "B": ["A", "D", "E"],
    "C": ["A", "F"],
    "D": ["B", "D"],
    "E": ["B", "F"],
    "F": ["C", "E", "G"],
    "G": ["F"],
}

if __name__ == "__main__":
    import doctest

    doctest.testmod()
    print(depth_first_search(G, "A"))
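For reference, a minimal usage check of the traversal above (a sketch, not part of the row's cell):

# Starting from "A", every vertex of G is reachable, so the whole vertex set
# comes back; vertices are first popped from the stack in the order
# A, B, D, E, F, C, G.
assert depth_first_search(G, "A") == {"A", "B", "C", "D", "E", "F", "G"}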
style_context_codestyle: 2
label: 1

Row 3

code:
from typing import List, Optional, Union import numpy as np import PIL import torch from PIL import Image from ...models import UNetaDConditionModel, VQModel from ...pipelines import DiffusionPipeline from ...pipelines.pipeline_utils import ImagePipelineOutput from ...schedulers import DDPMScheduler from ...utils import ( is_accelerate_available, is_accelerate_version, logging, randn_tensor, replace_example_docstring, ) UpperCAmelCase_ = logging.get_logger(__name__) # pylint: disable=invalid-name UpperCAmelCase_ = """ Examples: ```py >>> from diffusers import KandinskyV22Img2ImgPipeline, KandinskyV22PriorPipeline >>> from diffusers.utils import load_image >>> import torch >>> pipe_prior = KandinskyV22PriorPipeline.from_pretrained( ... \"kandinsky-community/kandinsky-2-2-prior\", torch_dtype=torch.float16 ... ) >>> pipe_prior.to(\"cuda\") >>> prompt = \"A red cartoon frog, 4k\" >>> image_emb, zero_image_emb = pipe_prior(prompt, return_dict=False) >>> pipe = KandinskyV22Img2ImgPipeline.from_pretrained( ... \"kandinsky-community/kandinsky-2-2-decoder\", torch_dtype=torch.float16 ... ) >>> pipe.to(\"cuda\") >>> init_image = load_image( ... \"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main\" ... \"/kandinsky/frog.png\" ... ) >>> image = pipe( ... image=init_image, ... image_embeds=image_emb, ... negative_image_embeds=zero_image_emb, ... height=768, ... width=768, ... num_inference_steps=100, ... strength=0.2, ... ).images >>> image[0].save(\"red_frog.png\") ``` """ def SCREAMING_SNAKE_CASE_ ( _snake_case :Dict , _snake_case :List[Any] , _snake_case :str=8 ) -> str: _A = height // scale_factor**2 if height % scale_factor**2 != 0: new_height += 1 _A = width // scale_factor**2 if width % scale_factor**2 != 0: new_width += 1 return new_height * scale_factor, new_width * scale_factor def SCREAMING_SNAKE_CASE_ ( _snake_case :Dict , _snake_case :Tuple=512 , _snake_case :Any=512 ) -> Optional[Any]: _A = pil_image.resize((w, h) , resample=Image.BICUBIC , reducing_gap=1 ) _A = np.array(pil_image.convert('''RGB''' ) ) _A = arr.astype(np.floataa ) / 127.5 - 1 _A = np.transpose(_snake_case , [2, 0, 1] ) _A = torch.from_numpy(_snake_case ).unsqueeze(0 ) return image class lowerCamelCase__ ( _A): """simple docstring""" def __init__( self : Optional[Any] , __lowerCAmelCase : UNetaDConditionModel , __lowerCAmelCase : DDPMScheduler , __lowerCAmelCase : VQModel , ) -> Tuple: super().__init__() self.register_modules( unet=__lowerCAmelCase , scheduler=__lowerCAmelCase , movq=__lowerCAmelCase , ) _A = 2 ** (len(self.movq.config.block_out_channels ) - 1) def snake_case_ ( self : Dict , __lowerCAmelCase : List[str] , __lowerCAmelCase : List[Any] , __lowerCAmelCase : List[str] ) -> Dict: # get the original timestep using init_timestep _A = min(int(num_inference_steps * strength ) , __lowerCAmelCase ) _A = max(num_inference_steps - init_timestep , 0 ) _A = self.scheduler.timesteps[t_start:] return timesteps, num_inference_steps - t_start def snake_case_ ( self : int , __lowerCAmelCase : List[Any] , __lowerCAmelCase : Any , __lowerCAmelCase : str , __lowerCAmelCase : Tuple , __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : Optional[int] , __lowerCAmelCase : List[str]=None ) -> List[Any]: if not isinstance(__lowerCAmelCase , (torch.Tensor, PIL.Image.Image, list) ): raise ValueError( f'''`image` has to be of type `torch.Tensor`, `PIL.Image.Image` or list but is {type(__lowerCAmelCase )}''' ) _A = image.to(device=__lowerCAmelCase , dtype=__lowerCAmelCase ) _A = batch_size * 
num_images_per_prompt if image.shape[1] == 4: _A = image else: if isinstance(__lowerCAmelCase , __lowerCAmelCase ) and len(__lowerCAmelCase ) != batch_size: raise ValueError( f'''You have passed a list of generators of length {len(__lowerCAmelCase )}, but requested an effective batch''' f''' size of {batch_size}. Make sure the batch size matches the length of the generators.''' ) elif isinstance(__lowerCAmelCase , __lowerCAmelCase ): _A = [ self.movq.encode(image[i : i + 1] ).latent_dist.sample(generator[i] ) for i in range(__lowerCAmelCase ) ] _A = torch.cat(__lowerCAmelCase , dim=0 ) else: _A = self.movq.encode(__lowerCAmelCase ).latent_dist.sample(__lowerCAmelCase ) _A = self.movq.config.scaling_factor * init_latents _A = torch.cat([init_latents] , dim=0 ) _A = init_latents.shape _A = randn_tensor(__lowerCAmelCase , generator=__lowerCAmelCase , device=__lowerCAmelCase , dtype=__lowerCAmelCase ) # get latents _A = self.scheduler.add_noise(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) _A = init_latents return latents def snake_case_ ( self : Optional[int] , __lowerCAmelCase : str=0 ) -> Tuple: if is_accelerate_available(): from accelerate import cpu_offload else: raise ImportError('''Please install accelerate via `pip install accelerate`''' ) _A = torch.device(f'''cuda:{gpu_id}''' ) _A = [ self.unet, self.movq, ] for cpu_offloaded_model in models: if cpu_offloaded_model is not None: cpu_offload(__lowerCAmelCase , __lowerCAmelCase ) def snake_case_ ( self : int , __lowerCAmelCase : str=0 ) -> List[str]: if is_accelerate_available() and is_accelerate_version('''>=''' , '''0.17.0.dev0''' ): from accelerate import cpu_offload_with_hook else: raise ImportError('''`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.''' ) _A = torch.device(f'''cuda:{gpu_id}''' ) if self.device.type != "cpu": self.to('''cpu''' , silence_dtype_warnings=__lowerCAmelCase ) torch.cuda.empty_cache() # otherwise we don't see the memory savings (but they probably exist) _A = None for cpu_offloaded_model in [self.unet, self.movq]: _A , _A = cpu_offload_with_hook(__lowerCAmelCase , __lowerCAmelCase , prev_module_hook=__lowerCAmelCase ) # We'll offload the last model manually. 
_A = hook @property # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device def snake_case_ ( self : Union[str, Any] ) -> Any: if not hasattr(self.unet , '''_hf_hook''' ): return self.device for module in self.unet.modules(): if ( hasattr(__lowerCAmelCase , '''_hf_hook''' ) and hasattr(module._hf_hook , '''execution_device''' ) and module._hf_hook.execution_device is not None ): return torch.device(module._hf_hook.execution_device ) return self.device @torch.no_grad() @replace_example_docstring(__lowerCAmelCase ) def __call__( self : Any , __lowerCAmelCase : Union[torch.FloatTensor, List[torch.FloatTensor]] , __lowerCAmelCase : Union[torch.FloatTensor, PIL.Image.Image, List[torch.FloatTensor], List[PIL.Image.Image]] , __lowerCAmelCase : Union[torch.FloatTensor, List[torch.FloatTensor]] , __lowerCAmelCase : int = 5_12 , __lowerCAmelCase : int = 5_12 , __lowerCAmelCase : int = 1_00 , __lowerCAmelCase : float = 4.0 , __lowerCAmelCase : float = 0.3 , __lowerCAmelCase : int = 1 , __lowerCAmelCase : Optional[Union[torch.Generator, List[torch.Generator]]] = None , __lowerCAmelCase : Optional[str] = "pil" , __lowerCAmelCase : bool = True , ) -> List[Any]: _A = self._execution_device _A = guidance_scale > 1.0 if isinstance(__lowerCAmelCase , __lowerCAmelCase ): _A = torch.cat(__lowerCAmelCase , dim=0 ) _A = image_embeds.shape[0] if isinstance(__lowerCAmelCase , __lowerCAmelCase ): _A = torch.cat(__lowerCAmelCase , dim=0 ) if do_classifier_free_guidance: _A = image_embeds.repeat_interleave(__lowerCAmelCase , dim=0 ) _A = negative_image_embeds.repeat_interleave(__lowerCAmelCase , dim=0 ) _A = torch.cat([negative_image_embeds, image_embeds] , dim=0 ).to(dtype=self.unet.dtype , device=__lowerCAmelCase ) if not isinstance(__lowerCAmelCase , __lowerCAmelCase ): _A = [image] if not all(isinstance(__lowerCAmelCase , (PIL.Image.Image, torch.Tensor) ) for i in image ): raise ValueError( f'''Input is in incorrect format: {[type(__lowerCAmelCase ) for i in image]}. 
Currently, we only support PIL image and pytorch tensor''' ) _A = torch.cat([prepare_image(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) for i in image] , dim=0 ) _A = image.to(dtype=image_embeds.dtype , device=__lowerCAmelCase ) _A = self.movq.encode(__lowerCAmelCase )['''latents'''] _A = latents.repeat_interleave(__lowerCAmelCase , dim=0 ) self.scheduler.set_timesteps(__lowerCAmelCase , device=__lowerCAmelCase ) _A , _A = self.get_timesteps(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) _A = timesteps[:1].repeat(batch_size * num_images_per_prompt ) _A , _A = downscale_height_and_width(__lowerCAmelCase , __lowerCAmelCase , self.movq_scale_factor ) _A = self.prepare_latents( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , image_embeds.dtype , __lowerCAmelCase , __lowerCAmelCase ) for i, t in enumerate(self.progress_bar(__lowerCAmelCase ) ): # expand the latents if we are doing classifier free guidance _A = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents _A = {'''image_embeds''': image_embeds} _A = self.unet( sample=__lowerCAmelCase , timestep=__lowerCAmelCase , encoder_hidden_states=__lowerCAmelCase , added_cond_kwargs=__lowerCAmelCase , return_dict=__lowerCAmelCase , )[0] if do_classifier_free_guidance: _A , _A = noise_pred.split(latents.shape[1] , dim=1 ) _A , _A = noise_pred.chunk(2 ) _A , _A = variance_pred.chunk(2 ) _A = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond) _A = torch.cat([noise_pred, variance_pred_text] , dim=1 ) if not ( hasattr(self.scheduler.config , '''variance_type''' ) and self.scheduler.config.variance_type in ["learned", "learned_range"] ): _A , _A = noise_pred.split(latents.shape[1] , dim=1 ) # compute the previous noisy sample x_t -> x_t-1 _A = self.scheduler.step( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , generator=__lowerCAmelCase , )[0] # post-processing _A = self.movq.decode(__lowerCAmelCase , force_not_quantize=__lowerCAmelCase )['''sample'''] if output_type not in ["pt", "np", "pil"]: raise ValueError(f'''Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}''' ) if output_type in ["np", "pil"]: _A = image * 0.5 + 0.5 _A = image.clamp(0 , 1 ) _A = image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy() if output_type == "pil": _A = self.numpy_to_pil(__lowerCAmelCase ) if not return_dict: return (image,) return ImagePipelineOutput(images=__lowerCAmelCase )
code_codestyle: 2

style_context:
from .configuration_bert_masked import MaskedBertConfig
from .modeling_bert_masked import (
    MaskedBertForMultipleChoice,
    MaskedBertForQuestionAnswering,
    MaskedBertForSequenceClassification,
    MaskedBertForTokenClassification,
    MaskedBertModel,
)
from .modules import *
style_context_codestyle: 2
label: 1

Row 4

code:
import json import os import re import unicodedata from json.encoder import INFINITY from typing import Any, Dict, List, Optional, Tuple, Union import numpy as np import regex from ...tokenization_utils import AddedToken, PreTrainedTokenizer from ...tokenization_utils_base import BatchEncoding from ...utils import TensorType, is_flax_available, is_tf_available, is_torch_available, logging from ...utils.generic import _is_jax, _is_numpy UpperCAmelCase_ = logging.get_logger(__name__) UpperCAmelCase_ = { """artists_file""": """artists.json""", """lyrics_file""": """lyrics.json""", """genres_file""": """genres.json""", } UpperCAmelCase_ = { """artists_file""": { """jukebox""": """https://huggingface.co/ArthurZ/jukebox/blob/main/artists.json""", }, """genres_file""": { """jukebox""": """https://huggingface.co/ArthurZ/jukebox/blob/main/genres.json""", }, """lyrics_file""": { """jukebox""": """https://huggingface.co/ArthurZ/jukebox/blob/main/lyrics.json""", }, } UpperCAmelCase_ = { """jukebox""": 5_1_2, } class lowerCamelCase__ ( _A): """simple docstring""" a__ : Any = VOCAB_FILES_NAMES a__ : str = PRETRAINED_VOCAB_FILES_MAP a__ : Any = PRETRAINED_LYRIC_TOKENS_SIZES a__ : str = ["input_ids", "attention_mask"] def __init__( self : int , __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : Any , __lowerCAmelCase : Tuple , __lowerCAmelCase : int=["v3", "v2", "v2"] , __lowerCAmelCase : str=5_12 , __lowerCAmelCase : Tuple=5 , __lowerCAmelCase : str="<|endoftext|>" , **__lowerCAmelCase : Tuple , ) -> Union[str, Any]: _A = AddedToken(__lowerCAmelCase , lstrip=__lowerCAmelCase , rstrip=__lowerCAmelCase ) if isinstance(__lowerCAmelCase , __lowerCAmelCase ) else unk_token super().__init__( unk_token=__lowerCAmelCase , n_genres=__lowerCAmelCase , version=__lowerCAmelCase , max_n_lyric_tokens=__lowerCAmelCase , **__lowerCAmelCase , ) _A = version _A = max_n_lyric_tokens _A = n_genres with open(__lowerCAmelCase , encoding='''utf-8''' ) as vocab_handle: _A = json.load(__lowerCAmelCase ) with open(__lowerCAmelCase , encoding='''utf-8''' ) as vocab_handle: _A = json.load(__lowerCAmelCase ) with open(__lowerCAmelCase , encoding='''utf-8''' ) as vocab_handle: _A = json.load(__lowerCAmelCase ) _A = R'''[^A-Za-z0-9.,:;!?\-\'\"()\[\] \t\n]+''' # In v2, we had a n_vocab=80 and in v3 we missed + and so n_vocab=79 of characters. 
if len(self.lyrics_encoder ) == 79: _A = oov.replace(R'''\-\'''' , R'''\-+\'''' ) _A = regex.compile(__lowerCAmelCase ) _A = {v: k for k, v in self.artists_encoder.items()} _A = {v: k for k, v in self.genres_encoder.items()} _A = {v: k for k, v in self.lyrics_encoder.items()} @property def snake_case_ ( self : str ) -> Tuple: return len(self.artists_encoder ) + len(self.genres_encoder ) + len(self.lyrics_encoder ) def snake_case_ ( self : Dict ) -> str: return dict(self.artists_encoder , self.genres_encoder , self.lyrics_encoder ) def snake_case_ ( self : str , __lowerCAmelCase : int , __lowerCAmelCase : Tuple , __lowerCAmelCase : str ) -> str: _A = [self.artists_encoder.get(__lowerCAmelCase , 0 ) for artist in list_artists] for genres in range(len(__lowerCAmelCase ) ): _A = [self.genres_encoder.get(__lowerCAmelCase , 0 ) for genre in list_genres[genres]] _A = list_genres[genres] + [-1] * (self.n_genres - len(list_genres[genres] )) _A = [[self.lyrics_encoder.get(__lowerCAmelCase , 0 ) for character in list_lyrics[0]], [], []] return artists_id, list_genres, lyric_ids def snake_case_ ( self : int , __lowerCAmelCase : Tuple ) -> Optional[int]: return list(__lowerCAmelCase ) def snake_case_ ( self : List[str] , __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : Tuple , **__lowerCAmelCase : int ) -> Union[str, Any]: _A , _A , _A = self.prepare_for_tokenization(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) _A = self._tokenize(__lowerCAmelCase ) return artist, genre, lyrics def snake_case_ ( self : str , __lowerCAmelCase : str , __lowerCAmelCase : str , __lowerCAmelCase : str , __lowerCAmelCase : bool = False ) -> Tuple[str, str, str, Dict[str, Any]]: for idx in range(len(self.version ) ): if self.version[idx] == "v3": _A = artists[idx].lower() _A = [genres[idx].lower()] else: _A = self._normalize(artists[idx] ) + '''.v2''' _A = [ self._normalize(__lowerCAmelCase ) + '''.v2''' for genre in genres[idx].split('''_''' ) ] # split is for the full dictionary with combined genres if self.version[0] == "v2": _A = regex.compile(R'''[^A-Za-z0-9.,:;!?\-\'\"()\[\] \t\n]+''' ) _A = '''ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789.,:;!?-+\'\"()[] \t\n''' _A = {vocab[index]: index + 1 for index in range(len(__lowerCAmelCase ) )} _A = 0 _A = len(__lowerCAmelCase ) + 1 _A = self.vocab _A = {v: k for k, v in self.vocab.items()} _A = '''''' else: _A = regex.compile(R'''[^A-Za-z0-9.,:;!?\-+\'\"()\[\] \t\n]+''' ) _A = self._run_strip_accents(__lowerCAmelCase ) _A = lyrics.replace('''\\''' , '''\n''' ) _A = self.out_of_vocab.sub('''''' , __lowerCAmelCase ), [], [] return artists, genres, lyrics def snake_case_ ( self : str , __lowerCAmelCase : Tuple ) -> Union[str, Any]: _A = unicodedata.normalize('''NFD''' , __lowerCAmelCase ) _A = [] for char in text: _A = unicodedata.category(__lowerCAmelCase ) if cat == "Mn": continue output.append(__lowerCAmelCase ) return "".join(__lowerCAmelCase ) def snake_case_ ( self : Dict , __lowerCAmelCase : str ) -> str: _A = ( [chr(__lowerCAmelCase ) for i in range(ord('''a''' ) , ord('''z''' ) + 1 )] + [chr(__lowerCAmelCase ) for i in range(ord('''A''' ) , ord('''Z''' ) + 1 )] + [chr(__lowerCAmelCase ) for i in range(ord('''0''' ) , ord('''9''' ) + 1 )] + ['''.'''] ) _A = frozenset(__lowerCAmelCase ) _A = re.compile(R'''_+''' ) _A = ''''''.join([c if c in accepted else '''_''' for c in text.lower()] ) _A = pattern.sub('''_''' , __lowerCAmelCase ).strip('''_''' ) return text def snake_case_ ( self : Any , 
__lowerCAmelCase : List[str] ) -> str: return " ".join(__lowerCAmelCase ) def snake_case_ ( self : List[Any] , __lowerCAmelCase : Any , __lowerCAmelCase : Optional[Union[str, TensorType]] = None , __lowerCAmelCase : bool = False ) -> Any: # Convert to TensorType if not isinstance(__lowerCAmelCase , __lowerCAmelCase ): _A = TensorType(__lowerCAmelCase ) # Get a function reference for the correct framework if tensor_type == TensorType.TENSORFLOW: if not is_tf_available(): raise ImportError( '''Unable to convert output to TensorFlow tensors format, TensorFlow is not installed.''' ) import tensorflow as tf _A = tf.constant _A = tf.is_tensor elif tensor_type == TensorType.PYTORCH: if not is_torch_available(): raise ImportError('''Unable to convert output to PyTorch tensors format, PyTorch is not installed.''' ) import torch _A = torch.tensor _A = torch.is_tensor elif tensor_type == TensorType.JAX: if not is_flax_available(): raise ImportError('''Unable to convert output to JAX tensors format, JAX is not installed.''' ) import jax.numpy as jnp # noqa: F811 _A = jnp.array _A = _is_jax else: _A = np.asarray _A = _is_numpy # Do the tensor conversion in batch try: if prepend_batch_axis: _A = [inputs] if not is_tensor(__lowerCAmelCase ): _A = as_tensor(__lowerCAmelCase ) except: # noqa E722 raise ValueError( '''Unable to create tensor, you should probably activate truncation and/or padding ''' '''with \'padding=True\' \'truncation=True\' to have batched tensors with the same length.''' ) return inputs def __call__( self : Optional[Any] , __lowerCAmelCase : List[str] , __lowerCAmelCase : Dict , __lowerCAmelCase : List[str]="" , __lowerCAmelCase : Any="pt" ) -> BatchEncoding: _A = [0, 0, 0] _A = [artist] * len(self.version ) _A = [genres] * len(self.version ) _A , _A , _A = self.tokenize(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) _A , _A , _A = self._convert_token_to_id(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) _A = [-INFINITY] * len(full_tokens[-1] ) _A = [ self.convert_to_tensors( [input_ids + [artists_id[i]] + genres_ids[i] + full_tokens[i]] , tensor_type=__lowerCAmelCase ) for i in range(len(self.version ) ) ] return BatchEncoding({'''input_ids''': input_ids, '''attention_masks''': attention_masks} ) def snake_case_ ( self : Optional[int] , __lowerCAmelCase : str , __lowerCAmelCase : Optional[str] = None ) -> Tuple[str]: if not os.path.isdir(__lowerCAmelCase ): logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' ) return _A = os.path.join( __lowerCAmelCase , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''artists_file'''] ) with open(__lowerCAmelCase , '''w''' , encoding='''utf-8''' ) as f: f.write(json.dumps(self.artists_encoder , ensure_ascii=__lowerCAmelCase ) ) _A = os.path.join( __lowerCAmelCase , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''genres_file'''] ) with open(__lowerCAmelCase , '''w''' , encoding='''utf-8''' ) as f: f.write(json.dumps(self.genres_encoder , ensure_ascii=__lowerCAmelCase ) ) _A = os.path.join( __lowerCAmelCase , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''lyrics_file'''] ) with open(__lowerCAmelCase , '''w''' , encoding='''utf-8''' ) as f: f.write(json.dumps(self.lyrics_encoder , ensure_ascii=__lowerCAmelCase ) ) return (artists_file, genres_file, lyrics_file) def snake_case_ ( self : Tuple , __lowerCAmelCase : List[Any] , __lowerCAmelCase : Optional[int] , __lowerCAmelCase : List[str] ) -> Union[str, Any]: 
_A = self.artists_decoder.get(__lowerCAmelCase ) _A = [self.genres_decoder.get(__lowerCAmelCase ) for genre in genres_index] _A = [self.lyrics_decoder.get(__lowerCAmelCase ) for character in lyric_index] return artist, genres, lyrics
code_codestyle: 2

style_context:
import warnings from ...configuration_utils import PretrainedConfig from ...utils import logging UpperCAmelCase_ = logging.get_logger(__name__) UpperCAmelCase_ = { """xlnet-base-cased""": """https://huggingface.co/xlnet-base-cased/resolve/main/config.json""", """xlnet-large-cased""": """https://huggingface.co/xlnet-large-cased/resolve/main/config.json""", } class lowerCamelCase__ ( _A): """simple docstring""" a__ : Any = "xlnet" a__ : Dict = ["mems"] a__ : List[str] = { "n_token": "vocab_size", # Backward compatibility "hidden_size": "d_model", "num_attention_heads": "n_head", "num_hidden_layers": "n_layer", } def __init__( self : int , __lowerCAmelCase : Dict=3_20_00 , __lowerCAmelCase : List[str]=10_24 , __lowerCAmelCase : Dict=24 , __lowerCAmelCase : Optional[Any]=16 , __lowerCAmelCase : Dict=40_96 , __lowerCAmelCase : Any="gelu" , __lowerCAmelCase : int=True , __lowerCAmelCase : List[str]="bi" , __lowerCAmelCase : Dict=0.02 , __lowerCAmelCase : Union[str, Any]=1E-12 , __lowerCAmelCase : Optional[Any]=0.1 , __lowerCAmelCase : Optional[Any]=5_12 , __lowerCAmelCase : List[str]=None , __lowerCAmelCase : Tuple=True , __lowerCAmelCase : Tuple=False , __lowerCAmelCase : Optional[Any]=False , __lowerCAmelCase : Union[str, Any]=-1 , __lowerCAmelCase : Optional[Any]=False , __lowerCAmelCase : Any="last" , __lowerCAmelCase : List[Any]=True , __lowerCAmelCase : Tuple="tanh" , __lowerCAmelCase : int=0.1 , __lowerCAmelCase : str=5 , __lowerCAmelCase : str=5 , __lowerCAmelCase : List[str]=5 , __lowerCAmelCase : List[str]=1 , __lowerCAmelCase : Optional[int]=2 , **__lowerCAmelCase : List[str] , ) -> Tuple: _A = vocab_size _A = d_model _A = n_layer _A = n_head if d_model % n_head != 0: raise ValueError(f'''\'d_model % n_head\' ({d_model % n_head}) should be equal to 0''' ) if "d_head" in kwargs: if kwargs["d_head"] != d_model // n_head: raise ValueError( f'''`d_head` ({kwargs['d_head']}) should be equal to `d_model // n_head` ({d_model // n_head})''' ) _A = d_model // n_head _A = ff_activation _A = d_inner _A = untie_r _A = attn_type _A = initializer_range _A = layer_norm_eps _A = dropout _A = mem_len _A = reuse_len _A = bi_data _A = clamp_len _A = same_length _A = summary_type _A = summary_use_proj _A = summary_activation _A = summary_last_dropout _A = start_n_top _A = end_n_top _A = bos_token_id _A = pad_token_id _A = eos_token_id if "use_cache" in kwargs: warnings.warn( '''The `use_cache` argument is deprecated and will be removed in a future version, use `use_mems_eval`''' ''' instead.''' , __lowerCAmelCase , ) _A = kwargs['''use_cache'''] _A = use_mems_eval _A = use_mems_train super().__init__(pad_token_id=__lowerCAmelCase , bos_token_id=__lowerCAmelCase , eos_token_id=__lowerCAmelCase , **__lowerCAmelCase ) @property def snake_case_ ( self : Optional[Any] ) -> Union[str, Any]: logger.info(f'''The model {self.model_type} is one of the few models that has no sequence length limit.''' ) return -1 @max_position_embeddings.setter def snake_case_ ( self : Tuple , __lowerCAmelCase : Optional[Any] ) -> Dict: # Message copied from Transformer-XL documentation raise NotImplementedError( f'''The model {self.model_type} is one of the few models that has no sequence length limit.''' )
style_context_codestyle: 2
label: 1

Row 5

code:
from typing import Optional import torch import torch.utils.checkpoint from torch import Tensor, nn from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss from ...activations import ACTaFN from ...modeling_outputs import ( BackboneOutput, BaseModelOutputWithNoAttention, BaseModelOutputWithPoolingAndNoAttention, ImageClassifierOutputWithNoAttention, ) from ...modeling_utils import PreTrainedModel from ...utils import ( add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging, replace_return_docstrings, ) from ...utils.backbone_utils import BackboneMixin from .configuration_resnet import ResNetConfig UpperCAmelCase_ = logging.get_logger(__name__) # General docstring UpperCAmelCase_ = """ResNetConfig""" # Base docstring UpperCAmelCase_ = """microsoft/resnet-50""" UpperCAmelCase_ = [1, 2_0_4_8, 7, 7] # Image classification docstring UpperCAmelCase_ = """microsoft/resnet-50""" UpperCAmelCase_ = """tiger cat""" UpperCAmelCase_ = [ """microsoft/resnet-50""", # See all resnet models at https://huggingface.co/models?filter=resnet ] class lowerCamelCase__ ( nn.Module): """simple docstring""" def __init__( self : Union[str, Any] , __lowerCAmelCase : int , __lowerCAmelCase : int , __lowerCAmelCase : int = 3 , __lowerCAmelCase : int = 1 , __lowerCAmelCase : str = "relu" ) -> Tuple: super().__init__() _A = nn.Convad( __lowerCAmelCase , __lowerCAmelCase , kernel_size=__lowerCAmelCase , stride=__lowerCAmelCase , padding=kernel_size // 2 , bias=__lowerCAmelCase ) _A = nn.BatchNormad(__lowerCAmelCase ) _A = ACTaFN[activation] if activation is not None else nn.Identity() def snake_case_ ( self : Tuple , __lowerCAmelCase : Tensor ) -> Tensor: _A = self.convolution(__lowerCAmelCase ) _A = self.normalization(__lowerCAmelCase ) _A = self.activation(__lowerCAmelCase ) return hidden_state class lowerCamelCase__ ( nn.Module): """simple docstring""" def __init__( self : Optional[int] , __lowerCAmelCase : ResNetConfig ) -> List[Any]: super().__init__() _A = ResNetConvLayer( config.num_channels , config.embedding_size , kernel_size=7 , stride=2 , activation=config.hidden_act ) _A = nn.MaxPoolad(kernel_size=3 , stride=2 , padding=1 ) _A = config.num_channels def snake_case_ ( self : List[str] , __lowerCAmelCase : Tensor ) -> Tensor: _A = pixel_values.shape[1] if num_channels != self.num_channels: raise ValueError( '''Make sure that the channel dimension of the pixel values match with the one set in the configuration.''' ) _A = self.embedder(__lowerCAmelCase ) _A = self.pooler(__lowerCAmelCase ) return embedding class lowerCamelCase__ ( nn.Module): """simple docstring""" def __init__( self : str , __lowerCAmelCase : int , __lowerCAmelCase : int , __lowerCAmelCase : int = 2 ) -> List[str]: super().__init__() _A = nn.Convad(__lowerCAmelCase , __lowerCAmelCase , kernel_size=1 , stride=__lowerCAmelCase , bias=__lowerCAmelCase ) _A = nn.BatchNormad(__lowerCAmelCase ) def snake_case_ ( self : List[Any] , __lowerCAmelCase : Tensor ) -> Tensor: _A = self.convolution(__lowerCAmelCase ) _A = self.normalization(__lowerCAmelCase ) return hidden_state class lowerCamelCase__ ( nn.Module): """simple docstring""" def __init__( self : Optional[int] , __lowerCAmelCase : int , __lowerCAmelCase : int , __lowerCAmelCase : int = 1 , __lowerCAmelCase : str = "relu" ) -> Dict: super().__init__() _A = in_channels != out_channels or stride != 1 _A = ( ResNetShortCut(__lowerCAmelCase , __lowerCAmelCase , stride=__lowerCAmelCase ) if should_apply_shortcut else nn.Identity() ) _A = nn.Sequential( 
ResNetConvLayer(__lowerCAmelCase , __lowerCAmelCase , stride=__lowerCAmelCase ) , ResNetConvLayer(__lowerCAmelCase , __lowerCAmelCase , activation=__lowerCAmelCase ) , ) _A = ACTaFN[activation] def snake_case_ ( self : Any , __lowerCAmelCase : Any ) -> List[str]: _A = hidden_state _A = self.layer(__lowerCAmelCase ) _A = self.shortcut(__lowerCAmelCase ) hidden_state += residual _A = self.activation(__lowerCAmelCase ) return hidden_state class lowerCamelCase__ ( nn.Module): """simple docstring""" def __init__( self : Tuple , __lowerCAmelCase : int , __lowerCAmelCase : int , __lowerCAmelCase : int = 1 , __lowerCAmelCase : str = "relu" , __lowerCAmelCase : int = 4 ) -> Optional[Any]: super().__init__() _A = in_channels != out_channels or stride != 1 _A = out_channels // reduction _A = ( ResNetShortCut(__lowerCAmelCase , __lowerCAmelCase , stride=__lowerCAmelCase ) if should_apply_shortcut else nn.Identity() ) _A = nn.Sequential( ResNetConvLayer(__lowerCAmelCase , __lowerCAmelCase , kernel_size=1 ) , ResNetConvLayer(__lowerCAmelCase , __lowerCAmelCase , stride=__lowerCAmelCase ) , ResNetConvLayer(__lowerCAmelCase , __lowerCAmelCase , kernel_size=1 , activation=__lowerCAmelCase ) , ) _A = ACTaFN[activation] def snake_case_ ( self : Any , __lowerCAmelCase : List[str] ) -> Optional[Any]: _A = hidden_state _A = self.layer(__lowerCAmelCase ) _A = self.shortcut(__lowerCAmelCase ) hidden_state += residual _A = self.activation(__lowerCAmelCase ) return hidden_state class lowerCamelCase__ ( nn.Module): """simple docstring""" def __init__( self : str , __lowerCAmelCase : ResNetConfig , __lowerCAmelCase : int , __lowerCAmelCase : int , __lowerCAmelCase : int = 2 , __lowerCAmelCase : int = 2 , ) -> Tuple: super().__init__() _A = ResNetBottleNeckLayer if config.layer_type == '''bottleneck''' else ResNetBasicLayer _A = nn.Sequential( # downsampling is done in the first layer with stride of 2 layer(__lowerCAmelCase , __lowerCAmelCase , stride=__lowerCAmelCase , activation=config.hidden_act ) , *[layer(__lowerCAmelCase , __lowerCAmelCase , activation=config.hidden_act ) for _ in range(depth - 1 )] , ) def snake_case_ ( self : Any , __lowerCAmelCase : Tensor ) -> Tensor: _A = input for layer in self.layers: _A = layer(__lowerCAmelCase ) return hidden_state class lowerCamelCase__ ( nn.Module): """simple docstring""" def __init__( self : Dict , __lowerCAmelCase : ResNetConfig ) -> Optional[Any]: super().__init__() _A = nn.ModuleList([] ) # based on `downsample_in_first_stage` the first layer of the first stage may or may not downsample the input self.stages.append( ResNetStage( __lowerCAmelCase , config.embedding_size , config.hidden_sizes[0] , stride=2 if config.downsample_in_first_stage else 1 , depth=config.depths[0] , ) ) _A = zip(config.hidden_sizes , config.hidden_sizes[1:] ) for (in_channels, out_channels), depth in zip(__lowerCAmelCase , config.depths[1:] ): self.stages.append(ResNetStage(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , depth=__lowerCAmelCase ) ) def snake_case_ ( self : List[Any] , __lowerCAmelCase : Tensor , __lowerCAmelCase : bool = False , __lowerCAmelCase : bool = True ) -> BaseModelOutputWithNoAttention: _A = () if output_hidden_states else None for stage_module in self.stages: if output_hidden_states: _A = hidden_states + (hidden_state,) _A = stage_module(__lowerCAmelCase ) if output_hidden_states: _A = hidden_states + (hidden_state,) if not return_dict: return tuple(v for v in [hidden_state, hidden_states] if v is not None ) return BaseModelOutputWithNoAttention( 
last_hidden_state=__lowerCAmelCase , hidden_states=__lowerCAmelCase , ) class lowerCamelCase__ ( _A): """simple docstring""" a__ : Dict = ResNetConfig a__ : Union[str, Any] = "resnet" a__ : Dict = "pixel_values" a__ : Dict = True def snake_case_ ( self : List[Any] , __lowerCAmelCase : List[str] ) -> Optional[Any]: if isinstance(__lowerCAmelCase , nn.Convad ): nn.init.kaiming_normal_(module.weight , mode='''fan_out''' , nonlinearity='''relu''' ) elif isinstance(__lowerCAmelCase , (nn.BatchNormad, nn.GroupNorm) ): nn.init.constant_(module.weight , 1 ) nn.init.constant_(module.bias , 0 ) def snake_case_ ( self : List[str] , __lowerCAmelCase : str , __lowerCAmelCase : Tuple=False ) -> Any: if isinstance(__lowerCAmelCase , __lowerCAmelCase ): _A = value UpperCAmelCase_ = r""" This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior. Parameters: config ([`ResNetConfig`]): Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights. """ UpperCAmelCase_ = r""" Args: pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`): Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See [`ConvNextImageProcessor.__call__`] for details. output_hidden_states (`bool`, *optional*): Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for more detail. return_dict (`bool`, *optional*): Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. """ @add_start_docstrings( "The bare ResNet model outputting raw features without any specific head on top." , _A , ) class lowerCamelCase__ ( _A): """simple docstring""" def __init__( self : str , __lowerCAmelCase : int ) -> Tuple: super().__init__(__lowerCAmelCase ) _A = config _A = ResNetEmbeddings(__lowerCAmelCase ) _A = ResNetEncoder(__lowerCAmelCase ) _A = nn.AdaptiveAvgPoolad((1, 1) ) # Initialize weights and apply final processing self.post_init() @add_start_docstrings_to_model_forward(__lowerCAmelCase ) @add_code_sample_docstrings( checkpoint=_CHECKPOINT_FOR_DOC , output_type=__lowerCAmelCase , config_class=_CONFIG_FOR_DOC , modality='''vision''' , expected_output=_EXPECTED_OUTPUT_SHAPE , ) def snake_case_ ( self : List[str] , __lowerCAmelCase : Tensor , __lowerCAmelCase : Optional[bool] = None , __lowerCAmelCase : Optional[bool] = None ) -> BaseModelOutputWithPoolingAndNoAttention: _A = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) _A = return_dict if return_dict is not None else self.config.use_return_dict _A = self.embedder(__lowerCAmelCase ) _A = self.encoder( __lowerCAmelCase , output_hidden_states=__lowerCAmelCase , return_dict=__lowerCAmelCase ) _A = encoder_outputs[0] _A = self.pooler(__lowerCAmelCase ) if not return_dict: return (last_hidden_state, pooled_output) + encoder_outputs[1:] return BaseModelOutputWithPoolingAndNoAttention( last_hidden_state=__lowerCAmelCase , pooler_output=__lowerCAmelCase , hidden_states=encoder_outputs.hidden_states , ) @add_start_docstrings( "\n ResNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. 
for\n ImageNet.\n " , _A , ) class lowerCamelCase__ ( _A): """simple docstring""" def __init__( self : Optional[Any] , __lowerCAmelCase : Dict ) -> Optional[Any]: super().__init__(__lowerCAmelCase ) _A = config.num_labels _A = ResNetModel(__lowerCAmelCase ) # classification head _A = nn.Sequential( nn.Flatten() , nn.Linear(config.hidden_sizes[-1] , config.num_labels ) if config.num_labels > 0 else nn.Identity() , ) # initialize weights and apply final processing self.post_init() @add_start_docstrings_to_model_forward(__lowerCAmelCase ) @add_code_sample_docstrings( checkpoint=_IMAGE_CLASS_CHECKPOINT , output_type=__lowerCAmelCase , config_class=_CONFIG_FOR_DOC , expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT , ) def snake_case_ ( self : Union[str, Any] , __lowerCAmelCase : Optional[torch.FloatTensor] = None , __lowerCAmelCase : Optional[torch.LongTensor] = None , __lowerCAmelCase : Optional[bool] = None , __lowerCAmelCase : Optional[bool] = None , ) -> ImageClassifierOutputWithNoAttention: _A = return_dict if return_dict is not None else self.config.use_return_dict _A = self.resnet(__lowerCAmelCase , output_hidden_states=__lowerCAmelCase , return_dict=__lowerCAmelCase ) _A = outputs.pooler_output if return_dict else outputs[1] _A = self.classifier(__lowerCAmelCase ) _A = None if labels is not None: if self.config.problem_type is None: if self.num_labels == 1: _A = '''regression''' elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int): _A = '''single_label_classification''' else: _A = '''multi_label_classification''' if self.config.problem_type == "regression": _A = MSELoss() if self.num_labels == 1: _A = loss_fct(logits.squeeze() , labels.squeeze() ) else: _A = loss_fct(__lowerCAmelCase , __lowerCAmelCase ) elif self.config.problem_type == "single_label_classification": _A = CrossEntropyLoss() _A = loss_fct(logits.view(-1 , self.num_labels ) , labels.view(-1 ) ) elif self.config.problem_type == "multi_label_classification": _A = BCEWithLogitsLoss() _A = loss_fct(__lowerCAmelCase , __lowerCAmelCase ) if not return_dict: _A = (logits,) + outputs[2:] return (loss,) + output if loss is not None else output return ImageClassifierOutputWithNoAttention(loss=__lowerCAmelCase , logits=__lowerCAmelCase , hidden_states=outputs.hidden_states ) @add_start_docstrings( "\n ResNet backbone, to be used with frameworks like DETR and MaskFormer.\n " , _A , ) class lowerCamelCase__ ( _A , _A): """simple docstring""" def __init__( self : int , __lowerCAmelCase : str ) -> Union[str, Any]: super().__init__(__lowerCAmelCase ) super()._init_backbone(__lowerCAmelCase ) _A = [config.embedding_size] + config.hidden_sizes _A = ResNetEmbeddings(__lowerCAmelCase ) _A = ResNetEncoder(__lowerCAmelCase ) # initialize weights and apply final processing self.post_init() @add_start_docstrings_to_model_forward(__lowerCAmelCase ) @replace_return_docstrings(output_type=__lowerCAmelCase , config_class=_CONFIG_FOR_DOC ) def snake_case_ ( self : Dict , __lowerCAmelCase : Tensor , __lowerCAmelCase : Optional[bool] = None , __lowerCAmelCase : Optional[bool] = None ) -> BackboneOutput: _A = return_dict if return_dict is not None else self.config.use_return_dict _A = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) _A = self.embedder(__lowerCAmelCase ) _A = self.encoder(__lowerCAmelCase , output_hidden_states=__lowerCAmelCase , return_dict=__lowerCAmelCase ) _A = outputs.hidden_states _A = () for idx, stage in enumerate(self.stage_names ): if stage in 
self.out_features: feature_maps += (hidden_states[idx],) if not return_dict: _A = (feature_maps,) if output_hidden_states: output += (outputs.hidden_states,) return output return BackboneOutput( feature_maps=__lowerCAmelCase , hidden_states=outputs.hidden_states if output_hidden_states else None , attentions=__lowerCAmelCase , )
code_codestyle: 2

style_context:
def base16_encode(data: bytes) -> str:
    return "".join([hex(byte)[2:].zfill(2).upper() for byte in list(data)])


def base16_decode(data: str) -> bytes:
    # Check data validity, following RFC3548
    # https://www.ietf.org/rfc/rfc3548.txt
    if (len(data) % 2) != 0:
        raise ValueError(
            "Base16 encoded data is invalid: Data does not have an even number of hex digits."
        )
    # Check the character set - the standard base16 alphabet
    # is uppercase according to RFC3548 section 6
    if not set(data) <= set("0123456789ABCDEF"):
        raise ValueError(
            "Base16 encoded data is invalid: Data is not uppercase hex or it contains invalid characters."
        )
    # For every two hexadecimal digits (= a byte), turn it into an integer.
    # Then, string the result together into bytes, and return it.
    return bytes(int(data[i] + data[i + 1], 16) for i in range(0, len(data), 2))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
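A minimal round-trip check of the two helpers above (a sketch, not part of the row's cell):

encoded = base16_encode(b"Hello")  # each byte becomes two uppercase hex digits
assert encoded == "48656C6C6F"
assert base16_decode(encoded) == b"Hello"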
style_context_codestyle: 2
label: 1

Row 6

code:
import argparse import glob import importlib.util import os import re import black from doc_builder.style_doc import style_docstrings_in_code # All paths are set with the intent you should run this script from the root of the repo with the command # python utils/check_copies.py UpperCAmelCase_ = """src/diffusers""" UpperCAmelCase_ = """.""" # This is to make sure the diffusers module imported is the one in the repo. UpperCAmelCase_ = importlib.util.spec_from_file_location( """diffusers""", os.path.join(DIFFUSERS_PATH, """__init__.py"""), submodule_search_locations=[DIFFUSERS_PATH], ) UpperCAmelCase_ = spec.loader.load_module() def SCREAMING_SNAKE_CASE_ ( _snake_case :Optional[Any] , _snake_case :Tuple ) -> Optional[Any]: return line.startswith(_snake_case ) or len(_snake_case ) <= 1 or re.search(r'''^\s*\)(\s*->.*:|:)\s*$''' , _snake_case ) is not None def SCREAMING_SNAKE_CASE_ ( _snake_case :List[str] ) -> Any: _A = object_name.split('''.''' ) _A = 0 # First let's find the module where our object lives. _A = parts[i] while i < len(_snake_case ) and not os.path.isfile(os.path.join(_snake_case , F'''{module}.py''' ) ): i += 1 if i < len(_snake_case ): _A = os.path.join(_snake_case , parts[i] ) if i >= len(_snake_case ): raise ValueError(F'''`object_name` should begin with the name of a module of diffusers but got {object_name}.''' ) with open(os.path.join(_snake_case , F'''{module}.py''' ) , '''r''' , encoding='''utf-8''' , newline='''\n''' ) as f: _A = f.readlines() # Now let's find the class / func in the code! _A = '''''' _A = 0 for name in parts[i + 1 :]: while ( line_index < len(_snake_case ) and re.search(rF'''^{indent}(class|def)\s+{name}(\(|\:)''' , lines[line_index] ) is None ): line_index += 1 indent += " " line_index += 1 if line_index >= len(_snake_case ): raise ValueError(F''' {object_name} does not match any function or class in {module}.''' ) # We found the beginning of the class / func, now let's find the end (when the indent diminishes). _A = line_index while line_index < len(_snake_case ) and _should_continue(lines[line_index] , _snake_case ): line_index += 1 # Clean up empty lines at the end (if any). while len(lines[line_index - 1] ) <= 1: line_index -= 1 _A = lines[start_index:line_index] return "".join(_snake_case ) UpperCAmelCase_ = re.compile(r"""^(\s*)#\s*Copied from\s+diffusers\.(\S+\.\S+)\s*($|\S.*$)""") UpperCAmelCase_ = re.compile(r"""^\s*(\S+)->(\S+)(\s+.*|$)""") UpperCAmelCase_ = re.compile(r"""<FILL\s+[^>]*>""") def SCREAMING_SNAKE_CASE_ ( _snake_case :Union[str, Any] ) -> Optional[int]: _A = code.split('''\n''' ) _A = 0 while idx < len(_snake_case ) and len(lines[idx] ) == 0: idx += 1 if idx < len(_snake_case ): return re.search(r'''^(\s*)\S''' , lines[idx] ).groups()[0] return "" def SCREAMING_SNAKE_CASE_ ( _snake_case :str ) -> Tuple: _A = len(get_indent(_snake_case ) ) > 0 if has_indent: _A = F'''class Bla:\n{code}''' _A = black.Mode(target_versions={black.TargetVersion.PYaa} , line_length=119 , preview=_snake_case ) _A = black.format_str(_snake_case , mode=_snake_case ) _A , _A = style_docstrings_in_code(_snake_case ) return result[len('''class Bla:\n''' ) :] if has_indent else result def SCREAMING_SNAKE_CASE_ ( _snake_case :Optional[Any] , _snake_case :Optional[int]=False ) -> int: with open(_snake_case , '''r''' , encoding='''utf-8''' , newline='''\n''' ) as f: _A = f.readlines() _A = [] _A = 0 # Not a for loop cause `lines` is going to change (if `overwrite=True`). 
while line_index < len(_snake_case ): _A = _re_copy_warning.search(lines[line_index] ) if search is None: line_index += 1 continue # There is some copied code here, let's retrieve the original. _A , _A , _A = search.groups() _A = find_code_in_diffusers(_snake_case ) _A = get_indent(_snake_case ) _A = line_index + 1 if indent == theoretical_indent else line_index + 2 _A = theoretical_indent _A = start_index # Loop to check the observed code, stop when indentation diminishes or if we see a End copy comment. _A = True while line_index < len(_snake_case ) and should_continue: line_index += 1 if line_index >= len(_snake_case ): break _A = lines[line_index] _A = _should_continue(_snake_case , _snake_case ) and re.search(F'''^{indent}# End copy''' , _snake_case ) is None # Clean up empty lines at the end (if any). while len(lines[line_index - 1] ) <= 1: line_index -= 1 _A = lines[start_index:line_index] _A = ''''''.join(_snake_case ) # Remove any nested `Copied from` comments to avoid circular copies _A = [line for line in theoretical_code.split('''\n''' ) if _re_copy_warning.search(_snake_case ) is None] _A = '''\n'''.join(_snake_case ) # Before comparing, use the `replace_pattern` on the original code. if len(_snake_case ) > 0: _A = replace_pattern.replace('''with''' , '''''' ).split(''',''' ) _A = [_re_replace_pattern.search(_snake_case ) for p in patterns] for pattern in patterns: if pattern is None: continue _A , _A , _A = pattern.groups() _A = re.sub(_snake_case , _snake_case , _snake_case ) if option.strip() == "all-casing": _A = re.sub(obja.lower() , obja.lower() , _snake_case ) _A = re.sub(obja.upper() , obja.upper() , _snake_case ) # Blackify after replacement. To be able to do that, we need the header (class or function definition) # from the previous line _A = blackify(lines[start_index - 1] + theoretical_code ) _A = theoretical_code[len(lines[start_index - 1] ) :] # Test for a diff and act accordingly. if observed_code != theoretical_code: diffs.append([object_name, start_index] ) if overwrite: _A = lines[:start_index] + [theoretical_code] + lines[line_index:] _A = start_index + 1 if overwrite and len(_snake_case ) > 0: # Warn the user a file has been modified. print(F'''Detected changes, rewriting {filename}.''' ) with open(_snake_case , '''w''' , encoding='''utf-8''' , newline='''\n''' ) as f: f.writelines(_snake_case ) return diffs def SCREAMING_SNAKE_CASE_ ( _snake_case :bool = False ) -> Tuple: _A = glob.glob(os.path.join(_snake_case , '''**/*.py''' ) , recursive=_snake_case ) _A = [] for filename in all_files: _A = is_copy_consistent(_snake_case , _snake_case ) diffs += [F'''- {filename}: copy does not match {d[0]} at line {d[1]}''' for d in new_diffs] if not overwrite and len(_snake_case ) > 0: _A = '''\n'''.join(_snake_case ) raise Exception( '''Found the following copy inconsistencies:\n''' + diff + '''\nRun `make fix-copies` or `python utils/check_copies.py --fix_and_overwrite` to fix them.''' ) if __name__ == "__main__": UpperCAmelCase_ = argparse.ArgumentParser() parser.add_argument("""--fix_and_overwrite""", action="""store_true""", help="""Whether to fix inconsistencies.""") UpperCAmelCase_ = parser.parse_args() check_copies(args.fix_and_overwrite)
2
def is_arithmetic_series(series: list) -> bool:
    if not isinstance(series, list):
        raise ValueError('''Input series is not valid, valid series - [2, 4, 6]''')
    if len(series) == 0:
        raise ValueError('''Input list must be a non empty list''')
    if len(series) == 1:
        return True
    common_diff = series[1] - series[0]
    for index in range(len(series) - 1):
        if series[index + 1] - series[index] != common_diff:
            return False
    return True


def arithmetic_mean(series: list) -> float:
    if not isinstance(series, list):
        raise ValueError('''Input series is not valid, valid series - [2, 4, 6]''')
    if len(series) == 0:
        raise ValueError('''Input list must be a non empty list''')
    answer = 0
    for val in series:
        answer += val
    return answer / len(series)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
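# A few spot checks for the two helpers above: an arithmetic series has a constant
# difference between consecutive terms, and its mean is simply sum / length.
if __name__ == "__main__":
    demo = [2, 4, 6, 8]
    assert is_arithmetic_series(demo)           # constant difference of 2
    assert not is_arithmetic_series([2, 4, 7])  # 7 breaks the pattern
    assert arithmetic_mean(demo) == 5.0         # (2 + 4 + 6 + 8) / 4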
2
1
import argparse from typing import Dict import tensorflow as tf import torch from tqdm import tqdm from transformers import BigBirdPegasusConfig, BigBirdPegasusForConditionalGeneration UpperCAmelCase_ = [ # tf -> hf ("""/""", """."""), ("""layer_""", """layers."""), ("""kernel""", """weight"""), ("""beta""", """bias"""), ("""gamma""", """weight"""), ("""pegasus""", """model"""), ] UpperCAmelCase_ = [ (""".output.dense""", """.fc2"""), ("""intermediate.LayerNorm""", """final_layer_norm"""), ("""intermediate.dense""", """fc1"""), ] UpperCAmelCase_ = ( INIT_COMMON + [ ("""attention.self.LayerNorm""", """self_attn_layer_norm"""), ("""attention.output.dense""", """self_attn.out_proj"""), ("""attention.self""", """self_attn"""), ("""attention.encdec.LayerNorm""", """encoder_attn_layer_norm"""), ("""attention.encdec_output.dense""", """encoder_attn.out_proj"""), ("""attention.encdec""", """encoder_attn"""), ("""key""", """k_proj"""), ("""value""", """v_proj"""), ("""query""", """q_proj"""), ("""decoder.LayerNorm""", """decoder.layernorm_embedding"""), ] + END_COMMON ) UpperCAmelCase_ = ( INIT_COMMON + [ ("""embeddings.word_embeddings""", """shared.weight"""), ("""embeddings.position_embeddings""", """embed_positions.weight"""), ("""attention.self.LayerNorm""", """self_attn_layer_norm"""), ("""attention.output.dense""", """self_attn.output"""), ("""attention.self""", """self_attn.self"""), ("""encoder.LayerNorm""", """encoder.layernorm_embedding"""), ] + END_COMMON ) UpperCAmelCase_ = [ """encdec/key/bias""", """encdec/query/bias""", """encdec/value/bias""", """self/key/bias""", """self/query/bias""", """self/value/bias""", """encdec_output/dense/bias""", """attention/output/dense/bias""", ] def SCREAMING_SNAKE_CASE_ ( _snake_case :Dict , _snake_case :Optional[int] ) -> Optional[Any]: for tf_name, hf_name in patterns: _A = k.replace(_snake_case , _snake_case ) return k def SCREAMING_SNAKE_CASE_ ( _snake_case :dict , _snake_case :dict ) -> BigBirdPegasusForConditionalGeneration: _A = BigBirdPegasusConfig(**_snake_case ) _A = BigBirdPegasusForConditionalGeneration(_snake_case ) _A = torch_model.state_dict() _A = {} # separating decoder weights _A = {k: tf_weights[k] for k in tf_weights if k.startswith('''pegasus/decoder''' )} _A = {k: tf_weights[k] for k in tf_weights if not k.startswith('''pegasus/decoder''' )} for k, v in tqdm(decoder_weights.items() , '''tf -> hf conversion''' ): _A = [k.endswith(_snake_case ) for ending in KEYS_TO_IGNORE] if any(_snake_case ): continue _A = DECODER_PATTERNS _A = rename_state_dict_key(_snake_case , _snake_case ) if new_k not in state_dict: raise ValueError(F'''could not find new key {new_k} in state dict. (converted from {k})''' ) if any(True if i in k else False for i in ['''dense''', '''query''', '''key''', '''value'''] ): _A = v.T _A = torch.from_numpy(_snake_case ) assert v.shape == state_dict[new_k].shape, F'''{new_k}, {k}, {v.shape}, {state_dict[new_k].shape}''' for k, v in tqdm(remaining_weights.items() , '''tf -> hf conversion''' ): _A = [k.endswith(_snake_case ) for ending in KEYS_TO_IGNORE] if any(_snake_case ): continue _A = REMAINING_PATTERNS _A = rename_state_dict_key(_snake_case , _snake_case ) if new_k not in state_dict and k != "pegasus/embeddings/position_embeddings": raise ValueError(F'''could not find new key {new_k} in state dict. 
(converted from {k})''' ) if any(True if i in k else False for i in ['''dense''', '''query''', '''key''', '''value'''] ): _A = v.T _A = torch.from_numpy(_snake_case ) if k != "pegasus/embeddings/position_embeddings": assert v.shape == state_dict[new_k].shape, F'''{new_k}, {k}, {v.shape}, {state_dict[new_k].shape}''' _A = mapping['''model.embed_positions.weight'''] _A = mapping.pop('''model.embed_positions.weight''' ) _A , _A = torch_model.load_state_dict(_snake_case , strict=_snake_case ) _A = [ k for k in missing if k not in [ '''final_logits_bias''', '''model.encoder.embed_tokens.weight''', '''model.decoder.embed_tokens.weight''', '''lm_head.weight''', ] ] assert unexpected_missing == [], F'''no matches found for the following torch keys {unexpected_missing}''' assert extra == [], F'''no matches found for the following tf keys {extra}''' return torch_model def SCREAMING_SNAKE_CASE_ ( _snake_case :Optional[int] ) -> Dict: _A = tf.train.list_variables(_snake_case ) _A = {} _A = ['''global_step'''] for name, shape in tqdm(_snake_case , desc='''converting tf checkpoint to dict''' ): _A = any(pat in name for pat in ignore_name ) if skip_key: continue _A = tf.train.load_variable(_snake_case , _snake_case ) _A = array return tf_weights def SCREAMING_SNAKE_CASE_ ( _snake_case :str , _snake_case :str , _snake_case :dict ) -> str: _A = get_tf_weights_as_numpy(_snake_case ) _A = convert_bigbird_pegasus(_snake_case , _snake_case ) torch_model.save_pretrained(_snake_case ) if __name__ == "__main__": UpperCAmelCase_ = argparse.ArgumentParser() parser.add_argument("""--tf_ckpt_path""", type=str, help="""passed to tf.train.list_variables""") parser.add_argument("""--save_dir""", default=None, type=str, help="""Path to the output PyTorch model.""") UpperCAmelCase_ = parser.parse_args() UpperCAmelCase_ = {} convert_bigbird_pegasus_ckpt_to_pytorch(args.tf_ckpt_path, args.save_dir, config_update=config_update)
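# A stripped-down sketch of the ordered pattern substitution that
# `rename_state_dict_key` performs above; the helper name and the patterns below
# are illustrative only.
def demo_rename(key: str, patterns) -> str:
    for old, new in patterns:
        key = key.replace(old, new)
    return key


if __name__ == "__main__":
    pats = [("/", "."), ("layer_", "layers."), ("kernel", "weight")]
    print(demo_rename("encoder/layer_0/kernel", pats))  # encoder.layers.0.weight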
2
import math

import numpy as np
import qiskit
from qiskit import Aer, ClassicalRegister, QuantumCircuit, QuantumRegister, execute


def quantum_fourier_transform(number_of_qubits: int = 3) -> qiskit.result.counts.Counts:
    if not isinstance(number_of_qubits, int):
        raise TypeError('''number of qubits must be an integer.''')
    if number_of_qubits <= 0:
        raise ValueError('''number of qubits must be > 0.''')
    if math.floor(number_of_qubits) != number_of_qubits:
        raise ValueError('''number of qubits must be exact integer.''')
    if number_of_qubits > 10:
        raise ValueError('''number of qubits too large to simulate (>10).''')

    qr = QuantumRegister(number_of_qubits, '''qr''')
    cr = ClassicalRegister(number_of_qubits, '''cr''')
    quantum_circuit = QuantumCircuit(qr, cr)

    counter = number_of_qubits
    for i in range(number_of_qubits):
        quantum_circuit.h(number_of_qubits - i - 1)
        counter -= 1
        for j in range(counter):
            quantum_circuit.cp(np.pi / 2 ** (counter - j), j, counter)
    for k in range(number_of_qubits // 2):
        quantum_circuit.swap(k, number_of_qubits - k - 1)

    # measure all the qubits
    quantum_circuit.measure(qr, cr)
    # simulate with 10000 shots
    backend = Aer.get_backend('''qasm_simulator''')
    job = execute(quantum_circuit, backend, shots=10_000)

    return job.result().get_counts(quantum_circuit)


if __name__ == "__main__":
    print(
        f'Total count for quantum fourier transform state is: \
{quantum_fourier_transform(3)}'
    )
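# A quick sanity sketch on top of `quantum_fourier_transform` above (assumes qiskit
# with its legacy Aer simulator and `execute` API, as used in this file): the QFT
# of |000> is an equal superposition, so each of the 8 outcomes should receive
# roughly 1/8 of the 10_000 shots.
if __name__ == "__main__":
    counts = quantum_fourier_transform(3)
    for state in sorted(counts):
        print(state, counts[state] / 10_000)  # each ratio should be near 0.125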
2
1
ROMAN = [
    (1000, """M"""),
    (900, """CM"""),
    (500, """D"""),
    (400, """CD"""),
    (100, """C"""),
    (90, """XC"""),
    (50, """L"""),
    (40, """XL"""),
    (10, """X"""),
    (9, """IX"""),
    (5, """V"""),
    (4, """IV"""),
    (1, """I"""),
]


def roman_to_int(roman: str) -> int:
    vals = {'''I''': 1, '''V''': 5, '''X''': 10, '''L''': 50, '''C''': 100, '''D''': 500, '''M''': 1_000}
    total = 0
    place = 0
    while place < len(roman):
        if (place + 1 < len(roman)) and (vals[roman[place]] < vals[roman[place + 1]]):
            total += vals[roman[place + 1]] - vals[roman[place]]
            place += 2
        else:
            total += vals[roman[place]]
            place += 1
    return total


def int_to_roman(number: int) -> str:
    result = []
    for arabic, roman in ROMAN:
        (factor, number) = divmod(number, arabic)
        result.append(roman * factor)
        if number == 0:
            break
    return "".join(result)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
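# A round-trip check for the two converters above: every value in 1..50 should
# survive int -> roman -> int unchanged.
if __name__ == "__main__":
    for n in range(1, 51):
        assert roman_to_int(int_to_roman(n)) == n
    print(int_to_roman(1994))           # MCMXCIV
    print(roman_to_int('''MCMXCIV'''))  # 1994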
2
import argparse import json import os import fairseq import torch from fairseq.data import Dictionary # Register SEW's fairseq modules from sew_asapp import tasks # noqa: F401 from transformers import ( SEWConfig, SEWForCTC, SEWModel, WavaVecaCTCTokenizer, WavaVecaFeatureExtractor, WavaVecaProcessor, logging, ) logging.set_verbosity_info() UpperCAmelCase_ = logging.get_logger(__name__) UpperCAmelCase_ = { """post_extract_proj""": """feature_projection""", """encoder.pos_conv.0""": """encoder.pos_conv_embed.conv""", """self_attn.k_proj""": """encoder.layers.*.attention.k_proj""", """self_attn.v_proj""": """encoder.layers.*.attention.v_proj""", """self_attn.q_proj""": """encoder.layers.*.attention.q_proj""", """self_attn.out_proj""": """encoder.layers.*.attention.out_proj""", """self_attn_layer_norm""": """encoder.layers.*.layer_norm""", """fc1""": """encoder.layers.*.feed_forward.intermediate_dense""", """fc2""": """encoder.layers.*.feed_forward.output_dense""", """final_layer_norm""": """encoder.layers.*.final_layer_norm""", """encoder.upsample.0""": """encoder.upsample.projection""", """encoder.layer_norm""": """encoder.layer_norm""", """w2v_model.layer_norm""": """layer_norm""", """w2v_encoder.proj""": """lm_head""", """mask_emb""": """masked_spec_embed""", } def SCREAMING_SNAKE_CASE_ ( _snake_case :Union[str, Any] , _snake_case :str , _snake_case :Any , _snake_case :int , _snake_case :List[Any] ) -> Optional[int]: for attribute in key.split('''.''' ): _A = getattr(_snake_case , _snake_case ) if weight_type is not None: _A = getattr(_snake_case , _snake_case ).shape else: _A = hf_pointer.shape assert hf_shape == value.shape, ( F'''Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be''' F''' {value.shape} for {full_name}''' ) if weight_type == "weight": _A = value elif weight_type == "weight_g": _A = value elif weight_type == "weight_v": _A = value elif weight_type == "bias": _A = value else: _A = value logger.info(F'''{key + '.' 
+ weight_type if weight_type is not None else ''} was initialized from {full_name}.''' ) def SCREAMING_SNAKE_CASE_ ( _snake_case :Union[str, Any] , _snake_case :Any , _snake_case :int ) -> Any: _A = [] _A = fairseq_model.state_dict() _A = hf_model.sew.feature_extractor if is_finetuned else hf_model.feature_extractor for name, value in fairseq_dict.items(): _A = False if "conv_layers" in name: load_conv_layer( _snake_case , _snake_case , _snake_case , _snake_case , hf_model.config.feat_extract_norm == '''group''' , ) _A = True else: for key, mapped_key in MAPPING.items(): _A = '''sew.''' + mapped_key if (is_finetuned and mapped_key != '''lm_head''') else mapped_key if key in name or key.split('''w2v_model.''' )[-1] == name.split('''.''' )[0]: _A = True if "*" in mapped_key: _A = name.split(_snake_case )[0].split('''.''' )[-2] _A = mapped_key.replace('''*''' , _snake_case ) if "weight_g" in name: _A = '''weight_g''' elif "weight_v" in name: _A = '''weight_v''' elif "weight" in name: _A = '''weight''' elif "bias" in name: _A = '''bias''' else: _A = None set_recursively(_snake_case , _snake_case , _snake_case , _snake_case , _snake_case ) continue if not is_used: unused_weights.append(_snake_case ) logger.warning(F'''Unused weights: {unused_weights}''' ) def SCREAMING_SNAKE_CASE_ ( _snake_case :Tuple , _snake_case :List[str] , _snake_case :List[str] , _snake_case :Optional[int] , _snake_case :List[Any] ) -> Any: _A = full_name.split('''conv_layers.''' )[-1] _A = name.split('''.''' ) _A = int(items[0] ) _A = int(items[1] ) if type_id == 0: if "bias" in name: assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, ( F'''{full_name} has size {value.shape}, but''' F''' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.''' ) _A = value logger.info(F'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' ) elif "weight" in name: assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, ( F'''{full_name} has size {value.shape}, but''' F''' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.''' ) _A = value logger.info(F'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' ) elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm): if "bias" in name: assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, ( F'''{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was''' " found." 
) _A = value logger.info(F'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' ) elif "weight" in name: assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, ( F'''{full_name} has size {value.shape}, but''' F''' {feature_extractor[layer_id].layer_norm.weight.data.shape} was found.''' ) _A = value logger.info(F'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' ) else: unused_weights.append(_snake_case ) def SCREAMING_SNAKE_CASE_ ( _snake_case :str , _snake_case :Dict ) -> Tuple: _A = SEWConfig() if is_finetuned: _A = model.wav_encoder.wav_model.cfg else: _A = model.cfg _A = fs_config.conv_bias _A = eval(fs_config.conv_feature_layers ) _A = [x[0] for x in conv_layers] _A = [x[1] for x in conv_layers] _A = [x[2] for x in conv_layers] _A = '''gelu''' _A = '''layer''' if fs_config.extractor_mode == '''layer_norm''' else '''group''' _A = 0.0 _A = fs_config.activation_fn.name _A = fs_config.encoder_embed_dim _A = 0.02 _A = fs_config.encoder_ffn_embed_dim _A = 1E-5 _A = fs_config.encoder_layerdrop _A = fs_config.encoder_attention_heads _A = fs_config.conv_pos_groups _A = fs_config.conv_pos _A = len(_snake_case ) _A = fs_config.encoder_layers _A = fs_config.squeeze_factor # take care of any params that are overridden by the Wav2VecCtc model if is_finetuned: _A = model.cfg _A = fs_config.final_dropout _A = fs_config.layerdrop _A = fs_config.activation_dropout _A = fs_config.mask_prob > 0 or fs_config.mask_channel_prob > 0 _A = fs_config.attention_dropout _A = fs_config.dropout_input _A = fs_config.dropout _A = fs_config.mask_channel_length _A = fs_config.mask_channel_prob _A = fs_config.mask_length _A = fs_config.mask_prob _A = '''Wav2Vec2FeatureExtractor''' _A = '''Wav2Vec2CTCTokenizer''' return config @torch.no_grad() def SCREAMING_SNAKE_CASE_ ( _snake_case :Optional[int] , _snake_case :Union[str, Any] , _snake_case :Optional[Any]=None , _snake_case :Optional[int]=None , _snake_case :Dict=True ) -> List[Any]: if is_finetuned: _A , _A , _A = fairseq.checkpoint_utils.load_model_ensemble_and_task( [checkpoint_path] , arg_overrides={'''data''': '''/'''.join(dict_path.split('''/''' )[:-1] )} ) else: _A , _A , _A = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] ) if config_path is not None: _A = SEWConfig.from_pretrained(_snake_case ) else: _A = convert_config(model[0] , _snake_case ) _A = model[0].eval() _A = True if config.feat_extract_norm == '''layer''' else False _A = WavaVecaFeatureExtractor( feature_size=1 , sampling_rate=16_000 , padding_value=0 , do_normalize=_snake_case , return_attention_mask=_snake_case , ) if is_finetuned: if dict_path: _A = Dictionary.load(_snake_case ) # important change bos & pad token id since CTC symbol is <pad> and # not <s> as in fairseq _A = target_dict.pad_index _A = target_dict.bos_index _A = target_dict.pad_index _A = target_dict.bos_index _A = target_dict.eos_index _A = len(target_dict.symbols ) _A = os.path.join(_snake_case , '''vocab.json''' ) if not os.path.isdir(_snake_case ): logger.error('''--pytorch_dump_folder_path ({}) should be a directory'''.format(_snake_case ) ) return os.makedirs(_snake_case , exist_ok=_snake_case ) with open(_snake_case , '''w''' , encoding='''utf-8''' ) as vocab_handle: json.dump(target_dict.indices , _snake_case ) _A = WavaVecaCTCTokenizer( _snake_case , unk_token=target_dict.unk_word , pad_token=target_dict.pad_word , bos_token=target_dict.bos_word , eos_token=target_dict.eos_word , 
word_delimiter_token='''|''' , do_lower_case=_snake_case , ) _A = WavaVecaProcessor(feature_extractor=_snake_case , tokenizer=_snake_case ) processor.save_pretrained(_snake_case ) _A = SEWForCTC(_snake_case ) else: _A = SEWModel(_snake_case ) feature_extractor.save_pretrained(_snake_case ) recursively_load_weights(_snake_case , _snake_case , _snake_case ) hf_model.save_pretrained(_snake_case ) if __name__ == "__main__": UpperCAmelCase_ = argparse.ArgumentParser() parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""") parser.add_argument("""--checkpoint_path""", default=None, type=str, help="""Path to fairseq checkpoint""") parser.add_argument("""--dict_path""", default=None, type=str, help="""Path to dict of fine-tuned model""") parser.add_argument("""--config_path""", default=None, type=str, help="""Path to hf config.json of model to convert""") parser.add_argument( """--is_finetuned""", action="""store_true""", help="""Whether the model to convert is a fine-tuned model or not""" ) UpperCAmelCase_ = parser.parse_args() convert_sew_checkpoint( args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, args.is_finetuned )
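# A tiny sketch of the dotted-key traversal behind `set_recursively` above: walk
# `getattr` down a dotted path, then assign the final attribute. The helper name
# and the namespace object are illustrative only.
import types


def demo_set_by_dotted_key(root, dotted_key: str, value) -> None:
    *parents, last = dotted_key.split(".")
    for name in parents:
        root = getattr(root, name)
    setattr(root, last, value)


if __name__ == "__main__":
    ns = types.SimpleNamespace(child=types.SimpleNamespace(weight=None))
    demo_set_by_dotted_key(ns, "child.weight", 42)
    print(ns.child.weight)  # 42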
2
1
import collections import os import re from pathlib import Path UpperCAmelCase_ = """src/transformers""" # Matches is_xxx_available() UpperCAmelCase_ = re.compile(r"""is\_([a-z_]*)_available()""") # Catches a one-line _import_struct = {xxx} UpperCAmelCase_ = re.compile(r"""^_import_structure\s+=\s+\{([^\}]+)\}""") # Catches a line with a key-values pattern: "bla": ["foo", "bar"] UpperCAmelCase_ = re.compile(r"""\s+\"\S*\":\s+\[([^\]]*)\]""") # Catches a line if not is_foo_available UpperCAmelCase_ = re.compile(r"""^\s*if\s+not\s+is\_[a-z_]*\_available\(\)""") # Catches a line _import_struct["bla"].append("foo") UpperCAmelCase_ = re.compile(r"""^\s*_import_structure\[\"\S*\"\]\.append\(\"(\S*)\"\)""") # Catches a line _import_struct["bla"].extend(["foo", "bar"]) or _import_struct["bla"] = ["foo", "bar"] UpperCAmelCase_ = re.compile(r"""^\s*_import_structure\[\S*\](?:\.extend\(|\s*=\s+)\[([^\]]*)\]""") # Catches a line with an object between quotes and a comma: "MyModel", UpperCAmelCase_ = re.compile(r"""^\s+\"([^\"]+)\",""") # Catches a line with objects between brackets only: ["foo", "bar"], UpperCAmelCase_ = re.compile(r"""^\s+\[([^\]]+)\]""") # Catches a line with from foo import bar, bla, boo UpperCAmelCase_ = re.compile(r"""\s+from\s+\S*\s+import\s+([^\(\s].*)\n""") # Catches a line with try: UpperCAmelCase_ = re.compile(r"""^\s*try:""") # Catches a line with else: UpperCAmelCase_ = re.compile(r"""^\s*else:""") def SCREAMING_SNAKE_CASE_ ( _snake_case :Optional[int] ) -> Any: if _re_test_backend.search(_snake_case ) is None: return None _A = [b[0] for b in _re_backend.findall(_snake_case )] backends.sort() return "_and_".join(_snake_case ) def SCREAMING_SNAKE_CASE_ ( _snake_case :Any ) -> Any: with open(_snake_case , '''r''' , encoding='''utf-8''' , newline='''\n''' ) as f: _A = f.readlines() _A = 0 while line_index < len(_snake_case ) and not lines[line_index].startswith('''_import_structure = {''' ): line_index += 1 # If this is a traditional init, just return. if line_index >= len(_snake_case ): return None # First grab the objects without a specific backend in _import_structure _A = [] while not lines[line_index].startswith('''if TYPE_CHECKING''' ) and find_backend(lines[line_index] ) is None: _A = lines[line_index] # If we have everything on a single line, let's deal with it. if _re_one_line_import_struct.search(_snake_case ): _A = _re_one_line_import_struct.search(_snake_case ).groups()[0] _A = re.findall(r'''\[([^\]]+)\]''' , _snake_case ) for imp in imports: objects.extend([obj[1:-1] for obj in imp.split(''', ''' )] ) line_index += 1 continue _A = _re_import_struct_key_value.search(_snake_case ) if single_line_import_search is not None: _A = [obj[1:-1] for obj in single_line_import_search.groups()[0].split(''', ''' ) if len(_snake_case ) > 0] objects.extend(_snake_case ) elif line.startswith(''' ''' * 8 + '''"''' ): objects.append(line[9:-3] ) line_index += 1 _A = {'''none''': objects} # Let's continue with backend-specific objects in _import_structure while not lines[line_index].startswith('''if TYPE_CHECKING''' ): # If the line is an if not is_backend_available, we grab all objects associated. 
_A = find_backend(lines[line_index] ) # Check if the backend declaration is inside a try block: if _re_try.search(lines[line_index - 1] ) is None: _A = None if backend is not None: line_index += 1 # Scroll until we hit the else block of try-except-else while _re_else.search(lines[line_index] ) is None: line_index += 1 line_index += 1 _A = [] # Until we unindent, add backend objects to the list while len(lines[line_index] ) <= 1 or lines[line_index].startswith(''' ''' * 4 ): _A = lines[line_index] if _re_import_struct_add_one.search(_snake_case ) is not None: objects.append(_re_import_struct_add_one.search(_snake_case ).groups()[0] ) elif _re_import_struct_add_many.search(_snake_case ) is not None: _A = _re_import_struct_add_many.search(_snake_case ).groups()[0].split(''', ''' ) _A = [obj[1:-1] for obj in imports if len(_snake_case ) > 0] objects.extend(_snake_case ) elif _re_between_brackets.search(_snake_case ) is not None: _A = _re_between_brackets.search(_snake_case ).groups()[0].split(''', ''' ) _A = [obj[1:-1] for obj in imports if len(_snake_case ) > 0] objects.extend(_snake_case ) elif _re_quote_object.search(_snake_case ) is not None: objects.append(_re_quote_object.search(_snake_case ).groups()[0] ) elif line.startswith(''' ''' * 8 + '''"''' ): objects.append(line[9:-3] ) elif line.startswith(''' ''' * 12 + '''"''' ): objects.append(line[13:-3] ) line_index += 1 _A = objects else: line_index += 1 # At this stage we are in the TYPE_CHECKING part, first grab the objects without a specific backend _A = [] while ( line_index < len(_snake_case ) and find_backend(lines[line_index] ) is None and not lines[line_index].startswith('''else''' ) ): _A = lines[line_index] _A = _re_import.search(_snake_case ) if single_line_import_search is not None: objects.extend(single_line_import_search.groups()[0].split(''', ''' ) ) elif line.startswith(''' ''' * 8 ): objects.append(line[8:-2] ) line_index += 1 _A = {'''none''': objects} # Let's continue with backend-specific objects while line_index < len(_snake_case ): # If the line is an if is_backend_available, we grab all objects associated. 
_A = find_backend(lines[line_index] ) # Check if the backend declaration is inside a try block: if _re_try.search(lines[line_index - 1] ) is None: _A = None if backend is not None: line_index += 1 # Scroll until we hit the else block of try-except-else while _re_else.search(lines[line_index] ) is None: line_index += 1 line_index += 1 _A = [] # Until we unindent, add backend objects to the list while len(lines[line_index] ) <= 1 or lines[line_index].startswith(''' ''' * 8 ): _A = lines[line_index] _A = _re_import.search(_snake_case ) if single_line_import_search is not None: objects.extend(single_line_import_search.groups()[0].split(''', ''' ) ) elif line.startswith(''' ''' * 12 ): objects.append(line[12:-2] ) line_index += 1 _A = objects else: line_index += 1 return import_dict_objects, type_hint_objects def SCREAMING_SNAKE_CASE_ ( _snake_case :List[str] , _snake_case :Dict ) -> Any: def find_duplicates(_snake_case :Any ): return [k for k, v in collections.Counter(_snake_case ).items() if v > 1] if list(import_dict_objects.keys() ) != list(type_hint_objects.keys() ): return ["Both sides of the init do not have the same backends!"] _A = [] for key in import_dict_objects.keys(): _A = find_duplicates(import_dict_objects[key] ) if duplicate_imports: errors.append(F'''Duplicate _import_structure definitions for: {duplicate_imports}''' ) _A = find_duplicates(type_hint_objects[key] ) if duplicate_type_hints: errors.append(F'''Duplicate TYPE_CHECKING objects for: {duplicate_type_hints}''' ) if sorted(set(import_dict_objects[key] ) ) != sorted(set(type_hint_objects[key] ) ): _A = '''base imports''' if key == '''none''' else F'''{key} backend''' errors.append(F'''Differences for {name}:''' ) for a in type_hint_objects[key]: if a not in import_dict_objects[key]: errors.append(F''' {a} in TYPE_HINT but not in _import_structure.''' ) for a in import_dict_objects[key]: if a not in type_hint_objects[key]: errors.append(F''' {a} in _import_structure but not in TYPE_HINT.''' ) return errors def SCREAMING_SNAKE_CASE_ ( ) -> int: _A = [] for root, _, files in os.walk(_snake_case ): if "__init__.py" in files: _A = os.path.join(_snake_case , '''__init__.py''' ) _A = parse_init(_snake_case ) if objects is not None: _A = analyze_results(*_snake_case ) if len(_snake_case ) > 0: _A = F'''Problem in {fname}, both halves do not define the same objects.\n{errors[0]}''' failures.append('''\n'''.join(_snake_case ) ) if len(_snake_case ) > 0: raise ValueError('''\n\n'''.join(_snake_case ) ) def SCREAMING_SNAKE_CASE_ ( ) -> Optional[int]: _A = [] for path, directories, files in os.walk(_snake_case ): for folder in directories: # Ignore private modules if folder.startswith('''_''' ): directories.remove(_snake_case ) continue # Ignore leftovers from branches (empty folders apart from pycache) if len(list((Path(_snake_case ) / folder).glob('''*.py''' ) ) ) == 0: continue _A = str((Path(_snake_case ) / folder).relative_to(_snake_case ) ) _A = short_path.replace(os.path.sep , '''.''' ) submodules.append(_snake_case ) for fname in files: if fname == "__init__.py": continue _A = str((Path(_snake_case ) / fname).relative_to(_snake_case ) ) _A = short_path.replace('''.py''' , '''''' ).replace(os.path.sep , '''.''' ) if len(submodule.split('''.''' ) ) == 1: submodules.append(_snake_case ) return submodules UpperCAmelCase_ = [ """convert_pytorch_checkpoint_to_tf2""", """modeling_flax_pytorch_utils""", """models.esm.openfold_utils""", ] def SCREAMING_SNAKE_CASE_ ( ) -> List[str]: # This is to make sure the transformers module 
imported is the one in the repo. from transformers.utils import direct_transformers_import _A = direct_transformers_import(_snake_case ) _A = set(transformers._import_structure.keys() ) # This contains all the base keys of the _import_structure object defined in the init, but if the user is missing # some optional dependencies, they may not have all of them. Thus we read the init to read all additions and # (potentially re-) add them. with open(os.path.join(_snake_case , '''__init__.py''' ) , '''r''' ) as f: _A = f.read() import_structure_keys.update(set(re.findall(r'''import_structure\[\"([^\"]*)\"\]''' , _snake_case ) ) ) _A = [ module for module in get_transformers_submodules() if module not in IGNORE_SUBMODULES and module not in import_structure_keys ] if len(_snake_case ) > 0: _A = '''\n'''.join(F'''- {module}''' for module in module_not_registered ) raise ValueError( '''The following submodules are not properly registered in the main init of Transformers:\n''' F'''{list_of_modules}\n''' '''Make sure they appear somewhere in the keys of `_import_structure` with an empty list as value.''' ) if __name__ == "__main__": check_all_inits() check_submodules()
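# A compact sketch of the backend-detection idea used by the init parser above:
# collect every `is_xxx_available` mentioned on an `if not ...` guard line and join
# the sorted backend names. The helper names below are illustrative only.
import re

_demo_backend = re.compile(r"is_([a-z_]*)_available")


def demo_find_backend(line: str):
    if re.search(r"^\s*if\s+not\s+is_[a-z_]*_available", line) is None:
        return None
    return "_and_".join(sorted(_demo_backend.findall(line)))


if __name__ == "__main__":
    print(demo_find_backend("if not is_torch_available() and not is_tf_available():"))
    # -> tf_and_torch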
2
import unittest from transformers import is_vision_available from transformers.pipelines import pipeline from transformers.testing_utils import ( is_pipeline_test, nested_simplify, require_tf, require_torch, require_vision, slow, ) from .test_pipelines_common import ANY if is_vision_available(): from PIL import Image else: class lowerCamelCase__ : """simple docstring""" @staticmethod def snake_case_ ( *__lowerCAmelCase : Optional[Any] , **__lowerCAmelCase : Any ) -> Any: pass @is_pipeline_test @require_vision class lowerCamelCase__ ( unittest.TestCase): """simple docstring""" @require_torch def snake_case_ ( self : Tuple ) -> Tuple: _A = pipeline( model='''hf-internal-testing/tiny-random-clip-zero-shot-image-classification''' , ) _A = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' ) _A = image_classifier(__lowerCAmelCase , candidate_labels=['''a''', '''b''', '''c'''] ) # The floating scores are so close, we enter floating error approximation and the order is not guaranteed across # python and torch versions. self.assertIn( nested_simplify(__lowerCAmelCase ) , [ [{'''score''': 0.333, '''label''': '''a'''}, {'''score''': 0.333, '''label''': '''b'''}, {'''score''': 0.333, '''label''': '''c'''}], [{'''score''': 0.333, '''label''': '''a'''}, {'''score''': 0.333, '''label''': '''c'''}, {'''score''': 0.333, '''label''': '''b'''}], ] , ) _A = image_classifier([image] * 5 , candidate_labels=['''A''', '''B''', '''C'''] , batch_size=2 ) self.assertEqual( nested_simplify(__lowerCAmelCase ) , [ [ {'''score''': 0.333, '''label''': ANY(__lowerCAmelCase )}, {'''score''': 0.333, '''label''': ANY(__lowerCAmelCase )}, {'''score''': 0.333, '''label''': ANY(__lowerCAmelCase )}, ], [ {'''score''': 0.333, '''label''': ANY(__lowerCAmelCase )}, {'''score''': 0.333, '''label''': ANY(__lowerCAmelCase )}, {'''score''': 0.333, '''label''': ANY(__lowerCAmelCase )}, ], [ {'''score''': 0.333, '''label''': ANY(__lowerCAmelCase )}, {'''score''': 0.333, '''label''': ANY(__lowerCAmelCase )}, {'''score''': 0.333, '''label''': ANY(__lowerCAmelCase )}, ], [ {'''score''': 0.333, '''label''': ANY(__lowerCAmelCase )}, {'''score''': 0.333, '''label''': ANY(__lowerCAmelCase )}, {'''score''': 0.333, '''label''': ANY(__lowerCAmelCase )}, ], [ {'''score''': 0.333, '''label''': ANY(__lowerCAmelCase )}, {'''score''': 0.333, '''label''': ANY(__lowerCAmelCase )}, {'''score''': 0.333, '''label''': ANY(__lowerCAmelCase )}, ], ] , ) @require_tf def snake_case_ ( self : int ) -> Optional[int]: _A = pipeline( model='''hf-internal-testing/tiny-random-clip-zero-shot-image-classification''' , framework='''tf''' ) _A = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' ) _A = image_classifier(__lowerCAmelCase , candidate_labels=['''a''', '''b''', '''c'''] ) self.assertEqual( nested_simplify(__lowerCAmelCase ) , [{'''score''': 0.333, '''label''': '''a'''}, {'''score''': 0.333, '''label''': '''b'''}, {'''score''': 0.333, '''label''': '''c'''}] , ) _A = image_classifier([image] * 5 , candidate_labels=['''A''', '''B''', '''C'''] , batch_size=2 ) self.assertEqual( nested_simplify(__lowerCAmelCase ) , [ [ {'''score''': 0.333, '''label''': ANY(__lowerCAmelCase )}, {'''score''': 0.333, '''label''': ANY(__lowerCAmelCase )}, {'''score''': 0.333, '''label''': ANY(__lowerCAmelCase )}, ], [ {'''score''': 0.333, '''label''': ANY(__lowerCAmelCase )}, {'''score''': 0.333, '''label''': ANY(__lowerCAmelCase )}, {'''score''': 0.333, '''label''': ANY(__lowerCAmelCase )}, ], [ {'''score''': 0.333, '''label''': 
ANY(__lowerCAmelCase )}, {'''score''': 0.333, '''label''': ANY(__lowerCAmelCase )}, {'''score''': 0.333, '''label''': ANY(__lowerCAmelCase )}, ], [ {'''score''': 0.333, '''label''': ANY(__lowerCAmelCase )}, {'''score''': 0.333, '''label''': ANY(__lowerCAmelCase )}, {'''score''': 0.333, '''label''': ANY(__lowerCAmelCase )}, ], [ {'''score''': 0.333, '''label''': ANY(__lowerCAmelCase )}, {'''score''': 0.333, '''label''': ANY(__lowerCAmelCase )}, {'''score''': 0.333, '''label''': ANY(__lowerCAmelCase )}, ], ] , ) @slow @require_torch def snake_case_ ( self : Optional[int] ) -> int: _A = pipeline( task='''zero-shot-image-classification''' , model='''openai/clip-vit-base-patch32''' , ) # This is an image of 2 cats with remotes and no planes _A = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' ) _A = image_classifier(__lowerCAmelCase , candidate_labels=['''cat''', '''plane''', '''remote'''] ) self.assertEqual( nested_simplify(__lowerCAmelCase ) , [ {'''score''': 0.511, '''label''': '''remote'''}, {'''score''': 0.485, '''label''': '''cat'''}, {'''score''': 0.004, '''label''': '''plane'''}, ] , ) _A = image_classifier([image] * 5 , candidate_labels=['''cat''', '''plane''', '''remote'''] , batch_size=2 ) self.assertEqual( nested_simplify(__lowerCAmelCase ) , [ [ {'''score''': 0.511, '''label''': '''remote'''}, {'''score''': 0.485, '''label''': '''cat'''}, {'''score''': 0.004, '''label''': '''plane'''}, ], ] * 5 , ) @slow @require_tf def snake_case_ ( self : Optional[int] ) -> Dict: _A = pipeline( task='''zero-shot-image-classification''' , model='''openai/clip-vit-base-patch32''' , framework='''tf''' ) # This is an image of 2 cats with remotes and no planes _A = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' ) _A = image_classifier(__lowerCAmelCase , candidate_labels=['''cat''', '''plane''', '''remote'''] ) self.assertEqual( nested_simplify(__lowerCAmelCase ) , [ {'''score''': 0.511, '''label''': '''remote'''}, {'''score''': 0.485, '''label''': '''cat'''}, {'''score''': 0.004, '''label''': '''plane'''}, ] , ) _A = image_classifier([image] * 5 , candidate_labels=['''cat''', '''plane''', '''remote'''] , batch_size=2 ) self.assertEqual( nested_simplify(__lowerCAmelCase ) , [ [ {'''score''': 0.511, '''label''': '''remote'''}, {'''score''': 0.485, '''label''': '''cat'''}, {'''score''': 0.004, '''label''': '''plane'''}, ], ] * 5 , )
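# A minimal usage sketch of the pipeline exercised by the tests above (assumes
# `transformers`, a torch backend, PIL, and network access to the checkpoint).
from PIL import Image
from transformers import pipeline

if __name__ == "__main__":
    classifier = pipeline(
        task="zero-shot-image-classification",
        model="openai/clip-vit-base-patch32",
    )
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    for prediction in classifier(image, candidate_labels=["cat", "plane", "remote"]):
        print(prediction["label"], round(prediction["score"], 3))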
2
1
import argparse import os import evaluate import torch from datasets import load_dataset from torch.optim import AdamW from torch.utils.data import DataLoader from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed from accelerate import Accelerator, DistributedType ######################################################################## # This is a fully working simple example to use Accelerate, # specifically showcasing the experiment tracking capability, # and builds off the `nlp_example.py` script. # # This example trains a Bert base model on GLUE MRPC # in any of the following settings (with the same script): # - single CPU or single GPU # - multi GPUS (using PyTorch distributed mode) # - (multi) TPUs # - fp16 (mixed-precision) or fp32 (normal precision) # # To help focus on the differences in the code, building `DataLoaders` # was refactored into its own function. # New additions from the base script can be found quickly by # looking for the # New Code # tags # # To run it in each of these various modes, follow the instructions # in the readme for examples: # https://github.com/huggingface/accelerate/tree/main/examples # ######################################################################## UpperCAmelCase_ = 1_6 UpperCAmelCase_ = 3_2 def SCREAMING_SNAKE_CASE_ ( _snake_case :Accelerator , _snake_case :int = 16 ) -> Optional[Any]: _A = AutoTokenizer.from_pretrained('''bert-base-cased''' ) _A = load_dataset('''glue''' , '''mrpc''' ) def tokenize_function(_snake_case :Optional[int] ): # max_length=None => use the model max length (it's actually the default) _A = tokenizer(examples['''sentence1'''] , examples['''sentence2'''] , truncation=_snake_case , max_length=_snake_case ) return outputs # Apply the method we just defined to all the examples in all the splits of the dataset # starting with the main process first: with accelerator.main_process_first(): _A = datasets.map( _snake_case , batched=_snake_case , remove_columns=['''idx''', '''sentence1''', '''sentence2'''] , ) # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the # transformers library _A = tokenized_datasets.rename_column('''label''' , '''labels''' ) def collate_fn(_snake_case :Any ): # On TPU it's best to pad everything to the same length or training will be very slow. _A = 128 if accelerator.distributed_type == DistributedType.TPU else None # When using mixed precision we want round multiples of 8/16 if accelerator.mixed_precision == "fp8": _A = 16 elif accelerator.mixed_precision != "no": _A = 8 else: _A = None return tokenizer.pad( _snake_case , padding='''longest''' , max_length=_snake_case , pad_to_multiple_of=_snake_case , return_tensors='''pt''' , ) # Instantiate dataloaders. 
_A = DataLoader( tokenized_datasets['''train'''] , shuffle=_snake_case , collate_fn=_snake_case , batch_size=_snake_case ) _A = DataLoader( tokenized_datasets['''validation'''] , shuffle=_snake_case , collate_fn=_snake_case , batch_size=_snake_case ) return train_dataloader, eval_dataloader # For testing only if os.environ.get("""TESTING_MOCKED_DATALOADERS""", None) == "1": from accelerate.test_utils.training import mocked_dataloaders UpperCAmelCase_ = mocked_dataloaders # noqa: F811 def SCREAMING_SNAKE_CASE_ ( _snake_case :List[str] , _snake_case :str ) -> Optional[int]: # For testing only if os.environ.get('''TESTING_MOCKED_DATALOADERS''' , _snake_case ) == "1": _A = 2 # Initialize Accelerator # New Code # # We pass in "all" to `log_with` to grab all available trackers in the environment # Note: If using a custom `Tracker` class, should be passed in here such as: # >>> log_with = ["all", MyCustomTrackerClassInstance()] if args.with_tracking: _A = Accelerator( cpu=args.cpu , mixed_precision=args.mixed_precision , log_with='''all''' , project_dir=args.project_dir ) else: _A = Accelerator(cpu=args.cpu , mixed_precision=args.mixed_precision ) # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs _A = config['''lr'''] _A = int(config['''num_epochs'''] ) _A = int(config['''seed'''] ) _A = int(config['''batch_size'''] ) set_seed(_snake_case ) _A , _A = get_dataloaders(_snake_case , _snake_case ) _A = evaluate.load('''glue''' , '''mrpc''' ) # If the batch size is too big we use gradient accumulation _A = 1 if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU: _A = batch_size // MAX_GPU_BATCH_SIZE _A = MAX_GPU_BATCH_SIZE # Instantiate the model (we build the model here so that the seed also control new weights initialization) _A = AutoModelForSequenceClassification.from_pretrained('''bert-base-cased''' , return_dict=_snake_case ) # We could avoid this line since the accelerator is set with `device_placement=True` (default value). # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that). _A = model.to(accelerator.device ) # Instantiate optimizer _A = AdamW(params=model.parameters() , lr=_snake_case ) # Instantiate scheduler _A = get_linear_schedule_with_warmup( optimizer=_snake_case , num_warmup_steps=100 , num_training_steps=(len(_snake_case ) * num_epochs) // gradient_accumulation_steps , ) # Prepare everything # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the # prepare method. _A , _A , _A , _A , _A = accelerator.prepare( _snake_case , _snake_case , _snake_case , _snake_case , _snake_case ) # New Code # # We need to initialize the trackers we use. Overall configurations can also be stored if args.with_tracking: _A = os.path.split(_snake_case )[-1].split('''.''' )[0] accelerator.init_trackers(_snake_case , _snake_case ) # Now we train the model for epoch in range(_snake_case ): model.train() # New Code # # For our tracking example, we will log the total loss of each epoch if args.with_tracking: _A = 0 for step, batch in enumerate(_snake_case ): # We could avoid this line since we set the accelerator with `device_placement=True`. 
batch.to(accelerator.device ) _A = model(**_snake_case ) _A = outputs.loss # New Code # if args.with_tracking: total_loss += loss.detach().float() _A = loss / gradient_accumulation_steps accelerator.backward(_snake_case ) if step % gradient_accumulation_steps == 0: optimizer.step() lr_scheduler.step() optimizer.zero_grad() model.eval() for step, batch in enumerate(_snake_case ): # We could avoid this line since we set the accelerator with `device_placement=True` (the default). batch.to(accelerator.device ) with torch.no_grad(): _A = model(**_snake_case ) _A = outputs.logits.argmax(dim=-1 ) _A , _A = accelerator.gather_for_metrics((predictions, batch['''labels''']) ) metric.add_batch( predictions=_snake_case , references=_snake_case , ) _A = metric.compute() # Use accelerator.print to print only on the main process. accelerator.print(F'''epoch {epoch}:''' , _snake_case ) # New Code # # To actually log, we call `Accelerator.log` # The values passed can be of `str`, `int`, `float` or `dict` of `str` to `float`/`int` if args.with_tracking: accelerator.log( { '''accuracy''': eval_metric['''accuracy'''], '''f1''': eval_metric['''f1'''], '''train_loss''': total_loss.item() / len(_snake_case ), '''epoch''': epoch, } , step=_snake_case , ) # New Code # # When a run is finished, you should call `accelerator.end_training()` # to close all of the open trackers if args.with_tracking: accelerator.end_training() def SCREAMING_SNAKE_CASE_ ( ) -> List[Any]: _A = argparse.ArgumentParser(description='''Simple example of training script.''' ) parser.add_argument( '''--mixed_precision''' , type=_snake_case , default=_snake_case , choices=['''no''', '''fp16''', '''bf16''', '''fp8'''] , help='''Whether to use mixed precision. Choose''' '''between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10.''' '''and an Nvidia Ampere GPU.''' , ) parser.add_argument('''--cpu''' , action='''store_true''' , help='''If passed, will train on the CPU.''' ) parser.add_argument( '''--with_tracking''' , action='''store_true''' , help='''Whether to load in all available experiment trackers from the environment and use them for logging.''' , ) parser.add_argument( '''--project_dir''' , type=_snake_case , default='''logs''' , help='''Location on where to store experiment tracking logs` and relevent project information''' , ) _A = parser.parse_args() _A = {'''lr''': 2E-5, '''num_epochs''': 3, '''seed''': 42, '''batch_size''': 16} training_function(_snake_case , _snake_case ) if __name__ == "__main__": main()
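# A stripped-down sketch of the gradient-accumulation pattern used in the training
# loop above (assumes torch; model, optimizer, and data below are illustrative):
# scale each loss by the accumulation factor and only step/zero the optimizer
# every `accumulation_steps` batches.
import torch


def demo_accumulation_loop(model, optimizer, batches, accumulation_steps=2):
    for step, (x, y) in enumerate(batches):
        loss = torch.nn.functional.mse_loss(model(x), y) / accumulation_steps
        loss.backward()
        if step % accumulation_steps == 0:
            optimizer.step()
            optimizer.zero_grad()


if __name__ == "__main__":
    model = torch.nn.Linear(4, 1)
    optimizer = torch.optim.SGD(model.parameters(), lr=0.1)
    batches = [(torch.randn(8, 4), torch.randn(8, 1)) for _ in range(4)]
    demo_accumulation_loop(model, optimizer, batches)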
2
import json import os import shutil import tempfile import unittest import numpy as np import pytest from transformers import BertTokenizer, BertTokenizerFast from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES from transformers.testing_utils import require_vision from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available if is_vision_available(): from PIL import Image from transformers import AlignProcessor, EfficientNetImageProcessor @require_vision class lowerCamelCase__ ( unittest.TestCase): """simple docstring""" def snake_case_ ( self : Tuple ) -> Optional[int]: _A = tempfile.mkdtemp() _A = [ '''[UNK]''', '''[CLS]''', '''[SEP]''', '''[PAD]''', '''[MASK]''', '''want''', '''##want''', '''##ed''', '''wa''', '''un''', '''runn''', '''##ing''', ''',''', '''low''', '''lowest''', ] _A = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] ) with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer: vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) ) _A = { '''do_resize''': True, '''size''': 20, '''do_center_crop''': True, '''crop_size''': 18, '''do_normalize''': True, '''image_mean''': [0.4814_5466, 0.457_8275, 0.4082_1073], '''image_std''': [0.2686_2954, 0.2613_0258, 0.2757_7711], } _A = os.path.join(self.tmpdirname , __lowerCAmelCase ) with open(self.image_processor_file , '''w''' , encoding='''utf-8''' ) as fp: json.dump(__lowerCAmelCase , __lowerCAmelCase ) def snake_case_ ( self : Dict , **__lowerCAmelCase : int ) -> Optional[int]: return BertTokenizer.from_pretrained(self.tmpdirname , **__lowerCAmelCase ) def snake_case_ ( self : str , **__lowerCAmelCase : Optional[Any] ) -> Tuple: return BertTokenizerFast.from_pretrained(self.tmpdirname , **__lowerCAmelCase ) def snake_case_ ( self : Tuple , **__lowerCAmelCase : str ) -> Union[str, Any]: return EfficientNetImageProcessor.from_pretrained(self.tmpdirname , **__lowerCAmelCase ) def snake_case_ ( self : Optional[Any] ) -> Optional[int]: shutil.rmtree(self.tmpdirname ) def snake_case_ ( self : int ) -> Optional[Any]: _A = [np.random.randint(2_55 , size=(3, 30, 4_00) , dtype=np.uinta )] _A = [Image.fromarray(np.moveaxis(__lowerCAmelCase , 0 , -1 ) ) for x in image_inputs] return image_inputs def snake_case_ ( self : Dict ) -> List[str]: _A = self.get_tokenizer() _A = self.get_rust_tokenizer() _A = self.get_image_processor() _A = AlignProcessor(tokenizer=__lowerCAmelCase , image_processor=__lowerCAmelCase ) processor_slow.save_pretrained(self.tmpdirname ) _A = AlignProcessor.from_pretrained(self.tmpdirname , use_fast=__lowerCAmelCase ) _A = AlignProcessor(tokenizer=__lowerCAmelCase , image_processor=__lowerCAmelCase ) processor_fast.save_pretrained(self.tmpdirname ) _A = AlignProcessor.from_pretrained(self.tmpdirname ) self.assertEqual(processor_slow.tokenizer.get_vocab() , tokenizer_slow.get_vocab() ) self.assertEqual(processor_fast.tokenizer.get_vocab() , tokenizer_fast.get_vocab() ) self.assertEqual(tokenizer_slow.get_vocab() , tokenizer_fast.get_vocab() ) self.assertIsInstance(processor_slow.tokenizer , __lowerCAmelCase ) self.assertIsInstance(processor_fast.tokenizer , __lowerCAmelCase ) self.assertEqual(processor_slow.image_processor.to_json_string() , image_processor.to_json_string() ) self.assertEqual(processor_fast.image_processor.to_json_string() , image_processor.to_json_string() ) self.assertIsInstance(processor_slow.image_processor , __lowerCAmelCase ) self.assertIsInstance(processor_fast.image_processor , __lowerCAmelCase ) def snake_case_ ( 
self : List[Any] ) -> List[str]: _A = AlignProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() ) processor.save_pretrained(self.tmpdirname ) _A = self.get_tokenizer(bos_token='''(BOS)''' , eos_token='''(EOS)''' ) _A = self.get_image_processor(do_normalize=__lowerCAmelCase , padding_value=1.0 ) _A = AlignProcessor.from_pretrained( self.tmpdirname , bos_token='''(BOS)''' , eos_token='''(EOS)''' , do_normalize=__lowerCAmelCase , padding_value=1.0 ) self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() ) self.assertIsInstance(processor.tokenizer , __lowerCAmelCase ) self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() ) self.assertIsInstance(processor.image_processor , __lowerCAmelCase ) def snake_case_ ( self : str ) -> List[Any]: _A = self.get_image_processor() _A = self.get_tokenizer() _A = AlignProcessor(tokenizer=__lowerCAmelCase , image_processor=__lowerCAmelCase ) _A = self.prepare_image_inputs() _A = image_processor(__lowerCAmelCase , return_tensors='''np''' ) _A = processor(images=__lowerCAmelCase , return_tensors='''np''' ) for key in input_image_proc.keys(): self.assertAlmostEqual(input_image_proc[key].sum() , input_processor[key].sum() , delta=1E-2 ) def snake_case_ ( self : Union[str, Any] ) -> Dict: _A = self.get_image_processor() _A = self.get_tokenizer() _A = AlignProcessor(tokenizer=__lowerCAmelCase , image_processor=__lowerCAmelCase ) _A = '''lower newer''' _A = processor(text=__lowerCAmelCase ) _A = tokenizer(__lowerCAmelCase , padding='''max_length''' , max_length=64 ) for key in encoded_tok.keys(): self.assertListEqual(encoded_tok[key] , encoded_processor[key] ) def snake_case_ ( self : List[str] ) -> Any: _A = self.get_image_processor() _A = self.get_tokenizer() _A = AlignProcessor(tokenizer=__lowerCAmelCase , image_processor=__lowerCAmelCase ) _A = '''lower newer''' _A = self.prepare_image_inputs() _A = processor(text=__lowerCAmelCase , images=__lowerCAmelCase ) self.assertListEqual(list(inputs.keys() ) , ['''input_ids''', '''token_type_ids''', '''attention_mask''', '''pixel_values'''] ) # test if it raises when no input is passed with pytest.raises(__lowerCAmelCase ): processor() def snake_case_ ( self : Optional[Any] ) -> str: _A = self.get_image_processor() _A = self.get_tokenizer() _A = AlignProcessor(tokenizer=__lowerCAmelCase , image_processor=__lowerCAmelCase ) _A = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]] _A = processor.batch_decode(__lowerCAmelCase ) _A = tokenizer.batch_decode(__lowerCAmelCase ) self.assertListEqual(__lowerCAmelCase , __lowerCAmelCase ) def snake_case_ ( self : str ) -> str: _A = self.get_image_processor() _A = self.get_tokenizer() _A = AlignProcessor(tokenizer=__lowerCAmelCase , image_processor=__lowerCAmelCase ) _A = '''lower newer''' _A = self.prepare_image_inputs() _A = processor(text=__lowerCAmelCase , images=__lowerCAmelCase ) self.assertListEqual(list(inputs.keys() ) , processor.model_input_names )
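# A minimal save/reload round-trip sketch mirroring what the processor tests above
# assert (assumes `transformers` is installed; the tiny throwaway vocab is
# illustrative only).
import os
import tempfile

from transformers import BertTokenizer

if __name__ == "__main__":
    with tempfile.TemporaryDirectory() as tmpdir:
        vocab_path = os.path.join(tmpdir, "vocab.txt")
        with open(vocab_path, "w", encoding="utf-8") as f:
            f.write("\n".join(["[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]", "low", "##er"]) + "\n")
        tokenizer = BertTokenizer(vocab_path)
        tokenizer.save_pretrained(tmpdir)
        reloaded = BertTokenizer.from_pretrained(tmpdir)
        assert reloaded.get_vocab() == tokenizer.get_vocab()
        print("round-trip OK")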
2
1
def is_ip_va_address_valid(ip_va_address: str) -> bool:
    octets = [int(i) for i in ip_va_address.split('''.''') if i.isdigit()]
    return len(octets) == 4 and all(0 <= octet <= 254 for octet in octets)


if __name__ == "__main__":
    ip = input().strip()
    valid_or_invalid = """valid""" if is_ip_va_address_valid(ip) else """invalid"""
    print(f'{ip} is a {valid_or_invalid} IP v4 address.')
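# A few spot checks for the validator above. Note that, as written, it accepts
# octets up to 254 only, mirroring the original bounds check.
if __name__ == "__main__":
    assert is_ip_va_address_valid("192.168.0.1")
    assert not is_ip_va_address_valid("300.1.1.1")  # octet out of range
    assert not is_ip_va_address_valid("1.2.3")      # too few octets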
2
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

OPENAI_GPT_PRETRAINED_CONFIG_ARCHIVE_MAP = {"""openai-gpt""": """https://huggingface.co/openai-gpt/resolve/main/config.json"""}


class OpenAIGPTConfig(PretrainedConfig):
    """simple docstring"""

    model_type = "openai-gpt"
    attribute_map = {
        "max_position_embeddings": "n_positions",
        "hidden_size": "n_embd",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(
        self,
        vocab_size=40478,
        n_positions=512,
        n_embd=768,
        n_layer=12,
        n_head=12,
        afn="gelu",
        resid_pdrop=0.1,
        embd_pdrop=0.1,
        attn_pdrop=0.1,
        layer_norm_epsilon=1e-5,
        initializer_range=0.02,
        summary_type="cls_index",
        summary_use_proj=True,
        summary_activation=None,
        summary_proj_to_labels=True,
        summary_first_dropout=0.1,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.afn = afn
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.summary_type = summary_type
        self.summary_use_proj = summary_use_proj
        self.summary_activation = summary_activation
        self.summary_first_dropout = summary_first_dropout
        self.summary_proj_to_labels = summary_proj_to_labels
        super().__init__(**kwargs)
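# A small usage sketch (assumes `transformers` is installed; run standalone, not
# from inside this package): instantiate the config with its defaults and read a
# value back through the `attribute_map` aliases.
from transformers import OpenAIGPTConfig

if __name__ == "__main__":
    config = OpenAIGPTConfig()
    assert config.hidden_size == config.n_embd         # alias via attribute_map
    assert config.num_hidden_layers == config.n_layer
    print(config.model_type, config.n_positions)       # openai-gpt 512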
2
1
import os import unicodedata from shutil import copyfile from typing import Any, Dict, List, Optional, Tuple import sentencepiece as spm from ...tokenization_utils import AddedToken, PreTrainedTokenizer from ...utils import logging UpperCAmelCase_ = logging.get_logger(__name__) UpperCAmelCase_ = {"""vocab_file""": """spiece.model"""} UpperCAmelCase_ = { """vocab_file""": { """albert-base-v1""": """https://huggingface.co/albert-base-v1/resolve/main/spiece.model""", """albert-large-v1""": """https://huggingface.co/albert-large-v1/resolve/main/spiece.model""", """albert-xlarge-v1""": """https://huggingface.co/albert-xlarge-v1/resolve/main/spiece.model""", """albert-xxlarge-v1""": """https://huggingface.co/albert-xxlarge-v1/resolve/main/spiece.model""", """albert-base-v2""": """https://huggingface.co/albert-base-v2/resolve/main/spiece.model""", """albert-large-v2""": """https://huggingface.co/albert-large-v2/resolve/main/spiece.model""", """albert-xlarge-v2""": """https://huggingface.co/albert-xlarge-v2/resolve/main/spiece.model""", """albert-xxlarge-v2""": """https://huggingface.co/albert-xxlarge-v2/resolve/main/spiece.model""", } } UpperCAmelCase_ = { """albert-base-v1""": 5_1_2, """albert-large-v1""": 5_1_2, """albert-xlarge-v1""": 5_1_2, """albert-xxlarge-v1""": 5_1_2, """albert-base-v2""": 5_1_2, """albert-large-v2""": 5_1_2, """albert-xlarge-v2""": 5_1_2, """albert-xxlarge-v2""": 5_1_2, } UpperCAmelCase_ = """▁""" class lowerCamelCase__ ( _A): """simple docstring""" a__ : Tuple = VOCAB_FILES_NAMES a__ : str = PRETRAINED_VOCAB_FILES_MAP a__ : int = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES def __init__( self : Dict , __lowerCAmelCase : List[str] , __lowerCAmelCase : Optional[int]=True , __lowerCAmelCase : Tuple=True , __lowerCAmelCase : Optional[Any]=False , __lowerCAmelCase : Optional[Any]="[CLS]" , __lowerCAmelCase : List[Any]="[SEP]" , __lowerCAmelCase : List[str]="<unk>" , __lowerCAmelCase : str="[SEP]" , __lowerCAmelCase : Optional[Any]="<pad>" , __lowerCAmelCase : int="[CLS]" , __lowerCAmelCase : List[str]="[MASK]" , __lowerCAmelCase : Optional[Dict[str, Any]] = None , **__lowerCAmelCase : List[Any] , ) -> None: # Mask token behave like a normal word, i.e. include the space before it and # is included in the raw text, there should be a match in a non-normalized sentence. 
_A = ( AddedToken(__lowerCAmelCase , lstrip=__lowerCAmelCase , rstrip=__lowerCAmelCase , normalized=__lowerCAmelCase ) if isinstance(__lowerCAmelCase , __lowerCAmelCase ) else mask_token ) _A = {} if sp_model_kwargs is None else sp_model_kwargs super().__init__( do_lower_case=__lowerCAmelCase , remove_space=__lowerCAmelCase , keep_accents=__lowerCAmelCase , bos_token=__lowerCAmelCase , eos_token=__lowerCAmelCase , unk_token=__lowerCAmelCase , sep_token=__lowerCAmelCase , pad_token=__lowerCAmelCase , cls_token=__lowerCAmelCase , mask_token=__lowerCAmelCase , sp_model_kwargs=self.sp_model_kwargs , **__lowerCAmelCase , ) _A = do_lower_case _A = remove_space _A = keep_accents _A = vocab_file _A = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(__lowerCAmelCase ) @property def snake_case_ ( self : List[Any] ) -> int: return len(self.sp_model ) def snake_case_ ( self : Optional[int] ) -> List[str]: _A = {self.convert_ids_to_tokens(__lowerCAmelCase ): i for i in range(self.vocab_size )} vocab.update(self.added_tokens_encoder ) return vocab def __getstate__( self : str ) -> str: _A = self.__dict__.copy() _A = None return state def __setstate__( self : Dict , __lowerCAmelCase : Dict ) -> Tuple: _A = d # for backward compatibility if not hasattr(self , '''sp_model_kwargs''' ): _A = {} _A = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(self.vocab_file ) def snake_case_ ( self : str , __lowerCAmelCase : str ) -> Any: if self.remove_space: _A = ''' '''.join(inputs.strip().split() ) else: _A = inputs _A = outputs.replace('''``''' , '''"''' ).replace('''\'\'''' , '''"''' ) if not self.keep_accents: _A = unicodedata.normalize('''NFKD''' , __lowerCAmelCase ) _A = ''''''.join([c for c in outputs if not unicodedata.combining(__lowerCAmelCase )] ) if self.do_lower_case: _A = outputs.lower() return outputs def snake_case_ ( self : Union[str, Any] , __lowerCAmelCase : str ) -> List[str]: _A = self.preprocess_text(__lowerCAmelCase ) _A = self.sp_model.encode(__lowerCAmelCase , out_type=__lowerCAmelCase ) _A = [] for piece in pieces: if len(__lowerCAmelCase ) > 1 and piece[-1] == str(''',''' ) and piece[-2].isdigit(): _A = self.sp_model.EncodeAsPieces(piece[:-1].replace(__lowerCAmelCase , '''''' ) ) if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE: if len(cur_pieces[0] ) == 1: _A = cur_pieces[1:] else: _A = cur_pieces[0][1:] cur_pieces.append(piece[-1] ) new_pieces.extend(__lowerCAmelCase ) else: new_pieces.append(__lowerCAmelCase ) return new_pieces def snake_case_ ( self : str , __lowerCAmelCase : Optional[int] ) -> Dict: return self.sp_model.PieceToId(__lowerCAmelCase ) def snake_case_ ( self : Optional[Any] , __lowerCAmelCase : Optional[int] ) -> List[Any]: return self.sp_model.IdToPiece(__lowerCAmelCase ) def snake_case_ ( self : Dict , __lowerCAmelCase : Optional[Any] ) -> Optional[Any]: _A = [] _A = '''''' _A = False for token in tokens: # make sure that special tokens are not decoded using sentencepiece model if token in self.all_special_tokens: if not prev_is_special: out_string += " " out_string += self.sp_model.decode(__lowerCAmelCase ) + token _A = True _A = [] else: current_sub_tokens.append(__lowerCAmelCase ) _A = False out_string += self.sp_model.decode(__lowerCAmelCase ) return out_string.strip() def snake_case_ ( self : Dict , __lowerCAmelCase : List[int] , __lowerCAmelCase : Optional[List[int]] = None ) -> List[int]: _A = [self.sep_token_id] _A = [self.cls_token_id] if token_ids_a is None: return cls + token_ids_a + sep 
return cls + token_ids_a + sep + token_ids_a + sep def snake_case_ ( self : Dict , __lowerCAmelCase : List[int] , __lowerCAmelCase : Optional[List[int]] = None , __lowerCAmelCase : bool = False ) -> List[int]: if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=__lowerCAmelCase , token_ids_a=__lowerCAmelCase , already_has_special_tokens=__lowerCAmelCase ) if token_ids_a is not None: return [1] + ([0] * len(__lowerCAmelCase )) + [1] + ([0] * len(__lowerCAmelCase )) + [1] return [1] + ([0] * len(__lowerCAmelCase )) + [1] def snake_case_ ( self : str , __lowerCAmelCase : List[int] , __lowerCAmelCase : Optional[List[int]] = None ) -> List[int]: _A = [self.sep_token_id] _A = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] def snake_case_ ( self : Tuple , __lowerCAmelCase : str , __lowerCAmelCase : Optional[str] = None ) -> Tuple[str]: if not os.path.isdir(__lowerCAmelCase ): logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' ) return _A = os.path.join( __lowerCAmelCase , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(__lowerCAmelCase ) and os.path.isfile(self.vocab_file ): copyfile(self.vocab_file , __lowerCAmelCase ) elif not os.path.isfile(self.vocab_file ): with open(__lowerCAmelCase , '''wb''' ) as fi: _A = self.sp_model.serialized_model_proto() fi.write(__lowerCAmelCase ) return (out_vocab_file,)
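# A pure-Python sketch of the [CLS]/[SEP] pair formatting implemented by the
# tokenizer above: a single sequence becomes `[CLS] A [SEP]`, a pair becomes
# `[CLS] A [SEP] B [SEP]`. The helper name and token ids are illustrative only.
def demo_build_inputs(ids_a, ids_b=None, cls_id=2, sep_id=3):
    if ids_b is None:
        return [cls_id] + ids_a + [sep_id]
    return [cls_id] + ids_a + [sep_id] + ids_b + [sep_id]


if __name__ == "__main__":
    print(demo_build_inputs([5, 6]))       # [2, 5, 6, 3]
    print(demo_build_inputs([5, 6], [7]))  # [2, 5, 6, 3, 7, 3]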
import json import pathlib import unittest import numpy as np from transformers.testing_utils import require_torch, require_vision, slow from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import DeformableDetrImageProcessor class lowerCamelCase__ ( unittest.TestCase): """simple docstring""" def __init__( self : Optional[int] , __lowerCAmelCase : List[Any] , __lowerCAmelCase : Dict=7 , __lowerCAmelCase : Tuple=3 , __lowerCAmelCase : int=30 , __lowerCAmelCase : Dict=4_00 , __lowerCAmelCase : Optional[Any]=True , __lowerCAmelCase : List[str]=None , __lowerCAmelCase : Dict=True , __lowerCAmelCase : Optional[Any]=[0.5, 0.5, 0.5] , __lowerCAmelCase : Dict=[0.5, 0.5, 0.5] , __lowerCAmelCase : List[str]=True , __lowerCAmelCase : List[str]=1 / 2_55 , __lowerCAmelCase : int=True , ) -> List[str]: # by setting size["longest_edge"] > max_resolution we're effectively not testing this :p _A = size if size is not None else {'''shortest_edge''': 18, '''longest_edge''': 13_33} _A = parent _A = batch_size _A = num_channels _A = min_resolution _A = max_resolution _A = do_resize _A = size _A = do_normalize _A = image_mean _A = image_std _A = do_rescale _A = rescale_factor _A = do_pad def snake_case_ ( self : Optional[int] ) -> Any: return { "do_resize": self.do_resize, "size": self.size, "do_normalize": self.do_normalize, "image_mean": self.image_mean, "image_std": self.image_std, "do_rescale": self.do_rescale, "rescale_factor": self.rescale_factor, "do_pad": self.do_pad, } def snake_case_ ( self : List[Any] , __lowerCAmelCase : Tuple , __lowerCAmelCase : str=False ) -> Dict: if not batched: _A = image_inputs[0] if isinstance(__lowerCAmelCase , Image.Image ): _A , _A = image.size else: _A , _A = image.shape[1], image.shape[2] if w < h: _A = int(self.size['''shortest_edge'''] * h / w ) _A = self.size['''shortest_edge'''] elif w > h: _A = self.size['''shortest_edge'''] _A = int(self.size['''shortest_edge'''] * w / h ) else: _A = self.size['''shortest_edge'''] _A = self.size['''shortest_edge'''] else: _A = [] for image in image_inputs: _A , _A = self.get_expected_values([image] ) expected_values.append((expected_height, expected_width) ) _A = max(__lowerCAmelCase , key=lambda __lowerCAmelCase : item[0] )[0] _A = max(__lowerCAmelCase , key=lambda __lowerCAmelCase : item[1] )[1] return expected_height, expected_width @require_torch @require_vision class lowerCamelCase__ ( _A , unittest.TestCase): """simple docstring""" a__ : Any = DeformableDetrImageProcessor if is_vision_available() else None def snake_case_ ( self : Optional[int] ) -> Any: _A = DeformableDetrImageProcessingTester(self ) @property def snake_case_ ( self : Union[str, Any] ) -> Dict: return self.image_processor_tester.prepare_image_processor_dict() def snake_case_ ( self : Optional[int] ) -> List[str]: _A = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(__lowerCAmelCase , '''image_mean''' ) ) self.assertTrue(hasattr(__lowerCAmelCase , '''image_std''' ) ) self.assertTrue(hasattr(__lowerCAmelCase , '''do_normalize''' ) ) self.assertTrue(hasattr(__lowerCAmelCase , '''do_resize''' ) ) self.assertTrue(hasattr(__lowerCAmelCase , '''do_rescale''' ) ) self.assertTrue(hasattr(__lowerCAmelCase , '''do_pad''' ) ) self.assertTrue(hasattr(__lowerCAmelCase , '''size''' ) ) def snake_case_ ( self : List[str] ) 
-> int: _A = self.image_processing_class.from_dict(self.image_processor_dict ) self.assertEqual(image_processor.size , {'''shortest_edge''': 18, '''longest_edge''': 13_33} ) self.assertEqual(image_processor.do_pad , __lowerCAmelCase ) _A = self.image_processing_class.from_dict( self.image_processor_dict , size=42 , max_size=84 , pad_and_return_pixel_mask=__lowerCAmelCase ) self.assertEqual(image_processor.size , {'''shortest_edge''': 42, '''longest_edge''': 84} ) self.assertEqual(image_processor.do_pad , __lowerCAmelCase ) def snake_case_ ( self : Any ) -> Union[str, Any]: pass def snake_case_ ( self : List[str] ) -> Optional[int]: # Initialize image_processing _A = self.image_processing_class(**self.image_processor_dict ) # create random PIL images _A = prepare_image_inputs(self.image_processor_tester , equal_resolution=__lowerCAmelCase ) for image in image_inputs: self.assertIsInstance(__lowerCAmelCase , Image.Image ) # Test not batched input _A = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values _A , _A = self.image_processor_tester.get_expected_values(__lowerCAmelCase ) self.assertEqual( encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , ) # Test batched _A , _A = self.image_processor_tester.get_expected_values(__lowerCAmelCase , batched=__lowerCAmelCase ) _A = image_processing(__lowerCAmelCase , return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, expected_height, expected_width, ) , ) def snake_case_ ( self : Tuple ) -> int: # Initialize image_processing _A = self.image_processing_class(**self.image_processor_dict ) # create random numpy tensors _A = prepare_image_inputs(self.image_processor_tester , equal_resolution=__lowerCAmelCase , numpify=__lowerCAmelCase ) for image in image_inputs: self.assertIsInstance(__lowerCAmelCase , np.ndarray ) # Test not batched input _A = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values _A , _A = self.image_processor_tester.get_expected_values(__lowerCAmelCase ) self.assertEqual( encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , ) # Test batched _A = image_processing(__lowerCAmelCase , return_tensors='''pt''' ).pixel_values _A , _A = self.image_processor_tester.get_expected_values(__lowerCAmelCase , batched=__lowerCAmelCase ) self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, expected_height, expected_width, ) , ) def snake_case_ ( self : Optional[Any] ) -> int: # Initialize image_processing _A = self.image_processing_class(**self.image_processor_dict ) # create random PyTorch tensors _A = prepare_image_inputs(self.image_processor_tester , equal_resolution=__lowerCAmelCase , torchify=__lowerCAmelCase ) for image in image_inputs: self.assertIsInstance(__lowerCAmelCase , torch.Tensor ) # Test not batched input _A = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values _A , _A = self.image_processor_tester.get_expected_values(__lowerCAmelCase ) self.assertEqual( encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , ) # Test batched _A = image_processing(__lowerCAmelCase , return_tensors='''pt''' ).pixel_values _A , _A = self.image_processor_tester.get_expected_values(__lowerCAmelCase , batched=__lowerCAmelCase ) self.assertEqual( encoded_images.shape 
, ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, expected_height, expected_width, ) , ) @slow def snake_case_ ( self : Optional[Any] ) -> Optional[int]: # prepare image and target _A = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' ) with open('''./tests/fixtures/tests_samples/COCO/coco_annotations.txt''' , '''r''' ) as f: _A = json.loads(f.read() ) _A = {'''image_id''': 3_97_69, '''annotations''': target} # encode them _A = DeformableDetrImageProcessor() _A = image_processing(images=__lowerCAmelCase , annotations=__lowerCAmelCase , return_tensors='''pt''' ) # verify pixel values _A = torch.Size([1, 3, 8_00, 10_66] ) self.assertEqual(encoding['''pixel_values'''].shape , __lowerCAmelCase ) _A = torch.tensor([0.2796, 0.3138, 0.3481] ) self.assertTrue(torch.allclose(encoding['''pixel_values'''][0, 0, 0, :3] , __lowerCAmelCase , atol=1E-4 ) ) # verify area _A = torch.tensor([5887.9600, 1_1250.2061, 48_9353.8438, 83_7122.7500, 14_7967.5156, 16_5732.3438] ) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''area'''] , __lowerCAmelCase ) ) # verify boxes _A = torch.Size([6, 4] ) self.assertEqual(encoding['''labels'''][0]['''boxes'''].shape , __lowerCAmelCase ) _A = torch.tensor([0.5503, 0.2765, 0.0604, 0.2215] ) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''boxes'''][0] , __lowerCAmelCase , atol=1E-3 ) ) # verify image_id _A = torch.tensor([3_97_69] ) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''image_id'''] , __lowerCAmelCase ) ) # verify is_crowd _A = torch.tensor([0, 0, 0, 0, 0, 0] ) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''iscrowd'''] , __lowerCAmelCase ) ) # verify class_labels _A = torch.tensor([75, 75, 63, 65, 17, 17] ) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''class_labels'''] , __lowerCAmelCase ) ) # verify orig_size _A = torch.tensor([4_80, 6_40] ) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''orig_size'''] , __lowerCAmelCase ) ) # verify size _A = torch.tensor([8_00, 10_66] ) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''size'''] , __lowerCAmelCase ) ) @slow def snake_case_ ( self : List[str] ) -> List[str]: # prepare image, target and masks_path _A = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' ) with open('''./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt''' , '''r''' ) as f: _A = json.loads(f.read() ) _A = {'''file_name''': '''000000039769.png''', '''image_id''': 3_97_69, '''segments_info''': target} _A = pathlib.Path('''./tests/fixtures/tests_samples/COCO/coco_panoptic''' ) # encode them _A = DeformableDetrImageProcessor(format='''coco_panoptic''' ) _A = image_processing(images=__lowerCAmelCase , annotations=__lowerCAmelCase , masks_path=__lowerCAmelCase , return_tensors='''pt''' ) # verify pixel values _A = torch.Size([1, 3, 8_00, 10_66] ) self.assertEqual(encoding['''pixel_values'''].shape , __lowerCAmelCase ) _A = torch.tensor([0.2796, 0.3138, 0.3481] ) self.assertTrue(torch.allclose(encoding['''pixel_values'''][0, 0, 0, :3] , __lowerCAmelCase , atol=1E-4 ) ) # verify area _A = torch.tensor([14_7979.6875, 16_5527.0469, 48_4638.5938, 1_1292.9375, 5879.6562, 7634.1147] ) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''area'''] , __lowerCAmelCase ) ) # verify boxes _A = torch.Size([6, 4] ) self.assertEqual(encoding['''labels'''][0]['''boxes'''].shape , __lowerCAmelCase ) _A = torch.tensor([0.2625, 0.5437, 0.4688, 0.8625] ) 
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''boxes'''][0] , __lowerCAmelCase , atol=1E-3 ) ) # verify image_id _A = torch.tensor([3_97_69] ) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''image_id'''] , __lowerCAmelCase ) ) # verify is_crowd _A = torch.tensor([0, 0, 0, 0, 0, 0] ) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''iscrowd'''] , __lowerCAmelCase ) ) # verify class_labels _A = torch.tensor([17, 17, 63, 75, 75, 93] ) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''class_labels'''] , __lowerCAmelCase ) ) # verify masks _A = 82_28_73 self.assertEqual(encoding['''labels'''][0]['''masks'''].sum().item() , __lowerCAmelCase ) # verify orig_size _A = torch.tensor([4_80, 6_40] ) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''orig_size'''] , __lowerCAmelCase ) ) # verify size _A = torch.tensor([8_00, 10_66] ) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''size'''] , __lowerCAmelCase ) )
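# A short, hedged sketch of the preprocessing API the tests above exercise;
# it assumes `transformers` with vision extras is installed and uses a dummy
# NumPy image, so no checkpoint download is needed.
import numpy as np
from transformers import DeformableDetrImageProcessor

image_processor = DeformableDetrImageProcessor(size={"shortest_edge": 800, "longest_edge": 1333})
dummy_image = np.random.randint(0, 256, (480, 640, 3), dtype=np.uint8)  # H x W x C
encoding = image_processor(images=dummy_image, return_tensors="pt")
print(encoding["pixel_values"].shape)  # e.g. torch.Size([1, 3, 800, 1066]) after resize + pad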
import numpy as np
import torch
import tqdm

from ...models.unet_1d import UNet1DModel
from ...pipelines import DiffusionPipeline
from ...utils import randn_tensor
from ...utils.dummy_pt_objects import DDPMScheduler


class ValueGuidedRLPipeline(DiffusionPipeline):
    def __init__(
        self,
        value_function: UNet1DModel,
        unet: UNet1DModel,
        scheduler: DDPMScheduler,
        env,
    ):
        super().__init__()
        self.value_function = value_function
        self.unet = unet
        self.scheduler = scheduler
        self.env = env
        self.data = env.get_dataset()
        self.means = {}
        for key in self.data.keys():
            try:
                self.means[key] = self.data[key].mean()
            except:  # noqa: E722
                pass
        self.stds = {}
        for key in self.data.keys():
            try:
                self.stds[key] = self.data[key].std()
            except:  # noqa: E722
                pass
        self.state_dim = env.observation_space.shape[0]
        self.action_dim = env.action_space.shape[0]

    def normalize(self, x_in, key):
        return (x_in - self.means[key]) / self.stds[key]

    def de_normalize(self, x_in, key):
        return x_in * self.stds[key] + self.means[key]

    def to_torch(self, x_in):
        if type(x_in) is dict:
            return {k: self.to_torch(v) for k, v in x_in.items()}
        elif torch.is_tensor(x_in):
            return x_in.to(self.unet.device)
        return torch.tensor(x_in, device=self.unet.device)

    def reset_x0(self, x_in, cond, act_dim):
        # pin the conditioned timesteps (e.g. the current state at t=0)
        for key, val in cond.items():
            x_in[:, key, act_dim:] = val.clone()
        return x_in

    def run_diffusion(self, x, conditions, n_guide_steps, scale):
        batch_size = x.shape[0]
        y = None
        for i in tqdm.tqdm(self.scheduler.timesteps):
            # create batch of timesteps to pass into model
            timesteps = torch.full((batch_size,), i, device=self.unet.device, dtype=torch.long)
            for _ in range(n_guide_steps):
                with torch.enable_grad():
                    x.requires_grad_()

                    # permute to match dimension for pre-trained models
                    y = self.value_function(x.permute(0, 2, 1), timesteps).sample
                    grad = torch.autograd.grad([y.sum()], [x])[0]

                    posterior_variance = self.scheduler._get_variance(i)
                    model_std = torch.exp(0.5 * posterior_variance)
                    grad = model_std * grad
                grad[timesteps < 2] = 0
                x = x.detach()
                x = x + scale * grad
                x = self.reset_x0(x, conditions, self.action_dim)
            prev_x = self.unet(x.permute(0, 2, 1), timesteps).sample.permute(0, 2, 1)

            # TODO: verify deprecation of this kwarg
            x = self.scheduler.step(prev_x, i, x, predict_epsilon=False)["prev_sample"]

            # apply conditions to the trajectory (set the initial state)
            x = self.reset_x0(x, conditions, self.action_dim)
            x = self.to_torch(x)
        return x, y

    def __call__(self, obs, batch_size=64, planning_horizon=32, n_guide_steps=2, scale=0.1):
        # normalize the observations and create batch dimension
        obs = self.normalize(obs, "observations")
        obs = obs[None].repeat(batch_size, axis=0)

        conditions = {0: self.to_torch(obs)}
        shape = (batch_size, planning_horizon, self.state_dim + self.action_dim)

        # generate initial noise and apply our conditions (to make the trajectories start at current state)
        x1 = randn_tensor(shape, device=self.unet.device)
        x = self.reset_x0(x1, conditions, self.action_dim)
        x = self.to_torch(x)

        # run the diffusion process
        x, y = self.run_diffusion(x, conditions, n_guide_steps, scale)

        # sort output trajectories by value
        sorted_idx = y.argsort(0, descending=True).squeeze()
        sorted_values = x[sorted_idx]
        actions = sorted_values[:, :, : self.action_dim]
        actions = actions.detach().cpu().numpy()
        denorm_actions = self.de_normalize(actions, key="actions")

        # select the action with the highest value
        if y is not None:
            selected_index = 0
        else:
            # if we didn't run value guiding, select a random action
            selected_index = np.random.randint(0, batch_size)

        denorm_actions = denorm_actions[selected_index, 0]
        return denorm_actions
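# A minimal, self-contained check of the normalization round-trip the planner
# above relies on, with hand-made dataset statistics standing in for
# env.get_dataset(); the key name "actions" mirrors the pipeline's usage.
import numpy as np

means = {"actions": np.array([0.5])}
stds = {"actions": np.array([2.0])}

def normalize(x_in, key):
    return (x_in - means[key]) / stds[key]

def de_normalize(x_in, key):
    return x_in * stds[key] + means[key]

x = np.array([1.0, -3.0, 0.25])
assert np.allclose(de_normalize(normalize(x, "actions"), "actions"), x)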
red = 0  # The first color of the flag.
white = 1  # The second color of the flag.
blue = 2  # The third color of the flag.
colors = (red, white, blue)


def dutch_national_flag_sort(sequence: list) -> list:
    """Sort a sequence of 0s, 1s and 2s in-place with a single pass
    (Dijkstra's three-way partition)."""
    if not sequence:
        return []
    if len(sequence) == 1:
        return list(sequence)

    low = 0
    high = len(sequence) - 1
    mid = 0
    while mid <= high:
        if sequence[mid] == colors[0]:
            sequence[low], sequence[mid] = sequence[mid], sequence[low]
            low += 1
            mid += 1
        elif sequence[mid] == colors[1]:
            mid += 1
        elif sequence[mid] == colors[2]:
            sequence[mid], sequence[high] = sequence[high], sequence[mid]
            high -= 1
        else:
            msg = f"The elements inside the sequence must contain only {colors} values"
            raise ValueError(msg)
    return sequence


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    user_input = input("Enter numbers separated by commas:\n").strip()
    unsorted = [int(item.strip()) for item in user_input.split(",")]
    print(f"{dutch_national_flag_sort(unsorted)}")
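# Appended sanity check for dutch_national_flag_sort (the partition is done
# in-place and the sequence is also returned):
example = [2, 0, 1, 0, 2, 1, 1, 0]
assert dutch_national_flag_sort(example) == [0, 0, 0, 1, 1, 1, 2, 2]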
from __future__ import annotations

import math


def is_prime(number: int) -> bool:
    """Checks to see if a number is a prime in O(sqrt(n)) time."""
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False

    # All primes number are in format of 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True


odd_composites = [num for num in range(3, 100_001, 2) if not is_prime(num)]


def compute_nums(n: int) -> list[int]:
    """Return the first n odd composite numbers that cannot be written as the
    sum of a prime and twice a square (Goldbach's other conjecture)."""
    if not isinstance(n, int):
        raise ValueError("n must be an integer")
    if n <= 0:
        raise ValueError("n must be >= 0")

    list_nums = []
    for num in range(len(odd_composites)):
        i = 0
        while 2 * i * i <= odd_composites[num]:
            rem = odd_composites[num] - 2 * i * i
            if is_prime(rem):
                break
            i += 1
        else:
            list_nums.append(odd_composites[num])
            if len(list_nums) == n:
                return list_nums

    return []


def solution() -> int:
    """Return the smallest counterexample to Goldbach's other conjecture."""
    return compute_nums(1)[0]


if __name__ == "__main__":
    print(f"{solution() = }")
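# Appended sanity check: the smallest counterexample to Goldbach's other
# conjecture is known to be 5777, so solution() should recompute exactly that.
assert solution() == 5777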
import itertools
import math


def is_prime(number: int) -> bool:
    """Checks to see if a number is a prime in O(sqrt(n)) time."""
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False

    # All primes number are in format of 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True


def prime_generator():
    """Yield the primes in increasing order."""
    num = 2
    while True:
        if is_prime(num):
            yield num
        num += 1


def solution(nth: int = 10_001) -> int:
    """Return the nth prime number."""
    return next(itertools.islice(prime_generator(), nth - 1, nth))


if __name__ == "__main__":
    print(f"{solution() = }")
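# Appended sanity check: the 6th prime is 13 (2, 3, 5, 7, 11, 13).
assert solution(6) == 13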
import json import os import re import unittest from transformers import CodeGenTokenizer, CodeGenTokenizerFast from transformers.models.codegen.tokenization_codegen import VOCAB_FILES_NAMES from transformers.testing_utils import require_tokenizers, slow from ...test_tokenization_common import TokenizerTesterMixin @require_tokenizers class lowerCamelCase__ ( _A , unittest.TestCase): """simple docstring""" a__ : str = CodeGenTokenizer a__ : Tuple = CodeGenTokenizerFast a__ : List[str] = True a__ : str = {"add_prefix_space": True} a__ : Tuple = False def snake_case_ ( self : Optional[int] ) -> Tuple: super().setUp() # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt _A = [ '''l''', '''o''', '''w''', '''e''', '''r''', '''s''', '''t''', '''i''', '''d''', '''n''', '''\u0120''', '''\u0120l''', '''\u0120n''', '''\u0120lo''', '''\u0120low''', '''er''', '''\u0120lowest''', '''\u0120newer''', '''\u0120wider''', '''<unk>''', '''<|endoftext|>''', ] _A = dict(zip(__lowerCAmelCase , range(len(__lowerCAmelCase ) ) ) ) _A = ['''#version: 0.2''', '''\u0120 l''', '''\u0120l o''', '''\u0120lo w''', '''e r''', ''''''] _A = {'''unk_token''': '''<unk>'''} _A = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] ) _A = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''merges_file'''] ) with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp: fp.write(json.dumps(__lowerCAmelCase ) + '''\n''' ) with open(self.merges_file , '''w''' , encoding='''utf-8''' ) as fp: fp.write('''\n'''.join(__lowerCAmelCase ) ) def snake_case_ ( self : Union[str, Any] , **__lowerCAmelCase : List[Any] ) -> int: kwargs.update(self.special_tokens_map ) return CodeGenTokenizer.from_pretrained(self.tmpdirname , **__lowerCAmelCase ) def snake_case_ ( self : int , **__lowerCAmelCase : int ) -> str: kwargs.update(self.special_tokens_map ) return CodeGenTokenizerFast.from_pretrained(self.tmpdirname , **__lowerCAmelCase ) def snake_case_ ( self : List[str] , __lowerCAmelCase : List[str] ) -> List[Any]: _A = '''lower newer''' _A = '''lower newer''' return input_text, output_text def snake_case_ ( self : Any ) -> Union[str, Any]: _A = CodeGenTokenizer(self.vocab_file , self.merges_file , **self.special_tokens_map ) _A = '''lower newer''' _A = ['''\u0120low''', '''er''', '''\u0120''', '''n''', '''e''', '''w''', '''er'''] _A = tokenizer.tokenize(__lowerCAmelCase , add_prefix_space=__lowerCAmelCase ) self.assertListEqual(__lowerCAmelCase , __lowerCAmelCase ) _A = tokens + [tokenizer.unk_token] _A = [14, 15, 10, 9, 3, 2, 15, 19] self.assertListEqual(tokenizer.convert_tokens_to_ids(__lowerCAmelCase ) , __lowerCAmelCase ) def snake_case_ ( self : Tuple ) -> Union[str, Any]: if not self.test_rust_tokenizer: return _A = self.get_tokenizer() _A = self.get_rust_tokenizer(add_prefix_space=__lowerCAmelCase ) _A = '''lower newer''' # Testing tokenization _A = tokenizer.tokenize(__lowerCAmelCase , add_prefix_space=__lowerCAmelCase ) _A = rust_tokenizer.tokenize(__lowerCAmelCase ) self.assertListEqual(__lowerCAmelCase , __lowerCAmelCase ) # Testing conversion to ids without special tokens _A = tokenizer.encode(__lowerCAmelCase , add_special_tokens=__lowerCAmelCase , add_prefix_space=__lowerCAmelCase ) _A = rust_tokenizer.encode(__lowerCAmelCase , add_special_tokens=__lowerCAmelCase ) self.assertListEqual(__lowerCAmelCase , __lowerCAmelCase ) # Testing conversion to ids with special tokens _A = self.get_rust_tokenizer(add_prefix_space=__lowerCAmelCase ) _A = tokenizer.encode(__lowerCAmelCase , 
add_prefix_space=__lowerCAmelCase ) _A = rust_tokenizer.encode(__lowerCAmelCase ) self.assertListEqual(__lowerCAmelCase , __lowerCAmelCase ) # Testing the unknown token _A = tokens + [rust_tokenizer.unk_token] _A = [14, 15, 10, 9, 3, 2, 15, 19] self.assertListEqual(rust_tokenizer.convert_tokens_to_ids(__lowerCAmelCase ) , __lowerCAmelCase ) def snake_case_ ( self : str , *__lowerCAmelCase : Dict , **__lowerCAmelCase : str ) -> Any: # It's very difficult to mix/test pretokenization with byte-level # And get both CodeGen and Roberta to work at the same time (mostly an issue of adding a space before the string) pass def snake_case_ ( self : Dict , __lowerCAmelCase : int=15 ) -> int: for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(f'''{tokenizer.__class__.__name__} ({pretrained_name})''' ): _A = self.rust_tokenizer_class.from_pretrained(__lowerCAmelCase , **__lowerCAmelCase ) # Simple input _A = '''This is a simple input''' _A = ['''This is a simple input 1''', '''This is a simple input 2'''] _A = ('''This is a simple input''', '''This is a pair''') _A = [ ('''This is a simple input 1''', '''This is a simple input 2'''), ('''This is a simple pair 1''', '''This is a simple pair 2'''), ] # Simple input tests self.assertRaises(__lowerCAmelCase , tokenizer_r.encode , __lowerCAmelCase , max_length=__lowerCAmelCase , padding='''max_length''' ) # Simple input self.assertRaises(__lowerCAmelCase , tokenizer_r.encode_plus , __lowerCAmelCase , max_length=__lowerCAmelCase , padding='''max_length''' ) # Simple input self.assertRaises( __lowerCAmelCase , tokenizer_r.batch_encode_plus , __lowerCAmelCase , max_length=__lowerCAmelCase , padding='''max_length''' , ) # Pair input self.assertRaises(__lowerCAmelCase , tokenizer_r.encode , __lowerCAmelCase , max_length=__lowerCAmelCase , padding='''max_length''' ) # Pair input self.assertRaises(__lowerCAmelCase , tokenizer_r.encode_plus , __lowerCAmelCase , max_length=__lowerCAmelCase , padding='''max_length''' ) # Pair input self.assertRaises( __lowerCAmelCase , tokenizer_r.batch_encode_plus , __lowerCAmelCase , max_length=__lowerCAmelCase , padding='''max_length''' , ) def snake_case_ ( self : List[Any] ) -> Optional[int]: _A = CodeGenTokenizer.from_pretrained(self.tmpdirname , pad_token='''<pad>''' ) # Simple input _A = '''This is a simple input''' _A = ['''This is a simple input looooooooong''', '''This is a simple input'''] _A = ('''This is a simple input''', '''This is a pair''') _A = [ ('''This is a simple input loooooong''', '''This is a simple input'''), ('''This is a simple pair loooooong''', '''This is a simple pair'''), ] _A = tokenizer.pad_token_id _A = tokenizer(__lowerCAmelCase , padding='''max_length''' , max_length=30 , return_tensors='''np''' ) _A = tokenizer(__lowerCAmelCase , padding=__lowerCAmelCase , truncate=__lowerCAmelCase , return_tensors='''np''' ) _A = tokenizer(*__lowerCAmelCase , padding='''max_length''' , max_length=60 , return_tensors='''np''' ) _A = tokenizer(__lowerCAmelCase , padding=__lowerCAmelCase , truncate=__lowerCAmelCase , return_tensors='''np''' ) # s # test single string max_length padding self.assertEqual(out_s['''input_ids'''].shape[-1] , 30 ) self.assertTrue(pad_token_id in out_s['''input_ids'''] ) self.assertTrue(0 in out_s['''attention_mask'''] ) # s2 # test automatic padding self.assertEqual(out_sa['''input_ids'''].shape[-1] , 33 ) # long slice doesn't have padding self.assertFalse(pad_token_id in out_sa['''input_ids'''][0] ) self.assertFalse(0 in out_sa['''attention_mask'''][0] ) # 
short slice does have padding self.assertTrue(pad_token_id in out_sa['''input_ids'''][1] ) self.assertTrue(0 in out_sa['''attention_mask'''][1] ) # p # test single pair max_length padding self.assertEqual(out_p['''input_ids'''].shape[-1] , 60 ) self.assertTrue(pad_token_id in out_p['''input_ids'''] ) self.assertTrue(0 in out_p['''attention_mask'''] ) # p2 # test automatic padding pair self.assertEqual(out_pa['''input_ids'''].shape[-1] , 52 ) # long slice pair doesn't have padding self.assertFalse(pad_token_id in out_pa['''input_ids'''][0] ) self.assertFalse(0 in out_pa['''attention_mask'''][0] ) # short slice pair does have padding self.assertTrue(pad_token_id in out_pa['''input_ids'''][1] ) self.assertTrue(0 in out_pa['''attention_mask'''][1] ) def snake_case_ ( self : List[Any] ) -> int: _A = '''$$$''' _A = CodeGenTokenizer.from_pretrained(self.tmpdirname , bos_token=__lowerCAmelCase , add_bos_token=__lowerCAmelCase ) _A = '''This is a simple input''' _A = ['''This is a simple input 1''', '''This is a simple input 2'''] _A = tokenizer.bos_token_id _A = tokenizer(__lowerCAmelCase ) _A = tokenizer(__lowerCAmelCase ) self.assertEqual(out_s.input_ids[0] , __lowerCAmelCase ) self.assertTrue(all(o[0] == bos_token_id for o in out_sa.input_ids ) ) _A = tokenizer.decode(out_s.input_ids ) _A = tokenizer.batch_decode(out_sa.input_ids ) self.assertEqual(decode_s.split()[0] , __lowerCAmelCase ) self.assertTrue(all(d.split()[0] == bos_token for d in decode_sa ) ) @slow def snake_case_ ( self : Tuple ) -> Any: _A = CodeGenTokenizer.from_pretrained('''Salesforce/codegen-350M-mono''' ) _A = '''\nif len_a > len_b:\n result = a\nelse:\n result = b\n\n\n\n#''' _A = '''\nif len_a > len_b: result = a\nelse: result = b''' _A = tokenizer.encode(__lowerCAmelCase ) _A = ['''^#''', re.escape('''<|endoftext|>''' ), '''^\'\'\'''', '''^"""''', '''\n\n\n'''] _A = tokenizer.decode(__lowerCAmelCase , truncate_before_pattern=__lowerCAmelCase ) self.assertEqual(__lowerCAmelCase , __lowerCAmelCase ) def snake_case_ ( self : Any ) -> Dict: pass
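# A hedged sketch of the decode-time truncation the last test above exercises;
# it assumes the Salesforce/codegen-350M-mono checkpoint can be downloaded.
from transformers import CodeGenTokenizer

codegen_tokenizer = CodeGenTokenizer.from_pretrained("Salesforce/codegen-350M-mono")
ids = codegen_tokenizer.encode("\nif len_a > len_b:\n    result = a\nelse:\n    result = b\n\n\n\n#")
print(codegen_tokenizer.decode(ids, truncate_before_pattern=["^#", "\n\n\n"]))
# -> the decoded text is cut before the first pattern match, as in the test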
import collections import os import re from pathlib import Path UpperCAmelCase_ = """src/transformers""" # Matches is_xxx_available() UpperCAmelCase_ = re.compile(r"""is\_([a-z_]*)_available()""") # Catches a one-line _import_struct = {xxx} UpperCAmelCase_ = re.compile(r"""^_import_structure\s+=\s+\{([^\}]+)\}""") # Catches a line with a key-values pattern: "bla": ["foo", "bar"] UpperCAmelCase_ = re.compile(r"""\s+\"\S*\":\s+\[([^\]]*)\]""") # Catches a line if not is_foo_available UpperCAmelCase_ = re.compile(r"""^\s*if\s+not\s+is\_[a-z_]*\_available\(\)""") # Catches a line _import_struct["bla"].append("foo") UpperCAmelCase_ = re.compile(r"""^\s*_import_structure\[\"\S*\"\]\.append\(\"(\S*)\"\)""") # Catches a line _import_struct["bla"].extend(["foo", "bar"]) or _import_struct["bla"] = ["foo", "bar"] UpperCAmelCase_ = re.compile(r"""^\s*_import_structure\[\S*\](?:\.extend\(|\s*=\s+)\[([^\]]*)\]""") # Catches a line with an object between quotes and a comma: "MyModel", UpperCAmelCase_ = re.compile(r"""^\s+\"([^\"]+)\",""") # Catches a line with objects between brackets only: ["foo", "bar"], UpperCAmelCase_ = re.compile(r"""^\s+\[([^\]]+)\]""") # Catches a line with from foo import bar, bla, boo UpperCAmelCase_ = re.compile(r"""\s+from\s+\S*\s+import\s+([^\(\s].*)\n""") # Catches a line with try: UpperCAmelCase_ = re.compile(r"""^\s*try:""") # Catches a line with else: UpperCAmelCase_ = re.compile(r"""^\s*else:""") def SCREAMING_SNAKE_CASE_ ( _snake_case :Optional[int] ) -> Any: if _re_test_backend.search(_snake_case ) is None: return None _A = [b[0] for b in _re_backend.findall(_snake_case )] backends.sort() return "_and_".join(_snake_case ) def SCREAMING_SNAKE_CASE_ ( _snake_case :Any ) -> Any: with open(_snake_case , '''r''' , encoding='''utf-8''' , newline='''\n''' ) as f: _A = f.readlines() _A = 0 while line_index < len(_snake_case ) and not lines[line_index].startswith('''_import_structure = {''' ): line_index += 1 # If this is a traditional init, just return. if line_index >= len(_snake_case ): return None # First grab the objects without a specific backend in _import_structure _A = [] while not lines[line_index].startswith('''if TYPE_CHECKING''' ) and find_backend(lines[line_index] ) is None: _A = lines[line_index] # If we have everything on a single line, let's deal with it. if _re_one_line_import_struct.search(_snake_case ): _A = _re_one_line_import_struct.search(_snake_case ).groups()[0] _A = re.findall(r'''\[([^\]]+)\]''' , _snake_case ) for imp in imports: objects.extend([obj[1:-1] for obj in imp.split(''', ''' )] ) line_index += 1 continue _A = _re_import_struct_key_value.search(_snake_case ) if single_line_import_search is not None: _A = [obj[1:-1] for obj in single_line_import_search.groups()[0].split(''', ''' ) if len(_snake_case ) > 0] objects.extend(_snake_case ) elif line.startswith(''' ''' * 8 + '''"''' ): objects.append(line[9:-3] ) line_index += 1 _A = {'''none''': objects} # Let's continue with backend-specific objects in _import_structure while not lines[line_index].startswith('''if TYPE_CHECKING''' ): # If the line is an if not is_backend_available, we grab all objects associated. 
_A = find_backend(lines[line_index] ) # Check if the backend declaration is inside a try block: if _re_try.search(lines[line_index - 1] ) is None: _A = None if backend is not None: line_index += 1 # Scroll until we hit the else block of try-except-else while _re_else.search(lines[line_index] ) is None: line_index += 1 line_index += 1 _A = [] # Until we unindent, add backend objects to the list while len(lines[line_index] ) <= 1 or lines[line_index].startswith(''' ''' * 4 ): _A = lines[line_index] if _re_import_struct_add_one.search(_snake_case ) is not None: objects.append(_re_import_struct_add_one.search(_snake_case ).groups()[0] ) elif _re_import_struct_add_many.search(_snake_case ) is not None: _A = _re_import_struct_add_many.search(_snake_case ).groups()[0].split(''', ''' ) _A = [obj[1:-1] for obj in imports if len(_snake_case ) > 0] objects.extend(_snake_case ) elif _re_between_brackets.search(_snake_case ) is not None: _A = _re_between_brackets.search(_snake_case ).groups()[0].split(''', ''' ) _A = [obj[1:-1] for obj in imports if len(_snake_case ) > 0] objects.extend(_snake_case ) elif _re_quote_object.search(_snake_case ) is not None: objects.append(_re_quote_object.search(_snake_case ).groups()[0] ) elif line.startswith(''' ''' * 8 + '''"''' ): objects.append(line[9:-3] ) elif line.startswith(''' ''' * 12 + '''"''' ): objects.append(line[13:-3] ) line_index += 1 _A = objects else: line_index += 1 # At this stage we are in the TYPE_CHECKING part, first grab the objects without a specific backend _A = [] while ( line_index < len(_snake_case ) and find_backend(lines[line_index] ) is None and not lines[line_index].startswith('''else''' ) ): _A = lines[line_index] _A = _re_import.search(_snake_case ) if single_line_import_search is not None: objects.extend(single_line_import_search.groups()[0].split(''', ''' ) ) elif line.startswith(''' ''' * 8 ): objects.append(line[8:-2] ) line_index += 1 _A = {'''none''': objects} # Let's continue with backend-specific objects while line_index < len(_snake_case ): # If the line is an if is_backend_available, we grab all objects associated. 
_A = find_backend(lines[line_index] ) # Check if the backend declaration is inside a try block: if _re_try.search(lines[line_index - 1] ) is None: _A = None if backend is not None: line_index += 1 # Scroll until we hit the else block of try-except-else while _re_else.search(lines[line_index] ) is None: line_index += 1 line_index += 1 _A = [] # Until we unindent, add backend objects to the list while len(lines[line_index] ) <= 1 or lines[line_index].startswith(''' ''' * 8 ): _A = lines[line_index] _A = _re_import.search(_snake_case ) if single_line_import_search is not None: objects.extend(single_line_import_search.groups()[0].split(''', ''' ) ) elif line.startswith(''' ''' * 12 ): objects.append(line[12:-2] ) line_index += 1 _A = objects else: line_index += 1 return import_dict_objects, type_hint_objects def SCREAMING_SNAKE_CASE_ ( _snake_case :List[str] , _snake_case :Dict ) -> Any: def find_duplicates(_snake_case :Any ): return [k for k, v in collections.Counter(_snake_case ).items() if v > 1] if list(import_dict_objects.keys() ) != list(type_hint_objects.keys() ): return ["Both sides of the init do not have the same backends!"] _A = [] for key in import_dict_objects.keys(): _A = find_duplicates(import_dict_objects[key] ) if duplicate_imports: errors.append(F'''Duplicate _import_structure definitions for: {duplicate_imports}''' ) _A = find_duplicates(type_hint_objects[key] ) if duplicate_type_hints: errors.append(F'''Duplicate TYPE_CHECKING objects for: {duplicate_type_hints}''' ) if sorted(set(import_dict_objects[key] ) ) != sorted(set(type_hint_objects[key] ) ): _A = '''base imports''' if key == '''none''' else F'''{key} backend''' errors.append(F'''Differences for {name}:''' ) for a in type_hint_objects[key]: if a not in import_dict_objects[key]: errors.append(F''' {a} in TYPE_HINT but not in _import_structure.''' ) for a in import_dict_objects[key]: if a not in type_hint_objects[key]: errors.append(F''' {a} in _import_structure but not in TYPE_HINT.''' ) return errors def SCREAMING_SNAKE_CASE_ ( ) -> int: _A = [] for root, _, files in os.walk(_snake_case ): if "__init__.py" in files: _A = os.path.join(_snake_case , '''__init__.py''' ) _A = parse_init(_snake_case ) if objects is not None: _A = analyze_results(*_snake_case ) if len(_snake_case ) > 0: _A = F'''Problem in {fname}, both halves do not define the same objects.\n{errors[0]}''' failures.append('''\n'''.join(_snake_case ) ) if len(_snake_case ) > 0: raise ValueError('''\n\n'''.join(_snake_case ) ) def SCREAMING_SNAKE_CASE_ ( ) -> Optional[int]: _A = [] for path, directories, files in os.walk(_snake_case ): for folder in directories: # Ignore private modules if folder.startswith('''_''' ): directories.remove(_snake_case ) continue # Ignore leftovers from branches (empty folders apart from pycache) if len(list((Path(_snake_case ) / folder).glob('''*.py''' ) ) ) == 0: continue _A = str((Path(_snake_case ) / folder).relative_to(_snake_case ) ) _A = short_path.replace(os.path.sep , '''.''' ) submodules.append(_snake_case ) for fname in files: if fname == "__init__.py": continue _A = str((Path(_snake_case ) / fname).relative_to(_snake_case ) ) _A = short_path.replace('''.py''' , '''''' ).replace(os.path.sep , '''.''' ) if len(submodule.split('''.''' ) ) == 1: submodules.append(_snake_case ) return submodules UpperCAmelCase_ = [ """convert_pytorch_checkpoint_to_tf2""", """modeling_flax_pytorch_utils""", """models.esm.openfold_utils""", ] def SCREAMING_SNAKE_CASE_ ( ) -> List[str]: # This is to make sure the transformers module 
imported is the one in the repo. from transformers.utils import direct_transformers_import _A = direct_transformers_import(_snake_case ) _A = set(transformers._import_structure.keys() ) # This contains all the base keys of the _import_structure object defined in the init, but if the user is missing # some optional dependencies, they may not have all of them. Thus we read the init to read all additions and # (potentiall re-) add them. with open(os.path.join(_snake_case , '''__init__.py''' ) , '''r''' ) as f: _A = f.read() import_structure_keys.update(set(re.findall(r'''import_structure\[\"([^\"]*)\"\]''' , _snake_case ) ) ) _A = [ module for module in get_transformers_submodules() if module not in IGNORE_SUBMODULES and module not in import_structure_keys ] if len(_snake_case ) > 0: _A = '''\n'''.join(F'''- {module}''' for module in module_not_registered ) raise ValueError( '''The following submodules are not properly registed in the main init of Transformers:\n''' F'''{list_of_modules}\n''' '''Make sure they appear somewhere in the keys of `_import_structure` with an empty list as value.''' ) if __name__ == "__main__": check_all_inits() check_submodules()
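# Typical invocation (assuming the script lives at utils/check_inits.py in the
# transformers repository, as its "src/transformers" path constant implies):
#
#     python utils/check_inits.py
#
# It exits normally when every __init__.py is consistent and raises ValueError
# listing the offending modules otherwise.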
import collections import inspect import unittest from transformers import SwinvaConfig from transformers.testing_utils import require_torch, require_vision, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import SwinvaForImageClassification, SwinvaForMaskedImageModeling, SwinvaModel from transformers.models.swinva.modeling_swinva import SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import AutoImageProcessor class lowerCamelCase__ : """simple docstring""" def __init__( self : str , __lowerCAmelCase : str , __lowerCAmelCase : List[Any]=13 , __lowerCAmelCase : Tuple=32 , __lowerCAmelCase : Optional[Any]=2 , __lowerCAmelCase : Optional[Any]=3 , __lowerCAmelCase : int=16 , __lowerCAmelCase : int=[1, 2, 1] , __lowerCAmelCase : int=[2, 2, 4] , __lowerCAmelCase : Optional[int]=2 , __lowerCAmelCase : int=2.0 , __lowerCAmelCase : Optional[int]=True , __lowerCAmelCase : Optional[int]=0.0 , __lowerCAmelCase : Dict=0.0 , __lowerCAmelCase : Any=0.1 , __lowerCAmelCase : Tuple="gelu" , __lowerCAmelCase : Tuple=False , __lowerCAmelCase : Any=True , __lowerCAmelCase : int=0.02 , __lowerCAmelCase : List[str]=1E-5 , __lowerCAmelCase : Optional[int]=True , __lowerCAmelCase : List[str]=None , __lowerCAmelCase : Union[str, Any]=True , __lowerCAmelCase : List[str]=10 , __lowerCAmelCase : Dict=8 , ) -> Optional[int]: _A = parent _A = batch_size _A = image_size _A = patch_size _A = num_channels _A = embed_dim _A = depths _A = num_heads _A = window_size _A = mlp_ratio _A = qkv_bias _A = hidden_dropout_prob _A = attention_probs_dropout_prob _A = drop_path_rate _A = hidden_act _A = use_absolute_embeddings _A = patch_norm _A = layer_norm_eps _A = initializer_range _A = is_training _A = scope _A = use_labels _A = type_sequence_label_size _A = encoder_stride def snake_case_ ( self : List[str] ) -> str: _A = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) _A = None if self.use_labels: _A = ids_tensor([self.batch_size] , self.type_sequence_label_size ) _A = self.get_config() return config, pixel_values, labels def snake_case_ ( self : int ) -> List[Any]: return SwinvaConfig( image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , embed_dim=self.embed_dim , depths=self.depths , num_heads=self.num_heads , window_size=self.window_size , mlp_ratio=self.mlp_ratio , qkv_bias=self.qkv_bias , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , drop_path_rate=self.drop_path_rate , hidden_act=self.hidden_act , use_absolute_embeddings=self.use_absolute_embeddings , path_norm=self.patch_norm , layer_norm_eps=self.layer_norm_eps , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , ) def snake_case_ ( self : Optional[int] , __lowerCAmelCase : List[str] , __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : Dict ) -> Union[str, Any]: _A = SwinvaModel(config=__lowerCAmelCase ) model.to(__lowerCAmelCase ) model.eval() _A = model(__lowerCAmelCase ) _A = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths ) - 1)) _A = int(config.embed_dim * 2 ** 
(len(config.depths ) - 1) ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, expected_seq_len, expected_dim) ) def snake_case_ ( self : Tuple , __lowerCAmelCase : int , __lowerCAmelCase : Any , __lowerCAmelCase : Optional[Any] ) -> Optional[int]: _A = SwinvaForMaskedImageModeling(config=__lowerCAmelCase ) model.to(__lowerCAmelCase ) model.eval() _A = model(__lowerCAmelCase ) self.parent.assertEqual( result.logits.shape , (self.batch_size, self.num_channels, self.image_size, self.image_size) ) # test greyscale images _A = 1 _A = SwinvaForMaskedImageModeling(__lowerCAmelCase ) model.to(__lowerCAmelCase ) model.eval() _A = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] ) _A = model(__lowerCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, 1, self.image_size, self.image_size) ) def snake_case_ ( self : Dict , __lowerCAmelCase : int , __lowerCAmelCase : List[str] , __lowerCAmelCase : Optional[Any] ) -> Any: _A = self.type_sequence_label_size _A = SwinvaForImageClassification(__lowerCAmelCase ) model.to(__lowerCAmelCase ) model.eval() _A = model(__lowerCAmelCase , labels=__lowerCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) def snake_case_ ( self : Tuple ) -> Tuple: _A = self.prepare_config_and_inputs() _A , _A , _A = config_and_inputs _A = {'''pixel_values''': pixel_values} return config, inputs_dict @require_torch class lowerCamelCase__ ( _A , _A , unittest.TestCase): """simple docstring""" a__ : int = ( (SwinvaModel, SwinvaForImageClassification, SwinvaForMaskedImageModeling) if is_torch_available() else () ) a__ : Any = ( {"feature-extraction": SwinvaModel, "image-classification": SwinvaForImageClassification} if is_torch_available() else {} ) a__ : Union[str, Any] = False a__ : Optional[int] = False a__ : Any = False a__ : Optional[int] = False def snake_case_ ( self : Optional[Any] ) -> Union[str, Any]: _A = SwinvaModelTester(self ) _A = ConfigTester(self , config_class=__lowerCAmelCase , embed_dim=37 ) def snake_case_ ( self : Tuple ) -> Any: self.config_tester.create_and_test_config_to_json_string() self.config_tester.create_and_test_config_to_json_file() self.config_tester.create_and_test_config_from_and_save_pretrained() self.config_tester.create_and_test_config_with_num_labels() self.config_tester.check_config_can_be_init_without_params() self.config_tester.check_config_arguments_init() def snake_case_ ( self : Dict ) -> Tuple: _A = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*__lowerCAmelCase ) @unittest.skip(reason='''Got `CUDA error: misaligned address` with PyTorch 2.0.0.''' ) def snake_case_ ( self : Optional[int] ) -> Union[str, Any]: pass @unittest.skip(reason='''Swinv2 does not use inputs_embeds''' ) def snake_case_ ( self : str ) -> str: pass def snake_case_ ( self : List[Any] ) -> Optional[int]: _A , _A = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: _A = model_class(__lowerCAmelCase ) self.assertIsInstance(model.get_input_embeddings() , (nn.Module) ) _A = model.get_output_embeddings() self.assertTrue(x is None or isinstance(__lowerCAmelCase , nn.Linear ) ) def snake_case_ ( self : Union[str, Any] ) -> Tuple: _A , _A = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: _A = model_class(__lowerCAmelCase ) _A = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names 
order is deterministic _A = [*signature.parameters.keys()] _A = ['''pixel_values'''] self.assertListEqual(arg_names[:1] , __lowerCAmelCase ) def snake_case_ ( self : Dict ) -> Any: _A , _A = self.model_tester.prepare_config_and_inputs_for_common() _A = True for model_class in self.all_model_classes: _A = True _A = False _A = True _A = model_class(__lowerCAmelCase ) model.to(__lowerCAmelCase ) model.eval() with torch.no_grad(): _A = model(**self._prepare_for_class(__lowerCAmelCase , __lowerCAmelCase ) ) _A = outputs.attentions _A = len(self.model_tester.depths ) self.assertEqual(len(__lowerCAmelCase ) , __lowerCAmelCase ) # check that output_attentions also work using config del inputs_dict["output_attentions"] _A = True _A = config.window_size**2 _A = model_class(__lowerCAmelCase ) model.to(__lowerCAmelCase ) model.eval() with torch.no_grad(): _A = model(**self._prepare_for_class(__lowerCAmelCase , __lowerCAmelCase ) ) _A = outputs.attentions self.assertEqual(len(__lowerCAmelCase ) , __lowerCAmelCase ) self.assertListEqual( list(attentions[0].shape[-3:] ) , [self.model_tester.num_heads[0], window_size_squared, window_size_squared] , ) _A = len(__lowerCAmelCase ) # Check attention is always last and order is fine _A = True _A = True _A = model_class(__lowerCAmelCase ) model.to(__lowerCAmelCase ) model.eval() with torch.no_grad(): _A = model(**self._prepare_for_class(__lowerCAmelCase , __lowerCAmelCase ) ) if hasattr(self.model_tester , '''num_hidden_states_types''' ): _A = self.model_tester.num_hidden_states_types else: # also another +1 for reshaped_hidden_states _A = 2 self.assertEqual(out_len + added_hidden_states , len(__lowerCAmelCase ) ) _A = outputs.attentions self.assertEqual(len(__lowerCAmelCase ) , __lowerCAmelCase ) self.assertListEqual( list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_heads[0], window_size_squared, window_size_squared] , ) def snake_case_ ( self : Dict , __lowerCAmelCase : str , __lowerCAmelCase : List[Any] , __lowerCAmelCase : Tuple , __lowerCAmelCase : Union[str, Any] ) -> Optional[Any]: _A = model_class(__lowerCAmelCase ) model.to(__lowerCAmelCase ) model.eval() with torch.no_grad(): _A = model(**self._prepare_for_class(__lowerCAmelCase , __lowerCAmelCase ) ) _A = outputs.hidden_states _A = getattr( self.model_tester , '''expected_num_hidden_layers''' , len(self.model_tester.depths ) + 1 ) self.assertEqual(len(__lowerCAmelCase ) , __lowerCAmelCase ) # Swinv2 has a different seq_length _A = ( config.patch_size if isinstance(config.patch_size , collections.abc.Iterable ) else (config.patch_size, config.patch_size) ) _A = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0]) self.assertListEqual( list(hidden_states[0].shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , ) _A = outputs.reshaped_hidden_states self.assertEqual(len(__lowerCAmelCase ) , __lowerCAmelCase ) _A , _A , _A , _A = reshaped_hidden_states[0].shape _A = ( reshaped_hidden_states[0].view(__lowerCAmelCase , __lowerCAmelCase , height * width ).permute(0 , 2 , 1 ) ) self.assertListEqual( list(reshaped_hidden_states.shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , ) def snake_case_ ( self : int ) -> Tuple: _A , _A = self.model_tester.prepare_config_and_inputs_for_common() _A = ( self.model_tester.image_size if isinstance(self.model_tester.image_size , collections.abc.Iterable ) else (self.model_tester.image_size, self.model_tester.image_size) ) for model_class in self.all_model_classes: _A = True self.check_hidden_states_output(__lowerCAmelCase , 
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] _A = True self.check_hidden_states_output(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) def snake_case_ ( self : Optional[int] ) -> Optional[int]: _A , _A = self.model_tester.prepare_config_and_inputs_for_common() _A = 3 _A = ( self.model_tester.image_size if isinstance(self.model_tester.image_size , collections.abc.Iterable ) else (self.model_tester.image_size, self.model_tester.image_size) ) _A = ( config.patch_size if isinstance(config.patch_size , collections.abc.Iterable ) else (config.patch_size, config.patch_size) ) _A = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0]) _A = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1]) for model_class in self.all_model_classes: _A = True self.check_hidden_states_output(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , (padded_height, padded_width) ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] _A = True self.check_hidden_states_output(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , (padded_height, padded_width) ) def snake_case_ ( self : List[Any] ) -> Optional[int]: _A = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_image_modeling(*__lowerCAmelCase ) def snake_case_ ( self : Tuple ) -> List[str]: _A = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*__lowerCAmelCase ) @slow def snake_case_ ( self : Optional[Any] ) -> Tuple: for model_name in SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: _A = SwinvaModel.from_pretrained(__lowerCAmelCase ) self.assertIsNotNone(__lowerCAmelCase ) def snake_case_ ( self : str ) -> Optional[Any]: _A , _A = self.model_tester.prepare_config_and_inputs_for_common() _A = _config_zero_init(__lowerCAmelCase ) for model_class in self.all_model_classes: _A = model_class(config=__lowerCAmelCase ) for name, param in model.named_parameters(): if "embeddings" not in name and "logit_scale" not in name and param.requires_grad: self.assertIn( ((param.data.mean() * 1E9).round() / 1E9).item() , [0.0, 1.0] , msg=f'''Parameter {name} of model {model_class} seems not properly initialized''' , ) @require_vision @require_torch class lowerCamelCase__ ( unittest.TestCase): """simple docstring""" @cached_property def snake_case_ ( self : List[Any] ) -> int: return ( AutoImageProcessor.from_pretrained('''microsoft/swinv2-tiny-patch4-window8-256''' ) if is_vision_available() else None ) @slow def snake_case_ ( self : str ) -> Dict: _A = SwinvaForImageClassification.from_pretrained('''microsoft/swinv2-tiny-patch4-window8-256''' ).to( __lowerCAmelCase ) _A = self.default_image_processor _A = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' ) _A = image_processor(images=__lowerCAmelCase , return_tensors='''pt''' ).to(__lowerCAmelCase ) # forward pass with torch.no_grad(): _A = model(**__lowerCAmelCase ) # verify the logits _A = torch.Size((1, 10_00) ) self.assertEqual(outputs.logits.shape , __lowerCAmelCase ) _A = torch.tensor([-0.3947, -0.4306, 0.0026] ).to(__lowerCAmelCase ) self.assertTrue(torch.allclose(outputs.logits[0, :3] , __lowerCAmelCase , atol=1E-4 ) )
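# A minimal inference sketch matching the integration test above, assuming the
# microsoft/swinv2-tiny-patch4-window8-256 checkpoint and the COCO test fixture
# image are both available locally.
import torch
from PIL import Image
from transformers import AutoImageProcessor, Swinv2ForImageClassification

ckpt = "microsoft/swinv2-tiny-patch4-window8-256"
swin_processor = AutoImageProcessor.from_pretrained(ckpt)
swin_model = Swinv2ForImageClassification.from_pretrained(ckpt)
img = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
with torch.no_grad():
    logits = swin_model(**swin_processor(images=img, return_tensors="pt")).logits
print(swin_model.config.id2label[logits.argmax(-1).item()])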
from typing import List, Union

from ..utils import (
    add_end_docstrings,
    is_tf_available,
    is_torch_available,
    is_vision_available,
    logging,
    requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline


if is_vision_available():
    from PIL import Image

    from ..image_utils import load_image

if is_tf_available():
    from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_VISION_2_SEQ_MAPPING

if is_torch_available():
    import torch

    from ..models.auto.modeling_auto import MODEL_FOR_VISION_2_SEQ_MAPPING

logger = logging.get_logger(__name__)


@add_end_docstrings(PIPELINE_INIT_ARGS)
class ImageToTextPipeline(Pipeline):
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        requires_backends(self, "vision")
        self.check_model_type(
            TF_MODEL_FOR_VISION_2_SEQ_MAPPING if self.framework == "tf" else MODEL_FOR_VISION_2_SEQ_MAPPING
        )

    def _sanitize_parameters(self, max_new_tokens=None, generate_kwargs=None, prompt=None):
        forward_kwargs = {}
        preprocess_params = {}

        if prompt is not None:
            preprocess_params["prompt"] = prompt

        if generate_kwargs is not None:
            forward_kwargs["generate_kwargs"] = generate_kwargs
        if max_new_tokens is not None:
            if "generate_kwargs" not in forward_kwargs:
                forward_kwargs["generate_kwargs"] = {}
            if "max_new_tokens" in forward_kwargs["generate_kwargs"]:
                raise ValueError(
                    "'max_new_tokens' is defined twice, once in 'generate_kwargs' and once as a direct parameter,"
                    " please use only one"
                )
            forward_kwargs["generate_kwargs"]["max_new_tokens"] = max_new_tokens
        return preprocess_params, forward_kwargs, {}

    def __call__(self, images: Union[str, List[str], "Image.Image", List["Image.Image"]], **kwargs):
        return super().__call__(images, **kwargs)

    def preprocess(self, image, prompt=None):
        image = load_image(image)

        if prompt is not None:
            if not isinstance(prompt, str):
                raise ValueError(
                    f"Received an invalid text input, got - {type(prompt)} - but expected a single string. "
                    "Note also that one single text can be provided for conditional image to text generation."
                )

            model_type = self.model.config.model_type

            if model_type == "git":
                model_inputs = self.image_processor(images=image, return_tensors=self.framework)
                input_ids = self.tokenizer(text=prompt, add_special_tokens=False).input_ids
                input_ids = [self.tokenizer.cls_token_id] + input_ids
                input_ids = torch.tensor(input_ids).unsqueeze(0)
                model_inputs.update({"input_ids": input_ids})
            elif model_type == "pix2struct":
                model_inputs = self.image_processor(images=image, header_text=prompt, return_tensors=self.framework)
            elif model_type != "vision-encoder-decoder":
                # vision-encoder-decoder does not support conditional generation
                model_inputs = self.image_processor(images=image, return_tensors=self.framework)
                text_inputs = self.tokenizer(prompt, return_tensors=self.framework)
                model_inputs.update(text_inputs)
            else:
                raise ValueError(f"Model type {model_type} does not support conditional text generation")
        else:
            model_inputs = self.image_processor(images=image, return_tensors=self.framework)

        if self.model.config.model_type == "git" and prompt is None:
            model_inputs["input_ids"] = None

        return model_inputs

    def _forward(self, model_inputs, generate_kwargs=None):
        # Git model sets `model_inputs["input_ids"] = None` in `preprocess` (when `prompt=None`). In batch model, the
        # pipeline will group them into a list of `None`, which fail `_forward`. Avoid this by checking it first.
        if (
            "input_ids" in model_inputs
            and isinstance(model_inputs["input_ids"], list)
            and all(x is None for x in model_inputs["input_ids"])
        ):
            model_inputs["input_ids"] = None

        if generate_kwargs is None:
            generate_kwargs = {}
        # FIXME: We need to pop here due to a difference in how `generation.py` and `generation.tf_utils.py`
        # parse inputs. In the Tensorflow version, `generate` raises an error if we don't use `input_ids` whereas
        # the PyTorch version matches it with `self.model.main_input_name` or `self.model.encoder.main_input_name`
        # in the `_prepare_model_inputs` method.
        inputs = model_inputs.pop(self.model.main_input_name)
        model_outputs = self.model.generate(inputs, **model_inputs, **generate_kwargs)
        return model_outputs

    def postprocess(self, model_outputs):
        records = []
        for output_ids in model_outputs:
            record = {
                "generated_text": self.tokenizer.decode(
                    output_ids,
                    skip_special_tokens=True,
                )
            }
            records.append(record)
        return records
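# A hedged usage sketch of this pipeline through the high-level factory; the
# checkpoint named here is one common captioning model, not the only choice.
from transformers import pipeline

captioner = pipeline("image-to-text", model="nlpconnect/vit-gpt2-image-captioning")
print(captioner("./tests/fixtures/tests_samples/COCO/000000039769.png"))
# -> [{'generated_text': '...caption text...'}]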
import os import tempfile from functools import partial from unittest import TestCase from unittest.mock import patch import datasets import datasets.config from .utils import require_beam class lowerCamelCase__ ( datasets.BeamBasedBuilder): """simple docstring""" def snake_case_ ( self : List[Any] ) -> List[str]: return datasets.DatasetInfo( features=datasets.Features({'''content''': datasets.Value('''string''' )} ) , supervised_keys=__lowerCAmelCase , ) def snake_case_ ( self : Optional[Any] , __lowerCAmelCase : List[str] , __lowerCAmelCase : Dict ) -> List[Any]: return [datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={'''examples''': get_test_dummy_examples()} )] def snake_case_ ( self : List[Any] , __lowerCAmelCase : Optional[int] , __lowerCAmelCase : Tuple ) -> List[str]: import apache_beam as beam return pipeline | "Load Examples" >> beam.Create(__lowerCAmelCase ) class lowerCamelCase__ ( datasets.BeamBasedBuilder): """simple docstring""" def snake_case_ ( self : Tuple ) -> int: return datasets.DatasetInfo( features=datasets.Features({'''a''': datasets.Sequence({'''b''': datasets.Value('''string''' )} )} ) , supervised_keys=__lowerCAmelCase , ) def snake_case_ ( self : List[str] , __lowerCAmelCase : Optional[int] , __lowerCAmelCase : Optional[int] ) -> Dict: return [ datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={'''examples''': get_test_nested_examples()} ) ] def snake_case_ ( self : Optional[int] , __lowerCAmelCase : Tuple , __lowerCAmelCase : Any ) -> str: import apache_beam as beam return pipeline | "Load Examples" >> beam.Create(__lowerCAmelCase ) def SCREAMING_SNAKE_CASE_ ( ) -> Any: return [(i, {"content": content}) for i, content in enumerate(['''foo''', '''bar''', '''foobar'''] )] def SCREAMING_SNAKE_CASE_ ( ) -> Tuple: return [(i, {"a": {"b": [content]}}) for i, content in enumerate(['''foo''', '''bar''', '''foobar'''] )] class lowerCamelCase__ ( _A): """simple docstring""" @require_beam def snake_case_ ( self : Union[str, Any] ) -> List[str]: _A = len(get_test_dummy_examples() ) with tempfile.TemporaryDirectory() as tmp_cache_dir: _A = DummyBeamDataset(cache_dir=__lowerCAmelCase , beam_runner='''DirectRunner''' ) builder.download_and_prepare() self.assertTrue( os.path.exists( os.path.join(__lowerCAmelCase , builder.name , '''default''' , '''0.0.0''' , f'''{builder.name}-train.arrow''' ) ) ) self.assertDictEqual(builder.info.features , datasets.Features({'''content''': datasets.Value('''string''' )} ) ) _A = builder.as_dataset() self.assertEqual(dset['''train'''].num_rows , __lowerCAmelCase ) self.assertEqual(dset['''train'''].info.splits['''train'''].num_examples , __lowerCAmelCase ) self.assertDictEqual(dset['''train'''][0] , get_test_dummy_examples()[0][1] ) self.assertDictEqual( dset['''train'''][expected_num_examples - 1] , get_test_dummy_examples()[expected_num_examples - 1][1] ) self.assertTrue( os.path.exists(os.path.join(__lowerCAmelCase , builder.name , '''default''' , '''0.0.0''' , '''dataset_info.json''' ) ) ) del dset @require_beam def snake_case_ ( self : int ) -> str: import apache_beam as beam _A = beam.io.parquetio.WriteToParquet _A = len(get_test_dummy_examples() ) with tempfile.TemporaryDirectory() as tmp_cache_dir: _A = DummyBeamDataset(cache_dir=__lowerCAmelCase , beam_runner='''DirectRunner''' ) with patch('''apache_beam.io.parquetio.WriteToParquet''' ) as write_parquet_mock: _A = partial(__lowerCAmelCase , num_shards=2 ) builder.download_and_prepare() self.assertTrue( os.path.exists( os.path.join( __lowerCAmelCase , 
builder.name , '''default''' , '''0.0.0''' , f'''{builder.name}-train-00000-of-00002.arrow''' ) ) ) self.assertTrue( os.path.exists( os.path.join( __lowerCAmelCase , builder.name , '''default''' , '''0.0.0''' , f'''{builder.name}-train-00001-of-00002.arrow''' ) ) ) self.assertDictEqual(builder.info.features , datasets.Features({'''content''': datasets.Value('''string''' )} ) ) _A = builder.as_dataset() self.assertEqual(dset['''train'''].num_rows , __lowerCAmelCase ) self.assertEqual(dset['''train'''].info.splits['''train'''].num_examples , __lowerCAmelCase ) # Order is not preserved when sharding, so we just check that all the elements are there self.assertListEqual(sorted(dset['''train''']['''content'''] ) , sorted(['''foo''', '''bar''', '''foobar'''] ) ) self.assertTrue( os.path.exists(os.path.join(__lowerCAmelCase , builder.name , '''default''' , '''0.0.0''' , '''dataset_info.json''' ) ) ) del dset @require_beam def snake_case_ ( self : Optional[Any] ) -> Optional[int]: with tempfile.TemporaryDirectory() as tmp_cache_dir: _A = DummyBeamDataset(cache_dir=__lowerCAmelCase ) self.assertRaises(datasets.builder.MissingBeamOptions , builder.download_and_prepare ) @require_beam def snake_case_ ( self : Any ) -> int: _A = len(get_test_nested_examples() ) with tempfile.TemporaryDirectory() as tmp_cache_dir: _A = NestedBeamDataset(cache_dir=__lowerCAmelCase , beam_runner='''DirectRunner''' ) builder.download_and_prepare() self.assertTrue( os.path.exists( os.path.join(__lowerCAmelCase , builder.name , '''default''' , '''0.0.0''' , f'''{builder.name}-train.arrow''' ) ) ) self.assertDictEqual( builder.info.features , datasets.Features({'''a''': datasets.Sequence({'''b''': datasets.Value('''string''' )} )} ) ) _A = builder.as_dataset() self.assertEqual(dset['''train'''].num_rows , __lowerCAmelCase ) self.assertEqual(dset['''train'''].info.splits['''train'''].num_examples , __lowerCAmelCase ) self.assertDictEqual(dset['''train'''][0] , get_test_nested_examples()[0][1] ) self.assertDictEqual( dset['''train'''][expected_num_examples - 1] , get_test_nested_examples()[expected_num_examples - 1][1] ) self.assertTrue( os.path.exists(os.path.join(__lowerCAmelCase , builder.name , '''default''' , '''0.0.0''' , '''dataset_info.json''' ) ) ) del dset
2
import requests from bsa import BeautifulSoup def SCREAMING_SNAKE_CASE_ ( _snake_case :str = "AAPL" ) -> str: _A = F'''https://in.finance.yahoo.com/quote/{symbol}?s={symbol}''' _A = BeautifulSoup(requests.get(_snake_case ).text , '''html.parser''' ) _A = '''My(6px) Pos(r) smartphone_Mt(6px)''' return soup.find('''div''' , class_=class_ ).find('''span''' ).text if __name__ == "__main__": for symbol in "AAPL AMZN IBM GOOG MSFT ORCL".split(): print(f'Current {symbol:<4} stock price is {stock_price(symbol):>8}')
2
1
import copy from dataclasses import dataclass, field from typing import ClassVar, Dict from ..features import Audio, Features, Value from .base import TaskTemplate @dataclass(frozen=_A) class lowerCamelCase__ ( _A): """simple docstring""" a__ : str = field(default="automatic-speech-recognition" , metadata={"include_in_asdict_even_if_is_default": True}) a__ : ClassVar[Features] = Features({"audio": Audio()}) a__ : ClassVar[Features] = Features({"transcription": Value("string")}) a__ : str = "audio" a__ : str = "transcription" def snake_case_ ( self : List[str] , __lowerCAmelCase : int ) -> Optional[int]: if self.audio_column not in features: raise ValueError(f'''Column {self.audio_column} is not present in features.''' ) if not isinstance(features[self.audio_column] , __lowerCAmelCase ): raise ValueError(f'''Column {self.audio_column} is not an Audio type.''' ) _A = copy.deepcopy(self ) _A = self.input_schema.copy() _A = features[self.audio_column] _A = input_schema return task_template @property def snake_case_ ( self : str ) -> Dict[str, str]: return {self.audio_column: "audio", self.transcription_column: "transcription"}
2
from graphs.minimum_spanning_tree_kruskal import kruskal def SCREAMING_SNAKE_CASE_ ( ) -> Tuple: _A = 9 _A = [ [0, 1, 4], [0, 7, 8], [1, 2, 8], [7, 8, 7], [7, 6, 1], [2, 8, 2], [8, 6, 6], [2, 3, 7], [2, 5, 4], [6, 5, 2], [3, 5, 14], [3, 4, 9], [5, 4, 10], [1, 7, 11], ] _A = kruskal(_snake_case , _snake_case ) _A = [ [7, 6, 1], [2, 8, 2], [6, 5, 2], [0, 1, 4], [2, 5, 4], [2, 3, 7], [0, 7, 8], [3, 4, 9], ] assert sorted(_snake_case ) == sorted(_snake_case )
2
1
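The test sample above imports kruskal from graphs.minimum_spanning_tree_kruskal without showing it. As a point of reference only, a minimal union-find sketch matching the kruskal(num_nodes, edges) call and the [u, v, weight] edge triples used in that test could look like this (function name and internals are assumptions, not the library's actual code):

def kruskal_sketch(num_nodes, edges):
    # edges are [u, v, weight] triples, as in the test data above
    parent = list(range(num_nodes))

    def find(x):
        # find the set representative, with path halving
        while parent[x] != x:
            parent[x] = parent[parent[x]]
            x = parent[x]
        return x

    mst = []
    for u, v, w in sorted(edges, key=lambda e: e[2]):
        ru, rv = find(u), find(v)
        if ru != rv:          # joining two different components: keep the edge
            parent[ru] = rv
            mst.append([u, v, w])
    return mst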
import warnings from typing import List, Optional, Union from ...processing_utils import ProcessorMixin from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy from ...utils import TensorType class lowerCamelCase__ ( _A): """simple docstring""" a__ : List[str] = ["image_processor", "tokenizer"] a__ : Optional[Any] = "LayoutLMv3ImageProcessor" a__ : Union[str, Any] = ("LayoutLMv3Tokenizer", "LayoutLMv3TokenizerFast") def __init__( self : str , __lowerCAmelCase : Any=None , __lowerCAmelCase : Any=None , **__lowerCAmelCase : Optional[int] ) -> Any: _A = None if "feature_extractor" in kwargs: warnings.warn( '''The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`''' ''' instead.''' , __lowerCAmelCase , ) _A = kwargs.pop('''feature_extractor''' ) _A = image_processor if image_processor is not None else feature_extractor if image_processor is None: raise ValueError('''You need to specify an `image_processor`.''' ) if tokenizer is None: raise ValueError('''You need to specify a `tokenizer`.''' ) super().__init__(__lowerCAmelCase , __lowerCAmelCase ) def __call__( self : List[Any] , __lowerCAmelCase : Optional[int] , __lowerCAmelCase : Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None , __lowerCAmelCase : Optional[Union[PreTokenizedInput, List[PreTokenizedInput]]] = None , __lowerCAmelCase : Union[List[List[int]], List[List[List[int]]]] = None , __lowerCAmelCase : Optional[Union[List[int], List[List[int]]]] = None , __lowerCAmelCase : bool = True , __lowerCAmelCase : Union[bool, str, PaddingStrategy] = False , __lowerCAmelCase : Union[bool, str, TruncationStrategy] = None , __lowerCAmelCase : Optional[int] = None , __lowerCAmelCase : int = 0 , __lowerCAmelCase : Optional[int] = None , __lowerCAmelCase : Optional[bool] = None , __lowerCAmelCase : Optional[bool] = None , __lowerCAmelCase : bool = False , __lowerCAmelCase : bool = False , __lowerCAmelCase : bool = False , __lowerCAmelCase : bool = False , __lowerCAmelCase : bool = True , __lowerCAmelCase : Optional[Union[str, TensorType]] = None , **__lowerCAmelCase : int , ) -> BatchEncoding: # verify input if self.image_processor.apply_ocr and (boxes is not None): raise ValueError( '''You cannot provide bounding boxes if you initialized the image processor with apply_ocr set to True.''' ) if self.image_processor.apply_ocr and (word_labels is not None): raise ValueError( '''You cannot provide word labels if you initialized the image processor with apply_ocr set to True.''' ) # first, apply the image processor _A = self.image_processor(images=__lowerCAmelCase , return_tensors=__lowerCAmelCase ) # second, apply the tokenizer if text is not None and self.image_processor.apply_ocr and text_pair is None: if isinstance(__lowerCAmelCase , __lowerCAmelCase ): _A = [text] # add batch dimension (as the image processor always adds a batch dimension) _A = features['''words'''] _A = self.tokenizer( text=text if text is not None else features['''words'''] , text_pair=text_pair if text_pair is not None else None , boxes=boxes if boxes is not None else features['''boxes'''] , word_labels=__lowerCAmelCase , add_special_tokens=__lowerCAmelCase , padding=__lowerCAmelCase , truncation=__lowerCAmelCase , max_length=__lowerCAmelCase , stride=__lowerCAmelCase , pad_to_multiple_of=__lowerCAmelCase , return_token_type_ids=__lowerCAmelCase , return_attention_mask=__lowerCAmelCase , return_overflowing_tokens=__lowerCAmelCase , 
return_special_tokens_mask=__lowerCAmelCase , return_offsets_mapping=__lowerCAmelCase , return_length=__lowerCAmelCase , verbose=__lowerCAmelCase , return_tensors=__lowerCAmelCase , **__lowerCAmelCase , ) # add pixel values _A = features.pop('''pixel_values''' ) if return_overflowing_tokens is True: _A = self.get_overflowing_images(__lowerCAmelCase , encoded_inputs['''overflow_to_sample_mapping'''] ) _A = images return encoded_inputs def snake_case_ ( self : Any , __lowerCAmelCase : List[Any] , __lowerCAmelCase : Optional[Any] ) -> int: # in case there's an overflow, ensure each `input_ids` sample is mapped to its corresponding image _A = [] for sample_idx in overflow_to_sample_mapping: images_with_overflow.append(images[sample_idx] ) if len(__lowerCAmelCase ) != len(__lowerCAmelCase ): raise ValueError( '''Expected length of images to be the same as the length of `overflow_to_sample_mapping`, but got''' f''' {len(__lowerCAmelCase )} and {len(__lowerCAmelCase )}''' ) return images_with_overflow def snake_case_ ( self : List[Any] , *__lowerCAmelCase : str , **__lowerCAmelCase : str ) -> Optional[int]: return self.tokenizer.batch_decode(*__lowerCAmelCase , **__lowerCAmelCase ) def snake_case_ ( self : Optional[int] , *__lowerCAmelCase : Optional[Any] , **__lowerCAmelCase : Optional[Any] ) -> str: return self.tokenizer.decode(*__lowerCAmelCase , **__lowerCAmelCase ) @property def snake_case_ ( self : List[str] ) -> Any: return ["input_ids", "bbox", "attention_mask", "pixel_values"] @property def snake_case_ ( self : Optional[Any] ) -> List[str]: warnings.warn( '''`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.''' , __lowerCAmelCase , ) return self.image_processor_class @property def snake_case_ ( self : List[Any] ) -> Tuple: warnings.warn( '''`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.''' , __lowerCAmelCase , ) return self.image_processor
2
def SCREAMING_SNAKE_CASE_ ( _snake_case :int ) -> int: if not isinstance(_snake_case , _snake_case ): raise ValueError('''Input must be an integer''' ) if input_num <= 0: raise ValueError('''Input must be positive''' ) return sum( divisor for divisor in range(1 , input_num // 2 + 1 ) if input_num % divisor == 0 ) if __name__ == "__main__": import doctest doctest.testmod()
2
1
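The divisor-sum sample above returns the sum of a number's proper divisors, which makes a perfect-number check a one-liner. A small illustrative usage (the readable function name is introduced here for the sketch; the logic mirrors the sample):

def proper_divisor_sum(n: int) -> int:
    # same logic as the sample above, under a descriptive name
    return sum(d for d in range(1, n // 2 + 1) if n % d == 0)

# 6 and 28 are perfect numbers: each equals the sum of its proper divisors
assert proper_divisor_sum(6) == 6
assert proper_divisor_sum(28) == 28
assert proper_divisor_sum(12) == 16  # 1 + 2 + 3 + 4 + 6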
import multiprocessing from typing import TYPE_CHECKING, Optional, Union from .. import Dataset, Features, config from ..formatting import query_table from ..packaged_modules.sql.sql import Sql from ..utils import logging from .abc import AbstractDatasetInputStream if TYPE_CHECKING: import sqlitea import sqlalchemy class lowerCamelCase__ ( _A): """simple docstring""" def __init__( self : Union[str, Any] , __lowerCAmelCase : Union[str, "sqlalchemy.sql.Selectable"] , __lowerCAmelCase : Union[str, "sqlalchemy.engine.Connection", "sqlalchemy.engine.Engine", "sqlite3.Connection"] , __lowerCAmelCase : Optional[Features] = None , __lowerCAmelCase : str = None , __lowerCAmelCase : bool = False , **__lowerCAmelCase : List[Any] , ) -> List[str]: super().__init__(features=__lowerCAmelCase , cache_dir=__lowerCAmelCase , keep_in_memory=__lowerCAmelCase , **__lowerCAmelCase ) _A = Sql( cache_dir=__lowerCAmelCase , features=__lowerCAmelCase , sql=__lowerCAmelCase , con=__lowerCAmelCase , **__lowerCAmelCase , ) def snake_case_ ( self : List[Any] ) -> Optional[int]: _A = None _A = None _A = None _A = None self.builder.download_and_prepare( download_config=__lowerCAmelCase , download_mode=__lowerCAmelCase , verification_mode=__lowerCAmelCase , base_path=__lowerCAmelCase , ) # Build dataset for splits _A = self.builder.as_dataset( split='''train''' , verification_mode=__lowerCAmelCase , in_memory=self.keep_in_memory ) return dataset class lowerCamelCase__ : """simple docstring""" def __init__( self : List[Any] , __lowerCAmelCase : Dataset , __lowerCAmelCase : str , __lowerCAmelCase : Union[str, "sqlalchemy.engine.Connection", "sqlalchemy.engine.Engine", "sqlite3.Connection"] , __lowerCAmelCase : Optional[int] = None , __lowerCAmelCase : Optional[int] = None , **__lowerCAmelCase : Optional[int] , ) -> str: if num_proc is not None and num_proc <= 0: raise ValueError(f'''num_proc {num_proc} must be an integer > 0.''' ) _A = dataset _A = name _A = con _A = batch_size if batch_size else config.DEFAULT_MAX_BATCH_SIZE _A = num_proc _A = to_sql_kwargs def snake_case_ ( self : str ) -> int: _A = self.to_sql_kwargs.pop('''sql''' , __lowerCAmelCase ) _A = self.to_sql_kwargs.pop('''con''' , __lowerCAmelCase ) _A = self.to_sql_kwargs.pop('''index''' , __lowerCAmelCase ) _A = self._write(index=__lowerCAmelCase , **self.to_sql_kwargs ) return written def snake_case_ ( self : List[str] , __lowerCAmelCase : Union[str, Any] ) -> Dict: _A , _A , _A = args _A = {**to_sql_kwargs, '''if_exists''': '''append'''} if offset > 0 else to_sql_kwargs _A = query_table( table=self.dataset.data , key=slice(__lowerCAmelCase , offset + self.batch_size ) , indices=self.dataset._indices , ) _A = batch.to_pandas() _A = df.to_sql(self.name , self.con , index=__lowerCAmelCase , **__lowerCAmelCase ) return num_rows or len(__lowerCAmelCase ) def snake_case_ ( self : str , __lowerCAmelCase : Dict , **__lowerCAmelCase : Optional[int] ) -> int: _A = 0 if self.num_proc is None or self.num_proc == 1: for offset in logging.tqdm( range(0 , len(self.dataset ) , self.batch_size ) , unit='''ba''' , disable=not logging.is_progress_bar_enabled() , desc='''Creating SQL from Arrow format''' , ): written += self._batch_sql((offset, index, to_sql_kwargs) ) else: _A , _A = len(self.dataset ), self.batch_size with multiprocessing.Pool(self.num_proc ) as pool: for num_rows in logging.tqdm( pool.imap( self._batch_sql , [(offset, index, to_sql_kwargs) for offset in range(0 , __lowerCAmelCase , __lowerCAmelCase )] , ) , total=(num_rows // batch_size) + 1 if num_rows % 
batch_size else num_rows // batch_size , unit='''ba''' , disable=not logging.is_progress_bar_enabled() , desc='''Creating SQL from Arrow format''' , ): written += num_rows return written
2
UpperCAmelCase_ = 2_5_6 # Modulus to hash a string UpperCAmelCase_ = 1_0_0_0_0_0_3 def SCREAMING_SNAKE_CASE_ ( _snake_case :str , _snake_case :str ) -> bool: _A = len(_snake_case ) _A = len(_snake_case ) if p_len > t_len: return False _A = 0 _A = 0 _A = 1 # Calculating the hash of pattern and substring of text for i in range(_snake_case ): _A = (ord(pattern[i] ) + p_hash * alphabet_size) % modulus _A = (ord(text[i] ) + text_hash * alphabet_size) % modulus if i == p_len - 1: continue _A = (modulus_power * alphabet_size) % modulus for i in range(0 , t_len - p_len + 1 ): if text_hash == p_hash and text[i : i + p_len] == pattern: return True if i == t_len - p_len: continue # Calculate the https://en.wikipedia.org/wiki/Rolling_hash _A = ( (text_hash - ord(text[i] ) * modulus_power) * alphabet_size + ord(text[i + p_len] ) ) % modulus return False def SCREAMING_SNAKE_CASE_ ( ) -> None: _A = '''abc1abc12''' _A = '''alskfjaldsabc1abc1abc12k23adsfabcabc''' _A = '''alskfjaldsk23adsfabcabc''' assert rabin_karp(_snake_case , _snake_case ) and not rabin_karp(_snake_case , _snake_case ) # Test 2) _A = '''ABABX''' _A = '''ABABZABABYABABX''' assert rabin_karp(_snake_case , _snake_case ) # Test 3) _A = '''AAAB''' _A = '''ABAAAAAB''' assert rabin_karp(_snake_case , _snake_case ) # Test 4) _A = '''abcdabcy''' _A = '''abcxabcdabxabcdabcdabcy''' assert rabin_karp(_snake_case , _snake_case ) # Test 5) _A = '''Lü''' _A = '''Lüsai''' assert rabin_karp(_snake_case , _snake_case ) _A = '''Lue''' assert not rabin_karp(_snake_case , _snake_case ) print('''Success.''' ) if __name__ == "__main__": test_rabin_karp()
2
1
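The core of the Rabin-Karp sample above is the rolling-hash update: drop the leading character's contribution, shift, and append the next character in O(1). A stripped-down illustration of just that step, with the constants chosen to match the sample (names are illustrative):

ALPHABET_SIZE = 256
MODULUS = 1_000_003

def rolling_hashes(text: str, window: int):
    # yield the hash of every window-length substring with one O(1) update per step
    h = 0
    power = 1  # becomes ALPHABET_SIZE ** (window - 1) % MODULUS
    for i in range(window):
        h = (ord(text[i]) + h * ALPHABET_SIZE) % MODULUS
        if i < window - 1:
            power = (power * ALPHABET_SIZE) % MODULUS
    yield h
    for i in range(len(text) - window):
        # remove text[i], shift left, append text[i + window]
        h = ((h - ord(text[i]) * power) * ALPHABET_SIZE + ord(text[i + window])) % MODULUS
        yield h

def direct_hash(s: str) -> int:
    h = 0
    for c in s:
        h = (ord(c) + h * ALPHABET_SIZE) % MODULUS
    return h

text = "alskfjaldsabc1abc1abc12k23adsfabcabc"
# every rolled hash matches a freshly computed one
assert all(h == direct_hash(text[i : i + 5]) for i, h in enumerate(rolling_hashes(text, 5)))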
import math from typing import Optional import numpy as np from ...configuration_utils import PretrainedConfig from ...utils import logging UpperCAmelCase_ = logging.get_logger(__name__) UpperCAmelCase_ = { """facebook/encodec_24khz""": """https://huggingface.co/facebook/encodec_24khz/resolve/main/config.json""", """facebook/encodec_48khz""": """https://huggingface.co/facebook/encodec_48khz/resolve/main/config.json""", } class lowerCamelCase__ ( _A): """simple docstring""" a__ : List[Any] = "encodec" def __init__( self : Union[str, Any] , __lowerCAmelCase : List[str]=[1.5, 3.0, 6.0, 12.0, 24.0] , __lowerCAmelCase : Optional[Any]=2_40_00 , __lowerCAmelCase : int=1 , __lowerCAmelCase : Union[str, Any]=False , __lowerCAmelCase : List[Any]=None , __lowerCAmelCase : Optional[Any]=None , __lowerCAmelCase : Dict=1_28 , __lowerCAmelCase : Optional[Any]=32 , __lowerCAmelCase : int=1 , __lowerCAmelCase : List[str]=[8, 5, 4, 2] , __lowerCAmelCase : Optional[int]="weight_norm" , __lowerCAmelCase : Optional[Any]=7 , __lowerCAmelCase : Union[str, Any]=7 , __lowerCAmelCase : Optional[int]=3 , __lowerCAmelCase : Optional[int]=2 , __lowerCAmelCase : Any=True , __lowerCAmelCase : Dict="reflect" , __lowerCAmelCase : List[Any]=2 , __lowerCAmelCase : Union[str, Any]=2 , __lowerCAmelCase : Optional[Any]=1.0 , __lowerCAmelCase : Tuple=10_24 , __lowerCAmelCase : List[str]=None , __lowerCAmelCase : str=True , **__lowerCAmelCase : str , ) -> str: _A = target_bandwidths _A = sampling_rate _A = audio_channels _A = normalize _A = chunk_length_s _A = overlap _A = hidden_size _A = num_filters _A = num_residual_layers _A = upsampling_ratios _A = norm_type _A = kernel_size _A = last_kernel_size _A = residual_kernel_size _A = dilation_growth_rate _A = use_causal_conv _A = pad_mode _A = compress _A = num_lstm_layers _A = trim_right_ratio _A = codebook_size _A = codebook_dim if codebook_dim is not None else hidden_size _A = use_conv_shortcut if self.norm_type not in ["weight_norm", "time_group_norm"]: raise ValueError( f'''self.norm_type must be one of `"weight_norm"`, `"time_group_norm"`), got {self.norm_type}''' ) super().__init__(**__lowerCAmelCase ) @property def snake_case_ ( self : Dict ) -> Optional[int]: if self.chunk_length_s is None: return None else: return int(self.chunk_length_s * self.sampling_rate ) @property def snake_case_ ( self : int ) -> Optional[int]: if self.chunk_length_s is None or self.overlap is None: return None else: return max(1 , int((1.0 - self.overlap) * self.chunk_length ) ) @property def snake_case_ ( self : Optional[Any] ) -> int: _A = np.prod(self.upsampling_ratios ) return math.ceil(self.sampling_rate / hop_length ) @property def snake_case_ ( self : int ) -> int: return int(10_00 * self.target_bandwidths[-1] // (self.frame_rate * 10) )
2
import json import os from typing import Dict, List, Optional, Tuple from ...tokenization_utils import PreTrainedTokenizer from ...utils import logging UpperCAmelCase_ = logging.get_logger(__name__) UpperCAmelCase_ = { """vocab_file""": """vocab.json""", """tokenizer_config_file""": """tokenizer_config.json""", """merges_file""": """merges.txt""", } UpperCAmelCase_ = { """vocab_file""": { """facebook/s2t-wav2vec2-large-en-de""": ( """https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/vocab.json""" ), }, """tokenizer_config_file""": { """facebook/s2t-wav2vec2-large-en-de""": ( """https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/tokenizer_config.json""" ), }, """merges_file""": { """facebook/s2t-wav2vec2-large-en-de""": ( """https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/merges.txt""" ), }, } UpperCAmelCase_ = """</w>""" UpperCAmelCase_ = """@@ """ def SCREAMING_SNAKE_CASE_ ( _snake_case :Optional[Any] ) -> List[str]: _A = set() _A = word[0] for char in word[1:]: pairs.add((prev_char, char) ) _A = char return pairs # Speech2Text2 has no max input length UpperCAmelCase_ = {"""facebook/s2t-wav2vec2-large-en-de""": 1_0_2_4} class lowerCamelCase__ ( _A): """simple docstring""" a__ : Dict = VOCAB_FILES_NAMES a__ : str = PRETRAINED_VOCAB_FILES_MAP a__ : Optional[Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES a__ : List[Any] = ["input_ids", "attention_mask"] def __init__( self : Optional[Any] , __lowerCAmelCase : Optional[int] , __lowerCAmelCase : List[str]="<s>" , __lowerCAmelCase : Tuple="<pad>" , __lowerCAmelCase : Optional[Any]="</s>" , __lowerCAmelCase : Dict="<unk>" , __lowerCAmelCase : Union[str, Any]=False , __lowerCAmelCase : Optional[int]=None , **__lowerCAmelCase : str , ) -> Dict: super().__init__( unk_token=__lowerCAmelCase , bos_token=__lowerCAmelCase , eos_token=__lowerCAmelCase , pad_token=__lowerCAmelCase , do_lower_case=__lowerCAmelCase , **__lowerCAmelCase , ) _A = do_lower_case with open(__lowerCAmelCase , encoding='''utf-8''' ) as vocab_handle: _A = json.load(__lowerCAmelCase ) _A = {v: k for k, v in self.encoder.items()} if merges_file is None: logger.info(f'''No merges files provided. 
{self.__class__.__name__} can only be used for decoding.''' ) _A = None _A = None else: with open(__lowerCAmelCase , encoding='''utf-8''' ) as merges_handle: _A = merges_handle.read().split('''\n''' )[:-1] _A = [tuple(merge.split()[:2] ) for merge in merges] _A = dict(zip(__lowerCAmelCase , range(len(__lowerCAmelCase ) ) ) ) _A = {} @property def snake_case_ ( self : List[str] ) -> int: return len(self.decoder ) def snake_case_ ( self : Dict ) -> Dict: return dict(self.encoder , **self.added_tokens_encoder ) def snake_case_ ( self : Union[str, Any] , __lowerCAmelCase : Any ) -> Union[str, Any]: _A = tuple(token[:-1] ) + (token[-1] + BPE_TOKEN_MERGES,) if token in self.cache: return self.cache[token] _A = get_pairs(__lowerCAmelCase ) if not pairs: return token while True: _A = min(__lowerCAmelCase , key=lambda __lowerCAmelCase : self.bpe_ranks.get(__lowerCAmelCase , float('''inf''' ) ) ) if bigram not in self.bpe_ranks: break _A , _A = bigram _A = [] _A = 0 while i < len(__lowerCAmelCase ): try: _A = word.index(__lowerCAmelCase , __lowerCAmelCase ) except ValueError: new_word.extend(word[i:] ) break else: new_word.extend(word[i:j] ) _A = j if word[i] == first and i < len(__lowerCAmelCase ) - 1 and word[i + 1] == second: new_word.append(first + second ) i += 2 else: new_word.append(word[i] ) i += 1 _A = tuple(__lowerCAmelCase ) _A = new_word if len(__lowerCAmelCase ) == 1: break else: _A = get_pairs(__lowerCAmelCase ) _A = ''' '''.join(__lowerCAmelCase ) if word == "\n " + BPE_TOKEN_MERGES: _A = '''\n''' + BPE_TOKEN_MERGES if word.endswith(__lowerCAmelCase ): _A = word.replace(__lowerCAmelCase , '''''' ) _A = word.replace(''' ''' , __lowerCAmelCase ) _A = word return word def snake_case_ ( self : Optional[Any] , __lowerCAmelCase : Tuple ) -> Optional[int]: if self.bpe_ranks is None: raise ValueError( '''This tokenizer was instantiated without a `merges.txt` file, so''' ''' that it can only be used for decoding, not for encoding.''' '''Make sure to provide `merges.txt` file at instantiation to enable ''' '''encoding.''' ) if self.do_lower_case: _A = text.lower() _A = text.split() _A = [] for token in text: if token: split_tokens.extend(list(self.bpe(__lowerCAmelCase ).split(''' ''' ) ) ) return split_tokens def snake_case_ ( self : List[Any] , __lowerCAmelCase : str ) -> int: return self.encoder.get(__lowerCAmelCase , self.encoder.get(self.unk_token ) ) def snake_case_ ( self : str , __lowerCAmelCase : int ) -> str: _A = self.decoder.get(__lowerCAmelCase , self.unk_token ) return result def snake_case_ ( self : List[str] , __lowerCAmelCase : List[str] ) -> str: _A = ''' '''.join(__lowerCAmelCase ) # make sure @@ tokens are concatenated _A = ''''''.join(string.split(__lowerCAmelCase ) ) return string def snake_case_ ( self : Tuple , __lowerCAmelCase : str , __lowerCAmelCase : Optional[str] = None ) -> Tuple[str]: if not os.path.isdir(__lowerCAmelCase ): logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' ) return _A = os.path.join( __lowerCAmelCase , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] ) _A = os.path.join( __lowerCAmelCase , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''merges_file'''] ) with open(__lowerCAmelCase , '''w''' , encoding='''utf-8''' ) as f: f.write(json.dumps(self.encoder , indent=2 , sort_keys=__lowerCAmelCase , ensure_ascii=__lowerCAmelCase ) + '''\n''' ) _A = 0 if self.bpe_ranks is None: return (vocab_file,) with open(__lowerCAmelCase , '''w''' , 
encoding='''utf-8''' ) as writer: for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda __lowerCAmelCase : kv[1] ): if index != token_index: logger.warning( f'''Saving vocabulary to {merges_file}: BPE merge indices are not consecutive.''' ''' Please check that the tokenizer is not corrupted!''' ) _A = token_index writer.write(''' '''.join(__lowerCAmelCase ) + '''\n''' ) index += 1 return (vocab_file, merges_file)
2
1
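The tokenizer sample above applies classic BPE: repeatedly merge the adjacent symbol pair with the lowest merge rank until no ranked pair remains. A toy walk-through with an invented two-rule merge table (a real merges.txt would define the ranks):

def get_pairs(word):
    # adjacent symbol pairs, as in the sample's helper
    return {(a, b) for a, b in zip(word, word[1:])}

bpe_ranks = {("l", "o"): 0, ("lo", "w"): 1}  # hypothetical ranks

word = ("l", "o", "w", "e", "r")
while True:
    pairs = get_pairs(word)
    best = min(pairs, key=lambda p: bpe_ranks.get(p, float("inf")))
    if best not in bpe_ranks:
        break  # no ranked pair left to merge
    first, second = best
    merged, i = [], 0
    while i < len(word):
        if i < len(word) - 1 and (word[i], word[i + 1]) == (first, second):
            merged.append(first + second)
            i += 2
        else:
            merged.append(word[i])
            i += 1
    word = tuple(merged)

assert word == ("low", "e", "r")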
import argparse import csv import logging import os import random import numpy as np import torch from torch.utils.data import DataLoader, RandomSampler, SequentialSampler, TensorDataset from tqdm import tqdm, trange from transformers import ( CONFIG_NAME, WEIGHTS_NAME, AdamW, OpenAIGPTDoubleHeadsModel, OpenAIGPTTokenizer, get_linear_schedule_with_warmup, ) logging.basicConfig( format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""", datefmt="""%m/%d/%Y %H:%M:%S""", level=logging.INFO ) UpperCAmelCase_ = logging.getLogger(__name__) def SCREAMING_SNAKE_CASE_ ( _snake_case :int , _snake_case :str ) -> List[str]: _A = np.argmax(_snake_case , axis=1 ) return np.sum(outputs == labels ) def SCREAMING_SNAKE_CASE_ ( _snake_case :Dict ) -> List[str]: with open(_snake_case , encoding='''utf_8''' ) as f: _A = csv.reader(_snake_case ) _A = [] next(_snake_case ) # skip the first line for line in tqdm(_snake_case ): output.append((''' '''.join(line[1:5] ), line[5], line[6], int(line[-1] ) - 1) ) return output def SCREAMING_SNAKE_CASE_ ( _snake_case :Optional[Any] , _snake_case :str , _snake_case :List[str] , _snake_case :Tuple , _snake_case :int , _snake_case :List[str] ) -> Tuple: _A = [] for dataset in encoded_datasets: _A = len(_snake_case ) _A = np.zeros((n_batch, 2, input_len) , dtype=np.intaa ) _A = np.zeros((n_batch, 2) , dtype=np.intaa ) _A = np.full((n_batch, 2, input_len) , fill_value=-100 , dtype=np.intaa ) _A = np.zeros((n_batch,) , dtype=np.intaa ) for ( i, (story, conta, conta, mc_label), ) in enumerate(_snake_case ): _A = [start_token] + story[:cap_length] + [delimiter_token] + conta[:cap_length] + [clf_token] _A = [start_token] + story[:cap_length] + [delimiter_token] + conta[:cap_length] + [clf_token] _A = with_conta _A = with_conta _A = len(_snake_case ) - 1 _A = len(_snake_case ) - 1 _A = with_conta _A = with_conta _A = mc_label _A = (input_ids, mc_token_ids, lm_labels, mc_labels) tensor_datasets.append(tuple(torch.tensor(_snake_case ) for t in all_inputs ) ) return tensor_datasets def SCREAMING_SNAKE_CASE_ ( ) -> Union[str, Any]: _A = argparse.ArgumentParser() parser.add_argument('''--model_name''' , type=_snake_case , default='''openai-gpt''' , help='''pretrained model name''' ) parser.add_argument('''--do_train''' , action='''store_true''' , help='''Whether to run training.''' ) parser.add_argument('''--do_eval''' , action='''store_true''' , help='''Whether to run eval on the dev set.''' ) parser.add_argument( '''--output_dir''' , default=_snake_case , type=_snake_case , required=_snake_case , help='''The output directory where the model predictions and checkpoints will be written.''' , ) parser.add_argument('''--train_dataset''' , type=_snake_case , default='''''' ) parser.add_argument('''--eval_dataset''' , type=_snake_case , default='''''' ) parser.add_argument('''--seed''' , type=_snake_case , default=42 ) parser.add_argument('''--num_train_epochs''' , type=_snake_case , default=3 ) parser.add_argument('''--train_batch_size''' , type=_snake_case , default=8 ) parser.add_argument('''--eval_batch_size''' , type=_snake_case , default=16 ) parser.add_argument('''--adam_epsilon''' , default=1E-8 , type=_snake_case , help='''Epsilon for Adam optimizer.''' ) parser.add_argument('''--max_grad_norm''' , type=_snake_case , default=1 ) parser.add_argument( '''--max_steps''' , default=-1 , type=_snake_case , help=( '''If > 0: set total number of training steps to perform. 
Override num_train_epochs.''' ) , ) parser.add_argument( '''--gradient_accumulation_steps''' , type=_snake_case , default=1 , help='''Number of updates steps to accumulate before performing a backward/update pass.''' , ) parser.add_argument('''--learning_rate''' , type=_snake_case , default=6.25E-5 ) parser.add_argument('''--warmup_steps''' , default=0 , type=_snake_case , help='''Linear warmup over warmup_steps.''' ) parser.add_argument('''--lr_schedule''' , type=_snake_case , default='''warmup_linear''' ) parser.add_argument('''--weight_decay''' , type=_snake_case , default=0.01 ) parser.add_argument('''--lm_coef''' , type=_snake_case , default=0.9 ) parser.add_argument('''--n_valid''' , type=_snake_case , default=374 ) parser.add_argument('''--server_ip''' , type=_snake_case , default='''''' , help='''Can be used for distant debugging.''' ) parser.add_argument('''--server_port''' , type=_snake_case , default='''''' , help='''Can be used for distant debugging.''' ) _A = parser.parse_args() print(_snake_case ) if args.server_ip and args.server_port: # Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script import ptvsd print('''Waiting for debugger attach''' ) ptvsd.enable_attach(address=(args.server_ip, args.server_port) , redirect_output=_snake_case ) ptvsd.wait_for_attach() random.seed(args.seed ) np.random.seed(args.seed ) torch.manual_seed(args.seed ) torch.cuda.manual_seed_all(args.seed ) _A = torch.device('''cuda''' if torch.cuda.is_available() else '''cpu''' ) _A = torch.cuda.device_count() logger.info('''device: {}, n_gpu {}'''.format(_snake_case , _snake_case ) ) if not args.do_train and not args.do_eval: raise ValueError('''At least one of `do_train` or `do_eval` must be True.''' ) if not os.path.exists(args.output_dir ): os.makedirs(args.output_dir ) # Load tokenizer and model # This loading functions also add new tokens and embeddings called `special tokens` # These new embeddings will be fine-tuned on the RocStories dataset _A = ['''_start_''', '''_delimiter_''', '''_classify_'''] _A = OpenAIGPTTokenizer.from_pretrained(args.model_name ) tokenizer.add_tokens(_snake_case ) _A = tokenizer.convert_tokens_to_ids(_snake_case ) _A = OpenAIGPTDoubleHeadsModel.from_pretrained(args.model_name ) model.resize_token_embeddings(len(_snake_case ) ) model.to(_snake_case ) # Load and encode the datasets def tokenize_and_encode(_snake_case :Tuple ): if isinstance(_snake_case , _snake_case ): return tokenizer.convert_tokens_to_ids(tokenizer.tokenize(_snake_case ) ) elif isinstance(_snake_case , _snake_case ): return obj return [tokenize_and_encode(_snake_case ) for o in obj] logger.info('''Encoding dataset...''' ) _A = load_rocstories_dataset(args.train_dataset ) _A = load_rocstories_dataset(args.eval_dataset ) _A = (train_dataset, eval_dataset) _A = tokenize_and_encode(_snake_case ) # Compute the max input length for the Transformer _A = model.config.n_positions // 2 - 2 _A = max( len(story[:max_length] ) + max(len(conta[:max_length] ) , len(conta[:max_length] ) ) + 3 for dataset in encoded_datasets for story, conta, conta, _ in dataset ) _A = min(_snake_case , model.config.n_positions ) # Max size of input for the pre-trained model # Prepare inputs tensors and dataloaders _A = pre_process_datasets(_snake_case , _snake_case , _snake_case , *_snake_case ) _A , _A = tensor_datasets[0], tensor_datasets[1] _A = TensorDataset(*_snake_case ) _A = RandomSampler(_snake_case ) _A = DataLoader(_snake_case , sampler=_snake_case , 
batch_size=args.train_batch_size ) _A = TensorDataset(*_snake_case ) _A = SequentialSampler(_snake_case ) _A = DataLoader(_snake_case , sampler=_snake_case , batch_size=args.eval_batch_size ) # Prepare optimizer if args.do_train: if args.max_steps > 0: _A = args.max_steps _A = args.max_steps // (len(_snake_case ) // args.gradient_accumulation_steps) + 1 else: _A = len(_snake_case ) // args.gradient_accumulation_steps * args.num_train_epochs _A = list(model.named_parameters() ) _A = ['''bias''', '''LayerNorm.bias''', '''LayerNorm.weight'''] _A = [ { '''params''': [p for n, p in param_optimizer if not any(nd in n for nd in no_decay )], '''weight_decay''': args.weight_decay, }, {'''params''': [p for n, p in param_optimizer if any(nd in n for nd in no_decay )], '''weight_decay''': 0.0}, ] _A = AdamW(_snake_case , lr=args.learning_rate , eps=args.adam_epsilon ) _A = get_linear_schedule_with_warmup( _snake_case , num_warmup_steps=args.warmup_steps , num_training_steps=_snake_case ) if args.do_train: _A , _A , _A = 0, 0, None model.train() for _ in trange(int(args.num_train_epochs ) , desc='''Epoch''' ): _A = 0 _A = 0 _A = tqdm(_snake_case , desc='''Training''' ) for step, batch in enumerate(_snake_case ): _A = tuple(t.to(_snake_case ) for t in batch ) _A , _A , _A , _A = batch _A = model(_snake_case , mc_token_ids=_snake_case , lm_labels=_snake_case , mc_labels=_snake_case ) _A = args.lm_coef * losses[0] + losses[1] loss.backward() optimizer.step() scheduler.step() optimizer.zero_grad() tr_loss += loss.item() _A = ( loss.item() if exp_average_loss is None else 0.7 * exp_average_loss + 0.3 * loss.item() ) nb_tr_steps += 1 _A = '''Training loss: {:.2e} lr: {:.2e}'''.format(_snake_case , scheduler.get_lr()[0] ) # Save a trained model if args.do_train: # Save a trained model, configuration and tokenizer _A = model.module if hasattr(_snake_case , '''module''' ) else model # Only save the model itself # If we save using the predefined names, we can load using `from_pretrained` _A = os.path.join(args.output_dir , _snake_case ) _A = os.path.join(args.output_dir , _snake_case ) torch.save(model_to_save.state_dict() , _snake_case ) model_to_save.config.to_json_file(_snake_case ) tokenizer.save_vocabulary(args.output_dir ) # Load a trained model and vocabulary that you have fine-tuned _A = OpenAIGPTDoubleHeadsModel.from_pretrained(args.output_dir ) _A = OpenAIGPTTokenizer.from_pretrained(args.output_dir ) model.to(_snake_case ) if args.do_eval: model.eval() _A , _A = 0, 0 _A , _A = 0, 0 for batch in tqdm(_snake_case , desc='''Evaluating''' ): _A = tuple(t.to(_snake_case ) for t in batch ) _A , _A , _A , _A = batch with torch.no_grad(): _A , _A , _A , _A = model( _snake_case , mc_token_ids=_snake_case , lm_labels=_snake_case , mc_labels=_snake_case ) _A = mc_logits.detach().cpu().numpy() _A = mc_labels.to('''cpu''' ).numpy() _A = accuracy(_snake_case , _snake_case ) eval_loss += mc_loss.mean().item() eval_accuracy += tmp_eval_accuracy nb_eval_examples += input_ids.size(0 ) nb_eval_steps += 1 _A = eval_loss / nb_eval_steps _A = eval_accuracy / nb_eval_examples _A = tr_loss / nb_tr_steps if args.do_train else None _A = {'''eval_loss''': eval_loss, '''eval_accuracy''': eval_accuracy, '''train_loss''': train_loss} _A = os.path.join(args.output_dir , '''eval_results.txt''' ) with open(_snake_case , '''w''' ) as writer: logger.info('''***** Eval results *****''' ) for key in sorted(result.keys() ): logger.info(''' %s = %s''' , _snake_case , str(result[key] ) ) writer.write('''%s = %s\n''' % (key, str(result[key] 
)) ) if __name__ == "__main__": main()
2
from __future__ import annotations from sys import maxsize from typing import Generic, TypeVar UpperCAmelCase_ = TypeVar("""T""") def SCREAMING_SNAKE_CASE_ ( _snake_case :int ) -> int: return (position - 1) // 2 def SCREAMING_SNAKE_CASE_ ( _snake_case :int ) -> int: return (2 * position) + 1 def SCREAMING_SNAKE_CASE_ ( _snake_case :int ) -> int: return (2 * position) + 2 class lowerCamelCase__ ( Generic[T]): """simple docstring""" def __init__( self : Optional[int] ) -> None: _A = [] _A = {} _A = 0 def __len__( self : str ) -> int: return self.elements def __repr__( self : Optional[int] ) -> str: return str(self.heap ) def snake_case_ ( self : str ) -> bool: # Check if the priority queue is empty return self.elements == 0 def snake_case_ ( self : Optional[int] , __lowerCAmelCase : T , __lowerCAmelCase : int ) -> None: # Add an element with given priority to the queue self.heap.append((elem, weight) ) _A = self.elements self.elements += 1 self._bubble_up(__lowerCAmelCase ) def snake_case_ ( self : Tuple ) -> T: # Remove and return the element with lowest weight (highest priority) if self.elements > 1: self._swap_nodes(0 , self.elements - 1 ) _A , _A = self.heap.pop() del self.position_map[elem] self.elements -= 1 if self.elements > 0: _A , _A = self.heap[0] self._bubble_down(__lowerCAmelCase ) return elem def snake_case_ ( self : int , __lowerCAmelCase : T , __lowerCAmelCase : int ) -> None: # Update the weight of the given key _A = self.position_map[elem] _A = (elem, weight) if position > 0: _A = get_parent_position(__lowerCAmelCase ) _A , _A = self.heap[parent_position] if parent_weight > weight: self._bubble_up(__lowerCAmelCase ) else: self._bubble_down(__lowerCAmelCase ) else: self._bubble_down(__lowerCAmelCase ) def snake_case_ ( self : Optional[Any] , __lowerCAmelCase : T ) -> None: # Place a node at the proper position (upward movement) [to be used internally # only] _A = self.position_map[elem] if curr_pos == 0: return None _A = get_parent_position(__lowerCAmelCase ) _A , _A = self.heap[curr_pos] _A , _A = self.heap[parent_position] if parent_weight > weight: self._swap_nodes(__lowerCAmelCase , __lowerCAmelCase ) return self._bubble_up(__lowerCAmelCase ) return None def snake_case_ ( self : Dict , __lowerCAmelCase : T ) -> None: # Place a node at the proper position (downward movement) [to be used # internally only] _A = self.position_map[elem] _A , _A = self.heap[curr_pos] _A = get_child_left_position(__lowerCAmelCase ) _A = get_child_right_position(__lowerCAmelCase ) if child_left_position < self.elements and child_right_position < self.elements: _A , _A = self.heap[child_left_position] _A , _A = self.heap[child_right_position] if child_right_weight < child_left_weight and child_right_weight < weight: self._swap_nodes(__lowerCAmelCase , __lowerCAmelCase ) return self._bubble_down(__lowerCAmelCase ) if child_left_position < self.elements: _A , _A = self.heap[child_left_position] if child_left_weight < weight: self._swap_nodes(__lowerCAmelCase , __lowerCAmelCase ) return self._bubble_down(__lowerCAmelCase ) else: return None if child_right_position < self.elements: _A , _A = self.heap[child_right_position] if child_right_weight < weight: self._swap_nodes(__lowerCAmelCase , __lowerCAmelCase ) return self._bubble_down(__lowerCAmelCase ) return None def snake_case_ ( self : List[str] , __lowerCAmelCase : int , __lowerCAmelCase : int ) -> None: # Swap the nodes at the given positions _A = self.heap[nodea_pos][0] _A = self.heap[nodea_pos][0] _A , _A = ( self.heap[nodea_pos], 
self.heap[nodea_pos], ) _A = nodea_pos _A = nodea_pos class lowerCamelCase__ ( Generic[T]): """simple docstring""" def __init__( self : str ) -> None: _A = {} _A = 0 def __repr__( self : str ) -> str: return str(self.connections ) def __len__( self : Dict ) -> int: return self.nodes def snake_case_ ( self : Any , __lowerCAmelCase : T ) -> None: # Add a node in the graph if it is not in the graph if node not in self.connections: _A = {} self.nodes += 1 def snake_case_ ( self : str , __lowerCAmelCase : T , __lowerCAmelCase : T , __lowerCAmelCase : int ) -> None: # Add an edge between 2 nodes in the graph self.add_node(__lowerCAmelCase ) self.add_node(__lowerCAmelCase ) _A = weight _A = weight def SCREAMING_SNAKE_CASE_ ( _snake_case :GraphUndirectedWeighted[T] , ) -> tuple[dict[T, int], dict[T, T | None]]: _A = {node: maxsize for node in graph.connections} _A = {node: None for node in graph.connections} _A = MinPriorityQueue() for node, weight in dist.items(): priority_queue.push(_snake_case , _snake_case ) if priority_queue.is_empty(): return dist, parent # initialization _A = priority_queue.extract_min() _A = 0 for neighbour in graph.connections[node]: if dist[neighbour] > dist[node] + graph.connections[node][neighbour]: _A = dist[node] + graph.connections[node][neighbour] priority_queue.update_key(_snake_case , dist[neighbour] ) _A = node # running prim's algorithm while not priority_queue.is_empty(): _A = priority_queue.extract_min() for neighbour in graph.connections[node]: if dist[neighbour] > dist[node] + graph.connections[node][neighbour]: _A = dist[node] + graph.connections[node][neighbour] priority_queue.update_key(_snake_case , dist[neighbour] ) _A = node return dist, parent
2
1
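The sample above implements Prim's algorithm with a decrease-key priority queue. For contrast, a compact self-contained variant using heapq with lazy deletion instead of decrease-key (illustrative, independent of the sample's class names):

import heapq

def prim_mst_weight(graph, start):
    # graph: {node: {neighbour: weight}}; returns total MST weight
    visited = set()
    heap = [(0, start)]
    total = 0
    while heap and len(visited) < len(graph):
        weight, node = heapq.heappop(heap)
        if node in visited:
            continue  # stale entry, superseded by a cheaper one
        visited.add(node)
        total += weight
        for neighbour, w in graph[node].items():
            if neighbour not in visited:
                heapq.heappush(heap, (w, neighbour))
    return total

g = {
    "a": {"b": 1, "c": 4},
    "b": {"a": 1, "c": 2},
    "c": {"a": 4, "b": 2},
}
assert prim_mst_weight(g, "a") == 3  # edges a-b (1) and b-c (2)

Lazy deletion trades a slightly larger heap for a much simpler queue; the sample's decrease-key queue keeps the heap minimal at the cost of the position map bookkeeping.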
import io import math from typing import Dict, Optional, Union import numpy as np from huggingface_hub import hf_hub_download from ...image_processing_utils import BaseImageProcessor, BatchFeature from ...image_transforms import convert_to_rgb, normalize, to_channel_dimension_format, to_pil_image from ...image_utils import ( ChannelDimension, ImageInput, get_image_size, infer_channel_dimension_format, make_list_of_images, to_numpy_array, valid_images, ) from ...utils import TensorType, is_torch_available, is_vision_available, logging from ...utils.import_utils import requires_backends if is_vision_available(): import textwrap from PIL import Image, ImageDraw, ImageFont if is_torch_available(): import torch from transformers.pytorch_utils import is_torch_greater_or_equal_than_1_11 else: UpperCAmelCase_ = False UpperCAmelCase_ = logging.get_logger(__name__) UpperCAmelCase_ = """ybelkada/fonts""" def SCREAMING_SNAKE_CASE_ ( ) -> int: if is_torch_available() and not is_torch_greater_or_equal_than_1_11: raise ImportError( F'''You are using torch=={torch.__version__}, but torch>=1.11.0 is required to use ''' '''Pix2StructImageProcessor. Please upgrade torch.''' ) def SCREAMING_SNAKE_CASE_ ( _snake_case :Dict , _snake_case :Optional[int] , _snake_case :int ) -> Any: requires_backends(_snake_case , ['''torch'''] ) _check_torch_version() _A = image_tensor.unsqueeze(0 ) _A = torch.nn.functional.unfold(_snake_case , (patch_height, patch_width) , stride=(patch_height, patch_width) ) _A = patches.reshape(image_tensor.size(0 ) , image_tensor.size(1 ) , _snake_case , _snake_case , -1 ) _A = patches.permute(0 , 4 , 2 , 3 , 1 ).reshape( image_tensor.size(2 ) // patch_height , image_tensor.size(3 ) // patch_width , image_tensor.size(1 ) * patch_height * patch_width , ) return patches.unsqueeze(0 ) def SCREAMING_SNAKE_CASE_ ( _snake_case :str , _snake_case :int = 36 , _snake_case :str = "black" , _snake_case :str = "white" , _snake_case :int = 5 , _snake_case :int = 5 , _snake_case :int = 5 , _snake_case :int = 5 , _snake_case :Optional[bytes] = None , _snake_case :Optional[str] = None , ) -> Image.Image: requires_backends(_snake_case , '''vision''' ) # Add new lines so that each line is no more than 80 characters. _A = textwrap.TextWrapper(width=80 ) _A = wrapper.wrap(text=_snake_case ) _A = '''\n'''.join(_snake_case ) if font_bytes is not None and font_path is None: _A = io.BytesIO(_snake_case ) elif font_path is not None: _A = font_path else: _A = hf_hub_download(_snake_case , '''Arial.TTF''' ) _A = ImageFont.truetype(_snake_case , encoding='''UTF-8''' , size=_snake_case ) # Use a temporary canvas to determine the width and height in pixels when # rendering the text. _A = ImageDraw.Draw(Image.new('''RGB''' , (1, 1) , _snake_case ) ) _A , _A , _A , _A = temp_draw.textbbox((0, 0) , _snake_case , _snake_case ) # Create the actual image with a bit of padding around the text. 
_A = text_width + left_padding + right_padding _A = text_height + top_padding + bottom_padding _A = Image.new('''RGB''' , (image_width, image_height) , _snake_case ) _A = ImageDraw.Draw(_snake_case ) draw.text(xy=(left_padding, top_padding) , text=_snake_case , fill=_snake_case , font=_snake_case ) return image def SCREAMING_SNAKE_CASE_ ( _snake_case :np.ndarray , _snake_case :str , **_snake_case :Optional[int] ) -> Any: requires_backends(_snake_case , '''vision''' ) # Convert to PIL image if necessary _A = to_pil_image(_snake_case ) _A = render_text(_snake_case , **_snake_case ) _A = max(header_image.width , image.width ) _A = int(image.height * (new_width / image.width) ) _A = int(header_image.height * (new_width / header_image.width) ) _A = Image.new('''RGB''' , (new_width, new_height + new_header_height) , '''white''' ) new_image.paste(header_image.resize((new_width, new_header_height) ) , (0, 0) ) new_image.paste(image.resize((new_width, new_height) ) , (0, new_header_height) ) # Convert back to the original framework if necessary _A = to_numpy_array(_snake_case ) if infer_channel_dimension_format(_snake_case ) == ChannelDimension.LAST: _A = to_channel_dimension_format(_snake_case , ChannelDimension.LAST ) return new_image class lowerCamelCase__ ( _A): """simple docstring""" a__ : Optional[Any] = ["flattened_patches"] def __init__( self : Any , __lowerCAmelCase : bool = True , __lowerCAmelCase : bool = True , __lowerCAmelCase : Dict[str, int] = None , __lowerCAmelCase : int = 20_48 , __lowerCAmelCase : bool = False , **__lowerCAmelCase : Any , ) -> None: super().__init__(**__lowerCAmelCase ) _A = patch_size if patch_size is not None else {'''height''': 16, '''width''': 16} _A = do_normalize _A = do_convert_rgb _A = max_patches _A = is_vqa def snake_case_ ( self : Optional[Any] , __lowerCAmelCase : np.ndarray , __lowerCAmelCase : int , __lowerCAmelCase : dict , **__lowerCAmelCase : str ) -> np.ndarray: requires_backends(self.extract_flattened_patches , '''torch''' ) _check_torch_version() # convert to torch _A = to_channel_dimension_format(__lowerCAmelCase , ChannelDimension.FIRST ) _A = torch.from_numpy(__lowerCAmelCase ) _A , _A = patch_size['''height'''], patch_size['''width'''] _A , _A = get_image_size(__lowerCAmelCase ) # maximize scale s.t. _A = math.sqrt(max_patches * (patch_height / image_height) * (patch_width / image_width) ) _A = max(min(math.floor(scale * image_height / patch_height ) , __lowerCAmelCase ) , 1 ) _A = max(min(math.floor(scale * image_width / patch_width ) , __lowerCAmelCase ) , 1 ) _A = max(num_feasible_rows * patch_height , 1 ) _A = max(num_feasible_cols * patch_width , 1 ) _A = torch.nn.functional.interpolate( image.unsqueeze(0 ) , size=(resized_height, resized_width) , mode='''bilinear''' , align_corners=__lowerCAmelCase , antialias=__lowerCAmelCase , ).squeeze(0 ) # [1, rows, columns, patch_height * patch_width * image_channels] _A = torch_extract_patches(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) _A = patches.shape _A = patches_shape[1] _A = patches_shape[2] _A = patches_shape[3] # [rows * columns, patch_height * patch_width * image_channels] _A = patches.reshape([rows * columns, depth] ) # [rows * columns, 1] _A = torch.arange(__lowerCAmelCase ).reshape([rows, 1] ).repeat(1 , __lowerCAmelCase ).reshape([rows * columns, 1] ) _A = torch.arange(__lowerCAmelCase ).reshape([1, columns] ).repeat(__lowerCAmelCase , 1 ).reshape([rows * columns, 1] ) # Offset by 1 so the ids do not contain zeros, which represent padding. 
row_ids += 1 col_ids += 1 # Prepare additional patch features. # [rows * columns, 1] _A = row_ids.to(torch.floataa ) _A = col_ids.to(torch.floataa ) # [rows * columns, 2 + patch_height * patch_width * image_channels] _A = torch.cat([row_ids, col_ids, patches] , -1 ) # [max_patches, 2 + patch_height * patch_width * image_channels] _A = torch.nn.functional.pad(__lowerCAmelCase , [0, 0, 0, max_patches - (rows * columns)] ).float() _A = to_numpy_array(__lowerCAmelCase ) return result def snake_case_ ( self : Optional[int] , __lowerCAmelCase : np.ndarray , __lowerCAmelCase : Optional[Union[str, ChannelDimension]] = None , **__lowerCAmelCase : str ) -> np.ndarray: if image.dtype == np.uinta: _A = image.astype(np.floataa ) # take mean across the whole `image` _A = np.mean(__lowerCAmelCase ) _A = np.std(__lowerCAmelCase ) _A = max(__lowerCAmelCase , 1.0 / math.sqrt(np.prod(image.shape ) ) ) return normalize(__lowerCAmelCase , mean=__lowerCAmelCase , std=__lowerCAmelCase , **__lowerCAmelCase ) def snake_case_ ( self : Optional[int] , __lowerCAmelCase : ImageInput , __lowerCAmelCase : Optional[str] = None , __lowerCAmelCase : bool = None , __lowerCAmelCase : Optional[bool] = None , __lowerCAmelCase : Optional[int] = None , __lowerCAmelCase : Optional[Dict[str, int]] = None , __lowerCAmelCase : Optional[Union[str, TensorType]] = None , __lowerCAmelCase : ChannelDimension = ChannelDimension.FIRST , **__lowerCAmelCase : int , ) -> ImageInput: _A = do_normalize if do_normalize is not None else self.do_normalize _A = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb _A = patch_size if patch_size is not None else self.patch_size _A = max_patches if max_patches is not None else self.max_patches _A = self.is_vqa if kwargs.get('''data_format''' , __lowerCAmelCase ) is not None: raise ValueError('''data_format is not an accepted input as the outputs are ''' ) _A = make_list_of_images(__lowerCAmelCase ) if not valid_images(__lowerCAmelCase ): raise ValueError( '''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, ''' '''torch.Tensor, tf.Tensor or jax.ndarray.''' ) # PIL RGBA images are converted to RGB if do_convert_rgb: _A = [convert_to_rgb(__lowerCAmelCase ) for image in images] # All transformations expect numpy arrays. _A = [to_numpy_array(__lowerCAmelCase ) for image in images] if is_vqa: if header_text is None: raise ValueError('''A header text must be provided for VQA models.''' ) _A = kwargs.pop('''font_bytes''' , __lowerCAmelCase ) _A = kwargs.pop('''font_path''' , __lowerCAmelCase ) if isinstance(__lowerCAmelCase , __lowerCAmelCase ): _A = [header_text] * len(__lowerCAmelCase ) _A = [ render_header(__lowerCAmelCase , header_text[i] , font_bytes=__lowerCAmelCase , font_path=__lowerCAmelCase ) for i, image in enumerate(__lowerCAmelCase ) ] if do_normalize: _A = [self.normalize(image=__lowerCAmelCase ) for image in images] # convert to torch tensor and permute _A = [ self.extract_flattened_patches(image=__lowerCAmelCase , max_patches=__lowerCAmelCase , patch_size=__lowerCAmelCase ) for image in images ] # create attention mask in numpy _A = [(image.sum(axis=-1 ) != 0).astype(np.floataa ) for image in images] _A = BatchFeature( data={'''flattened_patches''': images, '''attention_mask''': attention_masks} , tensor_type=__lowerCAmelCase ) return encoded_outputs
2
import os from shutil import copyfile from typing import Any, Dict, List, Optional, Tuple import sentencepiece as spm from ...tokenization_utils import AddedToken, PreTrainedTokenizer from ...utils import logging UpperCAmelCase_ = logging.get_logger(__name__) UpperCAmelCase_ = """▁""" UpperCAmelCase_ = {"""vocab_file""": """sentencepiece.bpe.model""", """monolingual_vocab_file""": """dict.txt"""} UpperCAmelCase_ = { """vocab_file""": { """vinai/bartpho-syllable""": """https://huggingface.co/vinai/bartpho-syllable/resolve/main/sentencepiece.bpe.model""", }, """monolingual_vocab_file""": { """vinai/bartpho-syllable""": """https://huggingface.co/vinai/bartpho-syllable/resolve/main/dict.txt""", }, } UpperCAmelCase_ = {"""vinai/bartpho-syllable""": 1_0_2_4} class lowerCamelCase__ ( _A): """simple docstring""" a__ : int = VOCAB_FILES_NAMES a__ : Tuple = PRETRAINED_VOCAB_FILES_MAP a__ : Optional[int] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES a__ : Tuple = ["input_ids", "attention_mask"] def __init__( self : Union[str, Any] , __lowerCAmelCase : Dict , __lowerCAmelCase : List[str] , __lowerCAmelCase : Union[str, Any]="<s>" , __lowerCAmelCase : Dict="</s>" , __lowerCAmelCase : List[Any]="</s>" , __lowerCAmelCase : Optional[Any]="<s>" , __lowerCAmelCase : Tuple="<unk>" , __lowerCAmelCase : int="<pad>" , __lowerCAmelCase : Optional[Any]="<mask>" , __lowerCAmelCase : Optional[Dict[str, Any]] = None , **__lowerCAmelCase : Tuple , ) -> None: # Mask token behave like a normal word, i.e. include the space before it _A = AddedToken(__lowerCAmelCase , lstrip=__lowerCAmelCase , rstrip=__lowerCAmelCase ) if isinstance(__lowerCAmelCase , __lowerCAmelCase ) else mask_token _A = {} if sp_model_kwargs is None else sp_model_kwargs super().__init__( bos_token=__lowerCAmelCase , eos_token=__lowerCAmelCase , unk_token=__lowerCAmelCase , sep_token=__lowerCAmelCase , cls_token=__lowerCAmelCase , pad_token=__lowerCAmelCase , mask_token=__lowerCAmelCase , sp_model_kwargs=self.sp_model_kwargs , **__lowerCAmelCase , ) _A = vocab_file _A = monolingual_vocab_file _A = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(str(__lowerCAmelCase ) ) # Load the reduced vocab # Keep order of special tokens for backward compatibility _A = {} _A = 0 for token in [bos_token, pad_token, eos_token, unk_token, sep_token, cls_token]: if str(__lowerCAmelCase ) not in self.fairseq_tokens_to_ids: _A = cnt cnt += 1 with open(__lowerCAmelCase , '''r''' , encoding='''utf-8''' ) as f: for line in f.readlines(): _A = line.strip().split()[0] _A = len(self.fairseq_tokens_to_ids ) if str(__lowerCAmelCase ) not in self.fairseq_tokens_to_ids: _A = len(self.fairseq_tokens_to_ids ) _A = {v: k for k, v in self.fairseq_tokens_to_ids.items()} def __getstate__( self : Any ) -> List[Any]: _A = self.__dict__.copy() _A = None _A = self.sp_model.serialized_model_proto() return state def __setstate__( self : Union[str, Any] , __lowerCAmelCase : Dict ) -> List[Any]: _A = d # for backward compatibility if not hasattr(self , '''sp_model_kwargs''' ): _A = {} _A = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.LoadFromSerializedProto(self.sp_model_proto ) def snake_case_ ( self : Optional[Any] , __lowerCAmelCase : List[int] , __lowerCAmelCase : Optional[List[int]] = None ) -> List[int]: if token_ids_a is None: return [self.cls_token_id] + token_ids_a + [self.sep_token_id] _A = [self.cls_token_id] _A = [self.sep_token_id] return cls + token_ids_a + sep + sep + token_ids_a + sep def snake_case_ ( self : List[Any] , __lowerCAmelCase : 
List[int] , __lowerCAmelCase : Optional[List[int]] = None , __lowerCAmelCase : bool = False ) -> List[int]: if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=__lowerCAmelCase , token_ids_a=__lowerCAmelCase , already_has_special_tokens=__lowerCAmelCase ) if token_ids_a is None: return [1] + ([0] * len(__lowerCAmelCase )) + [1] return [1] + ([0] * len(__lowerCAmelCase )) + [1, 1] + ([0] * len(__lowerCAmelCase )) + [1] def snake_case_ ( self : Any , __lowerCAmelCase : List[int] , __lowerCAmelCase : Optional[List[int]] = None ) -> List[int]: _A = [self.sep_token_id] _A = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0] @property def snake_case_ ( self : Optional[int] ) -> Union[str, Any]: return len(self.fairseq_ids_to_tokens ) def snake_case_ ( self : Dict ) -> Optional[Any]: _A = {self.convert_ids_to_tokens(__lowerCAmelCase ): i for i in range(self.vocab_size )} vocab.update(self.added_tokens_encoder ) return vocab def snake_case_ ( self : List[str] , __lowerCAmelCase : str ) -> List[str]: return self.sp_model.encode(__lowerCAmelCase , out_type=__lowerCAmelCase ) def snake_case_ ( self : str , __lowerCAmelCase : Optional[Any] ) -> Dict: if token in self.fairseq_tokens_to_ids: return self.fairseq_tokens_to_ids[token] else: return self.unk_token_id def snake_case_ ( self : int , __lowerCAmelCase : Optional[int] ) -> List[str]: return self.fairseq_ids_to_tokens[index] def snake_case_ ( self : List[str] , __lowerCAmelCase : Union[str, Any] ) -> Tuple: _A = ''''''.join(__lowerCAmelCase ).replace(__lowerCAmelCase , ''' ''' ).strip() return out_string def snake_case_ ( self : str , __lowerCAmelCase : str , __lowerCAmelCase : Optional[str] = None ) -> Tuple[str]: if not os.path.isdir(__lowerCAmelCase ): logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' ) return _A = os.path.join( __lowerCAmelCase , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] ) _A = os.path.join( __lowerCAmelCase , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''monolingual_vocab_file'''] , ) if os.path.abspath(self.vocab_file ) != os.path.abspath(__lowerCAmelCase ) and os.path.isfile(self.vocab_file ): copyfile(self.vocab_file , __lowerCAmelCase ) elif not os.path.isfile(self.vocab_file ): with open(__lowerCAmelCase , '''wb''' ) as fi: _A = self.sp_model.serialized_model_proto() fi.write(__lowerCAmelCase ) if os.path.abspath(self.monolingual_vocab_file ) != os.path.abspath( __lowerCAmelCase ) and os.path.isfile(self.monolingual_vocab_file ): copyfile(self.monolingual_vocab_file , __lowerCAmelCase ) elif not os.path.isfile(self.monolingual_vocab_file ): with open(__lowerCAmelCase , '''w''' , encoding='''utf-8''' ) as fp: for token in self.fairseq_tokens_to_ids: if token not in self.all_special_tokens: fp.write(f'''{str(__lowerCAmelCase )} \n''' ) return out_vocab_file, out_monolingual_vocab_file
2
1
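The image-processor sample above relies on torch.nn.functional.unfold to cut an image into non-overlapping patches. A minimal shape check showing the same trick (sizes here are arbitrary, chosen only for the sketch):

import torch

image = torch.randn(1, 3, 32, 48)  # (batch, channels, height, width)
ph, pw = 16, 16                    # patch height / width
patches = torch.nn.functional.unfold(image, (ph, pw), stride=(ph, pw))
# unfold returns (batch, channels * ph * pw, num_patches): one column per patch
assert patches.shape == (1, 3 * ph * pw, (32 // ph) * (48 // pw))
# reshape to a (rows, cols, patch_pixels) grid, as the processor does
rows, cols = 32 // ph, 48 // pw
grid = patches.transpose(1, 2).reshape(rows, cols, 3 * ph * pw)
assert grid.shape == (2, 3, 768)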
from ....configuration_utils import PretrainedConfig from ....utils import logging UpperCAmelCase_ = logging.get_logger(__name__) UpperCAmelCase_ = { """speechbrain/m-ctc-t-large""": """https://huggingface.co/speechbrain/m-ctc-t-large/resolve/main/config.json""", # See all M-CTC-T models at https://huggingface.co/models?filter=mctct } class lowerCamelCase__ ( _A): """simple docstring""" a__ : List[Any] = "mctct" def __init__( self : Optional[Any] , __lowerCAmelCase : Dict=80_65 , __lowerCAmelCase : Any=15_36 , __lowerCAmelCase : Optional[int]=36 , __lowerCAmelCase : Dict=61_44 , __lowerCAmelCase : Union[str, Any]=4 , __lowerCAmelCase : Union[str, Any]=3_84 , __lowerCAmelCase : Dict=9_20 , __lowerCAmelCase : Optional[Any]=1E-5 , __lowerCAmelCase : Optional[int]=0.3 , __lowerCAmelCase : Any="relu" , __lowerCAmelCase : str=0.02 , __lowerCAmelCase : Optional[Any]=0.3 , __lowerCAmelCase : int=0.3 , __lowerCAmelCase : Optional[int]=1 , __lowerCAmelCase : Optional[int]=0 , __lowerCAmelCase : Any=2 , __lowerCAmelCase : List[Any]=1 , __lowerCAmelCase : Tuple=0.3 , __lowerCAmelCase : Union[str, Any]=1 , __lowerCAmelCase : List[str]=(7,) , __lowerCAmelCase : Any=(3,) , __lowerCAmelCase : List[Any]=80 , __lowerCAmelCase : List[str]=1 , __lowerCAmelCase : Optional[int]=None , __lowerCAmelCase : str="sum" , __lowerCAmelCase : Tuple=False , **__lowerCAmelCase : int , ) -> Tuple: super().__init__(**__lowerCAmelCase , pad_token_id=__lowerCAmelCase , bos_token_id=__lowerCAmelCase , eos_token_id=__lowerCAmelCase ) _A = vocab_size _A = hidden_size _A = num_hidden_layers _A = intermediate_size _A = num_attention_heads _A = attention_head_dim _A = max_position_embeddings _A = layer_norm_eps _A = layerdrop _A = hidden_act _A = initializer_range _A = hidden_dropout_prob _A = attention_probs_dropout_prob _A = pad_token_id _A = bos_token_id _A = eos_token_id _A = conv_glu_dim _A = conv_dropout _A = num_conv_layers _A = input_feat_per_channel _A = input_channels _A = conv_channels _A = ctc_loss_reduction _A = ctc_zero_infinity # prevents config testing fail with exporting to json _A = list(__lowerCAmelCase ) _A = list(__lowerCAmelCase ) if len(self.conv_kernel ) != self.num_conv_layers: raise ValueError( '''Configuration for convolutional module is incorrect. ''' '''It is required that `len(config.conv_kernel)` == `config.num_conv_layers` ''' f'''but is `len(config.conv_kernel) = {len(self.conv_kernel )}`, ''' f'''`config.num_conv_layers = {self.num_conv_layers}`.''' )
2
from __future__ import annotations


def depth_first_search(graph: dict, start: str) -> set[str]:
    explored, stack = set(start), [start]
    while stack:
        v = stack.pop()
        explored.add(v)
        # Differences from BFS:
        # 1) pop last element instead of first one
        # 2) add adjacent elements to stack without exploring them
        for adj in reversed(graph[v]):
            if adj not in explored:
                stack.append(adj)
    return explored


G = {
    "A": ["B", "C", "D"],
    "B": ["A", "D", "E"],
    "C": ["A", "F"],
    "D": ["B", "D"],
    "E": ["B", "F"],
    "F": ["C", "E", "G"],
    "G": ["F"],
}

if __name__ == "__main__":
    import doctest

    doctest.testmod()
    print(depth_first_search(G, "A"))
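The comments above call out how this differs from BFS; for contrast, a minimal BFS sketch over the same adjacency-dict representation (illustrative only, not part of the original module):

from collections import deque


def breadth_first_search(graph: dict, start: str) -> set[str]:
    explored, queue = {start}, deque([start])
    while queue:
        v = queue.popleft()          # pop the first element instead of the last
        for adj in graph[v]:
            if adj not in explored:  # mark on enqueue so each node is queued once
                explored.add(adj)
                queue.append(adj)
    return explored


assert breadth_first_search({"A": ["B"], "B": ["A", "C"], "C": ["B"]}, "A") == {"A", "B", "C"}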
2
1
from __future__ import annotations from collections import deque class lowerCamelCase__ : """simple docstring""" def __init__( self : int , __lowerCAmelCase : list[str] ) -> Union[str, Any]: _A = [] self.adlist.append( {'''value''': '''''', '''next_states''': [], '''fail_state''': 0, '''output''': []} ) for keyword in keywords: self.add_keyword(__lowerCAmelCase ) self.set_fail_transitions() def snake_case_ ( self : Dict , __lowerCAmelCase : int , __lowerCAmelCase : str ) -> int | None: for state in self.adlist[current_state]["next_states"]: if char == self.adlist[state]["value"]: return state return None def snake_case_ ( self : str , __lowerCAmelCase : str ) -> None: _A = 0 for character in keyword: _A = self.find_next_state(__lowerCAmelCase , __lowerCAmelCase ) if next_state is None: self.adlist.append( { '''value''': character, '''next_states''': [], '''fail_state''': 0, '''output''': [], } ) self.adlist[current_state]["next_states"].append(len(self.adlist ) - 1 ) _A = len(self.adlist ) - 1 else: _A = next_state self.adlist[current_state]["output"].append(__lowerCAmelCase ) def snake_case_ ( self : List[Any] ) -> None: _A = deque() for node in self.adlist[0]["next_states"]: q.append(__lowerCAmelCase ) _A = 0 while q: _A = q.popleft() for child in self.adlist[r]["next_states"]: q.append(__lowerCAmelCase ) _A = self.adlist[r]['''fail_state'''] while ( self.find_next_state(__lowerCAmelCase , self.adlist[child]['''value'''] ) is None and state != 0 ): _A = self.adlist[state]['''fail_state'''] _A = self.find_next_state( __lowerCAmelCase , self.adlist[child]['''value'''] ) if self.adlist[child]["fail_state"] is None: _A = 0 _A = ( self.adlist[child]['''output'''] + self.adlist[self.adlist[child]['''fail_state''']]['''output'''] ) def snake_case_ ( self : List[str] , __lowerCAmelCase : str ) -> dict[str, list[int]]: _A = {} # returns a dict with keywords and list of its occurrences _A = 0 for i in range(len(__lowerCAmelCase ) ): while ( self.find_next_state(__lowerCAmelCase , string[i] ) is None and current_state != 0 ): _A = self.adlist[current_state]['''fail_state'''] _A = self.find_next_state(__lowerCAmelCase , string[i] ) if next_state is None: _A = 0 else: _A = next_state for key in self.adlist[current_state]["output"]: if key not in result: _A = [] result[key].append(i - len(__lowerCAmelCase ) + 1 ) return result if __name__ == "__main__": import doctest doctest.testmod()
2
from .configuration_bert_masked import MaskedBertConfig from .modeling_bert_masked import ( MaskedBertForMultipleChoice, MaskedBertForQuestionAnswering, MaskedBertForSequenceClassification, MaskedBertForTokenClassification, MaskedBertModel, ) from .modules import *
2
1
DIGITS_SQUARED = [sum(int(c, 10) ** 2 for c in i.__str__()) for i in range(100_000)]


def next_number(number: int) -> int:
    sum_of_digits_squared = 0
    while number:
        # Increased speed slightly by checking every 5 digits together.
        sum_of_digits_squared += DIGITS_SQUARED[number % 100_000]
        number //= 100_000
    return sum_of_digits_squared


# There are 2 chains made:
# one ends with 89, and the chain member 58 is the one which, when declared first,
# gives the least number of iterations for all the members to be checked;
# the other one ends with 1 and has only one element, 1.
# So 58 and 1 are chosen to be declared at the start.
# Changed dictionary to an array to quicken the solution
CHAINS = [None] * 10_000_000
CHAINS[0] = True    # the chain of 1 ends with 1
CHAINS[57] = False  # the chain of 58 ends with 89


def chain(number: int) -> bool:
    if CHAINS[number - 1] is not None:
        return CHAINS[number - 1]  # type: ignore

    number_chain = chain(next_number(number))
    CHAINS[number - 1] = number_chain

    while number < 10_000_000:
        CHAINS[number - 1] = number_chain
        number *= 10

    return number_chain


def solution(number: int = 10_000_000) -> int:
    for i in range(1, number):
        if CHAINS[i] is None:
            chain(i + 1)

    return CHAINS[:number].count(False)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    print(f"{solution() = }")
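To make the two chain endpoints concrete, a slow but direct version of the digit-square chain (a standalone sketch, separate from the memoized solution above):

def squared_digit_sum(n: int) -> int:
    return sum(int(d) ** 2 for d in str(n))


def chain_end(n: int) -> int:
    # every chain of repeated digit-square sums eventually reaches 1 or 89
    while n not in (1, 89):
        n = squared_digit_sum(n)
    return n


assert chain_end(44) == 1   # 44 -> 32 -> 13 -> 10 -> 1
assert chain_end(85) == 89  # 85 -> 89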
2
import warnings from ...configuration_utils import PretrainedConfig from ...utils import logging UpperCAmelCase_ = logging.get_logger(__name__) UpperCAmelCase_ = { """xlnet-base-cased""": """https://huggingface.co/xlnet-base-cased/resolve/main/config.json""", """xlnet-large-cased""": """https://huggingface.co/xlnet-large-cased/resolve/main/config.json""", } class lowerCamelCase__ ( _A): """simple docstring""" a__ : Any = "xlnet" a__ : Dict = ["mems"] a__ : List[str] = { "n_token": "vocab_size", # Backward compatibility "hidden_size": "d_model", "num_attention_heads": "n_head", "num_hidden_layers": "n_layer", } def __init__( self : int , __lowerCAmelCase : Dict=3_20_00 , __lowerCAmelCase : List[str]=10_24 , __lowerCAmelCase : Dict=24 , __lowerCAmelCase : Optional[Any]=16 , __lowerCAmelCase : Dict=40_96 , __lowerCAmelCase : Any="gelu" , __lowerCAmelCase : int=True , __lowerCAmelCase : List[str]="bi" , __lowerCAmelCase : Dict=0.02 , __lowerCAmelCase : Union[str, Any]=1E-12 , __lowerCAmelCase : Optional[Any]=0.1 , __lowerCAmelCase : Optional[Any]=5_12 , __lowerCAmelCase : List[str]=None , __lowerCAmelCase : Tuple=True , __lowerCAmelCase : Tuple=False , __lowerCAmelCase : Optional[Any]=False , __lowerCAmelCase : Union[str, Any]=-1 , __lowerCAmelCase : Optional[Any]=False , __lowerCAmelCase : Any="last" , __lowerCAmelCase : List[Any]=True , __lowerCAmelCase : Tuple="tanh" , __lowerCAmelCase : int=0.1 , __lowerCAmelCase : str=5 , __lowerCAmelCase : str=5 , __lowerCAmelCase : List[str]=5 , __lowerCAmelCase : List[str]=1 , __lowerCAmelCase : Optional[int]=2 , **__lowerCAmelCase : List[str] , ) -> Tuple: _A = vocab_size _A = d_model _A = n_layer _A = n_head if d_model % n_head != 0: raise ValueError(f'''\'d_model % n_head\' ({d_model % n_head}) should be equal to 0''' ) if "d_head" in kwargs: if kwargs["d_head"] != d_model // n_head: raise ValueError( f'''`d_head` ({kwargs['d_head']}) should be equal to `d_model // n_head` ({d_model // n_head})''' ) _A = d_model // n_head _A = ff_activation _A = d_inner _A = untie_r _A = attn_type _A = initializer_range _A = layer_norm_eps _A = dropout _A = mem_len _A = reuse_len _A = bi_data _A = clamp_len _A = same_length _A = summary_type _A = summary_use_proj _A = summary_activation _A = summary_last_dropout _A = start_n_top _A = end_n_top _A = bos_token_id _A = pad_token_id _A = eos_token_id if "use_cache" in kwargs: warnings.warn( '''The `use_cache` argument is deprecated and will be removed in a future version, use `use_mems_eval`''' ''' instead.''' , __lowerCAmelCase , ) _A = kwargs['''use_cache'''] _A = use_mems_eval _A = use_mems_train super().__init__(pad_token_id=__lowerCAmelCase , bos_token_id=__lowerCAmelCase , eos_token_id=__lowerCAmelCase , **__lowerCAmelCase ) @property def snake_case_ ( self : Optional[Any] ) -> Union[str, Any]: logger.info(f'''The model {self.model_type} is one of the few models that has no sequence length limit.''' ) return -1 @max_position_embeddings.setter def snake_case_ ( self : Tuple , __lowerCAmelCase : Optional[Any] ) -> Dict: # Message copied from Transformer-XL documentation raise NotImplementedError( f'''The model {self.model_type} is one of the few models that has no sequence length limit.''' )
2
1
import logging from pathlib import Path import numpy as np import pytorch_lightning as pl import torch from pytorch_lightning.callbacks import EarlyStopping, ModelCheckpoint from pytorch_lightning.utilities import rank_zero_only from utils_rag import save_json def SCREAMING_SNAKE_CASE_ ( _snake_case :int ) -> Any: _A = filter(lambda _snake_case : p.requires_grad , model.parameters() ) _A = sum([np.prod(p.size() ) for p in model_parameters] ) return params UpperCAmelCase_ = logging.getLogger(__name__) def SCREAMING_SNAKE_CASE_ ( _snake_case :str , _snake_case :Dict ) -> Any: if metric == "rouge2": _A = '''{val_avg_rouge2:.4f}-{step_count}''' elif metric == "bleu": _A = '''{val_avg_bleu:.4f}-{step_count}''' elif metric == "em": _A = '''{val_avg_em:.4f}-{step_count}''' else: raise NotImplementedError( F'''seq2seq callbacks only support rouge2 and bleu, got {metric}, You can make your own by adding to this''' ''' function.''' ) _A = ModelCheckpoint( dirpath=_snake_case , filename=_snake_case , monitor=F'''val_{metric}''' , mode='''max''' , save_top_k=3 , every_n_epochs=1 , ) return checkpoint_callback def SCREAMING_SNAKE_CASE_ ( _snake_case :int , _snake_case :Any ) -> Union[str, Any]: return EarlyStopping( monitor=F'''val_{metric}''' , mode='''min''' if '''loss''' in metric else '''max''' , patience=_snake_case , verbose=_snake_case , ) class lowerCamelCase__ ( pl.Callback): """simple docstring""" def snake_case_ ( self : Any , __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : List[Any] ) -> Dict: _A = {f'''lr_group_{i}''': param['''lr'''] for i, param in enumerate(pl_module.trainer.optimizers[0].param_groups )} pl_module.logger.log_metrics(__lowerCAmelCase ) @rank_zero_only def snake_case_ ( self : Any , __lowerCAmelCase : pl.Trainer , __lowerCAmelCase : pl.LightningModule , __lowerCAmelCase : str , __lowerCAmelCase : Optional[Any]=True ) -> None: logger.info(f'''***** {type_path} results at step {trainer.global_step:05d} *****''' ) _A = trainer.callback_metrics trainer.logger.log_metrics({k: v for k, v in metrics.items() if k not in ['''log''', '''progress_bar''', '''preds''']} ) # Log results _A = Path(pl_module.hparams.output_dir ) if type_path == "test": _A = od / '''test_results.txt''' _A = od / '''test_generations.txt''' else: # this never gets hit. I prefer not to save intermediate generations, and results are in metrics.json # If people want this it will be easy enough to add back. 
_A = od / f'''{type_path}_results/{trainer.global_step:05d}.txt''' _A = od / f'''{type_path}_generations/{trainer.global_step:05d}.txt''' results_file.parent.mkdir(exist_ok=__lowerCAmelCase ) generations_file.parent.mkdir(exist_ok=__lowerCAmelCase ) with open(__lowerCAmelCase , '''a+''' ) as writer: for key in sorted(__lowerCAmelCase ): if key in ["log", "progress_bar", "preds"]: continue _A = metrics[key] if isinstance(__lowerCAmelCase , torch.Tensor ): _A = val.item() _A = f'''{key}: {val:.6f}\n''' writer.write(__lowerCAmelCase ) if not save_generations: return if "preds" in metrics: _A = '''\n'''.join(metrics['''preds'''] ) generations_file.open('''w+''' ).write(__lowerCAmelCase ) @rank_zero_only def snake_case_ ( self : str , __lowerCAmelCase : str , __lowerCAmelCase : Any ) -> List[str]: try: _A = pl_module.model.model.num_parameters() except AttributeError: _A = pl_module.model.num_parameters() _A = count_trainable_parameters(__lowerCAmelCase ) # mp stands for million parameters trainer.logger.log_metrics({'''n_params''': npars, '''mp''': npars / 1E6, '''grad_mp''': n_trainable_pars / 1E6} ) @rank_zero_only def snake_case_ ( self : Any , __lowerCAmelCase : pl.Trainer , __lowerCAmelCase : pl.LightningModule ) -> Optional[int]: save_json(pl_module.metrics , pl_module.metrics_save_path ) return self._write_logs(__lowerCAmelCase , __lowerCAmelCase , '''test''' ) @rank_zero_only def snake_case_ ( self : List[Any] , __lowerCAmelCase : pl.Trainer , __lowerCAmelCase : List[str] ) -> Dict: save_json(pl_module.metrics , pl_module.metrics_save_path ) # Uncommenting this will save val generations # return self._write_logs(trainer, pl_module, "valid")
2
def base16_encode(data: bytes) -> str:
    # Turn each byte into two uppercase hexadecimal digits.
    return "".join([hex(byte)[2:].zfill(2).upper() for byte in list(data)])


def base16_decode(data: str) -> bytes:
    # Check data validity, following RFC3548
    # https://www.ietf.org/rfc/rfc3548.txt
    if (len(data) % 2) != 0:
        raise ValueError(
            "Base16 encoded data is invalid: Data does not have an even number of hex digits."
        )
    # Check the character set - the standard base16 alphabet
    # is uppercase according to RFC3548 section 6
    if not set(data) <= set("0123456789ABCDEF"):
        raise ValueError(
            "Base16 encoded data is invalid: Data is not uppercase hex or it contains invalid characters."
        )
    # For every two hexadecimal digits (= a byte), turn it into an integer.
    # Then, string the result together into bytes, and return it.
    return bytes(int(data[i] + data[i + 1], 16) for i in range(0, len(data), 2))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
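A usage sketch for the two functions above (names as restored there), cross-checked against the standard library, where base16 lives as base64.b16encode:

import base64

encoded = base16_encode(b"Hello World!")
assert encoded == "48656C6C6F20576F726C6421"
assert base16_decode(encoded) == b"Hello World!"
assert encoded == base64.b16encode(b"Hello World!").decode("ascii")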
2
1
# Copyright 2023 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from typing import TYPE_CHECKING # rely on isort to merge the imports from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available UpperCAmelCase_ = {"""configuration_mra""": ["""MRA_PRETRAINED_CONFIG_ARCHIVE_MAP""", """MraConfig"""]} try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCAmelCase_ = [ """MRA_PRETRAINED_MODEL_ARCHIVE_LIST""", """MraForMaskedLM""", """MraForMultipleChoice""", """MraForQuestionAnswering""", """MraForSequenceClassification""", """MraForTokenClassification""", """MraLayer""", """MraModel""", """MraPreTrainedModel""", ] if TYPE_CHECKING: from .configuration_mra import MRA_PRETRAINED_CONFIG_ARCHIVE_MAP, MraConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_mra import ( MRA_PRETRAINED_MODEL_ARCHIVE_LIST, MraForMaskedLM, MraForMultipleChoice, MraForQuestionAnswering, MraForSequenceClassification, MraForTokenClassification, MraLayer, MraModel, MraPreTrainedModel, ) else: import sys UpperCAmelCase_ = _LazyModule(__name__, globals()["""__file__"""], _import_structure)
2
def is_arithmetic_series(series: list) -> bool:
    if not isinstance(series, list):
        raise ValueError("Input series is not valid, valid series - [2, 4, 6]")
    if len(series) == 0:
        raise ValueError("Input list must be a non-empty list")
    if len(series) == 1:
        return True
    common_diff = series[1] - series[0]
    for index in range(len(series) - 1):
        if series[index + 1] - series[index] != common_diff:
            return False
    return True


def arithmetic_mean(series: list) -> float:
    if not isinstance(series, list):
        raise ValueError("Input series is not valid, valid series - [2, 4, 6]")
    if len(series) == 0:
        raise ValueError("Input list must be a non-empty list")
    answer = 0
    for val in series:
        answer += val
    return answer / len(series)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
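Quick usage checks for the two helpers (names restored above so that the second definition no longer shadows the first):

assert is_arithmetic_series([2, 4, 6]) is True
assert is_arithmetic_series([2, 4, 7]) is False
assert arithmetic_mean([2, 4, 6]) == 4.0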
2
1
from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available, is_vision_available, ) UpperCAmelCase_ = { """configuration_mobilevit""": ["""MOBILEVIT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """MobileViTConfig""", """MobileViTOnnxConfig"""], } try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCAmelCase_ = ["""MobileViTFeatureExtractor"""] UpperCAmelCase_ = ["""MobileViTImageProcessor"""] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCAmelCase_ = [ """MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST""", """MobileViTForImageClassification""", """MobileViTForSemanticSegmentation""", """MobileViTModel""", """MobileViTPreTrainedModel""", ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCAmelCase_ = [ """TF_MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST""", """TFMobileViTForImageClassification""", """TFMobileViTForSemanticSegmentation""", """TFMobileViTModel""", """TFMobileViTPreTrainedModel""", ] if TYPE_CHECKING: from .configuration_mobilevit import MOBILEVIT_PRETRAINED_CONFIG_ARCHIVE_MAP, MobileViTConfig, MobileViTOnnxConfig try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .feature_extraction_mobilevit import MobileViTFeatureExtractor from .image_processing_mobilevit import MobileViTImageProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_mobilevit import ( MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST, MobileViTForImageClassification, MobileViTForSemanticSegmentation, MobileViTModel, MobileViTPreTrainedModel, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_mobilevit import ( TF_MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST, TFMobileViTForImageClassification, TFMobileViTForSemanticSegmentation, TFMobileViTModel, TFMobileViTPreTrainedModel, ) else: import sys UpperCAmelCase_ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
2
import math

import numpy as np
import qiskit
from qiskit import Aer, ClassicalRegister, QuantumCircuit, QuantumRegister, execute


def quantum_fourier_transform(number_of_qubits: int = 3) -> qiskit.result.counts.Counts:
    if not isinstance(number_of_qubits, int):
        raise TypeError("number of qubits must be an integer.")
    if number_of_qubits <= 0:
        raise ValueError("number of qubits must be > 0.")
    if math.floor(number_of_qubits) != number_of_qubits:
        raise ValueError("number of qubits must be exact integer.")
    if number_of_qubits > 10:
        raise ValueError("number of qubits too large to simulate (>10).")

    qr = QuantumRegister(number_of_qubits, "qr")
    cr = ClassicalRegister(number_of_qubits, "cr")
    quantum_circuit = QuantumCircuit(qr, cr)

    counter = number_of_qubits
    for i in range(counter):
        quantum_circuit.h(number_of_qubits - i - 1)
        counter -= 1
        for j in range(counter):
            quantum_circuit.cp(np.pi / 2 ** (counter - j), j, counter)
    for k in range(number_of_qubits // 2):
        quantum_circuit.swap(k, number_of_qubits - k - 1)

    # measure all the qubits
    quantum_circuit.measure(qr, cr)
    # simulate with 10000 shots
    backend = Aer.get_backend("qasm_simulator")
    job = execute(quantum_circuit, backend, shots=10_000)
    return job.result().get_counts(quantum_circuit)


if __name__ == "__main__":
    print(
        f"Total count for quantum fourier transform state is: {quantum_fourier_transform(3)}"
    )
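Independently of Qiskit, the unitary this circuit implements is the discrete Fourier transform matrix F[j, k] = exp(2*pi*i*j*k / N) / sqrt(N); a quick NumPy sanity sketch for N = 2**3:

import numpy as np

N = 2 ** 3
j, k = np.meshgrid(np.arange(N), np.arange(N), indexing="ij")
F = np.exp(2j * np.pi * j * k / N) / np.sqrt(N)
assert np.allclose(F @ F.conj().T, np.eye(N))  # the QFT matrix is unitary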
2
1
from collections import OrderedDict from typing import Mapping from packaging import version from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging UpperCAmelCase_ = logging.get_logger(__name__) UpperCAmelCase_ = { """google/mobilenet_v2_1.4_224""": """https://huggingface.co/google/mobilenet_v2_1.4_224/resolve/main/config.json""", """google/mobilenet_v2_1.0_224""": """https://huggingface.co/google/mobilenet_v2_1.0_224/resolve/main/config.json""", """google/mobilenet_v2_0.75_160""": """https://huggingface.co/google/mobilenet_v2_0.75_160/resolve/main/config.json""", """google/mobilenet_v2_0.35_96""": """https://huggingface.co/google/mobilenet_v2_0.35_96/resolve/main/config.json""", # See all MobileNetV2 models at https://huggingface.co/models?filter=mobilenet_v2 } class lowerCamelCase__ ( _A): """simple docstring""" a__ : Dict = "mobilenet_v2" def __init__( self : Union[str, Any] , __lowerCAmelCase : Any=3 , __lowerCAmelCase : str=2_24 , __lowerCAmelCase : Optional[int]=1.0 , __lowerCAmelCase : Dict=8 , __lowerCAmelCase : Any=8 , __lowerCAmelCase : int=6 , __lowerCAmelCase : str=32 , __lowerCAmelCase : Optional[int]=True , __lowerCAmelCase : Tuple=True , __lowerCAmelCase : Any="relu6" , __lowerCAmelCase : int=True , __lowerCAmelCase : int=0.8 , __lowerCAmelCase : Optional[Any]=0.02 , __lowerCAmelCase : Dict=0.001 , __lowerCAmelCase : Dict=2_55 , **__lowerCAmelCase : Optional[Any] , ) -> Dict: super().__init__(**__lowerCAmelCase ) if depth_multiplier <= 0: raise ValueError('''depth_multiplier must be greater than zero.''' ) _A = num_channels _A = image_size _A = depth_multiplier _A = depth_divisible_by _A = min_depth _A = expand_ratio _A = output_stride _A = first_layer_is_expansion _A = finegrained_output _A = hidden_act _A = tf_padding _A = classifier_dropout_prob _A = initializer_range _A = layer_norm_eps _A = semantic_loss_ignore_index class lowerCamelCase__ ( _A): """simple docstring""" a__ : Union[str, Any] = version.parse("1.11") @property def snake_case_ ( self : List[Any] ) -> Mapping[str, Mapping[int, str]]: return OrderedDict([('''pixel_values''', {0: '''batch'''})] ) @property def snake_case_ ( self : Union[str, Any] ) -> Mapping[str, Mapping[int, str]]: if self.task == "image-classification": return OrderedDict([('''logits''', {0: '''batch'''})] ) else: return OrderedDict([('''last_hidden_state''', {0: '''batch'''}), ('''pooler_output''', {0: '''batch'''})] ) @property def snake_case_ ( self : Optional[int] ) -> float: return 1E-4
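The depth_multiplier, depth_divisible_by and min_depth values in the config above feed the usual MobileNet channel-rounding rule. A sketch of that rule for illustration (the helper name make_divisible is ours here, not necessarily the exact transformers implementation):

def make_divisible(value, divisor=8, min_value=None):
    if min_value is None:
        min_value = divisor
    # round to the nearest multiple of `divisor`, never below `min_value`
    new_value = max(min_value, int(value + divisor / 2) // divisor * divisor)
    if new_value < 0.9 * value:  # never round down by more than 10%
        new_value += divisor
    return new_value


assert make_divisible(32 * 1.4) == 48  # 44.8 becomes the nearest multiple of 8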
2
import argparse import json import os import fairseq import torch from fairseq.data import Dictionary # Register SEW's fairseq modules from sew_asapp import tasks # noqa: F401 from transformers import ( SEWConfig, SEWForCTC, SEWModel, WavaVecaCTCTokenizer, WavaVecaFeatureExtractor, WavaVecaProcessor, logging, ) logging.set_verbosity_info() UpperCAmelCase_ = logging.get_logger(__name__) UpperCAmelCase_ = { """post_extract_proj""": """feature_projection""", """encoder.pos_conv.0""": """encoder.pos_conv_embed.conv""", """self_attn.k_proj""": """encoder.layers.*.attention.k_proj""", """self_attn.v_proj""": """encoder.layers.*.attention.v_proj""", """self_attn.q_proj""": """encoder.layers.*.attention.q_proj""", """self_attn.out_proj""": """encoder.layers.*.attention.out_proj""", """self_attn_layer_norm""": """encoder.layers.*.layer_norm""", """fc1""": """encoder.layers.*.feed_forward.intermediate_dense""", """fc2""": """encoder.layers.*.feed_forward.output_dense""", """final_layer_norm""": """encoder.layers.*.final_layer_norm""", """encoder.upsample.0""": """encoder.upsample.projection""", """encoder.layer_norm""": """encoder.layer_norm""", """w2v_model.layer_norm""": """layer_norm""", """w2v_encoder.proj""": """lm_head""", """mask_emb""": """masked_spec_embed""", } def SCREAMING_SNAKE_CASE_ ( _snake_case :Union[str, Any] , _snake_case :str , _snake_case :Any , _snake_case :int , _snake_case :List[Any] ) -> Optional[int]: for attribute in key.split('''.''' ): _A = getattr(_snake_case , _snake_case ) if weight_type is not None: _A = getattr(_snake_case , _snake_case ).shape else: _A = hf_pointer.shape assert hf_shape == value.shape, ( F'''Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be''' F''' {value.shape} for {full_name}''' ) if weight_type == "weight": _A = value elif weight_type == "weight_g": _A = value elif weight_type == "weight_v": _A = value elif weight_type == "bias": _A = value else: _A = value logger.info(F'''{key + '.' 
+ weight_type if weight_type is not None else ''} was initialized from {full_name}.''' ) def SCREAMING_SNAKE_CASE_ ( _snake_case :Union[str, Any] , _snake_case :Any , _snake_case :int ) -> Any: _A = [] _A = fairseq_model.state_dict() _A = hf_model.sew.feature_extractor if is_finetuned else hf_model.feature_extractor for name, value in fairseq_dict.items(): _A = False if "conv_layers" in name: load_conv_layer( _snake_case , _snake_case , _snake_case , _snake_case , hf_model.config.feat_extract_norm == '''group''' , ) _A = True else: for key, mapped_key in MAPPING.items(): _A = '''sew.''' + mapped_key if (is_finetuned and mapped_key != '''lm_head''') else mapped_key if key in name or key.split('''w2v_model.''' )[-1] == name.split('''.''' )[0]: _A = True if "*" in mapped_key: _A = name.split(_snake_case )[0].split('''.''' )[-2] _A = mapped_key.replace('''*''' , _snake_case ) if "weight_g" in name: _A = '''weight_g''' elif "weight_v" in name: _A = '''weight_v''' elif "weight" in name: _A = '''weight''' elif "bias" in name: _A = '''bias''' else: _A = None set_recursively(_snake_case , _snake_case , _snake_case , _snake_case , _snake_case ) continue if not is_used: unused_weights.append(_snake_case ) logger.warning(F'''Unused weights: {unused_weights}''' ) def SCREAMING_SNAKE_CASE_ ( _snake_case :Tuple , _snake_case :List[str] , _snake_case :List[str] , _snake_case :Optional[int] , _snake_case :List[Any] ) -> Any: _A = full_name.split('''conv_layers.''' )[-1] _A = name.split('''.''' ) _A = int(items[0] ) _A = int(items[1] ) if type_id == 0: if "bias" in name: assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, ( F'''{full_name} has size {value.shape}, but''' F''' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.''' ) _A = value logger.info(F'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' ) elif "weight" in name: assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, ( F'''{full_name} has size {value.shape}, but''' F''' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.''' ) _A = value logger.info(F'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' ) elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm): if "bias" in name: assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, ( F'''{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was''' " found." 
) _A = value logger.info(F'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' ) elif "weight" in name: assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, ( F'''{full_name} has size {value.shape}, but''' F''' {feature_extractor[layer_id].layer_norm.weight.data.shape} was found.''' ) _A = value logger.info(F'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' ) else: unused_weights.append(_snake_case ) def SCREAMING_SNAKE_CASE_ ( _snake_case :str , _snake_case :Dict ) -> Tuple: _A = SEWConfig() if is_finetuned: _A = model.wav_encoder.wav_model.cfg else: _A = model.cfg _A = fs_config.conv_bias _A = eval(fs_config.conv_feature_layers ) _A = [x[0] for x in conv_layers] _A = [x[1] for x in conv_layers] _A = [x[2] for x in conv_layers] _A = '''gelu''' _A = '''layer''' if fs_config.extractor_mode == '''layer_norm''' else '''group''' _A = 0.0 _A = fs_config.activation_fn.name _A = fs_config.encoder_embed_dim _A = 0.02 _A = fs_config.encoder_ffn_embed_dim _A = 1E-5 _A = fs_config.encoder_layerdrop _A = fs_config.encoder_attention_heads _A = fs_config.conv_pos_groups _A = fs_config.conv_pos _A = len(_snake_case ) _A = fs_config.encoder_layers _A = fs_config.squeeze_factor # take care of any params that are overridden by the Wav2VecCtc model if is_finetuned: _A = model.cfg _A = fs_config.final_dropout _A = fs_config.layerdrop _A = fs_config.activation_dropout _A = fs_config.mask_prob > 0 or fs_config.mask_channel_prob > 0 _A = fs_config.attention_dropout _A = fs_config.dropout_input _A = fs_config.dropout _A = fs_config.mask_channel_length _A = fs_config.mask_channel_prob _A = fs_config.mask_length _A = fs_config.mask_prob _A = '''Wav2Vec2FeatureExtractor''' _A = '''Wav2Vec2CTCTokenizer''' return config @torch.no_grad() def SCREAMING_SNAKE_CASE_ ( _snake_case :Optional[int] , _snake_case :Union[str, Any] , _snake_case :Optional[Any]=None , _snake_case :Optional[int]=None , _snake_case :Dict=True ) -> List[Any]: if is_finetuned: _A , _A , _A = fairseq.checkpoint_utils.load_model_ensemble_and_task( [checkpoint_path] , arg_overrides={'''data''': '''/'''.join(dict_path.split('''/''' )[:-1] )} ) else: _A , _A , _A = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] ) if config_path is not None: _A = SEWConfig.from_pretrained(_snake_case ) else: _A = convert_config(model[0] , _snake_case ) _A = model[0].eval() _A = True if config.feat_extract_norm == '''layer''' else False _A = WavaVecaFeatureExtractor( feature_size=1 , sampling_rate=16_000 , padding_value=0 , do_normalize=_snake_case , return_attention_mask=_snake_case , ) if is_finetuned: if dict_path: _A = Dictionary.load(_snake_case ) # important change bos & pad token id since CTC symbol is <pad> and # not <s> as in fairseq _A = target_dict.pad_index _A = target_dict.bos_index _A = target_dict.pad_index _A = target_dict.bos_index _A = target_dict.eos_index _A = len(target_dict.symbols ) _A = os.path.join(_snake_case , '''vocab.json''' ) if not os.path.isdir(_snake_case ): logger.error('''--pytorch_dump_folder_path ({}) should be a directory'''.format(_snake_case ) ) return os.makedirs(_snake_case , exist_ok=_snake_case ) with open(_snake_case , '''w''' , encoding='''utf-8''' ) as vocab_handle: json.dump(target_dict.indices , _snake_case ) _A = WavaVecaCTCTokenizer( _snake_case , unk_token=target_dict.unk_word , pad_token=target_dict.pad_word , bos_token=target_dict.bos_word , eos_token=target_dict.eos_word , 
word_delimiter_token='''|''' , do_lower_case=_snake_case , ) _A = WavaVecaProcessor(feature_extractor=_snake_case , tokenizer=_snake_case ) processor.save_pretrained(_snake_case ) _A = SEWForCTC(_snake_case ) else: _A = SEWModel(_snake_case ) feature_extractor.save_pretrained(_snake_case ) recursively_load_weights(_snake_case , _snake_case , _snake_case ) hf_model.save_pretrained(_snake_case ) if __name__ == "__main__": UpperCAmelCase_ = argparse.ArgumentParser() parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""") parser.add_argument("""--checkpoint_path""", default=None, type=str, help="""Path to fairseq checkpoint""") parser.add_argument("""--dict_path""", default=None, type=str, help="""Path to dict of fine-tuned model""") parser.add_argument("""--config_path""", default=None, type=str, help="""Path to hf config.json of model to convert""") parser.add_argument( """--is_finetuned""", action="""store_true""", help="""Whether the model to convert is a fine-tuned model or not""" ) UpperCAmelCase_ = parser.parse_args() convert_sew_checkpoint( args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, args.is_finetuned )
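The weight-renaming logic above ultimately walks dotted keys with getattr before assigning into the target module; a toy, self-contained sketch of that pattern (dummy classes, not real model modules):

class Inner:
    weight = "old"


class Outer:
    encoder = Inner()


def set_by_dotted_key(root, key, value):
    *parents, leaf = key.split(".")
    for name in parents:
        root = getattr(root, name)  # descend one attribute per dotted segment
    setattr(root, leaf, value)


model = Outer()
set_by_dotted_key(model, "encoder.weight", "new")
assert model.encoder.weight == "new"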
2
1
import sys

from .dependency_versions_table import deps
from .utils.versions import require_version, require_version_core


# define which module versions we always want to check at run time
# (usually the ones defined in `install_requires` in setup.py)
#
# order specific notes:
# - tqdm must be checked before tokenizers

pkgs_to_check_at_runtime = "python tqdm regex requests packaging filelock numpy tokenizers".split()
if sys.version_info < (3, 7):
    pkgs_to_check_at_runtime.append("dataclasses")
if sys.version_info < (3, 8):
    pkgs_to_check_at_runtime.append("importlib_metadata")

for pkg in pkgs_to_check_at_runtime:
    if pkg in deps:
        if pkg == "tokenizers":
            # must be loaded here, or else tqdm check may fail
            from .utils import is_tokenizers_available

            if not is_tokenizers_available():
                continue  # not required, check version only if installed

        require_version_core(deps[pkg])
    else:
        raise ValueError(f"can't find {pkg} in {deps.keys()}, check dependency_versions_table.py")


def dep_version_check(pkg, hint=None):
    require_version(deps[pkg], hint)
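At its core, require_version compares an installed version against a required one; a minimal standalone sketch using packaging (which the runtime list above already guarantees is present):

from packaging import version


def satisfies(installed: str, minimum: str) -> bool:
    return version.parse(installed) >= version.parse(minimum)


assert satisfies("4.21.0", "4.2")
assert not satisfies("3.9.1", "4.0")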
2
import unittest from transformers import is_vision_available from transformers.pipelines import pipeline from transformers.testing_utils import ( is_pipeline_test, nested_simplify, require_tf, require_torch, require_vision, slow, ) from .test_pipelines_common import ANY if is_vision_available(): from PIL import Image else: class lowerCamelCase__ : """simple docstring""" @staticmethod def snake_case_ ( *__lowerCAmelCase : Optional[Any] , **__lowerCAmelCase : Any ) -> Any: pass @is_pipeline_test @require_vision class lowerCamelCase__ ( unittest.TestCase): """simple docstring""" @require_torch def snake_case_ ( self : Tuple ) -> Tuple: _A = pipeline( model='''hf-internal-testing/tiny-random-clip-zero-shot-image-classification''' , ) _A = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' ) _A = image_classifier(__lowerCAmelCase , candidate_labels=['''a''', '''b''', '''c'''] ) # The floating scores are so close, we enter floating error approximation and the order is not guaranteed across # python and torch versions. self.assertIn( nested_simplify(__lowerCAmelCase ) , [ [{'''score''': 0.333, '''label''': '''a'''}, {'''score''': 0.333, '''label''': '''b'''}, {'''score''': 0.333, '''label''': '''c'''}], [{'''score''': 0.333, '''label''': '''a'''}, {'''score''': 0.333, '''label''': '''c'''}, {'''score''': 0.333, '''label''': '''b'''}], ] , ) _A = image_classifier([image] * 5 , candidate_labels=['''A''', '''B''', '''C'''] , batch_size=2 ) self.assertEqual( nested_simplify(__lowerCAmelCase ) , [ [ {'''score''': 0.333, '''label''': ANY(__lowerCAmelCase )}, {'''score''': 0.333, '''label''': ANY(__lowerCAmelCase )}, {'''score''': 0.333, '''label''': ANY(__lowerCAmelCase )}, ], [ {'''score''': 0.333, '''label''': ANY(__lowerCAmelCase )}, {'''score''': 0.333, '''label''': ANY(__lowerCAmelCase )}, {'''score''': 0.333, '''label''': ANY(__lowerCAmelCase )}, ], [ {'''score''': 0.333, '''label''': ANY(__lowerCAmelCase )}, {'''score''': 0.333, '''label''': ANY(__lowerCAmelCase )}, {'''score''': 0.333, '''label''': ANY(__lowerCAmelCase )}, ], [ {'''score''': 0.333, '''label''': ANY(__lowerCAmelCase )}, {'''score''': 0.333, '''label''': ANY(__lowerCAmelCase )}, {'''score''': 0.333, '''label''': ANY(__lowerCAmelCase )}, ], [ {'''score''': 0.333, '''label''': ANY(__lowerCAmelCase )}, {'''score''': 0.333, '''label''': ANY(__lowerCAmelCase )}, {'''score''': 0.333, '''label''': ANY(__lowerCAmelCase )}, ], ] , ) @require_tf def snake_case_ ( self : int ) -> Optional[int]: _A = pipeline( model='''hf-internal-testing/tiny-random-clip-zero-shot-image-classification''' , framework='''tf''' ) _A = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' ) _A = image_classifier(__lowerCAmelCase , candidate_labels=['''a''', '''b''', '''c'''] ) self.assertEqual( nested_simplify(__lowerCAmelCase ) , [{'''score''': 0.333, '''label''': '''a'''}, {'''score''': 0.333, '''label''': '''b'''}, {'''score''': 0.333, '''label''': '''c'''}] , ) _A = image_classifier([image] * 5 , candidate_labels=['''A''', '''B''', '''C'''] , batch_size=2 ) self.assertEqual( nested_simplify(__lowerCAmelCase ) , [ [ {'''score''': 0.333, '''label''': ANY(__lowerCAmelCase )}, {'''score''': 0.333, '''label''': ANY(__lowerCAmelCase )}, {'''score''': 0.333, '''label''': ANY(__lowerCAmelCase )}, ], [ {'''score''': 0.333, '''label''': ANY(__lowerCAmelCase )}, {'''score''': 0.333, '''label''': ANY(__lowerCAmelCase )}, {'''score''': 0.333, '''label''': ANY(__lowerCAmelCase )}, ], [ {'''score''': 0.333, '''label''': 
ANY(__lowerCAmelCase )}, {'''score''': 0.333, '''label''': ANY(__lowerCAmelCase )}, {'''score''': 0.333, '''label''': ANY(__lowerCAmelCase )}, ], [ {'''score''': 0.333, '''label''': ANY(__lowerCAmelCase )}, {'''score''': 0.333, '''label''': ANY(__lowerCAmelCase )}, {'''score''': 0.333, '''label''': ANY(__lowerCAmelCase )}, ], [ {'''score''': 0.333, '''label''': ANY(__lowerCAmelCase )}, {'''score''': 0.333, '''label''': ANY(__lowerCAmelCase )}, {'''score''': 0.333, '''label''': ANY(__lowerCAmelCase )}, ], ] , ) @slow @require_torch def snake_case_ ( self : Optional[int] ) -> int: _A = pipeline( task='''zero-shot-image-classification''' , model='''openai/clip-vit-base-patch32''' , ) # This is an image of 2 cats with remotes and no planes _A = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' ) _A = image_classifier(__lowerCAmelCase , candidate_labels=['''cat''', '''plane''', '''remote'''] ) self.assertEqual( nested_simplify(__lowerCAmelCase ) , [ {'''score''': 0.511, '''label''': '''remote'''}, {'''score''': 0.485, '''label''': '''cat'''}, {'''score''': 0.004, '''label''': '''plane'''}, ] , ) _A = image_classifier([image] * 5 , candidate_labels=['''cat''', '''plane''', '''remote'''] , batch_size=2 ) self.assertEqual( nested_simplify(__lowerCAmelCase ) , [ [ {'''score''': 0.511, '''label''': '''remote'''}, {'''score''': 0.485, '''label''': '''cat'''}, {'''score''': 0.004, '''label''': '''plane'''}, ], ] * 5 , ) @slow @require_tf def snake_case_ ( self : Optional[int] ) -> Dict: _A = pipeline( task='''zero-shot-image-classification''' , model='''openai/clip-vit-base-patch32''' , framework='''tf''' ) # This is an image of 2 cats with remotes and no planes _A = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' ) _A = image_classifier(__lowerCAmelCase , candidate_labels=['''cat''', '''plane''', '''remote'''] ) self.assertEqual( nested_simplify(__lowerCAmelCase ) , [ {'''score''': 0.511, '''label''': '''remote'''}, {'''score''': 0.485, '''label''': '''cat'''}, {'''score''': 0.004, '''label''': '''plane'''}, ] , ) _A = image_classifier([image] * 5 , candidate_labels=['''cat''', '''plane''', '''remote'''] , batch_size=2 ) self.assertEqual( nested_simplify(__lowerCAmelCase ) , [ [ {'''score''': 0.511, '''label''': '''remote'''}, {'''score''': 0.485, '''label''': '''cat'''}, {'''score''': 0.004, '''label''': '''plane'''}, ], ] * 5 , )
2
1
import argparse import os import pickle import sys import torch from transformers import TransfoXLConfig, TransfoXLLMHeadModel, load_tf_weights_in_transfo_xl from transformers.models.transfo_xl import tokenization_transfo_xl as data_utils from transformers.models.transfo_xl.tokenization_transfo_xl import CORPUS_NAME, VOCAB_FILES_NAMES from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging logging.set_verbosity_info() # We do this to be able to load python 2 datasets pickles # See e.g. https://stackoverflow.com/questions/2121874/python-pickling-after-changing-a-modules-directory/2121918#2121918 UpperCAmelCase_ = data_utils.TransfoXLTokenizer UpperCAmelCase_ = data_utils.TransfoXLCorpus UpperCAmelCase_ = data_utils UpperCAmelCase_ = data_utils def SCREAMING_SNAKE_CASE_ ( _snake_case :str , _snake_case :str , _snake_case :List[Any] , _snake_case :Dict ) -> str: if transfo_xl_dataset_file: # Convert a pre-processed corpus (see original TensorFlow repo) with open(_snake_case , '''rb''' ) as fp: _A = pickle.load(_snake_case , encoding='''latin1''' ) # Save vocabulary and dataset cache as Dictionaries (should be better than pickles for the long-term) _A = pytorch_dump_folder_path + '''/''' + VOCAB_FILES_NAMES['''pretrained_vocab_file'''] print(F'''Save vocabulary to {pytorch_vocab_dump_path}''' ) _A = corpus.vocab.__dict__ torch.save(_snake_case , _snake_case ) _A = corpus.__dict__ corpus_dict_no_vocab.pop('''vocab''' , _snake_case ) _A = pytorch_dump_folder_path + '''/''' + CORPUS_NAME print(F'''Save dataset to {pytorch_dataset_dump_path}''' ) torch.save(_snake_case , _snake_case ) if tf_checkpoint_path: # Convert a pre-trained TensorFlow model _A = os.path.abspath(_snake_case ) _A = os.path.abspath(_snake_case ) print(F'''Converting Transformer XL checkpoint from {tf_path} with config at {config_path}.''' ) # Initialise PyTorch model if transfo_xl_config_file == "": _A = TransfoXLConfig() else: _A = TransfoXLConfig.from_json_file(_snake_case ) print(F'''Building PyTorch model from configuration: {config}''' ) _A = TransfoXLLMHeadModel(_snake_case ) _A = load_tf_weights_in_transfo_xl(_snake_case , _snake_case , _snake_case ) # Save pytorch-model _A = os.path.join(_snake_case , _snake_case ) _A = os.path.join(_snake_case , _snake_case ) print(F'''Save PyTorch model to {os.path.abspath(_snake_case )}''' ) torch.save(model.state_dict() , _snake_case ) print(F'''Save configuration file to {os.path.abspath(_snake_case )}''' ) with open(_snake_case , '''w''' , encoding='''utf-8''' ) as f: f.write(config.to_json_string() ) if __name__ == "__main__": UpperCAmelCase_ = argparse.ArgumentParser() parser.add_argument( """--pytorch_dump_folder_path""", default=None, type=str, required=True, help="""Path to the folder to store the PyTorch model or dataset/vocab.""", ) parser.add_argument( """--tf_checkpoint_path""", default="""""", type=str, help="""An optional path to a TensorFlow checkpoint path to be converted.""", ) parser.add_argument( """--transfo_xl_config_file""", default="""""", type=str, help=( """An optional config json file corresponding to the pre-trained BERT model. \n""" """This specifies the model architecture.""" ), ) parser.add_argument( """--transfo_xl_dataset_file""", default="""""", type=str, help="""An optional dataset file to be converted in a vocabulary.""", ) UpperCAmelCase_ = parser.parse_args() convert_transfo_xl_checkpoint_to_pytorch( args.tf_checkpoint_path, args.transfo_xl_config_file, args.pytorch_dump_folder_path, args.transfo_xl_dataset_file, )
2
import json import os import shutil import tempfile import unittest import numpy as np import pytest from transformers import BertTokenizer, BertTokenizerFast from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES from transformers.testing_utils import require_vision from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available if is_vision_available(): from PIL import Image from transformers import AlignProcessor, EfficientNetImageProcessor @require_vision class lowerCamelCase__ ( unittest.TestCase): """simple docstring""" def snake_case_ ( self : Tuple ) -> Optional[int]: _A = tempfile.mkdtemp() _A = [ '''[UNK]''', '''[CLS]''', '''[SEP]''', '''[PAD]''', '''[MASK]''', '''want''', '''##want''', '''##ed''', '''wa''', '''un''', '''runn''', '''##ing''', ''',''', '''low''', '''lowest''', ] _A = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] ) with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer: vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) ) _A = { '''do_resize''': True, '''size''': 20, '''do_center_crop''': True, '''crop_size''': 18, '''do_normalize''': True, '''image_mean''': [0.4814_5466, 0.457_8275, 0.4082_1073], '''image_std''': [0.2686_2954, 0.2613_0258, 0.2757_7711], } _A = os.path.join(self.tmpdirname , __lowerCAmelCase ) with open(self.image_processor_file , '''w''' , encoding='''utf-8''' ) as fp: json.dump(__lowerCAmelCase , __lowerCAmelCase ) def snake_case_ ( self : Dict , **__lowerCAmelCase : int ) -> Optional[int]: return BertTokenizer.from_pretrained(self.tmpdirname , **__lowerCAmelCase ) def snake_case_ ( self : str , **__lowerCAmelCase : Optional[Any] ) -> Tuple: return BertTokenizerFast.from_pretrained(self.tmpdirname , **__lowerCAmelCase ) def snake_case_ ( self : Tuple , **__lowerCAmelCase : str ) -> Union[str, Any]: return EfficientNetImageProcessor.from_pretrained(self.tmpdirname , **__lowerCAmelCase ) def snake_case_ ( self : Optional[Any] ) -> Optional[int]: shutil.rmtree(self.tmpdirname ) def snake_case_ ( self : int ) -> Optional[Any]: _A = [np.random.randint(2_55 , size=(3, 30, 4_00) , dtype=np.uinta )] _A = [Image.fromarray(np.moveaxis(__lowerCAmelCase , 0 , -1 ) ) for x in image_inputs] return image_inputs def snake_case_ ( self : Dict ) -> List[str]: _A = self.get_tokenizer() _A = self.get_rust_tokenizer() _A = self.get_image_processor() _A = AlignProcessor(tokenizer=__lowerCAmelCase , image_processor=__lowerCAmelCase ) processor_slow.save_pretrained(self.tmpdirname ) _A = AlignProcessor.from_pretrained(self.tmpdirname , use_fast=__lowerCAmelCase ) _A = AlignProcessor(tokenizer=__lowerCAmelCase , image_processor=__lowerCAmelCase ) processor_fast.save_pretrained(self.tmpdirname ) _A = AlignProcessor.from_pretrained(self.tmpdirname ) self.assertEqual(processor_slow.tokenizer.get_vocab() , tokenizer_slow.get_vocab() ) self.assertEqual(processor_fast.tokenizer.get_vocab() , tokenizer_fast.get_vocab() ) self.assertEqual(tokenizer_slow.get_vocab() , tokenizer_fast.get_vocab() ) self.assertIsInstance(processor_slow.tokenizer , __lowerCAmelCase ) self.assertIsInstance(processor_fast.tokenizer , __lowerCAmelCase ) self.assertEqual(processor_slow.image_processor.to_json_string() , image_processor.to_json_string() ) self.assertEqual(processor_fast.image_processor.to_json_string() , image_processor.to_json_string() ) self.assertIsInstance(processor_slow.image_processor , __lowerCAmelCase ) self.assertIsInstance(processor_fast.image_processor , __lowerCAmelCase ) def snake_case_ ( 
self : List[Any] ) -> List[str]: _A = AlignProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() ) processor.save_pretrained(self.tmpdirname ) _A = self.get_tokenizer(bos_token='''(BOS)''' , eos_token='''(EOS)''' ) _A = self.get_image_processor(do_normalize=__lowerCAmelCase , padding_value=1.0 ) _A = AlignProcessor.from_pretrained( self.tmpdirname , bos_token='''(BOS)''' , eos_token='''(EOS)''' , do_normalize=__lowerCAmelCase , padding_value=1.0 ) self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() ) self.assertIsInstance(processor.tokenizer , __lowerCAmelCase ) self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() ) self.assertIsInstance(processor.image_processor , __lowerCAmelCase ) def snake_case_ ( self : str ) -> List[Any]: _A = self.get_image_processor() _A = self.get_tokenizer() _A = AlignProcessor(tokenizer=__lowerCAmelCase , image_processor=__lowerCAmelCase ) _A = self.prepare_image_inputs() _A = image_processor(__lowerCAmelCase , return_tensors='''np''' ) _A = processor(images=__lowerCAmelCase , return_tensors='''np''' ) for key in input_image_proc.keys(): self.assertAlmostEqual(input_image_proc[key].sum() , input_processor[key].sum() , delta=1E-2 ) def snake_case_ ( self : Union[str, Any] ) -> Dict: _A = self.get_image_processor() _A = self.get_tokenizer() _A = AlignProcessor(tokenizer=__lowerCAmelCase , image_processor=__lowerCAmelCase ) _A = '''lower newer''' _A = processor(text=__lowerCAmelCase ) _A = tokenizer(__lowerCAmelCase , padding='''max_length''' , max_length=64 ) for key in encoded_tok.keys(): self.assertListEqual(encoded_tok[key] , encoded_processor[key] ) def snake_case_ ( self : List[str] ) -> Any: _A = self.get_image_processor() _A = self.get_tokenizer() _A = AlignProcessor(tokenizer=__lowerCAmelCase , image_processor=__lowerCAmelCase ) _A = '''lower newer''' _A = self.prepare_image_inputs() _A = processor(text=__lowerCAmelCase , images=__lowerCAmelCase ) self.assertListEqual(list(inputs.keys() ) , ['''input_ids''', '''token_type_ids''', '''attention_mask''', '''pixel_values'''] ) # test if it raises when no input is passed with pytest.raises(__lowerCAmelCase ): processor() def snake_case_ ( self : Optional[Any] ) -> str: _A = self.get_image_processor() _A = self.get_tokenizer() _A = AlignProcessor(tokenizer=__lowerCAmelCase , image_processor=__lowerCAmelCase ) _A = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]] _A = processor.batch_decode(__lowerCAmelCase ) _A = tokenizer.batch_decode(__lowerCAmelCase ) self.assertListEqual(__lowerCAmelCase , __lowerCAmelCase ) def snake_case_ ( self : str ) -> str: _A = self.get_image_processor() _A = self.get_tokenizer() _A = AlignProcessor(tokenizer=__lowerCAmelCase , image_processor=__lowerCAmelCase ) _A = '''lower newer''' _A = self.prepare_image_inputs() _A = processor(text=__lowerCAmelCase , images=__lowerCAmelCase ) self.assertListEqual(list(inputs.keys() ) , processor.model_input_names )
2
1
import os

import pytest
from attr import dataclass


os.environ["AWS_DEFAULT_REGION"] = "us-east-1"  # defaults region


@dataclass
class SageMakerTestEnvironment:
    framework: str
    role = "arn:aws:iam::558105141721:role/sagemaker_execution_role"
    hyperparameters = {
        "task_name": "mnli",
        "per_device_train_batch_size": 16,
        "per_device_eval_batch_size": 16,
        "do_train": True,
        "do_eval": True,
        "do_predict": True,
        "output_dir": "/opt/ml/model",
        "overwrite_output_dir": True,
        "max_steps": 500,
        "save_steps": 5500,
    }
    distributed_hyperparameters = {**hyperparameters, "max_steps": 1000}

    @property
    def metric_definitions(self) -> str:
        if self.framework == "pytorch":
            return [
                {"Name": "train_runtime", "Regex": r"train_runtime.*=\D*(.*?)$"},
                {"Name": "eval_accuracy", "Regex": r"eval_accuracy.*=\D*(.*?)$"},
                {"Name": "eval_loss", "Regex": r"eval_loss.*=\D*(.*?)$"},
            ]
        else:
            return [
                {"Name": "train_runtime", "Regex": r"train_runtime.*=\D*(.*?)$"},
                {"Name": "eval_accuracy", "Regex": r"loss.*=\D*(.*?)]?$"},
                {"Name": "eval_loss", "Regex": r"sparse_categorical_accuracy.*=\D*(.*?)]?$"},
            ]

    @property
    def base_job_name(self) -> str:
        return f"{self.framework}-transfromers-test"

    @property
    def test_path(self) -> str:
        return f"./tests/sagemaker/scripts/{self.framework}"

    @property
    def image_uri(self) -> str:
        if self.framework == "pytorch":
            return "763104351884.dkr.ecr.us-east-1.amazonaws.com/huggingface-pytorch-training:1.7.1-transformers4.6.1-gpu-py36-cu110-ubuntu18.04"
        else:
            return "763104351884.dkr.ecr.us-east-1.amazonaws.com/huggingface-tensorflow-training:2.4.1-transformers4.6.1-gpu-py37-cu110-ubuntu18.04"


@pytest.fixture(scope="class")
def sm_env(request):
    request.cls.env = SageMakerTestEnvironment(framework=request.cls.framework)
2
from ...configuration_utils import PretrainedConfig from ...utils import logging UpperCAmelCase_ = logging.get_logger(__name__) UpperCAmelCase_ = {"""openai-gpt""": """https://huggingface.co/openai-gpt/resolve/main/config.json"""} class lowerCamelCase__ ( _A): """simple docstring""" a__ : int = "openai-gpt" a__ : Dict = { "max_position_embeddings": "n_positions", "hidden_size": "n_embd", "num_attention_heads": "n_head", "num_hidden_layers": "n_layer", } def __init__( self : Union[str, Any] , __lowerCAmelCase : int=4_04_78 , __lowerCAmelCase : Tuple=5_12 , __lowerCAmelCase : str=7_68 , __lowerCAmelCase : List[Any]=12 , __lowerCAmelCase : Any=12 , __lowerCAmelCase : Dict="gelu" , __lowerCAmelCase : Union[str, Any]=0.1 , __lowerCAmelCase : Optional[int]=0.1 , __lowerCAmelCase : Any=0.1 , __lowerCAmelCase : List[str]=1E-5 , __lowerCAmelCase : Optional[int]=0.02 , __lowerCAmelCase : Optional[Any]="cls_index" , __lowerCAmelCase : str=True , __lowerCAmelCase : int=None , __lowerCAmelCase : Optional[int]=True , __lowerCAmelCase : Optional[Any]=0.1 , **__lowerCAmelCase : Tuple , ) -> Optional[Any]: _A = vocab_size _A = n_positions _A = n_embd _A = n_layer _A = n_head _A = afn _A = resid_pdrop _A = embd_pdrop _A = attn_pdrop _A = layer_norm_epsilon _A = initializer_range _A = summary_type _A = summary_use_proj _A = summary_activation _A = summary_first_dropout _A = summary_proj_to_labels super().__init__(**__lowerCAmelCase )
2
1
from collections import namedtuple

import requests
from lxml import html  # type: ignore

covid_data = namedtuple("covid_data", "cases deaths recovered")


def covid_stats(url: str = "https://www.worldometers.info/coronavirus/") -> covid_data:
    xpath_str = '//div[@class = "maincounter-number"]/span/text()'
    return covid_data(*html.fromstring(requests.get(url).content).xpath(xpath_str))


fmt = """Total COVID-19 cases in the world: {}
Total deaths due to COVID-19 in the world: {}
Total COVID-19 patients recovered in the world: {}"""
print(fmt.format(*covid_stats()))
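The scraper hinges on a single XPath expression; an offline sketch against a static HTML snippet, so it runs without network access (lxml assumed installed, as above):

from lxml import html

snippet = """<html><body>
<div class="maincounter-number"><span>1,000</span></div>
<div class="maincounter-number"><span>50</span></div>
<div class="maincounter-number"><span>900</span></div>
</body></html>"""
values = html.fromstring(snippet).xpath('//div[@class = "maincounter-number"]/span/text()')
assert values == ["1,000", "50", "900"]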
2
import json import pathlib import unittest import numpy as np from transformers.testing_utils import require_torch, require_vision, slow from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import DeformableDetrImageProcessor class lowerCamelCase__ ( unittest.TestCase): """simple docstring""" def __init__( self : Optional[int] , __lowerCAmelCase : List[Any] , __lowerCAmelCase : Dict=7 , __lowerCAmelCase : Tuple=3 , __lowerCAmelCase : int=30 , __lowerCAmelCase : Dict=4_00 , __lowerCAmelCase : Optional[Any]=True , __lowerCAmelCase : List[str]=None , __lowerCAmelCase : Dict=True , __lowerCAmelCase : Optional[Any]=[0.5, 0.5, 0.5] , __lowerCAmelCase : Dict=[0.5, 0.5, 0.5] , __lowerCAmelCase : List[str]=True , __lowerCAmelCase : List[str]=1 / 2_55 , __lowerCAmelCase : int=True , ) -> List[str]: # by setting size["longest_edge"] > max_resolution we're effectively not testing this :p _A = size if size is not None else {'''shortest_edge''': 18, '''longest_edge''': 13_33} _A = parent _A = batch_size _A = num_channels _A = min_resolution _A = max_resolution _A = do_resize _A = size _A = do_normalize _A = image_mean _A = image_std _A = do_rescale _A = rescale_factor _A = do_pad def snake_case_ ( self : Optional[int] ) -> Any: return { "do_resize": self.do_resize, "size": self.size, "do_normalize": self.do_normalize, "image_mean": self.image_mean, "image_std": self.image_std, "do_rescale": self.do_rescale, "rescale_factor": self.rescale_factor, "do_pad": self.do_pad, } def snake_case_ ( self : List[Any] , __lowerCAmelCase : Tuple , __lowerCAmelCase : str=False ) -> Dict: if not batched: _A = image_inputs[0] if isinstance(__lowerCAmelCase , Image.Image ): _A , _A = image.size else: _A , _A = image.shape[1], image.shape[2] if w < h: _A = int(self.size['''shortest_edge'''] * h / w ) _A = self.size['''shortest_edge'''] elif w > h: _A = self.size['''shortest_edge'''] _A = int(self.size['''shortest_edge'''] * w / h ) else: _A = self.size['''shortest_edge'''] _A = self.size['''shortest_edge'''] else: _A = [] for image in image_inputs: _A , _A = self.get_expected_values([image] ) expected_values.append((expected_height, expected_width) ) _A = max(__lowerCAmelCase , key=lambda __lowerCAmelCase : item[0] )[0] _A = max(__lowerCAmelCase , key=lambda __lowerCAmelCase : item[1] )[1] return expected_height, expected_width @require_torch @require_vision class lowerCamelCase__ ( _A , unittest.TestCase): """simple docstring""" a__ : Any = DeformableDetrImageProcessor if is_vision_available() else None def snake_case_ ( self : Optional[int] ) -> Any: _A = DeformableDetrImageProcessingTester(self ) @property def snake_case_ ( self : Union[str, Any] ) -> Dict: return self.image_processor_tester.prepare_image_processor_dict() def snake_case_ ( self : Optional[int] ) -> List[str]: _A = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(__lowerCAmelCase , '''image_mean''' ) ) self.assertTrue(hasattr(__lowerCAmelCase , '''image_std''' ) ) self.assertTrue(hasattr(__lowerCAmelCase , '''do_normalize''' ) ) self.assertTrue(hasattr(__lowerCAmelCase , '''do_resize''' ) ) self.assertTrue(hasattr(__lowerCAmelCase , '''do_rescale''' ) ) self.assertTrue(hasattr(__lowerCAmelCase , '''do_pad''' ) ) self.assertTrue(hasattr(__lowerCAmelCase , '''size''' ) ) def snake_case_ ( self : List[str] ) 
-> int: _A = self.image_processing_class.from_dict(self.image_processor_dict ) self.assertEqual(image_processor.size , {'''shortest_edge''': 18, '''longest_edge''': 13_33} ) self.assertEqual(image_processor.do_pad , __lowerCAmelCase ) _A = self.image_processing_class.from_dict( self.image_processor_dict , size=42 , max_size=84 , pad_and_return_pixel_mask=__lowerCAmelCase ) self.assertEqual(image_processor.size , {'''shortest_edge''': 42, '''longest_edge''': 84} ) self.assertEqual(image_processor.do_pad , __lowerCAmelCase ) def snake_case_ ( self : Any ) -> Union[str, Any]: pass def snake_case_ ( self : List[str] ) -> Optional[int]: # Initialize image_processing _A = self.image_processing_class(**self.image_processor_dict ) # create random PIL images _A = prepare_image_inputs(self.image_processor_tester , equal_resolution=__lowerCAmelCase ) for image in image_inputs: self.assertIsInstance(__lowerCAmelCase , Image.Image ) # Test not batched input _A = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values _A , _A = self.image_processor_tester.get_expected_values(__lowerCAmelCase ) self.assertEqual( encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , ) # Test batched _A , _A = self.image_processor_tester.get_expected_values(__lowerCAmelCase , batched=__lowerCAmelCase ) _A = image_processing(__lowerCAmelCase , return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, expected_height, expected_width, ) , ) def snake_case_ ( self : Tuple ) -> int: # Initialize image_processing _A = self.image_processing_class(**self.image_processor_dict ) # create random numpy tensors _A = prepare_image_inputs(self.image_processor_tester , equal_resolution=__lowerCAmelCase , numpify=__lowerCAmelCase ) for image in image_inputs: self.assertIsInstance(__lowerCAmelCase , np.ndarray ) # Test not batched input _A = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values _A , _A = self.image_processor_tester.get_expected_values(__lowerCAmelCase ) self.assertEqual( encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , ) # Test batched _A = image_processing(__lowerCAmelCase , return_tensors='''pt''' ).pixel_values _A , _A = self.image_processor_tester.get_expected_values(__lowerCAmelCase , batched=__lowerCAmelCase ) self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, expected_height, expected_width, ) , ) def snake_case_ ( self : Optional[Any] ) -> int: # Initialize image_processing _A = self.image_processing_class(**self.image_processor_dict ) # create random PyTorch tensors _A = prepare_image_inputs(self.image_processor_tester , equal_resolution=__lowerCAmelCase , torchify=__lowerCAmelCase ) for image in image_inputs: self.assertIsInstance(__lowerCAmelCase , torch.Tensor ) # Test not batched input _A = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values _A , _A = self.image_processor_tester.get_expected_values(__lowerCAmelCase ) self.assertEqual( encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , ) # Test batched _A = image_processing(__lowerCAmelCase , return_tensors='''pt''' ).pixel_values _A , _A = self.image_processor_tester.get_expected_values(__lowerCAmelCase , batched=__lowerCAmelCase ) self.assertEqual( encoded_images.shape 
, ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, expected_height, expected_width, ) , ) @slow def snake_case_ ( self : Optional[Any] ) -> Optional[int]: # prepare image and target _A = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' ) with open('''./tests/fixtures/tests_samples/COCO/coco_annotations.txt''' , '''r''' ) as f: _A = json.loads(f.read() ) _A = {'''image_id''': 3_97_69, '''annotations''': target} # encode them _A = DeformableDetrImageProcessor() _A = image_processing(images=__lowerCAmelCase , annotations=__lowerCAmelCase , return_tensors='''pt''' ) # verify pixel values _A = torch.Size([1, 3, 8_00, 10_66] ) self.assertEqual(encoding['''pixel_values'''].shape , __lowerCAmelCase ) _A = torch.tensor([0.2796, 0.3138, 0.3481] ) self.assertTrue(torch.allclose(encoding['''pixel_values'''][0, 0, 0, :3] , __lowerCAmelCase , atol=1E-4 ) ) # verify area _A = torch.tensor([5887.9600, 1_1250.2061, 48_9353.8438, 83_7122.7500, 14_7967.5156, 16_5732.3438] ) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''area'''] , __lowerCAmelCase ) ) # verify boxes _A = torch.Size([6, 4] ) self.assertEqual(encoding['''labels'''][0]['''boxes'''].shape , __lowerCAmelCase ) _A = torch.tensor([0.5503, 0.2765, 0.0604, 0.2215] ) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''boxes'''][0] , __lowerCAmelCase , atol=1E-3 ) ) # verify image_id _A = torch.tensor([3_97_69] ) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''image_id'''] , __lowerCAmelCase ) ) # verify is_crowd _A = torch.tensor([0, 0, 0, 0, 0, 0] ) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''iscrowd'''] , __lowerCAmelCase ) ) # verify class_labels _A = torch.tensor([75, 75, 63, 65, 17, 17] ) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''class_labels'''] , __lowerCAmelCase ) ) # verify orig_size _A = torch.tensor([4_80, 6_40] ) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''orig_size'''] , __lowerCAmelCase ) ) # verify size _A = torch.tensor([8_00, 10_66] ) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''size'''] , __lowerCAmelCase ) ) @slow def snake_case_ ( self : List[str] ) -> List[str]: # prepare image, target and masks_path _A = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' ) with open('''./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt''' , '''r''' ) as f: _A = json.loads(f.read() ) _A = {'''file_name''': '''000000039769.png''', '''image_id''': 3_97_69, '''segments_info''': target} _A = pathlib.Path('''./tests/fixtures/tests_samples/COCO/coco_panoptic''' ) # encode them _A = DeformableDetrImageProcessor(format='''coco_panoptic''' ) _A = image_processing(images=__lowerCAmelCase , annotations=__lowerCAmelCase , masks_path=__lowerCAmelCase , return_tensors='''pt''' ) # verify pixel values _A = torch.Size([1, 3, 8_00, 10_66] ) self.assertEqual(encoding['''pixel_values'''].shape , __lowerCAmelCase ) _A = torch.tensor([0.2796, 0.3138, 0.3481] ) self.assertTrue(torch.allclose(encoding['''pixel_values'''][0, 0, 0, :3] , __lowerCAmelCase , atol=1E-4 ) ) # verify area _A = torch.tensor([14_7979.6875, 16_5527.0469, 48_4638.5938, 1_1292.9375, 5879.6562, 7634.1147] ) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''area'''] , __lowerCAmelCase ) ) # verify boxes _A = torch.Size([6, 4] ) self.assertEqual(encoding['''labels'''][0]['''boxes'''].shape , __lowerCAmelCase ) _A = torch.tensor([0.2625, 0.5437, 0.4688, 0.8625] ) 
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''boxes'''][0] , __lowerCAmelCase , atol=1E-3 ) ) # verify image_id _A = torch.tensor([3_97_69] ) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''image_id'''] , __lowerCAmelCase ) ) # verify is_crowd _A = torch.tensor([0, 0, 0, 0, 0, 0] ) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''iscrowd'''] , __lowerCAmelCase ) ) # verify class_labels _A = torch.tensor([17, 17, 63, 75, 75, 93] ) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''class_labels'''] , __lowerCAmelCase ) ) # verify masks _A = 82_28_73 self.assertEqual(encoding['''labels'''][0]['''masks'''].sum().item() , __lowerCAmelCase ) # verify orig_size _A = torch.tensor([4_80, 6_40] ) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''orig_size'''] , __lowerCAmelCase ) ) # verify size _A = torch.tensor([8_00, 10_66] ) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''size'''] , __lowerCAmelCase ) )
2
1
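A minimal usage sketch for the processor the tests above exercise. DeformableDetrImageProcessor is the class the sample imports; the image path mirrors the test fixture and the empty annotation list is an illustrative placeholder (normally it holds the COCO annotation dicts), so treat this as a sketch rather than a verified invocation:

from PIL import Image
from transformers import DeformableDetrImageProcessor

processor = DeformableDetrImageProcessor()
image = Image.open("000000039769.png")  # placeholder path, mirroring the test fixture
target = {"image_id": 39769, "annotations": []}  # normally the COCO annotation list
encoding = processor(images=image, annotations=target, return_tensors="pt")
print(encoding["pixel_values"].shape)  # e.g. torch.Size([1, 3, 800, 1066]) with the default resize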
import argparse from pathlib import Path from typing import Dict, OrderedDict, Tuple import torch from audiocraft.models import MusicGen from transformers import ( AutoFeatureExtractor, AutoTokenizer, EncodecModel, MusicgenDecoderConfig, MusicgenForConditionalGeneration, MusicgenProcessor, TaEncoderModel, ) from transformers.models.musicgen.modeling_musicgen import MusicgenForCausalLM from transformers.utils import logging logging.set_verbosity_info() UpperCAmelCase_ = logging.get_logger(__name__) UpperCAmelCase_ = ["""model.decoder.embed_positions.weights"""] def SCREAMING_SNAKE_CASE_ ( _snake_case :List[str] ) -> List[Any]: if "emb" in name: _A = name.replace('''emb''' , '''model.decoder.embed_tokens''' ) if "transformer" in name: _A = name.replace('''transformer''' , '''model.decoder''' ) if "cross_attention" in name: _A = name.replace('''cross_attention''' , '''encoder_attn''' ) if "linear1" in name: _A = name.replace('''linear1''' , '''fc1''' ) if "linear2" in name: _A = name.replace('''linear2''' , '''fc2''' ) if "norm1" in name: _A = name.replace('''norm1''' , '''self_attn_layer_norm''' ) if "norm_cross" in name: _A = name.replace('''norm_cross''' , '''encoder_attn_layer_norm''' ) if "norm2" in name: _A = name.replace('''norm2''' , '''final_layer_norm''' ) if "out_norm" in name: _A = name.replace('''out_norm''' , '''model.decoder.layer_norm''' ) if "linears" in name: _A = name.replace('''linears''' , '''lm_heads''' ) if "condition_provider.conditioners.description.output_proj" in name: _A = name.replace('''condition_provider.conditioners.description.output_proj''' , '''enc_to_dec_proj''' ) return name def SCREAMING_SNAKE_CASE_ ( _snake_case :OrderedDict , _snake_case :int ) -> Tuple[Dict, Dict]: _A = list(state_dict.keys() ) _A = {} for key in keys: _A = state_dict.pop(_snake_case ) _A = rename_keys(_snake_case ) if "in_proj_weight" in key: # split fused qkv proj _A = val[:hidden_size, :] _A = val[hidden_size : 2 * hidden_size, :] _A = val[-hidden_size:, :] elif "enc_to_dec_proj" in key: _A = val else: _A = val return state_dict, enc_dec_proj_state_dict def SCREAMING_SNAKE_CASE_ ( _snake_case :str ) -> MusicgenDecoderConfig: if checkpoint == "small": # default config values _A = 1_024 _A = 24 _A = 16 elif checkpoint == "medium": _A = 1_536 _A = 48 _A = 24 elif checkpoint == "large": _A = 2_048 _A = 48 _A = 32 else: raise ValueError(F'''Checkpoint should be one of `[\'small\', \'medium\', \'large\']`, got {checkpoint}.''' ) _A = MusicgenDecoderConfig( hidden_size=_snake_case , ffn_dim=hidden_size * 4 , num_hidden_layers=_snake_case , num_attention_heads=_snake_case , ) return config @torch.no_grad() def SCREAMING_SNAKE_CASE_ ( _snake_case :Optional[int] , _snake_case :int=None , _snake_case :int=None , _snake_case :Optional[int]="cpu" ) -> List[str]: _A = MusicGen.get_pretrained(_snake_case , device=_snake_case ) _A = decoder_config_from_checkpoint(_snake_case ) _A = fairseq_model.lm.state_dict() _A , _A = rename_state_dict( _snake_case , hidden_size=decoder_config.hidden_size ) _A = TaEncoderModel.from_pretrained('''t5-base''' ) _A = EncodecModel.from_pretrained('''facebook/encodec_32khz''' ) _A = MusicgenForCausalLM(_snake_case ).eval() # load all decoder weights - expect that we'll be missing embeddings and enc-dec projection _A , _A = decoder.load_state_dict(_snake_case , strict=_snake_case ) for key in missing_keys.copy(): if key.startswith(('''text_encoder''', '''audio_encoder''') ) or key in EXPECTED_MISSING_KEYS: missing_keys.remove(_snake_case ) if len(_snake_case ) > 0: 
raise ValueError(F'''Missing key(s) in state_dict: {missing_keys}''' ) if len(_snake_case ) > 0: raise ValueError(F'''Unexpected key(s) in state_dict: {unexpected_keys}''' ) # init the composite model _A = MusicgenForConditionalGeneration(text_encoder=_snake_case , audio_encoder=_snake_case , decoder=_snake_case ) # load the pre-trained enc-dec projection (from the decoder state dict) model.enc_to_dec_proj.load_state_dict(_snake_case ) # check we can do a forward pass _A = torch.arange(0 , 8 , dtype=torch.long ).reshape(2 , -1 ) _A = input_ids.reshape(2 * 4 , -1 ) with torch.no_grad(): _A = model(input_ids=_snake_case , decoder_input_ids=_snake_case ).logits if logits.shape != (8, 1, 2_048): raise ValueError('''Incorrect shape for logits''' ) # now construct the processor _A = AutoTokenizer.from_pretrained('''t5-base''' ) _A = AutoFeatureExtractor.from_pretrained('''facebook/encodec_32khz''' , padding_side='''left''' ) _A = MusicgenProcessor(feature_extractor=_snake_case , tokenizer=_snake_case ) # set the appropriate bos/pad token ids _A = 2_048 _A = 2_048 # set other default generation config params _A = int(30 * audio_encoder.config.frame_rate ) _A = True _A = 3.0 if pytorch_dump_folder is not None: Path(_snake_case ).mkdir(exist_ok=_snake_case ) logger.info(F'''Saving model {checkpoint} to {pytorch_dump_folder}''' ) model.save_pretrained(_snake_case ) processor.save_pretrained(_snake_case ) if repo_id: logger.info(F'''Pushing model {checkpoint} to {repo_id}''' ) model.push_to_hub(_snake_case ) processor.push_to_hub(_snake_case ) if __name__ == "__main__": UpperCAmelCase_ = argparse.ArgumentParser() # Required parameters parser.add_argument( """--checkpoint""", default="""small""", type=str, help="""Checkpoint size of the MusicGen model you'd like to convert. Can be one of: `['small', 'medium', 'large']`.""", ) parser.add_argument( """--pytorch_dump_folder""", required=True, default=None, type=str, help="""Path to the output PyTorch model directory.""", ) parser.add_argument( """--push_to_hub""", default=None, type=str, help="""Where to upload the converted model on the 🤗 hub.""" ) parser.add_argument( """--device""", default="""cpu""", type=str, help="""Torch device to run the conversion, either cpu or cuda.""" ) UpperCAmelCase_ = parser.parse_args() convert_musicgen_checkpoint(args.checkpoint, args.pytorch_dump_folder, args.push_to_hub)
2
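As a standalone illustration of the fused-QKV split performed inside the rename_state_dict logic above, this sketch shows how a single in_proj_weight of shape (3 * hidden, hidden) decomposes into separate query, key, and value projections; the tensor here is random dummy data, not a real checkpoint:

import torch

hidden_size = 4
in_proj_weight = torch.randn(3 * hidden_size, hidden_size)  # dummy fused projection

q_proj = in_proj_weight[:hidden_size, :]
k_proj = in_proj_weight[hidden_size : 2 * hidden_size, :]
v_proj = in_proj_weight[-hidden_size:, :]

# concatenating the three slices recovers the original fused matrix
assert torch.equal(torch.cat([q_proj, k_proj, v_proj]), in_proj_weight)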
UpperCAmelCase_ = 0 # The first color of the flag. UpperCAmelCase_ = 1 # The second color of the flag. UpperCAmelCase_ = 2 # The third color of the flag. UpperCAmelCase_ = (red, white, blue) def SCREAMING_SNAKE_CASE_ ( _snake_case :list ) -> list: if not sequence: return [] if len(_snake_case ) == 1: return list(_snake_case ) _A = 0 _A = len(_snake_case ) - 1 _A = 0 while mid <= high: if sequence[mid] == colors[0]: _A , _A = sequence[mid], sequence[low] low += 1 mid += 1 elif sequence[mid] == colors[1]: mid += 1 elif sequence[mid] == colors[2]: _A , _A = sequence[high], sequence[mid] high -= 1 else: _A = F'''The elements inside the sequence must contain only {colors} values''' raise ValueError(_snake_case ) return sequence if __name__ == "__main__": import doctest doctest.testmod() UpperCAmelCase_ = input("""Enter numbers separated by commas:\n""").strip() UpperCAmelCase_ = [int(item.strip()) for item in user_input.split(""",""")] print(f'{dutch_national_flag_sort(unsorted)}')
2
1
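A self-contained check of the three-way partition above; this sketch re-implements the same single-pass, three-pointer loop under its own names, since the sample's definitions are rewritten by the dataset's renaming:

def dnf_sort(sequence, colors=(0, 1, 2)):
    # invariant: everything left of `low` equals colors[0],
    # everything right of `high` equals colors[2]
    low, mid, high = 0, 0, len(sequence) - 1
    while mid <= high:
        if sequence[mid] == colors[0]:
            sequence[low], sequence[mid] = sequence[mid], sequence[low]
            low += 1
            mid += 1
        elif sequence[mid] == colors[1]:
            mid += 1
        else:
            sequence[mid], sequence[high] = sequence[high], sequence[mid]
            high -= 1
    return sequence

assert dnf_sort([2, 0, 1, 2, 0, 1]) == [0, 0, 1, 1, 2, 2]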
import argparse import datetime import json import time import warnings from logging import getLogger from pathlib import Path from typing import Dict, List import torch from tqdm import tqdm from transformers import AutoModelForSeqaSeqLM, AutoTokenizer from utils import calculate_bleu, calculate_rouge, chunks, parse_numeric_n_bool_cl_kwargs, use_task_specific_params UpperCAmelCase_ = getLogger(__name__) UpperCAmelCase_ = """cuda""" if torch.cuda.is_available() else """cpu""" def SCREAMING_SNAKE_CASE_ ( _snake_case :List[str] , _snake_case :str , _snake_case :str , _snake_case :int = 8 , _snake_case :str = DEFAULT_DEVICE , _snake_case :List[Any]=False , _snake_case :int="summarization" , _snake_case :str=None , **_snake_case :int , ) -> Dict: _A = Path(_snake_case ).open('''w''' , encoding='''utf-8''' ) _A = str(_snake_case ) _A = AutoModelForSeqaSeqLM.from_pretrained(_snake_case ).to(_snake_case ) if fpaa: _A = model.half() _A = AutoTokenizer.from_pretrained(_snake_case ) logger.info(F'''Inferred tokenizer type: {tokenizer.__class__}''' ) # if this is wrong, check config.model_type. _A = time.time() # update config with task specific params use_task_specific_params(_snake_case , _snake_case ) if prefix is None: _A = prefix or getattr(model.config , '''prefix''' , '''''' ) or '''''' for examples_chunk in tqdm(list(chunks(_snake_case , _snake_case ) ) ): _A = [prefix + text for text in examples_chunk] _A = tokenizer(_snake_case , return_tensors='''pt''' , truncation=_snake_case , padding='''longest''' ).to(_snake_case ) _A = model.generate( input_ids=batch.input_ids , attention_mask=batch.attention_mask , **_snake_case , ) _A = tokenizer.batch_decode(_snake_case , skip_special_tokens=_snake_case , clean_up_tokenization_spaces=_snake_case ) for hypothesis in dec: fout.write(hypothesis + '''\n''' ) fout.flush() fout.close() _A = int(time.time() - start_time ) # seconds _A = len(_snake_case ) return {"n_obs": n_obs, "runtime": runtime, "seconds_per_sample": round(runtime / n_obs , 4 )} def SCREAMING_SNAKE_CASE_ ( ) -> Optional[int]: return datetime.datetime.now().strftime('''%Y-%m-%d %H:%M:%S''' ) def SCREAMING_SNAKE_CASE_ ( _snake_case :List[Any]=True ) -> Any: _A = argparse.ArgumentParser() parser.add_argument('''model_name''' , type=_snake_case , help='''like facebook/bart-large-cnn,t5-base, etc.''' ) parser.add_argument('''input_path''' , type=_snake_case , help='''like cnn_dm/test.source''' ) parser.add_argument('''save_path''' , type=_snake_case , help='''where to save summaries''' ) parser.add_argument('''--reference_path''' , type=_snake_case , required=_snake_case , help='''like cnn_dm/test.target''' ) parser.add_argument('''--score_path''' , type=_snake_case , required=_snake_case , default='''metrics.json''' , help='''where to save metrics''' ) parser.add_argument('''--device''' , type=_snake_case , required=_snake_case , default=_snake_case , help='''cuda, cuda:1, cpu etc.''' ) parser.add_argument( '''--prefix''' , type=_snake_case , required=_snake_case , default=_snake_case , help='''will be added to the beginning of src examples''' ) parser.add_argument('''--task''' , type=_snake_case , default='''summarization''' , help='''used for task_specific_params + metrics''' ) parser.add_argument('''--bs''' , type=_snake_case , default=8 , required=_snake_case , help='''batch size''' ) parser.add_argument( '''--n_obs''' , type=_snake_case , default=-1 , required=_snake_case , help='''How many observations. 
Defaults to all.''' ) parser.add_argument('''--fp16''' , action='''store_true''' ) parser.add_argument('''--dump-args''' , action='''store_true''' , help='''print the custom hparams with the results''' ) parser.add_argument( '''--info''' , nargs='''?''' , type=_snake_case , const=datetime_now() , help=( '''use in conjunction w/ --dump-args to print with the results whatever other info you\'d like, e.g.''' ''' lang=en-ru. If no value is passed, the current datetime string will be used.''' ) , ) # Unspecified args like --num_beams=2 --decoder_start_token_id=4 are passed to model.generate _A , _A = parser.parse_known_args() _A = parse_numeric_n_bool_cl_kwargs(_snake_case ) if parsed_args and verbose: print(F'''parsed the following generate kwargs: {parsed_args}''' ) _A = [''' ''' + x.rstrip() if '''t5''' in args.model_name else x.rstrip() for x in open(args.input_path ).readlines()] if args.n_obs > 0: _A = examples[: args.n_obs] Path(args.save_path ).parent.mkdir(exist_ok=_snake_case ) if args.reference_path is None and Path(args.score_path ).exists(): warnings.warn(F'''score_path {args.score_path} will be overwritten unless you type ctrl-c.''' ) if args.device == "cpu" and args.fpaa: # this mix leads to RuntimeError: "threshold_cpu" not implemented for 'Half' raise ValueError('''Can\'t mix --fp16 and --device cpu''' ) _A = generate_summaries_or_translations( _snake_case , args.save_path , args.model_name , batch_size=args.bs , device=args.device , fpaa=args.fpaa , task=args.task , prefix=args.prefix , **_snake_case , ) if args.reference_path is None: return {} # Compute scores _A = calculate_bleu if '''translation''' in args.task else calculate_rouge _A = [x.rstrip() for x in open(args.save_path ).readlines()] _A = [x.rstrip() for x in open(args.reference_path ).readlines()][: len(_snake_case )] _A = score_fn(_snake_case , _snake_case ) scores.update(_snake_case ) if args.dump_args: scores.update(_snake_case ) if args.info: _A = args.info if verbose: print(_snake_case ) if args.score_path is not None: json.dump(_snake_case , open(args.score_path , '''w''' ) ) return scores if __name__ == "__main__": # Usage for MT: # python run_eval.py MODEL_NAME $DATA_DIR/test.source $save_dir/test_translations.txt --reference_path $DATA_DIR/test.target --score_path $save_dir/test_bleu.json --task translation $@ run_generate(verbose=True)
2
import itertools import math def SCREAMING_SNAKE_CASE_ ( _snake_case :int ) -> bool: if 1 < number < 4: # 2 and 3 are primes return True elif number < 2 or number % 2 == 0 or number % 3 == 0: # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes return False # All prime numbers greater than 3 are of the form 6k +/- 1 for i in range(5 , int(math.sqrt(_snake_case ) + 1 ) , 6 ): if number % i == 0 or number % (i + 2) == 0: return False return True def SCREAMING_SNAKE_CASE_ ( ) -> Dict: _A = 2 while True: if is_prime(_snake_case ): yield num num += 1 def SCREAMING_SNAKE_CASE_ ( _snake_case :int = 10_001 ) -> int: return next(itertools.islice(prime_generator() , nth - 1 , _snake_case ) ) if __name__ == "__main__": print(f'{solution() = }')
2
1
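The same 6k +/- 1 trial-division idea, sketched standalone so the generator can be sanity-checked against the first few primes (the underlying problem asks for the 10001st prime):

import itertools
import math

def is_prime(n):
    if 1 < n < 4:  # 2 and 3
        return True
    if n < 2 or n % 2 == 0 or n % 3 == 0:
        return False
    # every prime above 3 has the form 6k - 1 or 6k + 1
    for i in range(5, int(math.sqrt(n)) + 1, 6):
        if n % i == 0 or n % (i + 2) == 0:
            return False
    return True

primes = (n for n in itertools.count(2) if is_prime(n))
assert list(itertools.islice(primes, 6)) == [2, 3, 5, 7, 11, 13]  # the 6th prime is 13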
from abc import ABC, abstractmethod from typing import List, Optional class lowerCamelCase__ ( _A): """simple docstring""" def __init__( self : Dict ) -> Tuple: # test for the above condition self.test() def snake_case_ ( self : List[str] ) -> Dict: _A = 0 _A = False while not completed: if counter == 1: self.reset() _A = self.advance() if not self.does_advance(__lowerCAmelCase ): raise Exception( '''Custom Constraint is not defined correctly. self.does_advance(self.advance()) must be true.''' ) _A , _A , _A = self.update(__lowerCAmelCase ) counter += 1 if counter > 1_00_00: raise Exception('''update() does not fulfill the constraint.''' ) if self.remaining() != 0: raise Exception('''Custom Constraint is not defined correctly.''' ) @abstractmethod def snake_case_ ( self : Dict ) -> str: raise NotImplementedError( f'''{self.__class__} is an abstract class. Only classes inheriting this class can be called.''' ) @abstractmethod def snake_case_ ( self : List[Any] , __lowerCAmelCase : int ) -> Any: raise NotImplementedError( f'''{self.__class__} is an abstract class. Only classes inheriting this class can be called.''' ) @abstractmethod def snake_case_ ( self : Union[str, Any] , __lowerCAmelCase : int ) -> Tuple: raise NotImplementedError( f'''{self.__class__} is an abstract class. Only classes inheriting this class can be called.''' ) @abstractmethod def snake_case_ ( self : List[str] ) -> Optional[int]: raise NotImplementedError( f'''{self.__class__} is an abstract class. Only classes inheriting this class can be called.''' ) @abstractmethod def snake_case_ ( self : List[Any] ) -> int: raise NotImplementedError( f'''{self.__class__} is an abstract class. Only classes inheriting this class can be called.''' ) @abstractmethod def snake_case_ ( self : Tuple , __lowerCAmelCase : Union[str, Any]=False ) -> Optional[Any]: raise NotImplementedError( f'''{self.__class__} is an abstract class. Only classes inheriting this class can be called.''' ) class lowerCamelCase__ ( _A): """simple docstring""" def __init__( self : List[Any] , __lowerCAmelCase : List[int] ) -> Any: super(__lowerCAmelCase , self ).__init__() if not isinstance(__lowerCAmelCase , __lowerCAmelCase ) or len(__lowerCAmelCase ) == 0: raise ValueError(f'''`token_ids` has to be a non-empty list, but is {token_ids}.''' ) if any((not isinstance(__lowerCAmelCase , __lowerCAmelCase ) or token_id < 0) for token_id in token_ids ): raise ValueError(f'''Each list in `token_ids` has to be a list of positive integers, but is {token_ids}.''' ) _A = token_ids _A = len(self.token_ids ) _A = -1 # the index of the currently fulfilled step _A = False def snake_case_ ( self : Optional[int] ) -> str: if self.completed: return None return self.token_ids[self.fulfilled_idx + 1] def snake_case_ ( self : Optional[Any] , __lowerCAmelCase : int ) -> str: if not isinstance(__lowerCAmelCase , __lowerCAmelCase ): raise ValueError(f'''`token_id` has to be an `int`, but is {token_id} of type {type(__lowerCAmelCase )}''' ) if self.completed: return False return token_id == self.token_ids[self.fulfilled_idx + 1] def snake_case_ ( self : Dict , __lowerCAmelCase : int ) -> str: if not isinstance(__lowerCAmelCase , __lowerCAmelCase ): raise ValueError(f'''`token_id` has to be an `int`, but is {token_id} of type {type(__lowerCAmelCase )}''' ) _A = False _A = False _A = False if self.does_advance(__lowerCAmelCase ): self.fulfilled_idx += 1 _A = True if self.fulfilled_idx == (self.seqlen - 1): _A = True _A = completed else: # failed to make progress. 
_A = True self.reset() return stepped, completed, reset def snake_case_ ( self : Union[str, Any] ) -> int: _A = False _A = 0 def snake_case_ ( self : Union[str, Any] ) -> Any: return self.seqlen - (self.fulfilled_idx + 1) def snake_case_ ( self : Optional[int] , __lowerCAmelCase : Dict=False ) -> str: _A = PhrasalConstraint(self.token_ids ) if stateful: _A = self.seqlen _A = self.fulfilled_idx _A = self.completed return new_constraint class lowerCamelCase__ : """simple docstring""" def __init__( self : Union[str, Any] , __lowerCAmelCase : List[List[int]] , __lowerCAmelCase : Optional[Any]=True ) -> Any: _A = max([len(__lowerCAmelCase ) for one in nested_token_ids] ) _A = {} for token_ids in nested_token_ids: _A = root for tidx, token_id in enumerate(__lowerCAmelCase ): if token_id not in level: _A = {} _A = level[token_id] if no_subsets and self.has_subsets(__lowerCAmelCase , __lowerCAmelCase ): raise ValueError( '''Each list in `nested_token_ids` can\'t be a complete subset of another list, but is''' f''' {nested_token_ids}.''' ) _A = root def snake_case_ ( self : Dict , __lowerCAmelCase : str ) -> List[str]: _A = self.trie for current_token in current_seq: _A = start[current_token] _A = list(start.keys() ) return next_tokens def snake_case_ ( self : List[str] , __lowerCAmelCase : str ) -> int: _A = self.next_tokens(__lowerCAmelCase ) return len(__lowerCAmelCase ) == 0 def snake_case_ ( self : List[str] , __lowerCAmelCase : int ) -> Optional[Any]: _A = list(root.values() ) if len(__lowerCAmelCase ) == 0: return 1 else: return sum([self.count_leaves(__lowerCAmelCase ) for nn in next_nodes] ) def snake_case_ ( self : Optional[Any] , __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : str ) -> int: _A = self.count_leaves(__lowerCAmelCase ) return len(__lowerCAmelCase ) != leaf_count class lowerCamelCase__ ( _A): """simple docstring""" def __init__( self : Optional[int] , __lowerCAmelCase : List[List[int]] ) -> Union[str, Any]: super(__lowerCAmelCase , self ).__init__() if not isinstance(__lowerCAmelCase , __lowerCAmelCase ) or len(__lowerCAmelCase ) == 0: raise ValueError(f'''`nested_token_ids` has to be a non-empty list, but is {nested_token_ids}.''' ) if any(not isinstance(__lowerCAmelCase , __lowerCAmelCase ) for token_ids in nested_token_ids ): raise ValueError(f'''`nested_token_ids` has to be a list of lists, but is {nested_token_ids}.''' ) if any( any((not isinstance(__lowerCAmelCase , __lowerCAmelCase ) or token_id < 0) for token_id in token_ids ) for token_ids in nested_token_ids ): raise ValueError( f'''Each list in `nested_token_ids` has to be a list of positive integers, but is {nested_token_ids}.''' ) _A = DisjunctiveTrie(__lowerCAmelCase ) _A = nested_token_ids _A = self.trie.max_height _A = [] _A = False def snake_case_ ( self : str ) -> str: _A = self.trie.next_tokens(self.current_seq ) if len(__lowerCAmelCase ) == 0: return None else: return token_list def snake_case_ ( self : List[Any] , __lowerCAmelCase : int ) -> List[str]: if not isinstance(__lowerCAmelCase , __lowerCAmelCase ): raise ValueError(f'''`token_id` is supposed to be type `int`, but is {token_id} of type {type(__lowerCAmelCase )}''' ) _A = self.trie.next_tokens(self.current_seq ) return token_id in next_tokens def snake_case_ ( self : List[Any] , __lowerCAmelCase : int ) -> Tuple: if not isinstance(__lowerCAmelCase , __lowerCAmelCase ): raise ValueError(f'''`token_id` is supposed to be type `int`, but is {token_id} of type {type(__lowerCAmelCase )}''' ) _A = False _A = False _A = False if 
self.does_advance(__lowerCAmelCase ): self.current_seq.append(__lowerCAmelCase ) _A = True else: _A = True self.reset() _A = self.trie.reached_leaf(self.current_seq ) _A = completed return stepped, completed, reset def snake_case_ ( self : Tuple ) -> int: _A = False _A = [] def snake_case_ ( self : Any ) -> List[str]: if self.completed: # since this can be completed without reaching max height return 0 else: return self.seqlen - len(self.current_seq ) def snake_case_ ( self : str , __lowerCAmelCase : Dict=False ) -> Optional[int]: _A = DisjunctiveConstraint(self.token_ids ) if stateful: _A = self.seqlen _A = self.current_seq _A = self.completed return new_constraint class lowerCamelCase__ : """simple docstring""" def __init__( self : Union[str, Any] , __lowerCAmelCase : List[Constraint] ) -> Optional[Any]: _A = constraints # max # of steps required to fulfill a given constraint _A = max([c.seqlen for c in constraints] ) _A = len(__lowerCAmelCase ) _A = False self.init_state() def snake_case_ ( self : int ) -> str: _A = [] _A = None _A = [constraint.copy(stateful=__lowerCAmelCase ) for constraint in self.constraints] def snake_case_ ( self : int ) -> Any: _A = 0 if self.inprogress_constraint: # extra points for having a constraint mid-fulfilled add += self.max_seqlen - self.inprogress_constraint.remaining() return (len(self.complete_constraints ) * self.max_seqlen) + add def snake_case_ ( self : Any ) -> str: _A = [] if self.inprogress_constraint is None: for constraint in self.pending_constraints: # "pending" == "unfulfilled yet" _A = constraint.advance() if isinstance(__lowerCAmelCase , __lowerCAmelCase ): token_list.append(__lowerCAmelCase ) elif isinstance(__lowerCAmelCase , __lowerCAmelCase ): token_list.extend(__lowerCAmelCase ) else: _A = self.inprogress_constraint.advance() if isinstance(__lowerCAmelCase , __lowerCAmelCase ): token_list.append(__lowerCAmelCase ) elif isinstance(__lowerCAmelCase , __lowerCAmelCase ): token_list.extend(__lowerCAmelCase ) if len(__lowerCAmelCase ) == 0: return None else: return token_list def snake_case_ ( self : List[str] , __lowerCAmelCase : Optional[List[int]] ) -> Dict: self.init_state() if token_ids is not None: for token in token_ids: # completes or steps **one** constraint _A , _A = self.add(__lowerCAmelCase ) # the entire list of constraints are fulfilled if self.completed: break def snake_case_ ( self : Any , __lowerCAmelCase : int ) -> Optional[int]: if not isinstance(__lowerCAmelCase , __lowerCAmelCase ): raise ValueError(f'''`token_id` should be an `int`, but is `{token_id}`.''' ) _A , _A = False, False if self.completed: _A = True _A = False return complete, stepped if self.inprogress_constraint is not None: # In the middle of fulfilling a constraint. If the `token_id` *does* makes an incremental progress to current # job, simply update the state _A , _A , _A = self.inprogress_constraint.update(__lowerCAmelCase ) if reset: # 1. If the next token breaks the progress, then we must restart. # e.g. constraint = "I love pies" and sequence so far is "I love" but `token_id` == "books". # But that doesn't mean we self.init_state(), since we only reset the state for this particular # constraint, not the full list of constraints. self.pending_constraints.append(self.inprogress_constraint.copy(stateful=__lowerCAmelCase ) ) _A = None if complete: # 2. If the next token completes the constraint, move it to completed list, set # inprogress to None. If there are no pending constraints either, then this full list of constraints # is complete. 
self.complete_constraints.append(self.inprogress_constraint ) _A = None if len(self.pending_constraints ) == 0: # we're done! _A = True else: # Not in the middle of fulfilling a constraint. So does this `token_id` helps us step towards any of our list # of constraints? for cidx, pending_constraint in enumerate(self.pending_constraints ): if pending_constraint.does_advance(__lowerCAmelCase ): _A , _A , _A = pending_constraint.update(__lowerCAmelCase ) if not stepped: raise Exception( '''`constraint.update(token_id)` is not yielding incremental progress, ''' '''even though `constraint.does_advance(token_id)` is true.''' ) if complete: self.complete_constraints.append(__lowerCAmelCase ) _A = None if not complete and stepped: _A = pending_constraint if complete or stepped: # If we made any progress at all, then it's at least not a "pending constraint". _A = ( self.pending_constraints[:cidx] + self.pending_constraints[cidx + 1 :] ) if len(self.pending_constraints ) == 0 and self.inprogress_constraint is None: # If there's no longer any pending after this and no inprogress either, then we must be # complete. _A = True break # prevent accidentally stepping through multiple constraints with just one token. return complete, stepped def snake_case_ ( self : Tuple , __lowerCAmelCase : Union[str, Any]=True ) -> Optional[Any]: _A = ConstraintListState(self.constraints ) # we actually never though self.constraints objects # throughout this process. So it's at initialization state. if stateful: _A = [ constraint.copy(stateful=__lowerCAmelCase ) for constraint in self.complete_constraints ] if self.inprogress_constraint is not None: _A = self.inprogress_constraint.copy(stateful=__lowerCAmelCase ) _A = [constraint.copy() for constraint in self.pending_constraints] return new_state
2
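A tiny standalone sketch of the DisjunctiveTrie idea used above: store several token-id sequences in a nested dict, then ask which token ids may legally come next after a given prefix; an empty continuation set plays the role of reached_leaf:

def build_trie(sequences):
    root = {}
    for seq in sequences:
        level = root
        for token_id in seq:
            level = level.setdefault(token_id, {})
    return root

def next_tokens(trie, prefix):
    level = trie
    for token_id in prefix:
        level = level[token_id]
    return list(level)

trie = build_trie([[1, 2, 3], [1, 2, 4], [1, 5]])
assert sorted(next_tokens(trie, [1])) == [2, 5]
assert next_tokens(trie, [1, 2, 3]) == []  # empty continuation set == reached a leaf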
import collections import os import re from pathlib import Path UpperCAmelCase_ = """src/transformers""" # Matches is_xxx_available() UpperCAmelCase_ = re.compile(r"""is\_([a-z_]*)_available()""") # Catches a one-line _import_struct = {xxx} UpperCAmelCase_ = re.compile(r"""^_import_structure\s+=\s+\{([^\}]+)\}""") # Catches a line with a key-values pattern: "bla": ["foo", "bar"] UpperCAmelCase_ = re.compile(r"""\s+\"\S*\":\s+\[([^\]]*)\]""") # Catches a line if not is_foo_available UpperCAmelCase_ = re.compile(r"""^\s*if\s+not\s+is\_[a-z_]*\_available\(\)""") # Catches a line _import_struct["bla"].append("foo") UpperCAmelCase_ = re.compile(r"""^\s*_import_structure\[\"\S*\"\]\.append\(\"(\S*)\"\)""") # Catches a line _import_struct["bla"].extend(["foo", "bar"]) or _import_struct["bla"] = ["foo", "bar"] UpperCAmelCase_ = re.compile(r"""^\s*_import_structure\[\S*\](?:\.extend\(|\s*=\s+)\[([^\]]*)\]""") # Catches a line with an object between quotes and a comma: "MyModel", UpperCAmelCase_ = re.compile(r"""^\s+\"([^\"]+)\",""") # Catches a line with objects between brackets only: ["foo", "bar"], UpperCAmelCase_ = re.compile(r"""^\s+\[([^\]]+)\]""") # Catches a line with from foo import bar, bla, boo UpperCAmelCase_ = re.compile(r"""\s+from\s+\S*\s+import\s+([^\(\s].*)\n""") # Catches a line with try: UpperCAmelCase_ = re.compile(r"""^\s*try:""") # Catches a line with else: UpperCAmelCase_ = re.compile(r"""^\s*else:""") def SCREAMING_SNAKE_CASE_ ( _snake_case :Optional[int] ) -> Any: if _re_test_backend.search(_snake_case ) is None: return None _A = [b[0] for b in _re_backend.findall(_snake_case )] backends.sort() return "_and_".join(_snake_case ) def SCREAMING_SNAKE_CASE_ ( _snake_case :Any ) -> Any: with open(_snake_case , '''r''' , encoding='''utf-8''' , newline='''\n''' ) as f: _A = f.readlines() _A = 0 while line_index < len(_snake_case ) and not lines[line_index].startswith('''_import_structure = {''' ): line_index += 1 # If this is a traditional init, just return. if line_index >= len(_snake_case ): return None # First grab the objects without a specific backend in _import_structure _A = [] while not lines[line_index].startswith('''if TYPE_CHECKING''' ) and find_backend(lines[line_index] ) is None: _A = lines[line_index] # If we have everything on a single line, let's deal with it. if _re_one_line_import_struct.search(_snake_case ): _A = _re_one_line_import_struct.search(_snake_case ).groups()[0] _A = re.findall(r'''\[([^\]]+)\]''' , _snake_case ) for imp in imports: objects.extend([obj[1:-1] for obj in imp.split(''', ''' )] ) line_index += 1 continue _A = _re_import_struct_key_value.search(_snake_case ) if single_line_import_search is not None: _A = [obj[1:-1] for obj in single_line_import_search.groups()[0].split(''', ''' ) if len(_snake_case ) > 0] objects.extend(_snake_case ) elif line.startswith(''' ''' * 8 + '''"''' ): objects.append(line[9:-3] ) line_index += 1 _A = {'''none''': objects} # Let's continue with backend-specific objects in _import_structure while not lines[line_index].startswith('''if TYPE_CHECKING''' ): # If the line is an if not is_backend_available, we grab all objects associated. 
_A = find_backend(lines[line_index] ) # Check if the backend declaration is inside a try block: if _re_try.search(lines[line_index - 1] ) is None: _A = None if backend is not None: line_index += 1 # Scroll until we hit the else block of try-except-else while _re_else.search(lines[line_index] ) is None: line_index += 1 line_index += 1 _A = [] # Until we unindent, add backend objects to the list while len(lines[line_index] ) <= 1 or lines[line_index].startswith(''' ''' * 4 ): _A = lines[line_index] if _re_import_struct_add_one.search(_snake_case ) is not None: objects.append(_re_import_struct_add_one.search(_snake_case ).groups()[0] ) elif _re_import_struct_add_many.search(_snake_case ) is not None: _A = _re_import_struct_add_many.search(_snake_case ).groups()[0].split(''', ''' ) _A = [obj[1:-1] for obj in imports if len(_snake_case ) > 0] objects.extend(_snake_case ) elif _re_between_brackets.search(_snake_case ) is not None: _A = _re_between_brackets.search(_snake_case ).groups()[0].split(''', ''' ) _A = [obj[1:-1] for obj in imports if len(_snake_case ) > 0] objects.extend(_snake_case ) elif _re_quote_object.search(_snake_case ) is not None: objects.append(_re_quote_object.search(_snake_case ).groups()[0] ) elif line.startswith(''' ''' * 8 + '''"''' ): objects.append(line[9:-3] ) elif line.startswith(''' ''' * 12 + '''"''' ): objects.append(line[13:-3] ) line_index += 1 _A = objects else: line_index += 1 # At this stage we are in the TYPE_CHECKING part, first grab the objects without a specific backend _A = [] while ( line_index < len(_snake_case ) and find_backend(lines[line_index] ) is None and not lines[line_index].startswith('''else''' ) ): _A = lines[line_index] _A = _re_import.search(_snake_case ) if single_line_import_search is not None: objects.extend(single_line_import_search.groups()[0].split(''', ''' ) ) elif line.startswith(''' ''' * 8 ): objects.append(line[8:-2] ) line_index += 1 _A = {'''none''': objects} # Let's continue with backend-specific objects while line_index < len(_snake_case ): # If the line is an if is_backend_available, we grab all objects associated. 
_A = find_backend(lines[line_index] ) # Check if the backend declaration is inside a try block: if _re_try.search(lines[line_index - 1] ) is None: _A = None if backend is not None: line_index += 1 # Scroll until we hit the else block of try-except-else while _re_else.search(lines[line_index] ) is None: line_index += 1 line_index += 1 _A = [] # Until we unindent, add backend objects to the list while len(lines[line_index] ) <= 1 or lines[line_index].startswith(''' ''' * 8 ): _A = lines[line_index] _A = _re_import.search(_snake_case ) if single_line_import_search is not None: objects.extend(single_line_import_search.groups()[0].split(''', ''' ) ) elif line.startswith(''' ''' * 12 ): objects.append(line[12:-2] ) line_index += 1 _A = objects else: line_index += 1 return import_dict_objects, type_hint_objects def SCREAMING_SNAKE_CASE_ ( _snake_case :List[str] , _snake_case :Dict ) -> Any: def find_duplicates(_snake_case :Any ): return [k for k, v in collections.Counter(_snake_case ).items() if v > 1] if list(import_dict_objects.keys() ) != list(type_hint_objects.keys() ): return ["Both sides of the init do not have the same backends!"] _A = [] for key in import_dict_objects.keys(): _A = find_duplicates(import_dict_objects[key] ) if duplicate_imports: errors.append(F'''Duplicate _import_structure definitions for: {duplicate_imports}''' ) _A = find_duplicates(type_hint_objects[key] ) if duplicate_type_hints: errors.append(F'''Duplicate TYPE_CHECKING objects for: {duplicate_type_hints}''' ) if sorted(set(import_dict_objects[key] ) ) != sorted(set(type_hint_objects[key] ) ): _A = '''base imports''' if key == '''none''' else F'''{key} backend''' errors.append(F'''Differences for {name}:''' ) for a in type_hint_objects[key]: if a not in import_dict_objects[key]: errors.append(F''' {a} in TYPE_HINT but not in _import_structure.''' ) for a in import_dict_objects[key]: if a not in type_hint_objects[key]: errors.append(F''' {a} in _import_structure but not in TYPE_HINT.''' ) return errors def SCREAMING_SNAKE_CASE_ ( ) -> int: _A = [] for root, _, files in os.walk(_snake_case ): if "__init__.py" in files: _A = os.path.join(_snake_case , '''__init__.py''' ) _A = parse_init(_snake_case ) if objects is not None: _A = analyze_results(*_snake_case ) if len(_snake_case ) > 0: _A = F'''Problem in {fname}, both halves do not define the same objects.\n{errors[0]}''' failures.append('''\n'''.join(_snake_case ) ) if len(_snake_case ) > 0: raise ValueError('''\n\n'''.join(_snake_case ) ) def SCREAMING_SNAKE_CASE_ ( ) -> Optional[int]: _A = [] for path, directories, files in os.walk(_snake_case ): for folder in directories: # Ignore private modules if folder.startswith('''_''' ): directories.remove(_snake_case ) continue # Ignore leftovers from branches (empty folders apart from pycache) if len(list((Path(_snake_case ) / folder).glob('''*.py''' ) ) ) == 0: continue _A = str((Path(_snake_case ) / folder).relative_to(_snake_case ) ) _A = short_path.replace(os.path.sep , '''.''' ) submodules.append(_snake_case ) for fname in files: if fname == "__init__.py": continue _A = str((Path(_snake_case ) / fname).relative_to(_snake_case ) ) _A = short_path.replace('''.py''' , '''''' ).replace(os.path.sep , '''.''' ) if len(submodule.split('''.''' ) ) == 1: submodules.append(_snake_case ) return submodules UpperCAmelCase_ = [ """convert_pytorch_checkpoint_to_tf2""", """modeling_flax_pytorch_utils""", """models.esm.openfold_utils""", ] def SCREAMING_SNAKE_CASE_ ( ) -> List[str]: # This is to make sure the transformers module 
imported is the one in the repo. from transformers.utils import direct_transformers_import _A = direct_transformers_import(_snake_case ) _A = set(transformers._import_structure.keys() ) # This contains all the base keys of the _import_structure object defined in the init, but if the user is missing # some optional dependencies, they may not have all of them. Thus we read the init to collect all additions and # (potentially re-) add them. with open(os.path.join(_snake_case , '''__init__.py''' ) , '''r''' ) as f: _A = f.read() import_structure_keys.update(set(re.findall(r'''import_structure\[\"([^\"]*)\"\]''' , _snake_case ) ) ) _A = [ module for module in get_transformers_submodules() if module not in IGNORE_SUBMODULES and module not in import_structure_keys ] if len(_snake_case ) > 0: _A = '''\n'''.join(F'''- {module}''' for module in module_not_registered ) raise ValueError( '''The following submodules are not properly registered in the main init of Transformers:\n''' F'''{list_of_modules}\n''' '''Make sure they appear somewhere in the keys of `_import_structure` with an empty list as value.''' ) if __name__ == "__main__": check_all_inits() check_submodules()
2
1
import unittest import numpy as np from transformers.testing_utils import require_torch, require_vision from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import LevitImageProcessor class lowerCamelCase__ ( unittest.TestCase): """simple docstring""" def __init__( self : Union[str, Any] , __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : Dict=7 , __lowerCAmelCase : Dict=3 , __lowerCAmelCase : List[str]=18 , __lowerCAmelCase : str=30 , __lowerCAmelCase : Tuple=4_00 , __lowerCAmelCase : Union[str, Any]=True , __lowerCAmelCase : str=None , __lowerCAmelCase : Any=True , __lowerCAmelCase : List[Any]=None , __lowerCAmelCase : Optional[Any]=True , __lowerCAmelCase : str=[0.5, 0.5, 0.5] , __lowerCAmelCase : Dict=[0.5, 0.5, 0.5] , ) -> Optional[Any]: _A = size if size is not None else {'''shortest_edge''': 18} _A = crop_size if crop_size is not None else {'''height''': 18, '''width''': 18} _A = parent _A = batch_size _A = num_channels _A = image_size _A = min_resolution _A = max_resolution _A = do_resize _A = size _A = do_center_crop _A = crop_size _A = do_normalize _A = image_mean _A = image_std def snake_case_ ( self : Optional[Any] ) -> Optional[int]: return { "image_mean": self.image_mean, "image_std": self.image_std, "do_normalize": self.do_normalize, "do_resize": self.do_resize, "do_center_crop": self.do_center_crop, "size": self.size, "crop_size": self.crop_size, } @require_torch @require_vision class lowerCamelCase__ ( _A , unittest.TestCase): """simple docstring""" a__ : str = LevitImageProcessor if is_vision_available() else None def snake_case_ ( self : Optional[Any] ) -> Dict: _A = LevitImageProcessingTester(self ) @property def snake_case_ ( self : str ) -> str: return self.image_processor_tester.prepare_image_processor_dict() def snake_case_ ( self : Tuple ) -> Optional[int]: _A = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(__lowerCAmelCase , '''image_mean''' ) ) self.assertTrue(hasattr(__lowerCAmelCase , '''image_std''' ) ) self.assertTrue(hasattr(__lowerCAmelCase , '''do_normalize''' ) ) self.assertTrue(hasattr(__lowerCAmelCase , '''do_resize''' ) ) self.assertTrue(hasattr(__lowerCAmelCase , '''do_center_crop''' ) ) self.assertTrue(hasattr(__lowerCAmelCase , '''size''' ) ) def snake_case_ ( self : Any ) -> Union[str, Any]: _A = self.image_processing_class.from_dict(self.image_processor_dict ) self.assertEqual(image_processor.size , {'''shortest_edge''': 18} ) self.assertEqual(image_processor.crop_size , {'''height''': 18, '''width''': 18} ) _A = self.image_processing_class.from_dict(self.image_processor_dict , size=42 , crop_size=84 ) self.assertEqual(image_processor.size , {'''shortest_edge''': 42} ) self.assertEqual(image_processor.crop_size , {'''height''': 84, '''width''': 84} ) def snake_case_ ( self : Optional[int] ) -> Dict: pass def snake_case_ ( self : List[Any] ) -> Any: # Initialize image_processing _A = self.image_processing_class(**self.image_processor_dict ) # create random PIL images _A = prepare_image_inputs(self.image_processor_tester , equal_resolution=__lowerCAmelCase ) for image in image_inputs: self.assertIsInstance(__lowerCAmelCase , Image.Image ) # Test not batched input _A = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape , ( 1, 
self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width'''], ) , ) # Test batched _A = image_processing(__lowerCAmelCase , return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width'''], ) , ) def snake_case_ ( self : Optional[Any] ) -> Tuple: # Initialize image_processing _A = self.image_processing_class(**self.image_processor_dict ) # create random numpy tensors _A = prepare_image_inputs(self.image_processor_tester , equal_resolution=__lowerCAmelCase , numpify=__lowerCAmelCase ) for image in image_inputs: self.assertIsInstance(__lowerCAmelCase , np.ndarray ) # Test not batched input _A = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width'''], ) , ) # Test batched _A = image_processing(__lowerCAmelCase , return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width'''], ) , ) def snake_case_ ( self : Union[str, Any] ) -> Dict: # Initialize image_processing _A = self.image_processing_class(**self.image_processor_dict ) # create random PyTorch tensors _A = prepare_image_inputs(self.image_processor_tester , equal_resolution=__lowerCAmelCase , torchify=__lowerCAmelCase ) for image in image_inputs: self.assertIsInstance(__lowerCAmelCase , torch.Tensor ) # Test not batched input _A = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width'''], ) , ) # Test batched _A = image_processing(__lowerCAmelCase , return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width'''], ) , )
2
from typing import List, Union from ..utils import ( add_end_docstrings, is_tf_available, is_torch_available, is_vision_available, logging, requires_backends, ) from .base import PIPELINE_INIT_ARGS, Pipeline if is_vision_available(): from PIL import Image from ..image_utils import load_image if is_tf_available(): from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_VISION_2_SEQ_MAPPING if is_torch_available(): import torch from ..models.auto.modeling_auto import MODEL_FOR_VISION_2_SEQ_MAPPING UpperCAmelCase_ = logging.get_logger(__name__) @add_end_docstrings(_A) class lowerCamelCase__ ( _A): """simple docstring""" def __init__( self : Optional[int] , *__lowerCAmelCase : List[Any] , **__lowerCAmelCase : List[str] ) -> List[str]: super().__init__(*__lowerCAmelCase , **__lowerCAmelCase ) requires_backends(self , '''vision''' ) self.check_model_type( TF_MODEL_FOR_VISION_2_SEQ_MAPPING if self.framework == '''tf''' else MODEL_FOR_VISION_2_SEQ_MAPPING ) def snake_case_ ( self : Any , __lowerCAmelCase : Dict=None , __lowerCAmelCase : List[str]=None , __lowerCAmelCase : Optional[Any]=None ) -> int: _A = {} _A = {} if prompt is not None: _A = prompt if generate_kwargs is not None: _A = generate_kwargs if max_new_tokens is not None: if "generate_kwargs" not in forward_kwargs: _A = {} if "max_new_tokens" in forward_kwargs["generate_kwargs"]: raise ValueError( '''\'max_new_tokens\' is defined twice, once in \'generate_kwargs\' and once as a direct parameter,''' ''' please use only one''' ) _A = max_new_tokens return preprocess_params, forward_kwargs, {} def __call__( self : List[str] , __lowerCAmelCase : Union[str, List[str], "Image.Image", List["Image.Image"]] , **__lowerCAmelCase : Union[str, Any] ) -> Union[str, Any]: return super().__call__(__lowerCAmelCase , **__lowerCAmelCase ) def snake_case_ ( self : List[Any] , __lowerCAmelCase : str , __lowerCAmelCase : Union[str, Any]=None ) -> int: _A = load_image(__lowerCAmelCase ) if prompt is not None: if not isinstance(__lowerCAmelCase , __lowerCAmelCase ): raise ValueError( f'''Received an invalid text input, got - {type(__lowerCAmelCase )} - but expected a single string. 
''' '''Note also that one single text can be provided for conditional image to text generation.''' ) _A = self.model.config.model_type if model_type == "git": _A = self.image_processor(images=__lowerCAmelCase , return_tensors=self.framework ) _A = self.tokenizer(text=__lowerCAmelCase , add_special_tokens=__lowerCAmelCase ).input_ids _A = [self.tokenizer.cls_token_id] + input_ids _A = torch.tensor(__lowerCAmelCase ).unsqueeze(0 ) model_inputs.update({'''input_ids''': input_ids} ) elif model_type == "pix2struct": _A = self.image_processor(images=__lowerCAmelCase , header_text=__lowerCAmelCase , return_tensors=self.framework ) elif model_type != "vision-encoder-decoder": # vision-encoder-decoder does not support conditional generation _A = self.image_processor(images=__lowerCAmelCase , return_tensors=self.framework ) _A = self.tokenizer(__lowerCAmelCase , return_tensors=self.framework ) model_inputs.update(__lowerCAmelCase ) else: raise ValueError(f'''Model type {model_type} does not support conditional text generation''' ) else: _A = self.image_processor(images=__lowerCAmelCase , return_tensors=self.framework ) if self.model.config.model_type == "git" and prompt is None: _A = None return model_inputs def snake_case_ ( self : Optional[int] , __lowerCAmelCase : Tuple , __lowerCAmelCase : Dict=None ) -> str: # Git model sets `model_inputs["input_ids"] = None` in `preprocess` (when `prompt=None`). In batch model, the # pipeline will group them into a list of `None`, which fail `_forward`. Avoid this by checking it first. if ( "input_ids" in model_inputs and isinstance(model_inputs['''input_ids'''] , __lowerCAmelCase ) and all(x is None for x in model_inputs['''input_ids'''] ) ): _A = None if generate_kwargs is None: _A = {} # FIXME: We need to pop here due to a difference in how `generation.py` and `generation.tf_utils.py` # parse inputs. In the Tensorflow version, `generate` raises an error if we don't use `input_ids` whereas # the PyTorch version matches it with `self.model.main_input_name` or `self.model.encoder.main_input_name` # in the `_prepare_model_inputs` method. _A = model_inputs.pop(self.model.main_input_name ) _A = self.model.generate(__lowerCAmelCase , **__lowerCAmelCase , **__lowerCAmelCase ) return model_outputs def snake_case_ ( self : Dict , __lowerCAmelCase : Any ) -> Union[str, Any]: _A = [] for output_ids in model_outputs: _A = { '''generated_text''': self.tokenizer.decode( __lowerCAmelCase , skip_special_tokens=__lowerCAmelCase , ) } records.append(__lowerCAmelCase ) return records
2
1
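A minimal usage sketch for the pipeline class above through the high-level pipeline factory; the checkpoint name and image path are illustrative choices, not requirements of the class:

from transformers import pipeline

captioner = pipeline("image-to-text", model="nlpconnect/vit-gpt2-image-captioning")
print(captioner("cats.png"))  # -> [{"generated_text": "..."}]
print(captioner("cats.png", max_new_tokens=20))  # forwarded to generate(), per the parameter sanitizer above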
import darl # noqa import gym import tqdm from diffusers.experimental import ValueGuidedRLPipeline UpperCAmelCase_ = { """n_samples""": 6_4, """horizon""": 3_2, """num_inference_steps""": 2_0, """n_guide_steps""": 2, # can set to 0 for faster sampling, does not use value network """scale_grad_by_std""": True, """scale""": 0.1, """eta""": 0.0, """t_grad_cutoff""": 2, """device""": """cpu""", } if __name__ == "__main__": UpperCAmelCase_ = """hopper-medium-v2""" UpperCAmelCase_ = gym.make(env_name) UpperCAmelCase_ = ValueGuidedRLPipeline.from_pretrained( """bglick13/hopper-medium-v2-value-function-hor32""", env=env, ) env.seed(0) UpperCAmelCase_ = env.reset() UpperCAmelCase_ = 0 UpperCAmelCase_ = 0 UpperCAmelCase_ = 1_0_0_0 UpperCAmelCase_ = [obs.copy()] try: for t in tqdm.tqdm(range(T)): # call the policy UpperCAmelCase_ = pipeline(obs, planning_horizon=3_2) # execute action in environment UpperCAmelCase_ ,UpperCAmelCase_ ,UpperCAmelCase_ ,UpperCAmelCase_ = env.step(denorm_actions) UpperCAmelCase_ = env.get_normalized_score(total_reward) # update return total_reward += reward total_score += score print( f'Step: {t}, Reward: {reward}, Total Reward: {total_reward}, Score: {score}, Total Score:' f' {total_score}' ) # save observations for rendering rollout.append(next_observation.copy()) UpperCAmelCase_ = next_observation except KeyboardInterrupt: pass print(f'Total reward: {total_reward}')
2
import requests from bsa import BeautifulSoup def SCREAMING_SNAKE_CASE_ ( _snake_case :str = "AAPL" ) -> str: _A = F'''https://in.finance.yahoo.com/quote/{symbol}?s={symbol}''' _A = BeautifulSoup(requests.get(_snake_case ).text , '''html.parser''' ) _A = '''My(6px) Pos(r) smartphone_Mt(6px)''' return soup.find('''div''' , class_=class_ ).find('''span''' ).text if __name__ == "__main__": for symbol in "AAPL AMZN IBM GOOG MSFT ORCL".split(): print(f'Current {symbol:<4} stock price is {stock_price(symbol):>8}')
2
1
import secrets from random import shuffle from string import ascii_letters, ascii_lowercase, ascii_uppercase, digits, punctuation def SCREAMING_SNAKE_CASE_ ( _snake_case :int = 8 ) -> str: _A = ascii_letters + digits + punctuation return "".join(secrets.choice(_snake_case ) for _ in range(_snake_case ) ) def SCREAMING_SNAKE_CASE_ ( _snake_case :str , _snake_case :int ) -> str: # Password generator built from the random_number, random_letters, and # random_characters helper functions # Put your code here... i -= len(_snake_case ) _A = i // 3 _A = i % 3 # chars = chars_incl + random_letters(ascii_letters, i / 3 + remainder) + # random_number(digits, i / 3) + random_characters(punctuation, i / 3) _A = ( chars_incl + random(_snake_case , quotient + remainder ) + random(_snake_case , _snake_case ) + random(_snake_case , _snake_case ) ) _A = list(_snake_case ) shuffle(_snake_case ) return "".join(_snake_case ) # random is a generalised function for letters, characters and numbers def SCREAMING_SNAKE_CASE_ ( _snake_case :str , _snake_case :int ) -> str: return "".join(secrets.choice(_snake_case ) for _ in range(_snake_case ) ) def SCREAMING_SNAKE_CASE_ ( _snake_case :Optional[int] , _snake_case :Union[str, Any] ) -> str: pass # Put your code here... def SCREAMING_SNAKE_CASE_ ( _snake_case :Tuple , _snake_case :Union[str, Any] ) -> str: pass # Put your code here... def SCREAMING_SNAKE_CASE_ ( _snake_case :int , _snake_case :Dict ) -> str: pass # Put your code here... def SCREAMING_SNAKE_CASE_ ( _snake_case :str , _snake_case :int = 8 ) -> bool: if len(_snake_case ) < min_length: # Your Password must be at least 8 characters long return False _A = any(char in ascii_uppercase for char in password ) _A = any(char in ascii_lowercase for char in password ) _A = any(char in digits for char in password ) _A = any(char in punctuation for char in password ) return upper and lower and num and spec_char # Passwords should contain uppercase, lowercase, # numbers, and special characters def SCREAMING_SNAKE_CASE_ ( ) -> Any: _A = int(input('''Please indicate the max length of your password: ''' ).strip() ) _A = input( '''Please indicate the characters that must be in your password: ''' ).strip() print('''Password generated:''' , password_generator(_snake_case ) ) print( '''Alternative Password generated:''' , alternative_password_generator(_snake_case , _snake_case ) , ) print('''[If you are thinking of using this password, you had better save it.]''' ) if __name__ == "__main__": main()
2
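A compact standalone variant of the generator above: draw every character from the full alphabet with secrets, then verify the four character classes the strength check above looks for:

import secrets
from string import ascii_lowercase, ascii_uppercase, digits, punctuation

ALPHABET = ascii_lowercase + ascii_uppercase + digits + punctuation

def generate(length=16):
    return "".join(secrets.choice(ALPHABET) for _ in range(length))

def is_strong(password, min_length=8):
    classes = (ascii_uppercase, ascii_lowercase, digits, punctuation)
    return len(password) >= min_length and all(
        any(char in cls for char in password) for cls in classes
    )

pw = generate()
print(pw, is_strong(pw))  # the check can occasionally fail by chance at short lengths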
from graphs.minimum_spanning_tree_kruskal import kruskal def SCREAMING_SNAKE_CASE_ ( ) -> Tuple: _A = 9 _A = [ [0, 1, 4], [0, 7, 8], [1, 2, 8], [7, 8, 7], [7, 6, 1], [2, 8, 2], [8, 6, 6], [2, 3, 7], [2, 5, 4], [6, 5, 2], [3, 5, 14], [3, 4, 9], [5, 4, 10], [1, 7, 11], ] _A = kruskal(_snake_case , _snake_case ) _A = [ [7, 6, 1], [2, 8, 2], [6, 5, 2], [0, 1, 4], [2, 5, 4], [2, 3, 7], [0, 7, 8], [3, 4, 9], ] assert sorted(_snake_case ) == sorted(_snake_case )
2
1
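Since the test above only asserts against the imported kruskal, here is a self-contained sketch of the algorithm it exercises, with the same (num_nodes, edge list) calling convention: sort edges by weight and join components via union-find:

def kruskal(num_nodes, edges):
    parent = list(range(num_nodes))

    def find(u):
        while parent[u] != u:
            parent[u] = parent[parent[u]]  # path halving
            u = parent[u]
        return u

    mst = []
    for u, v, weight in sorted(edges, key=lambda edge: edge[2]):
        root_u, root_v = find(u), find(v)
        if root_u != root_v:  # edge connects two different components
            parent[root_u] = root_v
            mst.append([u, v, weight])
    return mst

assert sorted(kruskal(3, [[0, 1, 4], [0, 2, 3], [1, 2, 1]])) == [[0, 2, 3], [1, 2, 1]]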
from __future__ import annotations UpperCAmelCase_ = [ [-1, 0], # left [0, -1], # down [1, 0], # right [0, 1], # up ] def SCREAMING_SNAKE_CASE_ ( _snake_case :list[list[int]] , _snake_case :list[int] , _snake_case :list[int] , _snake_case :int , _snake_case :list[list[int]] , ) -> tuple[list[list[int]], list[list[int]]]: _A = [ [0 for col in range(len(grid[0] ) )] for row in range(len(_snake_case ) ) ] # the reference grid _A = 1 _A = [ [0 for col in range(len(grid[0] ) )] for row in range(len(_snake_case ) ) ] # the action grid _A = init[0] _A = init[1] _A = 0 _A = g + heuristic[x][y] # cost from starting cell to destination cell _A = [[f, g, x, y]] _A = False # flag that is set when search is complete _A = False # flag set if we can't find expand while not found and not resign: if len(_snake_case ) == 0: raise ValueError('''Algorithm is unable to find solution''' ) else: # to choose the least costliest action so as to move closer to the goal cell.sort() cell.reverse() _A = cell.pop() _A = next_cell[2] _A = next_cell[3] _A = next_cell[1] if x == goal[0] and y == goal[1]: _A = True else: for i in range(len(_snake_case ) ): # to try out different valid actions _A = x + DIRECTIONS[i][0] _A = y + DIRECTIONS[i][1] if xa >= 0 and xa < len(_snake_case ) and ya >= 0 and ya < len(grid[0] ): if closed[xa][ya] == 0 and grid[xa][ya] == 0: _A = g + cost _A = ga + heuristic[xa][ya] cell.append([fa, ga, xa, ya] ) _A = 1 _A = i _A = [] _A = goal[0] _A = goal[1] invpath.append([x, y] ) # we get the reverse path from here while x != init[0] or y != init[1]: _A = x - DIRECTIONS[action[x][y]][0] _A = y - DIRECTIONS[action[x][y]][1] _A = xa _A = ya invpath.append([x, y] ) _A = [] for i in range(len(_snake_case ) ): path.append(invpath[len(_snake_case ) - 1 - i] ) return path, action if __name__ == "__main__": UpperCAmelCase_ = [ [0, 1, 0, 0, 0, 0], [0, 1, 0, 0, 0, 0], # 0 are free path whereas 1's are obstacles [0, 1, 0, 0, 0, 0], [0, 1, 0, 0, 1, 0], [0, 0, 0, 0, 1, 0], ] UpperCAmelCase_ = [0, 0] # all coordinates are given in format [y,x] UpperCAmelCase_ = [len(grid) - 1, len(grid[0]) - 1] UpperCAmelCase_ = 1 # the cost map which pushes the path closer to the goal UpperCAmelCase_ = [[0 for row in range(len(grid[0]))] for col in range(len(grid))] for i in range(len(grid)): for j in range(len(grid[0])): UpperCAmelCase_ = abs(i - goal[0]) + abs(j - goal[1]) if grid[i][j] == 1: # added extra penalty in the heuristic map UpperCAmelCase_ = 9_9 UpperCAmelCase_ ,UpperCAmelCase_ = search(grid, init, goal, cost, heuristic) print("""ACTION MAP""") for i in range(len(action)): print(action[i]) for i in range(len(path)): print(path[i])
2
def SCREAMING_SNAKE_CASE_ ( _snake_case :int ) -> int: if not isinstance(_snake_case , _snake_case ): raise ValueError('''Input must be an integer''' ) if input_num <= 0: raise ValueError('''Input must be positive''' ) return sum( divisor for divisor in range(1 , input_num // 2 + 1 ) if input_num % divisor == 0 ) if __name__ == "__main__": import doctest doctest.testmod()
2
1
def SCREAMING_SNAKE_CASE_ ( _snake_case :int ) -> list: _A = int(_snake_case ) if n_element < 1: raise ValueError('''n_element should be a positive number''' ) _A = [1] _A , _A , _A = (0, 0, 0) _A = 1 while index < n_element: while hamming_list[i] * 2 <= hamming_list[-1]: i += 1 while hamming_list[j] * 3 <= hamming_list[-1]: j += 1 while hamming_list[k] * 5 <= hamming_list[-1]: k += 1 hamming_list.append( min(hamming_list[i] * 2 , hamming_list[j] * 3 , hamming_list[k] * 5 ) ) index += 1 return hamming_list if __name__ == "__main__": UpperCAmelCase_ = input("""Enter the last number (nth term) of the Hamming Number Series: """) print("""Formula of Hamming Number Series => 2^i * 3^j * 5^k""") UpperCAmelCase_ = hamming(int(n)) print("""-----------------------------------------------------""") print(f'The list of the first n Hamming numbers is: {hamming_numbers}') print("""-----------------------------------------------------""")
2
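The row above uses the classic three-pointer merge for Hamming numbers; a readable, self-contained variant (names are mine, not from the row) advances every pointer that produced the minimum, which also avoids duplicates:

def hamming(n: int) -> list:
    nums = [1]
    i = j = k = 0
    while len(nums) < n:
        nxt = min(nums[i] * 2, nums[j] * 3, nums[k] * 5)
        nums.append(nxt)
        # Advance each pointer whose candidate was used, so values like
        # 6 (= 2*3 = 3*2) are emitted only once.
        if nxt == nums[i] * 2:
            i += 1
        if nxt == nums[j] * 3:
            j += 1
        if nxt == nums[k] * 5:
            k += 1
    return nums

print(hamming(10))  # [1, 2, 3, 4, 5, 6, 8, 9, 10, 12]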
UpperCAmelCase_ = 2_5_6 # Alphabet size (number of possible byte values) UpperCAmelCase_ = 1_0_0_0_0_0_3 # Prime modulus used for hashing def SCREAMING_SNAKE_CASE_ ( _snake_case :str , _snake_case :str ) -> bool: _A = len(_snake_case ) _A = len(_snake_case ) if p_len > t_len: return False _A = 0 _A = 0 _A = 1 # Calculating the hash of pattern and substring of text for i in range(_snake_case ): _A = (ord(pattern[i] ) + p_hash * alphabet_size) % modulus _A = (ord(text[i] ) + text_hash * alphabet_size) % modulus if i == p_len - 1: continue _A = (modulus_power * alphabet_size) % modulus for i in range(0 , t_len - p_len + 1 ): if text_hash == p_hash and text[i : i + p_len] == pattern: return True if i == t_len - p_len: continue # Update the rolling hash (https://en.wikipedia.org/wiki/Rolling_hash) _A = ( (text_hash - ord(text[i] ) * modulus_power) * alphabet_size + ord(text[i + p_len] ) ) % modulus return False def SCREAMING_SNAKE_CASE_ ( ) -> None: # Test 1) _A = '''abc1abc12''' _A = '''alskfjaldsabc1abc1abc12k23adsfabcabc''' _A = '''alskfjaldsk23adsfabcabc''' assert rabin_karp(_snake_case , _snake_case ) and not rabin_karp(_snake_case , _snake_case ) # Test 2) _A = '''ABABX''' _A = '''ABABZABABYABABX''' assert rabin_karp(_snake_case , _snake_case ) # Test 3) _A = '''AAAB''' _A = '''ABAAAAAB''' assert rabin_karp(_snake_case , _snake_case ) # Test 4) _A = '''abcdabcy''' _A = '''abcxabcdabxabcdabcdabcy''' assert rabin_karp(_snake_case , _snake_case ) # Test 5) _A = '''Lü''' _A = '''Lüsai''' assert rabin_karp(_snake_case , _snake_case ) _A = '''Lue''' assert not rabin_karp(_snake_case , _snake_case ) print('''Success.''' ) if __name__ == "__main__": test_rabin_karp()
2
1
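The scan above re-derives each window hash in O(1); here is a minimal standalone sketch of that rolling update (the constants match the ones the row uses; the helper name is illustrative):

BASE, MOD = 256, 1_000_003

def window_hashes(text: str, size: int) -> list:
    h, hashes = 0, []
    power = pow(BASE, size - 1, MOD)  # BASE**(size-1) mod MOD
    for i, ch in enumerate(text):
        h = (h * BASE + ord(ch)) % MOD
        if i >= size - 1:
            hashes.append(h)
            # Drop the character leaving the window before the next shift.
            h = (h - ord(text[i - size + 1]) * power) % MOD
    return hashes

assert window_hashes("abcabc", 3)[0] == window_hashes("abcabc", 3)[3]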
import copy from typing import Dict, List, Optional from ...configuration_utils import PretrainedConfig from ...utils import logging from ..auto import CONFIG_MAPPING UpperCAmelCase_ = { """facebook/mask2former-swin-small-coco-instance""": ( """https://huggingface.co/facebook/mask2former-swin-small-coco-instance/blob/main/config.json""" ) # See all Mask2Former models at https://huggingface.co/models?filter=mask2former } UpperCAmelCase_ = logging.get_logger(__name__) class lowerCamelCase__ ( _A): """simple docstring""" a__ : Any = "mask2former" a__ : Tuple = ["swin"] a__ : Optional[int] = {"hidden_size": "hidden_dim"} def __init__( self : Any , __lowerCAmelCase : Optional[Dict] = None , __lowerCAmelCase : int = 2_56 , __lowerCAmelCase : int = 2_56 , __lowerCAmelCase : int = 2_56 , __lowerCAmelCase : int = 10_24 , __lowerCAmelCase : str = "relu" , __lowerCAmelCase : int = 6 , __lowerCAmelCase : int = 10 , __lowerCAmelCase : int = 8 , __lowerCAmelCase : float = 0.0 , __lowerCAmelCase : int = 20_48 , __lowerCAmelCase : bool = False , __lowerCAmelCase : bool = False , __lowerCAmelCase : int = 4 , __lowerCAmelCase : int = 2_55 , __lowerCAmelCase : int = 1_00 , __lowerCAmelCase : float = 0.1 , __lowerCAmelCase : float = 2.0 , __lowerCAmelCase : float = 5.0 , __lowerCAmelCase : float = 5.0 , __lowerCAmelCase : int = 1_25_44 , __lowerCAmelCase : float = 3.0 , __lowerCAmelCase : float = 0.75 , __lowerCAmelCase : float = 0.02 , __lowerCAmelCase : float = 1.0 , __lowerCAmelCase : bool = True , __lowerCAmelCase : List[int] = [4, 8, 16, 32] , __lowerCAmelCase : bool = None , **__lowerCAmelCase : Any , ) -> Dict: if backbone_config is None: logger.info('''`backbone_config` is `None`. Initializing the config with the default `Swin` backbone.''' ) _A = CONFIG_MAPPING['''swin''']( image_size=2_24 , in_channels=3 , patch_size=4 , embed_dim=96 , depths=[2, 2, 18, 2] , num_heads=[3, 6, 12, 24] , window_size=7 , drop_path_rate=0.3 , use_absolute_embeddings=__lowerCAmelCase , out_features=['''stage1''', '''stage2''', '''stage3''', '''stage4'''] , ) if isinstance(__lowerCAmelCase , __lowerCAmelCase ): _A = backbone_config.pop('''model_type''' ) _A = CONFIG_MAPPING[backbone_model_type] _A = config_class.from_dict(__lowerCAmelCase ) # verify that the backbone is supported if backbone_config.model_type not in self.backbones_supported: logger.warning_once( f'''Backbone {backbone_config.model_type} is not a supported model and may not be compatible with Mask2Former. ''' f'''Supported model types: {','.join(self.backbones_supported )}''' ) _A = backbone_config _A = feature_size _A = mask_feature_size _A = hidden_dim _A = encoder_feedforward_dim _A = activation_function _A = encoder_layers _A = decoder_layers _A = num_attention_heads _A = dropout _A = dim_feedforward _A = pre_norm _A = enforce_input_projection _A = common_stride _A = ignore_value _A = num_queries _A = no_object_weight _A = class_weight _A = mask_weight _A = dice_weight _A = train_num_points _A = oversample_ratio _A = importance_sample_ratio _A = init_std _A = init_xavier_std _A = use_auxiliary_loss _A = feature_strides _A = output_auxiliary_logits _A = decoder_layers super().__init__(**__lowerCAmelCase ) @classmethod def snake_case_ ( cls : str , __lowerCAmelCase : PretrainedConfig , **__lowerCAmelCase : str ) -> int: return cls( backbone_config=__lowerCAmelCase , **__lowerCAmelCase , ) def snake_case_ ( self : Dict ) -> Dict[str, any]: _A = copy.deepcopy(self.__dict__ ) _A = self.backbone_config.to_dict() _A = self.__class__.model_type return output
2
import json import os from typing import Dict, List, Optional, Tuple from ...tokenization_utils import PreTrainedTokenizer from ...utils import logging UpperCAmelCase_ = logging.get_logger(__name__) UpperCAmelCase_ = { """vocab_file""": """vocab.json""", """tokenizer_config_file""": """tokenizer_config.json""", """merges_file""": """merges.txt""", } UpperCAmelCase_ = { """vocab_file""": { """facebook/s2t-wav2vec2-large-en-de""": ( """https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/vocab.json""" ), }, """tokenizer_config_file""": { """facebook/s2t-wav2vec2-large-en-de""": ( """https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/tokenizer_config.json""" ), }, """merges_file""": { """facebook/s2t-wav2vec2-large-en-de""": ( """https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/merges.txt""" ), }, } UpperCAmelCase_ = """</w>""" UpperCAmelCase_ = """@@ """ def SCREAMING_SNAKE_CASE_ ( _snake_case :Optional[Any] ) -> List[str]: _A = set() _A = word[0] for char in word[1:]: pairs.add((prev_char, char) ) _A = char return pairs # Speech2Text2 has no max input length UpperCAmelCase_ = {"""facebook/s2t-wav2vec2-large-en-de""": 1_0_2_4} class lowerCamelCase__ ( _A): """simple docstring""" a__ : Dict = VOCAB_FILES_NAMES a__ : str = PRETRAINED_VOCAB_FILES_MAP a__ : Optional[Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES a__ : List[Any] = ["input_ids", "attention_mask"] def __init__( self : Optional[Any] , __lowerCAmelCase : Optional[int] , __lowerCAmelCase : List[str]="<s>" , __lowerCAmelCase : Tuple="<pad>" , __lowerCAmelCase : Optional[Any]="</s>" , __lowerCAmelCase : Dict="<unk>" , __lowerCAmelCase : Union[str, Any]=False , __lowerCAmelCase : Optional[int]=None , **__lowerCAmelCase : str , ) -> Dict: super().__init__( unk_token=__lowerCAmelCase , bos_token=__lowerCAmelCase , eos_token=__lowerCAmelCase , pad_token=__lowerCAmelCase , do_lower_case=__lowerCAmelCase , **__lowerCAmelCase , ) _A = do_lower_case with open(__lowerCAmelCase , encoding='''utf-8''' ) as vocab_handle: _A = json.load(__lowerCAmelCase ) _A = {v: k for k, v in self.encoder.items()} if merges_file is None: logger.info(f'''No merges files provided. 
{self.__class__.__name__} can only be used for decoding.''' ) _A = None _A = None else: with open(__lowerCAmelCase , encoding='''utf-8''' ) as merges_handle: _A = merges_handle.read().split('''\n''' )[:-1] _A = [tuple(merge.split()[:2] ) for merge in merges] _A = dict(zip(__lowerCAmelCase , range(len(__lowerCAmelCase ) ) ) ) _A = {} @property def snake_case_ ( self : List[str] ) -> int: return len(self.decoder ) def snake_case_ ( self : Dict ) -> Dict: return dict(self.encoder , **self.added_tokens_encoder ) def snake_case_ ( self : Union[str, Any] , __lowerCAmelCase : Any ) -> Union[str, Any]: _A = tuple(token[:-1] ) + (token[-1] + BPE_TOKEN_MERGES,) if token in self.cache: return self.cache[token] _A = get_pairs(__lowerCAmelCase ) if not pairs: return token while True: _A = min(__lowerCAmelCase , key=lambda __lowerCAmelCase : self.bpe_ranks.get(__lowerCAmelCase , float('''inf''' ) ) ) if bigram not in self.bpe_ranks: break _A , _A = bigram _A = [] _A = 0 while i < len(__lowerCAmelCase ): try: _A = word.index(__lowerCAmelCase , __lowerCAmelCase ) except ValueError: new_word.extend(word[i:] ) break else: new_word.extend(word[i:j] ) _A = j if word[i] == first and i < len(__lowerCAmelCase ) - 1 and word[i + 1] == second: new_word.append(first + second ) i += 2 else: new_word.append(word[i] ) i += 1 _A = tuple(__lowerCAmelCase ) _A = new_word if len(__lowerCAmelCase ) == 1: break else: _A = get_pairs(__lowerCAmelCase ) _A = ''' '''.join(__lowerCAmelCase ) if word == "\n " + BPE_TOKEN_MERGES: _A = '''\n''' + BPE_TOKEN_MERGES if word.endswith(__lowerCAmelCase ): _A = word.replace(__lowerCAmelCase , '''''' ) _A = word.replace(''' ''' , __lowerCAmelCase ) _A = word return word def snake_case_ ( self : Optional[Any] , __lowerCAmelCase : Tuple ) -> Optional[int]: if self.bpe_ranks is None: raise ValueError( '''This tokenizer was instantiated without a `merges.txt` file, so''' ''' that it can only be used for decoding, not for encoding.''' '''Make sure to provide `merges.txt` file at instantiation to enable ''' '''encoding.''' ) if self.do_lower_case: _A = text.lower() _A = text.split() _A = [] for token in text: if token: split_tokens.extend(list(self.bpe(__lowerCAmelCase ).split(''' ''' ) ) ) return split_tokens def snake_case_ ( self : List[Any] , __lowerCAmelCase : str ) -> int: return self.encoder.get(__lowerCAmelCase , self.encoder.get(self.unk_token ) ) def snake_case_ ( self : str , __lowerCAmelCase : int ) -> str: _A = self.decoder.get(__lowerCAmelCase , self.unk_token ) return result def snake_case_ ( self : List[str] , __lowerCAmelCase : List[str] ) -> str: _A = ''' '''.join(__lowerCAmelCase ) # make sure @@ tokens are concatenated _A = ''''''.join(string.split(__lowerCAmelCase ) ) return string def snake_case_ ( self : Tuple , __lowerCAmelCase : str , __lowerCAmelCase : Optional[str] = None ) -> Tuple[str]: if not os.path.isdir(__lowerCAmelCase ): logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' ) return _A = os.path.join( __lowerCAmelCase , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] ) _A = os.path.join( __lowerCAmelCase , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''merges_file'''] ) with open(__lowerCAmelCase , '''w''' , encoding='''utf-8''' ) as f: f.write(json.dumps(self.encoder , indent=2 , sort_keys=__lowerCAmelCase , ensure_ascii=__lowerCAmelCase ) + '''\n''' ) _A = 0 if self.bpe_ranks is None: return (vocab_file,) with open(__lowerCAmelCase , '''w''' , 
encoding='''utf-8''' ) as writer: for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda __lowerCAmelCase : __lowerCAmelCase[1] ): if index != token_index: logger.warning( f'''Saving vocabulary to {merges_file}: BPE merge indices are not consecutive.''' ''' Please check that the tokenizer is not corrupted!''' ) _A = token_index writer.write(''' '''.join(__lowerCAmelCase ) + '''\n''' ) index += 1 return (vocab_file, merges_file)
2
1
import pandas as pd from matplotlib import pyplot as plt from sklearn.linear_model import LinearRegression # Splitting the dataset into the Training set and Test set from sklearn.model_selection import train_test_split # Fitting Polynomial Regression to the dataset from sklearn.preprocessing import PolynomialFeatures # Importing the dataset UpperCAmelCase_ = pd.read_csv( """https://s3.us-west-2.amazonaws.com/public.gamelab.fun/dataset/""" """position_salaries.csv""" ) UpperCAmelCase_ = dataset.iloc[:, 1:2].values UpperCAmelCase_ = dataset.iloc[:, 2].values UpperCAmelCase_ ,UpperCAmelCase_ ,UpperCAmelCase_ ,UpperCAmelCase_ = train_test_split(X, y, test_size=0.2, random_state=0) UpperCAmelCase_ = PolynomialFeatures(degree=4) UpperCAmelCase_ = poly_reg.fit_transform(X) UpperCAmelCase_ = LinearRegression() pol_reg.fit(X_poly, y) def SCREAMING_SNAKE_CASE_ ( ) -> Union[str, Any]: plt.scatter(_snake_case , _snake_case , color='''red''' ) plt.plot(_snake_case , pol_reg.predict(poly_reg.fit_transform(_snake_case ) ) , color='''blue''' ) plt.title('''Truth or Bluff (Polynomial Regression)''' ) plt.xlabel('''Position level''' ) plt.ylabel('''Salary''' ) plt.show() if __name__ == "__main__": viz_polynomial() # Predicting a new result with Polynomial Regression pol_reg.predict(poly_reg.fit_transform([[5.5]])) # output should be 132148.43750003
2
from __future__ import annotations from sys import maxsize from typing import Generic, TypeVar UpperCAmelCase_ = TypeVar("""T""") def SCREAMING_SNAKE_CASE_ ( _snake_case :int ) -> int: return (position - 1) // 2 def SCREAMING_SNAKE_CASE_ ( _snake_case :int ) -> int: return (2 * position) + 1 def SCREAMING_SNAKE_CASE_ ( _snake_case :int ) -> int: return (2 * position) + 2 class lowerCamelCase__ ( Generic[T]): """simple docstring""" def __init__( self : Optional[int] ) -> None: _A = [] _A = {} _A = 0 def __len__( self : str ) -> int: return self.elements def __repr__( self : Optional[int] ) -> str: return str(self.heap ) def snake_case_ ( self : str ) -> bool: # Check if the priority queue is empty return self.elements == 0 def snake_case_ ( self : Optional[int] , __lowerCAmelCase : T , __lowerCAmelCase : int ) -> None: # Add an element with given priority to the queue self.heap.append((elem, weight) ) _A = self.elements self.elements += 1 self._bubble_up(__lowerCAmelCase ) def snake_case_ ( self : Tuple ) -> T: # Remove and return the element with lowest weight (highest priority) if self.elements > 1: self._swap_nodes(0 , self.elements - 1 ) _A , _A = self.heap.pop() del self.position_map[elem] self.elements -= 1 if self.elements > 0: _A , _A = self.heap[0] self._bubble_down(__lowerCAmelCase ) return elem def snake_case_ ( self : int , __lowerCAmelCase : T , __lowerCAmelCase : int ) -> None: # Update the weight of the given key _A = self.position_map[elem] _A = (elem, weight) if position > 0: _A = get_parent_position(__lowerCAmelCase ) _A , _A = self.heap[parent_position] if parent_weight > weight: self._bubble_up(__lowerCAmelCase ) else: self._bubble_down(__lowerCAmelCase ) else: self._bubble_down(__lowerCAmelCase ) def snake_case_ ( self : Optional[Any] , __lowerCAmelCase : T ) -> None: # Place a node at the proper position (upward movement) [to be used internally # only] _A = self.position_map[elem] if curr_pos == 0: return None _A = get_parent_position(__lowerCAmelCase ) _A , _A = self.heap[curr_pos] _A , _A = self.heap[parent_position] if parent_weight > weight: self._swap_nodes(__lowerCAmelCase , __lowerCAmelCase ) return self._bubble_up(__lowerCAmelCase ) return None def snake_case_ ( self : Dict , __lowerCAmelCase : T ) -> None: # Place a node at the proper position (downward movement) [to be used # internally only] _A = self.position_map[elem] _A , _A = self.heap[curr_pos] _A = get_child_left_position(__lowerCAmelCase ) _A = get_child_right_position(__lowerCAmelCase ) if child_left_position < self.elements and child_right_position < self.elements: _A , _A = self.heap[child_left_position] _A , _A = self.heap[child_right_position] if child_right_weight < child_left_weight and child_right_weight < weight: self._swap_nodes(__lowerCAmelCase , __lowerCAmelCase ) return self._bubble_down(__lowerCAmelCase ) if child_left_position < self.elements: _A , _A = self.heap[child_left_position] if child_left_weight < weight: self._swap_nodes(__lowerCAmelCase , __lowerCAmelCase ) return self._bubble_down(__lowerCAmelCase ) else: return None if child_right_position < self.elements: _A , _A = self.heap[child_right_position] if child_right_weight < weight: self._swap_nodes(__lowerCAmelCase , __lowerCAmelCase ) return self._bubble_down(__lowerCAmelCase ) return None def snake_case_ ( self : List[str] , __lowerCAmelCase : int , __lowerCAmelCase : int ) -> None: # Swap the nodes at the given positions _A = self.heap[nodea_pos][0] _A = self.heap[nodea_pos][0] _A , _A = ( self.heap[nodea_pos], 
self.heap[nodea_pos], ) _A = nodea_pos _A = nodea_pos class lowerCamelCase__ ( Generic[T]): """simple docstring""" def __init__( self : str ) -> None: _A = {} _A = 0 def __repr__( self : str ) -> str: return str(self.connections ) def __len__( self : Dict ) -> int: return self.nodes def snake_case_ ( self : Any , __lowerCAmelCase : T ) -> None: # Add a node in the graph if it is not in the graph if node not in self.connections: _A = {} self.nodes += 1 def snake_case_ ( self : str , __lowerCAmelCase : T , __lowerCAmelCase : T , __lowerCAmelCase : int ) -> None: # Add an edge between 2 nodes in the graph self.add_node(__lowerCAmelCase ) self.add_node(__lowerCAmelCase ) _A = weight _A = weight def SCREAMING_SNAKE_CASE_ ( _snake_case :GraphUndirectedWeighted[T] , ) -> tuple[dict[T, int], dict[T, T | None]]: _A = {node: maxsize for node in graph.connections} _A = {node: None for node in graph.connections} _A = MinPriorityQueue() for node, weight in dist.items(): priority_queue.push(_snake_case , _snake_case ) if priority_queue.is_empty(): return dist, parent # initialization _A = priority_queue.extract_min() _A = 0 for neighbour in graph.connections[node]: if dist[neighbour] > dist[node] + graph.connections[node][neighbour]: _A = dist[node] + graph.connections[node][neighbour] priority_queue.update_key(_snake_case , dist[neighbour] ) _A = node # running prim's algorithm while not priority_queue.is_empty(): _A = priority_queue.extract_min() for neighbour in graph.connections[node]: if dist[neighbour] > dist[node] + graph.connections[node][neighbour]: _A = dist[node] + graph.connections[node][neighbour] priority_queue.update_key(_snake_case , dist[neighbour] ) _A = node return dist, parent
2
1
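The row above implements an explicit decrease-key priority queue for Prim's algorithm; a compact, self-contained alternative gets the same MST cost from heapq with lazy deletion (push duplicates, skip visited nodes), at the price of a larger heap. Names and the sample graph are mine:

import heapq

def prim_mst_weight(adj: dict, start: str) -> int:
    seen, total, heap = set(), 0, [(0, start)]
    while heap:
        w, u = heapq.heappop(heap)
        if u in seen:
            continue  # stale entry left behind by lazy deletion
        seen.add(u)
        total += w
        for v, weight in adj[u].items():
            if v not in seen:
                heapq.heappush(heap, (weight, v))
    return total

graph = {"a": {"b": 3, "c": 10}, "b": {"a": 3, "c": 2}, "c": {"a": 10, "b": 2}}
print(prim_mst_weight(graph, "a"))  # 5, via edges a-b (3) and b-c (2)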
import json import os import shutil import tempfile import unittest import numpy as np import pytest from transformers import BertTokenizer, BertTokenizerFast from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES from transformers.testing_utils import require_vision from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available if is_vision_available(): from PIL import Image from transformers import AlignProcessor, EfficientNetImageProcessor @require_vision class lowerCamelCase__ ( unittest.TestCase): """simple docstring""" def snake_case_ ( self : Tuple ) -> Optional[int]: _A = tempfile.mkdtemp() _A = [ '''[UNK]''', '''[CLS]''', '''[SEP]''', '''[PAD]''', '''[MASK]''', '''want''', '''##want''', '''##ed''', '''wa''', '''un''', '''runn''', '''##ing''', ''',''', '''low''', '''lowest''', ] _A = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] ) with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer: vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) ) _A = { '''do_resize''': True, '''size''': 20, '''do_center_crop''': True, '''crop_size''': 18, '''do_normalize''': True, '''image_mean''': [0.4814_5466, 0.457_8275, 0.4082_1073], '''image_std''': [0.2686_2954, 0.2613_0258, 0.2757_7711], } _A = os.path.join(self.tmpdirname , __lowerCAmelCase ) with open(self.image_processor_file , '''w''' , encoding='''utf-8''' ) as fp: json.dump(__lowerCAmelCase , __lowerCAmelCase ) def snake_case_ ( self : Dict , **__lowerCAmelCase : int ) -> Optional[int]: return BertTokenizer.from_pretrained(self.tmpdirname , **__lowerCAmelCase ) def snake_case_ ( self : str , **__lowerCAmelCase : Optional[Any] ) -> Tuple: return BertTokenizerFast.from_pretrained(self.tmpdirname , **__lowerCAmelCase ) def snake_case_ ( self : Tuple , **__lowerCAmelCase : str ) -> Union[str, Any]: return EfficientNetImageProcessor.from_pretrained(self.tmpdirname , **__lowerCAmelCase ) def snake_case_ ( self : Optional[Any] ) -> Optional[int]: shutil.rmtree(self.tmpdirname ) def snake_case_ ( self : int ) -> Optional[Any]: _A = [np.random.randint(2_55 , size=(3, 30, 4_00) , dtype=np.uinta )] _A = [Image.fromarray(np.moveaxis(__lowerCAmelCase , 0 , -1 ) ) for x in image_inputs] return image_inputs def snake_case_ ( self : Dict ) -> List[str]: _A = self.get_tokenizer() _A = self.get_rust_tokenizer() _A = self.get_image_processor() _A = AlignProcessor(tokenizer=__lowerCAmelCase , image_processor=__lowerCAmelCase ) processor_slow.save_pretrained(self.tmpdirname ) _A = AlignProcessor.from_pretrained(self.tmpdirname , use_fast=__lowerCAmelCase ) _A = AlignProcessor(tokenizer=__lowerCAmelCase , image_processor=__lowerCAmelCase ) processor_fast.save_pretrained(self.tmpdirname ) _A = AlignProcessor.from_pretrained(self.tmpdirname ) self.assertEqual(processor_slow.tokenizer.get_vocab() , tokenizer_slow.get_vocab() ) self.assertEqual(processor_fast.tokenizer.get_vocab() , tokenizer_fast.get_vocab() ) self.assertEqual(tokenizer_slow.get_vocab() , tokenizer_fast.get_vocab() ) self.assertIsInstance(processor_slow.tokenizer , __lowerCAmelCase ) self.assertIsInstance(processor_fast.tokenizer , __lowerCAmelCase ) self.assertEqual(processor_slow.image_processor.to_json_string() , image_processor.to_json_string() ) self.assertEqual(processor_fast.image_processor.to_json_string() , image_processor.to_json_string() ) self.assertIsInstance(processor_slow.image_processor , __lowerCAmelCase ) self.assertIsInstance(processor_fast.image_processor , __lowerCAmelCase ) def snake_case_ ( 
self : List[Any] ) -> List[str]: _A = AlignProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() ) processor.save_pretrained(self.tmpdirname ) _A = self.get_tokenizer(bos_token='''(BOS)''' , eos_token='''(EOS)''' ) _A = self.get_image_processor(do_normalize=__lowerCAmelCase , padding_value=1.0 ) _A = AlignProcessor.from_pretrained( self.tmpdirname , bos_token='''(BOS)''' , eos_token='''(EOS)''' , do_normalize=__lowerCAmelCase , padding_value=1.0 ) self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() ) self.assertIsInstance(processor.tokenizer , __lowerCAmelCase ) self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() ) self.assertIsInstance(processor.image_processor , __lowerCAmelCase ) def snake_case_ ( self : str ) -> List[Any]: _A = self.get_image_processor() _A = self.get_tokenizer() _A = AlignProcessor(tokenizer=__lowerCAmelCase , image_processor=__lowerCAmelCase ) _A = self.prepare_image_inputs() _A = image_processor(__lowerCAmelCase , return_tensors='''np''' ) _A = processor(images=__lowerCAmelCase , return_tensors='''np''' ) for key in input_image_proc.keys(): self.assertAlmostEqual(input_image_proc[key].sum() , input_processor[key].sum() , delta=1E-2 ) def snake_case_ ( self : Union[str, Any] ) -> Dict: _A = self.get_image_processor() _A = self.get_tokenizer() _A = AlignProcessor(tokenizer=__lowerCAmelCase , image_processor=__lowerCAmelCase ) _A = '''lower newer''' _A = processor(text=__lowerCAmelCase ) _A = tokenizer(__lowerCAmelCase , padding='''max_length''' , max_length=64 ) for key in encoded_tok.keys(): self.assertListEqual(encoded_tok[key] , encoded_processor[key] ) def snake_case_ ( self : List[str] ) -> Any: _A = self.get_image_processor() _A = self.get_tokenizer() _A = AlignProcessor(tokenizer=__lowerCAmelCase , image_processor=__lowerCAmelCase ) _A = '''lower newer''' _A = self.prepare_image_inputs() _A = processor(text=__lowerCAmelCase , images=__lowerCAmelCase ) self.assertListEqual(list(inputs.keys() ) , ['''input_ids''', '''token_type_ids''', '''attention_mask''', '''pixel_values'''] ) # test if it raises when no input is passed with pytest.raises(__lowerCAmelCase ): processor() def snake_case_ ( self : Optional[Any] ) -> str: _A = self.get_image_processor() _A = self.get_tokenizer() _A = AlignProcessor(tokenizer=__lowerCAmelCase , image_processor=__lowerCAmelCase ) _A = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]] _A = processor.batch_decode(__lowerCAmelCase ) _A = tokenizer.batch_decode(__lowerCAmelCase ) self.assertListEqual(__lowerCAmelCase , __lowerCAmelCase ) def snake_case_ ( self : str ) -> str: _A = self.get_image_processor() _A = self.get_tokenizer() _A = AlignProcessor(tokenizer=__lowerCAmelCase , image_processor=__lowerCAmelCase ) _A = '''lower newer''' _A = self.prepare_image_inputs() _A = processor(text=__lowerCAmelCase , images=__lowerCAmelCase ) self.assertListEqual(list(inputs.keys() ) , processor.model_input_names )
2
import os from shutil import copyfile from typing import Any, Dict, List, Optional, Tuple import sentencepiece as spm from ...tokenization_utils import AddedToken, PreTrainedTokenizer from ...utils import logging UpperCAmelCase_ = logging.get_logger(__name__) UpperCAmelCase_ = """▁""" UpperCAmelCase_ = {"""vocab_file""": """sentencepiece.bpe.model""", """monolingual_vocab_file""": """dict.txt"""} UpperCAmelCase_ = { """vocab_file""": { """vinai/bartpho-syllable""": """https://huggingface.co/vinai/bartpho-syllable/resolve/main/sentencepiece.bpe.model""", }, """monolingual_vocab_file""": { """vinai/bartpho-syllable""": """https://huggingface.co/vinai/bartpho-syllable/resolve/main/dict.txt""", }, } UpperCAmelCase_ = {"""vinai/bartpho-syllable""": 1_0_2_4} class lowerCamelCase__ ( _A): """simple docstring""" a__ : int = VOCAB_FILES_NAMES a__ : Tuple = PRETRAINED_VOCAB_FILES_MAP a__ : Optional[int] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES a__ : Tuple = ["input_ids", "attention_mask"] def __init__( self : Union[str, Any] , __lowerCAmelCase : Dict , __lowerCAmelCase : List[str] , __lowerCAmelCase : Union[str, Any]="<s>" , __lowerCAmelCase : Dict="</s>" , __lowerCAmelCase : List[Any]="</s>" , __lowerCAmelCase : Optional[Any]="<s>" , __lowerCAmelCase : Tuple="<unk>" , __lowerCAmelCase : int="<pad>" , __lowerCAmelCase : Optional[Any]="<mask>" , __lowerCAmelCase : Optional[Dict[str, Any]] = None , **__lowerCAmelCase : Tuple , ) -> None: # Mask token behave like a normal word, i.e. include the space before it _A = AddedToken(__lowerCAmelCase , lstrip=__lowerCAmelCase , rstrip=__lowerCAmelCase ) if isinstance(__lowerCAmelCase , __lowerCAmelCase ) else mask_token _A = {} if sp_model_kwargs is None else sp_model_kwargs super().__init__( bos_token=__lowerCAmelCase , eos_token=__lowerCAmelCase , unk_token=__lowerCAmelCase , sep_token=__lowerCAmelCase , cls_token=__lowerCAmelCase , pad_token=__lowerCAmelCase , mask_token=__lowerCAmelCase , sp_model_kwargs=self.sp_model_kwargs , **__lowerCAmelCase , ) _A = vocab_file _A = monolingual_vocab_file _A = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(str(__lowerCAmelCase ) ) # Load the reduced vocab # Keep order of special tokens for backward compatibility _A = {} _A = 0 for token in [bos_token, pad_token, eos_token, unk_token, sep_token, cls_token]: if str(__lowerCAmelCase ) not in self.fairseq_tokens_to_ids: _A = cnt cnt += 1 with open(__lowerCAmelCase , '''r''' , encoding='''utf-8''' ) as f: for line in f.readlines(): _A = line.strip().split()[0] _A = len(self.fairseq_tokens_to_ids ) if str(__lowerCAmelCase ) not in self.fairseq_tokens_to_ids: _A = len(self.fairseq_tokens_to_ids ) _A = {v: k for k, v in self.fairseq_tokens_to_ids.items()} def __getstate__( self : Any ) -> List[Any]: _A = self.__dict__.copy() _A = None _A = self.sp_model.serialized_model_proto() return state def __setstate__( self : Union[str, Any] , __lowerCAmelCase : Dict ) -> List[Any]: _A = d # for backward compatibility if not hasattr(self , '''sp_model_kwargs''' ): _A = {} _A = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.LoadFromSerializedProto(self.sp_model_proto ) def snake_case_ ( self : Optional[Any] , __lowerCAmelCase : List[int] , __lowerCAmelCase : Optional[List[int]] = None ) -> List[int]: if token_ids_a is None: return [self.cls_token_id] + token_ids_a + [self.sep_token_id] _A = [self.cls_token_id] _A = [self.sep_token_id] return cls + token_ids_a + sep + sep + token_ids_a + sep def snake_case_ ( self : List[Any] , __lowerCAmelCase : 
List[int] , __lowerCAmelCase : Optional[List[int]] = None , __lowerCAmelCase : bool = False ) -> List[int]: if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=__lowerCAmelCase , token_ids_a=__lowerCAmelCase , already_has_special_tokens=__lowerCAmelCase ) if token_ids_a is None: return [1] + ([0] * len(__lowerCAmelCase )) + [1] return [1] + ([0] * len(__lowerCAmelCase )) + [1, 1] + ([0] * len(__lowerCAmelCase )) + [1] def snake_case_ ( self : Any , __lowerCAmelCase : List[int] , __lowerCAmelCase : Optional[List[int]] = None ) -> List[int]: _A = [self.sep_token_id] _A = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0] @property def snake_case_ ( self : Optional[int] ) -> Union[str, Any]: return len(self.fairseq_ids_to_tokens ) def snake_case_ ( self : Dict ) -> Optional[Any]: _A = {self.convert_ids_to_tokens(__lowerCAmelCase ): i for i in range(self.vocab_size )} vocab.update(self.added_tokens_encoder ) return vocab def snake_case_ ( self : List[str] , __lowerCAmelCase : str ) -> List[str]: return self.sp_model.encode(__lowerCAmelCase , out_type=__lowerCAmelCase ) def snake_case_ ( self : str , __lowerCAmelCase : Optional[Any] ) -> Dict: if token in self.fairseq_tokens_to_ids: return self.fairseq_tokens_to_ids[token] else: return self.unk_token_id def snake_case_ ( self : int , __lowerCAmelCase : Optional[int] ) -> List[str]: return self.fairseq_ids_to_tokens[index] def snake_case_ ( self : List[str] , __lowerCAmelCase : Union[str, Any] ) -> Tuple: _A = ''''''.join(__lowerCAmelCase ).replace(__lowerCAmelCase , ''' ''' ).strip() return out_string def snake_case_ ( self : str , __lowerCAmelCase : str , __lowerCAmelCase : Optional[str] = None ) -> Tuple[str]: if not os.path.isdir(__lowerCAmelCase ): logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' ) return _A = os.path.join( __lowerCAmelCase , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] ) _A = os.path.join( __lowerCAmelCase , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''monolingual_vocab_file'''] , ) if os.path.abspath(self.vocab_file ) != os.path.abspath(__lowerCAmelCase ) and os.path.isfile(self.vocab_file ): copyfile(self.vocab_file , __lowerCAmelCase ) elif not os.path.isfile(self.vocab_file ): with open(__lowerCAmelCase , '''wb''' ) as fi: _A = self.sp_model.serialized_model_proto() fi.write(__lowerCAmelCase ) if os.path.abspath(self.monolingual_vocab_file ) != os.path.abspath( __lowerCAmelCase ) and os.path.isfile(self.monolingual_vocab_file ): copyfile(self.monolingual_vocab_file , __lowerCAmelCase ) elif not os.path.isfile(self.monolingual_vocab_file ): with open(__lowerCAmelCase , '''w''' , encoding='''utf-8''' ) as fp: for token in self.fairseq_tokens_to_ids: if token not in self.all_special_tokens: fp.write(f'''{str(__lowerCAmelCase )} \n''' ) return out_vocab_file, out_monolingual_vocab_file
2
1
from math import factorial def SCREAMING_SNAKE_CASE_ ( _snake_case :int = 100 ) -> int: return sum(int(_snake_case ) for x in str(factorial(_snake_case ) ) ) if __name__ == "__main__": print(solution(int(input("""Enter the Number: """).strip())))
2
from __future__ import annotations def SCREAMING_SNAKE_CASE_ ( _snake_case :dict , _snake_case :str ) -> set[str]: _A , _A = set(_snake_case ), [start] while stack: _A = stack.pop() explored.add(_snake_case ) # Differences from BFS: # 1) pop last element instead of first one # 2) add adjacent elements to stack without exploring them for adj in reversed(graph[v] ): if adj not in explored: stack.append(_snake_case ) return explored UpperCAmelCase_ = { """A""": ["""B""", """C""", """D"""], """B""": ["""A""", """D""", """E"""], """C""": ["""A""", """F"""], """D""": ["""B""", """D"""], """E""": ["""B""", """F"""], """F""": ["""C""", """E""", """G"""], """G""": ["""F"""], } if __name__ == "__main__": import doctest doctest.testmod() print(depth_first_search(G, """A"""))
2
1
def SCREAMING_SNAKE_CASE_ ( _snake_case :int = 100 ) -> int: _A = set() _A = 0 _A = n + 1 # maximum limit for a in range(2 , _snake_case ): for b in range(2 , _snake_case ): _A = a**b # calculates the current power collect_powers.add(_snake_case ) # adds the result to the set return len(_snake_case ) if __name__ == "__main__": print("""Number of terms """, solution(int(str(input()).strip())))
2
from .configuration_bert_masked import MaskedBertConfig from .modeling_bert_masked import ( MaskedBertForMultipleChoice, MaskedBertForQuestionAnswering, MaskedBertForSequenceClassification, MaskedBertForTokenClassification, MaskedBertModel, ) from .modules import *
2
1
from collections import OrderedDict from typing import Any, Mapping, Optional from ... import PreTrainedTokenizer from ...configuration_utils import PretrainedConfig from ...file_utils import TensorType, is_torch_available from ...onnx import OnnxConfig, OnnxConfigWithPast, OnnxSeqaSeqConfigWithPast from ...onnx.utils import compute_effective_axis_dimension from ...utils import logging UpperCAmelCase_ = logging.get_logger(__name__) UpperCAmelCase_ = { """facebook/blenderbot_small-90M""": """https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/config.json""", # See all BlenderbotSmall models at https://huggingface.co/models?filter=blenderbot_small } class lowerCamelCase__ ( _A): """simple docstring""" a__ : Tuple = "blenderbot-small" a__ : str = ["past_key_values"] a__ : str = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"} def __init__( self : Optional[int] , __lowerCAmelCase : str=5_02_65 , __lowerCAmelCase : Dict=5_12 , __lowerCAmelCase : List[Any]=8 , __lowerCAmelCase : Union[str, Any]=20_48 , __lowerCAmelCase : Any=16 , __lowerCAmelCase : List[str]=8 , __lowerCAmelCase : Optional[int]=20_48 , __lowerCAmelCase : Optional[Any]=16 , __lowerCAmelCase : List[Any]=0.0 , __lowerCAmelCase : List[str]=0.0 , __lowerCAmelCase : int=True , __lowerCAmelCase : str=True , __lowerCAmelCase : Union[str, Any]="gelu" , __lowerCAmelCase : Union[str, Any]=5_12 , __lowerCAmelCase : Union[str, Any]=0.1 , __lowerCAmelCase : Union[str, Any]=0.0 , __lowerCAmelCase : Optional[int]=0.0 , __lowerCAmelCase : str=0.02 , __lowerCAmelCase : Dict=1 , __lowerCAmelCase : Any=False , __lowerCAmelCase : List[str]=0 , __lowerCAmelCase : Optional[int]=1 , __lowerCAmelCase : Optional[Any]=2 , __lowerCAmelCase : Dict=2 , **__lowerCAmelCase : Optional[Any] , ) -> List[Any]: _A = vocab_size _A = max_position_embeddings _A = d_model _A = encoder_ffn_dim _A = encoder_layers _A = encoder_attention_heads _A = decoder_ffn_dim _A = decoder_layers _A = decoder_attention_heads _A = dropout _A = attention_dropout _A = activation_dropout _A = activation_function _A = init_std _A = encoder_layerdrop _A = decoder_layerdrop _A = use_cache _A = encoder_layers _A = scale_embedding # scale factor will be sqrt(d_model) if True super().__init__( pad_token_id=__lowerCAmelCase , bos_token_id=__lowerCAmelCase , eos_token_id=__lowerCAmelCase , is_encoder_decoder=__lowerCAmelCase , decoder_start_token_id=__lowerCAmelCase , forced_eos_token_id=__lowerCAmelCase , **__lowerCAmelCase , ) class lowerCamelCase__ ( _A): """simple docstring""" @property def snake_case_ ( self : Union[str, Any] ) -> Mapping[str, Mapping[int, str]]: if self.task in ["default", "seq2seq-lm"]: _A = OrderedDict( [ ('''input_ids''', {0: '''batch''', 1: '''encoder_sequence'''}), ('''attention_mask''', {0: '''batch''', 1: '''encoder_sequence'''}), ] ) if self.use_past: _A = {0: '''batch'''} _A = {0: '''batch''', 1: '''past_decoder_sequence + sequence'''} else: _A = {0: '''batch''', 1: '''decoder_sequence'''} _A = {0: '''batch''', 1: '''decoder_sequence'''} if self.use_past: self.fill_with_past_key_values_(__lowerCAmelCase , direction='''inputs''' ) elif self.task == "causal-lm": # TODO: figure this case out. 
_A = OrderedDict( [ ('''input_ids''', {0: '''batch''', 1: '''encoder_sequence'''}), ('''attention_mask''', {0: '''batch''', 1: '''encoder_sequence'''}), ] ) if self.use_past: _A , _A = self.num_layers for i in range(__lowerCAmelCase ): _A = {0: '''batch''', 2: '''past_sequence + sequence'''} _A = {0: '''batch''', 2: '''past_sequence + sequence'''} else: _A = OrderedDict( [ ('''input_ids''', {0: '''batch''', 1: '''encoder_sequence'''}), ('''attention_mask''', {0: '''batch''', 1: '''encoder_sequence'''}), ('''decoder_input_ids''', {0: '''batch''', 1: '''decoder_sequence'''}), ('''decoder_attention_mask''', {0: '''batch''', 1: '''decoder_sequence'''}), ] ) return common_inputs @property def snake_case_ ( self : Dict ) -> Mapping[str, Mapping[int, str]]: if self.task in ["default", "seq2seq-lm"]: _A = super().outputs else: _A = super(__lowerCAmelCase , self ).outputs if self.use_past: _A , _A = self.num_layers for i in range(__lowerCAmelCase ): _A = {0: '''batch''', 2: '''past_sequence + sequence'''} _A = {0: '''batch''', 2: '''past_sequence + sequence'''} return common_outputs def snake_case_ ( self : int , __lowerCAmelCase : PreTrainedTokenizer , __lowerCAmelCase : int = -1 , __lowerCAmelCase : int = -1 , __lowerCAmelCase : bool = False , __lowerCAmelCase : Optional[TensorType] = None , ) -> Mapping[str, Any]: _A = self._generate_dummy_inputs_for_sequence_classification_and_question_answering( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) # Generate decoder inputs _A = seq_length if not self.use_past else 1 _A = self._generate_dummy_inputs_for_sequence_classification_and_question_answering( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) _A = {f'''decoder_{name}''': tensor for name, tensor in decoder_inputs.items()} _A = dict(**__lowerCAmelCase , **__lowerCAmelCase ) if self.use_past: if not is_torch_available(): raise ValueError('''Cannot generate dummy past_keys inputs without PyTorch installed.''' ) else: import torch _A , _A = common_inputs['''input_ids'''].shape _A = common_inputs['''decoder_input_ids'''].shape[1] _A , _A = self.num_attention_heads _A = ( batch, num_encoder_attention_heads, encoder_seq_length, self._config.hidden_size // num_encoder_attention_heads, ) _A = decoder_seq_length + 3 _A = ( batch, num_decoder_attention_heads, decoder_past_length, self._config.hidden_size // num_decoder_attention_heads, ) _A = torch.cat( [common_inputs['''decoder_attention_mask'''], torch.ones(__lowerCAmelCase , __lowerCAmelCase )] , dim=1 ) _A = [] # If the number of encoder and decoder layers are present in the model configuration, both are considered _A , _A = self.num_layers _A = min(__lowerCAmelCase , __lowerCAmelCase ) _A = max(__lowerCAmelCase , __lowerCAmelCase ) - min_num_layers _A = '''encoder''' if num_encoder_layers > num_decoder_layers else '''decoder''' for _ in range(__lowerCAmelCase ): common_inputs["past_key_values"].append( ( torch.zeros(__lowerCAmelCase ), torch.zeros(__lowerCAmelCase ), torch.zeros(__lowerCAmelCase ), torch.zeros(__lowerCAmelCase ), ) ) # TODO: test this. 
_A = encoder_shape if remaining_side_name == '''encoder''' else decoder_shape for _ in range(__lowerCAmelCase , __lowerCAmelCase ): common_inputs["past_key_values"].append((torch.zeros(__lowerCAmelCase ), torch.zeros(__lowerCAmelCase )) ) return common_inputs def snake_case_ ( self : Optional[Any] , __lowerCAmelCase : PreTrainedTokenizer , __lowerCAmelCase : int = -1 , __lowerCAmelCase : int = -1 , __lowerCAmelCase : bool = False , __lowerCAmelCase : Optional[TensorType] = None , ) -> Mapping[str, Any]: _A = self._generate_dummy_inputs_for_sequence_classification_and_question_answering( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) if self.use_past: if not is_torch_available(): raise ValueError('''Cannot generate dummy past_keys inputs without PyTorch installed.''' ) else: import torch _A , _A = common_inputs['''input_ids'''].shape # Not using the same length for past_key_values _A = seqlen + 2 _A , _A = self.num_layers _A , _A = self.num_attention_heads _A = ( batch, num_encoder_attention_heads, past_key_values_length, self._config.hidden_size // num_encoder_attention_heads, ) _A = common_inputs['''attention_mask'''].dtype _A = torch.cat( [common_inputs['''attention_mask'''], torch.ones(__lowerCAmelCase , __lowerCAmelCase , dtype=__lowerCAmelCase )] , dim=1 ) _A = [ (torch.zeros(__lowerCAmelCase ), torch.zeros(__lowerCAmelCase )) for _ in range(__lowerCAmelCase ) ] return common_inputs def snake_case_ ( self : Any , __lowerCAmelCase : PreTrainedTokenizer , __lowerCAmelCase : int = -1 , __lowerCAmelCase : int = -1 , __lowerCAmelCase : bool = False , __lowerCAmelCase : Optional[TensorType] = None , ) -> Mapping[str, Any]: # Copied from OnnxConfig.generate_dummy_inputs # Did not use super(OnnxConfigWithPast, self).generate_dummy_inputs for code clarity. 
# If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX _A = compute_effective_axis_dimension( __lowerCAmelCase , fixed_dimension=OnnxConfig.default_fixed_batch , num_token_to_add=0 ) # If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX _A = tokenizer.num_special_tokens_to_add(__lowerCAmelCase ) _A = compute_effective_axis_dimension( __lowerCAmelCase , fixed_dimension=OnnxConfig.default_fixed_sequence , num_token_to_add=__lowerCAmelCase ) # Generate dummy inputs according to compute batch and sequence _A = [''' '''.join([tokenizer.unk_token] ) * seq_length] * batch_size _A = dict(tokenizer(__lowerCAmelCase , return_tensors=__lowerCAmelCase ) ) return common_inputs def snake_case_ ( self : List[Any] , __lowerCAmelCase : PreTrainedTokenizer , __lowerCAmelCase : int = -1 , __lowerCAmelCase : int = -1 , __lowerCAmelCase : bool = False , __lowerCAmelCase : Optional[TensorType] = None , ) -> Mapping[str, Any]: if self.task in ["default", "seq2seq-lm"]: _A = self._generate_dummy_inputs_for_default_and_seqaseq_lm( __lowerCAmelCase , batch_size=__lowerCAmelCase , seq_length=__lowerCAmelCase , is_pair=__lowerCAmelCase , framework=__lowerCAmelCase ) elif self.task == "causal-lm": _A = self._generate_dummy_inputs_for_causal_lm( __lowerCAmelCase , batch_size=__lowerCAmelCase , seq_length=__lowerCAmelCase , is_pair=__lowerCAmelCase , framework=__lowerCAmelCase ) else: _A = self._generate_dummy_inputs_for_sequence_classification_and_question_answering( __lowerCAmelCase , batch_size=__lowerCAmelCase , seq_length=__lowerCAmelCase , is_pair=__lowerCAmelCase , framework=__lowerCAmelCase ) return common_inputs def snake_case_ ( self : List[Any] , __lowerCAmelCase : List[str] , __lowerCAmelCase : Any , __lowerCAmelCase : str , __lowerCAmelCase : Optional[Any] ) -> Union[str, Any]: if self.task in ["default", "seq2seq-lm"]: _A = super()._flatten_past_key_values_(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) else: _A = super(__lowerCAmelCase , self )._flatten_past_key_values_( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
2
import warnings from ...configuration_utils import PretrainedConfig from ...utils import logging UpperCAmelCase_ = logging.get_logger(__name__) UpperCAmelCase_ = { """xlnet-base-cased""": """https://huggingface.co/xlnet-base-cased/resolve/main/config.json""", """xlnet-large-cased""": """https://huggingface.co/xlnet-large-cased/resolve/main/config.json""", } class lowerCamelCase__ ( _A): """simple docstring""" a__ : Any = "xlnet" a__ : Dict = ["mems"] a__ : List[str] = { "n_token": "vocab_size", # Backward compatibility "hidden_size": "d_model", "num_attention_heads": "n_head", "num_hidden_layers": "n_layer", } def __init__( self : int , __lowerCAmelCase : Dict=3_20_00 , __lowerCAmelCase : List[str]=10_24 , __lowerCAmelCase : Dict=24 , __lowerCAmelCase : Optional[Any]=16 , __lowerCAmelCase : Dict=40_96 , __lowerCAmelCase : Any="gelu" , __lowerCAmelCase : int=True , __lowerCAmelCase : List[str]="bi" , __lowerCAmelCase : Dict=0.02 , __lowerCAmelCase : Union[str, Any]=1E-12 , __lowerCAmelCase : Optional[Any]=0.1 , __lowerCAmelCase : Optional[Any]=5_12 , __lowerCAmelCase : List[str]=None , __lowerCAmelCase : Tuple=True , __lowerCAmelCase : Tuple=False , __lowerCAmelCase : Optional[Any]=False , __lowerCAmelCase : Union[str, Any]=-1 , __lowerCAmelCase : Optional[Any]=False , __lowerCAmelCase : Any="last" , __lowerCAmelCase : List[Any]=True , __lowerCAmelCase : Tuple="tanh" , __lowerCAmelCase : int=0.1 , __lowerCAmelCase : str=5 , __lowerCAmelCase : str=5 , __lowerCAmelCase : List[str]=5 , __lowerCAmelCase : List[str]=1 , __lowerCAmelCase : Optional[int]=2 , **__lowerCAmelCase : List[str] , ) -> Tuple: _A = vocab_size _A = d_model _A = n_layer _A = n_head if d_model % n_head != 0: raise ValueError(f'''\'d_model % n_head\' ({d_model % n_head}) should be equal to 0''' ) if "d_head" in kwargs: if kwargs["d_head"] != d_model // n_head: raise ValueError( f'''`d_head` ({kwargs['d_head']}) should be equal to `d_model // n_head` ({d_model // n_head})''' ) _A = d_model // n_head _A = ff_activation _A = d_inner _A = untie_r _A = attn_type _A = initializer_range _A = layer_norm_eps _A = dropout _A = mem_len _A = reuse_len _A = bi_data _A = clamp_len _A = same_length _A = summary_type _A = summary_use_proj _A = summary_activation _A = summary_last_dropout _A = start_n_top _A = end_n_top _A = bos_token_id _A = pad_token_id _A = eos_token_id if "use_cache" in kwargs: warnings.warn( '''The `use_cache` argument is deprecated and will be removed in a future version, use `use_mems_eval`''' ''' instead.''' , __lowerCAmelCase , ) _A = kwargs['''use_cache'''] _A = use_mems_eval _A = use_mems_train super().__init__(pad_token_id=__lowerCAmelCase , bos_token_id=__lowerCAmelCase , eos_token_id=__lowerCAmelCase , **__lowerCAmelCase ) @property def snake_case_ ( self : Optional[Any] ) -> Union[str, Any]: logger.info(f'''The model {self.model_type} is one of the few models that has no sequence length limit.''' ) return -1 @max_position_embeddings.setter def snake_case_ ( self : Tuple , __lowerCAmelCase : Optional[Any] ) -> Dict: # Message copied from Transformer-XL documentation raise NotImplementedError( f'''The model {self.model_type} is one of the few models that has no sequence length limit.''' )
2
1
import argparse import math import os import torch from neural_compressor.utils.pytorch import load from PIL import Image from transformers import CLIPTextModel, CLIPTokenizer from diffusers import AutoencoderKL, StableDiffusionPipeline, UNetaDConditionModel def SCREAMING_SNAKE_CASE_ ( ) -> Any: _A = argparse.ArgumentParser() parser.add_argument( '''-m''' , '''--pretrained_model_name_or_path''' , type=_snake_case , default=_snake_case , required=_snake_case , help='''Path to pretrained model or model identifier from huggingface.co/models.''' , ) parser.add_argument( '''-c''' , '''--caption''' , type=_snake_case , default='''robotic cat with wings''' , help='''Text used to generate images.''' , ) parser.add_argument( '''-n''' , '''--images_num''' , type=_snake_case , default=4 , help='''How much images to generate.''' , ) parser.add_argument( '''-s''' , '''--seed''' , type=_snake_case , default=42 , help='''Seed for random process.''' , ) parser.add_argument( '''-ci''' , '''--cuda_id''' , type=_snake_case , default=0 , help='''cuda_id.''' , ) _A = parser.parse_args() return args def SCREAMING_SNAKE_CASE_ ( _snake_case :Union[str, Any] , _snake_case :Dict , _snake_case :Any ) -> List[Any]: if not len(_snake_case ) == rows * cols: raise ValueError('''The specified number of rows and columns are not correct.''' ) _A , _A = imgs[0].size _A = Image.new('''RGB''' , size=(cols * w, rows * h) ) _A , _A = grid.size for i, img in enumerate(_snake_case ): grid.paste(_snake_case , box=(i % cols * w, i // cols * h) ) return grid def SCREAMING_SNAKE_CASE_ ( _snake_case :List[Any] , _snake_case :Union[str, Any]="robotic cat with wings" , _snake_case :List[str]=7.5 , _snake_case :Optional[int]=50 , _snake_case :List[str]=1 , _snake_case :List[str]=42 , ) -> List[str]: _A = torch.Generator(pipeline.device ).manual_seed(_snake_case ) _A = pipeline( _snake_case , guidance_scale=_snake_case , num_inference_steps=_snake_case , generator=_snake_case , num_images_per_prompt=_snake_case , ).images _A = int(math.sqrt(_snake_case ) ) _A = image_grid(_snake_case , rows=_rows , cols=num_images_per_prompt // _rows ) return grid, images UpperCAmelCase_ = parse_args() # Load models and create wrapper for stable diffusion UpperCAmelCase_ = CLIPTokenizer.from_pretrained(args.pretrained_model_name_or_path, subfolder="""tokenizer""") UpperCAmelCase_ = CLIPTextModel.from_pretrained(args.pretrained_model_name_or_path, subfolder="""text_encoder""") UpperCAmelCase_ = AutoencoderKL.from_pretrained(args.pretrained_model_name_or_path, subfolder="""vae""") UpperCAmelCase_ = UNetaDConditionModel.from_pretrained(args.pretrained_model_name_or_path, subfolder="""unet""") UpperCAmelCase_ = StableDiffusionPipeline.from_pretrained( args.pretrained_model_name_or_path, text_encoder=text_encoder, vae=vae, unet=unet, tokenizer=tokenizer ) UpperCAmelCase_ = lambda images, clip_input: (images, False) if os.path.exists(os.path.join(args.pretrained_model_name_or_path, """best_model.pt""")): UpperCAmelCase_ = load(args.pretrained_model_name_or_path, model=unet) unet.eval() setattr(pipeline, """unet""", unet) else: UpperCAmelCase_ = unet.to(torch.device("""cuda""", args.cuda_id)) UpperCAmelCase_ = pipeline.to(unet.device) UpperCAmelCase_ ,UpperCAmelCase_ = generate_images(pipeline, prompt=args.caption, num_images_per_prompt=args.images_num, seed=args.seed) grid.save(os.path.join(args.pretrained_model_name_or_path, """{}.png""".format("""_""".join(args.caption.split())))) UpperCAmelCase_ = os.path.join(args.pretrained_model_name_or_path, 
"""_""".join(args.caption.split())) os.makedirs(dirname, exist_ok=True) for idx, image in enumerate(images): image.save(os.path.join(dirname, """{}.png""".format(idx + 1)))
2
def SCREAMING_SNAKE_CASE_ ( _snake_case :bytes ) -> str: return "".join([hex(_snake_case )[2:].zfill(2 ).upper() for byte in list(_snake_case )] ) def SCREAMING_SNAKE_CASE_ ( _snake_case :str ) -> bytes: # Check data validity, following RFC3548 # https://www.ietf.org/rfc/rfc3548.txt if (len(_snake_case ) % 2) != 0: raise ValueError( '''Base16 encoded data is invalid: Data does not have an even number of hex digits.''' ) # Check the character set - the standard base16 alphabet # is uppercase according to RFC3548 section 6 if not set(_snake_case ) <= set('''0123456789ABCDEF''' ): raise ValueError( '''Base16 encoded data is invalid: Data is not uppercase hex or it contains invalid characters.''' ) # For every two hexadecimal digits (= a byte), turn it into an integer. # Then, string the result together into bytes, and return it. return bytes(int(data[i] + data[i + 1] , 16 ) for i in range(0 , len(_snake_case ) , 2 ) ) if __name__ == "__main__": import doctest doctest.testmod()
2
1
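As a quick sanity check on the helpers above: the standard library implements the same RFC 3548 base16 rules, so a round trip through base64.b16encode/b16decode should agree with them.

import base64

payload = b"Hello"
encoded = base64.b16encode(payload).decode()  # '48656C6C6F'
assert base64.b16decode(encoded) == payload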
from sklearn.metrics import fa_score, matthews_corrcoef import datasets from .record_evaluation import evaluate as evaluate_record UpperCAmelCase_ = """\ @article{wang2019superglue, title={SuperGLUE: A Stickier Benchmark for General-Purpose Language Understanding Systems}, author={Wang, Alex and Pruksachatkun, Yada and Nangia, Nikita and Singh, Amanpreet and Michael, Julian and Hill, Felix and Levy, Omer and Bowman, Samuel R}, journal={arXiv preprint arXiv:1905.00537}, year={2019} } """ UpperCAmelCase_ = """\ SuperGLUE (https://super.gluebenchmark.com/) is a new benchmark styled after GLUE with a new set of more difficult language understanding tasks, improved resources, and a new public leaderboard. """ UpperCAmelCase_ = """ Compute SuperGLUE evaluation metric associated to each SuperGLUE dataset. Args: predictions: list of predictions to score. Depending on the SuperGlUE subset: - for 'record': list of question-answer dictionaries with the following keys: - 'idx': index of the question as specified by the dataset - 'prediction_text': the predicted answer text - for 'multirc': list of question-answer dictionaries with the following keys: - 'idx': index of the question-answer pair as specified by the dataset - 'prediction': the predicted answer label - otherwise: list of predicted labels references: list of reference labels. Depending on the SuperGLUE subset: - for 'record': list of question-answers dictionaries with the following keys: - 'idx': index of the question as specified by the dataset - 'answers': list of possible answers - otherwise: list of reference labels Returns: depending on the SuperGLUE subset: - for 'record': - 'exact_match': Exact match between answer and gold answer - 'f1': F1 score - for 'multirc': - 'exact_match': Exact match between answer and gold answer - 'f1_m': Per-question macro-F1 score - 'f1_a': Average F1 score over all answers - for 'axb': 'matthews_correlation': Matthew Correlation - for 'cb': - 'accuracy': Accuracy - 'f1': F1 score - for all others: - 'accuracy': Accuracy Examples: >>> super_glue_metric = datasets.load_metric('super_glue', 'copa') # any of [\"copa\", \"rte\", \"wic\", \"wsc\", \"wsc.fixed\", \"boolq\", \"axg\"] >>> predictions = [0, 1] >>> references = [0, 1] >>> results = super_glue_metric.compute(predictions=predictions, references=references) >>> print(results) {'accuracy': 1.0} >>> super_glue_metric = datasets.load_metric('super_glue', 'cb') >>> predictions = [0, 1] >>> references = [0, 1] >>> results = super_glue_metric.compute(predictions=predictions, references=references) >>> print(results) {'accuracy': 1.0, 'f1': 1.0} >>> super_glue_metric = datasets.load_metric('super_glue', 'record') >>> predictions = [{'idx': {'passage': 0, 'query': 0}, 'prediction_text': 'answer'}] >>> references = [{'idx': {'passage': 0, 'query': 0}, 'answers': ['answer', 'another_answer']}] >>> results = super_glue_metric.compute(predictions=predictions, references=references) >>> print(results) {'exact_match': 1.0, 'f1': 1.0} >>> super_glue_metric = datasets.load_metric('super_glue', 'multirc') >>> predictions = [{'idx': {'answer': 0, 'paragraph': 0, 'question': 0}, 'prediction': 0}, {'idx': {'answer': 1, 'paragraph': 2, 'question': 3}, 'prediction': 1}] >>> references = [0, 1] >>> results = super_glue_metric.compute(predictions=predictions, references=references) >>> print(results) {'exact_match': 1.0, 'f1_m': 1.0, 'f1_a': 1.0} >>> super_glue_metric = datasets.load_metric('super_glue', 'axb') >>> references = [0, 1] >>> predictions = [0, 1] >>> results = 
super_glue_metric.compute(predictions=predictions, references=references) >>> print(results) {'matthews_correlation': 1.0} """ def SCREAMING_SNAKE_CASE_ ( _snake_case :Optional[int] , _snake_case :Dict ) -> Optional[int]: return float((preds == labels).mean() ) def SCREAMING_SNAKE_CASE_ ( _snake_case :str , _snake_case :int , _snake_case :str="binary" ) -> int: _A = simple_accuracy(_snake_case , _snake_case ) _A = float(fa_score(y_true=_snake_case , y_pred=_snake_case , average=_snake_case ) ) return { "accuracy": acc, "f1": fa, } def SCREAMING_SNAKE_CASE_ ( _snake_case :List[Any] , _snake_case :int ) -> Any: _A = {} for id_pred, label in zip(_snake_case , _snake_case ): _A = F'''{id_pred['idx']['paragraph']}-{id_pred['idx']['question']}''' _A = id_pred['''prediction'''] if question_id in question_map: question_map[question_id].append((pred, label) ) else: _A = [(pred, label)] _A , _A = [], [] for question, preds_labels in question_map.items(): _A , _A = zip(*_snake_case ) _A = fa_score(y_true=_snake_case , y_pred=_snake_case , average='''macro''' ) fas.append(_snake_case ) _A = int(sum(pred == label for pred, label in preds_labels ) == len(_snake_case ) ) ems.append(_snake_case ) _A = float(sum(_snake_case ) / len(_snake_case ) ) _A = sum(_snake_case ) / len(_snake_case ) _A = float(fa_score(y_true=_snake_case , y_pred=[id_pred['''prediction'''] for id_pred in ids_preds] ) ) return {"exact_match": em, "f1_m": fa_m, "f1_a": fa_a} @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION) class lowerCamelCase__ ( datasets.Metric): """simple docstring""" def snake_case_ ( self : Tuple ) -> List[Any]: if self.config_name not in [ "boolq", "cb", "copa", "multirc", "record", "rte", "wic", "wsc", "wsc.fixed", "axb", "axg", ]: raise KeyError( '''You should supply a configuration name selected in ''' '''["boolq", "cb", "copa", "multirc", "record", "rte", "wic", "wsc", "wsc.fixed", "axb", "axg",]''' ) return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(self._get_feature_types() ) , codebase_urls=[] , reference_urls=[] , format='''numpy''' if not self.config_name == '''record''' and not self.config_name == '''multirc''' else None , ) def snake_case_ ( self : str ) -> Dict: if self.config_name == "record": return { "predictions": { "idx": { "passage": datasets.Value('''int64''' ), "query": datasets.Value('''int64''' ), }, "prediction_text": datasets.Value('''string''' ), }, "references": { "idx": { "passage": datasets.Value('''int64''' ), "query": datasets.Value('''int64''' ), }, "answers": datasets.Sequence(datasets.Value('''string''' ) ), }, } elif self.config_name == "multirc": return { "predictions": { "idx": { "answer": datasets.Value('''int64''' ), "paragraph": datasets.Value('''int64''' ), "question": datasets.Value('''int64''' ), }, "prediction": datasets.Value('''int64''' ), }, "references": datasets.Value('''int64''' ), } else: return { "predictions": datasets.Value('''int64''' ), "references": datasets.Value('''int64''' ), } def snake_case_ ( self : Any , __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : List[Any] ) -> Any: if self.config_name == "axb": return {"matthews_correlation": matthews_corrcoef(__lowerCAmelCase , __lowerCAmelCase )} elif self.config_name == "cb": return acc_and_fa(__lowerCAmelCase , __lowerCAmelCase , fa_avg='''macro''' ) elif self.config_name == "record": _A = [ { '''qas''': [ {'''id''': ref['''idx''']['''query'''], '''answers''': [{'''text''': 
ans} for ans in ref['''answers''']]} for ref in references ] } ] _A = {pred['''idx''']['''query''']: pred['''prediction_text'''] for pred in predictions} return evaluate_record(__lowerCAmelCase , __lowerCAmelCase )[0] elif self.config_name == "multirc": return evaluate_multirc(__lowerCAmelCase , __lowerCAmelCase ) elif self.config_name in ["copa", "rte", "wic", "wsc", "wsc.fixed", "boolq", "axg"]: return {"accuracy": simple_accuracy(__lowerCAmelCase , __lowerCAmelCase )} else: raise KeyError( '''You should supply a configuration name selected in ''' '''["boolq", "cb", "copa", "multirc", "record", "rte", "wic", "wsc", "wsc.fixed", "axb", "axg",]''' )
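# --- Illustrative sketch (not part of the metric above) ---
# A minimal, self-contained rendering of the MultiRC aggregation idea used in
# the multirc evaluation above: answer-level predictions are grouped per
# (paragraph, question) pair, and a question counts as an exact match only when
# every one of its answer predictions equals its label. The helper name is
# hypothetical.
def multirc_exact_match_sketch(ids_preds, labels):
    question_map = {}
    for id_pred, label in zip(ids_preds, labels):
        key = (id_pred["idx"]["paragraph"], id_pred["idx"]["question"])
        question_map.setdefault(key, []).append((id_pred["prediction"], label))
    ems = [
        int(all(pred == label for pred, label in preds_labels))
        for preds_labels in question_map.values()
    ]
    return sum(ems) / len(ems)


# One question with two answer candidates, both predicted correctly -> 1.0
_ids_preds = [
    {"idx": {"paragraph": 0, "question": 0, "answer": 0}, "prediction": 1},
    {"idx": {"paragraph": 0, "question": 0, "answer": 1}, "prediction": 0},
]
print(multirc_exact_match_sketch(_ids_preds, [1, 0]))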
2
def SCREAMING_SNAKE_CASE_ ( _snake_case :list ) -> bool:
    if not isinstance(_snake_case , list ):
        raise ValueError('''Input series is not valid, valid series - [2, 4, 6]''' )
    if len(_snake_case ) == 0:
        raise ValueError('''Input list must be a non-empty list''' )
    if len(_snake_case ) == 1:
        return True
    _A = series[1] - series[0]
    for index in range(len(_snake_case ) - 1 ):
        if series[index + 1] - series[index] != common_diff:
            return False
    return True


def SCREAMING_SNAKE_CASE_ ( _snake_case :list ) -> float:
    if not isinstance(_snake_case , list ):
        raise ValueError('''Input series is not valid, valid series - [2, 4, 6]''' )
    if len(_snake_case ) == 0:
        raise ValueError('''Input list must be a non-empty list''' )
    _A = 0
    for val in series:
        answer += val
    return answer / len(_snake_case )


if __name__ == "__main__":
    import doctest

    doctest.testmod()
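# --- Illustrative sketch (not part of the snippet above) ---
# Hedged usage example for the two arithmetic-series helpers above. Their
# obfuscated names collide, so this sketch restates the validity check under a
# descriptive, hypothetical name before exercising it.
def is_arithmetic_series_sketch(series: list) -> bool:
    if len(series) <= 1:
        return True
    common_diff = series[1] - series[0]
    return all(
        series[i + 1] - series[i] == common_diff for i in range(len(series) - 1)
    )


print(is_arithmetic_series_sketch([2, 4, 6]))  # True: common difference 2
print(is_arithmetic_series_sketch([2, 4, 7]))  # False: 4 -> 7 breaks the difference
print(sum([2, 4, 6]) / len([2, 4, 6]))  # 4.0, the mean the second helper returns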
2
1
import json import os from typing import Dict, List, Optional, Tuple from ...tokenization_utils import PreTrainedTokenizer from ...utils import logging UpperCAmelCase_ = logging.get_logger(__name__) UpperCAmelCase_ = { """vocab_file""": """vocab.json""", """tokenizer_config_file""": """tokenizer_config.json""", """merges_file""": """merges.txt""", } UpperCAmelCase_ = { """vocab_file""": { """facebook/s2t-wav2vec2-large-en-de""": ( """https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/vocab.json""" ), }, """tokenizer_config_file""": { """facebook/s2t-wav2vec2-large-en-de""": ( """https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/tokenizer_config.json""" ), }, """merges_file""": { """facebook/s2t-wav2vec2-large-en-de""": ( """https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/merges.txt""" ), }, } UpperCAmelCase_ = """</w>""" UpperCAmelCase_ = """@@ """ def SCREAMING_SNAKE_CASE_ ( _snake_case :Optional[Any] ) -> List[str]: _A = set() _A = word[0] for char in word[1:]: pairs.add((prev_char, char) ) _A = char return pairs # Speech2Text2 has no max input length UpperCAmelCase_ = {"""facebook/s2t-wav2vec2-large-en-de""": 1_0_2_4} class lowerCamelCase__ ( _A): """simple docstring""" a__ : Dict = VOCAB_FILES_NAMES a__ : str = PRETRAINED_VOCAB_FILES_MAP a__ : Optional[Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES a__ : List[Any] = ["input_ids", "attention_mask"] def __init__( self : Optional[Any] , __lowerCAmelCase : Optional[int] , __lowerCAmelCase : List[str]="<s>" , __lowerCAmelCase : Tuple="<pad>" , __lowerCAmelCase : Optional[Any]="</s>" , __lowerCAmelCase : Dict="<unk>" , __lowerCAmelCase : Union[str, Any]=False , __lowerCAmelCase : Optional[int]=None , **__lowerCAmelCase : str , ) -> Dict: super().__init__( unk_token=__lowerCAmelCase , bos_token=__lowerCAmelCase , eos_token=__lowerCAmelCase , pad_token=__lowerCAmelCase , do_lower_case=__lowerCAmelCase , **__lowerCAmelCase , ) _A = do_lower_case with open(__lowerCAmelCase , encoding='''utf-8''' ) as vocab_handle: _A = json.load(__lowerCAmelCase ) _A = {v: k for k, v in self.encoder.items()} if merges_file is None: logger.info(f'''No merges files provided. 
{self.__class__.__name__} can only be used for decoding.''' ) _A = None _A = None else: with open(__lowerCAmelCase , encoding='''utf-8''' ) as merges_handle: _A = merges_handle.read().split('''\n''' )[:-1] _A = [tuple(merge.split()[:2] ) for merge in merges] _A = dict(zip(__lowerCAmelCase , range(len(__lowerCAmelCase ) ) ) ) _A = {} @property def snake_case_ ( self : List[str] ) -> int: return len(self.decoder ) def snake_case_ ( self : Dict ) -> Dict: return dict(self.encoder , **self.added_tokens_encoder ) def snake_case_ ( self : Union[str, Any] , __lowerCAmelCase : Any ) -> Union[str, Any]: _A = tuple(token[:-1] ) + (token[-1] + BPE_TOKEN_MERGES,) if token in self.cache: return self.cache[token] _A = get_pairs(__lowerCAmelCase ) if not pairs: return token while True: _A = min(__lowerCAmelCase , key=lambda __lowerCAmelCase : self.bpe_ranks.get(__lowerCAmelCase , float('''inf''' ) ) ) if bigram not in self.bpe_ranks: break _A , _A = bigram _A = [] _A = 0 while i < len(__lowerCAmelCase ): try: _A = word.index(__lowerCAmelCase , __lowerCAmelCase ) except ValueError: new_word.extend(word[i:] ) break else: new_word.extend(word[i:j] ) _A = j if word[i] == first and i < len(__lowerCAmelCase ) - 1 and word[i + 1] == second: new_word.append(first + second ) i += 2 else: new_word.append(word[i] ) i += 1 _A = tuple(__lowerCAmelCase ) _A = new_word if len(__lowerCAmelCase ) == 1: break else: _A = get_pairs(__lowerCAmelCase ) _A = ''' '''.join(__lowerCAmelCase ) if word == "\n " + BPE_TOKEN_MERGES: _A = '''\n''' + BPE_TOKEN_MERGES if word.endswith(__lowerCAmelCase ): _A = word.replace(__lowerCAmelCase , '''''' ) _A = word.replace(''' ''' , __lowerCAmelCase ) _A = word return word def snake_case_ ( self : Optional[Any] , __lowerCAmelCase : Tuple ) -> Optional[int]: if self.bpe_ranks is None: raise ValueError( '''This tokenizer was instantiated without a `merges.txt` file, so''' ''' that it can only be used for decoding, not for encoding.''' '''Make sure to provide `merges.txt` file at instantiation to enable ''' '''encoding.''' ) if self.do_lower_case: _A = text.lower() _A = text.split() _A = [] for token in text: if token: split_tokens.extend(list(self.bpe(__lowerCAmelCase ).split(''' ''' ) ) ) return split_tokens def snake_case_ ( self : List[Any] , __lowerCAmelCase : str ) -> int: return self.encoder.get(__lowerCAmelCase , self.encoder.get(self.unk_token ) ) def snake_case_ ( self : str , __lowerCAmelCase : int ) -> str: _A = self.decoder.get(__lowerCAmelCase , self.unk_token ) return result def snake_case_ ( self : List[str] , __lowerCAmelCase : List[str] ) -> str: _A = ''' '''.join(__lowerCAmelCase ) # make sure @@ tokens are concatenated _A = ''''''.join(string.split(__lowerCAmelCase ) ) return string def snake_case_ ( self : Tuple , __lowerCAmelCase : str , __lowerCAmelCase : Optional[str] = None ) -> Tuple[str]: if not os.path.isdir(__lowerCAmelCase ): logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' ) return _A = os.path.join( __lowerCAmelCase , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] ) _A = os.path.join( __lowerCAmelCase , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''merges_file'''] ) with open(__lowerCAmelCase , '''w''' , encoding='''utf-8''' ) as f: f.write(json.dumps(self.encoder , indent=2 , sort_keys=__lowerCAmelCase , ensure_ascii=__lowerCAmelCase ) + '''\n''' ) _A = 0 if self.bpe_ranks is None: return (vocab_file,) with open(__lowerCAmelCase , '''w''' , 
encoding='''utf-8''' ) as writer: for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda kv : kv[1] ): if index != token_index: logger.warning( f'''Saving vocabulary to {merges_file}: BPE merge indices are not consecutive.''' ''' Please check that the tokenizer is not corrupted!''' ) _A = token_index writer.write(''' '''.join(__lowerCAmelCase ) + '''\n''' ) index += 1 return (vocab_file, merges_file)
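# --- Illustrative sketch (not part of the tokenizer above) ---
# The "@@ " continuation marker defined near the top of the tokenizer above is
# how subword pieces are flagged for gluing onto the next token when decoding.
# A minimal, self-contained rendering of that convert_tokens_to_string logic:
BPE_CONTINUATION_MARKER = "@@ "  # mirrors the "@@ " constant defined above


def join_bpe_tokens_sketch(tokens):
    string = " ".join(tokens)
    # dropping every "@@ " fuses a marked piece with the token that follows it
    return "".join(string.split(BPE_CONTINUATION_MARKER))


print(join_bpe_tokens_sketch(["hel@@", "lo", "world"]))  # "hello world"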
2
import math

import numpy as np
import qiskit
from qiskit import Aer, ClassicalRegister, QuantumCircuit, QuantumRegister, execute


def SCREAMING_SNAKE_CASE_ ( _snake_case :int = 3 ) -> qiskit.result.counts.Counts:
    if not isinstance(_snake_case , int ):
        raise TypeError('''number of qubits must be an integer.''' )
    if number_of_qubits <= 0:
        raise ValueError('''number of qubits must be > 0.''' )
    if math.floor(_snake_case ) != number_of_qubits:
        raise ValueError('''number of qubits must be exact integer.''' )
    if number_of_qubits > 10:
        raise ValueError('''number of qubits too large to simulate (>10).''' )
    _A = QuantumRegister(_snake_case , '''qr''' )
    _A = ClassicalRegister(_snake_case , '''cr''' )
    _A = QuantumCircuit(_snake_case , _snake_case )
    _A = number_of_qubits
    for i in range(_snake_case ):
        quantum_circuit.h(number_of_qubits - i - 1 )
        counter -= 1
        for j in range(_snake_case ):
            quantum_circuit.cp(np.pi / 2 ** (counter - j) , _snake_case , _snake_case )
    for k in range(number_of_qubits // 2 ):
        quantum_circuit.swap(_snake_case , number_of_qubits - k - 1 )
    # measure all the qubits
    quantum_circuit.measure(_snake_case , _snake_case )
    # simulate with 10000 shots
    _A = Aer.get_backend('''qasm_simulator''' )
    _A = execute(_snake_case , _snake_case , shots=10_000 )
    return job.result().get_counts(_snake_case )


if __name__ == "__main__":
    print(
        f'Total count for quantum fourier transform state is: \
{quantum_fourier_transform(3)}'
    )
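# --- Illustrative sketch (not part of the snippet above) ---
# Hedged sanity check for the circuit above, assuming it is exposed as
# quantum_fourier_transform (the name used in the __main__ guard) and that
# qiskit with the Aer simulator is installed. The QFT of the all-zeros state
# is uniform over all 2**n basis states, so with 10000 shots and n = 3 each of
# the 8 outcomes should appear roughly 1250 times.
counts = quantum_fourier_transform(3)
assert sum(counts.values()) == 10_000
assert len(counts) == 8  # every 3-qubit basis state observed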
2
1
def SCREAMING_SNAKE_CASE_ ( _snake_case :int = 50 ) -> int:
    _A = [1] * (length + 1)
    for row_length in range(3 , length + 1 ):
        for block_length in range(3 , row_length + 1 ):
            for block_start in range(row_length - block_length ):
                ways_number[row_length] += ways_number[
                    row_length - block_start - block_length - 1
                ]
            ways_number[row_length] += 1
    return ways_number[length]


if __name__ == "__main__":
    print(f'{solution() = }')
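# --- Illustrative sketch (not part of the snippet above) ---
# Worked check for the dynamic programme above (the Project Euler 114 fill
# count: blocks of length >= 3, separated by at least one empty unit). Assuming
# the function is exposed as solution, as its __main__ guard suggests: a row of
# length 3 admits exactly 2 arrangements (all empty, or one block of 3), and
# the classic reference value for a row of length 7 is 17.
assert solution(3) == 2
assert solution(7) == 17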
2
import argparse import json import os import fairseq import torch from fairseq.data import Dictionary # Register SEW's fairseq modules from sew_asapp import tasks # noqa: F401 from transformers import ( SEWConfig, SEWForCTC, SEWModel, WavaVecaCTCTokenizer, WavaVecaFeatureExtractor, WavaVecaProcessor, logging, ) logging.set_verbosity_info() UpperCAmelCase_ = logging.get_logger(__name__) UpperCAmelCase_ = { """post_extract_proj""": """feature_projection""", """encoder.pos_conv.0""": """encoder.pos_conv_embed.conv""", """self_attn.k_proj""": """encoder.layers.*.attention.k_proj""", """self_attn.v_proj""": """encoder.layers.*.attention.v_proj""", """self_attn.q_proj""": """encoder.layers.*.attention.q_proj""", """self_attn.out_proj""": """encoder.layers.*.attention.out_proj""", """self_attn_layer_norm""": """encoder.layers.*.layer_norm""", """fc1""": """encoder.layers.*.feed_forward.intermediate_dense""", """fc2""": """encoder.layers.*.feed_forward.output_dense""", """final_layer_norm""": """encoder.layers.*.final_layer_norm""", """encoder.upsample.0""": """encoder.upsample.projection""", """encoder.layer_norm""": """encoder.layer_norm""", """w2v_model.layer_norm""": """layer_norm""", """w2v_encoder.proj""": """lm_head""", """mask_emb""": """masked_spec_embed""", } def SCREAMING_SNAKE_CASE_ ( _snake_case :Union[str, Any] , _snake_case :str , _snake_case :Any , _snake_case :int , _snake_case :List[Any] ) -> Optional[int]: for attribute in key.split('''.''' ): _A = getattr(_snake_case , _snake_case ) if weight_type is not None: _A = getattr(_snake_case , _snake_case ).shape else: _A = hf_pointer.shape assert hf_shape == value.shape, ( F'''Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be''' F''' {value.shape} for {full_name}''' ) if weight_type == "weight": _A = value elif weight_type == "weight_g": _A = value elif weight_type == "weight_v": _A = value elif weight_type == "bias": _A = value else: _A = value logger.info(F'''{key + '.' 
+ weight_type if weight_type is not None else ''} was initialized from {full_name}.''' ) def SCREAMING_SNAKE_CASE_ ( _snake_case :Union[str, Any] , _snake_case :Any , _snake_case :int ) -> Any: _A = [] _A = fairseq_model.state_dict() _A = hf_model.sew.feature_extractor if is_finetuned else hf_model.feature_extractor for name, value in fairseq_dict.items(): _A = False if "conv_layers" in name: load_conv_layer( _snake_case , _snake_case , _snake_case , _snake_case , hf_model.config.feat_extract_norm == '''group''' , ) _A = True else: for key, mapped_key in MAPPING.items(): _A = '''sew.''' + mapped_key if (is_finetuned and mapped_key != '''lm_head''') else mapped_key if key in name or key.split('''w2v_model.''' )[-1] == name.split('''.''' )[0]: _A = True if "*" in mapped_key: _A = name.split(_snake_case )[0].split('''.''' )[-2] _A = mapped_key.replace('''*''' , _snake_case ) if "weight_g" in name: _A = '''weight_g''' elif "weight_v" in name: _A = '''weight_v''' elif "weight" in name: _A = '''weight''' elif "bias" in name: _A = '''bias''' else: _A = None set_recursively(_snake_case , _snake_case , _snake_case , _snake_case , _snake_case ) continue if not is_used: unused_weights.append(_snake_case ) logger.warning(F'''Unused weights: {unused_weights}''' ) def SCREAMING_SNAKE_CASE_ ( _snake_case :Tuple , _snake_case :List[str] , _snake_case :List[str] , _snake_case :Optional[int] , _snake_case :List[Any] ) -> Any: _A = full_name.split('''conv_layers.''' )[-1] _A = name.split('''.''' ) _A = int(items[0] ) _A = int(items[1] ) if type_id == 0: if "bias" in name: assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, ( F'''{full_name} has size {value.shape}, but''' F''' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.''' ) _A = value logger.info(F'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' ) elif "weight" in name: assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, ( F'''{full_name} has size {value.shape}, but''' F''' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.''' ) _A = value logger.info(F'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' ) elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm): if "bias" in name: assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, ( F'''{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was''' " found." 
) _A = value logger.info(F'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' ) elif "weight" in name: assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, ( F'''{full_name} has size {value.shape}, but''' F''' {feature_extractor[layer_id].layer_norm.weight.data.shape} was found.''' ) _A = value logger.info(F'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' ) else: unused_weights.append(_snake_case ) def SCREAMING_SNAKE_CASE_ ( _snake_case :str , _snake_case :Dict ) -> Tuple: _A = SEWConfig() if is_finetuned: _A = model.wav_encoder.wav_model.cfg else: _A = model.cfg _A = fs_config.conv_bias _A = eval(fs_config.conv_feature_layers ) _A = [x[0] for x in conv_layers] _A = [x[1] for x in conv_layers] _A = [x[2] for x in conv_layers] _A = '''gelu''' _A = '''layer''' if fs_config.extractor_mode == '''layer_norm''' else '''group''' _A = 0.0 _A = fs_config.activation_fn.name _A = fs_config.encoder_embed_dim _A = 0.02 _A = fs_config.encoder_ffn_embed_dim _A = 1E-5 _A = fs_config.encoder_layerdrop _A = fs_config.encoder_attention_heads _A = fs_config.conv_pos_groups _A = fs_config.conv_pos _A = len(_snake_case ) _A = fs_config.encoder_layers _A = fs_config.squeeze_factor # take care of any params that are overridden by the Wav2VecCtc model if is_finetuned: _A = model.cfg _A = fs_config.final_dropout _A = fs_config.layerdrop _A = fs_config.activation_dropout _A = fs_config.mask_prob > 0 or fs_config.mask_channel_prob > 0 _A = fs_config.attention_dropout _A = fs_config.dropout_input _A = fs_config.dropout _A = fs_config.mask_channel_length _A = fs_config.mask_channel_prob _A = fs_config.mask_length _A = fs_config.mask_prob _A = '''Wav2Vec2FeatureExtractor''' _A = '''Wav2Vec2CTCTokenizer''' return config @torch.no_grad() def SCREAMING_SNAKE_CASE_ ( _snake_case :Optional[int] , _snake_case :Union[str, Any] , _snake_case :Optional[Any]=None , _snake_case :Optional[int]=None , _snake_case :Dict=True ) -> List[Any]: if is_finetuned: _A , _A , _A = fairseq.checkpoint_utils.load_model_ensemble_and_task( [checkpoint_path] , arg_overrides={'''data''': '''/'''.join(dict_path.split('''/''' )[:-1] )} ) else: _A , _A , _A = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] ) if config_path is not None: _A = SEWConfig.from_pretrained(_snake_case ) else: _A = convert_config(model[0] , _snake_case ) _A = model[0].eval() _A = True if config.feat_extract_norm == '''layer''' else False _A = WavaVecaFeatureExtractor( feature_size=1 , sampling_rate=16_000 , padding_value=0 , do_normalize=_snake_case , return_attention_mask=_snake_case , ) if is_finetuned: if dict_path: _A = Dictionary.load(_snake_case ) # important change bos & pad token id since CTC symbol is <pad> and # not <s> as in fairseq _A = target_dict.pad_index _A = target_dict.bos_index _A = target_dict.pad_index _A = target_dict.bos_index _A = target_dict.eos_index _A = len(target_dict.symbols ) _A = os.path.join(_snake_case , '''vocab.json''' ) if not os.path.isdir(_snake_case ): logger.error('''--pytorch_dump_folder_path ({}) should be a directory'''.format(_snake_case ) ) return os.makedirs(_snake_case , exist_ok=_snake_case ) with open(_snake_case , '''w''' , encoding='''utf-8''' ) as vocab_handle: json.dump(target_dict.indices , _snake_case ) _A = WavaVecaCTCTokenizer( _snake_case , unk_token=target_dict.unk_word , pad_token=target_dict.pad_word , bos_token=target_dict.bos_word , eos_token=target_dict.eos_word , 
word_delimiter_token='''|''' , do_lower_case=_snake_case , ) _A = WavaVecaProcessor(feature_extractor=_snake_case , tokenizer=_snake_case ) processor.save_pretrained(_snake_case ) _A = SEWForCTC(_snake_case ) else: _A = SEWModel(_snake_case ) feature_extractor.save_pretrained(_snake_case ) recursively_load_weights(_snake_case , _snake_case , _snake_case ) hf_model.save_pretrained(_snake_case ) if __name__ == "__main__": UpperCAmelCase_ = argparse.ArgumentParser() parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""") parser.add_argument("""--checkpoint_path""", default=None, type=str, help="""Path to fairseq checkpoint""") parser.add_argument("""--dict_path""", default=None, type=str, help="""Path to dict of fine-tuned model""") parser.add_argument("""--config_path""", default=None, type=str, help="""Path to hf config.json of model to convert""") parser.add_argument( """--is_finetuned""", action="""store_true""", help="""Whether the model to convert is a fine-tuned model or not""" ) UpperCAmelCase_ = parser.parse_args() convert_sew_checkpoint( args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, args.is_finetuned )
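# --- Illustrative sketch (not part of the script above) ---
# Hypothetical invocation of the conversion script above; the file name and all
# paths are placeholders, but every flag is declared in the argparse block.
#
#   python convert_sew_checkpoint.py \
#       --checkpoint_path /path/to/sew_checkpoint.pt \
#       --dict_path /path/to/dict.ltr.txt \
#       --config_path /path/to/config.json \
#       --pytorch_dump_folder_path /path/to/output \
#       --is_finetuned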
2
1
from functools import lru_cache


def SCREAMING_SNAKE_CASE_ ( _snake_case :int ) -> set:
    _A = 2
    _A = set()
    while i * i <= n:
        if n % i:
            i += 1
        else:
            n //= i
            factors.add(_snake_case )
    if n > 1:
        factors.add(_snake_case )
    return factors


@lru_cache
def SCREAMING_SNAKE_CASE_ ( _snake_case :int ) -> int:
    return len(unique_prime_factors(_snake_case ) )


def SCREAMING_SNAKE_CASE_ ( _snake_case :list ) -> bool:
    return len(set(_snake_case ) ) in (0, 1)


def SCREAMING_SNAKE_CASE_ ( _snake_case :int ) -> list:
    _A = 2
    while True:
        # Increment each value of a generated range
        _A = [base + i for i in range(_snake_case )]
        # Run elements through our unique_prime_factors function
        # Append our target number to the end.
        _A = [upf_len(_snake_case ) for x in group]
        checker.append(_snake_case )
        # If all numbers in the list are equal, return the group variable.
        if equality(_snake_case ):
            return group
        # Increment our base variable by 1
        base += 1


def SCREAMING_SNAKE_CASE_ ( _snake_case :int = 4 ) -> int:
    _A = run(_snake_case )
    return results[0] if len(_snake_case ) else None


if __name__ == "__main__":
    print(solution())
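# --- Illustrative sketch (not part of the snippet above) ---
# Hedged sanity checks for the Project Euler 47 search above, assuming the
# intended behavior with the final function exposed as solution() (as in the
# __main__ guard): the first pair of consecutive integers with two distinct
# prime factors each is (14, 15) = (2 * 7, 3 * 5), and the published answer
# for runs of four is 134043 = 3 * 7 * 13 * 491.
assert solution(2) == 14
assert solution(4) == 134043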
2
import unittest from transformers import is_vision_available from transformers.pipelines import pipeline from transformers.testing_utils import ( is_pipeline_test, nested_simplify, require_tf, require_torch, require_vision, slow, ) from .test_pipelines_common import ANY if is_vision_available(): from PIL import Image else: class lowerCamelCase__ : """simple docstring""" @staticmethod def snake_case_ ( *__lowerCAmelCase : Optional[Any] , **__lowerCAmelCase : Any ) -> Any: pass @is_pipeline_test @require_vision class lowerCamelCase__ ( unittest.TestCase): """simple docstring""" @require_torch def snake_case_ ( self : Tuple ) -> Tuple: _A = pipeline( model='''hf-internal-testing/tiny-random-clip-zero-shot-image-classification''' , ) _A = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' ) _A = image_classifier(__lowerCAmelCase , candidate_labels=['''a''', '''b''', '''c'''] ) # The floating scores are so close, we enter floating error approximation and the order is not guaranteed across # python and torch versions. self.assertIn( nested_simplify(__lowerCAmelCase ) , [ [{'''score''': 0.333, '''label''': '''a'''}, {'''score''': 0.333, '''label''': '''b'''}, {'''score''': 0.333, '''label''': '''c'''}], [{'''score''': 0.333, '''label''': '''a'''}, {'''score''': 0.333, '''label''': '''c'''}, {'''score''': 0.333, '''label''': '''b'''}], ] , ) _A = image_classifier([image] * 5 , candidate_labels=['''A''', '''B''', '''C'''] , batch_size=2 ) self.assertEqual( nested_simplify(__lowerCAmelCase ) , [ [ {'''score''': 0.333, '''label''': ANY(__lowerCAmelCase )}, {'''score''': 0.333, '''label''': ANY(__lowerCAmelCase )}, {'''score''': 0.333, '''label''': ANY(__lowerCAmelCase )}, ], [ {'''score''': 0.333, '''label''': ANY(__lowerCAmelCase )}, {'''score''': 0.333, '''label''': ANY(__lowerCAmelCase )}, {'''score''': 0.333, '''label''': ANY(__lowerCAmelCase )}, ], [ {'''score''': 0.333, '''label''': ANY(__lowerCAmelCase )}, {'''score''': 0.333, '''label''': ANY(__lowerCAmelCase )}, {'''score''': 0.333, '''label''': ANY(__lowerCAmelCase )}, ], [ {'''score''': 0.333, '''label''': ANY(__lowerCAmelCase )}, {'''score''': 0.333, '''label''': ANY(__lowerCAmelCase )}, {'''score''': 0.333, '''label''': ANY(__lowerCAmelCase )}, ], [ {'''score''': 0.333, '''label''': ANY(__lowerCAmelCase )}, {'''score''': 0.333, '''label''': ANY(__lowerCAmelCase )}, {'''score''': 0.333, '''label''': ANY(__lowerCAmelCase )}, ], ] , ) @require_tf def snake_case_ ( self : int ) -> Optional[int]: _A = pipeline( model='''hf-internal-testing/tiny-random-clip-zero-shot-image-classification''' , framework='''tf''' ) _A = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' ) _A = image_classifier(__lowerCAmelCase , candidate_labels=['''a''', '''b''', '''c'''] ) self.assertEqual( nested_simplify(__lowerCAmelCase ) , [{'''score''': 0.333, '''label''': '''a'''}, {'''score''': 0.333, '''label''': '''b'''}, {'''score''': 0.333, '''label''': '''c'''}] , ) _A = image_classifier([image] * 5 , candidate_labels=['''A''', '''B''', '''C'''] , batch_size=2 ) self.assertEqual( nested_simplify(__lowerCAmelCase ) , [ [ {'''score''': 0.333, '''label''': ANY(__lowerCAmelCase )}, {'''score''': 0.333, '''label''': ANY(__lowerCAmelCase )}, {'''score''': 0.333, '''label''': ANY(__lowerCAmelCase )}, ], [ {'''score''': 0.333, '''label''': ANY(__lowerCAmelCase )}, {'''score''': 0.333, '''label''': ANY(__lowerCAmelCase )}, {'''score''': 0.333, '''label''': ANY(__lowerCAmelCase )}, ], [ {'''score''': 0.333, '''label''': 
ANY(__lowerCAmelCase )}, {'''score''': 0.333, '''label''': ANY(__lowerCAmelCase )}, {'''score''': 0.333, '''label''': ANY(__lowerCAmelCase )}, ], [ {'''score''': 0.333, '''label''': ANY(__lowerCAmelCase )}, {'''score''': 0.333, '''label''': ANY(__lowerCAmelCase )}, {'''score''': 0.333, '''label''': ANY(__lowerCAmelCase )}, ], [ {'''score''': 0.333, '''label''': ANY(__lowerCAmelCase )}, {'''score''': 0.333, '''label''': ANY(__lowerCAmelCase )}, {'''score''': 0.333, '''label''': ANY(__lowerCAmelCase )}, ], ] , ) @slow @require_torch def snake_case_ ( self : Optional[int] ) -> int: _A = pipeline( task='''zero-shot-image-classification''' , model='''openai/clip-vit-base-patch32''' , ) # This is an image of 2 cats with remotes and no planes _A = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' ) _A = image_classifier(__lowerCAmelCase , candidate_labels=['''cat''', '''plane''', '''remote'''] ) self.assertEqual( nested_simplify(__lowerCAmelCase ) , [ {'''score''': 0.511, '''label''': '''remote'''}, {'''score''': 0.485, '''label''': '''cat'''}, {'''score''': 0.004, '''label''': '''plane'''}, ] , ) _A = image_classifier([image] * 5 , candidate_labels=['''cat''', '''plane''', '''remote'''] , batch_size=2 ) self.assertEqual( nested_simplify(__lowerCAmelCase ) , [ [ {'''score''': 0.511, '''label''': '''remote'''}, {'''score''': 0.485, '''label''': '''cat'''}, {'''score''': 0.004, '''label''': '''plane'''}, ], ] * 5 , ) @slow @require_tf def snake_case_ ( self : Optional[int] ) -> Dict: _A = pipeline( task='''zero-shot-image-classification''' , model='''openai/clip-vit-base-patch32''' , framework='''tf''' ) # This is an image of 2 cats with remotes and no planes _A = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' ) _A = image_classifier(__lowerCAmelCase , candidate_labels=['''cat''', '''plane''', '''remote'''] ) self.assertEqual( nested_simplify(__lowerCAmelCase ) , [ {'''score''': 0.511, '''label''': '''remote'''}, {'''score''': 0.485, '''label''': '''cat'''}, {'''score''': 0.004, '''label''': '''plane'''}, ] , ) _A = image_classifier([image] * 5 , candidate_labels=['''cat''', '''plane''', '''remote'''] , batch_size=2 ) self.assertEqual( nested_simplify(__lowerCAmelCase ) , [ [ {'''score''': 0.511, '''label''': '''remote'''}, {'''score''': 0.485, '''label''': '''cat'''}, {'''score''': 0.004, '''label''': '''plane'''}, ], ] * 5 , )
2
1
from .integrations import ( is_optuna_available, is_ray_available, is_sigopt_available, is_wandb_available, run_hp_search_optuna, run_hp_search_ray, run_hp_search_sigopt, run_hp_search_wandb, ) from .trainer_utils import ( HPSearchBackend, default_hp_space_optuna, default_hp_space_ray, default_hp_space_sigopt, default_hp_space_wandb, ) from .utils import logging UpperCAmelCase_ = logging.get_logger(__name__) class lowerCamelCase__ : """simple docstring""" a__ : str a__ : str = None @staticmethod def snake_case_ ( ) -> List[str]: raise NotImplementedError def snake_case_ ( self : Union[str, Any] , __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : int , __lowerCAmelCase : str , **__lowerCAmelCase : str ) -> int: raise NotImplementedError def snake_case_ ( self : Union[str, Any] , __lowerCAmelCase : int ) -> List[Any]: raise NotImplementedError def snake_case_ ( self : Union[str, Any] ) -> Optional[int]: if not self.is_available(): raise RuntimeError( f'''You picked the {self.name} backend, but it is not installed. Run {self.pip_install()}.''' ) @classmethod def snake_case_ ( cls : Optional[Any] ) -> List[str]: return f'''`pip install {cls.pip_package or cls.name}`''' class lowerCamelCase__ ( _A): """simple docstring""" a__ : List[str] = "optuna" @staticmethod def snake_case_ ( ) -> int: return is_optuna_available() def snake_case_ ( self : Dict , __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : int , __lowerCAmelCase : str , **__lowerCAmelCase : int ) -> Any: return run_hp_search_optuna(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , **__lowerCAmelCase ) def snake_case_ ( self : Tuple , __lowerCAmelCase : Tuple ) -> Dict: return default_hp_space_optuna(__lowerCAmelCase ) class lowerCamelCase__ ( _A): """simple docstring""" a__ : List[str] = "ray" a__ : Tuple = "'ray[tune]'" @staticmethod def snake_case_ ( ) -> List[str]: return is_ray_available() def snake_case_ ( self : Dict , __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : int , __lowerCAmelCase : str , **__lowerCAmelCase : Optional[int] ) -> Optional[int]: return run_hp_search_ray(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , **__lowerCAmelCase ) def snake_case_ ( self : List[Any] , __lowerCAmelCase : Tuple ) -> int: return default_hp_space_ray(__lowerCAmelCase ) class lowerCamelCase__ ( _A): """simple docstring""" a__ : Optional[int] = "sigopt" @staticmethod def snake_case_ ( ) -> Optional[Any]: return is_sigopt_available() def snake_case_ ( self : Any , __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : int , __lowerCAmelCase : str , **__lowerCAmelCase : int ) -> List[str]: return run_hp_search_sigopt(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , **__lowerCAmelCase ) def snake_case_ ( self : str , __lowerCAmelCase : Dict ) -> int: return default_hp_space_sigopt(__lowerCAmelCase ) class lowerCamelCase__ ( _A): """simple docstring""" a__ : Union[str, Any] = "wandb" @staticmethod def snake_case_ ( ) -> int: return is_wandb_available() def snake_case_ ( self : Any , __lowerCAmelCase : Dict , __lowerCAmelCase : int , __lowerCAmelCase : str , **__lowerCAmelCase : List[str] ) -> Dict: return run_hp_search_wandb(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , **__lowerCAmelCase ) def snake_case_ ( self : List[Any] , __lowerCAmelCase : Tuple ) -> Dict: return default_hp_space_wandb(__lowerCAmelCase ) UpperCAmelCase_ = { HPSearchBackend(backend.name): backend for backend in [OptunaBackend, RayTuneBackend, SigOptBackend, WandbBackend] } def SCREAMING_SNAKE_CASE_ ( ) -> str: _A = 
[backend for backend in ALL_HYPERPARAMETER_SEARCH_BACKENDS.values() if backend.is_available()] if len(_snake_case ) > 0: _A = available_backends[0].name if len(_snake_case ) > 1: logger.info( F'''{len(_snake_case )} hyperparameter search backends available. Using {name} as the default.''' ) return name raise RuntimeError( '''No hyperparameter search backend available.\n''' + '''\n'''.join( F''' - To install {backend.name} run {backend.pip_install()}''' for backend in ALL_HYPERPARAMETER_SEARCH_BACKENDS.values() ) )
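# --- Illustrative sketch (not part of the module above) ---
# Minimal usage of the backend-selection helper above (its obfuscated name is
# assumed to map to transformers' default_hp_search_backend, which this module
# mirrors): it returns the name of the first installed backend, or raises with
# per-backend install hints when none is available.
try:
    backend_name = default_hp_search_backend()
    print(f"Using hyperparameter search backend: {backend_name}")
except RuntimeError as err:
    print(err)  # lists the `pip install ...` command for each supported backend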
2
import json import os import shutil import tempfile import unittest import numpy as np import pytest from transformers import BertTokenizer, BertTokenizerFast from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES from transformers.testing_utils import require_vision from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available if is_vision_available(): from PIL import Image from transformers import AlignProcessor, EfficientNetImageProcessor @require_vision class lowerCamelCase__ ( unittest.TestCase): """simple docstring""" def snake_case_ ( self : Tuple ) -> Optional[int]: _A = tempfile.mkdtemp() _A = [ '''[UNK]''', '''[CLS]''', '''[SEP]''', '''[PAD]''', '''[MASK]''', '''want''', '''##want''', '''##ed''', '''wa''', '''un''', '''runn''', '''##ing''', ''',''', '''low''', '''lowest''', ] _A = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] ) with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer: vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) ) _A = { '''do_resize''': True, '''size''': 20, '''do_center_crop''': True, '''crop_size''': 18, '''do_normalize''': True, '''image_mean''': [0.4814_5466, 0.457_8275, 0.4082_1073], '''image_std''': [0.2686_2954, 0.2613_0258, 0.2757_7711], } _A = os.path.join(self.tmpdirname , __lowerCAmelCase ) with open(self.image_processor_file , '''w''' , encoding='''utf-8''' ) as fp: json.dump(__lowerCAmelCase , __lowerCAmelCase ) def snake_case_ ( self : Dict , **__lowerCAmelCase : int ) -> Optional[int]: return BertTokenizer.from_pretrained(self.tmpdirname , **__lowerCAmelCase ) def snake_case_ ( self : str , **__lowerCAmelCase : Optional[Any] ) -> Tuple: return BertTokenizerFast.from_pretrained(self.tmpdirname , **__lowerCAmelCase ) def snake_case_ ( self : Tuple , **__lowerCAmelCase : str ) -> Union[str, Any]: return EfficientNetImageProcessor.from_pretrained(self.tmpdirname , **__lowerCAmelCase ) def snake_case_ ( self : Optional[Any] ) -> Optional[int]: shutil.rmtree(self.tmpdirname ) def snake_case_ ( self : int ) -> Optional[Any]: _A = [np.random.randint(2_55 , size=(3, 30, 4_00) , dtype=np.uinta )] _A = [Image.fromarray(np.moveaxis(__lowerCAmelCase , 0 , -1 ) ) for x in image_inputs] return image_inputs def snake_case_ ( self : Dict ) -> List[str]: _A = self.get_tokenizer() _A = self.get_rust_tokenizer() _A = self.get_image_processor() _A = AlignProcessor(tokenizer=__lowerCAmelCase , image_processor=__lowerCAmelCase ) processor_slow.save_pretrained(self.tmpdirname ) _A = AlignProcessor.from_pretrained(self.tmpdirname , use_fast=__lowerCAmelCase ) _A = AlignProcessor(tokenizer=__lowerCAmelCase , image_processor=__lowerCAmelCase ) processor_fast.save_pretrained(self.tmpdirname ) _A = AlignProcessor.from_pretrained(self.tmpdirname ) self.assertEqual(processor_slow.tokenizer.get_vocab() , tokenizer_slow.get_vocab() ) self.assertEqual(processor_fast.tokenizer.get_vocab() , tokenizer_fast.get_vocab() ) self.assertEqual(tokenizer_slow.get_vocab() , tokenizer_fast.get_vocab() ) self.assertIsInstance(processor_slow.tokenizer , __lowerCAmelCase ) self.assertIsInstance(processor_fast.tokenizer , __lowerCAmelCase ) self.assertEqual(processor_slow.image_processor.to_json_string() , image_processor.to_json_string() ) self.assertEqual(processor_fast.image_processor.to_json_string() , image_processor.to_json_string() ) self.assertIsInstance(processor_slow.image_processor , __lowerCAmelCase ) self.assertIsInstance(processor_fast.image_processor , __lowerCAmelCase ) def snake_case_ ( 
self : List[Any] ) -> List[str]: _A = AlignProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() ) processor.save_pretrained(self.tmpdirname ) _A = self.get_tokenizer(bos_token='''(BOS)''' , eos_token='''(EOS)''' ) _A = self.get_image_processor(do_normalize=__lowerCAmelCase , padding_value=1.0 ) _A = AlignProcessor.from_pretrained( self.tmpdirname , bos_token='''(BOS)''' , eos_token='''(EOS)''' , do_normalize=__lowerCAmelCase , padding_value=1.0 ) self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() ) self.assertIsInstance(processor.tokenizer , __lowerCAmelCase ) self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() ) self.assertIsInstance(processor.image_processor , __lowerCAmelCase ) def snake_case_ ( self : str ) -> List[Any]: _A = self.get_image_processor() _A = self.get_tokenizer() _A = AlignProcessor(tokenizer=__lowerCAmelCase , image_processor=__lowerCAmelCase ) _A = self.prepare_image_inputs() _A = image_processor(__lowerCAmelCase , return_tensors='''np''' ) _A = processor(images=__lowerCAmelCase , return_tensors='''np''' ) for key in input_image_proc.keys(): self.assertAlmostEqual(input_image_proc[key].sum() , input_processor[key].sum() , delta=1E-2 ) def snake_case_ ( self : Union[str, Any] ) -> Dict: _A = self.get_image_processor() _A = self.get_tokenizer() _A = AlignProcessor(tokenizer=__lowerCAmelCase , image_processor=__lowerCAmelCase ) _A = '''lower newer''' _A = processor(text=__lowerCAmelCase ) _A = tokenizer(__lowerCAmelCase , padding='''max_length''' , max_length=64 ) for key in encoded_tok.keys(): self.assertListEqual(encoded_tok[key] , encoded_processor[key] ) def snake_case_ ( self : List[str] ) -> Any: _A = self.get_image_processor() _A = self.get_tokenizer() _A = AlignProcessor(tokenizer=__lowerCAmelCase , image_processor=__lowerCAmelCase ) _A = '''lower newer''' _A = self.prepare_image_inputs() _A = processor(text=__lowerCAmelCase , images=__lowerCAmelCase ) self.assertListEqual(list(inputs.keys() ) , ['''input_ids''', '''token_type_ids''', '''attention_mask''', '''pixel_values'''] ) # test if it raises when no input is passed with pytest.raises(__lowerCAmelCase ): processor() def snake_case_ ( self : Optional[Any] ) -> str: _A = self.get_image_processor() _A = self.get_tokenizer() _A = AlignProcessor(tokenizer=__lowerCAmelCase , image_processor=__lowerCAmelCase ) _A = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]] _A = processor.batch_decode(__lowerCAmelCase ) _A = tokenizer.batch_decode(__lowerCAmelCase ) self.assertListEqual(__lowerCAmelCase , __lowerCAmelCase ) def snake_case_ ( self : str ) -> str: _A = self.get_image_processor() _A = self.get_tokenizer() _A = AlignProcessor(tokenizer=__lowerCAmelCase , image_processor=__lowerCAmelCase ) _A = '''lower newer''' _A = self.prepare_image_inputs() _A = processor(text=__lowerCAmelCase , images=__lowerCAmelCase ) self.assertListEqual(list(inputs.keys() ) , processor.model_input_names )
2
1
import pytest UpperCAmelCase_ = """__dummy_dataset1__""" UpperCAmelCase_ = """ import json import os import datasets REPO_URL = \"https://huggingface.co/datasets/albertvillanova/tests-raw-jsonl/resolve/main/\" URLS = {\"train\": REPO_URL + \"wikiann-bn-train.jsonl\", \"validation\": REPO_URL + \"wikiann-bn-validation.jsonl\"} class __DummyDataset1__(datasets.GeneratorBasedBuilder): def _info(self): features = datasets.Features( { \"tokens\": datasets.Sequence(datasets.Value(\"string\")), \"ner_tags\": datasets.Sequence( datasets.features.ClassLabel( names=[ \"O\", \"B-PER\", \"I-PER\", \"B-ORG\", \"I-ORG\", \"B-LOC\", \"I-LOC\", ] ) ), \"langs\": datasets.Sequence(datasets.Value(\"string\")), \"spans\": datasets.Sequence(datasets.Value(\"string\")), } ) return datasets.DatasetInfo(features=features) def _split_generators(self, dl_manager): dl_path = dl_manager.download(URLS) return [ datasets.SplitGenerator(datasets.Split.TRAIN, gen_kwargs={\"filepath\": dl_path[\"train\"]}), datasets.SplitGenerator(datasets.Split.VALIDATION, gen_kwargs={\"filepath\": dl_path[\"validation\"]}), ] def _generate_examples(self, filepath): with open(filepath, \"r\", encoding=\"utf-8\") as f: for i, line in enumerate(f): yield i, json.loads(line) """ @pytest.fixture def SCREAMING_SNAKE_CASE_ ( ) -> Union[str, Any]: return DATASET_LOADING_SCRIPT_NAME @pytest.fixture def SCREAMING_SNAKE_CASE_ ( ) -> Optional[Any]: return DATASET_LOADING_SCRIPT_CODE @pytest.fixture def SCREAMING_SNAKE_CASE_ ( _snake_case :Any , _snake_case :Optional[int] , _snake_case :Optional[Any] ) -> Optional[int]: _A = dataset_loading_script_name _A = tmp_path / '''datasets''' / script_name script_dir.mkdir(parents=_snake_case ) _A = script_dir / F'''{script_name}.py''' with open(_snake_case , '''w''' ) as f: f.write(_snake_case ) return str(_snake_case )
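# --- Illustrative sketch (not part of the fixtures above) ---
# Hypothetical test consuming the path fixture above: the temporary script
# directory it returns can be handed straight to datasets.load_dataset
# (assuming a `datasets` version that still loads local scripts). The
# fixture's obfuscated name is assumed here to be dataset_loading_script_dir.
def test_dummy_dataset_script_sketch(dataset_loading_script_dir):
    import datasets

    ds = datasets.load_dataset(dataset_loading_script_dir, split="train")
    assert "tokens" in ds.column_names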
2
from ...configuration_utils import PretrainedConfig from ...utils import logging UpperCAmelCase_ = logging.get_logger(__name__) UpperCAmelCase_ = {"""openai-gpt""": """https://huggingface.co/openai-gpt/resolve/main/config.json"""} class lowerCamelCase__ ( _A): """simple docstring""" a__ : int = "openai-gpt" a__ : Dict = { "max_position_embeddings": "n_positions", "hidden_size": "n_embd", "num_attention_heads": "n_head", "num_hidden_layers": "n_layer", } def __init__( self : Union[str, Any] , __lowerCAmelCase : int=4_04_78 , __lowerCAmelCase : Tuple=5_12 , __lowerCAmelCase : str=7_68 , __lowerCAmelCase : List[Any]=12 , __lowerCAmelCase : Any=12 , __lowerCAmelCase : Dict="gelu" , __lowerCAmelCase : Union[str, Any]=0.1 , __lowerCAmelCase : Optional[int]=0.1 , __lowerCAmelCase : Any=0.1 , __lowerCAmelCase : List[str]=1E-5 , __lowerCAmelCase : Optional[int]=0.02 , __lowerCAmelCase : Optional[Any]="cls_index" , __lowerCAmelCase : str=True , __lowerCAmelCase : int=None , __lowerCAmelCase : Optional[int]=True , __lowerCAmelCase : Optional[Any]=0.1 , **__lowerCAmelCase : Tuple , ) -> Optional[Any]: _A = vocab_size _A = n_positions _A = n_embd _A = n_layer _A = n_head _A = afn _A = resid_pdrop _A = embd_pdrop _A = attn_pdrop _A = layer_norm_epsilon _A = initializer_range _A = summary_type _A = summary_use_proj _A = summary_activation _A = summary_first_dropout _A = summary_proj_to_labels super().__init__(**__lowerCAmelCase )
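# --- Illustrative sketch (not part of the configuration above) ---
# Quick usage of the config class above (it mirrors transformers'
# OpenAIGPTConfig, whose public name is assumed here): attribute_map routes the
# canonical names onto the GPT-style ones, so num_hidden_layers reads n_layer,
# num_attention_heads reads n_head, and hidden_size reads n_embd.
config = OpenAIGPTConfig(n_layer=6, n_head=8, n_embd=512)
print(config.num_hidden_layers, config.num_attention_heads, config.hidden_size)  # 6 8 512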
2
1
import collections import inspect import unittest from typing import Dict, List, Tuple from transformers import MaskFormerSwinConfig from transformers.testing_utils import require_torch, require_torch_multi_gpu, torch_device from transformers.utils import is_torch_available from ...test_backbone_common import BackboneTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import MaskFormerSwinBackbone from transformers.models.maskformer import MaskFormerSwinModel class lowerCamelCase__ : """simple docstring""" def __init__( self : Union[str, Any] , __lowerCAmelCase : List[str] , __lowerCAmelCase : str=13 , __lowerCAmelCase : Optional[int]=32 , __lowerCAmelCase : Any=2 , __lowerCAmelCase : Dict=3 , __lowerCAmelCase : List[Any]=16 , __lowerCAmelCase : Any=[1, 2, 1] , __lowerCAmelCase : int=[2, 2, 4] , __lowerCAmelCase : Any=2 , __lowerCAmelCase : Dict=2.0 , __lowerCAmelCase : Dict=True , __lowerCAmelCase : Union[str, Any]=0.0 , __lowerCAmelCase : List[str]=0.0 , __lowerCAmelCase : Dict=0.1 , __lowerCAmelCase : str="gelu" , __lowerCAmelCase : List[str]=False , __lowerCAmelCase : str=True , __lowerCAmelCase : Optional[int]=0.02 , __lowerCAmelCase : Union[str, Any]=1E-5 , __lowerCAmelCase : int=True , __lowerCAmelCase : Optional[int]=None , __lowerCAmelCase : int=True , __lowerCAmelCase : Optional[Any]=10 , __lowerCAmelCase : Optional[Any]=8 , __lowerCAmelCase : List[str]=["stage1", "stage2", "stage3"] , __lowerCAmelCase : int=[1, 2, 3] , ) -> Union[str, Any]: _A = parent _A = batch_size _A = image_size _A = patch_size _A = num_channels _A = embed_dim _A = depths _A = num_heads _A = window_size _A = mlp_ratio _A = qkv_bias _A = hidden_dropout_prob _A = attention_probs_dropout_prob _A = drop_path_rate _A = hidden_act _A = use_absolute_embeddings _A = patch_norm _A = layer_norm_eps _A = initializer_range _A = is_training _A = scope _A = use_labels _A = type_sequence_label_size _A = encoder_stride _A = out_features _A = out_indices def snake_case_ ( self : Tuple ) -> Dict: _A = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) _A = None if self.use_labels: _A = ids_tensor([self.batch_size] , self.type_sequence_label_size ) _A = self.get_config() return config, pixel_values, labels def snake_case_ ( self : Optional[Any] ) -> Any: return MaskFormerSwinConfig( image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , embed_dim=self.embed_dim , depths=self.depths , num_heads=self.num_heads , window_size=self.window_size , mlp_ratio=self.mlp_ratio , qkv_bias=self.qkv_bias , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , drop_path_rate=self.drop_path_rate , hidden_act=self.hidden_act , use_absolute_embeddings=self.use_absolute_embeddings , path_norm=self.patch_norm , layer_norm_eps=self.layer_norm_eps , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , out_features=self.out_features , out_indices=self.out_indices , ) def snake_case_ ( self : Optional[Any] , __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : int , __lowerCAmelCase : List[Any] ) -> Optional[Any]: _A = MaskFormerSwinModel(config=__lowerCAmelCase ) model.to(__lowerCAmelCase ) model.eval() _A = model(__lowerCAmelCase ) _A = ((config.image_size // 
config.patch_size) ** 2) // (4 ** (len(config.depths ) - 1)) _A = int(config.embed_dim * 2 ** (len(config.depths ) - 1) ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, expected_seq_len, expected_dim) ) def snake_case_ ( self : Optional[int] , __lowerCAmelCase : Optional[int] , __lowerCAmelCase : List[Any] , __lowerCAmelCase : Optional[int] ) -> Tuple: _A = MaskFormerSwinBackbone(config=__lowerCAmelCase ) model.to(__lowerCAmelCase ) model.eval() _A = model(__lowerCAmelCase ) # verify feature maps self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) ) self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [13, 16, 16, 16] ) # verify channels self.parent.assertEqual(len(model.channels ) , len(config.out_features ) ) self.parent.assertListEqual(model.channels , [16, 32, 64] ) # verify ValueError with self.parent.assertRaises(__lowerCAmelCase ): _A = ['''stem'''] _A = MaskFormerSwinBackbone(config=__lowerCAmelCase ) def snake_case_ ( self : List[str] ) -> Union[str, Any]: _A = self.prepare_config_and_inputs() _A , _A , _A = config_and_inputs _A = {'''pixel_values''': pixel_values} return config, inputs_dict @require_torch class lowerCamelCase__ ( _A , _A , unittest.TestCase): """simple docstring""" a__ : List[Any] = ( ( MaskFormerSwinModel, MaskFormerSwinBackbone, ) if is_torch_available() else () ) a__ : Dict = {"feature-extraction": MaskFormerSwinModel} if is_torch_available() else {} a__ : Union[str, Any] = False a__ : str = False a__ : str = False a__ : List[Any] = False a__ : Optional[Any] = False def snake_case_ ( self : Optional[Any] ) -> int: _A = MaskFormerSwinModelTester(self ) _A = ConfigTester(self , config_class=__lowerCAmelCase , embed_dim=37 ) @require_torch_multi_gpu @unittest.skip( reason=( '''`MaskFormerSwinModel` outputs `hidden_states_spatial_dimensions` which doesn\'t work well with''' ''' `nn.DataParallel`''' ) ) def snake_case_ ( self : Tuple ) -> List[Any]: pass def snake_case_ ( self : List[str] ) -> List[str]: self.create_and_test_config_common_properties() self.config_tester.create_and_test_config_to_json_string() self.config_tester.create_and_test_config_to_json_file() self.config_tester.create_and_test_config_from_and_save_pretrained() self.config_tester.create_and_test_config_with_num_labels() self.config_tester.check_config_can_be_init_without_params() self.config_tester.check_config_arguments_init() def snake_case_ ( self : str ) -> Any: return def snake_case_ ( self : List[Any] ) -> Optional[int]: _A = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*__lowerCAmelCase ) def snake_case_ ( self : List[Any] ) -> Any: _A = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_backbone(*__lowerCAmelCase ) @unittest.skip('''Swin does not use inputs_embeds''' ) def snake_case_ ( self : Tuple ) -> Union[str, Any]: pass @unittest.skip('''Swin does not support feedforward chunking''' ) def snake_case_ ( self : str ) -> Any: pass def snake_case_ ( self : List[Any] ) -> Union[str, Any]: _A , _A = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: _A = model_class(__lowerCAmelCase ) self.assertIsInstance(model.get_input_embeddings() , (nn.Module) ) _A = model.get_output_embeddings() self.assertTrue(x is None or isinstance(__lowerCAmelCase , nn.Linear ) ) def snake_case_ ( self : List[str] ) -> Dict: _A , _A = self.model_tester.prepare_config_and_inputs_for_common() for model_class in 
self.all_model_classes: _A = model_class(__lowerCAmelCase ) _A = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic _A = [*signature.parameters.keys()] _A = ['''pixel_values'''] self.assertListEqual(arg_names[:1] , __lowerCAmelCase ) @unittest.skip(reason='''MaskFormerSwin is only used as backbone and doesn\'t support output_attentions''' ) def snake_case_ ( self : int ) -> Dict: pass @unittest.skip(reason='''MaskFormerSwin is only used as an internal backbone''' ) def snake_case_ ( self : Optional[Any] ) -> int: pass def snake_case_ ( self : int , __lowerCAmelCase : int , __lowerCAmelCase : Optional[int] , __lowerCAmelCase : int , __lowerCAmelCase : List[str] ) -> Optional[Any]: _A = model_class(__lowerCAmelCase ) model.to(__lowerCAmelCase ) model.eval() with torch.no_grad(): _A = model(**self._prepare_for_class(__lowerCAmelCase , __lowerCAmelCase ) ) _A = outputs.hidden_states _A = getattr( self.model_tester , '''expected_num_hidden_layers''' , len(self.model_tester.depths ) + 1 ) self.assertEqual(len(__lowerCAmelCase ) , __lowerCAmelCase ) # Swin has a different seq_length _A = ( config.patch_size if isinstance(config.patch_size , collections.abc.Iterable ) else (config.patch_size, config.patch_size) ) _A = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0]) self.assertListEqual( list(hidden_states[0].shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , ) def snake_case_ ( self : Any ) -> int: _A , _A = self.model_tester.prepare_config_and_inputs_for_common() _A = ( self.model_tester.image_size if isinstance(self.model_tester.image_size , collections.abc.Iterable ) else (self.model_tester.image_size, self.model_tester.image_size) ) for model_class in self.all_model_classes: _A = True self.check_hidden_states_output(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] _A = True self.check_hidden_states_output(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) def snake_case_ ( self : Optional[Any] ) -> Optional[Any]: _A , _A = self.model_tester.prepare_config_and_inputs_for_common() _A = 3 _A = ( self.model_tester.image_size if isinstance(self.model_tester.image_size , collections.abc.Iterable ) else (self.model_tester.image_size, self.model_tester.image_size) ) _A = ( config.patch_size if isinstance(config.patch_size , collections.abc.Iterable ) else (config.patch_size, config.patch_size) ) _A = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0]) _A = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1]) for model_class in self.all_model_classes: _A = True self.check_hidden_states_output(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , (padded_height, padded_width) ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] _A = True self.check_hidden_states_output(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , (padded_height, padded_width) ) @unittest.skip(reason='''MaskFormerSwin doesn\'t have pretrained checkpoints''' ) def snake_case_ ( self : Union[str, Any] ) -> List[Any]: pass @unittest.skip(reason='''This will be fixed once MaskFormerSwin is replaced by native Swin''' ) def snake_case_ ( self : Union[str, Any] ) -> Union[str, Any]: pass @unittest.skip(reason='''This will be fixed once MaskFormerSwin is replaced by native Swin''' ) def snake_case_ ( self : int ) -> Union[str, 
Any]: pass def snake_case_ ( self : List[Any] ) -> Any: _A , _A = self.model_tester.prepare_config_and_inputs_for_common() def set_nan_tensor_to_zero(__lowerCAmelCase : Dict ): _A = 0 return t def check_equivalence(__lowerCAmelCase : str , __lowerCAmelCase : Optional[int] , __lowerCAmelCase : Dict , __lowerCAmelCase : Optional[Any]={} ): with torch.no_grad(): _A = model(**__lowerCAmelCase , return_dict=__lowerCAmelCase , **__lowerCAmelCase ) _A = model(**__lowerCAmelCase , return_dict=__lowerCAmelCase , **__lowerCAmelCase ).to_tuple() def recursive_check(__lowerCAmelCase : int , __lowerCAmelCase : Dict ): if isinstance(__lowerCAmelCase , (List, Tuple) ): for tuple_iterable_value, dict_iterable_value in zip(__lowerCAmelCase , __lowerCAmelCase ): recursive_check(__lowerCAmelCase , __lowerCAmelCase ) elif isinstance(__lowerCAmelCase , __lowerCAmelCase ): for tuple_iterable_value, dict_iterable_value in zip( tuple_object.values() , dict_object.values() ): recursive_check(__lowerCAmelCase , __lowerCAmelCase ) elif tuple_object is None: return else: self.assertTrue( torch.allclose( set_nan_tensor_to_zero(__lowerCAmelCase ) , set_nan_tensor_to_zero(__lowerCAmelCase ) , atol=1E-5 ) , msg=( '''Tuple and dict output are not equal. Difference:''' f''' {torch.max(torch.abs(tuple_object - dict_object ) )}. Tuple has `nan`:''' f''' {torch.isnan(__lowerCAmelCase ).any()} and `inf`: {torch.isinf(__lowerCAmelCase )}. Dict has''' f''' `nan`: {torch.isnan(__lowerCAmelCase ).any()} and `inf`: {torch.isinf(__lowerCAmelCase )}.''' ) , ) recursive_check(__lowerCAmelCase , __lowerCAmelCase ) for model_class in self.all_model_classes: _A = model_class(__lowerCAmelCase ) model.to(__lowerCAmelCase ) model.eval() _A = self._prepare_for_class(__lowerCAmelCase , __lowerCAmelCase ) _A = self._prepare_for_class(__lowerCAmelCase , __lowerCAmelCase ) check_equivalence(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) _A = self._prepare_for_class(__lowerCAmelCase , __lowerCAmelCase , return_labels=__lowerCAmelCase ) _A = self._prepare_for_class(__lowerCAmelCase , __lowerCAmelCase , return_labels=__lowerCAmelCase ) check_equivalence(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) _A = self._prepare_for_class(__lowerCAmelCase , __lowerCAmelCase ) _A = self._prepare_for_class(__lowerCAmelCase , __lowerCAmelCase ) check_equivalence(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , {'''output_hidden_states''': True} ) _A = self._prepare_for_class(__lowerCAmelCase , __lowerCAmelCase , return_labels=__lowerCAmelCase ) _A = self._prepare_for_class(__lowerCAmelCase , __lowerCAmelCase , return_labels=__lowerCAmelCase ) check_equivalence(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , {'''output_hidden_states''': True} ) @require_torch class lowerCamelCase__ ( unittest.TestCase , _A): """simple docstring""" a__ : List[str] = (MaskFormerSwinBackbone,) if is_torch_available() else () a__ : Any = MaskFormerSwinConfig def snake_case_ ( self : Any ) -> Any: _A = MaskFormerSwinModelTester(self ) def snake_case_ ( self : List[Any] ) -> Tuple: _A , _A = self.model_tester.prepare_config_and_inputs_for_common() _A = inputs_dict['''pixel_values'''].shape[0] for backbone_class in self.all_model_classes: _A = backbone_class(__lowerCAmelCase ) backbone.to(__lowerCAmelCase ) backbone.eval() _A = backbone(**__lowerCAmelCase ) # Test default outputs and verify feature maps self.assertIsInstance(outputs.feature_maps , __lowerCAmelCase ) self.assertTrue(len(outputs.feature_maps ) == len(backbone.channels ) ) for 
feature_map, n_channels in zip(outputs.feature_maps , backbone.channels ): self.assertTrue(feature_map.shape[:2] , (batch_size, n_channels) ) self.assertIsNone(outputs.hidden_states ) self.assertIsNone(outputs.attentions ) # Test output_hidden_states=True _A = backbone(**__lowerCAmelCase , output_hidden_states=__lowerCAmelCase ) self.assertIsNotNone(outputs.hidden_states ) self.assertTrue(len(outputs.hidden_states ) , len(backbone.stage_names ) ) # We skip the stem layer for hidden_states, n_channels in zip(outputs.hidden_states[1:] , backbone.channels ): for hidden_state in hidden_states: # Hidden states are in the format (batch_size, (height * width), n_channels) _A , _A , _A = hidden_state.shape self.assertTrue((h_batch_size, h_n_channels) , (batch_size, n_channels) ) # Test output_attentions=True if self.has_attentions: _A = backbone(**__lowerCAmelCase , output_attentions=__lowerCAmelCase ) self.assertIsNotNone(outputs.attentions )
2
import json import pathlib import unittest import numpy as np from transformers.testing_utils import require_torch, require_vision, slow from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import DeformableDetrImageProcessor class lowerCamelCase__ ( unittest.TestCase): """simple docstring""" def __init__( self : Optional[int] , __lowerCAmelCase : List[Any] , __lowerCAmelCase : Dict=7 , __lowerCAmelCase : Tuple=3 , __lowerCAmelCase : int=30 , __lowerCAmelCase : Dict=4_00 , __lowerCAmelCase : Optional[Any]=True , __lowerCAmelCase : List[str]=None , __lowerCAmelCase : Dict=True , __lowerCAmelCase : Optional[Any]=[0.5, 0.5, 0.5] , __lowerCAmelCase : Dict=[0.5, 0.5, 0.5] , __lowerCAmelCase : List[str]=True , __lowerCAmelCase : List[str]=1 / 2_55 , __lowerCAmelCase : int=True , ) -> List[str]: # by setting size["longest_edge"] > max_resolution we're effectively not testing this :p _A = size if size is not None else {'''shortest_edge''': 18, '''longest_edge''': 13_33} _A = parent _A = batch_size _A = num_channels _A = min_resolution _A = max_resolution _A = do_resize _A = size _A = do_normalize _A = image_mean _A = image_std _A = do_rescale _A = rescale_factor _A = do_pad def snake_case_ ( self : Optional[int] ) -> Any: return { "do_resize": self.do_resize, "size": self.size, "do_normalize": self.do_normalize, "image_mean": self.image_mean, "image_std": self.image_std, "do_rescale": self.do_rescale, "rescale_factor": self.rescale_factor, "do_pad": self.do_pad, } def snake_case_ ( self : List[Any] , __lowerCAmelCase : Tuple , __lowerCAmelCase : str=False ) -> Dict: if not batched: _A = image_inputs[0] if isinstance(__lowerCAmelCase , Image.Image ): _A , _A = image.size else: _A , _A = image.shape[1], image.shape[2] if w < h: _A = int(self.size['''shortest_edge'''] * h / w ) _A = self.size['''shortest_edge'''] elif w > h: _A = self.size['''shortest_edge'''] _A = int(self.size['''shortest_edge'''] * w / h ) else: _A = self.size['''shortest_edge'''] _A = self.size['''shortest_edge'''] else: _A = [] for image in image_inputs: _A , _A = self.get_expected_values([image] ) expected_values.append((expected_height, expected_width) ) _A = max(__lowerCAmelCase , key=lambda __lowerCAmelCase : item[0] )[0] _A = max(__lowerCAmelCase , key=lambda __lowerCAmelCase : item[1] )[1] return expected_height, expected_width @require_torch @require_vision class lowerCamelCase__ ( _A , unittest.TestCase): """simple docstring""" a__ : Any = DeformableDetrImageProcessor if is_vision_available() else None def snake_case_ ( self : Optional[int] ) -> Any: _A = DeformableDetrImageProcessingTester(self ) @property def snake_case_ ( self : Union[str, Any] ) -> Dict: return self.image_processor_tester.prepare_image_processor_dict() def snake_case_ ( self : Optional[int] ) -> List[str]: _A = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(__lowerCAmelCase , '''image_mean''' ) ) self.assertTrue(hasattr(__lowerCAmelCase , '''image_std''' ) ) self.assertTrue(hasattr(__lowerCAmelCase , '''do_normalize''' ) ) self.assertTrue(hasattr(__lowerCAmelCase , '''do_resize''' ) ) self.assertTrue(hasattr(__lowerCAmelCase , '''do_rescale''' ) ) self.assertTrue(hasattr(__lowerCAmelCase , '''do_pad''' ) ) self.assertTrue(hasattr(__lowerCAmelCase , '''size''' ) ) def snake_case_ ( self : List[str] ) 
-> int: _A = self.image_processing_class.from_dict(self.image_processor_dict ) self.assertEqual(image_processor.size , {'''shortest_edge''': 18, '''longest_edge''': 13_33} ) self.assertEqual(image_processor.do_pad , __lowerCAmelCase ) _A = self.image_processing_class.from_dict( self.image_processor_dict , size=42 , max_size=84 , pad_and_return_pixel_mask=__lowerCAmelCase ) self.assertEqual(image_processor.size , {'''shortest_edge''': 42, '''longest_edge''': 84} ) self.assertEqual(image_processor.do_pad , __lowerCAmelCase ) def snake_case_ ( self : Any ) -> Union[str, Any]: pass def snake_case_ ( self : List[str] ) -> Optional[int]: # Initialize image_processing _A = self.image_processing_class(**self.image_processor_dict ) # create random PIL images _A = prepare_image_inputs(self.image_processor_tester , equal_resolution=__lowerCAmelCase ) for image in image_inputs: self.assertIsInstance(__lowerCAmelCase , Image.Image ) # Test not batched input _A = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values _A , _A = self.image_processor_tester.get_expected_values(__lowerCAmelCase ) self.assertEqual( encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , ) # Test batched _A , _A = self.image_processor_tester.get_expected_values(__lowerCAmelCase , batched=__lowerCAmelCase ) _A = image_processing(__lowerCAmelCase , return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, expected_height, expected_width, ) , ) def snake_case_ ( self : Tuple ) -> int: # Initialize image_processing _A = self.image_processing_class(**self.image_processor_dict ) # create random numpy tensors _A = prepare_image_inputs(self.image_processor_tester , equal_resolution=__lowerCAmelCase , numpify=__lowerCAmelCase ) for image in image_inputs: self.assertIsInstance(__lowerCAmelCase , np.ndarray ) # Test not batched input _A = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values _A , _A = self.image_processor_tester.get_expected_values(__lowerCAmelCase ) self.assertEqual( encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , ) # Test batched _A = image_processing(__lowerCAmelCase , return_tensors='''pt''' ).pixel_values _A , _A = self.image_processor_tester.get_expected_values(__lowerCAmelCase , batched=__lowerCAmelCase ) self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, expected_height, expected_width, ) , ) def snake_case_ ( self : Optional[Any] ) -> int: # Initialize image_processing _A = self.image_processing_class(**self.image_processor_dict ) # create random PyTorch tensors _A = prepare_image_inputs(self.image_processor_tester , equal_resolution=__lowerCAmelCase , torchify=__lowerCAmelCase ) for image in image_inputs: self.assertIsInstance(__lowerCAmelCase , torch.Tensor ) # Test not batched input _A = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values _A , _A = self.image_processor_tester.get_expected_values(__lowerCAmelCase ) self.assertEqual( encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , ) # Test batched _A = image_processing(__lowerCAmelCase , return_tensors='''pt''' ).pixel_values _A , _A = self.image_processor_tester.get_expected_values(__lowerCAmelCase , batched=__lowerCAmelCase ) self.assertEqual( encoded_images.shape 
, ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, expected_height, expected_width, ) , ) @slow def snake_case_ ( self : Optional[Any] ) -> Optional[int]: # prepare image and target _A = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' ) with open('''./tests/fixtures/tests_samples/COCO/coco_annotations.txt''' , '''r''' ) as f: _A = json.loads(f.read() ) _A = {'''image_id''': 3_97_69, '''annotations''': target} # encode them _A = DeformableDetrImageProcessor() _A = image_processing(images=__lowerCAmelCase , annotations=__lowerCAmelCase , return_tensors='''pt''' ) # verify pixel values _A = torch.Size([1, 3, 8_00, 10_66] ) self.assertEqual(encoding['''pixel_values'''].shape , __lowerCAmelCase ) _A = torch.tensor([0.2796, 0.3138, 0.3481] ) self.assertTrue(torch.allclose(encoding['''pixel_values'''][0, 0, 0, :3] , __lowerCAmelCase , atol=1E-4 ) ) # verify area _A = torch.tensor([5887.9600, 1_1250.2061, 48_9353.8438, 83_7122.7500, 14_7967.5156, 16_5732.3438] ) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''area'''] , __lowerCAmelCase ) ) # verify boxes _A = torch.Size([6, 4] ) self.assertEqual(encoding['''labels'''][0]['''boxes'''].shape , __lowerCAmelCase ) _A = torch.tensor([0.5503, 0.2765, 0.0604, 0.2215] ) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''boxes'''][0] , __lowerCAmelCase , atol=1E-3 ) ) # verify image_id _A = torch.tensor([3_97_69] ) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''image_id'''] , __lowerCAmelCase ) ) # verify is_crowd _A = torch.tensor([0, 0, 0, 0, 0, 0] ) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''iscrowd'''] , __lowerCAmelCase ) ) # verify class_labels _A = torch.tensor([75, 75, 63, 65, 17, 17] ) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''class_labels'''] , __lowerCAmelCase ) ) # verify orig_size _A = torch.tensor([4_80, 6_40] ) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''orig_size'''] , __lowerCAmelCase ) ) # verify size _A = torch.tensor([8_00, 10_66] ) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''size'''] , __lowerCAmelCase ) ) @slow def snake_case_ ( self : List[str] ) -> List[str]: # prepare image, target and masks_path _A = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' ) with open('''./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt''' , '''r''' ) as f: _A = json.loads(f.read() ) _A = {'''file_name''': '''000000039769.png''', '''image_id''': 3_97_69, '''segments_info''': target} _A = pathlib.Path('''./tests/fixtures/tests_samples/COCO/coco_panoptic''' ) # encode them _A = DeformableDetrImageProcessor(format='''coco_panoptic''' ) _A = image_processing(images=__lowerCAmelCase , annotations=__lowerCAmelCase , masks_path=__lowerCAmelCase , return_tensors='''pt''' ) # verify pixel values _A = torch.Size([1, 3, 8_00, 10_66] ) self.assertEqual(encoding['''pixel_values'''].shape , __lowerCAmelCase ) _A = torch.tensor([0.2796, 0.3138, 0.3481] ) self.assertTrue(torch.allclose(encoding['''pixel_values'''][0, 0, 0, :3] , __lowerCAmelCase , atol=1E-4 ) ) # verify area _A = torch.tensor([14_7979.6875, 16_5527.0469, 48_4638.5938, 1_1292.9375, 5879.6562, 7634.1147] ) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''area'''] , __lowerCAmelCase ) ) # verify boxes _A = torch.Size([6, 4] ) self.assertEqual(encoding['''labels'''][0]['''boxes'''].shape , __lowerCAmelCase ) _A = torch.tensor([0.2625, 0.5437, 0.4688, 0.8625] ) 
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''boxes'''][0] , __lowerCAmelCase , atol=1E-3 ) ) # verify image_id _A = torch.tensor([3_97_69] ) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''image_id'''] , __lowerCAmelCase ) ) # verify is_crowd _A = torch.tensor([0, 0, 0, 0, 0, 0] ) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''iscrowd'''] , __lowerCAmelCase ) ) # verify class_labels _A = torch.tensor([17, 17, 63, 75, 75, 93] ) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''class_labels'''] , __lowerCAmelCase ) ) # verify masks _A = 82_28_73 self.assertEqual(encoding['''labels'''][0]['''masks'''].sum().item() , __lowerCAmelCase ) # verify orig_size _A = torch.tensor([4_80, 6_40] ) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''orig_size'''] , __lowerCAmelCase ) ) # verify size _A = torch.tensor([8_00, 10_66] ) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''size'''] , __lowerCAmelCase ) )
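As a side note on the resize logic exercised by the tests above: a standalone sketch of the shortest-edge rule that `get_expected_values` implements (the helper name `expected_resize` and the asserted values are illustrative, mirroring the size={'shortest_edge': 18, 'longest_edge': 1333} config used in the tester):

def expected_resize(w: int, h: int, shortest_edge: int = 18) -> tuple[int, int]:
    # Scale so the shorter side equals `shortest_edge`, preserving aspect ratio.
    if w < h:
        return int(shortest_edge * h / w), shortest_edge  # (height, width)
    if w > h:
        return shortest_edge, int(shortest_edge * w / h)
    return shortest_edge, shortest_edge

assert expected_resize(30, 400) == (240, 18)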
2
1
import gc import unittest import numpy as np import torch import torch.nn.functional as F from transformers import ( ClapTextConfig, ClapTextModelWithProjection, RobertaTokenizer, SpeechTaHifiGan, SpeechTaHifiGanConfig, ) from diffusers import ( AudioLDMPipeline, AutoencoderKL, DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler, UNetaDConditionModel, ) from diffusers.utils import is_xformers_available, slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism from ..pipeline_params import TEXT_TO_AUDIO_BATCH_PARAMS, TEXT_TO_AUDIO_PARAMS from ..test_pipelines_common import PipelineTesterMixin enable_full_determinism() class lowerCamelCase__ ( _A , unittest.TestCase): """simple docstring""" a__ : Optional[Any] = AudioLDMPipeline a__ : Optional[Any] = TEXT_TO_AUDIO_PARAMS a__ : List[str] = TEXT_TO_AUDIO_BATCH_PARAMS a__ : int = frozenset( [ "num_inference_steps", "num_waveforms_per_prompt", "generator", "latents", "output_type", "return_dict", "callback", "callback_steps", ]) def snake_case_ ( self : Optional[Any] ) -> Dict: torch.manual_seed(0 ) _A = UNetaDConditionModel( block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''') , up_block_types=('''CrossAttnUpBlock2D''', '''UpBlock2D''') , cross_attention_dim=(32, 64) , class_embed_type='''simple_projection''' , projection_class_embeddings_input_dim=32 , class_embeddings_concat=__lowerCAmelCase , ) _A = DDIMScheduler( beta_start=0.0_0085 , beta_end=0.012 , beta_schedule='''scaled_linear''' , clip_sample=__lowerCAmelCase , set_alpha_to_one=__lowerCAmelCase , ) torch.manual_seed(0 ) _A = AutoencoderKL( block_out_channels=[32, 64] , in_channels=1 , out_channels=1 , down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''] , up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] , latent_channels=4 , ) torch.manual_seed(0 ) _A = ClapTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=10_00 , projection_dim=32 , ) _A = ClapTextModelWithProjection(__lowerCAmelCase ) _A = RobertaTokenizer.from_pretrained('''hf-internal-testing/tiny-random-roberta''' , model_max_length=77 ) _A = SpeechTaHifiGanConfig( model_in_dim=8 , sampling_rate=1_60_00 , upsample_initial_channel=16 , upsample_rates=[2, 2] , upsample_kernel_sizes=[4, 4] , resblock_kernel_sizes=[3, 7] , resblock_dilation_sizes=[[1, 3, 5], [1, 3, 5]] , normalize_before=__lowerCAmelCase , ) _A = SpeechTaHifiGan(__lowerCAmelCase ) _A = { '''unet''': unet, '''scheduler''': scheduler, '''vae''': vae, '''text_encoder''': text_encoder, '''tokenizer''': tokenizer, '''vocoder''': vocoder, } return components def snake_case_ ( self : Dict , __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : Tuple=0 ) -> Optional[Any]: if str(__lowerCAmelCase ).startswith('''mps''' ): _A = torch.manual_seed(__lowerCAmelCase ) else: _A = torch.Generator(device=__lowerCAmelCase ).manual_seed(__lowerCAmelCase ) _A = { '''prompt''': '''A hammer hitting a wooden surface''', '''generator''': generator, '''num_inference_steps''': 2, '''guidance_scale''': 6.0, } return inputs def snake_case_ ( self : List[str] ) -> int: _A = '''cpu''' # ensure determinism for the device-dependent torch.Generator _A = self.get_dummy_components() _A = AudioLDMPipeline(**__lowerCAmelCase ) _A = audioldm_pipe.to(__lowerCAmelCase ) 
audioldm_pipe.set_progress_bar_config(disable=__lowerCAmelCase ) _A = self.get_dummy_inputs(__lowerCAmelCase ) _A = audioldm_pipe(**__lowerCAmelCase ) _A = output.audios[0] assert audio.ndim == 1 assert len(__lowerCAmelCase ) == 2_56 _A = audio[:10] _A = np.array( [-0.0050, 0.0050, -0.0060, 0.0033, -0.0026, 0.0033, -0.0027, 0.0033, -0.0028, 0.0033] ) assert np.abs(audio_slice - expected_slice ).max() < 1E-2 def snake_case_ ( self : Optional[int] ) -> str: _A = self.get_dummy_components() _A = AudioLDMPipeline(**__lowerCAmelCase ) _A = audioldm_pipe.to(__lowerCAmelCase ) _A = audioldm_pipe.to(__lowerCAmelCase ) audioldm_pipe.set_progress_bar_config(disable=__lowerCAmelCase ) _A = self.get_dummy_inputs(__lowerCAmelCase ) _A = 3 * [inputs['''prompt''']] # forward _A = audioldm_pipe(**__lowerCAmelCase ) _A = output.audios[0] _A = self.get_dummy_inputs(__lowerCAmelCase ) _A = 3 * [inputs.pop('''prompt''' )] _A = audioldm_pipe.tokenizer( __lowerCAmelCase , padding='''max_length''' , max_length=audioldm_pipe.tokenizer.model_max_length , truncation=__lowerCAmelCase , return_tensors='''pt''' , ) _A = text_inputs['''input_ids'''].to(__lowerCAmelCase ) _A = audioldm_pipe.text_encoder( __lowerCAmelCase , ) _A = prompt_embeds.text_embeds # additional L_2 normalization over each hidden-state _A = F.normalize(__lowerCAmelCase , dim=-1 ) _A = prompt_embeds # forward _A = audioldm_pipe(**__lowerCAmelCase ) _A = output.audios[0] assert np.abs(audio_a - audio_a ).max() < 1E-2 def snake_case_ ( self : List[Any] ) -> Union[str, Any]: _A = self.get_dummy_components() _A = AudioLDMPipeline(**__lowerCAmelCase ) _A = audioldm_pipe.to(__lowerCAmelCase ) _A = audioldm_pipe.to(__lowerCAmelCase ) audioldm_pipe.set_progress_bar_config(disable=__lowerCAmelCase ) _A = self.get_dummy_inputs(__lowerCAmelCase ) _A = 3 * ['''this is a negative prompt'''] _A = negative_prompt _A = 3 * [inputs['''prompt''']] # forward _A = audioldm_pipe(**__lowerCAmelCase ) _A = output.audios[0] _A = self.get_dummy_inputs(__lowerCAmelCase ) _A = 3 * [inputs.pop('''prompt''' )] _A = [] for p in [prompt, negative_prompt]: _A = audioldm_pipe.tokenizer( __lowerCAmelCase , padding='''max_length''' , max_length=audioldm_pipe.tokenizer.model_max_length , truncation=__lowerCAmelCase , return_tensors='''pt''' , ) _A = text_inputs['''input_ids'''].to(__lowerCAmelCase ) _A = audioldm_pipe.text_encoder( __lowerCAmelCase , ) _A = text_embeds.text_embeds # additional L_2 normalization over each hidden-state _A = F.normalize(__lowerCAmelCase , dim=-1 ) embeds.append(__lowerCAmelCase ) _A , _A = embeds # forward _A = audioldm_pipe(**__lowerCAmelCase ) _A = output.audios[0] assert np.abs(audio_a - audio_a ).max() < 1E-2 def snake_case_ ( self : Tuple ) -> Dict: _A = '''cpu''' # ensure determinism for the device-dependent torch.Generator _A = self.get_dummy_components() _A = PNDMScheduler(skip_prk_steps=__lowerCAmelCase ) _A = AudioLDMPipeline(**__lowerCAmelCase ) _A = audioldm_pipe.to(__lowerCAmelCase ) audioldm_pipe.set_progress_bar_config(disable=__lowerCAmelCase ) _A = self.get_dummy_inputs(__lowerCAmelCase ) _A = '''egg cracking''' _A = audioldm_pipe(**__lowerCAmelCase , negative_prompt=__lowerCAmelCase ) _A = output.audios[0] assert audio.ndim == 1 assert len(__lowerCAmelCase ) == 2_56 _A = audio[:10] _A = np.array( [-0.0051, 0.0050, -0.0060, 0.0034, -0.0026, 0.0033, -0.0027, 0.0033, -0.0028, 0.0032] ) assert np.abs(audio_slice - expected_slice ).max() < 1E-2 def snake_case_ ( self : Union[str, Any] ) -> Optional[Any]: _A = '''cpu''' # ensure determinism 
for the device-dependent torch.Generator _A = self.get_dummy_components() _A = PNDMScheduler(skip_prk_steps=__lowerCAmelCase ) _A = AudioLDMPipeline(**__lowerCAmelCase ) _A = audioldm_pipe.to(__lowerCAmelCase ) audioldm_pipe.set_progress_bar_config(disable=__lowerCAmelCase ) _A = '''A hammer hitting a wooden surface''' # test num_waveforms_per_prompt=1 (default) _A = audioldm_pipe(__lowerCAmelCase , num_inference_steps=2 ).audios assert audios.shape == (1, 2_56) # test num_waveforms_per_prompt=1 (default) for batch of prompts _A = 2 _A = audioldm_pipe([prompt] * batch_size , num_inference_steps=2 ).audios assert audios.shape == (batch_size, 2_56) # test num_waveforms_per_prompt for single prompt _A = 2 _A = audioldm_pipe(__lowerCAmelCase , num_inference_steps=2 , num_waveforms_per_prompt=__lowerCAmelCase ).audios assert audios.shape == (num_waveforms_per_prompt, 2_56) # test num_waveforms_per_prompt for batch of prompts _A = 2 _A = audioldm_pipe( [prompt] * batch_size , num_inference_steps=2 , num_waveforms_per_prompt=__lowerCAmelCase ).audios assert audios.shape == (batch_size * num_waveforms_per_prompt, 2_56) def snake_case_ ( self : Tuple ) -> Any: _A = '''cpu''' # ensure determinism for the device-dependent torch.Generator _A = self.get_dummy_components() _A = AudioLDMPipeline(**__lowerCAmelCase ) _A = audioldm_pipe.to(__lowerCAmelCase ) audioldm_pipe.set_progress_bar_config(disable=__lowerCAmelCase ) _A = audioldm_pipe.vocoder.config.sampling_rate _A = self.get_dummy_inputs(__lowerCAmelCase ) _A = audioldm_pipe(audio_length_in_s=0.016 , **__lowerCAmelCase ) _A = output.audios[0] assert audio.ndim == 1 assert len(__lowerCAmelCase ) / vocoder_sampling_rate == 0.016 _A = audioldm_pipe(audio_length_in_s=0.032 , **__lowerCAmelCase ) _A = output.audios[0] assert audio.ndim == 1 assert len(__lowerCAmelCase ) / vocoder_sampling_rate == 0.032 def snake_case_ ( self : str ) -> List[Any]: _A = self.get_dummy_components() _A = AudioLDMPipeline(**__lowerCAmelCase ) _A = audioldm_pipe.to(__lowerCAmelCase ) audioldm_pipe.set_progress_bar_config(disable=__lowerCAmelCase ) _A = ['''hey'''] _A = audioldm_pipe(__lowerCAmelCase , num_inference_steps=1 ) _A = output.audios.shape assert audio_shape == (1, 2_56) _A = audioldm_pipe.vocoder.config config.model_in_dim *= 2 _A = SpeechTaHifiGan(__lowerCAmelCase ).to(__lowerCAmelCase ) _A = audioldm_pipe(__lowerCAmelCase , num_inference_steps=1 ) _A = output.audios.shape # waveform shape is unchanged, we just have 2x the number of mel channels in the spectrogram assert audio_shape == (1, 2_56) def snake_case_ ( self : Optional[int] ) -> int: self._test_attention_slicing_forward_pass(test_mean_pixel_difference=__lowerCAmelCase ) def snake_case_ ( self : Dict ) -> Optional[Any]: self._test_inference_batch_single_identical(test_mean_pixel_difference=__lowerCAmelCase ) @unittest.skipIf( torch_device != '''cuda''' or not is_xformers_available() , reason='''XFormers attention is only available with CUDA and `xformers` installed''' , ) def snake_case_ ( self : int ) -> List[str]: self._test_xformers_attention_forwardGenerator_pass(test_mean_pixel_difference=__lowerCAmelCase ) @slow class lowerCamelCase__ ( unittest.TestCase): """simple docstring""" def snake_case_ ( self : Optional[int] ) -> Any: super().tearDown() gc.collect() torch.cuda.empty_cache() def snake_case_ ( self : List[Any] , __lowerCAmelCase : Tuple , __lowerCAmelCase : List[Any]="cpu" , __lowerCAmelCase : List[Any]=torch.floataa , __lowerCAmelCase : List[Any]=0 ) -> int: _A = 
torch.Generator(device=__lowerCAmelCase ).manual_seed(__lowerCAmelCase ) _A = np.random.RandomState(__lowerCAmelCase ).standard_normal((1, 8, 1_28, 16) ) _A = torch.from_numpy(__lowerCAmelCase ).to(device=__lowerCAmelCase , dtype=__lowerCAmelCase ) _A = { '''prompt''': '''A hammer hitting a wooden surface''', '''latents''': latents, '''generator''': generator, '''num_inference_steps''': 3, '''guidance_scale''': 2.5, } return inputs def snake_case_ ( self : Optional[Any] ) -> List[Any]: _A = AudioLDMPipeline.from_pretrained('''cvssp/audioldm''' ) _A = audioldm_pipe.to(__lowerCAmelCase ) audioldm_pipe.set_progress_bar_config(disable=__lowerCAmelCase ) _A = self.get_inputs(__lowerCAmelCase ) _A = 25 _A = audioldm_pipe(**__lowerCAmelCase ).audios[0] assert audio.ndim == 1 assert len(__lowerCAmelCase ) == 8_19_20 _A = audio[7_72_30:7_72_40] _A = np.array( [-0.4884, -0.4607, 0.0023, 0.5007, 0.5896, 0.5151, 0.3813, -0.0208, -0.3687, -0.4315] ) _A = np.abs(expected_slice - audio_slice ).max() assert max_diff < 1E-2 def snake_case_ ( self : Union[str, Any] ) -> Optional[Any]: _A = AudioLDMPipeline.from_pretrained('''cvssp/audioldm''' ) _A = LMSDiscreteScheduler.from_config(audioldm_pipe.scheduler.config ) _A = audioldm_pipe.to(__lowerCAmelCase ) audioldm_pipe.set_progress_bar_config(disable=__lowerCAmelCase ) _A = self.get_inputs(__lowerCAmelCase ) _A = audioldm_pipe(**__lowerCAmelCase ).audios[0] assert audio.ndim == 1 assert len(__lowerCAmelCase ) == 8_19_20 _A = audio[2_77_80:2_77_90] _A = np.array([-0.2131, -0.0873, -0.0124, -0.0189, 0.0569, 0.1373, 0.1883, 0.2886, 0.3297, 0.2212] ) _A = np.abs(expected_slice - audio_slice ).max() assert max_diff < 3E-2
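For orientation, a minimal sketch of driving the pipeline these tests exercise (the checkpoint mirrors the slow tests' cvssp/audioldm; the step count here is illustrative, not the tested value):

import torch
from diffusers import AudioLDMPipeline

pipe = AudioLDMPipeline.from_pretrained("cvssp/audioldm")
pipe = pipe.to("cuda" if torch.cuda.is_available() else "cpu")
audio = pipe("A hammer hitting a wooden surface", num_inference_steps=10).audios[0]
# `audio` is a 1-D numpy waveform at the vocoder's sampling rate (16 kHz for this model).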
2
red = 0  # The first color of the flag.
white = 1  # The second color of the flag.
blue = 2  # The third color of the flag.
colors = (red, white, blue)


def dutch_national_flag_sort(sequence: list) -> list:
    """Sort a sequence of 0s, 1s and 2s in a single pass (Dijkstra's three-way partition)."""
    if not sequence:
        return []
    if len(sequence) == 1:
        return list(sequence)
    low = 0
    high = len(sequence) - 1
    mid = 0
    while mid <= high:
        if sequence[mid] == colors[0]:
            sequence[low], sequence[mid] = sequence[mid], sequence[low]
            low += 1
            mid += 1
        elif sequence[mid] == colors[1]:
            mid += 1
        elif sequence[mid] == colors[2]:
            sequence[mid], sequence[high] = sequence[high], sequence[mid]
            high -= 1
        else:
            msg = f"The elements inside the sequence must contain only {colors} values"
            raise ValueError(msg)
    return sequence


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    user_input = input("Enter numbers separated by commas:\n").strip()
    unsorted = [int(item.strip()) for item in user_input.split(",")]
    print(f"{dutch_national_flag_sort(unsorted)}")
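A quick usage sketch of the one-pass partition above:

values = [2, 0, 1, 2, 1, 0, 0, 2]
print(dutch_national_flag_sort(values))  # [0, 0, 0, 1, 1, 2, 2, 2]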
2
1
def count_divisors(n: int) -> int:
    """Count the divisors of n from its prime factorization."""
    n_divisors = 1
    i = 2
    while i * i <= n:
        multiplicity = 0
        while n % i == 0:
            n //= i
            multiplicity += 1
        n_divisors *= multiplicity + 1
        i += 1
    if n > 1:
        n_divisors *= 2
    return n_divisors


def solution() -> int:
    """Return the first triangle number with more than 500 divisors."""
    i = 1
    t_num = 1
    while True:
        i += 1
        t_num += i
        if count_divisors(t_num) > 500:
            break
    return t_num


if __name__ == "__main__":
    print(solution())
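A worked check of the divisor count: 28 = 2^2 * 7, so it has (2 + 1) * (1 + 1) = 6 divisors, and 36 = 2^2 * 3^2 has 3 * 3 = 9:

assert count_divisors(28) == 6  # 1, 2, 4, 7, 14, 28
assert count_divisors(36) == 9  # 1, 2, 3, 4, 6, 9, 12, 18, 36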
2
import itertools
import math


def is_prime(number: int) -> bool:
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False

    # All prime numbers are of the form 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True


def prime_generator():
    """Yield prime numbers in increasing order."""
    num = 2
    while True:
        if is_prime(num):
            yield num
        num += 1


def solution(nth: int = 10_001) -> int:
    """Return the nth prime number."""
    return next(itertools.islice(prime_generator(), nth - 1, nth))


if __name__ == "__main__":
    print(f"{solution() = }")
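A small sanity check of the generator-based solution:

assert solution(6) == 13  # primes: 2, 3, 5, 7, 11, 13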
2
1
import string


def decrypt(message: str) -> None:
    """Brute-force a Caesar cipher by printing the plaintext for every possible key."""
    for key in range(len(string.ascii_uppercase)):
        translated = ""
        for symbol in message:
            if symbol in string.ascii_uppercase:
                num = string.ascii_uppercase.find(symbol)
                num = num - key
                if num < 0:
                    num = num + len(string.ascii_uppercase)
                translated = translated + string.ascii_uppercase[num]
            else:
                translated = translated + symbol
        print(f"Decryption using Key #{key}: {translated}")


def main() -> None:
    message = input("Encrypted message: ")
    message = message.upper()
    decrypt(message)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    main()
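For example, a message shifted by one key is recovered on the matching line of output; the loop simply prints all 26 candidates:

decrypt("UIF RVJDL CSPXO GPY")
# ...
# Decryption using Key #1: THE QUICK BROWN FOX
# ...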
2
import collections import os import re from pathlib import Path UpperCAmelCase_ = """src/transformers""" # Matches is_xxx_available() UpperCAmelCase_ = re.compile(r"""is\_([a-z_]*)_available()""") # Catches a one-line _import_struct = {xxx} UpperCAmelCase_ = re.compile(r"""^_import_structure\s+=\s+\{([^\}]+)\}""") # Catches a line with a key-values pattern: "bla": ["foo", "bar"] UpperCAmelCase_ = re.compile(r"""\s+\"\S*\":\s+\[([^\]]*)\]""") # Catches a line if not is_foo_available UpperCAmelCase_ = re.compile(r"""^\s*if\s+not\s+is\_[a-z_]*\_available\(\)""") # Catches a line _import_struct["bla"].append("foo") UpperCAmelCase_ = re.compile(r"""^\s*_import_structure\[\"\S*\"\]\.append\(\"(\S*)\"\)""") # Catches a line _import_struct["bla"].extend(["foo", "bar"]) or _import_struct["bla"] = ["foo", "bar"] UpperCAmelCase_ = re.compile(r"""^\s*_import_structure\[\S*\](?:\.extend\(|\s*=\s+)\[([^\]]*)\]""") # Catches a line with an object between quotes and a comma: "MyModel", UpperCAmelCase_ = re.compile(r"""^\s+\"([^\"]+)\",""") # Catches a line with objects between brackets only: ["foo", "bar"], UpperCAmelCase_ = re.compile(r"""^\s+\[([^\]]+)\]""") # Catches a line with from foo import bar, bla, boo UpperCAmelCase_ = re.compile(r"""\s+from\s+\S*\s+import\s+([^\(\s].*)\n""") # Catches a line with try: UpperCAmelCase_ = re.compile(r"""^\s*try:""") # Catches a line with else: UpperCAmelCase_ = re.compile(r"""^\s*else:""") def SCREAMING_SNAKE_CASE_ ( _snake_case :Optional[int] ) -> Any: if _re_test_backend.search(_snake_case ) is None: return None _A = [b[0] for b in _re_backend.findall(_snake_case )] backends.sort() return "_and_".join(_snake_case ) def SCREAMING_SNAKE_CASE_ ( _snake_case :Any ) -> Any: with open(_snake_case , '''r''' , encoding='''utf-8''' , newline='''\n''' ) as f: _A = f.readlines() _A = 0 while line_index < len(_snake_case ) and not lines[line_index].startswith('''_import_structure = {''' ): line_index += 1 # If this is a traditional init, just return. if line_index >= len(_snake_case ): return None # First grab the objects without a specific backend in _import_structure _A = [] while not lines[line_index].startswith('''if TYPE_CHECKING''' ) and find_backend(lines[line_index] ) is None: _A = lines[line_index] # If we have everything on a single line, let's deal with it. if _re_one_line_import_struct.search(_snake_case ): _A = _re_one_line_import_struct.search(_snake_case ).groups()[0] _A = re.findall(r'''\[([^\]]+)\]''' , _snake_case ) for imp in imports: objects.extend([obj[1:-1] for obj in imp.split(''', ''' )] ) line_index += 1 continue _A = _re_import_struct_key_value.search(_snake_case ) if single_line_import_search is not None: _A = [obj[1:-1] for obj in single_line_import_search.groups()[0].split(''', ''' ) if len(_snake_case ) > 0] objects.extend(_snake_case ) elif line.startswith(''' ''' * 8 + '''"''' ): objects.append(line[9:-3] ) line_index += 1 _A = {'''none''': objects} # Let's continue with backend-specific objects in _import_structure while not lines[line_index].startswith('''if TYPE_CHECKING''' ): # If the line is an if not is_backend_available, we grab all objects associated. 
_A = find_backend(lines[line_index] ) # Check if the backend declaration is inside a try block: if _re_try.search(lines[line_index - 1] ) is None: _A = None if backend is not None: line_index += 1 # Scroll until we hit the else block of try-except-else while _re_else.search(lines[line_index] ) is None: line_index += 1 line_index += 1 _A = [] # Until we unindent, add backend objects to the list while len(lines[line_index] ) <= 1 or lines[line_index].startswith(''' ''' * 4 ): _A = lines[line_index] if _re_import_struct_add_one.search(_snake_case ) is not None: objects.append(_re_import_struct_add_one.search(_snake_case ).groups()[0] ) elif _re_import_struct_add_many.search(_snake_case ) is not None: _A = _re_import_struct_add_many.search(_snake_case ).groups()[0].split(''', ''' ) _A = [obj[1:-1] for obj in imports if len(_snake_case ) > 0] objects.extend(_snake_case ) elif _re_between_brackets.search(_snake_case ) is not None: _A = _re_between_brackets.search(_snake_case ).groups()[0].split(''', ''' ) _A = [obj[1:-1] for obj in imports if len(_snake_case ) > 0] objects.extend(_snake_case ) elif _re_quote_object.search(_snake_case ) is not None: objects.append(_re_quote_object.search(_snake_case ).groups()[0] ) elif line.startswith(''' ''' * 8 + '''"''' ): objects.append(line[9:-3] ) elif line.startswith(''' ''' * 12 + '''"''' ): objects.append(line[13:-3] ) line_index += 1 _A = objects else: line_index += 1 # At this stage we are in the TYPE_CHECKING part, first grab the objects without a specific backend _A = [] while ( line_index < len(_snake_case ) and find_backend(lines[line_index] ) is None and not lines[line_index].startswith('''else''' ) ): _A = lines[line_index] _A = _re_import.search(_snake_case ) if single_line_import_search is not None: objects.extend(single_line_import_search.groups()[0].split(''', ''' ) ) elif line.startswith(''' ''' * 8 ): objects.append(line[8:-2] ) line_index += 1 _A = {'''none''': objects} # Let's continue with backend-specific objects while line_index < len(_snake_case ): # If the line is an if is_backend_available, we grab all objects associated. 
_A = find_backend(lines[line_index] ) # Check if the backend declaration is inside a try block: if _re_try.search(lines[line_index - 1] ) is None: _A = None if backend is not None: line_index += 1 # Scroll until we hit the else block of try-except-else while _re_else.search(lines[line_index] ) is None: line_index += 1 line_index += 1 _A = [] # Until we unindent, add backend objects to the list while len(lines[line_index] ) <= 1 or lines[line_index].startswith(''' ''' * 8 ): _A = lines[line_index] _A = _re_import.search(_snake_case ) if single_line_import_search is not None: objects.extend(single_line_import_search.groups()[0].split(''', ''' ) ) elif line.startswith(''' ''' * 12 ): objects.append(line[12:-2] ) line_index += 1 _A = objects else: line_index += 1 return import_dict_objects, type_hint_objects def SCREAMING_SNAKE_CASE_ ( _snake_case :List[str] , _snake_case :Dict ) -> Any: def find_duplicates(_snake_case :Any ): return [k for k, v in collections.Counter(_snake_case ).items() if v > 1] if list(import_dict_objects.keys() ) != list(type_hint_objects.keys() ): return ["Both sides of the init do not have the same backends!"] _A = [] for key in import_dict_objects.keys(): _A = find_duplicates(import_dict_objects[key] ) if duplicate_imports: errors.append(F'''Duplicate _import_structure definitions for: {duplicate_imports}''' ) _A = find_duplicates(type_hint_objects[key] ) if duplicate_type_hints: errors.append(F'''Duplicate TYPE_CHECKING objects for: {duplicate_type_hints}''' ) if sorted(set(import_dict_objects[key] ) ) != sorted(set(type_hint_objects[key] ) ): _A = '''base imports''' if key == '''none''' else F'''{key} backend''' errors.append(F'''Differences for {name}:''' ) for a in type_hint_objects[key]: if a not in import_dict_objects[key]: errors.append(F''' {a} in TYPE_HINT but not in _import_structure.''' ) for a in import_dict_objects[key]: if a not in type_hint_objects[key]: errors.append(F''' {a} in _import_structure but not in TYPE_HINT.''' ) return errors def SCREAMING_SNAKE_CASE_ ( ) -> int: _A = [] for root, _, files in os.walk(_snake_case ): if "__init__.py" in files: _A = os.path.join(_snake_case , '''__init__.py''' ) _A = parse_init(_snake_case ) if objects is not None: _A = analyze_results(*_snake_case ) if len(_snake_case ) > 0: _A = F'''Problem in {fname}, both halves do not define the same objects.\n{errors[0]}''' failures.append('''\n'''.join(_snake_case ) ) if len(_snake_case ) > 0: raise ValueError('''\n\n'''.join(_snake_case ) ) def SCREAMING_SNAKE_CASE_ ( ) -> Optional[int]: _A = [] for path, directories, files in os.walk(_snake_case ): for folder in directories: # Ignore private modules if folder.startswith('''_''' ): directories.remove(_snake_case ) continue # Ignore leftovers from branches (empty folders apart from pycache) if len(list((Path(_snake_case ) / folder).glob('''*.py''' ) ) ) == 0: continue _A = str((Path(_snake_case ) / folder).relative_to(_snake_case ) ) _A = short_path.replace(os.path.sep , '''.''' ) submodules.append(_snake_case ) for fname in files: if fname == "__init__.py": continue _A = str((Path(_snake_case ) / fname).relative_to(_snake_case ) ) _A = short_path.replace('''.py''' , '''''' ).replace(os.path.sep , '''.''' ) if len(submodule.split('''.''' ) ) == 1: submodules.append(_snake_case ) return submodules UpperCAmelCase_ = [ """convert_pytorch_checkpoint_to_tf2""", """modeling_flax_pytorch_utils""", """models.esm.openfold_utils""", ] def SCREAMING_SNAKE_CASE_ ( ) -> List[str]: # This is to make sure the transformers module 
imported is the one in the repo. from transformers.utils import direct_transformers_import _A = direct_transformers_import(_snake_case ) _A = set(transformers._import_structure.keys() ) # This contains all the base keys of the _import_structure object defined in the init, but if the user is missing # some optional dependencies, they may not have all of them. Thus we read the init to read all additions and # (potentiall re-) add them. with open(os.path.join(_snake_case , '''__init__.py''' ) , '''r''' ) as f: _A = f.read() import_structure_keys.update(set(re.findall(r'''import_structure\[\"([^\"]*)\"\]''' , _snake_case ) ) ) _A = [ module for module in get_transformers_submodules() if module not in IGNORE_SUBMODULES and module not in import_structure_keys ] if len(_snake_case ) > 0: _A = '''\n'''.join(F'''- {module}''' for module in module_not_registered ) raise ValueError( '''The following submodules are not properly registed in the main init of Transformers:\n''' F'''{list_of_modules}\n''' '''Make sure they appear somewhere in the keys of `_import_structure` with an empty list as value.''' ) if __name__ == "__main__": check_all_inits() check_submodules()
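Assuming the first helper above corresponds to transformers' find_backend (identifiers in this dump are mangled), its contract can be sketched as follows; the second assert relies on the fact that ordinary lines never match the `if not is_xxx_available()` guard:

# find_backend returns the backend name guarded by an
# `if not is_xxx_available():` line, or None for any other line.
assert find_backend("    if not is_torch_available():") == "torch"
assert find_backend("_import_structure = {}") is None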
2
1
import functools
import gc
import inspect

import torch

from .imports import is_npu_available, is_xpu_available


def release_memory(*objects):
    """Release the given objects and empty the active device's cache."""
    if not isinstance(objects, list):
        objects = list(objects)
    for i in range(len(objects)):
        objects[i] = None
    gc.collect()
    if is_xpu_available():
        torch.xpu.empty_cache()
    elif is_npu_available():
        torch.npu.empty_cache()
    else:
        torch.cuda.empty_cache()
    return objects


def should_reduce_batch_size(exception: Exception) -> bool:
    """Check whether `exception` is one of the known out-of-memory errors."""
    _statements = [
        "CUDA out of memory.",  # CUDA OOM
        "cuDNN error: CUDNN_STATUS_NOT_SUPPORTED.",  # CUDNN SNAFU
        "DefaultCPUAllocator: can't allocate memory",  # CPU OOM
    ]
    if isinstance(exception, RuntimeError) and len(exception.args) == 1:
        return any(err in exception.args[0] for err in _statements)
    return False


def find_executable_batch_size(function: callable = None, starting_batch_size: int = 128):
    """Decorator that retries `function`, halving its batch size after every OOM error."""
    if function is None:
        return functools.partial(find_executable_batch_size, starting_batch_size=starting_batch_size)

    batch_size = starting_batch_size

    def decorator(*args, **kwargs):
        nonlocal batch_size
        gc.collect()
        if is_xpu_available():
            torch.xpu.empty_cache()
        elif is_npu_available():
            torch.npu.empty_cache()
        else:
            torch.cuda.empty_cache()
        params = list(inspect.signature(function).parameters.keys())
        # Guard against user error
        if len(params) < (len(args) + 1):
            arg_str = ", ".join([f"{arg}={value}" for arg, value in zip(params[1:], args[1:])])
            raise TypeError(
                f"Batch size was passed into `{function.__name__}` as the first argument when called."
                f"Remove this as the decorator already does so: `{function.__name__}({arg_str})`"
            )
        while True:
            if batch_size == 0:
                raise RuntimeError("No executable batch size found, reached zero.")
            try:
                return function(batch_size, *args, **kwargs)
            except Exception as e:
                if should_reduce_batch_size(e):
                    gc.collect()
                    if is_xpu_available():
                        torch.xpu.empty_cache()
                    elif is_npu_available():
                        torch.npu.empty_cache()
                    else:
                        torch.cuda.empty_cache()
                    batch_size //= 2
                else:
                    raise

    return decorator
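A typical usage sketch of the decorator above (the training function body is a placeholder):

@find_executable_batch_size(starting_batch_size=128)
def train(batch_size, model):
    # The decorator injects `batch_size` as the first argument and halves it
    # on every OOM until the call succeeds (or batch_size reaches 0).
    ...

# train(model)  # note: called WITHOUT batch_size; the decorator supplies it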
2
from typing import List, Union

from ..utils import (
    add_end_docstrings,
    is_tf_available,
    is_torch_available,
    is_vision_available,
    logging,
    requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline


if is_vision_available():
    from PIL import Image

    from ..image_utils import load_image

if is_tf_available():
    from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_VISION_2_SEQ_MAPPING

if is_torch_available():
    import torch

    from ..models.auto.modeling_auto import MODEL_FOR_VISION_2_SEQ_MAPPING

logger = logging.get_logger(__name__)


@add_end_docstrings(PIPELINE_INIT_ARGS)
class ImageToTextPipeline(Pipeline):
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        requires_backends(self, "vision")
        self.check_model_type(
            TF_MODEL_FOR_VISION_2_SEQ_MAPPING if self.framework == "tf" else MODEL_FOR_VISION_2_SEQ_MAPPING
        )

    def _sanitize_parameters(self, max_new_tokens=None, generate_kwargs=None, prompt=None):
        preprocess_params = {}
        forward_kwargs = {}
        if prompt is not None:
            preprocess_params["prompt"] = prompt
        if generate_kwargs is not None:
            forward_kwargs["generate_kwargs"] = generate_kwargs
        if max_new_tokens is not None:
            if "generate_kwargs" not in forward_kwargs:
                forward_kwargs["generate_kwargs"] = {}
            if "max_new_tokens" in forward_kwargs["generate_kwargs"]:
                raise ValueError(
                    "'max_new_tokens' is defined twice, once in 'generate_kwargs' and once as a direct parameter,"
                    " please use only one"
                )
            forward_kwargs["generate_kwargs"]["max_new_tokens"] = max_new_tokens
        return preprocess_params, forward_kwargs, {}

    def __call__(self, images: Union[str, List[str], "Image.Image", List["Image.Image"]], **kwargs):
        return super().__call__(images, **kwargs)

    def preprocess(self, image, prompt=None):
        image = load_image(image)

        if prompt is not None:
            if not isinstance(prompt, str):
                raise ValueError(
                    f"Received an invalid text input, got - {type(prompt)} - but expected a single string. "
                    "Note also that one single text can be provided for conditional image to text generation."
                )

            model_type = self.model.config.model_type

            if model_type == "git":
                model_inputs = self.image_processor(images=image, return_tensors=self.framework)
                input_ids = self.tokenizer(text=prompt, add_special_tokens=False).input_ids
                input_ids = [self.tokenizer.cls_token_id] + input_ids
                input_ids = torch.tensor(input_ids).unsqueeze(0)
                model_inputs.update({"input_ids": input_ids})
            elif model_type == "pix2struct":
                model_inputs = self.image_processor(images=image, header_text=prompt, return_tensors=self.framework)
            elif model_type != "vision-encoder-decoder":
                # vision-encoder-decoder does not support conditional generation
                model_inputs = self.image_processor(images=image, return_tensors=self.framework)
                text_inputs = self.tokenizer(prompt, return_tensors=self.framework)
                model_inputs.update(text_inputs)
            else:
                raise ValueError(f"Model type {model_type} does not support conditional text generation")
        else:
            model_inputs = self.image_processor(images=image, return_tensors=self.framework)

        if self.model.config.model_type == "git" and prompt is None:
            model_inputs["input_ids"] = None

        return model_inputs

    def _forward(self, model_inputs, generate_kwargs=None):
        # Git model sets `model_inputs["input_ids"] = None` in `preprocess` (when `prompt=None`). In batch mode, the
        # pipeline will group them into a list of `None`, which fail `_forward`. Avoid this by checking it first.
        if (
            "input_ids" in model_inputs
            and isinstance(model_inputs["input_ids"], list)
            and all(x is None for x in model_inputs["input_ids"])
        ):
            model_inputs["input_ids"] = None

        if generate_kwargs is None:
            generate_kwargs = {}
        # FIXME: We need to pop here due to a difference in how `generation.py` and `generation.tf_utils.py`
        # parse inputs. In the Tensorflow version, `generate` raises an error if we don't use `input_ids` whereas
        # the PyTorch version matches it with `self.model.main_input_name` or `self.model.encoder.main_input_name`
        # in the `_prepare_model_inputs` method.
        inputs = model_inputs.pop(self.model.main_input_name)
        model_outputs = self.model.generate(inputs, **model_inputs, **generate_kwargs)
        return model_outputs

    def postprocess(self, model_outputs):
        records = []
        for output_ids in model_outputs:
            record = {
                "generated_text": self.tokenizer.decode(
                    output_ids,
                    skip_special_tokens=True,
                )
            }
            records.append(record)
        return records
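A minimal usage sketch of this pipeline via the high-level factory (the checkpoint name is illustrative; any vision-to-seq model works):

from transformers import pipeline

captioner = pipeline("image-to-text", model="ydshieh/vit-gpt2-coco-en")
result = captioner("https://huggingface.co/datasets/Narsil/image_dummy/raw/main/parrots.png")
# result is a list of dicts like [{'generated_text': '...'}]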
2
1
from math import isqrt, log2


def calculate_prime_numbers(max_number: int) -> list[int]:
    """Return all primes below `max_number` using a sieve of Eratosthenes."""
    is_prime = [True] * max_number
    for i in range(2, isqrt(max_number - 1) + 1):
        if is_prime[i]:
            for j in range(i**2, max_number, i):
                is_prime[j] = False
    return [i for i in range(2, max_number) if is_prime[i]]


def solution(base: int = 800_800, degree: int = 800_800) -> int:
    """Count hybrid integers p^q * q^p <= base^degree with p, q distinct primes."""
    upper_bound = degree * log2(base)
    max_prime = int(upper_bound)
    prime_numbers = calculate_prime_numbers(max_prime)

    hybrid_integers_count = 0
    left = 0
    right = len(prime_numbers) - 1
    while left < right:
        while (
            prime_numbers[right] * log2(prime_numbers[left])
            + prime_numbers[left] * log2(prime_numbers[right])
            > upper_bound
        ):
            right -= 1
        hybrid_integers_count += right - left
        left += 1

    return hybrid_integers_count


if __name__ == "__main__":
    print(f"{solution() = }")
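A worked check of the two-pointer count: for base 800 and degree 1, the only hybrid integers are 2^3 * 3^2 = 72 and 2^5 * 5^2 = 800, both <= 800^1:

assert solution(800, 1) == 2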
2
import requests
from bs4 import BeautifulSoup


def stock_price(symbol: str = "AAPL") -> str:
    """Scrape the current price of `symbol` from Yahoo Finance."""
    url = f"https://in.finance.yahoo.com/quote/{symbol}?s={symbol}"
    soup = BeautifulSoup(requests.get(url).text, "html.parser")
    class_ = "My(6px) Pos(r) smartphone_Mt(6px)"
    return soup.find("div", class_=class_).find("span").text


if __name__ == "__main__":
    for symbol in "AAPL AMZN IBM GOOG MSFT ORCL".split():
        print(f"Current {symbol:<4} stock price is {stock_price(symbol):>8}")
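Note that this scraper is brittle: it hard-codes Yahoo's current CSS class and raises AttributeError the moment the markup changes. A hypothetical defensive variant (the name safe_stock_price and the "N/A" fallback are illustrative):

def safe_stock_price(symbol: str = "AAPL") -> str:
    # Same request as above, but tolerate markup changes instead of crashing
    # when the expected <div> is missing.
    url = f"https://in.finance.yahoo.com/quote/{symbol}?s={symbol}"
    soup = BeautifulSoup(requests.get(url).text, "html.parser")
    node = soup.find("div", class_="My(6px) Pos(r) smartphone_Mt(6px)")
    return node.find("span").text if node else "N/A"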
2
1
def solution(n: int = 600_851_475_143) -> int:
    """Return the largest prime factor of n."""
    try:
        n = int(n)
    except (TypeError, ValueError):
        raise TypeError("Parameter n must be int or castable to int.")
    if n <= 0:
        raise ValueError("Parameter n must be greater than or equal to one.")
    max_number = 1
    i = 2
    while i * i <= n:
        while n % i == 0:
            max_number = i
            n //= i
        i += 1
    if n > 1:
        max_number = n
    return int(max_number)


if __name__ == "__main__":
    print(f"{solution() = }")
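A quick sanity check with a small composite:

assert solution(13195) == 29  # 13195 = 5 * 7 * 13 * 29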
2
from graphs.minimum_spanning_tree_kruskal import kruskal


def test_kruskal_successful_result():
    num_nodes = 9
    edges = [
        [0, 1, 4],
        [0, 7, 8],
        [1, 2, 8],
        [7, 8, 7],
        [7, 6, 1],
        [2, 8, 2],
        [8, 6, 6],
        [2, 3, 7],
        [2, 5, 4],
        [6, 5, 2],
        [3, 5, 14],
        [3, 4, 9],
        [5, 4, 10],
        [1, 7, 11],
    ]

    result = kruskal(num_nodes, edges)

    expected = [
        [7, 6, 1],
        [2, 8, 2],
        [6, 5, 2],
        [0, 1, 4],
        [2, 5, 4],
        [2, 3, 7],
        [0, 7, 8],
        [3, 4, 9],
    ]

    assert sorted(result) == sorted(expected)
2
1
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_flax_available,
    is_sentencepiece_available,
    is_tf_available,
    is_tokenizers_available,
    is_torch_available,
)


if is_sentencepiece_available():
    from ..t5.tokenization_t5 import T5Tokenizer
else:
    from ...utils.dummy_sentencepiece_objects import T5Tokenizer

MT5Tokenizer = T5Tokenizer

if is_tokenizers_available():
    from ..t5.tokenization_t5_fast import T5TokenizerFast
else:
    from ...utils.dummy_tokenizers_objects import T5TokenizerFast

MT5TokenizerFast = T5TokenizerFast

_import_structure = {"configuration_mt5": ["MT5Config", "MT5OnnxConfig"]}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_mt5"] = [
        "MT5EncoderModel",
        "MT5ForConditionalGeneration",
        "MT5ForQuestionAnswering",
        "MT5Model",
        "MT5PreTrainedModel",
        "MT5Stack",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_mt5"] = ["TFMT5EncoderModel", "TFMT5ForConditionalGeneration", "TFMT5Model"]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_mt5"] = [
        "FlaxMT5EncoderModel",
        "FlaxMT5ForConditionalGeneration",
        "FlaxMT5Model",
    ]


if TYPE_CHECKING:
    from .configuration_mt5 import MT5Config, MT5OnnxConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_mt5 import (
            MT5EncoderModel,
            MT5ForConditionalGeneration,
            MT5ForQuestionAnswering,
            MT5Model,
            MT5PreTrainedModel,
            MT5Stack,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_mt5 import TFMT5EncoderModel, TFMT5ForConditionalGeneration, TFMT5Model

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_mt5 import FlaxMT5EncoderModel, FlaxMT5ForConditionalGeneration, FlaxMT5Model

else:
    import sys

    sys.modules[__name__] = _LazyModule(
        __name__,
        globals()["__file__"],
        _import_structure,
        extra_objects={"MT5Tokenizer": MT5Tokenizer, "MT5TokenizerFast": MT5TokenizerFast},
        module_spec=__spec__,
    )
2
def sum_of_divisors(input_num: int) -> int:
    """Return the sum of the proper divisors of a positive integer."""
    if not isinstance(input_num, int):
        raise ValueError("Input must be an integer")
    if input_num <= 0:
        raise ValueError("Input must be positive")
    return sum(
        divisor for divisor in range(1, input_num // 2 + 1) if input_num % divisor == 0
    )


if __name__ == "__main__":
    import doctest

    doctest.testmod()
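Since the range stops at input_num // 2, this sums proper divisors only; perfect numbers therefore map to themselves:

assert sum_of_divisors(6) == 6    # 1 + 2 + 3
assert sum_of_divisors(28) == 28  # 1 + 2 + 4 + 7 + 14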
2
1
import logging import os from dataclasses import dataclass, field from typing import Dict, Optional import datasets import numpy as np import tensorflow as tf from transformers import ( AutoConfig, AutoTokenizer, EvalPrediction, HfArgumentParser, PreTrainedTokenizer, TFAutoModelForSequenceClassification, TFTrainer, TFTrainingArguments, ) from transformers.utils import logging as hf_logging hf_logging.set_verbosity_info() hf_logging.enable_default_handler() hf_logging.enable_explicit_format() def SCREAMING_SNAKE_CASE_ ( _snake_case :str , _snake_case :str , _snake_case :str , _snake_case :PreTrainedTokenizer , _snake_case :int , _snake_case :Optional[int] = None , ) -> Union[str, Any]: _A = {} if train_file is not None: _A = [train_file] if eval_file is not None: _A = [eval_file] if test_file is not None: _A = [test_file] _A = datasets.load_dataset('''csv''' , data_files=_snake_case ) _A = list(ds[list(files.keys() )[0]].features.keys() ) _A = features_name.pop(_snake_case ) _A = list(set(ds[list(files.keys() )[0]][label_name] ) ) _A = {label: i for i, label in enumerate(_snake_case )} _A = tokenizer.model_input_names _A = {} if len(_snake_case ) == 1: for k in files.keys(): _A = ds[k].map( lambda _snake_case : tokenizer.batch_encode_plus( example[features_name[0]] , truncation=_snake_case , max_length=_snake_case , padding='''max_length''' ) , batched=_snake_case , ) elif len(_snake_case ) == 2: for k in files.keys(): _A = ds[k].map( lambda _snake_case : tokenizer.batch_encode_plus( (example[features_name[0]], example[features_name[1]]) , truncation=_snake_case , max_length=_snake_case , padding='''max_length''' , ) , batched=_snake_case , ) def gen_train(): for ex in transformed_ds[datasets.Split.TRAIN]: _A = {k: v for k, v in ex.items() if k in input_names} _A = labelaid[ex[label_name]] yield (d, label) def gen_val(): for ex in transformed_ds[datasets.Split.VALIDATION]: _A = {k: v for k, v in ex.items() if k in input_names} _A = labelaid[ex[label_name]] yield (d, label) def gen_test(): for ex in transformed_ds[datasets.Split.TEST]: _A = {k: v for k, v in ex.items() if k in input_names} _A = labelaid[ex[label_name]] yield (d, label) _A = ( tf.data.Dataset.from_generator( _snake_case , ({k: tf.intaa for k in input_names}, tf.intaa) , ({k: tf.TensorShape([None] ) for k in input_names}, tf.TensorShape([] )) , ) if datasets.Split.TRAIN in transformed_ds else None ) if train_ds is not None: _A = train_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.TRAIN] ) ) ) _A = ( tf.data.Dataset.from_generator( _snake_case , ({k: tf.intaa for k in input_names}, tf.intaa) , ({k: tf.TensorShape([None] ) for k in input_names}, tf.TensorShape([] )) , ) if datasets.Split.VALIDATION in transformed_ds else None ) if val_ds is not None: _A = val_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.VALIDATION] ) ) ) _A = ( tf.data.Dataset.from_generator( _snake_case , ({k: tf.intaa for k in input_names}, tf.intaa) , ({k: tf.TensorShape([None] ) for k in input_names}, tf.TensorShape([] )) , ) if datasets.Split.TEST in transformed_ds else None ) if test_ds is not None: _A = test_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.TEST] ) ) ) return train_ds, val_ds, test_ds, labelaid UpperCAmelCase_ = logging.getLogger(__name__) @dataclass class lowerCamelCase__ : """simple docstring""" a__ : int = field(metadata={"help": "Which column contains the label"}) a__ : str = field(default=_A , metadata={"help": "The path of the training file"}) a__ : 
Optional[str] = field(default=_A , metadata={"help": "The path of the development file"}) a__ : Optional[str] = field(default=_A , metadata={"help": "The path of the test file"}) a__ : int = field( default=128 , metadata={ "help": ( "The maximum total input sequence length after tokenization. Sequences longer " "than this will be truncated, sequences shorter will be padded." ) } , ) a__ : bool = field( default=_A , metadata={"help": "Overwrite the cached training and evaluation sets"}) @dataclass class lowerCamelCase__ : """simple docstring""" a__ : str = field( metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}) a__ : Optional[str] = field( default=_A , metadata={"help": "Pretrained config name or path if not the same as model_name"}) a__ : Optional[str] = field( default=_A , metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}) a__ : bool = field(default=_A , metadata={"help": "Set this flag to use fast tokenization."}) # If you want to tweak more attributes on your tokenizer, you should do it in a distinct script, # or just modify its tokenizer_config.json. a__ : Optional[str] = field( default=_A , metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"} , ) def SCREAMING_SNAKE_CASE_ ( ) -> List[str]: # See all possible arguments in src/transformers/training_args.py # or by passing the --help flag to this script. # We now keep distinct sets of args, for a cleaner separation of concerns. _A = HfArgumentParser((ModelArguments, DataTrainingArguments, TFTrainingArguments) ) _A , _A , _A = parser.parse_args_into_dataclasses() if ( os.path.exists(training_args.output_dir ) and os.listdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir ): raise ValueError( F'''Output directory ({training_args.output_dir}) already exists and is not empty. Use''' ''' --overwrite_output_dir to overcome.''' ) # Setup logging logging.basicConfig( format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''' , datefmt='''%m/%d/%Y %H:%M:%S''' , level=logging.INFO , ) logger.info( F'''n_replicas: {training_args.n_replicas}, distributed training: {bool(training_args.n_replicas > 1 )}, ''' F'''16-bits training: {training_args.fpaa}''' ) logger.info(F'''Training/evaluation parameters {training_args}''' ) # Load pretrained model and tokenizer # # Distributed training: # The .from_pretrained methods guarantee that only one local process can concurrently # download model & vocab. 
_A = AutoTokenizer.from_pretrained( model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , ) _A , _A , _A , _A = get_tfds( train_file=data_args.train_file , eval_file=data_args.dev_file , test_file=data_args.test_file , tokenizer=_snake_case , label_column_id=data_args.label_column_id , max_seq_length=data_args.max_seq_length , ) _A = AutoConfig.from_pretrained( model_args.config_name if model_args.config_name else model_args.model_name_or_path , num_labels=len(_snake_case ) , labelaid=_snake_case , idalabel={id: label for label, id in labelaid.items()} , finetuning_task='''text-classification''' , cache_dir=model_args.cache_dir , ) with training_args.strategy.scope(): _A = TFAutoModelForSequenceClassification.from_pretrained( model_args.model_name_or_path , from_pt=bool('''.bin''' in model_args.model_name_or_path ) , config=_snake_case , cache_dir=model_args.cache_dir , ) def compute_metrics(_snake_case :EvalPrediction ) -> Dict: _A = np.argmax(p.predictions , axis=1 ) return {"acc": (preds == p.label_ids).mean()} # Initialize our Trainer _A = TFTrainer( model=_snake_case , args=_snake_case , train_dataset=_snake_case , eval_dataset=_snake_case , compute_metrics=_snake_case , ) # Training if training_args.do_train: trainer.train() trainer.save_model() tokenizer.save_pretrained(training_args.output_dir ) # Evaluation _A = {} if training_args.do_eval: logger.info('''*** Evaluate ***''' ) _A = trainer.evaluate() _A = os.path.join(training_args.output_dir , '''eval_results.txt''' ) with open(_snake_case , '''w''' ) as writer: logger.info('''***** Eval results *****''' ) for key, value in result.items(): logger.info(F''' {key} = {value}''' ) writer.write(F'''{key} = {value}\n''' ) results.update(_snake_case ) return results if __name__ == "__main__": main()
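The script above expects CSV inputs whose columns are one or two text columns plus a label column selected via --label_column_id. A sketch of a compatible file (the file name and column names are illustrative):

import csv

with open("train.csv", "w", newline="") as f:
    writer = csv.writer(f)
    writer.writerow(["sentence", "label"])
    writer.writerow(["the movie was great", "positive"])
    writer.writerow(["the movie was terrible", "negative"])

# It would then be run along the lines of (script name illustrative):
#   python run_tf_text_classification.py --train_file train.csv --label_column_id 1 ...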
2
# Number of characters in the alphabet, used as the hash base
alphabet_size = 256
# Modulus to hash a string
modulus = 1_000_003


def rabin_karp(pattern: str, text: str) -> bool:
    """Return True if `pattern` occurs in `text`, using rolling hashes."""
    p_len = len(pattern)
    t_len = len(text)
    if p_len > t_len:
        return False

    p_hash = 0
    text_hash = 0
    modulus_power = 1

    # Calculating the hash of pattern and substring of text
    for i in range(p_len):
        p_hash = (ord(pattern[i]) + p_hash * alphabet_size) % modulus
        text_hash = (ord(text[i]) + text_hash * alphabet_size) % modulus
        if i == p_len - 1:
            continue
        modulus_power = (modulus_power * alphabet_size) % modulus

    for i in range(0, t_len - p_len + 1):
        if text_hash == p_hash and text[i : i + p_len] == pattern:
            return True
        if i == t_len - p_len:
            continue
        # Calculate the https://en.wikipedia.org/wiki/Rolling_hash
        text_hash = (
            (text_hash - ord(text[i]) * modulus_power) * alphabet_size
            + ord(text[i + p_len])
        ) % modulus
    return False


def test_rabin_karp() -> None:
    # Test 1)
    pattern = "abc1abc12"
    text1 = "alskfjaldsabc1abc1abc12k23adsfabcabc"
    text2 = "alskfjaldsk23adsfabcabc"
    assert rabin_karp(pattern, text1) and not rabin_karp(pattern, text2)

    # Test 2)
    pattern = "ABABX"
    text = "ABABZABABYABABX"
    assert rabin_karp(pattern, text)

    # Test 3)
    pattern = "AAAB"
    text = "ABAAAAAB"
    assert rabin_karp(pattern, text)

    # Test 4)
    pattern = "abcdabcy"
    text = "abcxabcdabxabcdabcdabcy"
    assert rabin_karp(pattern, text)

    # Test 5)
    pattern = "Lü"
    text = "Lüsai"
    assert rabin_karp(pattern, text)
    pattern = "Lue"
    assert not rabin_karp(pattern, text)

    print("Success.")


if __name__ == "__main__":
    test_rabin_karp()
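The rolling update drops the leading character and appends the next one in O(1). A worked sketch with the constants above, for a pattern of length 2 (so modulus_power is 256^1):

h_ab = (ord("b") + ord("a") * 256) % 1_000_003
h_bc = ((h_ab - ord("a") * 256) * 256 + ord("c")) % 1_000_003
assert h_bc == (ord("c") + ord("b") * 256) % 1_000_003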
2
1
from dataclasses import dataclass from typing import List, Optional, Union import numpy as np import PIL import torch from transformers import CLIPImageProcessor, CLIPVisionModel from ...models import PriorTransformer from ...pipelines import DiffusionPipeline from ...schedulers import HeunDiscreteScheduler from ...utils import ( BaseOutput, is_accelerate_available, logging, randn_tensor, replace_example_docstring, ) from .renderer import ShapERenderer UpperCAmelCase_ = logging.get_logger(__name__) # pylint: disable=invalid-name UpperCAmelCase_ = """ Examples: ```py >>> from PIL import Image >>> import torch >>> from diffusers import DiffusionPipeline >>> from diffusers.utils import export_to_gif, load_image >>> device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\") >>> repo = \"openai/shap-e-img2img\" >>> pipe = DiffusionPipeline.from_pretrained(repo, torch_dtype=torch.float16) >>> pipe = pipe.to(device) >>> guidance_scale = 3.0 >>> image_url = \"https://hf.co/datasets/diffusers/docs-images/resolve/main/shap-e/corgi.png\" >>> image = load_image(image_url).convert(\"RGB\") >>> images = pipe( ... image, ... guidance_scale=guidance_scale, ... num_inference_steps=64, ... frame_size=256, ... ).images >>> gif_path = export_to_gif(images[0], \"corgi_3d.gif\") ``` """ @dataclass class lowerCamelCase__ ( _A): """simple docstring""" a__ : Union[PIL.Image.Image, np.ndarray] class lowerCamelCase__ ( _A): """simple docstring""" def __init__( self : int , __lowerCAmelCase : PriorTransformer , __lowerCAmelCase : CLIPVisionModel , __lowerCAmelCase : CLIPImageProcessor , __lowerCAmelCase : HeunDiscreteScheduler , __lowerCAmelCase : ShapERenderer , ) -> List[str]: super().__init__() self.register_modules( prior=__lowerCAmelCase , image_encoder=__lowerCAmelCase , image_processor=__lowerCAmelCase , scheduler=__lowerCAmelCase , renderer=__lowerCAmelCase , ) def snake_case_ ( self : Optional[int] , __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : List[Any] , __lowerCAmelCase : Optional[int] , __lowerCAmelCase : List[str] , __lowerCAmelCase : str , __lowerCAmelCase : Tuple ) -> int: if latents is None: _A = randn_tensor(__lowerCAmelCase , generator=__lowerCAmelCase , device=__lowerCAmelCase , dtype=__lowerCAmelCase ) else: if latents.shape != shape: raise ValueError(f'''Unexpected latents shape, got {latents.shape}, expected {shape}''' ) _A = latents.to(__lowerCAmelCase ) _A = latents * scheduler.init_noise_sigma return latents def snake_case_ ( self : str , __lowerCAmelCase : List[Any]=0 ) -> Optional[Any]: if is_accelerate_available(): from accelerate import cpu_offload else: raise ImportError('''Please install accelerate via `pip install accelerate`''' ) _A = torch.device(f'''cuda:{gpu_id}''' ) _A = [self.image_encoder, self.prior] for cpu_offloaded_model in models: if cpu_offloaded_model is not None: cpu_offload(__lowerCAmelCase , __lowerCAmelCase ) @property def snake_case_ ( self : int ) -> List[Any]: if self.device != torch.device('''meta''' ) or not hasattr(self.image_encoder , '''_hf_hook''' ): return self.device for module in self.image_encoder.modules(): if ( hasattr(__lowerCAmelCase , '''_hf_hook''' ) and hasattr(module._hf_hook , '''execution_device''' ) and module._hf_hook.execution_device is not None ): return torch.device(module._hf_hook.execution_device ) return self.device def snake_case_ ( self : List[str] , __lowerCAmelCase : str , __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : List[Any] , __lowerCAmelCase : Dict , ) -> Optional[int]: if 
isinstance(__lowerCAmelCase , __lowerCAmelCase ) and isinstance(image[0] , torch.Tensor ): _A = torch.cat(__lowerCAmelCase , axis=0 ) if image[0].ndim == 4 else torch.stack(__lowerCAmelCase , axis=0 ) if not isinstance(__lowerCAmelCase , torch.Tensor ): _A = self.image_processor(__lowerCAmelCase , return_tensors='''pt''' ).pixel_values[0].unsqueeze(0 ) _A = image.to(dtype=self.image_encoder.dtype , device=__lowerCAmelCase ) _A = self.image_encoder(__lowerCAmelCase )['''last_hidden_state'''] _A = image_embeds[:, 1:, :].contiguous() # batch_size, dim, 256 _A = image_embeds.repeat_interleave(__lowerCAmelCase , dim=0 ) if do_classifier_free_guidance: _A = torch.zeros_like(__lowerCAmelCase ) # For classifier free guidance, we need to do two forward passes. # Here we concatenate the unconditional and text embeddings into a single batch # to avoid doing two forward passes _A = torch.cat([negative_image_embeds, image_embeds] ) return image_embeds @torch.no_grad() @replace_example_docstring(__lowerCAmelCase ) def __call__( self : str , __lowerCAmelCase : Union[PIL.Image.Image, List[PIL.Image.Image]] , __lowerCAmelCase : int = 1 , __lowerCAmelCase : int = 25 , __lowerCAmelCase : Optional[Union[torch.Generator, List[torch.Generator]]] = None , __lowerCAmelCase : Optional[torch.FloatTensor] = None , __lowerCAmelCase : float = 4.0 , __lowerCAmelCase : int = 64 , __lowerCAmelCase : Optional[str] = "pil" , __lowerCAmelCase : bool = True , ) -> Tuple: if isinstance(__lowerCAmelCase , PIL.Image.Image ): _A = 1 elif isinstance(__lowerCAmelCase , torch.Tensor ): _A = image.shape[0] elif isinstance(__lowerCAmelCase , __lowerCAmelCase ) and isinstance(image[0] , (torch.Tensor, PIL.Image.Image) ): _A = len(__lowerCAmelCase ) else: raise ValueError( f'''`image` has to be of type `PIL.Image.Image`, `torch.Tensor`, `List[PIL.Image.Image]` or `List[torch.Tensor]` but is {type(__lowerCAmelCase )}''' ) _A = self._execution_device _A = batch_size * num_images_per_prompt _A = guidance_scale > 1.0 _A = self._encode_image(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) # prior self.scheduler.set_timesteps(__lowerCAmelCase , device=__lowerCAmelCase ) _A = self.scheduler.timesteps _A = self.prior.config.num_embeddings _A = self.prior.config.embedding_dim _A = self.prepare_latents( (batch_size, num_embeddings * embedding_dim) , image_embeds.dtype , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , self.scheduler , ) # YiYi notes: for testing only to match ldm, we can directly create a latents with desired shape: batch_size, num_embeddings, embedding_dim _A = latents.reshape(latents.shape[0] , __lowerCAmelCase , __lowerCAmelCase ) for i, t in enumerate(self.progress_bar(__lowerCAmelCase ) ): # expand the latents if we are doing classifier free guidance _A = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents _A = self.scheduler.scale_model_input(__lowerCAmelCase , __lowerCAmelCase ) _A = self.prior( __lowerCAmelCase , timestep=__lowerCAmelCase , proj_embedding=__lowerCAmelCase , ).predicted_image_embedding # remove the variance _A , _A = noise_pred.split( scaled_model_input.shape[2] , dim=2 ) # batch_size, num_embeddings, embedding_dim if do_classifier_free_guidance: _A , _A = noise_pred.chunk(2 ) _A = noise_pred_uncond + guidance_scale * (noise_pred - noise_pred_uncond) _A = self.scheduler.step( __lowerCAmelCase , timestep=__lowerCAmelCase , sample=__lowerCAmelCase , ).prev_sample if output_type == "latent": return ShapEPipelineOutput(images=__lowerCAmelCase ) 
_A = [] for i, latent in enumerate(__lowerCAmelCase ): _A = self.renderer.decode( latent[None, :] , __lowerCAmelCase , size=__lowerCAmelCase , ray_batch_size=40_96 , n_coarse_samples=64 , n_fine_samples=1_28 , ) images.append(__lowerCAmelCase ) _A = torch.stack(__lowerCAmelCase ) if output_type not in ["np", "pil"]: raise ValueError(f'''Only the output types `pil` and `np` are supported, not output_type={output_type}''' ) _A = images.cpu().numpy() if output_type == "pil": _A = [self.numpy_to_pil(__lowerCAmelCase ) for image in images] # Offload last model to CPU if hasattr(self , '''final_offload_hook''' ) and self.final_offload_hook is not None: self.final_offload_hook.offload() if not return_dict: return (images,) return ShapEPipelineOutput(images=__lowerCAmelCase )
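# The denoising loop above duplicates the batch into unconditional and
# conditional halves and recombines the prior's prediction with
# classifier-free guidance. A stand-alone sketch of that combination step;
# the shapes are made-up for illustration.
import torch

def apply_cfg(noise_pred: torch.Tensor, guidance_scale: float) -> torch.Tensor:
    # first half of the batch is unconditional, second half conditional
    uncond, cond = noise_pred.chunk(2)
    return uncond + guidance_scale * (cond - uncond)

pred = torch.randn(4, 1024, 1)  # 2 samples, duplicated -> batch of 4
print(apply_cfg(pred, guidance_scale=3.0).shape)  # torch.Size([2, 1024, 1])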
2
import json import os from typing import Dict, List, Optional, Tuple from ...tokenization_utils import PreTrainedTokenizer from ...utils import logging UpperCAmelCase_ = logging.get_logger(__name__) UpperCAmelCase_ = { """vocab_file""": """vocab.json""", """tokenizer_config_file""": """tokenizer_config.json""", """merges_file""": """merges.txt""", } UpperCAmelCase_ = { """vocab_file""": { """facebook/s2t-wav2vec2-large-en-de""": ( """https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/vocab.json""" ), }, """tokenizer_config_file""": { """facebook/s2t-wav2vec2-large-en-de""": ( """https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/tokenizer_config.json""" ), }, """merges_file""": { """facebook/s2t-wav2vec2-large-en-de""": ( """https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/merges.txt""" ), }, } UpperCAmelCase_ = """</w>""" UpperCAmelCase_ = """@@ """ def SCREAMING_SNAKE_CASE_ ( _snake_case :Optional[Any] ) -> List[str]: _A = set() _A = word[0] for char in word[1:]: pairs.add((prev_char, char) ) _A = char return pairs # Speech2Text2 has no max input length UpperCAmelCase_ = {"""facebook/s2t-wav2vec2-large-en-de""": 1_0_2_4} class lowerCamelCase__ ( _A): """simple docstring""" a__ : Dict = VOCAB_FILES_NAMES a__ : str = PRETRAINED_VOCAB_FILES_MAP a__ : Optional[Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES a__ : List[Any] = ["input_ids", "attention_mask"] def __init__( self : Optional[Any] , __lowerCAmelCase : Optional[int] , __lowerCAmelCase : List[str]="<s>" , __lowerCAmelCase : Tuple="<pad>" , __lowerCAmelCase : Optional[Any]="</s>" , __lowerCAmelCase : Dict="<unk>" , __lowerCAmelCase : Union[str, Any]=False , __lowerCAmelCase : Optional[int]=None , **__lowerCAmelCase : str , ) -> Dict: super().__init__( unk_token=__lowerCAmelCase , bos_token=__lowerCAmelCase , eos_token=__lowerCAmelCase , pad_token=__lowerCAmelCase , do_lower_case=__lowerCAmelCase , **__lowerCAmelCase , ) _A = do_lower_case with open(__lowerCAmelCase , encoding='''utf-8''' ) as vocab_handle: _A = json.load(__lowerCAmelCase ) _A = {v: k for k, v in self.encoder.items()} if merges_file is None: logger.info(f'''No merges files provided. 
{self.__class__.__name__} can only be used for decoding.''' ) _A = None _A = None else: with open(__lowerCAmelCase , encoding='''utf-8''' ) as merges_handle: _A = merges_handle.read().split('''\n''' )[:-1] _A = [tuple(merge.split()[:2] ) for merge in merges] _A = dict(zip(__lowerCAmelCase , range(len(__lowerCAmelCase ) ) ) ) _A = {} @property def snake_case_ ( self : List[str] ) -> int: return len(self.decoder ) def snake_case_ ( self : Dict ) -> Dict: return dict(self.encoder , **self.added_tokens_encoder ) def snake_case_ ( self : Union[str, Any] , __lowerCAmelCase : Any ) -> Union[str, Any]: _A = tuple(token[:-1] ) + (token[-1] + BPE_TOKEN_MERGES,) if token in self.cache: return self.cache[token] _A = get_pairs(__lowerCAmelCase ) if not pairs: return token while True: _A = min(__lowerCAmelCase , key=lambda __lowerCAmelCase : self.bpe_ranks.get(__lowerCAmelCase , float('''inf''' ) ) ) if bigram not in self.bpe_ranks: break _A , _A = bigram _A = [] _A = 0 while i < len(__lowerCAmelCase ): try: _A = word.index(__lowerCAmelCase , __lowerCAmelCase ) except ValueError: new_word.extend(word[i:] ) break else: new_word.extend(word[i:j] ) _A = j if word[i] == first and i < len(__lowerCAmelCase ) - 1 and word[i + 1] == second: new_word.append(first + second ) i += 2 else: new_word.append(word[i] ) i += 1 _A = tuple(__lowerCAmelCase ) _A = new_word if len(__lowerCAmelCase ) == 1: break else: _A = get_pairs(__lowerCAmelCase ) _A = ''' '''.join(__lowerCAmelCase ) if word == "\n " + BPE_TOKEN_MERGES: _A = '''\n''' + BPE_TOKEN_MERGES if word.endswith(__lowerCAmelCase ): _A = word.replace(__lowerCAmelCase , '''''' ) _A = word.replace(''' ''' , __lowerCAmelCase ) _A = word return word def snake_case_ ( self : Optional[Any] , __lowerCAmelCase : Tuple ) -> Optional[int]: if self.bpe_ranks is None: raise ValueError( '''This tokenizer was instantiated without a `merges.txt` file, so''' ''' that it can only be used for decoding, not for encoding.''' '''Make sure to provide `merges.txt` file at instantiation to enable ''' '''encoding.''' ) if self.do_lower_case: _A = text.lower() _A = text.split() _A = [] for token in text: if token: split_tokens.extend(list(self.bpe(__lowerCAmelCase ).split(''' ''' ) ) ) return split_tokens def snake_case_ ( self : List[Any] , __lowerCAmelCase : str ) -> int: return self.encoder.get(__lowerCAmelCase , self.encoder.get(self.unk_token ) ) def snake_case_ ( self : str , __lowerCAmelCase : int ) -> str: _A = self.decoder.get(__lowerCAmelCase , self.unk_token ) return result def snake_case_ ( self : List[str] , __lowerCAmelCase : List[str] ) -> str: _A = ''' '''.join(__lowerCAmelCase ) # make sure @@ tokens are concatenated _A = ''''''.join(string.split(__lowerCAmelCase ) ) return string def snake_case_ ( self : Tuple , __lowerCAmelCase : str , __lowerCAmelCase : Optional[str] = None ) -> Tuple[str]: if not os.path.isdir(__lowerCAmelCase ): logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' ) return _A = os.path.join( __lowerCAmelCase , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] ) _A = os.path.join( __lowerCAmelCase , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''merges_file'''] ) with open(__lowerCAmelCase , '''w''' , encoding='''utf-8''' ) as f: f.write(json.dumps(self.encoder , indent=2 , sort_keys=__lowerCAmelCase , ensure_ascii=__lowerCAmelCase ) + '''\n''' ) _A = 0 if self.bpe_ranks is None: return (vocab_file,) with open(__lowerCAmelCase , '''w''' , 
encoding='''utf-8''' ) as writer: for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda kv : kv[1] ): if index != token_index: logger.warning( f'''Saving vocabulary to {merges_file}: BPE merge indices are not consecutive.''' ''' Please check that the tokenizer is not corrupted!''' ) _A = token_index writer.write(''' '''.join(__lowerCAmelCase ) + '''\n''' ) index += 1 return (vocab_file, merges_file)
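# The tokenizer above applies byte-pair-encoding merges ranked by the merges
# file. A toy walk-through with a made-up two-entry merge table (not the
# facebook/s2t-wav2vec2 merges file):
def toy_bpe(word: str, ranks: dict) -> list:
    symbols = tuple(word)
    while len(symbols) > 1:
        pairs = set(zip(symbols, symbols[1:]))
        best = min(pairs, key=lambda kv: ranks.get(kv, float("inf")))
        if best not in ranks:
            break  # no rankable pair left to merge
        first, second = best
        merged, i = [], 0
        while i < len(symbols):
            if i < len(symbols) - 1 and (symbols[i], symbols[i + 1]) == best:
                merged.append(first + second)
                i += 2
            else:
                merged.append(symbols[i])
                i += 1
        symbols = tuple(merged)
    return list(symbols)

print(toy_bpe("lower", {("l", "o"): 0, ("lo", "w"): 1}))  # ['low', 'e', 'r']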
2
1
from maths.prime_factors import prime_factors def SCREAMING_SNAKE_CASE_ ( _snake_case :int ) -> int: if not isinstance(_snake_case , _snake_case ): _A = F'''Input value of [number={number}] must be an integer''' raise TypeError(_snake_case ) if number < 1: raise ValueError('''Input must be a positive integer''' ) return -1 if len(prime_factors(_snake_case ) ) % 2 else 1 if __name__ == "__main__": import doctest doctest.testmod()
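# The helper above is the Liouville function: lambda(n) = (-1) ** Omega(n),
# where Omega counts prime factors with multiplicity. A quick independent check:
def omega(n: int) -> int:
    count, d = 0, 2
    while d * d <= n:
        while n % d == 0:
            n //= d
            count += 1
        d += 1
    return count + (1 if n > 1 else 0)

for n in (1, 2, 4, 12):
    print(n, (-1) ** omega(n))  # 1 -> 1, 2 -> -1, 4 -> 1, 12 -> -1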
2
from __future__ import annotations from sys import maxsize from typing import Generic, TypeVar UpperCAmelCase_ = TypeVar("""T""") def SCREAMING_SNAKE_CASE_ ( _snake_case :int ) -> int: return (position - 1) // 2 def SCREAMING_SNAKE_CASE_ ( _snake_case :int ) -> int: return (2 * position) + 1 def SCREAMING_SNAKE_CASE_ ( _snake_case :int ) -> int: return (2 * position) + 2 class lowerCamelCase__ ( Generic[T]): """simple docstring""" def __init__( self : Optional[int] ) -> None: _A = [] _A = {} _A = 0 def __len__( self : str ) -> int: return self.elements def __repr__( self : Optional[int] ) -> str: return str(self.heap ) def snake_case_ ( self : str ) -> bool: # Check if the priority queue is empty return self.elements == 0 def snake_case_ ( self : Optional[int] , __lowerCAmelCase : T , __lowerCAmelCase : int ) -> None: # Add an element with given priority to the queue self.heap.append((elem, weight) ) _A = self.elements self.elements += 1 self._bubble_up(__lowerCAmelCase ) def snake_case_ ( self : Tuple ) -> T: # Remove and return the element with lowest weight (highest priority) if self.elements > 1: self._swap_nodes(0 , self.elements - 1 ) _A , _A = self.heap.pop() del self.position_map[elem] self.elements -= 1 if self.elements > 0: _A , _A = self.heap[0] self._bubble_down(__lowerCAmelCase ) return elem def snake_case_ ( self : int , __lowerCAmelCase : T , __lowerCAmelCase : int ) -> None: # Update the weight of the given key _A = self.position_map[elem] _A = (elem, weight) if position > 0: _A = get_parent_position(__lowerCAmelCase ) _A , _A = self.heap[parent_position] if parent_weight > weight: self._bubble_up(__lowerCAmelCase ) else: self._bubble_down(__lowerCAmelCase ) else: self._bubble_down(__lowerCAmelCase ) def snake_case_ ( self : Optional[Any] , __lowerCAmelCase : T ) -> None: # Place a node at the proper position (upward movement) [to be used internally # only] _A = self.position_map[elem] if curr_pos == 0: return None _A = get_parent_position(__lowerCAmelCase ) _A , _A = self.heap[curr_pos] _A , _A = self.heap[parent_position] if parent_weight > weight: self._swap_nodes(__lowerCAmelCase , __lowerCAmelCase ) return self._bubble_up(__lowerCAmelCase ) return None def snake_case_ ( self : Dict , __lowerCAmelCase : T ) -> None: # Place a node at the proper position (downward movement) [to be used # internally only] _A = self.position_map[elem] _A , _A = self.heap[curr_pos] _A = get_child_left_position(__lowerCAmelCase ) _A = get_child_right_position(__lowerCAmelCase ) if child_left_position < self.elements and child_right_position < self.elements: _A , _A = self.heap[child_left_position] _A , _A = self.heap[child_right_position] if child_right_weight < child_left_weight and child_right_weight < weight: self._swap_nodes(__lowerCAmelCase , __lowerCAmelCase ) return self._bubble_down(__lowerCAmelCase ) if child_left_position < self.elements: _A , _A = self.heap[child_left_position] if child_left_weight < weight: self._swap_nodes(__lowerCAmelCase , __lowerCAmelCase ) return self._bubble_down(__lowerCAmelCase ) else: return None if child_right_position < self.elements: _A , _A = self.heap[child_right_position] if child_right_weight < weight: self._swap_nodes(__lowerCAmelCase , __lowerCAmelCase ) return self._bubble_down(__lowerCAmelCase ) return None def snake_case_ ( self : List[str] , __lowerCAmelCase : int , __lowerCAmelCase : int ) -> None: # Swap the nodes at the given positions _A = self.heap[nodea_pos][0] _A = self.heap[nodea_pos][0] _A , _A = ( self.heap[nodea_pos], 
self.heap[nodea_pos], ) _A = nodea_pos _A = nodea_pos class lowerCamelCase__ ( Generic[T]): """simple docstring""" def __init__( self : str ) -> None: _A = {} _A = 0 def __repr__( self : str ) -> str: return str(self.connections ) def __len__( self : Dict ) -> int: return self.nodes def snake_case_ ( self : Any , __lowerCAmelCase : T ) -> None: # Add a node in the graph if it is not in the graph if node not in self.connections: _A = {} self.nodes += 1 def snake_case_ ( self : str , __lowerCAmelCase : T , __lowerCAmelCase : T , __lowerCAmelCase : int ) -> None: # Add an edge between 2 nodes in the graph self.add_node(__lowerCAmelCase ) self.add_node(__lowerCAmelCase ) _A = weight _A = weight def SCREAMING_SNAKE_CASE_ ( _snake_case :GraphUndirectedWeighted[T] , ) -> tuple[dict[T, int], dict[T, T | None]]: _A = {node: maxsize for node in graph.connections} _A = {node: None for node in graph.connections} _A = MinPriorityQueue() for node, weight in dist.items(): priority_queue.push(_snake_case , _snake_case ) if priority_queue.is_empty(): return dist, parent # initialization _A = priority_queue.extract_min() _A = 0 for neighbour in graph.connections[node]: if dist[neighbour] > dist[node] + graph.connections[node][neighbour]: _A = dist[node] + graph.connections[node][neighbour] priority_queue.update_key(_snake_case , dist[neighbour] ) _A = node # running prim's algorithm while not priority_queue.is_empty(): _A = priority_queue.extract_min() for neighbour in graph.connections[node]: if dist[neighbour] > dist[node] + graph.connections[node][neighbour]: _A = dist[node] + graph.connections[node][neighbour] priority_queue.update_key(_snake_case , dist[neighbour] ) _A = node return dist, parent
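# The record above pairs a position-tracking min-heap with Prim's algorithm.
# An equivalent sketch using the standard-library heapq and a made-up triangle
# graph (lazy deletion of stale heap entries instead of decrease-key):
import heapq

def prim_sketch(graph: dict, start: str) -> dict:
    visited = {start}
    parent = {start: None}
    heap = [(w, start, nbr) for nbr, w in graph[start].items()]
    heapq.heapify(heap)
    while heap:
        w, frm, to = heapq.heappop(heap)
        if to in visited:
            continue  # stale entry; a cheaper edge already claimed this node
        visited.add(to)
        parent[to] = frm
        for nbr, nw in graph[to].items():
            if nbr not in visited:
                heapq.heappush(heap, (nw, to, nbr))
    return parent

g = {"a": {"b": 1, "c": 3}, "b": {"a": 1, "c": 1}, "c": {"a": 3, "b": 1}}
print(prim_sketch(g, "a"))  # {'a': None, 'b': 'a', 'c': 'b'}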
2
1
import unittest from transformers import LiltConfig, is_torch_available from transformers.testing_utils import require_torch, slow, torch_device from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( LiltForQuestionAnswering, LiltForSequenceClassification, LiltForTokenClassification, LiltModel, ) from transformers.models.lilt.modeling_lilt import LILT_PRETRAINED_MODEL_ARCHIVE_LIST class lowerCamelCase__ : """simple docstring""" def __init__( self : Union[str, Any] , __lowerCAmelCase : Optional[int] , __lowerCAmelCase : Optional[Any]=13 , __lowerCAmelCase : int=7 , __lowerCAmelCase : Any=True , __lowerCAmelCase : Tuple=True , __lowerCAmelCase : Dict=True , __lowerCAmelCase : Optional[Any]=True , __lowerCAmelCase : Tuple=99 , __lowerCAmelCase : Any=24 , __lowerCAmelCase : Tuple=2 , __lowerCAmelCase : str=6 , __lowerCAmelCase : Optional[Any]=37 , __lowerCAmelCase : Optional[Any]="gelu" , __lowerCAmelCase : int=0.1 , __lowerCAmelCase : Tuple=0.1 , __lowerCAmelCase : int=5_12 , __lowerCAmelCase : List[Any]=16 , __lowerCAmelCase : Any=2 , __lowerCAmelCase : List[Any]=0.02 , __lowerCAmelCase : Optional[Any]=3 , __lowerCAmelCase : Dict=None , __lowerCAmelCase : Any=10_00 , ) -> Tuple: _A = parent _A = batch_size _A = seq_length _A = is_training _A = use_input_mask _A = use_token_type_ids _A = use_labels _A = vocab_size _A = hidden_size _A = num_hidden_layers _A = num_attention_heads _A = intermediate_size _A = hidden_act _A = hidden_dropout_prob _A = attention_probs_dropout_prob _A = max_position_embeddings _A = type_vocab_size _A = type_sequence_label_size _A = initializer_range _A = num_labels _A = scope _A = range_bbox def snake_case_ ( self : int ) -> str: _A = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) _A = ids_tensor([self.batch_size, self.seq_length, 4] , self.range_bbox ) # Ensure that bbox is legal for i in range(bbox.shape[0] ): for j in range(bbox.shape[1] ): if bbox[i, j, 3] < bbox[i, j, 1]: _A = bbox[i, j, 3] _A = bbox[i, j, 1] _A = t if bbox[i, j, 2] < bbox[i, j, 0]: _A = bbox[i, j, 2] _A = bbox[i, j, 0] _A = t _A = None if self.use_input_mask: _A = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 ) _A = None if self.use_token_type_ids: _A = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) _A = None _A = None if self.use_labels: _A = ids_tensor([self.batch_size] , self.type_sequence_label_size ) _A = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) _A = self.get_config() return config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels def snake_case_ ( self : str ) -> Union[str, Any]: return LiltConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , ) def snake_case_ ( self : Tuple , __lowerCAmelCase : Dict , __lowerCAmelCase : str , __lowerCAmelCase : str , __lowerCAmelCase : Any , __lowerCAmelCase : int , __lowerCAmelCase : 
Optional[Any] , __lowerCAmelCase : List[str] , ) -> Dict: _A = LiltModel(config=__lowerCAmelCase ) model.to(__lowerCAmelCase ) model.eval() _A = model(__lowerCAmelCase , bbox=__lowerCAmelCase , attention_mask=__lowerCAmelCase , token_type_ids=__lowerCAmelCase ) _A = model(__lowerCAmelCase , bbox=__lowerCAmelCase , token_type_ids=__lowerCAmelCase ) _A = model(__lowerCAmelCase , bbox=__lowerCAmelCase ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) ) def snake_case_ ( self : Optional[Any] , __lowerCAmelCase : str , __lowerCAmelCase : List[Any] , __lowerCAmelCase : Dict , __lowerCAmelCase : Tuple , __lowerCAmelCase : List[str] , __lowerCAmelCase : str , __lowerCAmelCase : str , ) -> Tuple: _A = self.num_labels _A = LiltForTokenClassification(config=__lowerCAmelCase ) model.to(__lowerCAmelCase ) model.eval() _A = model( __lowerCAmelCase , bbox=__lowerCAmelCase , attention_mask=__lowerCAmelCase , token_type_ids=__lowerCAmelCase , labels=__lowerCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def snake_case_ ( self : Any , __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : Any , __lowerCAmelCase : Tuple , __lowerCAmelCase : List[Any] , __lowerCAmelCase : List[str] , __lowerCAmelCase : List[str] , __lowerCAmelCase : Optional[Any] , ) -> int: _A = LiltForQuestionAnswering(config=__lowerCAmelCase ) model.to(__lowerCAmelCase ) model.eval() _A = model( __lowerCAmelCase , bbox=__lowerCAmelCase , attention_mask=__lowerCAmelCase , token_type_ids=__lowerCAmelCase , start_positions=__lowerCAmelCase , end_positions=__lowerCAmelCase , ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def snake_case_ ( self : Dict ) -> Dict: _A = self.prepare_config_and_inputs() ( ( _A ) , ( _A ) , ( _A ) , ( _A ) , ( _A ) , ( _A ) , ( _A ) , ) = config_and_inputs _A = { '''input_ids''': input_ids, '''bbox''': bbox, '''token_type_ids''': token_type_ids, '''attention_mask''': input_mask, } return config, inputs_dict @require_torch class lowerCamelCase__ ( _A , _A , _A , unittest.TestCase): """simple docstring""" a__ : Tuple = ( ( LiltModel, LiltForSequenceClassification, LiltForTokenClassification, LiltForQuestionAnswering, ) if is_torch_available() else () ) a__ : Optional[Any] = ( { "feature-extraction": LiltModel, "question-answering": LiltForQuestionAnswering, "text-classification": LiltForSequenceClassification, "token-classification": LiltForTokenClassification, "zero-shot": LiltForSequenceClassification, } if is_torch_available() else {} ) a__ : Optional[int] = False a__ : Any = False def snake_case_ ( self : Any , __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : Any , __lowerCAmelCase : List[str] , __lowerCAmelCase : str , __lowerCAmelCase : str ) -> Optional[int]: return True def snake_case_ ( self : Tuple ) -> Optional[int]: _A = LiltModelTester(self ) _A = ConfigTester(self , config_class=__lowerCAmelCase , hidden_size=37 ) def snake_case_ ( self : Union[str, Any] ) -> str: self.config_tester.run_common_tests() def snake_case_ ( self : Optional[Any] ) -> Dict: _A = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*__lowerCAmelCase ) def snake_case_ ( self : Any ) -> Tuple: _A = self.model_tester.prepare_config_and_inputs() for 
type in ["absolute", "relative_key", "relative_key_query"]: _A = type self.model_tester.create_and_check_model(*__lowerCAmelCase ) def snake_case_ ( self : Optional[int] ) -> Optional[Any]: _A = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_token_classification(*__lowerCAmelCase ) def snake_case_ ( self : int ) -> Any: _A = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_question_answering(*__lowerCAmelCase ) @slow def snake_case_ ( self : Optional[int] ) -> List[Any]: for model_name in LILT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: _A = LiltModel.from_pretrained(__lowerCAmelCase ) self.assertIsNotNone(__lowerCAmelCase ) @require_torch @slow class lowerCamelCase__ ( unittest.TestCase): """simple docstring""" def snake_case_ ( self : str ) -> Tuple: _A = LiltModel.from_pretrained('''SCUT-DLVCLab/lilt-roberta-en-base''' ).to(__lowerCAmelCase ) _A = torch.tensor([[1, 2]] , device=__lowerCAmelCase ) _A = torch.tensor([[[1, 2, 3, 4], [5, 6, 7, 8]]] , device=__lowerCAmelCase ) # forward pass with torch.no_grad(): _A = model(input_ids=__lowerCAmelCase , bbox=__lowerCAmelCase ) _A = torch.Size([1, 2, 7_68] ) _A = torch.tensor( [[-0.0653, 0.0950, -0.0061], [-0.0545, 0.0926, -0.0324]] , device=__lowerCAmelCase , ) self.assertTrue(outputs.last_hidden_state.shape , __lowerCAmelCase ) self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :, :3] , __lowerCAmelCase , atol=1E-3 ) )
2
import os from shutil import copyfile from typing import Any, Dict, List, Optional, Tuple import sentencepiece as spm from ...tokenization_utils import AddedToken, PreTrainedTokenizer from ...utils import logging UpperCAmelCase_ = logging.get_logger(__name__) UpperCAmelCase_ = """▁""" UpperCAmelCase_ = {"""vocab_file""": """sentencepiece.bpe.model""", """monolingual_vocab_file""": """dict.txt"""} UpperCAmelCase_ = { """vocab_file""": { """vinai/bartpho-syllable""": """https://huggingface.co/vinai/bartpho-syllable/resolve/main/sentencepiece.bpe.model""", }, """monolingual_vocab_file""": { """vinai/bartpho-syllable""": """https://huggingface.co/vinai/bartpho-syllable/resolve/main/dict.txt""", }, } UpperCAmelCase_ = {"""vinai/bartpho-syllable""": 1_0_2_4} class lowerCamelCase__ ( _A): """simple docstring""" a__ : int = VOCAB_FILES_NAMES a__ : Tuple = PRETRAINED_VOCAB_FILES_MAP a__ : Optional[int] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES a__ : Tuple = ["input_ids", "attention_mask"] def __init__( self : Union[str, Any] , __lowerCAmelCase : Dict , __lowerCAmelCase : List[str] , __lowerCAmelCase : Union[str, Any]="<s>" , __lowerCAmelCase : Dict="</s>" , __lowerCAmelCase : List[Any]="</s>" , __lowerCAmelCase : Optional[Any]="<s>" , __lowerCAmelCase : Tuple="<unk>" , __lowerCAmelCase : int="<pad>" , __lowerCAmelCase : Optional[Any]="<mask>" , __lowerCAmelCase : Optional[Dict[str, Any]] = None , **__lowerCAmelCase : Tuple , ) -> None: # Mask token behave like a normal word, i.e. include the space before it _A = AddedToken(__lowerCAmelCase , lstrip=__lowerCAmelCase , rstrip=__lowerCAmelCase ) if isinstance(__lowerCAmelCase , __lowerCAmelCase ) else mask_token _A = {} if sp_model_kwargs is None else sp_model_kwargs super().__init__( bos_token=__lowerCAmelCase , eos_token=__lowerCAmelCase , unk_token=__lowerCAmelCase , sep_token=__lowerCAmelCase , cls_token=__lowerCAmelCase , pad_token=__lowerCAmelCase , mask_token=__lowerCAmelCase , sp_model_kwargs=self.sp_model_kwargs , **__lowerCAmelCase , ) _A = vocab_file _A = monolingual_vocab_file _A = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(str(__lowerCAmelCase ) ) # Load the reduced vocab # Keep order of special tokens for backward compatibility _A = {} _A = 0 for token in [bos_token, pad_token, eos_token, unk_token, sep_token, cls_token]: if str(__lowerCAmelCase ) not in self.fairseq_tokens_to_ids: _A = cnt cnt += 1 with open(__lowerCAmelCase , '''r''' , encoding='''utf-8''' ) as f: for line in f.readlines(): _A = line.strip().split()[0] _A = len(self.fairseq_tokens_to_ids ) if str(__lowerCAmelCase ) not in self.fairseq_tokens_to_ids: _A = len(self.fairseq_tokens_to_ids ) _A = {v: k for k, v in self.fairseq_tokens_to_ids.items()} def __getstate__( self : Any ) -> List[Any]: _A = self.__dict__.copy() _A = None _A = self.sp_model.serialized_model_proto() return state def __setstate__( self : Union[str, Any] , __lowerCAmelCase : Dict ) -> List[Any]: _A = d # for backward compatibility if not hasattr(self , '''sp_model_kwargs''' ): _A = {} _A = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.LoadFromSerializedProto(self.sp_model_proto ) def snake_case_ ( self : Optional[Any] , __lowerCAmelCase : List[int] , __lowerCAmelCase : Optional[List[int]] = None ) -> List[int]: if token_ids_a is None: return [self.cls_token_id] + token_ids_a + [self.sep_token_id] _A = [self.cls_token_id] _A = [self.sep_token_id] return cls + token_ids_a + sep + sep + token_ids_a + sep def snake_case_ ( self : List[Any] , __lowerCAmelCase : 
List[int] , __lowerCAmelCase : Optional[List[int]] = None , __lowerCAmelCase : bool = False ) -> List[int]: if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=__lowerCAmelCase , token_ids_a=__lowerCAmelCase , already_has_special_tokens=__lowerCAmelCase ) if token_ids_a is None: return [1] + ([0] * len(__lowerCAmelCase )) + [1] return [1] + ([0] * len(__lowerCAmelCase )) + [1, 1] + ([0] * len(__lowerCAmelCase )) + [1] def snake_case_ ( self : Any , __lowerCAmelCase : List[int] , __lowerCAmelCase : Optional[List[int]] = None ) -> List[int]: _A = [self.sep_token_id] _A = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0] @property def snake_case_ ( self : Optional[int] ) -> Union[str, Any]: return len(self.fairseq_ids_to_tokens ) def snake_case_ ( self : Dict ) -> Optional[Any]: _A = {self.convert_ids_to_tokens(__lowerCAmelCase ): i for i in range(self.vocab_size )} vocab.update(self.added_tokens_encoder ) return vocab def snake_case_ ( self : List[str] , __lowerCAmelCase : str ) -> List[str]: return self.sp_model.encode(__lowerCAmelCase , out_type=__lowerCAmelCase ) def snake_case_ ( self : str , __lowerCAmelCase : Optional[Any] ) -> Dict: if token in self.fairseq_tokens_to_ids: return self.fairseq_tokens_to_ids[token] else: return self.unk_token_id def snake_case_ ( self : int , __lowerCAmelCase : Optional[int] ) -> List[str]: return self.fairseq_ids_to_tokens[index] def snake_case_ ( self : List[str] , __lowerCAmelCase : Union[str, Any] ) -> Tuple: _A = ''''''.join(__lowerCAmelCase ).replace(__lowerCAmelCase , ''' ''' ).strip() return out_string def snake_case_ ( self : str , __lowerCAmelCase : str , __lowerCAmelCase : Optional[str] = None ) -> Tuple[str]: if not os.path.isdir(__lowerCAmelCase ): logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' ) return _A = os.path.join( __lowerCAmelCase , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] ) _A = os.path.join( __lowerCAmelCase , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''monolingual_vocab_file'''] , ) if os.path.abspath(self.vocab_file ) != os.path.abspath(__lowerCAmelCase ) and os.path.isfile(self.vocab_file ): copyfile(self.vocab_file , __lowerCAmelCase ) elif not os.path.isfile(self.vocab_file ): with open(__lowerCAmelCase , '''wb''' ) as fi: _A = self.sp_model.serialized_model_proto() fi.write(__lowerCAmelCase ) if os.path.abspath(self.monolingual_vocab_file ) != os.path.abspath( __lowerCAmelCase ) and os.path.isfile(self.monolingual_vocab_file ): copyfile(self.monolingual_vocab_file , __lowerCAmelCase ) elif not os.path.isfile(self.monolingual_vocab_file ): with open(__lowerCAmelCase , '''w''' , encoding='''utf-8''' ) as fp: for token in self.fairseq_tokens_to_ids: if token not in self.all_special_tokens: fp.write(f'''{str(__lowerCAmelCase )} \n''' ) return out_vocab_file, out_monolingual_vocab_file
2
1
from __future__ import annotations from typing import Any class lowerCamelCase__ : """simple docstring""" def __init__( self : List[str] , __lowerCAmelCase : int , __lowerCAmelCase : int , __lowerCAmelCase : float = 0 ) -> None: _A , _A = row, column _A = [[default_value for c in range(__lowerCAmelCase )] for r in range(__lowerCAmelCase )] def __str__( self : Any ) -> str: _A = f'''Matrix consist of {self.row} rows and {self.column} columns\n''' # Make string identifier _A = 0 for row_vector in self.array: for obj in row_vector: _A = max(__lowerCAmelCase , len(str(__lowerCAmelCase ) ) ) _A = f'''%{max_element_length}s''' # Make string and return def single_line(__lowerCAmelCase : list[float] ) -> str: nonlocal string_format_identifier _A = '''[''' line += ", ".join(string_format_identifier % (obj,) for obj in row_vector ) line += "]" return line s += "\n".join(single_line(__lowerCAmelCase ) for row_vector in self.array ) return s def __repr__( self : Dict ) -> str: return str(self ) def snake_case_ ( self : Optional[int] , __lowerCAmelCase : tuple[int, int] ) -> bool: if not (isinstance(__lowerCAmelCase , (list, tuple) ) and len(__lowerCAmelCase ) == 2): return False elif not (0 <= loc[0] < self.row and 0 <= loc[1] < self.column): return False else: return True def __getitem__( self : int , __lowerCAmelCase : tuple[int, int] ) -> Any: assert self.validate_indicies(__lowerCAmelCase ) return self.array[loc[0]][loc[1]] def __setitem__( self : int , __lowerCAmelCase : tuple[int, int] , __lowerCAmelCase : float ) -> None: assert self.validate_indicies(__lowerCAmelCase ) _A = value def __add__( self : int , __lowerCAmelCase : Matrix ) -> Matrix: assert isinstance(__lowerCAmelCase , __lowerCAmelCase ) assert self.row == another.row and self.column == another.column # Add _A = Matrix(self.row , self.column ) for r in range(self.row ): for c in range(self.column ): _A = self[r, c] + another[r, c] return result def __neg__( self : Optional[Any] ) -> Matrix: _A = Matrix(self.row , self.column ) for r in range(self.row ): for c in range(self.column ): _A = -self[r, c] return result def __sub__( self : Tuple , __lowerCAmelCase : Matrix ) -> Matrix: return self + (-another) def __mul__( self : Optional[int] , __lowerCAmelCase : int | float | Matrix ) -> Matrix: if isinstance(__lowerCAmelCase , (int, float) ): # Scalar multiplication _A = Matrix(self.row , self.column ) for r in range(self.row ): for c in range(self.column ): _A = self[r, c] * another return result elif isinstance(__lowerCAmelCase , __lowerCAmelCase ): # Matrix multiplication assert self.column == another.row _A = Matrix(self.row , another.column ) for r in range(self.row ): for c in range(another.column ): for i in range(self.column ): result[r, c] += self[r, i] * another[i, c] return result else: _A = f'''Unsupported type given for another ({type(__lowerCAmelCase )})''' raise TypeError(__lowerCAmelCase ) def snake_case_ ( self : str ) -> Matrix: _A = Matrix(self.column , self.row ) for r in range(self.row ): for c in range(self.column ): _A = self[r, c] return result def snake_case_ ( self : List[Any] , __lowerCAmelCase : Matrix , __lowerCAmelCase : Matrix ) -> Any: assert isinstance(__lowerCAmelCase , __lowerCAmelCase ) and isinstance(__lowerCAmelCase , __lowerCAmelCase ) assert self.row == self.column == u.row == v.row # u, v should be column vector assert u.column == v.column == 1 # u, v should be column vector # Calculate _A = v.transpose() _A = (v_t * self * u)[0, 0] + 1 if numerator_factor == 0: return None # It's not invertable 
return self - ((self * u) * (v_t * self) * (1.0 / numerator_factor)) # Testing if __name__ == "__main__": def SCREAMING_SNAKE_CASE_ ( ) -> None: # a^(-1) _A = Matrix(3 , 3 , 0 ) for i in range(3 ): _A = 1 print(F'''a^(-1) is {ainv}''' ) # u, v _A = Matrix(3 , 1 , 0 ) _A , _A , _A = 1, 2, -3 _A = Matrix(3 , 1 , 0 ) _A , _A , _A = 4, -2, 5 print(F'''u is {u}''' ) print(F'''v is {v}''' ) print(F'''uv^T is {u * v.transpose()}''' ) # Sherman Morrison print(F'''(a + uv^T)^(-1) is {ainv.sherman_morrison(_snake_case , _snake_case )}''' ) def SCREAMING_SNAKE_CASE_ ( ) -> None: import doctest doctest.testmod() testa()
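# The sherman_morrison method above evaluates
# (A + u v^T)^(-1) = A^(-1) - (A^(-1) u v^T A^(-1)) / (1 + v^T A^(-1) u)
# given self = A^(-1). A numeric sanity check with numpy; the random matrices
# are illustrative, and a near-singular update would need the zero guard above.
import numpy as np

rng = np.random.default_rng(0)
a = np.eye(3) + 0.1 * rng.standard_normal((3, 3))
u = rng.standard_normal((3, 1))
v = rng.standard_normal((3, 1))

a_inv = np.linalg.inv(a)
lhs = np.linalg.inv(a + u @ v.T)
rhs = a_inv - (a_inv @ u @ v.T @ a_inv) / (1.0 + (v.T @ a_inv @ u).item())
print(np.allclose(lhs, rhs))  # True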
2
from __future__ import annotations def SCREAMING_SNAKE_CASE_ ( _snake_case :dict , _snake_case :str ) -> set[str]: _A , _A = set(_snake_case ), [start] while stack: _A = stack.pop() explored.add(_snake_case ) # Differences from BFS: # 1) pop last element instead of first one # 2) add adjacent elements to stack without exploring them for adj in reversed(graph[v] ): if adj not in explored: stack.append(_snake_case ) return explored UpperCAmelCase_ = { """A""": ["""B""", """C""", """D"""], """B""": ["""A""", """D""", """E"""], """C""": ["""A""", """F"""], """D""": ["""B""", """D"""], """E""": ["""B""", """F"""], """F""": ["""C""", """E""", """G"""], """G""": ["""F"""], } if __name__ == "__main__": import doctest doctest.testmod() print(depth_first_search(G, """A"""))
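# The traversal above pushes neighbors in reversed order so they pop in their
# listed order. A compact variant with an explicit duplicate guard, run on a
# tiny made-up graph:
def dfs_order(graph: dict, start: str) -> list:
    explored, stack, order = set(), [start], []
    while stack:
        v = stack.pop()
        if v in explored:
            continue  # guard against nodes pushed twice before being visited
        explored.add(v)
        order.append(v)
        for adj in reversed(graph[v]):
            if adj not in explored:
                stack.append(adj)
    return order

print(dfs_order({"A": ["B", "C"], "B": [], "C": []}, "A"))  # ['A', 'B', 'C']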
2
1
from __future__ import annotations from collections.abc import Iterator from typing import Generic, TypeVar UpperCAmelCase_ = TypeVar("""T""") class lowerCamelCase__ ( Generic[T]): """simple docstring""" def __init__( self : Tuple , __lowerCAmelCase : T ) -> Optional[Any]: _A = data _A = None def __str__( self : Optional[Any] ) -> str: return f'''{self.data}''' class lowerCamelCase__ ( Generic[T]): """simple docstring""" def __init__( self : Any ) -> None: _A = None def __iter__( self : int ) -> Iterator[T]: _A = self.top while node: yield node.data _A = node.next def __str__( self : Any ) -> str: return "->".join([str(__lowerCAmelCase ) for item in self] ) def __len__( self : List[str] ) -> int: return len(tuple(iter(self ) ) ) def snake_case_ ( self : Optional[Any] ) -> bool: return self.top is None def snake_case_ ( self : int , __lowerCAmelCase : T ) -> None: _A = Node(__lowerCAmelCase ) if not self.is_empty(): _A = self.top _A = node def snake_case_ ( self : List[Any] ) -> T: if self.is_empty(): raise IndexError('''pop from empty stack''' ) assert isinstance(self.top , __lowerCAmelCase ) _A = self.top _A = self.top.next return pop_node.data def snake_case_ ( self : Optional[int] ) -> T: if self.is_empty(): raise IndexError('''peek from empty stack''' ) assert self.top is not None return self.top.data def snake_case_ ( self : Tuple ) -> None: _A = None if __name__ == "__main__": from doctest import testmod testmod()
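# A compact, self-contained rendering of the linked-list stack above
# (plain names substituted for the dump's mangled class names):
from __future__ import annotations

class _Node:
    def __init__(self, data: int, nxt: _Node | None = None) -> None:
        self.data, self.next = data, nxt

class _Stack:
    def __init__(self) -> None:
        self.top: _Node | None = None

    def push(self, data: int) -> None:
        self.top = _Node(data, self.top)  # new node points at the old top

    def pop(self) -> int:
        if self.top is None:
            raise IndexError("pop from empty stack")
        node, self.top = self.top, self.top.next
        return node.data

s = _Stack()
s.push(1)
s.push(2)
print(s.pop(), s.pop())  # 2 1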
2
from .configuration_bert_masked import MaskedBertConfig from .modeling_bert_masked import ( MaskedBertForMultipleChoice, MaskedBertForQuestionAnswering, MaskedBertForSequenceClassification, MaskedBertForTokenClassification, MaskedBertModel, ) from .modules import *
2
1
from collections.abc import Sequence def SCREAMING_SNAKE_CASE_ ( _snake_case :Sequence[int] | None = None ) -> int: if nums is None or not nums: raise ValueError('''Input sequence should not be empty''' ) _A = nums[0] for i in range(1 , len(_snake_case ) ): _A = nums[i] _A = max(ans , ans + num , num ) return ans if __name__ == "__main__": import doctest doctest.testmod() # Try on a sample input from the user UpperCAmelCase_ = int(input("""Enter number of elements : """).strip()) UpperCAmelCase_ = list(map(int, input("""\nEnter the numbers : """).strip().split()))[:n] print(max_subsequence_sum(array))
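# Spelled-out version of the recurrence above: at each element either keep the
# best (not necessarily contiguous) subsequence so far, extend it, or restart
# from the element alone.
def max_subsequence_sum_sketch(nums: list) -> int:
    ans = nums[0]
    for num in nums[1:]:
        ans = max(ans, ans + num, num)
    return ans

print(max_subsequence_sum_sketch([2, -5, 3]))    # 5  (pick 2 and 3, skip -5)
print(max_subsequence_sum_sketch([-2, -3, -1]))  # -1 (least negative element)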
2
import warnings from ...configuration_utils import PretrainedConfig from ...utils import logging UpperCAmelCase_ = logging.get_logger(__name__) UpperCAmelCase_ = { """xlnet-base-cased""": """https://huggingface.co/xlnet-base-cased/resolve/main/config.json""", """xlnet-large-cased""": """https://huggingface.co/xlnet-large-cased/resolve/main/config.json""", } class lowerCamelCase__ ( _A): """simple docstring""" a__ : Any = "xlnet" a__ : Dict = ["mems"] a__ : List[str] = { "n_token": "vocab_size", # Backward compatibility "hidden_size": "d_model", "num_attention_heads": "n_head", "num_hidden_layers": "n_layer", } def __init__( self : int , __lowerCAmelCase : Dict=3_20_00 , __lowerCAmelCase : List[str]=10_24 , __lowerCAmelCase : Dict=24 , __lowerCAmelCase : Optional[Any]=16 , __lowerCAmelCase : Dict=40_96 , __lowerCAmelCase : Any="gelu" , __lowerCAmelCase : int=True , __lowerCAmelCase : List[str]="bi" , __lowerCAmelCase : Dict=0.02 , __lowerCAmelCase : Union[str, Any]=1E-12 , __lowerCAmelCase : Optional[Any]=0.1 , __lowerCAmelCase : Optional[Any]=5_12 , __lowerCAmelCase : List[str]=None , __lowerCAmelCase : Tuple=True , __lowerCAmelCase : Tuple=False , __lowerCAmelCase : Optional[Any]=False , __lowerCAmelCase : Union[str, Any]=-1 , __lowerCAmelCase : Optional[Any]=False , __lowerCAmelCase : Any="last" , __lowerCAmelCase : List[Any]=True , __lowerCAmelCase : Tuple="tanh" , __lowerCAmelCase : int=0.1 , __lowerCAmelCase : str=5 , __lowerCAmelCase : str=5 , __lowerCAmelCase : List[str]=5 , __lowerCAmelCase : List[str]=1 , __lowerCAmelCase : Optional[int]=2 , **__lowerCAmelCase : List[str] , ) -> Tuple: _A = vocab_size _A = d_model _A = n_layer _A = n_head if d_model % n_head != 0: raise ValueError(f'''\'d_model % n_head\' ({d_model % n_head}) should be equal to 0''' ) if "d_head" in kwargs: if kwargs["d_head"] != d_model // n_head: raise ValueError( f'''`d_head` ({kwargs['d_head']}) should be equal to `d_model // n_head` ({d_model // n_head})''' ) _A = d_model // n_head _A = ff_activation _A = d_inner _A = untie_r _A = attn_type _A = initializer_range _A = layer_norm_eps _A = dropout _A = mem_len _A = reuse_len _A = bi_data _A = clamp_len _A = same_length _A = summary_type _A = summary_use_proj _A = summary_activation _A = summary_last_dropout _A = start_n_top _A = end_n_top _A = bos_token_id _A = pad_token_id _A = eos_token_id if "use_cache" in kwargs: warnings.warn( '''The `use_cache` argument is deprecated and will be removed in a future version, use `use_mems_eval`''' ''' instead.''' , __lowerCAmelCase , ) _A = kwargs['''use_cache'''] _A = use_mems_eval _A = use_mems_train super().__init__(pad_token_id=__lowerCAmelCase , bos_token_id=__lowerCAmelCase , eos_token_id=__lowerCAmelCase , **__lowerCAmelCase ) @property def snake_case_ ( self : Optional[Any] ) -> Union[str, Any]: logger.info(f'''The model {self.model_type} is one of the few models that has no sequence length limit.''' ) return -1 @max_position_embeddings.setter def snake_case_ ( self : Tuple , __lowerCAmelCase : Optional[Any] ) -> Dict: # Message copied from Transformer-XL documentation raise NotImplementedError( f'''The model {self.model_type} is one of the few models that has no sequence length limit.''' )
2
1
import unittest from transformers import is_vision_available from transformers.pipelines import pipeline from transformers.testing_utils import ( is_pipeline_test, nested_simplify, require_tf, require_torch, require_vision, slow, ) from .test_pipelines_common import ANY if is_vision_available(): from PIL import Image else: class lowerCamelCase__ : """simple docstring""" @staticmethod def snake_case_ ( *__lowerCAmelCase : Optional[Any] , **__lowerCAmelCase : Any ) -> Any: pass @is_pipeline_test @require_vision class lowerCamelCase__ ( unittest.TestCase): """simple docstring""" @require_torch def snake_case_ ( self : Tuple ) -> Tuple: _A = pipeline( model='''hf-internal-testing/tiny-random-clip-zero-shot-image-classification''' , ) _A = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' ) _A = image_classifier(__lowerCAmelCase , candidate_labels=['''a''', '''b''', '''c'''] ) # The floating scores are so close, we enter floating error approximation and the order is not guaranteed across # python and torch versions. self.assertIn( nested_simplify(__lowerCAmelCase ) , [ [{'''score''': 0.333, '''label''': '''a'''}, {'''score''': 0.333, '''label''': '''b'''}, {'''score''': 0.333, '''label''': '''c'''}], [{'''score''': 0.333, '''label''': '''a'''}, {'''score''': 0.333, '''label''': '''c'''}, {'''score''': 0.333, '''label''': '''b'''}], ] , ) _A = image_classifier([image] * 5 , candidate_labels=['''A''', '''B''', '''C'''] , batch_size=2 ) self.assertEqual( nested_simplify(__lowerCAmelCase ) , [ [ {'''score''': 0.333, '''label''': ANY(__lowerCAmelCase )}, {'''score''': 0.333, '''label''': ANY(__lowerCAmelCase )}, {'''score''': 0.333, '''label''': ANY(__lowerCAmelCase )}, ], [ {'''score''': 0.333, '''label''': ANY(__lowerCAmelCase )}, {'''score''': 0.333, '''label''': ANY(__lowerCAmelCase )}, {'''score''': 0.333, '''label''': ANY(__lowerCAmelCase )}, ], [ {'''score''': 0.333, '''label''': ANY(__lowerCAmelCase )}, {'''score''': 0.333, '''label''': ANY(__lowerCAmelCase )}, {'''score''': 0.333, '''label''': ANY(__lowerCAmelCase )}, ], [ {'''score''': 0.333, '''label''': ANY(__lowerCAmelCase )}, {'''score''': 0.333, '''label''': ANY(__lowerCAmelCase )}, {'''score''': 0.333, '''label''': ANY(__lowerCAmelCase )}, ], [ {'''score''': 0.333, '''label''': ANY(__lowerCAmelCase )}, {'''score''': 0.333, '''label''': ANY(__lowerCAmelCase )}, {'''score''': 0.333, '''label''': ANY(__lowerCAmelCase )}, ], ] , ) @require_tf def snake_case_ ( self : int ) -> Optional[int]: _A = pipeline( model='''hf-internal-testing/tiny-random-clip-zero-shot-image-classification''' , framework='''tf''' ) _A = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' ) _A = image_classifier(__lowerCAmelCase , candidate_labels=['''a''', '''b''', '''c'''] ) self.assertEqual( nested_simplify(__lowerCAmelCase ) , [{'''score''': 0.333, '''label''': '''a'''}, {'''score''': 0.333, '''label''': '''b'''}, {'''score''': 0.333, '''label''': '''c'''}] , ) _A = image_classifier([image] * 5 , candidate_labels=['''A''', '''B''', '''C'''] , batch_size=2 ) self.assertEqual( nested_simplify(__lowerCAmelCase ) , [ [ {'''score''': 0.333, '''label''': ANY(__lowerCAmelCase )}, {'''score''': 0.333, '''label''': ANY(__lowerCAmelCase )}, {'''score''': 0.333, '''label''': ANY(__lowerCAmelCase )}, ], [ {'''score''': 0.333, '''label''': ANY(__lowerCAmelCase )}, {'''score''': 0.333, '''label''': ANY(__lowerCAmelCase )}, {'''score''': 0.333, '''label''': ANY(__lowerCAmelCase )}, ], [ {'''score''': 0.333, '''label''': 
ANY(__lowerCAmelCase )}, {'''score''': 0.333, '''label''': ANY(__lowerCAmelCase )}, {'''score''': 0.333, '''label''': ANY(__lowerCAmelCase )}, ], [ {'''score''': 0.333, '''label''': ANY(__lowerCAmelCase )}, {'''score''': 0.333, '''label''': ANY(__lowerCAmelCase )}, {'''score''': 0.333, '''label''': ANY(__lowerCAmelCase )}, ], [ {'''score''': 0.333, '''label''': ANY(__lowerCAmelCase )}, {'''score''': 0.333, '''label''': ANY(__lowerCAmelCase )}, {'''score''': 0.333, '''label''': ANY(__lowerCAmelCase )}, ], ] , ) @slow @require_torch def snake_case_ ( self : Optional[int] ) -> int: _A = pipeline( task='''zero-shot-image-classification''' , model='''openai/clip-vit-base-patch32''' , ) # This is an image of 2 cats with remotes and no planes _A = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' ) _A = image_classifier(__lowerCAmelCase , candidate_labels=['''cat''', '''plane''', '''remote'''] ) self.assertEqual( nested_simplify(__lowerCAmelCase ) , [ {'''score''': 0.511, '''label''': '''remote'''}, {'''score''': 0.485, '''label''': '''cat'''}, {'''score''': 0.004, '''label''': '''plane'''}, ] , ) _A = image_classifier([image] * 5 , candidate_labels=['''cat''', '''plane''', '''remote'''] , batch_size=2 ) self.assertEqual( nested_simplify(__lowerCAmelCase ) , [ [ {'''score''': 0.511, '''label''': '''remote'''}, {'''score''': 0.485, '''label''': '''cat'''}, {'''score''': 0.004, '''label''': '''plane'''}, ], ] * 5 , ) @slow @require_tf def snake_case_ ( self : Optional[int] ) -> Dict: _A = pipeline( task='''zero-shot-image-classification''' , model='''openai/clip-vit-base-patch32''' , framework='''tf''' ) # This is an image of 2 cats with remotes and no planes _A = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' ) _A = image_classifier(__lowerCAmelCase , candidate_labels=['''cat''', '''plane''', '''remote'''] ) self.assertEqual( nested_simplify(__lowerCAmelCase ) , [ {'''score''': 0.511, '''label''': '''remote'''}, {'''score''': 0.485, '''label''': '''cat'''}, {'''score''': 0.004, '''label''': '''plane'''}, ] , ) _A = image_classifier([image] * 5 , candidate_labels=['''cat''', '''plane''', '''remote'''] , batch_size=2 ) self.assertEqual( nested_simplify(__lowerCAmelCase ) , [ [ {'''score''': 0.511, '''label''': '''remote'''}, {'''score''': 0.485, '''label''': '''cat'''}, {'''score''': 0.004, '''label''': '''plane'''}, ], ] * 5 , )
2
def SCREAMING_SNAKE_CASE_ ( _snake_case :bytes ) -> str: return "".join([hex(_snake_case )[2:].zfill(2 ).upper() for byte in list(_snake_case )] ) def SCREAMING_SNAKE_CASE_ ( _snake_case :str ) -> bytes: # Check data validity, following RFC3548 # https://www.ietf.org/rfc/rfc3548.txt if (len(_snake_case ) % 2) != 0: raise ValueError( '''Base16 encoded data is invalid: Data does not have an even number of hex digits.''' ) # Check the character set - the standard base16 alphabet # is uppercase according to RFC3548 section 6 if not set(_snake_case ) <= set('''0123456789ABCDEF''' ): raise ValueError( '''Base16 encoded data is invalid: Data is not uppercase hex or it contains invalid characters.''' ) # For every two hexadecimal digits (= a byte), turn it into an integer. # Then, string the result together into bytes, and return it. return bytes(int(data[i] + data[i + 1] , 16 ) for i in range(0 , len(_snake_case ) , 2 ) ) if __name__ == "__main__": import doctest doctest.testmod()
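# Round-trip check for the base16 helpers above, using binascii from the
# standard library as an independent reference:
import binascii

data = b"Hello"
encoded = binascii.hexlify(data).upper().decode()
print(encoded)                      # 48656C6C6F
print(binascii.unhexlify(encoded))  # b'Hello'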
2
1
import inspect import unittest from transformers import DPTConfig from transformers.file_utils import is_torch_available, is_vision_available from transformers.models.auto import get_values from transformers.testing_utils import require_torch, require_vision, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import MODEL_MAPPING, DPTForDepthEstimation, DPTForSemanticSegmentation, DPTModel from transformers.models.dpt.modeling_dpt import DPT_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import DPTImageProcessor class lowerCamelCase__ : """simple docstring""" def __init__( self : Optional[int] , __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : List[str]=2 , __lowerCAmelCase : Tuple=32 , __lowerCAmelCase : Tuple=16 , __lowerCAmelCase : Any=3 , __lowerCAmelCase : str=True , __lowerCAmelCase : List[str]=True , __lowerCAmelCase : Optional[int]=32 , __lowerCAmelCase : List[Any]=4 , __lowerCAmelCase : Union[str, Any]=[0, 1, 2, 3] , __lowerCAmelCase : List[str]=4 , __lowerCAmelCase : str=37 , __lowerCAmelCase : Optional[int]="gelu" , __lowerCAmelCase : List[Any]=0.1 , __lowerCAmelCase : Any=0.1 , __lowerCAmelCase : Optional[Any]=0.02 , __lowerCAmelCase : Union[str, Any]=3 , __lowerCAmelCase : Union[str, Any]=[1, 3_84, 24, 24] , __lowerCAmelCase : Tuple=True , __lowerCAmelCase : Optional[int]=None , ) -> List[Any]: _A = parent _A = batch_size _A = image_size _A = patch_size _A = num_channels _A = is_training _A = use_labels _A = hidden_size _A = num_hidden_layers _A = backbone_out_indices _A = num_attention_heads _A = intermediate_size _A = hidden_act _A = hidden_dropout_prob _A = attention_probs_dropout_prob _A = initializer_range _A = num_labels _A = backbone_featmap_shape _A = scope _A = is_hybrid # sequence length of DPT = num_patches + 1 (we add 1 for the [CLS] token) _A = (image_size // patch_size) ** 2 _A = num_patches + 1 def snake_case_ ( self : str ) -> str: _A = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) _A = None if self.use_labels: _A = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels ) _A = self.get_config() return config, pixel_values, labels def snake_case_ ( self : Any ) -> int: _A = { '''global_padding''': '''same''', '''layer_type''': '''bottleneck''', '''depths''': [3, 4, 9], '''out_features''': ['''stage1''', '''stage2''', '''stage3'''], '''embedding_dynamic_padding''': True, '''hidden_sizes''': [96, 1_92, 3_84, 7_68], '''num_groups''': 2, } return DPTConfig( image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , backbone_out_indices=self.backbone_out_indices , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=__lowerCAmelCase , initializer_range=self.initializer_range , is_hybrid=self.is_hybrid , backbone_config=__lowerCAmelCase , backbone_featmap_shape=self.backbone_featmap_shape , ) def snake_case_ ( self : Tuple , __lowerCAmelCase : Dict , __lowerCAmelCase : str , __lowerCAmelCase : int ) -> 
List[str]: _A = DPTModel(config=__lowerCAmelCase ) model.to(__lowerCAmelCase ) model.eval() _A = model(__lowerCAmelCase ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def snake_case_ ( self : List[Any] , __lowerCAmelCase : Any , __lowerCAmelCase : List[str] , __lowerCAmelCase : Dict ) -> Optional[Any]: _A = self.num_labels _A = DPTForDepthEstimation(__lowerCAmelCase ) model.to(__lowerCAmelCase ) model.eval() _A = model(__lowerCAmelCase ) self.parent.assertEqual(result.predicted_depth.shape , (self.batch_size, self.image_size, self.image_size) ) def snake_case_ ( self : Optional[int] , __lowerCAmelCase : List[Any] , __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : Any ) -> Tuple: _A = self.num_labels _A = DPTForSemanticSegmentation(__lowerCAmelCase ) model.to(__lowerCAmelCase ) model.eval() _A = model(__lowerCAmelCase , labels=__lowerCAmelCase ) self.parent.assertEqual( result.logits.shape , (self.batch_size, self.num_labels, self.image_size, self.image_size) ) def snake_case_ ( self : Dict ) -> List[str]: _A = self.prepare_config_and_inputs() _A , _A , _A = config_and_inputs _A = {'''pixel_values''': pixel_values} return config, inputs_dict @require_torch class lowerCamelCase__ ( _A , _A , unittest.TestCase): """simple docstring""" a__ : Dict = (DPTModel, DPTForDepthEstimation, DPTForSemanticSegmentation) if is_torch_available() else () a__ : Dict = ( { "depth-estimation": DPTForDepthEstimation, "feature-extraction": DPTModel, "image-segmentation": DPTForSemanticSegmentation, } if is_torch_available() else {} ) a__ : Optional[Any] = False a__ : Any = False a__ : List[str] = False def snake_case_ ( self : List[Any] ) -> Any: _A = DPTModelTester(self ) _A = ConfigTester(self , config_class=__lowerCAmelCase , has_text_modality=__lowerCAmelCase , hidden_size=37 ) def snake_case_ ( self : Optional[int] ) -> Dict: self.config_tester.run_common_tests() @unittest.skip(reason='''DPT does not use inputs_embeds''' ) def snake_case_ ( self : Any ) -> str: pass def snake_case_ ( self : str ) -> Any: _A , _A = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: _A = model_class(__lowerCAmelCase ) self.assertIsInstance(model.get_input_embeddings() , (nn.Module) ) _A = model.get_output_embeddings() self.assertTrue(x is None or isinstance(__lowerCAmelCase , nn.Linear ) ) def snake_case_ ( self : Dict ) -> Union[str, Any]: _A , _A = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: _A = model_class(__lowerCAmelCase ) _A = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic _A = [*signature.parameters.keys()] _A = ['''pixel_values'''] self.assertListEqual(arg_names[:1] , __lowerCAmelCase ) def snake_case_ ( self : str ) -> List[Any]: _A = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*__lowerCAmelCase ) def snake_case_ ( self : Any ) -> str: _A = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_depth_estimation(*__lowerCAmelCase ) def snake_case_ ( self : Dict ) -> Optional[Any]: _A = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_semantic_segmentation(*__lowerCAmelCase ) def snake_case_ ( self : Optional[int] ) -> Any: for model_class in self.all_model_classes: if model_class.__name__ == "DPTForDepthEstimation": continue _A , _A = 
self.model_tester.prepare_config_and_inputs_for_common() _A = True if model_class in get_values(__lowerCAmelCase ): continue _A = model_class(__lowerCAmelCase ) model.to(__lowerCAmelCase ) model.train() _A = self._prepare_for_class(__lowerCAmelCase , __lowerCAmelCase , return_labels=__lowerCAmelCase ) _A = model(**__lowerCAmelCase ).loss loss.backward() def snake_case_ ( self : List[str] ) -> Optional[int]: for model_class in self.all_model_classes: if model_class.__name__ == "DPTForDepthEstimation": continue _A , _A = self.model_tester.prepare_config_and_inputs_for_common() _A = False _A = True if model_class in get_values(__lowerCAmelCase ) or not model_class.supports_gradient_checkpointing: continue _A = model_class(__lowerCAmelCase ) model.to(__lowerCAmelCase ) model.gradient_checkpointing_enable() model.train() _A = self._prepare_for_class(__lowerCAmelCase , __lowerCAmelCase , return_labels=__lowerCAmelCase ) _A = model(**__lowerCAmelCase ).loss loss.backward() def snake_case_ ( self : Tuple ) -> Optional[int]: _A , _A = self.model_tester.prepare_config_and_inputs_for_common() _A = _config_zero_init(__lowerCAmelCase ) for model_class in self.all_model_classes: _A = model_class(config=__lowerCAmelCase ) # Skip the check for the backbone _A = [] for name, module in model.named_modules(): if module.__class__.__name__ == "DPTViTHybridEmbeddings": _A = [f'''{name}.{key}''' for key in module.state_dict().keys()] break for name, param in model.named_parameters(): if param.requires_grad: if name in backbone_params: continue self.assertIn( ((param.data.mean() * 1E9).round() / 1E9).item() , [0.0, 1.0] , msg=f'''Parameter {name} of model {model_class} seems not properly initialized''' , ) @unittest.skip('''Will be fixed soon by reducing the size of the model used for common tests.''' ) def snake_case_ ( self : Any ) -> Optional[int]: pass @slow def snake_case_ ( self : Optional[int] ) -> Union[str, Any]: for model_name in DPT_PRETRAINED_MODEL_ARCHIVE_LIST[1:]: _A = DPTModel.from_pretrained(__lowerCAmelCase ) self.assertIsNotNone(__lowerCAmelCase ) def snake_case_ ( self : str ) -> Optional[int]: # We do this test only for DPTForDepthEstimation since it is the only model that uses readout_type _A , _A = self.model_tester.prepare_config_and_inputs_for_common() _A = '''add''' with self.assertRaises(__lowerCAmelCase ): _A = DPTForDepthEstimation(__lowerCAmelCase ) def SCREAMING_SNAKE_CASE_ ( ) -> List[Any]: _A = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' ) return image @require_torch @require_vision @slow class lowerCamelCase__ ( unittest.TestCase): """simple docstring""" def snake_case_ ( self : Dict ) -> str: _A = DPTImageProcessor.from_pretrained('''Intel/dpt-hybrid-midas''' ) _A = DPTForDepthEstimation.from_pretrained('''Intel/dpt-hybrid-midas''' ).to(__lowerCAmelCase ) _A = prepare_img() _A = image_processor(images=__lowerCAmelCase , return_tensors='''pt''' ).to(__lowerCAmelCase ) # forward pass with torch.no_grad(): _A = model(**__lowerCAmelCase ) _A = outputs.predicted_depth # verify the predicted depth _A = torch.Size((1, 3_84, 3_84) ) self.assertEqual(predicted_depth.shape , __lowerCAmelCase ) _A = torch.tensor( [[[5.6437, 5.6146, 5.6511], [5.4371, 5.5649, 5.5958], [5.5215, 5.5184, 5.5293]]] ).to(__lowerCAmelCase ) self.assertTrue(torch.allclose(outputs.predicted_depth[:3, :3, :3] / 1_00 , __lowerCAmelCase , atol=1E-4 ) )
2
def SCREAMING_SNAKE_CASE_ ( _snake_case :list ) -> bool: if not isinstance(_snake_case , list ): raise ValueError('''Input series is not valid, valid series - [2, 4, 6]''' ) if len(_snake_case ) == 0: raise ValueError('''Input list must be a non-empty list''' ) if len(_snake_case ) == 1: return True _A = series[1] - series[0] for index in range(len(_snake_case ) - 1 ): if series[index + 1] - series[index] != common_diff: return False return True def SCREAMING_SNAKE_CASE_ ( _snake_case :list ) -> float: if not isinstance(_snake_case , list ): raise ValueError('''Input series is not valid, valid series - [2, 4, 6]''' ) if len(_snake_case ) == 0: raise ValueError('''Input list must be a non-empty list''' ) _A = 0 for val in series: answer += val return answer / len(_snake_case ) if __name__ == "__main__": import doctest doctest.testmod()
2
1
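For readability, here is a minimal de-obfuscated sketch of the arithmetic-progression check stored in the row above. The names is_arithmetic_series, series and common_diff are illustrative stand-ins; the dataset keeps both helpers under the obfuscated name SCREAMING_SNAKE_CASE_.

def is_arithmetic_series(series: list) -> bool:
    # same validation as the stored snippet
    if not isinstance(series, list):
        raise ValueError("Input series is not valid, valid series - [2, 4, 6]")
    if len(series) == 0:
        raise ValueError("Input list must be a non-empty list")
    if len(series) == 1:
        return True
    common_diff = series[1] - series[0]
    # an arithmetic series has a constant difference between neighbours
    return all(series[i + 1] - series[i] == common_diff for i in range(len(series) - 1))

print(is_arithmetic_series([2, 4, 6]))  # True
print(is_arithmetic_series([2, 4, 7]))  # False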
import argparse import json import os import fairseq import torch from fairseq.data import Dictionary # Register SEW's fairseq modules from sew_asapp import tasks # noqa: F401 from transformers import ( SEWConfig, SEWForCTC, SEWModel, WavaVecaCTCTokenizer, WavaVecaFeatureExtractor, WavaVecaProcessor, logging, ) logging.set_verbosity_info() UpperCAmelCase_ = logging.get_logger(__name__) UpperCAmelCase_ = { """post_extract_proj""": """feature_projection""", """encoder.pos_conv.0""": """encoder.pos_conv_embed.conv""", """self_attn.k_proj""": """encoder.layers.*.attention.k_proj""", """self_attn.v_proj""": """encoder.layers.*.attention.v_proj""", """self_attn.q_proj""": """encoder.layers.*.attention.q_proj""", """self_attn.out_proj""": """encoder.layers.*.attention.out_proj""", """self_attn_layer_norm""": """encoder.layers.*.layer_norm""", """fc1""": """encoder.layers.*.feed_forward.intermediate_dense""", """fc2""": """encoder.layers.*.feed_forward.output_dense""", """final_layer_norm""": """encoder.layers.*.final_layer_norm""", """encoder.upsample.0""": """encoder.upsample.projection""", """encoder.layer_norm""": """encoder.layer_norm""", """w2v_model.layer_norm""": """layer_norm""", """w2v_encoder.proj""": """lm_head""", """mask_emb""": """masked_spec_embed""", } def SCREAMING_SNAKE_CASE_ ( _snake_case :Union[str, Any] , _snake_case :str , _snake_case :Any , _snake_case :int , _snake_case :List[Any] ) -> Optional[int]: for attribute in key.split('''.''' ): _A = getattr(_snake_case , _snake_case ) if weight_type is not None: _A = getattr(_snake_case , _snake_case ).shape else: _A = hf_pointer.shape assert hf_shape == value.shape, ( F'''Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be''' F''' {value.shape} for {full_name}''' ) if weight_type == "weight": _A = value elif weight_type == "weight_g": _A = value elif weight_type == "weight_v": _A = value elif weight_type == "bias": _A = value else: _A = value logger.info(F'''{key + '.' 
+ weight_type if weight_type is not None else ''} was initialized from {full_name}.''' ) def SCREAMING_SNAKE_CASE_ ( _snake_case :Union[str, Any] , _snake_case :Any , _snake_case :int ) -> Any: _A = [] _A = fairseq_model.state_dict() _A = hf_model.sew.feature_extractor if is_finetuned else hf_model.feature_extractor for name, value in fairseq_dict.items(): _A = False if "conv_layers" in name: load_conv_layer( _snake_case , _snake_case , _snake_case , _snake_case , hf_model.config.feat_extract_norm == '''group''' , ) _A = True else: for key, mapped_key in MAPPING.items(): _A = '''sew.''' + mapped_key if (is_finetuned and mapped_key != '''lm_head''') else mapped_key if key in name or key.split('''w2v_model.''' )[-1] == name.split('''.''' )[0]: _A = True if "*" in mapped_key: _A = name.split(_snake_case )[0].split('''.''' )[-2] _A = mapped_key.replace('''*''' , _snake_case ) if "weight_g" in name: _A = '''weight_g''' elif "weight_v" in name: _A = '''weight_v''' elif "weight" in name: _A = '''weight''' elif "bias" in name: _A = '''bias''' else: _A = None set_recursively(_snake_case , _snake_case , _snake_case , _snake_case , _snake_case ) continue if not is_used: unused_weights.append(_snake_case ) logger.warning(F'''Unused weights: {unused_weights}''' ) def SCREAMING_SNAKE_CASE_ ( _snake_case :Tuple , _snake_case :List[str] , _snake_case :List[str] , _snake_case :Optional[int] , _snake_case :List[Any] ) -> Any: _A = full_name.split('''conv_layers.''' )[-1] _A = name.split('''.''' ) _A = int(items[0] ) _A = int(items[1] ) if type_id == 0: if "bias" in name: assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, ( F'''{full_name} has size {value.shape}, but''' F''' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.''' ) _A = value logger.info(F'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' ) elif "weight" in name: assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, ( F'''{full_name} has size {value.shape}, but''' F''' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.''' ) _A = value logger.info(F'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' ) elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm): if "bias" in name: assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, ( F'''{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was''' " found." 
) _A = value logger.info(F'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' ) elif "weight" in name: assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, ( F'''{full_name} has size {value.shape}, but''' F''' {feature_extractor[layer_id].layer_norm.weight.data.shape} was found.''' ) _A = value logger.info(F'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' ) else: unused_weights.append(_snake_case ) def SCREAMING_SNAKE_CASE_ ( _snake_case :str , _snake_case :Dict ) -> Tuple: _A = SEWConfig() if is_finetuned: _A = model.wav_encoder.wav_model.cfg else: _A = model.cfg _A = fs_config.conv_bias _A = eval(fs_config.conv_feature_layers ) _A = [x[0] for x in conv_layers] _A = [x[1] for x in conv_layers] _A = [x[2] for x in conv_layers] _A = '''gelu''' _A = '''layer''' if fs_config.extractor_mode == '''layer_norm''' else '''group''' _A = 0.0 _A = fs_config.activation_fn.name _A = fs_config.encoder_embed_dim _A = 0.02 _A = fs_config.encoder_ffn_embed_dim _A = 1E-5 _A = fs_config.encoder_layerdrop _A = fs_config.encoder_attention_heads _A = fs_config.conv_pos_groups _A = fs_config.conv_pos _A = len(_snake_case ) _A = fs_config.encoder_layers _A = fs_config.squeeze_factor # take care of any params that are overridden by the Wav2VecCtc model if is_finetuned: _A = model.cfg _A = fs_config.final_dropout _A = fs_config.layerdrop _A = fs_config.activation_dropout _A = fs_config.mask_prob > 0 or fs_config.mask_channel_prob > 0 _A = fs_config.attention_dropout _A = fs_config.dropout_input _A = fs_config.dropout _A = fs_config.mask_channel_length _A = fs_config.mask_channel_prob _A = fs_config.mask_length _A = fs_config.mask_prob _A = '''Wav2Vec2FeatureExtractor''' _A = '''Wav2Vec2CTCTokenizer''' return config @torch.no_grad() def SCREAMING_SNAKE_CASE_ ( _snake_case :Optional[int] , _snake_case :Union[str, Any] , _snake_case :Optional[Any]=None , _snake_case :Optional[int]=None , _snake_case :Dict=True ) -> List[Any]: if is_finetuned: _A , _A , _A = fairseq.checkpoint_utils.load_model_ensemble_and_task( [checkpoint_path] , arg_overrides={'''data''': '''/'''.join(dict_path.split('''/''' )[:-1] )} ) else: _A , _A , _A = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] ) if config_path is not None: _A = SEWConfig.from_pretrained(_snake_case ) else: _A = convert_config(model[0] , _snake_case ) _A = model[0].eval() _A = True if config.feat_extract_norm == '''layer''' else False _A = WavaVecaFeatureExtractor( feature_size=1 , sampling_rate=16_000 , padding_value=0 , do_normalize=_snake_case , return_attention_mask=_snake_case , ) if is_finetuned: if dict_path: _A = Dictionary.load(_snake_case ) # important change bos & pad token id since CTC symbol is <pad> and # not <s> as in fairseq _A = target_dict.pad_index _A = target_dict.bos_index _A = target_dict.pad_index _A = target_dict.bos_index _A = target_dict.eos_index _A = len(target_dict.symbols ) _A = os.path.join(_snake_case , '''vocab.json''' ) if not os.path.isdir(_snake_case ): logger.error('''--pytorch_dump_folder_path ({}) should be a directory'''.format(_snake_case ) ) return os.makedirs(_snake_case , exist_ok=_snake_case ) with open(_snake_case , '''w''' , encoding='''utf-8''' ) as vocab_handle: json.dump(target_dict.indices , _snake_case ) _A = WavaVecaCTCTokenizer( _snake_case , unk_token=target_dict.unk_word , pad_token=target_dict.pad_word , bos_token=target_dict.bos_word , eos_token=target_dict.eos_word , 
word_delimiter_token='''|''' , do_lower_case=_snake_case , ) _A = WavaVecaProcessor(feature_extractor=_snake_case , tokenizer=_snake_case ) processor.save_pretrained(_snake_case ) _A = SEWForCTC(_snake_case ) else: _A = SEWModel(_snake_case ) feature_extractor.save_pretrained(_snake_case ) recursively_load_weights(_snake_case , _snake_case , _snake_case ) hf_model.save_pretrained(_snake_case ) if __name__ == "__main__": UpperCAmelCase_ = argparse.ArgumentParser() parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""") parser.add_argument("""--checkpoint_path""", default=None, type=str, help="""Path to fairseq checkpoint""") parser.add_argument("""--dict_path""", default=None, type=str, help="""Path to dict of fine-tuned model""") parser.add_argument("""--config_path""", default=None, type=str, help="""Path to hf config.json of model to convert""") parser.add_argument( """--is_finetuned""", action="""store_true""", help="""Whether the model to convert is a fine-tuned model or not""" ) UpperCAmelCase_ = parser.parse_args() convert_sew_checkpoint( args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, args.is_finetuned )
2
import math import numpy as np import qiskit from qiskit import Aer, ClassicalRegister, QuantumCircuit, QuantumRegister, execute def SCREAMING_SNAKE_CASE_ ( _snake_case :int = 3 ) -> qiskit.result.counts.Counts: if isinstance(_snake_case , str ): raise TypeError('''number of qubits must be an integer.''' ) if number_of_qubits <= 0: raise ValueError('''number of qubits must be > 0.''' ) if math.floor(_snake_case ) != number_of_qubits: raise ValueError('''number of qubits must be exact integer.''' ) if number_of_qubits > 10: raise ValueError('''number of qubits too large to simulate (>10).''' ) _A = QuantumRegister(_snake_case , '''qr''' ) _A = ClassicalRegister(_snake_case , '''cr''' ) _A = QuantumCircuit(_snake_case , _snake_case ) _A = number_of_qubits for i in range(_snake_case ): quantum_circuit.h(number_of_qubits - i - 1 ) counter -= 1 for j in range(_snake_case ): quantum_circuit.cp(np.pi / 2 ** (counter - j) , _snake_case , _snake_case ) for k in range(number_of_qubits // 2 ): quantum_circuit.swap(_snake_case , number_of_qubits - k - 1 ) # measure all the qubits quantum_circuit.measure(_snake_case , _snake_case ) # simulate with 10000 shots _A = Aer.get_backend('''qasm_simulator''' ) _A = execute(_snake_case , _snake_case , shots=10_000 ) return job.result().get_counts(_snake_case ) if __name__ == "__main__": print( f'Total count for quantum fourier transform state is: \ {quantum_fourier_transform(3)}' )
2
1
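The conversion script in the row above renames fairseq checkpoint keys to Transformers keys through a MAPPING table in which "*" stands for a layer index. A toy, self-contained sketch of that lookup; the mapping entry and key below are made-up examples, not real checkpoint contents.

MAPPING = {"self_attn.k_proj": "encoder.layers.*.attention.k_proj"}

def rename(fairseq_key: str) -> str:
    # mirror of the wildcard handling in recursively_load_weights above
    for old, new in MAPPING.items():
        if old in fairseq_key:
            # the layer index sits just before the matched fragment,
            # e.g. "encoder.layers.3.self_attn.k_proj" -> "3"
            layer_index = fairseq_key.split(old)[0].split(".")[-2]
            return new.replace("*", layer_index)
    return fairseq_key

print(rename("encoder.layers.3.self_attn.k_proj"))
# encoder.layers.3.attention.k_proj  (weight/bias suffix handling elided)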
UpperCAmelCase_ = """Alexander Joslin""" import operator as op from .stack import Stack def SCREAMING_SNAKE_CASE_ ( _snake_case :str ) -> int: _A = {'''*''': op.mul, '''/''': op.truediv, '''+''': op.add, '''-''': op.sub} _A = Stack() _A = Stack() for i in equation: if i.isdigit(): # RULE 1 operand_stack.push(int(_snake_case ) ) elif i in operators: # RULE 2 operator_stack.push(_snake_case ) elif i == ")": # RULE 4 _A = operator_stack.peek() operator_stack.pop() _A = operand_stack.peek() operand_stack.pop() _A = operand_stack.peek() operand_stack.pop() _A = operators[opr](_snake_case , _snake_case ) operand_stack.push(_snake_case ) # RULE 5 return operand_stack.peek() if __name__ == "__main__": UpperCAmelCase_ = """(5 + ((4 * 2) * (2 + 3)))""" # answer = 45 print(f'{equation} = {dijkstras_two_stack_algorithm(equation)}')
2
import argparse import json import os import fairseq import torch from fairseq.data import Dictionary # Register SEW's fairseq modules from sew_asapp import tasks # noqa: F401 from transformers import ( SEWConfig, SEWForCTC, SEWModel, WavaVecaCTCTokenizer, WavaVecaFeatureExtractor, WavaVecaProcessor, logging, ) logging.set_verbosity_info() UpperCAmelCase_ = logging.get_logger(__name__) UpperCAmelCase_ = { """post_extract_proj""": """feature_projection""", """encoder.pos_conv.0""": """encoder.pos_conv_embed.conv""", """self_attn.k_proj""": """encoder.layers.*.attention.k_proj""", """self_attn.v_proj""": """encoder.layers.*.attention.v_proj""", """self_attn.q_proj""": """encoder.layers.*.attention.q_proj""", """self_attn.out_proj""": """encoder.layers.*.attention.out_proj""", """self_attn_layer_norm""": """encoder.layers.*.layer_norm""", """fc1""": """encoder.layers.*.feed_forward.intermediate_dense""", """fc2""": """encoder.layers.*.feed_forward.output_dense""", """final_layer_norm""": """encoder.layers.*.final_layer_norm""", """encoder.upsample.0""": """encoder.upsample.projection""", """encoder.layer_norm""": """encoder.layer_norm""", """w2v_model.layer_norm""": """layer_norm""", """w2v_encoder.proj""": """lm_head""", """mask_emb""": """masked_spec_embed""", } def SCREAMING_SNAKE_CASE_ ( _snake_case :Union[str, Any] , _snake_case :str , _snake_case :Any , _snake_case :int , _snake_case :List[Any] ) -> Optional[int]: for attribute in key.split('''.''' ): _A = getattr(_snake_case , _snake_case ) if weight_type is not None: _A = getattr(_snake_case , _snake_case ).shape else: _A = hf_pointer.shape assert hf_shape == value.shape, ( F'''Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be''' F''' {value.shape} for {full_name}''' ) if weight_type == "weight": _A = value elif weight_type == "weight_g": _A = value elif weight_type == "weight_v": _A = value elif weight_type == "bias": _A = value else: _A = value logger.info(F'''{key + '.' 
+ weight_type if weight_type is not None else ''} was initialized from {full_name}.''' ) def SCREAMING_SNAKE_CASE_ ( _snake_case :Union[str, Any] , _snake_case :Any , _snake_case :int ) -> Any: _A = [] _A = fairseq_model.state_dict() _A = hf_model.sew.feature_extractor if is_finetuned else hf_model.feature_extractor for name, value in fairseq_dict.items(): _A = False if "conv_layers" in name: load_conv_layer( _snake_case , _snake_case , _snake_case , _snake_case , hf_model.config.feat_extract_norm == '''group''' , ) _A = True else: for key, mapped_key in MAPPING.items(): _A = '''sew.''' + mapped_key if (is_finetuned and mapped_key != '''lm_head''') else mapped_key if key in name or key.split('''w2v_model.''' )[-1] == name.split('''.''' )[0]: _A = True if "*" in mapped_key: _A = name.split(_snake_case )[0].split('''.''' )[-2] _A = mapped_key.replace('''*''' , _snake_case ) if "weight_g" in name: _A = '''weight_g''' elif "weight_v" in name: _A = '''weight_v''' elif "weight" in name: _A = '''weight''' elif "bias" in name: _A = '''bias''' else: _A = None set_recursively(_snake_case , _snake_case , _snake_case , _snake_case , _snake_case ) continue if not is_used: unused_weights.append(_snake_case ) logger.warning(F'''Unused weights: {unused_weights}''' ) def SCREAMING_SNAKE_CASE_ ( _snake_case :Tuple , _snake_case :List[str] , _snake_case :List[str] , _snake_case :Optional[int] , _snake_case :List[Any] ) -> Any: _A = full_name.split('''conv_layers.''' )[-1] _A = name.split('''.''' ) _A = int(items[0] ) _A = int(items[1] ) if type_id == 0: if "bias" in name: assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, ( F'''{full_name} has size {value.shape}, but''' F''' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.''' ) _A = value logger.info(F'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' ) elif "weight" in name: assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, ( F'''{full_name} has size {value.shape}, but''' F''' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.''' ) _A = value logger.info(F'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' ) elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm): if "bias" in name: assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, ( F'''{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was''' " found." 
) _A = value logger.info(F'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' ) elif "weight" in name: assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, ( F'''{full_name} has size {value.shape}, but''' F''' {feature_extractor[layer_id].layer_norm.weight.data.shape} was found.''' ) _A = value logger.info(F'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' ) else: unused_weights.append(_snake_case ) def SCREAMING_SNAKE_CASE_ ( _snake_case :str , _snake_case :Dict ) -> Tuple: _A = SEWConfig() if is_finetuned: _A = model.wav_encoder.wav_model.cfg else: _A = model.cfg _A = fs_config.conv_bias _A = eval(fs_config.conv_feature_layers ) _A = [x[0] for x in conv_layers] _A = [x[1] for x in conv_layers] _A = [x[2] for x in conv_layers] _A = '''gelu''' _A = '''layer''' if fs_config.extractor_mode == '''layer_norm''' else '''group''' _A = 0.0 _A = fs_config.activation_fn.name _A = fs_config.encoder_embed_dim _A = 0.02 _A = fs_config.encoder_ffn_embed_dim _A = 1E-5 _A = fs_config.encoder_layerdrop _A = fs_config.encoder_attention_heads _A = fs_config.conv_pos_groups _A = fs_config.conv_pos _A = len(_snake_case ) _A = fs_config.encoder_layers _A = fs_config.squeeze_factor # take care of any params that are overridden by the Wav2VecCtc model if is_finetuned: _A = model.cfg _A = fs_config.final_dropout _A = fs_config.layerdrop _A = fs_config.activation_dropout _A = fs_config.mask_prob > 0 or fs_config.mask_channel_prob > 0 _A = fs_config.attention_dropout _A = fs_config.dropout_input _A = fs_config.dropout _A = fs_config.mask_channel_length _A = fs_config.mask_channel_prob _A = fs_config.mask_length _A = fs_config.mask_prob _A = '''Wav2Vec2FeatureExtractor''' _A = '''Wav2Vec2CTCTokenizer''' return config @torch.no_grad() def SCREAMING_SNAKE_CASE_ ( _snake_case :Optional[int] , _snake_case :Union[str, Any] , _snake_case :Optional[Any]=None , _snake_case :Optional[int]=None , _snake_case :Dict=True ) -> List[Any]: if is_finetuned: _A , _A , _A = fairseq.checkpoint_utils.load_model_ensemble_and_task( [checkpoint_path] , arg_overrides={'''data''': '''/'''.join(dict_path.split('''/''' )[:-1] )} ) else: _A , _A , _A = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] ) if config_path is not None: _A = SEWConfig.from_pretrained(_snake_case ) else: _A = convert_config(model[0] , _snake_case ) _A = model[0].eval() _A = True if config.feat_extract_norm == '''layer''' else False _A = WavaVecaFeatureExtractor( feature_size=1 , sampling_rate=16_000 , padding_value=0 , do_normalize=_snake_case , return_attention_mask=_snake_case , ) if is_finetuned: if dict_path: _A = Dictionary.load(_snake_case ) # important change bos & pad token id since CTC symbol is <pad> and # not <s> as in fairseq _A = target_dict.pad_index _A = target_dict.bos_index _A = target_dict.pad_index _A = target_dict.bos_index _A = target_dict.eos_index _A = len(target_dict.symbols ) _A = os.path.join(_snake_case , '''vocab.json''' ) if not os.path.isdir(_snake_case ): logger.error('''--pytorch_dump_folder_path ({}) should be a directory'''.format(_snake_case ) ) return os.makedirs(_snake_case , exist_ok=_snake_case ) with open(_snake_case , '''w''' , encoding='''utf-8''' ) as vocab_handle: json.dump(target_dict.indices , _snake_case ) _A = WavaVecaCTCTokenizer( _snake_case , unk_token=target_dict.unk_word , pad_token=target_dict.pad_word , bos_token=target_dict.bos_word , eos_token=target_dict.eos_word , 
word_delimiter_token='''|''' , do_lower_case=_snake_case , ) _A = WavaVecaProcessor(feature_extractor=_snake_case , tokenizer=_snake_case ) processor.save_pretrained(_snake_case ) _A = SEWForCTC(_snake_case ) else: _A = SEWModel(_snake_case ) feature_extractor.save_pretrained(_snake_case ) recursively_load_weights(_snake_case , _snake_case , _snake_case ) hf_model.save_pretrained(_snake_case ) if __name__ == "__main__": UpperCAmelCase_ = argparse.ArgumentParser() parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""") parser.add_argument("""--checkpoint_path""", default=None, type=str, help="""Path to fairseq checkpoint""") parser.add_argument("""--dict_path""", default=None, type=str, help="""Path to dict of fine-tuned model""") parser.add_argument("""--config_path""", default=None, type=str, help="""Path to hf config.json of model to convert""") parser.add_argument( """--is_finetuned""", action="""store_true""", help="""Whether the model to convert is a fine-tuned model or not""" ) UpperCAmelCase_ = parser.parse_args() convert_sew_checkpoint( args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, args.is_finetuned )
2
1
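The parser in the row above is Dijkstra's two-stack algorithm: digits go onto an operand stack, operators onto an operator stack, and every ")" triggers a reduction. A runnable sketch with the obfuscated locals spelled out, using plain lists in place of the imported Stack class:

import operator as op

def two_stack_eval(equation: str) -> int:
    operators = {"*": op.mul, "/": op.truediv, "+": op.add, "-": op.sub}
    operand_stack: list = []
    operator_stack: list = []
    for ch in equation:
        if ch.isdigit():                 # RULE 1
            operand_stack.append(int(ch))
        elif ch in operators:            # RULE 2
            operator_stack.append(ch)
        elif ch == ")":                  # RULE 4
            opr = operator_stack.pop()
            num2 = operand_stack.pop()
            num1 = operand_stack.pop()
            operand_stack.append(operators[opr](num1, num2))
    return operand_stack[-1]             # RULE 5

print(two_stack_eval("(5 + ((4 * 2) * (2 + 3)))"))  # 45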
import logging import math from functools import partial from typing import Any, Callable, Dict, Iterable, List, Optional, Sequence, Tuple, Union import torch from .tensor_utils import tensor_tree_map, tree_map def SCREAMING_SNAKE_CASE_ ( _snake_case :Union[dict, list, tuple, torch.Tensor] ) -> List[Tuple[int, ...]]: _A = [] if isinstance(_snake_case , _snake_case ): for v in tree.values(): shapes.extend(_fetch_dims(_snake_case ) ) elif isinstance(_snake_case , (list, tuple) ): for t in tree: shapes.extend(_fetch_dims(_snake_case ) ) elif isinstance(_snake_case , torch.Tensor ): shapes.append(tree.shape ) else: raise ValueError('''Not supported''' ) return shapes @torch.jit.ignore def SCREAMING_SNAKE_CASE_ ( _snake_case :int , _snake_case :Tuple[int, ...] ) -> Tuple[int, ...]: _A = [] for d in reversed(_snake_case ): idx.append(flat_idx % d ) _A = flat_idx // d return tuple(reversed(_snake_case ) ) @torch.jit.ignore def SCREAMING_SNAKE_CASE_ ( _snake_case :Sequence[int] , _snake_case :Sequence[int] , _snake_case :Sequence[int] , _snake_case :Optional[Sequence[bool]] = None , _snake_case :Optional[Sequence[bool]] = None , ) -> List[Tuple[slice, ...]]: # start_edges and end_edges both indicate whether, starting from any given # dimension, the start/end index is at the top/bottom edge of the # corresponding tensor, modeled as a tree def reduce_edge_list(_snake_case :List[bool] ) -> None: _A = True for i in range(len(_snake_case ) ): _A = -1 * (i + 1) l[reversed_idx] &= tally _A = l[reversed_idx] if start_edges is None: _A = [s == 0 for s in start] reduce_edge_list(_snake_case ) if end_edges is None: _A = [e == (d - 1) for e, d in zip(_snake_case , _snake_case )] reduce_edge_list(_snake_case ) # Base cases. Either start/end are empty and we're done, or the final, # one-dimensional tensor can be simply sliced if len(_snake_case ) == 0: return [()] elif len(_snake_case ) == 1: return [(slice(start[0] , end[0] + 1 ),)] _A = [] _A = [] # Dimensions common to start and end can be selected directly for s, e in zip(_snake_case , _snake_case ): if s == e: path_list.append(slice(_snake_case , s + 1 ) ) else: break _A = tuple(_snake_case ) _A = len(_snake_case ) # start == end, and we're done if divergence_idx == len(_snake_case ): return [path] def upper() -> Tuple[Tuple[slice, ...], ...]: assert start_edges is not None assert end_edges is not None _A = start[divergence_idx] return tuple( path + (slice(_snake_case , sdi + 1 ),) + s for s in _get_minimal_slice_set( start[divergence_idx + 1 :] , [d - 1 for d in dims[divergence_idx + 1 :]] , dims[divergence_idx + 1 :] , start_edges=start_edges[divergence_idx + 1 :] , end_edges=[True for _ in end_edges[divergence_idx + 1 :]] , ) ) def lower() -> Tuple[Tuple[slice, ...], ...]: assert start_edges is not None assert end_edges is not None _A = end[divergence_idx] return tuple( path + (slice(_snake_case , edi + 1 ),) + s for s in _get_minimal_slice_set( [0 for _ in start[divergence_idx + 1 :]] , end[divergence_idx + 1 :] , dims[divergence_idx + 1 :] , start_edges=[True for _ in start_edges[divergence_idx + 1 :]] , end_edges=end_edges[divergence_idx + 1 :] , ) ) # If both start and end are at the edges of the subtree rooted at # divergence_idx, we can just select the whole subtree at once if start_edges[divergence_idx] and end_edges[divergence_idx]: slices.append(path + (slice(start[divergence_idx] , end[divergence_idx] + 1 ),) ) # If just start is at the edge, we can grab almost all of the subtree, # treating only the ragged bottom edge as an edge case elif 
start_edges[divergence_idx]: slices.append(path + (slice(start[divergence_idx] , end[divergence_idx] ),) ) slices.extend(lower() ) # Analogous to the previous case, but the top is ragged this time elif end_edges[divergence_idx]: slices.extend(upper() ) slices.append(path + (slice(start[divergence_idx] + 1 , end[divergence_idx] + 1 ),) ) # If both sides of the range are ragged, we need to handle both sides # separately. If there's contiguous meat in between them, we can index it # in one big chunk else: slices.extend(upper() ) _A = end[divergence_idx] - start[divergence_idx] if middle_ground > 1: slices.append(path + (slice(start[divergence_idx] + 1 , end[divergence_idx] ),) ) slices.extend(lower() ) return slices @torch.jit.ignore def SCREAMING_SNAKE_CASE_ ( _snake_case :torch.Tensor , _snake_case :int , _snake_case :int , _snake_case :int ) -> torch.Tensor: _A = t.shape[:no_batch_dims] _A = list(_flat_idx_to_idx(_snake_case , _snake_case ) ) # _get_minimal_slice_set is inclusive _A = list(_flat_idx_to_idx(flat_end - 1 , _snake_case ) ) # Get an ordered list of slices to perform _A = _get_minimal_slice_set( _snake_case , _snake_case , _snake_case , ) _A = [t[s] for s in slices] return torch.cat([s.view((-1,) + t.shape[no_batch_dims:] ) for s in sliced_tensors] ) def SCREAMING_SNAKE_CASE_ ( _snake_case :Callable , _snake_case :Dict[str, Any] , _snake_case :int , _snake_case :int , _snake_case :bool = False , _snake_case :Any = None , _snake_case :bool = False , ) -> Any: if not (len(_snake_case ) > 0): raise ValueError('''Must provide at least one input''' ) _A = [shape[:no_batch_dims] for shape in _fetch_dims(_snake_case )] _A = tuple([max(_snake_case ) for s in zip(*_snake_case )] ) def _prep_inputs(_snake_case :torch.Tensor ) -> torch.Tensor: if not low_mem: if not sum(t.shape[:no_batch_dims] ) == no_batch_dims: _A = t.expand(orig_batch_dims + t.shape[no_batch_dims:] ) _A = t.reshape(-1 , *t.shape[no_batch_dims:] ) else: _A = t.expand(orig_batch_dims + t.shape[no_batch_dims:] ) return t _A = tensor_tree_map(_prep_inputs , _snake_case ) _A = None if _out is not None: _A = tensor_tree_map(lambda _snake_case : t.view([-1] + list(t.shape[no_batch_dims:] ) ) , _out ) _A = 1 for d in orig_batch_dims: flat_batch_dim *= d _A = flat_batch_dim // chunk_size + (flat_batch_dim % chunk_size != 0) def _select_chunk(_snake_case :torch.Tensor ) -> torch.Tensor: return t[i : i + chunk_size] if t.shape[0] != 1 else t _A = 0 _A = prepped_outputs for _ in range(_snake_case ): # Chunk the input if not low_mem: _A = _select_chunk else: _A = partial( _chunk_slice , flat_start=_snake_case , flat_end=min(_snake_case , i + chunk_size ) , no_batch_dims=len(_snake_case ) , ) _A = tensor_tree_map(_snake_case , _snake_case ) # Run the layer on the chunk _A = layer(**_snake_case ) # Allocate space for the output if out is None: _A = tensor_tree_map(lambda _snake_case : t.new_zeros((flat_batch_dim,) + t.shape[1:] ) , _snake_case ) # Put the chunk in its pre-allocated space if isinstance(_snake_case , _snake_case ): def assign(_snake_case :dict , _snake_case :dict ) -> None: for k, v in da.items(): if isinstance(_snake_case , _snake_case ): assign(_snake_case , da[k] ) else: if _add_into_out: v[i : i + chunk_size] += da[k] else: _A = da[k] assign(_snake_case , _snake_case ) elif isinstance(_snake_case , _snake_case ): for xa, xa in zip(_snake_case , _snake_case ): if _add_into_out: xa[i : i + chunk_size] += xa else: _A = xa elif isinstance(_snake_case , torch.Tensor ): if _add_into_out: out[i : i + chunk_size] += 
output_chunk else: _A = output_chunk else: raise ValueError('''Not supported''' ) i += chunk_size _A = tensor_tree_map(lambda _snake_case : t.view(orig_batch_dims + t.shape[1:] ) , _snake_case ) return out class lowerCamelCase__ : """simple docstring""" def __init__( self : List[str] , __lowerCAmelCase : int = 5_12 , ) -> Optional[int]: _A = max_chunk_size _A = None _A = None def snake_case_ ( self : int , __lowerCAmelCase : Callable , __lowerCAmelCase : tuple , __lowerCAmelCase : int ) -> int: logging.info('''Tuning chunk size...''' ) if min_chunk_size >= self.max_chunk_size: return min_chunk_size _A = [2**l for l in range(int(math.log(self.max_chunk_size , 2 ) ) + 1 )] _A = [c for c in candidates if c > min_chunk_size] _A = [min_chunk_size] + candidates candidates[-1] += 4 def test_chunk_size(__lowerCAmelCase : int ) -> bool: try: with torch.no_grad(): fn(*__lowerCAmelCase , chunk_size=__lowerCAmelCase ) return True except RuntimeError: return False _A = 0 _A = len(__lowerCAmelCase ) - 1 while i > min_viable_chunk_size_index: _A = test_chunk_size(candidates[i] ) if not viable: _A = (min_viable_chunk_size_index + i) // 2 else: _A = i _A = (i + len(__lowerCAmelCase ) - 1) // 2 return candidates[min_viable_chunk_size_index] def snake_case_ ( self : Union[str, Any] , __lowerCAmelCase : Iterable , __lowerCAmelCase : Iterable ) -> bool: _A = True for aa, aa in zip(__lowerCAmelCase , __lowerCAmelCase ): assert type(__lowerCAmelCase ) == type(__lowerCAmelCase ) if isinstance(__lowerCAmelCase , (list, tuple) ): consistent &= self._compare_arg_caches(__lowerCAmelCase , __lowerCAmelCase ) elif isinstance(__lowerCAmelCase , __lowerCAmelCase ): _A = [v for _, v in sorted(aa.items() , key=lambda __lowerCAmelCase : x[0] )] _A = [v for _, v in sorted(aa.items() , key=lambda __lowerCAmelCase : x[0] )] consistent &= self._compare_arg_caches(__lowerCAmelCase , __lowerCAmelCase ) else: consistent &= aa == aa return consistent def snake_case_ ( self : Any , __lowerCAmelCase : Callable , __lowerCAmelCase : tuple , __lowerCAmelCase : int , ) -> int: _A = True _A = tree_map(lambda __lowerCAmelCase : a.shape if isinstance(__lowerCAmelCase , torch.Tensor ) else a , __lowerCAmelCase , __lowerCAmelCase ) if self.cached_arg_data is not None: # If args have changed shape/value, we need to re-tune assert len(self.cached_arg_data ) == len(__lowerCAmelCase ) _A = self._compare_arg_caches(self.cached_arg_data , __lowerCAmelCase ) else: # Otherwise, we can reuse the precomputed value _A = False if not consistent: _A = self._determine_favorable_chunk_size( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , ) _A = arg_data assert self.cached_chunk_size is not None return self.cached_chunk_size
2
import unittest from transformers import is_vision_available from transformers.pipelines import pipeline from transformers.testing_utils import ( is_pipeline_test, nested_simplify, require_tf, require_torch, require_vision, slow, ) from .test_pipelines_common import ANY if is_vision_available(): from PIL import Image else: class lowerCamelCase__ : """simple docstring""" @staticmethod def snake_case_ ( *__lowerCAmelCase : Optional[Any] , **__lowerCAmelCase : Any ) -> Any: pass @is_pipeline_test @require_vision class lowerCamelCase__ ( unittest.TestCase): """simple docstring""" @require_torch def snake_case_ ( self : Tuple ) -> Tuple: _A = pipeline( model='''hf-internal-testing/tiny-random-clip-zero-shot-image-classification''' , ) _A = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' ) _A = image_classifier(__lowerCAmelCase , candidate_labels=['''a''', '''b''', '''c'''] ) # The floating scores are so close, we enter floating error approximation and the order is not guaranteed across # python and torch versions. self.assertIn( nested_simplify(__lowerCAmelCase ) , [ [{'''score''': 0.333, '''label''': '''a'''}, {'''score''': 0.333, '''label''': '''b'''}, {'''score''': 0.333, '''label''': '''c'''}], [{'''score''': 0.333, '''label''': '''a'''}, {'''score''': 0.333, '''label''': '''c'''}, {'''score''': 0.333, '''label''': '''b'''}], ] , ) _A = image_classifier([image] * 5 , candidate_labels=['''A''', '''B''', '''C'''] , batch_size=2 ) self.assertEqual( nested_simplify(__lowerCAmelCase ) , [ [ {'''score''': 0.333, '''label''': ANY(__lowerCAmelCase )}, {'''score''': 0.333, '''label''': ANY(__lowerCAmelCase )}, {'''score''': 0.333, '''label''': ANY(__lowerCAmelCase )}, ], [ {'''score''': 0.333, '''label''': ANY(__lowerCAmelCase )}, {'''score''': 0.333, '''label''': ANY(__lowerCAmelCase )}, {'''score''': 0.333, '''label''': ANY(__lowerCAmelCase )}, ], [ {'''score''': 0.333, '''label''': ANY(__lowerCAmelCase )}, {'''score''': 0.333, '''label''': ANY(__lowerCAmelCase )}, {'''score''': 0.333, '''label''': ANY(__lowerCAmelCase )}, ], [ {'''score''': 0.333, '''label''': ANY(__lowerCAmelCase )}, {'''score''': 0.333, '''label''': ANY(__lowerCAmelCase )}, {'''score''': 0.333, '''label''': ANY(__lowerCAmelCase )}, ], [ {'''score''': 0.333, '''label''': ANY(__lowerCAmelCase )}, {'''score''': 0.333, '''label''': ANY(__lowerCAmelCase )}, {'''score''': 0.333, '''label''': ANY(__lowerCAmelCase )}, ], ] , ) @require_tf def snake_case_ ( self : int ) -> Optional[int]: _A = pipeline( model='''hf-internal-testing/tiny-random-clip-zero-shot-image-classification''' , framework='''tf''' ) _A = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' ) _A = image_classifier(__lowerCAmelCase , candidate_labels=['''a''', '''b''', '''c'''] ) self.assertEqual( nested_simplify(__lowerCAmelCase ) , [{'''score''': 0.333, '''label''': '''a'''}, {'''score''': 0.333, '''label''': '''b'''}, {'''score''': 0.333, '''label''': '''c'''}] , ) _A = image_classifier([image] * 5 , candidate_labels=['''A''', '''B''', '''C'''] , batch_size=2 ) self.assertEqual( nested_simplify(__lowerCAmelCase ) , [ [ {'''score''': 0.333, '''label''': ANY(__lowerCAmelCase )}, {'''score''': 0.333, '''label''': ANY(__lowerCAmelCase )}, {'''score''': 0.333, '''label''': ANY(__lowerCAmelCase )}, ], [ {'''score''': 0.333, '''label''': ANY(__lowerCAmelCase )}, {'''score''': 0.333, '''label''': ANY(__lowerCAmelCase )}, {'''score''': 0.333, '''label''': ANY(__lowerCAmelCase )}, ], [ {'''score''': 0.333, '''label''': 
ANY(__lowerCAmelCase )}, {'''score''': 0.333, '''label''': ANY(__lowerCAmelCase )}, {'''score''': 0.333, '''label''': ANY(__lowerCAmelCase )}, ], [ {'''score''': 0.333, '''label''': ANY(__lowerCAmelCase )}, {'''score''': 0.333, '''label''': ANY(__lowerCAmelCase )}, {'''score''': 0.333, '''label''': ANY(__lowerCAmelCase )}, ], [ {'''score''': 0.333, '''label''': ANY(__lowerCAmelCase )}, {'''score''': 0.333, '''label''': ANY(__lowerCAmelCase )}, {'''score''': 0.333, '''label''': ANY(__lowerCAmelCase )}, ], ] , ) @slow @require_torch def snake_case_ ( self : Optional[int] ) -> int: _A = pipeline( task='''zero-shot-image-classification''' , model='''openai/clip-vit-base-patch32''' , ) # This is an image of 2 cats with remotes and no planes _A = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' ) _A = image_classifier(__lowerCAmelCase , candidate_labels=['''cat''', '''plane''', '''remote'''] ) self.assertEqual( nested_simplify(__lowerCAmelCase ) , [ {'''score''': 0.511, '''label''': '''remote'''}, {'''score''': 0.485, '''label''': '''cat'''}, {'''score''': 0.004, '''label''': '''plane'''}, ] , ) _A = image_classifier([image] * 5 , candidate_labels=['''cat''', '''plane''', '''remote'''] , batch_size=2 ) self.assertEqual( nested_simplify(__lowerCAmelCase ) , [ [ {'''score''': 0.511, '''label''': '''remote'''}, {'''score''': 0.485, '''label''': '''cat'''}, {'''score''': 0.004, '''label''': '''plane'''}, ], ] * 5 , ) @slow @require_tf def snake_case_ ( self : Optional[int] ) -> Dict: _A = pipeline( task='''zero-shot-image-classification''' , model='''openai/clip-vit-base-patch32''' , framework='''tf''' ) # This is an image of 2 cats with remotes and no planes _A = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' ) _A = image_classifier(__lowerCAmelCase , candidate_labels=['''cat''', '''plane''', '''remote'''] ) self.assertEqual( nested_simplify(__lowerCAmelCase ) , [ {'''score''': 0.511, '''label''': '''remote'''}, {'''score''': 0.485, '''label''': '''cat'''}, {'''score''': 0.004, '''label''': '''plane'''}, ] , ) _A = image_classifier([image] * 5 , candidate_labels=['''cat''', '''plane''', '''remote'''] , batch_size=2 ) self.assertEqual( nested_simplify(__lowerCAmelCase ) , [ [ {'''score''': 0.511, '''label''': '''remote'''}, {'''score''': 0.485, '''label''': '''cat'''}, {'''score''': 0.004, '''label''': '''plane'''}, ], ] * 5 , )
2
1
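Stripped of tree-mapping and the low-memory slicing path, the chunking helper in the row above reduces to: flatten the batch dimensions, run the layer on fixed-size slices, and write each slice into a pre-allocated output. A minimal single-tensor sketch of that pattern; chunked_apply is an illustrative name, not part of the original module.

import torch

def chunked_apply(fn, x: torch.Tensor, chunk_size: int) -> torch.Tensor:
    # x: (batch, ...); fn is applied chunk_size rows at a time
    out = None
    for start in range(0, x.shape[0], chunk_size):
        chunk_out = fn(x[start : start + chunk_size])
        if out is None:
            # allocate the output lazily, once the result shape is known
            out = x.new_zeros((x.shape[0],) + tuple(chunk_out.shape[1:]))
        out[start : start + chunk_out.shape[0]] = chunk_out
    return out

x = torch.randn(10, 4)
assert torch.allclose(chunked_apply(torch.tanh, x, chunk_size=3), torch.tanh(x))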
# flake8: noqa # Lint as: python3 UpperCAmelCase_ = [ """VerificationMode""", """Version""", """disable_progress_bar""", """enable_progress_bar""", """is_progress_bar_enabled""", """experimental""", ] from .info_utils import VerificationMode from .logging import disable_progress_bar, enable_progress_bar, is_progress_bar_enabled from .version import Version from .experimental import experimental
2
import json import os import shutil import tempfile import unittest import numpy as np import pytest from transformers import BertTokenizer, BertTokenizerFast from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES from transformers.testing_utils import require_vision from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available if is_vision_available(): from PIL import Image from transformers import AlignProcessor, EfficientNetImageProcessor @require_vision class lowerCamelCase__ ( unittest.TestCase): """simple docstring""" def snake_case_ ( self : Tuple ) -> Optional[int]: _A = tempfile.mkdtemp() _A = [ '''[UNK]''', '''[CLS]''', '''[SEP]''', '''[PAD]''', '''[MASK]''', '''want''', '''##want''', '''##ed''', '''wa''', '''un''', '''runn''', '''##ing''', ''',''', '''low''', '''lowest''', ] _A = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] ) with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer: vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) ) _A = { '''do_resize''': True, '''size''': 20, '''do_center_crop''': True, '''crop_size''': 18, '''do_normalize''': True, '''image_mean''': [0.4814_5466, 0.457_8275, 0.4082_1073], '''image_std''': [0.2686_2954, 0.2613_0258, 0.2757_7711], } _A = os.path.join(self.tmpdirname , __lowerCAmelCase ) with open(self.image_processor_file , '''w''' , encoding='''utf-8''' ) as fp: json.dump(__lowerCAmelCase , __lowerCAmelCase ) def snake_case_ ( self : Dict , **__lowerCAmelCase : int ) -> Optional[int]: return BertTokenizer.from_pretrained(self.tmpdirname , **__lowerCAmelCase ) def snake_case_ ( self : str , **__lowerCAmelCase : Optional[Any] ) -> Tuple: return BertTokenizerFast.from_pretrained(self.tmpdirname , **__lowerCAmelCase ) def snake_case_ ( self : Tuple , **__lowerCAmelCase : str ) -> Union[str, Any]: return EfficientNetImageProcessor.from_pretrained(self.tmpdirname , **__lowerCAmelCase ) def snake_case_ ( self : Optional[Any] ) -> Optional[int]: shutil.rmtree(self.tmpdirname ) def snake_case_ ( self : int ) -> Optional[Any]: _A = [np.random.randint(2_55 , size=(3, 30, 4_00) , dtype=np.uinta )] _A = [Image.fromarray(np.moveaxis(__lowerCAmelCase , 0 , -1 ) ) for x in image_inputs] return image_inputs def snake_case_ ( self : Dict ) -> List[str]: _A = self.get_tokenizer() _A = self.get_rust_tokenizer() _A = self.get_image_processor() _A = AlignProcessor(tokenizer=__lowerCAmelCase , image_processor=__lowerCAmelCase ) processor_slow.save_pretrained(self.tmpdirname ) _A = AlignProcessor.from_pretrained(self.tmpdirname , use_fast=__lowerCAmelCase ) _A = AlignProcessor(tokenizer=__lowerCAmelCase , image_processor=__lowerCAmelCase ) processor_fast.save_pretrained(self.tmpdirname ) _A = AlignProcessor.from_pretrained(self.tmpdirname ) self.assertEqual(processor_slow.tokenizer.get_vocab() , tokenizer_slow.get_vocab() ) self.assertEqual(processor_fast.tokenizer.get_vocab() , tokenizer_fast.get_vocab() ) self.assertEqual(tokenizer_slow.get_vocab() , tokenizer_fast.get_vocab() ) self.assertIsInstance(processor_slow.tokenizer , __lowerCAmelCase ) self.assertIsInstance(processor_fast.tokenizer , __lowerCAmelCase ) self.assertEqual(processor_slow.image_processor.to_json_string() , image_processor.to_json_string() ) self.assertEqual(processor_fast.image_processor.to_json_string() , image_processor.to_json_string() ) self.assertIsInstance(processor_slow.image_processor , __lowerCAmelCase ) self.assertIsInstance(processor_fast.image_processor , __lowerCAmelCase ) def snake_case_ ( 
self : List[Any] ) -> List[str]: _A = AlignProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() ) processor.save_pretrained(self.tmpdirname ) _A = self.get_tokenizer(bos_token='''(BOS)''' , eos_token='''(EOS)''' ) _A = self.get_image_processor(do_normalize=__lowerCAmelCase , padding_value=1.0 ) _A = AlignProcessor.from_pretrained( self.tmpdirname , bos_token='''(BOS)''' , eos_token='''(EOS)''' , do_normalize=__lowerCAmelCase , padding_value=1.0 ) self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() ) self.assertIsInstance(processor.tokenizer , __lowerCAmelCase ) self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() ) self.assertIsInstance(processor.image_processor , __lowerCAmelCase ) def snake_case_ ( self : str ) -> List[Any]: _A = self.get_image_processor() _A = self.get_tokenizer() _A = AlignProcessor(tokenizer=__lowerCAmelCase , image_processor=__lowerCAmelCase ) _A = self.prepare_image_inputs() _A = image_processor(__lowerCAmelCase , return_tensors='''np''' ) _A = processor(images=__lowerCAmelCase , return_tensors='''np''' ) for key in input_image_proc.keys(): self.assertAlmostEqual(input_image_proc[key].sum() , input_processor[key].sum() , delta=1E-2 ) def snake_case_ ( self : Union[str, Any] ) -> Dict: _A = self.get_image_processor() _A = self.get_tokenizer() _A = AlignProcessor(tokenizer=__lowerCAmelCase , image_processor=__lowerCAmelCase ) _A = '''lower newer''' _A = processor(text=__lowerCAmelCase ) _A = tokenizer(__lowerCAmelCase , padding='''max_length''' , max_length=64 ) for key in encoded_tok.keys(): self.assertListEqual(encoded_tok[key] , encoded_processor[key] ) def snake_case_ ( self : List[str] ) -> Any: _A = self.get_image_processor() _A = self.get_tokenizer() _A = AlignProcessor(tokenizer=__lowerCAmelCase , image_processor=__lowerCAmelCase ) _A = '''lower newer''' _A = self.prepare_image_inputs() _A = processor(text=__lowerCAmelCase , images=__lowerCAmelCase ) self.assertListEqual(list(inputs.keys() ) , ['''input_ids''', '''token_type_ids''', '''attention_mask''', '''pixel_values'''] ) # test if it raises when no input is passed with pytest.raises(__lowerCAmelCase ): processor() def snake_case_ ( self : Optional[Any] ) -> str: _A = self.get_image_processor() _A = self.get_tokenizer() _A = AlignProcessor(tokenizer=__lowerCAmelCase , image_processor=__lowerCAmelCase ) _A = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]] _A = processor.batch_decode(__lowerCAmelCase ) _A = tokenizer.batch_decode(__lowerCAmelCase ) self.assertListEqual(__lowerCAmelCase , __lowerCAmelCase ) def snake_case_ ( self : str ) -> str: _A = self.get_image_processor() _A = self.get_tokenizer() _A = AlignProcessor(tokenizer=__lowerCAmelCase , image_processor=__lowerCAmelCase ) _A = '''lower newer''' _A = self.prepare_image_inputs() _A = processor(text=__lowerCAmelCase , images=__lowerCAmelCase ) self.assertListEqual(list(inputs.keys() ) , processor.model_input_names )
2
1
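The test in the row above exercises the standard processor round trip: compose a processor from a tokenizer plus an image processor, save it, and reload it. A condensed sketch of that flow using the same classes the test imports; the tiny vocabulary and default image-processor settings here are made up, and exact constructor defaults may differ across library versions.

import os, tempfile
from transformers import AlignProcessor, BertTokenizer, EfficientNetImageProcessor

tmpdir = tempfile.mkdtemp()
vocab_file = os.path.join(tmpdir, "vocab.txt")
with open(vocab_file, "w", encoding="utf-8") as f:
    f.write("\n".join(["[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]", "lower", "newer"]))

tokenizer = BertTokenizer(vocab_file)
image_processor = EfficientNetImageProcessor()
processor = AlignProcessor(tokenizer=tokenizer, image_processor=image_processor)
processor.save_pretrained(tmpdir)

reloaded = AlignProcessor.from_pretrained(tmpdir)
assert reloaded.tokenizer.get_vocab() == tokenizer.get_vocab()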
UpperCAmelCase_ = { """meter""": """m""", """kilometer""": """km""", """megametre""": """Mm""", """gigametre""": """Gm""", """terametre""": """Tm""", """petametre""": """Pm""", """exametre""": """Em""", """zettametre""": """Zm""", """yottametre""": """Ym""", } # Exponent of the factor(meter) UpperCAmelCase_ = { """m""": 0, """km""": 3, """Mm""": 6, """Gm""": 9, """Tm""": 1_2, """Pm""": 1_5, """Em""": 1_8, """Zm""": 2_1, """Ym""": 2_4, } def SCREAMING_SNAKE_CASE_ ( _snake_case :float , _snake_case :str , _snake_case :str ) -> float: _A = from_type.lower().strip('''s''' ) _A = to_type.lower().strip('''s''' ) _A = UNIT_SYMBOL.get(_snake_case , _snake_case ) _A = UNIT_SYMBOL.get(_snake_case , _snake_case ) if from_sanitized not in METRIC_CONVERSION: _A = ( F'''Invalid \'from_type\' value: {from_type!r}.\n''' F'''Conversion abbreviations are: {', '.join(_snake_case )}''' ) raise ValueError(_snake_case ) if to_sanitized not in METRIC_CONVERSION: _A = ( F'''Invalid \'to_type\' value: {to_type!r}.\n''' F'''Conversion abbreviations are: {', '.join(_snake_case )}''' ) raise ValueError(_snake_case ) _A = METRIC_CONVERSION[from_sanitized] _A = METRIC_CONVERSION[to_sanitized] _A = 1 if from_exponent > to_exponent: _A = from_exponent - to_exponent else: _A = -(to_exponent - from_exponent) return value * pow(10 , _snake_case ) if __name__ == "__main__": from doctest import testmod testmod()
2
from ...configuration_utils import PretrainedConfig from ...utils import logging UpperCAmelCase_ = logging.get_logger(__name__) UpperCAmelCase_ = {"""openai-gpt""": """https://huggingface.co/openai-gpt/resolve/main/config.json"""} class lowerCamelCase__ ( _A): """simple docstring""" a__ : int = "openai-gpt" a__ : Dict = { "max_position_embeddings": "n_positions", "hidden_size": "n_embd", "num_attention_heads": "n_head", "num_hidden_layers": "n_layer", } def __init__( self : Union[str, Any] , __lowerCAmelCase : int=4_04_78 , __lowerCAmelCase : Tuple=5_12 , __lowerCAmelCase : str=7_68 , __lowerCAmelCase : List[Any]=12 , __lowerCAmelCase : Any=12 , __lowerCAmelCase : Dict="gelu" , __lowerCAmelCase : Union[str, Any]=0.1 , __lowerCAmelCase : Optional[int]=0.1 , __lowerCAmelCase : Any=0.1 , __lowerCAmelCase : List[str]=1E-5 , __lowerCAmelCase : Optional[int]=0.02 , __lowerCAmelCase : Optional[Any]="cls_index" , __lowerCAmelCase : str=True , __lowerCAmelCase : int=None , __lowerCAmelCase : Optional[int]=True , __lowerCAmelCase : Optional[Any]=0.1 , **__lowerCAmelCase : Tuple , ) -> Optional[Any]: _A = vocab_size _A = n_positions _A = n_embd _A = n_layer _A = n_head _A = afn _A = resid_pdrop _A = embd_pdrop _A = attn_pdrop _A = layer_norm_epsilon _A = initializer_range _A = summary_type _A = summary_use_proj _A = summary_activation _A = summary_first_dropout _A = summary_proj_to_labels super().__init__(**__lowerCAmelCase )
2
1
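The unit converter in the row above boils down to a difference of base-10 exponents. A compact sketch of that arithmetic; length_conversion is an assumed name, and the exponent table matches the snippet's second dictionary.

# exponent of each symbol relative to the metre, as in the snippet above
METRIC_CONVERSION = {"m": 0, "km": 3, "Mm": 6, "Gm": 9, "Tm": 12, "Pm": 15, "Em": 18, "Zm": 21, "Ym": 24}

def length_conversion(value: float, from_symbol: str, to_symbol: str) -> float:
    # from_exponent - to_exponent covers both branches of the original code
    exponent = METRIC_CONVERSION[from_symbol] - METRIC_CONVERSION[to_symbol]
    return value * 10**exponent

print(length_conversion(1.0, "km", "m"))    # 1000.0
print(length_conversion(250.0, "m", "km"))  # 0.25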
import os from distutils.util import strtobool def SCREAMING_SNAKE_CASE_ ( _snake_case :Any , _snake_case :List[Any] ) -> Optional[Any]: for e in env_keys: _A = int(os.environ.get(_snake_case , -1 ) ) if val >= 0: return val return default def SCREAMING_SNAKE_CASE_ ( _snake_case :List[Any] , _snake_case :Dict=False ) -> int: _A = os.environ.get(_snake_case , str(_snake_case ) ) return strtobool(_snake_case ) == 1 # As its name indicates `strtobool` actually returns an int... def SCREAMING_SNAKE_CASE_ ( _snake_case :str , _snake_case :Dict="no" ) -> List[str]: _A = os.environ.get(_snake_case , str(_snake_case ) ) return value
2
import json import pathlib import unittest import numpy as np from transformers.testing_utils import require_torch, require_vision, slow from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import DeformableDetrImageProcessor class lowerCamelCase__ ( unittest.TestCase): """simple docstring""" def __init__( self : Optional[int] , __lowerCAmelCase : List[Any] , __lowerCAmelCase : Dict=7 , __lowerCAmelCase : Tuple=3 , __lowerCAmelCase : int=30 , __lowerCAmelCase : Dict=4_00 , __lowerCAmelCase : Optional[Any]=True , __lowerCAmelCase : List[str]=None , __lowerCAmelCase : Dict=True , __lowerCAmelCase : Optional[Any]=[0.5, 0.5, 0.5] , __lowerCAmelCase : Dict=[0.5, 0.5, 0.5] , __lowerCAmelCase : List[str]=True , __lowerCAmelCase : List[str]=1 / 2_55 , __lowerCAmelCase : int=True , ) -> List[str]: # by setting size["longest_edge"] > max_resolution we're effectively not testing this :p _A = size if size is not None else {'''shortest_edge''': 18, '''longest_edge''': 13_33} _A = parent _A = batch_size _A = num_channels _A = min_resolution _A = max_resolution _A = do_resize _A = size _A = do_normalize _A = image_mean _A = image_std _A = do_rescale _A = rescale_factor _A = do_pad def snake_case_ ( self : Optional[int] ) -> Any: return { "do_resize": self.do_resize, "size": self.size, "do_normalize": self.do_normalize, "image_mean": self.image_mean, "image_std": self.image_std, "do_rescale": self.do_rescale, "rescale_factor": self.rescale_factor, "do_pad": self.do_pad, } def snake_case_ ( self : List[Any] , __lowerCAmelCase : Tuple , __lowerCAmelCase : str=False ) -> Dict: if not batched: _A = image_inputs[0] if isinstance(__lowerCAmelCase , Image.Image ): _A , _A = image.size else: _A , _A = image.shape[1], image.shape[2] if w < h: _A = int(self.size['''shortest_edge'''] * h / w ) _A = self.size['''shortest_edge'''] elif w > h: _A = self.size['''shortest_edge'''] _A = int(self.size['''shortest_edge'''] * w / h ) else: _A = self.size['''shortest_edge'''] _A = self.size['''shortest_edge'''] else: _A = [] for image in image_inputs: _A , _A = self.get_expected_values([image] ) expected_values.append((expected_height, expected_width) ) _A = max(__lowerCAmelCase , key=lambda __lowerCAmelCase : item[0] )[0] _A = max(__lowerCAmelCase , key=lambda __lowerCAmelCase : item[1] )[1] return expected_height, expected_width @require_torch @require_vision class lowerCamelCase__ ( _A , unittest.TestCase): """simple docstring""" a__ : Any = DeformableDetrImageProcessor if is_vision_available() else None def snake_case_ ( self : Optional[int] ) -> Any: _A = DeformableDetrImageProcessingTester(self ) @property def snake_case_ ( self : Union[str, Any] ) -> Dict: return self.image_processor_tester.prepare_image_processor_dict() def snake_case_ ( self : Optional[int] ) -> List[str]: _A = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(__lowerCAmelCase , '''image_mean''' ) ) self.assertTrue(hasattr(__lowerCAmelCase , '''image_std''' ) ) self.assertTrue(hasattr(__lowerCAmelCase , '''do_normalize''' ) ) self.assertTrue(hasattr(__lowerCAmelCase , '''do_resize''' ) ) self.assertTrue(hasattr(__lowerCAmelCase , '''do_rescale''' ) ) self.assertTrue(hasattr(__lowerCAmelCase , '''do_pad''' ) ) self.assertTrue(hasattr(__lowerCAmelCase , '''size''' ) ) def snake_case_ ( self : List[str] ) 
-> int: _A = self.image_processing_class.from_dict(self.image_processor_dict ) self.assertEqual(image_processor.size , {'''shortest_edge''': 18, '''longest_edge''': 13_33} ) self.assertEqual(image_processor.do_pad , __lowerCAmelCase ) _A = self.image_processing_class.from_dict( self.image_processor_dict , size=42 , max_size=84 , pad_and_return_pixel_mask=__lowerCAmelCase ) self.assertEqual(image_processor.size , {'''shortest_edge''': 42, '''longest_edge''': 84} ) self.assertEqual(image_processor.do_pad , __lowerCAmelCase ) def snake_case_ ( self : Any ) -> Union[str, Any]: pass def snake_case_ ( self : List[str] ) -> Optional[int]: # Initialize image_processing _A = self.image_processing_class(**self.image_processor_dict ) # create random PIL images _A = prepare_image_inputs(self.image_processor_tester , equal_resolution=__lowerCAmelCase ) for image in image_inputs: self.assertIsInstance(__lowerCAmelCase , Image.Image ) # Test not batched input _A = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values _A , _A = self.image_processor_tester.get_expected_values(__lowerCAmelCase ) self.assertEqual( encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , ) # Test batched _A , _A = self.image_processor_tester.get_expected_values(__lowerCAmelCase , batched=__lowerCAmelCase ) _A = image_processing(__lowerCAmelCase , return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, expected_height, expected_width, ) , ) def snake_case_ ( self : Tuple ) -> int: # Initialize image_processing _A = self.image_processing_class(**self.image_processor_dict ) # create random numpy tensors _A = prepare_image_inputs(self.image_processor_tester , equal_resolution=__lowerCAmelCase , numpify=__lowerCAmelCase ) for image in image_inputs: self.assertIsInstance(__lowerCAmelCase , np.ndarray ) # Test not batched input _A = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values _A , _A = self.image_processor_tester.get_expected_values(__lowerCAmelCase ) self.assertEqual( encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , ) # Test batched _A = image_processing(__lowerCAmelCase , return_tensors='''pt''' ).pixel_values _A , _A = self.image_processor_tester.get_expected_values(__lowerCAmelCase , batched=__lowerCAmelCase ) self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, expected_height, expected_width, ) , ) def snake_case_ ( self : Optional[Any] ) -> int: # Initialize image_processing _A = self.image_processing_class(**self.image_processor_dict ) # create random PyTorch tensors _A = prepare_image_inputs(self.image_processor_tester , equal_resolution=__lowerCAmelCase , torchify=__lowerCAmelCase ) for image in image_inputs: self.assertIsInstance(__lowerCAmelCase , torch.Tensor ) # Test not batched input _A = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values _A , _A = self.image_processor_tester.get_expected_values(__lowerCAmelCase ) self.assertEqual( encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , ) # Test batched _A = image_processing(__lowerCAmelCase , return_tensors='''pt''' ).pixel_values _A , _A = self.image_processor_tester.get_expected_values(__lowerCAmelCase , batched=__lowerCAmelCase ) self.assertEqual( encoded_images.shape 
, ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, expected_height, expected_width, ) , ) @slow def snake_case_ ( self : Optional[Any] ) -> Optional[int]: # prepare image and target _A = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' ) with open('''./tests/fixtures/tests_samples/COCO/coco_annotations.txt''' , '''r''' ) as f: _A = json.loads(f.read() ) _A = {'''image_id''': 3_97_69, '''annotations''': target} # encode them _A = DeformableDetrImageProcessor() _A = image_processing(images=__lowerCAmelCase , annotations=__lowerCAmelCase , return_tensors='''pt''' ) # verify pixel values _A = torch.Size([1, 3, 8_00, 10_66] ) self.assertEqual(encoding['''pixel_values'''].shape , __lowerCAmelCase ) _A = torch.tensor([0.2796, 0.3138, 0.3481] ) self.assertTrue(torch.allclose(encoding['''pixel_values'''][0, 0, 0, :3] , __lowerCAmelCase , atol=1E-4 ) ) # verify area _A = torch.tensor([5887.9600, 1_1250.2061, 48_9353.8438, 83_7122.7500, 14_7967.5156, 16_5732.3438] ) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''area'''] , __lowerCAmelCase ) ) # verify boxes _A = torch.Size([6, 4] ) self.assertEqual(encoding['''labels'''][0]['''boxes'''].shape , __lowerCAmelCase ) _A = torch.tensor([0.5503, 0.2765, 0.0604, 0.2215] ) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''boxes'''][0] , __lowerCAmelCase , atol=1E-3 ) ) # verify image_id _A = torch.tensor([3_97_69] ) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''image_id'''] , __lowerCAmelCase ) ) # verify is_crowd _A = torch.tensor([0, 0, 0, 0, 0, 0] ) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''iscrowd'''] , __lowerCAmelCase ) ) # verify class_labels _A = torch.tensor([75, 75, 63, 65, 17, 17] ) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''class_labels'''] , __lowerCAmelCase ) ) # verify orig_size _A = torch.tensor([4_80, 6_40] ) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''orig_size'''] , __lowerCAmelCase ) ) # verify size _A = torch.tensor([8_00, 10_66] ) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''size'''] , __lowerCAmelCase ) ) @slow def snake_case_ ( self : List[str] ) -> List[str]: # prepare image, target and masks_path _A = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' ) with open('''./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt''' , '''r''' ) as f: _A = json.loads(f.read() ) _A = {'''file_name''': '''000000039769.png''', '''image_id''': 3_97_69, '''segments_info''': target} _A = pathlib.Path('''./tests/fixtures/tests_samples/COCO/coco_panoptic''' ) # encode them _A = DeformableDetrImageProcessor(format='''coco_panoptic''' ) _A = image_processing(images=__lowerCAmelCase , annotations=__lowerCAmelCase , masks_path=__lowerCAmelCase , return_tensors='''pt''' ) # verify pixel values _A = torch.Size([1, 3, 8_00, 10_66] ) self.assertEqual(encoding['''pixel_values'''].shape , __lowerCAmelCase ) _A = torch.tensor([0.2796, 0.3138, 0.3481] ) self.assertTrue(torch.allclose(encoding['''pixel_values'''][0, 0, 0, :3] , __lowerCAmelCase , atol=1E-4 ) ) # verify area _A = torch.tensor([14_7979.6875, 16_5527.0469, 48_4638.5938, 1_1292.9375, 5879.6562, 7634.1147] ) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''area'''] , __lowerCAmelCase ) ) # verify boxes _A = torch.Size([6, 4] ) self.assertEqual(encoding['''labels'''][0]['''boxes'''].shape , __lowerCAmelCase ) _A = torch.tensor([0.2625, 0.5437, 0.4688, 0.8625] ) 
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''boxes'''][0] , __lowerCAmelCase , atol=1E-3 ) ) # verify image_id _A = torch.tensor([3_97_69] ) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''image_id'''] , __lowerCAmelCase ) ) # verify is_crowd _A = torch.tensor([0, 0, 0, 0, 0, 0] ) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''iscrowd'''] , __lowerCAmelCase ) ) # verify class_labels _A = torch.tensor([17, 17, 63, 75, 75, 93] ) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''class_labels'''] , __lowerCAmelCase ) ) # verify masks _A = 82_28_73 self.assertEqual(encoding['''labels'''][0]['''masks'''].sum().item() , __lowerCAmelCase ) # verify orig_size _A = torch.tensor([4_80, 6_40] ) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''orig_size'''] , __lowerCAmelCase ) ) # verify size _A = torch.tensor([8_00, 10_66] ) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''size'''] , __lowerCAmelCase ) )
2
1
import argparse import json from pathlib import Path import torch import torchaudio from datasets import load_dataset from huggingface_hub import hf_hub_download from transformers import ASTConfig, ASTFeatureExtractor, ASTForAudioClassification from transformers.utils import logging logging.set_verbosity_info() UpperCAmelCase_ = logging.get_logger(__name__) def SCREAMING_SNAKE_CASE_ ( _snake_case :Dict ) -> Optional[int]: _A = ASTConfig() if "10-10" in model_name: pass elif "speech-commands" in model_name: _A = 128 elif "12-12" in model_name: _A = 12 _A = 12 elif "14-14" in model_name: _A = 14 _A = 14 elif "16-16" in model_name: _A = 16 _A = 16 else: raise ValueError('''Model not supported''' ) _A = '''huggingface/label-files''' if "speech-commands" in model_name: _A = 35 _A = '''speech-commands-v2-id2label.json''' else: _A = 527 _A = '''audioset-id2label.json''' _A = json.load(open(hf_hub_download(_snake_case , _snake_case , repo_type='''dataset''' ) , '''r''' ) ) _A = {int(_snake_case ): v for k, v in idalabel.items()} _A = idalabel _A = {v: k for k, v in idalabel.items()} return config def SCREAMING_SNAKE_CASE_ ( _snake_case :List[Any] ) -> Tuple: if "module.v" in name: _A = name.replace('''module.v''' , '''audio_spectrogram_transformer''' ) if "cls_token" in name: _A = name.replace('''cls_token''' , '''embeddings.cls_token''' ) if "dist_token" in name: _A = name.replace('''dist_token''' , '''embeddings.distillation_token''' ) if "pos_embed" in name: _A = name.replace('''pos_embed''' , '''embeddings.position_embeddings''' ) if "patch_embed.proj" in name: _A = name.replace('''patch_embed.proj''' , '''embeddings.patch_embeddings.projection''' ) # transformer blocks if "blocks" in name: _A = name.replace('''blocks''' , '''encoder.layer''' ) if "attn.proj" in name: _A = name.replace('''attn.proj''' , '''attention.output.dense''' ) if "attn" in name: _A = name.replace('''attn''' , '''attention.self''' ) if "norm1" in name: _A = name.replace('''norm1''' , '''layernorm_before''' ) if "norm2" in name: _A = name.replace('''norm2''' , '''layernorm_after''' ) if "mlp.fc1" in name: _A = name.replace('''mlp.fc1''' , '''intermediate.dense''' ) if "mlp.fc2" in name: _A = name.replace('''mlp.fc2''' , '''output.dense''' ) # final layernorm if "audio_spectrogram_transformer.norm" in name: _A = name.replace('''audio_spectrogram_transformer.norm''' , '''audio_spectrogram_transformer.layernorm''' ) # classifier head if "module.mlp_head.0" in name: _A = name.replace('''module.mlp_head.0''' , '''classifier.layernorm''' ) if "module.mlp_head.1" in name: _A = name.replace('''module.mlp_head.1''' , '''classifier.dense''' ) return name def SCREAMING_SNAKE_CASE_ ( _snake_case :Any , _snake_case :List[str] ) -> List[Any]: for key in orig_state_dict.copy().keys(): _A = orig_state_dict.pop(_snake_case ) if "qkv" in key: _A = key.split('''.''' ) _A = int(key_split[3] ) _A = config.hidden_size if "weight" in key: _A = val[:dim, :] _A = val[dim : dim * 2, :] _A = val[-dim:, :] else: _A = val[:dim] _A = val[dim : dim * 2] _A = val[-dim:] else: _A = val return orig_state_dict def SCREAMING_SNAKE_CASE_ ( _snake_case :List[str] ) -> Dict: _A = [ '''module.v.head.weight''', '''module.v.head.bias''', '''module.v.head_dist.weight''', '''module.v.head_dist.bias''', ] for k in ignore_keys: state_dict.pop(_snake_case , _snake_case ) @torch.no_grad() def SCREAMING_SNAKE_CASE_ ( _snake_case :Tuple , _snake_case :Dict , _snake_case :List[Any]=False ) -> Optional[Any]: _A = get_audio_spectrogram_transformer_config(_snake_case ) _A = 
{ '''ast-finetuned-audioset-10-10-0.4593''': ( '''https://www.dropbox.com/s/ca0b1v2nlxzyeb4/audioset_10_10_0.4593.pth?dl=1''' ), '''ast-finetuned-audioset-10-10-0.450''': ( '''https://www.dropbox.com/s/1tv0hovue1bxupk/audioset_10_10_0.4495.pth?dl=1''' ), '''ast-finetuned-audioset-10-10-0.448''': ( '''https://www.dropbox.com/s/6u5sikl4b9wo4u5/audioset_10_10_0.4483.pth?dl=1''' ), '''ast-finetuned-audioset-10-10-0.448-v2''': ( '''https://www.dropbox.com/s/kt6i0v9fvfm1mbq/audioset_10_10_0.4475.pth?dl=1''' ), '''ast-finetuned-audioset-12-12-0.447''': ( '''https://www.dropbox.com/s/snfhx3tizr4nuc8/audioset_12_12_0.4467.pth?dl=1''' ), '''ast-finetuned-audioset-14-14-0.443''': ( '''https://www.dropbox.com/s/z18s6pemtnxm4k7/audioset_14_14_0.4431.pth?dl=1''' ), '''ast-finetuned-audioset-16-16-0.442''': ( '''https://www.dropbox.com/s/mdsa4t1xmcimia6/audioset_16_16_0.4422.pth?dl=1''' ), '''ast-finetuned-speech-commands-v2''': ( '''https://www.dropbox.com/s/q0tbqpwv44pquwy/speechcommands_10_10_0.9812.pth?dl=1''' ), } # load original state_dict _A = model_name_to_url[model_name] _A = torch.hub.load_state_dict_from_url(_snake_case , map_location='''cpu''' ) # remove some keys remove_keys(_snake_case ) # rename some keys _A = convert_state_dict(_snake_case , _snake_case ) # load 🤗 model _A = ASTForAudioClassification(_snake_case ) model.eval() model.load_state_dict(_snake_case ) # verify outputs on dummy input # source: https://github.com/YuanGongND/ast/blob/79e873b8a54d0a3b330dd522584ff2b9926cd581/src/run.py#L62 _A = -4.267_7393 if '''speech-commands''' not in model_name else -6.84_5978 _A = 4.568_9974 if '''speech-commands''' not in model_name else 5.565_4526 _A = 1_024 if '''speech-commands''' not in model_name else 128 _A = ASTFeatureExtractor(mean=_snake_case , std=_snake_case , max_length=_snake_case ) if "speech-commands" in model_name: _A = load_dataset('''speech_commands''' , '''v0.02''' , split='''validation''' ) _A = dataset[0]['''audio''']['''array'''] else: _A = hf_hub_download( repo_id='''nielsr/audio-spectogram-transformer-checkpoint''' , filename='''sample_audio.flac''' , repo_type='''dataset''' , ) _A , _A = torchaudio.load(_snake_case ) _A = waveform.squeeze().numpy() _A = feature_extractor(_snake_case , sampling_rate=16_000 , return_tensors='''pt''' ) # forward pass _A = model(**_snake_case ) _A = outputs.logits if model_name == "ast-finetuned-audioset-10-10-0.4593": _A = torch.tensor([-0.8760, -7.0042, -8.6602] ) elif model_name == "ast-finetuned-audioset-10-10-0.450": _A = torch.tensor([-1.1986, -7.0903, -8.2718] ) elif model_name == "ast-finetuned-audioset-10-10-0.448": _A = torch.tensor([-2.6128, -8.0080, -9.4344] ) elif model_name == "ast-finetuned-audioset-10-10-0.448-v2": _A = torch.tensor([-1.5080, -7.4534, -8.8917] ) elif model_name == "ast-finetuned-audioset-12-12-0.447": _A = torch.tensor([-0.5050, -6.5833, -8.0843] ) elif model_name == "ast-finetuned-audioset-14-14-0.443": _A = torch.tensor([-0.3826, -7.0336, -8.2413] ) elif model_name == "ast-finetuned-audioset-16-16-0.442": _A = torch.tensor([-1.2113, -6.9101, -8.3470] ) elif model_name == "ast-finetuned-speech-commands-v2": _A = torch.tensor([6.1589, -8.0566, -8.7984] ) else: raise ValueError('''Unknown model name''' ) if not torch.allclose(logits[0, :3] , _snake_case , atol=1E-4 ): raise ValueError('''Logits don\'t match''' ) print('''Looks ok!''' ) if pytorch_dump_folder_path is not None: Path(_snake_case ).mkdir(exist_ok=_snake_case ) print(F'''Saving model {model_name} to {pytorch_dump_folder_path}''' ) 
model.save_pretrained(_snake_case ) print(F'''Saving feature extractor to {pytorch_dump_folder_path}''' ) feature_extractor.save_pretrained(_snake_case ) if push_to_hub: print('''Pushing model and feature extractor to the hub...''' ) model.push_to_hub(F'''MIT/{model_name}''' ) feature_extractor.push_to_hub(F'''MIT/{model_name}''' ) if __name__ == "__main__": UpperCAmelCase_ = argparse.ArgumentParser() # Required parameters parser.add_argument( """--model_name""", default="""ast-finetuned-audioset-10-10-0.4593""", type=str, help="""Name of the Audio Spectrogram Transformer model you'd like to convert.""", ) parser.add_argument( """--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory.""" ) parser.add_argument( """--push_to_hub""", action="""store_true""", help="""Whether or not to push the converted model to the 🤗 hub.""" ) UpperCAmelCase_ = parser.parse_args() convert_audio_spectrogram_transformer_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
2
red = 0  # The first color of the flag.
white = 1  # The second color of the flag.
blue = 2  # The third color of the flag.
colors = (red, white, blue)


def dutch_national_flag_sort(sequence: list) -> list:
    if not sequence:
        return []
    if len(sequence) == 1:
        return list(sequence)
    low = 0
    high = len(sequence) - 1
    mid = 0
    while mid <= high:
        if sequence[mid] == colors[0]:
            sequence[low], sequence[mid] = sequence[mid], sequence[low]
            low += 1
            mid += 1
        elif sequence[mid] == colors[1]:
            mid += 1
        elif sequence[mid] == colors[2]:
            sequence[high], sequence[mid] = sequence[mid], sequence[high]
            high -= 1
        else:
            msg = f"The elements inside the sequence must contain only {colors} values"
            raise ValueError(msg)
    return sequence


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    user_input = input("Enter numbers separated by commas:\n").strip()
    unsorted = [int(item.strip()) for item in user_input.split(",")]
    print(f"{dutch_national_flag_sort(unsorted)}")
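# Illustrative usage (not part of the original script; names as defined
# above): the partition runs in a single pass and reorders the list in place.
example = [2, 0, 1, 0, 2, 1]
assert dutch_national_flag_sort(example) == [0, 0, 1, 1, 2, 2]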
2
1
import json import pathlib import unittest import numpy as np from transformers.testing_utils import require_torch, require_vision, slow from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import DeformableDetrImageProcessor class lowerCamelCase__ ( unittest.TestCase): """simple docstring""" def __init__( self : Optional[int] , __lowerCAmelCase : List[Any] , __lowerCAmelCase : Dict=7 , __lowerCAmelCase : Tuple=3 , __lowerCAmelCase : int=30 , __lowerCAmelCase : Dict=4_00 , __lowerCAmelCase : Optional[Any]=True , __lowerCAmelCase : List[str]=None , __lowerCAmelCase : Dict=True , __lowerCAmelCase : Optional[Any]=[0.5, 0.5, 0.5] , __lowerCAmelCase : Dict=[0.5, 0.5, 0.5] , __lowerCAmelCase : List[str]=True , __lowerCAmelCase : List[str]=1 / 2_55 , __lowerCAmelCase : int=True , ) -> List[str]: # by setting size["longest_edge"] > max_resolution we're effectively not testing this :p _A = size if size is not None else {'''shortest_edge''': 18, '''longest_edge''': 13_33} _A = parent _A = batch_size _A = num_channels _A = min_resolution _A = max_resolution _A = do_resize _A = size _A = do_normalize _A = image_mean _A = image_std _A = do_rescale _A = rescale_factor _A = do_pad def snake_case_ ( self : Optional[int] ) -> Any: return { "do_resize": self.do_resize, "size": self.size, "do_normalize": self.do_normalize, "image_mean": self.image_mean, "image_std": self.image_std, "do_rescale": self.do_rescale, "rescale_factor": self.rescale_factor, "do_pad": self.do_pad, } def snake_case_ ( self : List[Any] , __lowerCAmelCase : Tuple , __lowerCAmelCase : str=False ) -> Dict: if not batched: _A = image_inputs[0] if isinstance(__lowerCAmelCase , Image.Image ): _A , _A = image.size else: _A , _A = image.shape[1], image.shape[2] if w < h: _A = int(self.size['''shortest_edge'''] * h / w ) _A = self.size['''shortest_edge'''] elif w > h: _A = self.size['''shortest_edge'''] _A = int(self.size['''shortest_edge'''] * w / h ) else: _A = self.size['''shortest_edge'''] _A = self.size['''shortest_edge'''] else: _A = [] for image in image_inputs: _A , _A = self.get_expected_values([image] ) expected_values.append((expected_height, expected_width) ) _A = max(__lowerCAmelCase , key=lambda __lowerCAmelCase : item[0] )[0] _A = max(__lowerCAmelCase , key=lambda __lowerCAmelCase : item[1] )[1] return expected_height, expected_width @require_torch @require_vision class lowerCamelCase__ ( _A , unittest.TestCase): """simple docstring""" a__ : Any = DeformableDetrImageProcessor if is_vision_available() else None def snake_case_ ( self : Optional[int] ) -> Any: _A = DeformableDetrImageProcessingTester(self ) @property def snake_case_ ( self : Union[str, Any] ) -> Dict: return self.image_processor_tester.prepare_image_processor_dict() def snake_case_ ( self : Optional[int] ) -> List[str]: _A = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(__lowerCAmelCase , '''image_mean''' ) ) self.assertTrue(hasattr(__lowerCAmelCase , '''image_std''' ) ) self.assertTrue(hasattr(__lowerCAmelCase , '''do_normalize''' ) ) self.assertTrue(hasattr(__lowerCAmelCase , '''do_resize''' ) ) self.assertTrue(hasattr(__lowerCAmelCase , '''do_rescale''' ) ) self.assertTrue(hasattr(__lowerCAmelCase , '''do_pad''' ) ) self.assertTrue(hasattr(__lowerCAmelCase , '''size''' ) ) def snake_case_ ( self : List[str] ) 
-> int: _A = self.image_processing_class.from_dict(self.image_processor_dict ) self.assertEqual(image_processor.size , {'''shortest_edge''': 18, '''longest_edge''': 13_33} ) self.assertEqual(image_processor.do_pad , __lowerCAmelCase ) _A = self.image_processing_class.from_dict( self.image_processor_dict , size=42 , max_size=84 , pad_and_return_pixel_mask=__lowerCAmelCase ) self.assertEqual(image_processor.size , {'''shortest_edge''': 42, '''longest_edge''': 84} ) self.assertEqual(image_processor.do_pad , __lowerCAmelCase ) def snake_case_ ( self : Any ) -> Union[str, Any]: pass def snake_case_ ( self : List[str] ) -> Optional[int]: # Initialize image_processing _A = self.image_processing_class(**self.image_processor_dict ) # create random PIL images _A = prepare_image_inputs(self.image_processor_tester , equal_resolution=__lowerCAmelCase ) for image in image_inputs: self.assertIsInstance(__lowerCAmelCase , Image.Image ) # Test not batched input _A = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values _A , _A = self.image_processor_tester.get_expected_values(__lowerCAmelCase ) self.assertEqual( encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , ) # Test batched _A , _A = self.image_processor_tester.get_expected_values(__lowerCAmelCase , batched=__lowerCAmelCase ) _A = image_processing(__lowerCAmelCase , return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, expected_height, expected_width, ) , ) def snake_case_ ( self : Tuple ) -> int: # Initialize image_processing _A = self.image_processing_class(**self.image_processor_dict ) # create random numpy tensors _A = prepare_image_inputs(self.image_processor_tester , equal_resolution=__lowerCAmelCase , numpify=__lowerCAmelCase ) for image in image_inputs: self.assertIsInstance(__lowerCAmelCase , np.ndarray ) # Test not batched input _A = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values _A , _A = self.image_processor_tester.get_expected_values(__lowerCAmelCase ) self.assertEqual( encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , ) # Test batched _A = image_processing(__lowerCAmelCase , return_tensors='''pt''' ).pixel_values _A , _A = self.image_processor_tester.get_expected_values(__lowerCAmelCase , batched=__lowerCAmelCase ) self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, expected_height, expected_width, ) , ) def snake_case_ ( self : Optional[Any] ) -> int: # Initialize image_processing _A = self.image_processing_class(**self.image_processor_dict ) # create random PyTorch tensors _A = prepare_image_inputs(self.image_processor_tester , equal_resolution=__lowerCAmelCase , torchify=__lowerCAmelCase ) for image in image_inputs: self.assertIsInstance(__lowerCAmelCase , torch.Tensor ) # Test not batched input _A = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values _A , _A = self.image_processor_tester.get_expected_values(__lowerCAmelCase ) self.assertEqual( encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , ) # Test batched _A = image_processing(__lowerCAmelCase , return_tensors='''pt''' ).pixel_values _A , _A = self.image_processor_tester.get_expected_values(__lowerCAmelCase , batched=__lowerCAmelCase ) self.assertEqual( encoded_images.shape 
, ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, expected_height, expected_width, ) , ) @slow def snake_case_ ( self : Optional[Any] ) -> Optional[int]: # prepare image and target _A = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' ) with open('''./tests/fixtures/tests_samples/COCO/coco_annotations.txt''' , '''r''' ) as f: _A = json.loads(f.read() ) _A = {'''image_id''': 3_97_69, '''annotations''': target} # encode them _A = DeformableDetrImageProcessor() _A = image_processing(images=__lowerCAmelCase , annotations=__lowerCAmelCase , return_tensors='''pt''' ) # verify pixel values _A = torch.Size([1, 3, 8_00, 10_66] ) self.assertEqual(encoding['''pixel_values'''].shape , __lowerCAmelCase ) _A = torch.tensor([0.2796, 0.3138, 0.3481] ) self.assertTrue(torch.allclose(encoding['''pixel_values'''][0, 0, 0, :3] , __lowerCAmelCase , atol=1E-4 ) ) # verify area _A = torch.tensor([5887.9600, 1_1250.2061, 48_9353.8438, 83_7122.7500, 14_7967.5156, 16_5732.3438] ) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''area'''] , __lowerCAmelCase ) ) # verify boxes _A = torch.Size([6, 4] ) self.assertEqual(encoding['''labels'''][0]['''boxes'''].shape , __lowerCAmelCase ) _A = torch.tensor([0.5503, 0.2765, 0.0604, 0.2215] ) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''boxes'''][0] , __lowerCAmelCase , atol=1E-3 ) ) # verify image_id _A = torch.tensor([3_97_69] ) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''image_id'''] , __lowerCAmelCase ) ) # verify is_crowd _A = torch.tensor([0, 0, 0, 0, 0, 0] ) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''iscrowd'''] , __lowerCAmelCase ) ) # verify class_labels _A = torch.tensor([75, 75, 63, 65, 17, 17] ) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''class_labels'''] , __lowerCAmelCase ) ) # verify orig_size _A = torch.tensor([4_80, 6_40] ) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''orig_size'''] , __lowerCAmelCase ) ) # verify size _A = torch.tensor([8_00, 10_66] ) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''size'''] , __lowerCAmelCase ) ) @slow def snake_case_ ( self : List[str] ) -> List[str]: # prepare image, target and masks_path _A = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' ) with open('''./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt''' , '''r''' ) as f: _A = json.loads(f.read() ) _A = {'''file_name''': '''000000039769.png''', '''image_id''': 3_97_69, '''segments_info''': target} _A = pathlib.Path('''./tests/fixtures/tests_samples/COCO/coco_panoptic''' ) # encode them _A = DeformableDetrImageProcessor(format='''coco_panoptic''' ) _A = image_processing(images=__lowerCAmelCase , annotations=__lowerCAmelCase , masks_path=__lowerCAmelCase , return_tensors='''pt''' ) # verify pixel values _A = torch.Size([1, 3, 8_00, 10_66] ) self.assertEqual(encoding['''pixel_values'''].shape , __lowerCAmelCase ) _A = torch.tensor([0.2796, 0.3138, 0.3481] ) self.assertTrue(torch.allclose(encoding['''pixel_values'''][0, 0, 0, :3] , __lowerCAmelCase , atol=1E-4 ) ) # verify area _A = torch.tensor([14_7979.6875, 16_5527.0469, 48_4638.5938, 1_1292.9375, 5879.6562, 7634.1147] ) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''area'''] , __lowerCAmelCase ) ) # verify boxes _A = torch.Size([6, 4] ) self.assertEqual(encoding['''labels'''][0]['''boxes'''].shape , __lowerCAmelCase ) _A = torch.tensor([0.2625, 0.5437, 0.4688, 0.8625] ) 
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''boxes'''][0] , __lowerCAmelCase , atol=1E-3 ) ) # verify image_id _A = torch.tensor([3_97_69] ) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''image_id'''] , __lowerCAmelCase ) ) # verify is_crowd _A = torch.tensor([0, 0, 0, 0, 0, 0] ) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''iscrowd'''] , __lowerCAmelCase ) ) # verify class_labels _A = torch.tensor([17, 17, 63, 75, 75, 93] ) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''class_labels'''] , __lowerCAmelCase ) ) # verify masks _A = 82_28_73 self.assertEqual(encoding['''labels'''][0]['''masks'''].sum().item() , __lowerCAmelCase ) # verify orig_size _A = torch.tensor([4_80, 6_40] ) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''orig_size'''] , __lowerCAmelCase ) ) # verify size _A = torch.tensor([8_00, 10_66] ) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''size'''] , __lowerCAmelCase ) )
2
import itertools
import math


def is_prime(number: int) -> bool:
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False
    # All primes greater than 3 are of the form 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True


def prime_generator():
    num = 2
    while True:
        if is_prime(num):
            yield num
        num += 1


def solution(nth: int = 10_001) -> int:
    return next(itertools.islice(prime_generator(), nth - 1, nth))


if __name__ == "__main__":
    print(f"{solution() = }")
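# Quick sanity check, assuming the names defined above: the generator yields
# primes in ascending order, and solution(6) should be the 6th prime.
assert list(itertools.islice(prime_generator(), 5)) == [2, 3, 5, 7, 11]
assert solution(6) == 13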
2
1
import argparse import json import os import torch from transformers.file_utils import has_file from diffusers import UNetaDConditionModel, UNetaDModel UpperCAmelCase_ = False UpperCAmelCase_ = True UpperCAmelCase_ = False if __name__ == "__main__": UpperCAmelCase_ = argparse.ArgumentParser() parser.add_argument( """--repo_path""", default=None, type=str, required=True, help="""The config json file corresponding to the architecture.""", ) parser.add_argument("""--dump_path""", default=None, type=str, required=True, help="""Path to the output model.""") UpperCAmelCase_ = parser.parse_args() UpperCAmelCase_ = { """image_size""": """sample_size""", """num_res_blocks""": """layers_per_block""", """block_channels""": """block_out_channels""", """down_blocks""": """down_block_types""", """up_blocks""": """up_block_types""", """downscale_freq_shift""": """freq_shift""", """resnet_num_groups""": """norm_num_groups""", """resnet_act_fn""": """act_fn""", """resnet_eps""": """norm_eps""", """num_head_channels""": """attention_head_dim""", } UpperCAmelCase_ = { """time_steps""": """time_proj""", """mid""": """mid_block""", """downsample_blocks""": """down_blocks""", """upsample_blocks""": """up_blocks""", } UpperCAmelCase_ = """""" if has_file(args.repo_path, """config.json""") else """unet""" with open(os.path.join(args.repo_path, subfolder, """config.json"""), """r""", encoding="""utf-8""") as reader: UpperCAmelCase_ = reader.read() UpperCAmelCase_ = json.loads(text) if do_only_config: for key in config_parameters_to_change.keys(): config.pop(key, None) if has_file(args.repo_path, """config.json"""): UpperCAmelCase_ = UNetaDModel(**config) else: UpperCAmelCase_ = UNetaDConditionModel if """ldm-text2im-large-256""" in args.repo_path else UNetaDModel UpperCAmelCase_ = class_name(**config) if do_only_config: model.save_config(os.path.join(args.repo_path, subfolder)) UpperCAmelCase_ = dict(model.config) if do_only_renaming: for key, value in config_parameters_to_change.items(): if key in config: UpperCAmelCase_ = config[key] del config[key] UpperCAmelCase_ = [k.replace("""UNetRes""", """""") for k in config["""down_block_types"""]] UpperCAmelCase_ = [k.replace("""UNetRes""", """""") for k in config["""up_block_types"""]] if do_only_weights: UpperCAmelCase_ = torch.load(os.path.join(args.repo_path, subfolder, """diffusion_pytorch_model.bin""")) UpperCAmelCase_ = {} for param_key, param_value in state_dict.items(): if param_key.endswith(""".op.bias""") or param_key.endswith(""".op.weight"""): continue UpperCAmelCase_ = False for key, new_key in key_parameters_to_change.items(): if not has_changed and param_key.split(""".""")[0] == key: UpperCAmelCase_ = param_value UpperCAmelCase_ = True if not has_changed: UpperCAmelCase_ = param_value model.load_state_dict(new_state_dict) model.save_pretrained(os.path.join(args.repo_path, subfolder))
2
import collections import os import re from pathlib import Path UpperCAmelCase_ = """src/transformers""" # Matches is_xxx_available() UpperCAmelCase_ = re.compile(r"""is\_([a-z_]*)_available()""") # Catches a one-line _import_struct = {xxx} UpperCAmelCase_ = re.compile(r"""^_import_structure\s+=\s+\{([^\}]+)\}""") # Catches a line with a key-values pattern: "bla": ["foo", "bar"] UpperCAmelCase_ = re.compile(r"""\s+\"\S*\":\s+\[([^\]]*)\]""") # Catches a line if not is_foo_available UpperCAmelCase_ = re.compile(r"""^\s*if\s+not\s+is\_[a-z_]*\_available\(\)""") # Catches a line _import_struct["bla"].append("foo") UpperCAmelCase_ = re.compile(r"""^\s*_import_structure\[\"\S*\"\]\.append\(\"(\S*)\"\)""") # Catches a line _import_struct["bla"].extend(["foo", "bar"]) or _import_struct["bla"] = ["foo", "bar"] UpperCAmelCase_ = re.compile(r"""^\s*_import_structure\[\S*\](?:\.extend\(|\s*=\s+)\[([^\]]*)\]""") # Catches a line with an object between quotes and a comma: "MyModel", UpperCAmelCase_ = re.compile(r"""^\s+\"([^\"]+)\",""") # Catches a line with objects between brackets only: ["foo", "bar"], UpperCAmelCase_ = re.compile(r"""^\s+\[([^\]]+)\]""") # Catches a line with from foo import bar, bla, boo UpperCAmelCase_ = re.compile(r"""\s+from\s+\S*\s+import\s+([^\(\s].*)\n""") # Catches a line with try: UpperCAmelCase_ = re.compile(r"""^\s*try:""") # Catches a line with else: UpperCAmelCase_ = re.compile(r"""^\s*else:""") def SCREAMING_SNAKE_CASE_ ( _snake_case :Optional[int] ) -> Any: if _re_test_backend.search(_snake_case ) is None: return None _A = [b[0] for b in _re_backend.findall(_snake_case )] backends.sort() return "_and_".join(_snake_case ) def SCREAMING_SNAKE_CASE_ ( _snake_case :Any ) -> Any: with open(_snake_case , '''r''' , encoding='''utf-8''' , newline='''\n''' ) as f: _A = f.readlines() _A = 0 while line_index < len(_snake_case ) and not lines[line_index].startswith('''_import_structure = {''' ): line_index += 1 # If this is a traditional init, just return. if line_index >= len(_snake_case ): return None # First grab the objects without a specific backend in _import_structure _A = [] while not lines[line_index].startswith('''if TYPE_CHECKING''' ) and find_backend(lines[line_index] ) is None: _A = lines[line_index] # If we have everything on a single line, let's deal with it. if _re_one_line_import_struct.search(_snake_case ): _A = _re_one_line_import_struct.search(_snake_case ).groups()[0] _A = re.findall(r'''\[([^\]]+)\]''' , _snake_case ) for imp in imports: objects.extend([obj[1:-1] for obj in imp.split(''', ''' )] ) line_index += 1 continue _A = _re_import_struct_key_value.search(_snake_case ) if single_line_import_search is not None: _A = [obj[1:-1] for obj in single_line_import_search.groups()[0].split(''', ''' ) if len(_snake_case ) > 0] objects.extend(_snake_case ) elif line.startswith(''' ''' * 8 + '''"''' ): objects.append(line[9:-3] ) line_index += 1 _A = {'''none''': objects} # Let's continue with backend-specific objects in _import_structure while not lines[line_index].startswith('''if TYPE_CHECKING''' ): # If the line is an if not is_backend_available, we grab all objects associated. 
_A = find_backend(lines[line_index] ) # Check if the backend declaration is inside a try block: if _re_try.search(lines[line_index - 1] ) is None: _A = None if backend is not None: line_index += 1 # Scroll until we hit the else block of try-except-else while _re_else.search(lines[line_index] ) is None: line_index += 1 line_index += 1 _A = [] # Until we unindent, add backend objects to the list while len(lines[line_index] ) <= 1 or lines[line_index].startswith(''' ''' * 4 ): _A = lines[line_index] if _re_import_struct_add_one.search(_snake_case ) is not None: objects.append(_re_import_struct_add_one.search(_snake_case ).groups()[0] ) elif _re_import_struct_add_many.search(_snake_case ) is not None: _A = _re_import_struct_add_many.search(_snake_case ).groups()[0].split(''', ''' ) _A = [obj[1:-1] for obj in imports if len(_snake_case ) > 0] objects.extend(_snake_case ) elif _re_between_brackets.search(_snake_case ) is not None: _A = _re_between_brackets.search(_snake_case ).groups()[0].split(''', ''' ) _A = [obj[1:-1] for obj in imports if len(_snake_case ) > 0] objects.extend(_snake_case ) elif _re_quote_object.search(_snake_case ) is not None: objects.append(_re_quote_object.search(_snake_case ).groups()[0] ) elif line.startswith(''' ''' * 8 + '''"''' ): objects.append(line[9:-3] ) elif line.startswith(''' ''' * 12 + '''"''' ): objects.append(line[13:-3] ) line_index += 1 _A = objects else: line_index += 1 # At this stage we are in the TYPE_CHECKING part, first grab the objects without a specific backend _A = [] while ( line_index < len(_snake_case ) and find_backend(lines[line_index] ) is None and not lines[line_index].startswith('''else''' ) ): _A = lines[line_index] _A = _re_import.search(_snake_case ) if single_line_import_search is not None: objects.extend(single_line_import_search.groups()[0].split(''', ''' ) ) elif line.startswith(''' ''' * 8 ): objects.append(line[8:-2] ) line_index += 1 _A = {'''none''': objects} # Let's continue with backend-specific objects while line_index < len(_snake_case ): # If the line is an if is_backend_available, we grab all objects associated. 
_A = find_backend(lines[line_index] ) # Check if the backend declaration is inside a try block: if _re_try.search(lines[line_index - 1] ) is None: _A = None if backend is not None: line_index += 1 # Scroll until we hit the else block of try-except-else while _re_else.search(lines[line_index] ) is None: line_index += 1 line_index += 1 _A = [] # Until we unindent, add backend objects to the list while len(lines[line_index] ) <= 1 or lines[line_index].startswith(''' ''' * 8 ): _A = lines[line_index] _A = _re_import.search(_snake_case ) if single_line_import_search is not None: objects.extend(single_line_import_search.groups()[0].split(''', ''' ) ) elif line.startswith(''' ''' * 12 ): objects.append(line[12:-2] ) line_index += 1 _A = objects else: line_index += 1 return import_dict_objects, type_hint_objects def SCREAMING_SNAKE_CASE_ ( _snake_case :List[str] , _snake_case :Dict ) -> Any: def find_duplicates(_snake_case :Any ): return [k for k, v in collections.Counter(_snake_case ).items() if v > 1] if list(import_dict_objects.keys() ) != list(type_hint_objects.keys() ): return ["Both sides of the init do not have the same backends!"] _A = [] for key in import_dict_objects.keys(): _A = find_duplicates(import_dict_objects[key] ) if duplicate_imports: errors.append(F'''Duplicate _import_structure definitions for: {duplicate_imports}''' ) _A = find_duplicates(type_hint_objects[key] ) if duplicate_type_hints: errors.append(F'''Duplicate TYPE_CHECKING objects for: {duplicate_type_hints}''' ) if sorted(set(import_dict_objects[key] ) ) != sorted(set(type_hint_objects[key] ) ): _A = '''base imports''' if key == '''none''' else F'''{key} backend''' errors.append(F'''Differences for {name}:''' ) for a in type_hint_objects[key]: if a not in import_dict_objects[key]: errors.append(F''' {a} in TYPE_HINT but not in _import_structure.''' ) for a in import_dict_objects[key]: if a not in type_hint_objects[key]: errors.append(F''' {a} in _import_structure but not in TYPE_HINT.''' ) return errors def SCREAMING_SNAKE_CASE_ ( ) -> int: _A = [] for root, _, files in os.walk(_snake_case ): if "__init__.py" in files: _A = os.path.join(_snake_case , '''__init__.py''' ) _A = parse_init(_snake_case ) if objects is not None: _A = analyze_results(*_snake_case ) if len(_snake_case ) > 0: _A = F'''Problem in {fname}, both halves do not define the same objects.\n{errors[0]}''' failures.append('''\n'''.join(_snake_case ) ) if len(_snake_case ) > 0: raise ValueError('''\n\n'''.join(_snake_case ) ) def SCREAMING_SNAKE_CASE_ ( ) -> Optional[int]: _A = [] for path, directories, files in os.walk(_snake_case ): for folder in directories: # Ignore private modules if folder.startswith('''_''' ): directories.remove(_snake_case ) continue # Ignore leftovers from branches (empty folders apart from pycache) if len(list((Path(_snake_case ) / folder).glob('''*.py''' ) ) ) == 0: continue _A = str((Path(_snake_case ) / folder).relative_to(_snake_case ) ) _A = short_path.replace(os.path.sep , '''.''' ) submodules.append(_snake_case ) for fname in files: if fname == "__init__.py": continue _A = str((Path(_snake_case ) / fname).relative_to(_snake_case ) ) _A = short_path.replace('''.py''' , '''''' ).replace(os.path.sep , '''.''' ) if len(submodule.split('''.''' ) ) == 1: submodules.append(_snake_case ) return submodules UpperCAmelCase_ = [ """convert_pytorch_checkpoint_to_tf2""", """modeling_flax_pytorch_utils""", """models.esm.openfold_utils""", ] def SCREAMING_SNAKE_CASE_ ( ) -> List[str]: # This is to make sure the transformers module 
imported is the one in the repo. from transformers.utils import direct_transformers_import _A = direct_transformers_import(_snake_case ) _A = set(transformers._import_structure.keys() ) # This contains all the base keys of the _import_structure object defined in the init, but if the user is missing # some optional dependencies, they may not have all of them. Thus we read the init to read all additions and # (potentiall re-) add them. with open(os.path.join(_snake_case , '''__init__.py''' ) , '''r''' ) as f: _A = f.read() import_structure_keys.update(set(re.findall(r'''import_structure\[\"([^\"]*)\"\]''' , _snake_case ) ) ) _A = [ module for module in get_transformers_submodules() if module not in IGNORE_SUBMODULES and module not in import_structure_keys ] if len(_snake_case ) > 0: _A = '''\n'''.join(F'''- {module}''' for module in module_not_registered ) raise ValueError( '''The following submodules are not properly registed in the main init of Transformers:\n''' F'''{list_of_modules}\n''' '''Make sure they appear somewhere in the keys of `_import_structure` with an empty list as value.''' ) if __name__ == "__main__": check_all_inits() check_submodules()
2
1
from __future__ import annotations


def is_palindrome(n: int | str) -> bool:
    n = str(n)
    return n == n[::-1]


def solution(limit: int = 1_000_000) -> int:
    total = 0
    for i in range(1, limit):
        if is_palindrome(i) and is_palindrome(bin(i).split("b")[1]):
            total += i
    return total


if __name__ == "__main__":
    print(solution(int(str(input().strip()))))
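# Worked example, assuming the helpers above: 585 reads the same forwards and
# backwards in base 10, and bin(585) == "0b1001001001" is also palindromic,
# so 585 contributes to the total.
assert is_palindrome(585) and is_palindrome(bin(585).split("b")[1])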
2
from typing import List, Union from ..utils import ( add_end_docstrings, is_tf_available, is_torch_available, is_vision_available, logging, requires_backends, ) from .base import PIPELINE_INIT_ARGS, Pipeline if is_vision_available(): from PIL import Image from ..image_utils import load_image if is_tf_available(): from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_VISION_2_SEQ_MAPPING if is_torch_available(): import torch from ..models.auto.modeling_auto import MODEL_FOR_VISION_2_SEQ_MAPPING UpperCAmelCase_ = logging.get_logger(__name__) @add_end_docstrings(_A) class lowerCamelCase__ ( _A): """simple docstring""" def __init__( self : Optional[int] , *__lowerCAmelCase : List[Any] , **__lowerCAmelCase : List[str] ) -> List[str]: super().__init__(*__lowerCAmelCase , **__lowerCAmelCase ) requires_backends(self , '''vision''' ) self.check_model_type( TF_MODEL_FOR_VISION_2_SEQ_MAPPING if self.framework == '''tf''' else MODEL_FOR_VISION_2_SEQ_MAPPING ) def snake_case_ ( self : Any , __lowerCAmelCase : Dict=None , __lowerCAmelCase : List[str]=None , __lowerCAmelCase : Optional[Any]=None ) -> int: _A = {} _A = {} if prompt is not None: _A = prompt if generate_kwargs is not None: _A = generate_kwargs if max_new_tokens is not None: if "generate_kwargs" not in forward_kwargs: _A = {} if "max_new_tokens" in forward_kwargs["generate_kwargs"]: raise ValueError( '''\'max_new_tokens\' is defined twice, once in \'generate_kwargs\' and once as a direct parameter,''' ''' please use only one''' ) _A = max_new_tokens return preprocess_params, forward_kwargs, {} def __call__( self : List[str] , __lowerCAmelCase : Union[str, List[str], "Image.Image", List["Image.Image"]] , **__lowerCAmelCase : Union[str, Any] ) -> Union[str, Any]: return super().__call__(__lowerCAmelCase , **__lowerCAmelCase ) def snake_case_ ( self : List[Any] , __lowerCAmelCase : str , __lowerCAmelCase : Union[str, Any]=None ) -> int: _A = load_image(__lowerCAmelCase ) if prompt is not None: if not isinstance(__lowerCAmelCase , __lowerCAmelCase ): raise ValueError( f'''Received an invalid text input, got - {type(__lowerCAmelCase )} - but expected a single string. 
''' '''Note also that one single text can be provided for conditional image to text generation.''' ) _A = self.model.config.model_type if model_type == "git": _A = self.image_processor(images=__lowerCAmelCase , return_tensors=self.framework ) _A = self.tokenizer(text=__lowerCAmelCase , add_special_tokens=__lowerCAmelCase ).input_ids _A = [self.tokenizer.cls_token_id] + input_ids _A = torch.tensor(__lowerCAmelCase ).unsqueeze(0 ) model_inputs.update({'''input_ids''': input_ids} ) elif model_type == "pix2struct": _A = self.image_processor(images=__lowerCAmelCase , header_text=__lowerCAmelCase , return_tensors=self.framework ) elif model_type != "vision-encoder-decoder": # vision-encoder-decoder does not support conditional generation _A = self.image_processor(images=__lowerCAmelCase , return_tensors=self.framework ) _A = self.tokenizer(__lowerCAmelCase , return_tensors=self.framework ) model_inputs.update(__lowerCAmelCase ) else: raise ValueError(f'''Model type {model_type} does not support conditional text generation''' ) else: _A = self.image_processor(images=__lowerCAmelCase , return_tensors=self.framework ) if self.model.config.model_type == "git" and prompt is None: _A = None return model_inputs def snake_case_ ( self : Optional[int] , __lowerCAmelCase : Tuple , __lowerCAmelCase : Dict=None ) -> str: # Git model sets `model_inputs["input_ids"] = None` in `preprocess` (when `prompt=None`). In batch model, the # pipeline will group them into a list of `None`, which fail `_forward`. Avoid this by checking it first. if ( "input_ids" in model_inputs and isinstance(model_inputs['''input_ids'''] , __lowerCAmelCase ) and all(x is None for x in model_inputs['''input_ids'''] ) ): _A = None if generate_kwargs is None: _A = {} # FIXME: We need to pop here due to a difference in how `generation.py` and `generation.tf_utils.py` # parse inputs. In the Tensorflow version, `generate` raises an error if we don't use `input_ids` whereas # the PyTorch version matches it with `self.model.main_input_name` or `self.model.encoder.main_input_name` # in the `_prepare_model_inputs` method. _A = model_inputs.pop(self.model.main_input_name ) _A = self.model.generate(__lowerCAmelCase , **__lowerCAmelCase , **__lowerCAmelCase ) return model_outputs def snake_case_ ( self : Dict , __lowerCAmelCase : Any ) -> Union[str, Any]: _A = [] for output_ids in model_outputs: _A = { '''generated_text''': self.tokenizer.decode( __lowerCAmelCase , skip_special_tokens=__lowerCAmelCase , ) } records.append(__lowerCAmelCase ) return records
2
1
# XXX: we want transformers master here - in the absense of conftest manipulating sys.path: # hack it in for now: import sys from pathlib import Path UpperCAmelCase_ = Path(__file__).resolve().parents[3] / """src""" sys.path.insert(1, str(git_repo_path)) import dataclasses # noqa import io # noqa import itertools # noqa import json # noqa import os # noqa import unittest # noqa from copy import deepcopy # noqa from parameterized import parameterized # noqa from transformers import TrainingArguments, is_torch_available # noqa from transformers.deepspeed import is_deepspeed_available # noqa from transformers.file_utils import WEIGHTS_NAME # noqa from transformers.testing_utils import ( # noqa CaptureLogger, ExtendSysPath, TestCasePlus, execute_subprocess_async, get_gpu_count, mockenv_context, require_deepspeed, require_torch_gpu, require_torch_multi_gpu, slow, ) from transformers.trainer_utils import set_seed # noqa set_seed(4_2) UpperCAmelCase_ = {"""base""": """patrickvonplaten/wav2vec2_tiny_random""", """robust""": """patrickvonplaten/wav2vec2_tiny_random_robust"""} UpperCAmelCase_ = """zero2""" UpperCAmelCase_ = """zero3""" UpperCAmelCase_ = [ZEROa, ZEROa] def SCREAMING_SNAKE_CASE_ ( _snake_case :Tuple , _snake_case :Dict , _snake_case :Union[str, Any] ) -> Tuple: # customize the test name generator function as we want both params to appear in the sub-test # name, as by default it shows only the first param _A = parameterized.to_safe_name('''_'''.join(str(_snake_case ) for x in param.args ) ) return F'''{func.__name__}_{param_based_name}''' # Cartesian-product of zero stages with models to test UpperCAmelCase_ = list(itertools.product(stages, models.keys())) @slow @require_deepspeed @require_torch_gpu class lowerCamelCase__ ( _A): """simple docstring""" @parameterized.expand(__lowerCAmelCase , name_func=__lowerCAmelCase ) def snake_case_ ( self : Union[str, Any] , __lowerCAmelCase : str , __lowerCAmelCase : str ) -> Dict: self.run_and_check( stage=__lowerCAmelCase , model=__lowerCAmelCase , distributed=__lowerCAmelCase , fpaa=__lowerCAmelCase , ) @require_torch_multi_gpu @parameterized.expand(__lowerCAmelCase , name_func=__lowerCAmelCase ) def snake_case_ ( self : Optional[int] , __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : Any ) -> Dict: self.run_and_check( stage=__lowerCAmelCase , model=__lowerCAmelCase , distributed=__lowerCAmelCase , fpaa=__lowerCAmelCase , ) @parameterized.expand(__lowerCAmelCase , name_func=__lowerCAmelCase ) def snake_case_ ( self : Dict , __lowerCAmelCase : Dict , __lowerCAmelCase : Union[str, Any] ) -> str: self.run_and_check( stage=__lowerCAmelCase , model=__lowerCAmelCase , distributed=__lowerCAmelCase , fpaa=__lowerCAmelCase , ) @require_torch_multi_gpu @parameterized.expand(__lowerCAmelCase , name_func=__lowerCAmelCase ) def snake_case_ ( self : Union[str, Any] , __lowerCAmelCase : List[Any] , __lowerCAmelCase : Union[str, Any] ) -> List[str]: self.run_and_check( stage=__lowerCAmelCase , model=__lowerCAmelCase , distributed=__lowerCAmelCase , fpaa=__lowerCAmelCase , ) def snake_case_ ( self : Union[str, Any] , __lowerCAmelCase : Optional[int] ) -> int: # XXX: run_asr is premature and doesn't save any results # so all we check for now is that the process didn't fail pass def snake_case_ ( self : Union[str, Any] , __lowerCAmelCase : str , __lowerCAmelCase : str , __lowerCAmelCase : int = 10 , __lowerCAmelCase : bool = True , __lowerCAmelCase : bool = True , __lowerCAmelCase : bool = True , ) -> Any: _A = models[model] _A = self.run_trainer( 
stage=__lowerCAmelCase , model_name=__lowerCAmelCase , eval_steps=__lowerCAmelCase , num_train_epochs=1 , distributed=__lowerCAmelCase , fpaa=__lowerCAmelCase , ) self.do_checks(__lowerCAmelCase ) return output_dir def snake_case_ ( self : List[str] , __lowerCAmelCase : str , __lowerCAmelCase : str , __lowerCAmelCase : int = 10 , __lowerCAmelCase : int = 1 , __lowerCAmelCase : bool = True , __lowerCAmelCase : bool = True , ) -> Any: _A = self.get_auto_remove_tmp_dir('''./xxx''' , after=__lowerCAmelCase ) _A = f''' --model_name_or_path {model_name} --dataset_name hf-internal-testing/librispeech_asr_dummy --dataset_config_name clean --train_split_name validation --validation_split_name validation --output_dir {output_dir} --num_train_epochs {str(__lowerCAmelCase )} --per_device_train_batch_size 2 --per_device_eval_batch_size 2 --evaluation_strategy steps --learning_rate 5e-4 --warmup_steps 8 --orthography timit --preprocessing_num_workers 1 --group_by_length --freeze_feature_extractor --report_to none --save_steps 0 --eval_steps {eval_steps} --report_to none '''.split() if fpaa: args.extend(['''--fp16'''] ) # currently ds_config_wav2vec2_zero.json requires "zero_optimization.find_unused_parameters": true, # hence the separate config files _A = f'''--deepspeed {self.test_file_dir_str}/ds_config_wav2vec2_{stage}.json'''.split() _A = [f'''{self.examples_dir_str}/research_projects/wav2vec2/run_asr.py'''] _A = self.get_launcher(__lowerCAmelCase ) _A = launcher + script + args + ds_args # keep for quick debug # print(" ".join([f"\nPYTHONPATH={self.src_dir_str}"] +cmd)); die execute_subprocess_async(__lowerCAmelCase , env=self.get_env() ) return output_dir def snake_case_ ( self : List[str] , __lowerCAmelCase : Dict=False ) -> Tuple: # 1. explicitly set --num_nodes=1 just in case these tests end up run on a multi-node setup # - it won't be able to handle that # 2. for now testing with just 2 gpus max (since some quality tests may give different # results with mode gpus because we use very little data) _A = min(2 , get_gpu_count() ) if distributed else 1 return f'''deepspeed --num_nodes 1 --num_gpus {num_gpus}'''.split()
2
import requests
from bs4 import BeautifulSoup


def stock_price(symbol: str = "AAPL") -> str:
    url = f"https://in.finance.yahoo.com/quote/{symbol}?s={symbol}"
    soup = BeautifulSoup(requests.get(url).text, "html.parser")
    class_ = "My(6px) Pos(r) smartphone_Mt(6px)"
    return soup.find("div", class_=class_).find("span").text


if __name__ == "__main__":
    for symbol in "AAPL AMZN IBM GOOG MSFT ORCL".split():
        print(f"Current {symbol:<4} stock price is {stock_price(symbol):>8}")
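# Hypothetical hardened variant (same imports as above; added here for
# illustration only): Yahoo's CSS class names are auto-generated and change
# without notice, so guarding the lookup avoids an AttributeError when the
# markup shifts.
def stock_price_safe(symbol: str = "AAPL"):
    url = f"https://in.finance.yahoo.com/quote/{symbol}?s={symbol}"
    soup = BeautifulSoup(requests.get(url).text, "html.parser")
    div = soup.find("div", class_="My(6px) Pos(r) smartphone_Mt(6px)")
    return div.find("span").text if div is not None else None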
2
1
import argparse

import torch
from torch import nn

from transformers import MBartConfig, MBartForConditionalGeneration


def remove_ignore_keys_(state_dict):
    ignore_keys = [
        "encoder.version",
        "decoder.version",
        "model.encoder.version",
        "model.decoder.version",
        "_float_tensor",
        "decoder.output_projection.weight",
    ]
    for k in ignore_keys:
        state_dict.pop(k, None)


def make_linear_from_emb(emb):
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer


def convert_fairseq_mbart_checkpoint_from_disk(
    checkpoint_path, hf_config_path="facebook/mbart-large-en-ro", finetuned=False, mbart_50=False
):
    state_dict = torch.load(checkpoint_path, map_location="cpu")["model"]
    remove_ignore_keys_(state_dict)
    vocab_size = state_dict["encoder.embed_tokens.weight"].shape[0]
    mbart_config = MBartConfig.from_pretrained(hf_config_path, vocab_size=vocab_size)
    if mbart_50 and finetuned:
        mbart_config.activation_function = "relu"
    state_dict["shared.weight"] = state_dict["decoder.embed_tokens.weight"]
    model = MBartForConditionalGeneration(mbart_config)
    model.model.load_state_dict(state_dict)
    if finetuned:
        model.lm_head = make_linear_from_emb(model.model.shared)
    return model


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "fairseq_path", type=str, help="bart.large, bart.large.cnn or a path to a model.pt on local filesystem."
    )
    parser.add_argument("pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    parser.add_argument(
        "--hf_config",
        default="facebook/mbart-large-cc25",
        type=str,
        help="Which huggingface architecture to use: mbart-large",
    )
    parser.add_argument("--mbart_50", action="store_true", help="whether the model is an mBART-50 checkpoint")
    parser.add_argument("--finetuned", action="store_true", help="whether the model is a fine-tuned checkpoint")
    args = parser.parse_args()
    model = convert_fairseq_mbart_checkpoint_from_disk(
        args.fairseq_path, hf_config_path=args.hf_config, finetuned=args.finetuned, mbart_50=args.mbart_50
    )
    model.save_pretrained(args.pytorch_dump_folder_path)
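# Example invocation (illustrative; the script name and both paths are
# placeholders, not values from the original):
#   python convert_mbart_checkpoint.py /path/to/model.pt ./mbart-dump \
#       --hf_config facebook/mbart-large-cc25 --finetuned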
2
from graphs.minimum_spanning_tree_kruskal import kruskal


def test_kruskal_successful_result():
    num_nodes = 9
    edges = [
        [0, 1, 4],
        [0, 7, 8],
        [1, 2, 8],
        [7, 8, 7],
        [7, 6, 1],
        [2, 8, 2],
        [8, 6, 6],
        [2, 3, 7],
        [2, 5, 4],
        [6, 5, 2],
        [3, 5, 14],
        [3, 4, 9],
        [5, 4, 10],
        [1, 7, 11],
    ]
    result = kruskal(num_nodes, edges)
    expected = [
        [7, 6, 1],
        [2, 8, 2],
        [6, 5, 2],
        [0, 1, 4],
        [2, 5, 4],
        [2, 3, 7],
        [0, 7, 8],
        [3, 4, 9],
    ]
    assert sorted(expected) == sorted(result)
2
1
def is_arithmetic_series(series: list) -> bool:
    if not isinstance(series, list):
        raise ValueError("Input series is not valid, valid series - [2, 4, 6]")
    if len(series) == 0:
        raise ValueError("Input list must be a non empty list")
    if len(series) == 1:
        return True
    common_diff = series[1] - series[0]
    for index in range(len(series) - 1):
        if series[index + 1] - series[index] != common_diff:
            return False
    return True


def arithmetic_mean(series: list) -> float:
    if not isinstance(series, list):
        raise ValueError("Input series is not valid, valid series - [2, 4, 6]")
    if len(series) == 0:
        raise ValueError("Input list must be a non empty list")
    answer = 0
    for val in series:
        answer += val
    return answer / len(series)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
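# Worked example, assuming the functions above: [2, 4, 6] has a constant
# difference of 2, so it is arithmetic, and its mean is (2 + 4 + 6) / 3 = 4.0.
assert is_arithmetic_series([2, 4, 6])
assert not is_arithmetic_series([2, 4, 7])
assert arithmetic_mean([2, 4, 6]) == 4.0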
2
def sum_of_divisors(input_num: int) -> int:
    if not isinstance(input_num, int):
        raise ValueError("Input must be an integer")
    if input_num <= 0:
        raise ValueError("Input must be positive")
    return sum(
        divisor for divisor in range(1, input_num // 2 + 1) if input_num % divisor == 0
    )


if __name__ == "__main__":
    import doctest

    doctest.testmod()
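# Example, assuming sum_of_divisors above: a perfect number equals the sum of
# its proper divisors, so 28 = 1 + 2 + 4 + 7 + 14 qualifies while 27 does not.
assert sum_of_divisors(28) == 28
assert sum_of_divisors(27) == 13  # 1 + 3 + 9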
2
1
from graphs.minimum_spanning_tree_kruskal import kruskal


def test_kruskal_successful_result():
    num_nodes = 9
    edges = [
        [0, 1, 4],
        [0, 7, 8],
        [1, 2, 8],
        [7, 8, 7],
        [7, 6, 1],
        [2, 8, 2],
        [8, 6, 6],
        [2, 3, 7],
        [2, 5, 4],
        [6, 5, 2],
        [3, 5, 14],
        [3, 4, 9],
        [5, 4, 10],
        [1, 7, 11],
    ]
    result = kruskal(num_nodes, edges)
    expected = [
        [7, 6, 1],
        [2, 8, 2],
        [6, 5, 2],
        [0, 1, 4],
        [2, 5, 4],
        [2, 3, 7],
        [0, 7, 8],
        [3, 4, 9],
    ]
    assert sorted(expected) == sorted(result)
2
alphabet_size = 256
# Modulus to hash a string
modulus = 1_000_003


def rabin_karp(pattern: str, text: str) -> bool:
    p_len = len(pattern)
    t_len = len(text)
    if p_len > t_len:
        return False
    p_hash = 0
    text_hash = 0
    modulus_power = 1
    # Calculating the hash of pattern and substring of text
    for i in range(p_len):
        p_hash = (ord(pattern[i]) + p_hash * alphabet_size) % modulus
        text_hash = (ord(text[i]) + text_hash * alphabet_size) % modulus
        if i == p_len - 1:
            continue
        modulus_power = (modulus_power * alphabet_size) % modulus
    for i in range(0, t_len - p_len + 1):
        if text_hash == p_hash and text[i : i + p_len] == pattern:
            return True
        if i == t_len - p_len:
            continue
        # Calculate the https://en.wikipedia.org/wiki/Rolling_hash
        text_hash = (
            (text_hash - ord(text[i]) * modulus_power) * alphabet_size
            + ord(text[i + p_len])
        ) % modulus
    return False


def test_rabin_karp() -> None:
    # Test 1)
    pattern = "abc1abc12"
    text1 = "alskfjaldsabc1abc1abc12k23adsfabcabc"
    text2 = "alskfjaldsk23adsfabcabc"
    assert rabin_karp(pattern, text1) and not rabin_karp(pattern, text2)
    # Test 2)
    pattern = "ABABX"
    text = "ABABZABABYABABX"
    assert rabin_karp(pattern, text)
    # Test 3)
    pattern = "AAAB"
    text = "ABAAAAAB"
    assert rabin_karp(pattern, text)
    # Test 4)
    pattern = "abcdabcy"
    text = "abcxabcdabxabcdabcdabcy"
    assert rabin_karp(pattern, text)
    # Test 5)
    pattern = "Lü"
    text = "Lüsai"
    assert rabin_karp(pattern, text)
    pattern = "Lue"
    assert not rabin_karp(pattern, text)
    print("Success.")


if __name__ == "__main__":
    test_rabin_karp()
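# Sketch of the rolling-hash identity the search loop relies on (uses the
# alphabet_size/modulus constants above; naive_hash is a helper added here
# for illustration only): dropping the leading character and appending the
# next one reproduces the hash computed from scratch.
def naive_hash(s: str) -> int:
    h = 0
    for ch in s:
        h = (ord(ch) + h * alphabet_size) % modulus
    return h


old_hash = naive_hash("ab")
rolled = ((old_hash - ord("a") * alphabet_size) * alphabet_size + ord("c")) % modulus
assert rolled == naive_hash("bc")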
2
1
from typing import List, Union from ..utils import ( add_end_docstrings, is_tf_available, is_torch_available, is_vision_available, logging, requires_backends, ) from .base import PIPELINE_INIT_ARGS, Pipeline if is_vision_available(): from PIL import Image from ..image_utils import load_image if is_tf_available(): from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_VISION_2_SEQ_MAPPING if is_torch_available(): import torch from ..models.auto.modeling_auto import MODEL_FOR_VISION_2_SEQ_MAPPING UpperCAmelCase_ = logging.get_logger(__name__) @add_end_docstrings(_A) class lowerCamelCase__ ( _A): """simple docstring""" def __init__( self : Optional[int] , *__lowerCAmelCase : List[Any] , **__lowerCAmelCase : List[str] ) -> List[str]: super().__init__(*__lowerCAmelCase , **__lowerCAmelCase ) requires_backends(self , '''vision''' ) self.check_model_type( TF_MODEL_FOR_VISION_2_SEQ_MAPPING if self.framework == '''tf''' else MODEL_FOR_VISION_2_SEQ_MAPPING ) def snake_case_ ( self : Any , __lowerCAmelCase : Dict=None , __lowerCAmelCase : List[str]=None , __lowerCAmelCase : Optional[Any]=None ) -> int: _A = {} _A = {} if prompt is not None: _A = prompt if generate_kwargs is not None: _A = generate_kwargs if max_new_tokens is not None: if "generate_kwargs" not in forward_kwargs: _A = {} if "max_new_tokens" in forward_kwargs["generate_kwargs"]: raise ValueError( '''\'max_new_tokens\' is defined twice, once in \'generate_kwargs\' and once as a direct parameter,''' ''' please use only one''' ) _A = max_new_tokens return preprocess_params, forward_kwargs, {} def __call__( self : List[str] , __lowerCAmelCase : Union[str, List[str], "Image.Image", List["Image.Image"]] , **__lowerCAmelCase : Union[str, Any] ) -> Union[str, Any]: return super().__call__(__lowerCAmelCase , **__lowerCAmelCase ) def snake_case_ ( self : List[Any] , __lowerCAmelCase : str , __lowerCAmelCase : Union[str, Any]=None ) -> int: _A = load_image(__lowerCAmelCase ) if prompt is not None: if not isinstance(__lowerCAmelCase , __lowerCAmelCase ): raise ValueError( f'''Received an invalid text input, got - {type(__lowerCAmelCase )} - but expected a single string. 
''' '''Note also that one single text can be provided for conditional image to text generation.''' ) _A = self.model.config.model_type if model_type == "git": _A = self.image_processor(images=__lowerCAmelCase , return_tensors=self.framework ) _A = self.tokenizer(text=__lowerCAmelCase , add_special_tokens=__lowerCAmelCase ).input_ids _A = [self.tokenizer.cls_token_id] + input_ids _A = torch.tensor(__lowerCAmelCase ).unsqueeze(0 ) model_inputs.update({'''input_ids''': input_ids} ) elif model_type == "pix2struct": _A = self.image_processor(images=__lowerCAmelCase , header_text=__lowerCAmelCase , return_tensors=self.framework ) elif model_type != "vision-encoder-decoder": # vision-encoder-decoder does not support conditional generation _A = self.image_processor(images=__lowerCAmelCase , return_tensors=self.framework ) _A = self.tokenizer(__lowerCAmelCase , return_tensors=self.framework ) model_inputs.update(__lowerCAmelCase ) else: raise ValueError(f'''Model type {model_type} does not support conditional text generation''' ) else: _A = self.image_processor(images=__lowerCAmelCase , return_tensors=self.framework ) if self.model.config.model_type == "git" and prompt is None: _A = None return model_inputs def snake_case_ ( self : Optional[int] , __lowerCAmelCase : Tuple , __lowerCAmelCase : Dict=None ) -> str: # Git model sets `model_inputs["input_ids"] = None` in `preprocess` (when `prompt=None`). In batch model, the # pipeline will group them into a list of `None`, which fail `_forward`. Avoid this by checking it first. if ( "input_ids" in model_inputs and isinstance(model_inputs['''input_ids'''] , __lowerCAmelCase ) and all(x is None for x in model_inputs['''input_ids'''] ) ): _A = None if generate_kwargs is None: _A = {} # FIXME: We need to pop here due to a difference in how `generation.py` and `generation.tf_utils.py` # parse inputs. In the Tensorflow version, `generate` raises an error if we don't use `input_ids` whereas # the PyTorch version matches it with `self.model.main_input_name` or `self.model.encoder.main_input_name` # in the `_prepare_model_inputs` method. _A = model_inputs.pop(self.model.main_input_name ) _A = self.model.generate(__lowerCAmelCase , **__lowerCAmelCase , **__lowerCAmelCase ) return model_outputs def snake_case_ ( self : Dict , __lowerCAmelCase : Any ) -> Union[str, Any]: _A = [] for output_ids in model_outputs: _A = { '''generated_text''': self.tokenizer.decode( __lowerCAmelCase , skip_special_tokens=__lowerCAmelCase , ) } records.append(__lowerCAmelCase ) return records
2
import json import os from typing import Dict, List, Optional, Tuple from ...tokenization_utils import PreTrainedTokenizer from ...utils import logging UpperCAmelCase_ = logging.get_logger(__name__) UpperCAmelCase_ = { """vocab_file""": """vocab.json""", """tokenizer_config_file""": """tokenizer_config.json""", """merges_file""": """merges.txt""", } UpperCAmelCase_ = { """vocab_file""": { """facebook/s2t-wav2vec2-large-en-de""": ( """https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/vocab.json""" ), }, """tokenizer_config_file""": { """facebook/s2t-wav2vec2-large-en-de""": ( """https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/tokenizer_config.json""" ), }, """merges_file""": { """facebook/s2t-wav2vec2-large-en-de""": ( """https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/merges.txt""" ), }, } UpperCAmelCase_ = """</w>""" UpperCAmelCase_ = """@@ """ def SCREAMING_SNAKE_CASE_ ( _snake_case :Optional[Any] ) -> List[str]: _A = set() _A = word[0] for char in word[1:]: pairs.add((prev_char, char) ) _A = char return pairs # Speech2Text2 has no max input length UpperCAmelCase_ = {"""facebook/s2t-wav2vec2-large-en-de""": 1_0_2_4} class lowerCamelCase__ ( _A): """simple docstring""" a__ : Dict = VOCAB_FILES_NAMES a__ : str = PRETRAINED_VOCAB_FILES_MAP a__ : Optional[Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES a__ : List[Any] = ["input_ids", "attention_mask"] def __init__( self : Optional[Any] , __lowerCAmelCase : Optional[int] , __lowerCAmelCase : List[str]="<s>" , __lowerCAmelCase : Tuple="<pad>" , __lowerCAmelCase : Optional[Any]="</s>" , __lowerCAmelCase : Dict="<unk>" , __lowerCAmelCase : Union[str, Any]=False , __lowerCAmelCase : Optional[int]=None , **__lowerCAmelCase : str , ) -> Dict: super().__init__( unk_token=__lowerCAmelCase , bos_token=__lowerCAmelCase , eos_token=__lowerCAmelCase , pad_token=__lowerCAmelCase , do_lower_case=__lowerCAmelCase , **__lowerCAmelCase , ) _A = do_lower_case with open(__lowerCAmelCase , encoding='''utf-8''' ) as vocab_handle: _A = json.load(__lowerCAmelCase ) _A = {v: k for k, v in self.encoder.items()} if merges_file is None: logger.info(f'''No merges files provided. 
{self.__class__.__name__} can only be used for decoding.''' ) _A = None _A = None else: with open(__lowerCAmelCase , encoding='''utf-8''' ) as merges_handle: _A = merges_handle.read().split('''\n''' )[:-1] _A = [tuple(merge.split()[:2] ) for merge in merges] _A = dict(zip(__lowerCAmelCase , range(len(__lowerCAmelCase ) ) ) ) _A = {} @property def snake_case_ ( self : List[str] ) -> int: return len(self.decoder ) def snake_case_ ( self : Dict ) -> Dict: return dict(self.encoder , **self.added_tokens_encoder ) def snake_case_ ( self : Union[str, Any] , __lowerCAmelCase : Any ) -> Union[str, Any]: _A = tuple(token[:-1] ) + (token[-1] + BPE_TOKEN_MERGES,) if token in self.cache: return self.cache[token] _A = get_pairs(__lowerCAmelCase ) if not pairs: return token while True: _A = min(__lowerCAmelCase , key=lambda __lowerCAmelCase : self.bpe_ranks.get(__lowerCAmelCase , float('''inf''' ) ) ) if bigram not in self.bpe_ranks: break _A , _A = bigram _A = [] _A = 0 while i < len(__lowerCAmelCase ): try: _A = word.index(__lowerCAmelCase , __lowerCAmelCase ) except ValueError: new_word.extend(word[i:] ) break else: new_word.extend(word[i:j] ) _A = j if word[i] == first and i < len(__lowerCAmelCase ) - 1 and word[i + 1] == second: new_word.append(first + second ) i += 2 else: new_word.append(word[i] ) i += 1 _A = tuple(__lowerCAmelCase ) _A = new_word if len(__lowerCAmelCase ) == 1: break else: _A = get_pairs(__lowerCAmelCase ) _A = ''' '''.join(__lowerCAmelCase ) if word == "\n " + BPE_TOKEN_MERGES: _A = '''\n''' + BPE_TOKEN_MERGES if word.endswith(__lowerCAmelCase ): _A = word.replace(__lowerCAmelCase , '''''' ) _A = word.replace(''' ''' , __lowerCAmelCase ) _A = word return word def snake_case_ ( self : Optional[Any] , __lowerCAmelCase : Tuple ) -> Optional[int]: if self.bpe_ranks is None: raise ValueError( '''This tokenizer was instantiated without a `merges.txt` file, so''' ''' that it can only be used for decoding, not for encoding.''' '''Make sure to provide `merges.txt` file at instantiation to enable ''' '''encoding.''' ) if self.do_lower_case: _A = text.lower() _A = text.split() _A = [] for token in text: if token: split_tokens.extend(list(self.bpe(__lowerCAmelCase ).split(''' ''' ) ) ) return split_tokens def snake_case_ ( self : List[Any] , __lowerCAmelCase : str ) -> int: return self.encoder.get(__lowerCAmelCase , self.encoder.get(self.unk_token ) ) def snake_case_ ( self : str , __lowerCAmelCase : int ) -> str: _A = self.decoder.get(__lowerCAmelCase , self.unk_token ) return result def snake_case_ ( self : List[str] , __lowerCAmelCase : List[str] ) -> str: _A = ''' '''.join(__lowerCAmelCase ) # make sure @@ tokens are concatenated _A = ''''''.join(string.split(__lowerCAmelCase ) ) return string def snake_case_ ( self : Tuple , __lowerCAmelCase : str , __lowerCAmelCase : Optional[str] = None ) -> Tuple[str]: if not os.path.isdir(__lowerCAmelCase ): logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' ) return _A = os.path.join( __lowerCAmelCase , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] ) _A = os.path.join( __lowerCAmelCase , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''merges_file'''] ) with open(__lowerCAmelCase , '''w''' , encoding='''utf-8''' ) as f: f.write(json.dumps(self.encoder , indent=2 , sort_keys=__lowerCAmelCase , ensure_ascii=__lowerCAmelCase ) + '''\n''' ) _A = 0 if self.bpe_ranks is None: return (vocab_file,) with open(__lowerCAmelCase , '''w''' , 
encoding='''utf-8''' ) as writer: for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda kv : kv[1] ): if index != token_index: logger.warning( f'''Saving vocabulary to {merges_file}: BPE merge indices are not consecutive.''' ''' Please check that the tokenizer is not corrupted!''' ) index = token_index writer.write(''' '''.join(bpe_tokens ) + '''\n''' ) index += 1 return (vocab_file, merges_file)
2
1
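For readers new to BPE, the greedy lowest-rank merge loop inside the tokenizer's `bpe` method above reduces to the following standalone sketch; the `ranks` table and the example word are made-up toys, not the model's real merge list.

def get_pairs(word: tuple) -> set:
    # All adjacent symbol pairs in the current segmentation.
    return {(word[i], word[i + 1]) for i in range(len(word) - 1)}


def bpe(word: str, ranks: dict) -> tuple:
    symbols = tuple(word)
    while len(symbols) > 1:
        pairs = get_pairs(symbols)
        # Pick the pair with the lowest merge rank; stop if none is mergeable.
        best = min(pairs, key=lambda p: ranks.get(p, float("inf")))
        if best not in ranks:
            break
        first, second = best
        merged, i = [], 0
        while i < len(symbols):
            if i < len(symbols) - 1 and (symbols[i], symbols[i + 1]) == best:
                merged.append(first + second)
                i += 2
            else:
                merged.append(symbols[i])
                i += 1
        symbols = tuple(merged)
    return symbols


ranks = {("l", "o"): 0, ("lo", "w"): 1}
print(bpe("low", ranks))  # ('low',)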
def sum_of_divisors(input_num: int) -> int:
    # Sum of the proper divisors of ``input_num``.
    if not isinstance(input_num, int):
        raise ValueError('''Input must be an integer''')
    if input_num <= 0:
        raise ValueError('''Input must be positive''')
    return sum(
        divisor for divisor in range(1, input_num // 2 + 1) if input_num % divisor == 0
    )


if __name__ == "__main__":
    import doctest

    doctest.testmod()
2
from __future__ import annotations from sys import maxsize from typing import Generic, TypeVar UpperCAmelCase_ = TypeVar("""T""") def SCREAMING_SNAKE_CASE_ ( _snake_case :int ) -> int: return (position - 1) // 2 def SCREAMING_SNAKE_CASE_ ( _snake_case :int ) -> int: return (2 * position) + 1 def SCREAMING_SNAKE_CASE_ ( _snake_case :int ) -> int: return (2 * position) + 2 class lowerCamelCase__ ( Generic[T]): """simple docstring""" def __init__( self : Optional[int] ) -> None: _A = [] _A = {} _A = 0 def __len__( self : str ) -> int: return self.elements def __repr__( self : Optional[int] ) -> str: return str(self.heap ) def snake_case_ ( self : str ) -> bool: # Check if the priority queue is empty return self.elements == 0 def snake_case_ ( self : Optional[int] , __lowerCAmelCase : T , __lowerCAmelCase : int ) -> None: # Add an element with given priority to the queue self.heap.append((elem, weight) ) _A = self.elements self.elements += 1 self._bubble_up(__lowerCAmelCase ) def snake_case_ ( self : Tuple ) -> T: # Remove and return the element with lowest weight (highest priority) if self.elements > 1: self._swap_nodes(0 , self.elements - 1 ) _A , _A = self.heap.pop() del self.position_map[elem] self.elements -= 1 if self.elements > 0: _A , _A = self.heap[0] self._bubble_down(__lowerCAmelCase ) return elem def snake_case_ ( self : int , __lowerCAmelCase : T , __lowerCAmelCase : int ) -> None: # Update the weight of the given key _A = self.position_map[elem] _A = (elem, weight) if position > 0: _A = get_parent_position(__lowerCAmelCase ) _A , _A = self.heap[parent_position] if parent_weight > weight: self._bubble_up(__lowerCAmelCase ) else: self._bubble_down(__lowerCAmelCase ) else: self._bubble_down(__lowerCAmelCase ) def snake_case_ ( self : Optional[Any] , __lowerCAmelCase : T ) -> None: # Place a node at the proper position (upward movement) [to be used internally # only] _A = self.position_map[elem] if curr_pos == 0: return None _A = get_parent_position(__lowerCAmelCase ) _A , _A = self.heap[curr_pos] _A , _A = self.heap[parent_position] if parent_weight > weight: self._swap_nodes(__lowerCAmelCase , __lowerCAmelCase ) return self._bubble_up(__lowerCAmelCase ) return None def snake_case_ ( self : Dict , __lowerCAmelCase : T ) -> None: # Place a node at the proper position (downward movement) [to be used # internally only] _A = self.position_map[elem] _A , _A = self.heap[curr_pos] _A = get_child_left_position(__lowerCAmelCase ) _A = get_child_right_position(__lowerCAmelCase ) if child_left_position < self.elements and child_right_position < self.elements: _A , _A = self.heap[child_left_position] _A , _A = self.heap[child_right_position] if child_right_weight < child_left_weight and child_right_weight < weight: self._swap_nodes(__lowerCAmelCase , __lowerCAmelCase ) return self._bubble_down(__lowerCAmelCase ) if child_left_position < self.elements: _A , _A = self.heap[child_left_position] if child_left_weight < weight: self._swap_nodes(__lowerCAmelCase , __lowerCAmelCase ) return self._bubble_down(__lowerCAmelCase ) else: return None if child_right_position < self.elements: _A , _A = self.heap[child_right_position] if child_right_weight < weight: self._swap_nodes(__lowerCAmelCase , __lowerCAmelCase ) return self._bubble_down(__lowerCAmelCase ) return None def snake_case_ ( self : List[str] , __lowerCAmelCase : int , __lowerCAmelCase : int ) -> None: # Swap the nodes at the given positions _A = self.heap[nodea_pos][0] _A = self.heap[nodea_pos][0] _A , _A = ( self.heap[nodea_pos], 
self.heap[nodea_pos], ) _A = nodea_pos _A = nodea_pos class lowerCamelCase__ ( Generic[T]): """simple docstring""" def __init__( self : str ) -> None: _A = {} _A = 0 def __repr__( self : str ) -> str: return str(self.connections ) def __len__( self : Dict ) -> int: return self.nodes def snake_case_ ( self : Any , __lowerCAmelCase : T ) -> None: # Add a node in the graph if it is not in the graph if node not in self.connections: _A = {} self.nodes += 1 def snake_case_ ( self : str , __lowerCAmelCase : T , __lowerCAmelCase : T , __lowerCAmelCase : int ) -> None: # Add an edge between 2 nodes in the graph self.add_node(__lowerCAmelCase ) self.add_node(__lowerCAmelCase ) _A = weight _A = weight def SCREAMING_SNAKE_CASE_ ( _snake_case :GraphUndirectedWeighted[T] , ) -> tuple[dict[T, int], dict[T, T | None]]: _A = {node: maxsize for node in graph.connections} _A = {node: None for node in graph.connections} _A = MinPriorityQueue() for node, weight in dist.items(): priority_queue.push(_snake_case , _snake_case ) if priority_queue.is_empty(): return dist, parent # initialization _A = priority_queue.extract_min() _A = 0 for neighbour in graph.connections[node]: if dist[neighbour] > dist[node] + graph.connections[node][neighbour]: _A = dist[node] + graph.connections[node][neighbour] priority_queue.update_key(_snake_case , dist[neighbour] ) _A = node # running prim's algorithm while not priority_queue.is_empty(): _A = priority_queue.extract_min() for neighbour in graph.connections[node]: if dist[neighbour] > dist[node] + graph.connections[node][neighbour]: _A = dist[node] + graph.connections[node][neighbour] priority_queue.update_key(_snake_case , dist[neighbour] ) _A = node return dist, parent
2
1
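One note on the priority-queue code above: its relaxation step uses `dist[node] + weight`, which is Dijkstra-style; textbook Prim keys each vertex on the lightest incident edge alone. A minimal heapq-based sketch of the latter, using lazy deletion instead of an `update_key` operation:

import heapq


def prim_mst(graph: dict, start) -> dict:
    # graph: {node: {neighbour: weight}}; returns parent pointers of the MST.
    dist = {node: float("inf") for node in graph}
    parent = {node: None for node in graph}
    dist[start] = 0
    heap = [(0, start)]
    in_tree = set()
    while heap:
        d, node = heapq.heappop(heap)
        if node in in_tree:
            continue  # stale entry; lazy deletion instead of update_key
        in_tree.add(node)
        for neighbour, weight in graph[node].items():
            if neighbour not in in_tree and weight < dist[neighbour]:
                dist[neighbour] = weight
                parent[neighbour] = node
                heapq.heappush(heap, (weight, neighbour))
    return parent


g = {"a": {"b": 1, "c": 4}, "b": {"a": 1, "c": 2}, "c": {"a": 4, "b": 2}}
print(prim_mst(g, "a"))  # {'a': None, 'b': 'a', 'c': 'b'}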
import warnings from typing import Any, Dict, List, Optional, Union import numpy as np from ...audio_utils import mel_filter_bank, optimal_fft_length, spectrogram, window_function from ...feature_extraction_sequence_utils import SequenceFeatureExtractor from ...feature_extraction_utils import BatchFeature from ...utils import PaddingStrategy, TensorType, logging UpperCAmelCase_ = logging.get_logger(__name__) class lowerCamelCase__ ( _A): """simple docstring""" a__ : str = ["input_values", "attention_mask"] def __init__( self : Tuple , __lowerCAmelCase : int = 1 , __lowerCAmelCase : int = 1_60_00 , __lowerCAmelCase : float = 0.0 , __lowerCAmelCase : bool = False , __lowerCAmelCase : int = 80 , __lowerCAmelCase : int = 16 , __lowerCAmelCase : int = 64 , __lowerCAmelCase : str = "hann_window" , __lowerCAmelCase : float = 1.0 , __lowerCAmelCase : float = 80 , __lowerCAmelCase : float = 76_00 , __lowerCAmelCase : float = 1E-10 , __lowerCAmelCase : int = 2 , __lowerCAmelCase : bool = True , **__lowerCAmelCase : List[Any] , ) -> Dict: super().__init__(feature_size=__lowerCAmelCase , sampling_rate=__lowerCAmelCase , padding_value=__lowerCAmelCase , **__lowerCAmelCase ) _A = do_normalize _A = return_attention_mask _A = num_mel_bins _A = hop_length _A = win_length _A = win_function _A = frame_signal_scale _A = fmin _A = fmax _A = mel_floor _A = reduction_factor _A = win_length * sampling_rate // 10_00 _A = hop_length * sampling_rate // 10_00 _A = optimal_fft_length(self.sample_size ) _A = (self.n_fft // 2) + 1 _A = window_function(window_length=self.sample_size , name=self.win_function , periodic=__lowerCAmelCase ) _A = mel_filter_bank( num_frequency_bins=self.n_freqs , num_mel_filters=self.num_mel_bins , min_frequency=self.fmin , max_frequency=self.fmax , sampling_rate=self.sampling_rate , norm='''slaney''' , mel_scale='''slaney''' , ) if frame_signal_scale != 1.0: warnings.warn( '''The argument `frame_signal_scale` is deprecated and will be removed in version 4.30.0 of Transformers''' , __lowerCAmelCase , ) if reduction_factor != 2.0: warnings.warn( '''The argument `reduction_factor` is deprecated and will be removed in version 4.30.0 of Transformers''' , __lowerCAmelCase , ) @staticmethod # Copied from transformers.models.wav2vec2.feature_extraction_wav2vec2.Wav2Vec2FeatureExtractor.zero_mean_unit_var_norm def snake_case_ ( __lowerCAmelCase : List[np.ndarray] , __lowerCAmelCase : List[np.ndarray] , __lowerCAmelCase : float = 0.0 ) -> List[np.ndarray]: if attention_mask is not None: _A = np.array(__lowerCAmelCase , np.intaa ) _A = [] for vector, length in zip(__lowerCAmelCase , attention_mask.sum(-1 ) ): _A = (vector - vector[:length].mean()) / np.sqrt(vector[:length].var() + 1E-7 ) if length < normed_slice.shape[0]: _A = padding_value normed_input_values.append(__lowerCAmelCase ) else: _A = [(x - x.mean()) / np.sqrt(x.var() + 1E-7 ) for x in input_values] return normed_input_values def snake_case_ ( self : Dict , __lowerCAmelCase : np.ndarray , ) -> np.ndarray: _A = spectrogram( __lowerCAmelCase , window=self.window , frame_length=self.sample_size , hop_length=self.sample_stride , fft_length=self.n_fft , mel_filters=self.mel_filters , mel_floor=self.mel_floor , log_mel='''log10''' , ) return log_mel_spec.T def __call__( self : List[str] , __lowerCAmelCase : Optional[Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]]] = None , __lowerCAmelCase : Optional[Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]]] = None , __lowerCAmelCase : Union[bool, str, 
PaddingStrategy] = False , __lowerCAmelCase : Optional[int] = None , __lowerCAmelCase : bool = False , __lowerCAmelCase : Optional[int] = None , __lowerCAmelCase : Optional[bool] = None , __lowerCAmelCase : Optional[Union[str, TensorType]] = None , __lowerCAmelCase : Optional[int] = None , **__lowerCAmelCase : List[str] , ) -> BatchFeature: if audio is None and audio_target is None: raise ValueError('''You must provide either `audio` or `audio_target` values.''' ) if sampling_rate is not None: if sampling_rate != self.sampling_rate: raise ValueError( f'''The model corresponding to this feature extractor: {self} was trained using a sampling rate of''' f''' {self.sampling_rate}. Please make sure that the provided audio input was sampled with''' f''' {self.sampling_rate} and not {sampling_rate}.''' ) else: logger.warning( '''It is strongly recommended to pass the ``sampling_rate`` argument to this function. ''' '''Failing to do so can result in silent errors that might be hard to debug.''' ) if audio is not None: _A = self._process_audio( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , **__lowerCAmelCase , ) else: _A = None if audio_target is not None: _A = self._process_audio( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , **__lowerCAmelCase , ) if inputs is None: return inputs_target else: _A = inputs_target['''input_values'''] _A = inputs_target.get('''attention_mask''' ) if decoder_attention_mask is not None: _A = decoder_attention_mask return inputs def snake_case_ ( self : Any , __lowerCAmelCase : Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]] , __lowerCAmelCase : bool = False , __lowerCAmelCase : Union[bool, str, PaddingStrategy] = False , __lowerCAmelCase : Optional[int] = None , __lowerCAmelCase : bool = False , __lowerCAmelCase : Optional[int] = None , __lowerCAmelCase : Optional[bool] = None , __lowerCAmelCase : Optional[Union[str, TensorType]] = None , **__lowerCAmelCase : int , ) -> BatchFeature: _A = isinstance(__lowerCAmelCase , np.ndarray ) and len(speech.shape ) > 1 if is_batched_numpy and len(speech.shape ) > 2: raise ValueError(f'''Only mono-channel audio is supported for input to {self}''' ) _A = is_batched_numpy or ( isinstance(__lowerCAmelCase , (list, tuple) ) and (isinstance(speech[0] , (np.ndarray, tuple, list) )) ) if is_batched: _A = [np.asarray(__lowerCAmelCase , dtype=np.floataa ) for speech in speech] elif not is_batched and not isinstance(__lowerCAmelCase , np.ndarray ): _A = np.asarray(__lowerCAmelCase , dtype=np.floataa ) elif isinstance(__lowerCAmelCase , np.ndarray ) and speech.dtype is np.dtype(np.floataa ): _A = speech.astype(np.floataa ) # always return batch if not is_batched: _A = [speech] # needed to make pad() work on spectrogram inputs _A = self.feature_size # convert into correct format for padding if is_target: _A = [self._extract_mel_features(__lowerCAmelCase ) for waveform in speech] _A = BatchFeature({'''input_values''': features} ) _A = self.num_mel_bins else: _A = BatchFeature({'''input_values''': speech} ) _A = self.pad( __lowerCAmelCase , padding=__lowerCAmelCase , max_length=__lowerCAmelCase , truncation=__lowerCAmelCase , pad_to_multiple_of=__lowerCAmelCase , return_attention_mask=__lowerCAmelCase , **__lowerCAmelCase , ) _A = feature_size_hack # convert input values to correct format _A = 
padded_inputs['''input_values'''] if not isinstance(input_values[0] , np.ndarray ): _A = [np.asarray(__lowerCAmelCase , dtype=np.floataa ) for array in input_values] elif ( not isinstance(__lowerCAmelCase , np.ndarray ) and isinstance(input_values[0] , np.ndarray ) and input_values[0].dtype is np.dtype(np.floataa ) ): _A = [array.astype(np.floataa ) for array in input_values] elif isinstance(__lowerCAmelCase , np.ndarray ) and input_values.dtype is np.dtype(np.floataa ): _A = input_values.astype(np.floataa ) # convert attention_mask to correct format _A = padded_inputs.get('''attention_mask''' ) if attention_mask is not None: _A = [np.asarray(__lowerCAmelCase , dtype=np.intaa ) for array in attention_mask] # zero-mean and unit-variance normalization if not is_target and self.do_normalize: _A = ( attention_mask if self._get_padding_strategies(__lowerCAmelCase , max_length=__lowerCAmelCase ) is not PaddingStrategy.DO_NOT_PAD else None ) _A = self.zero_mean_unit_var_norm( padded_inputs['''input_values'''] , attention_mask=__lowerCAmelCase , padding_value=self.padding_value ) if return_tensors is not None: _A = padded_inputs.convert_to_tensors(__lowerCAmelCase ) return padded_inputs def snake_case_ ( self : Union[str, Any] ) -> Dict[str, Any]: _A = super().to_dict() # Don't serialize these as they are derived from the other properties. _A = ['''window''', '''mel_filters''', '''sample_size''', '''sample_stride''', '''n_fft''', '''n_freqs'''] for name in names: if name in output: del output[name] return output
2
import os from shutil import copyfile from typing import Any, Dict, List, Optional, Tuple import sentencepiece as spm from ...tokenization_utils import AddedToken, PreTrainedTokenizer from ...utils import logging UpperCAmelCase_ = logging.get_logger(__name__) UpperCAmelCase_ = """▁""" UpperCAmelCase_ = {"""vocab_file""": """sentencepiece.bpe.model""", """monolingual_vocab_file""": """dict.txt"""} UpperCAmelCase_ = { """vocab_file""": { """vinai/bartpho-syllable""": """https://huggingface.co/vinai/bartpho-syllable/resolve/main/sentencepiece.bpe.model""", }, """monolingual_vocab_file""": { """vinai/bartpho-syllable""": """https://huggingface.co/vinai/bartpho-syllable/resolve/main/dict.txt""", }, } UpperCAmelCase_ = {"""vinai/bartpho-syllable""": 1_0_2_4} class lowerCamelCase__ ( _A): """simple docstring""" a__ : int = VOCAB_FILES_NAMES a__ : Tuple = PRETRAINED_VOCAB_FILES_MAP a__ : Optional[int] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES a__ : Tuple = ["input_ids", "attention_mask"] def __init__( self : Union[str, Any] , __lowerCAmelCase : Dict , __lowerCAmelCase : List[str] , __lowerCAmelCase : Union[str, Any]="<s>" , __lowerCAmelCase : Dict="</s>" , __lowerCAmelCase : List[Any]="</s>" , __lowerCAmelCase : Optional[Any]="<s>" , __lowerCAmelCase : Tuple="<unk>" , __lowerCAmelCase : int="<pad>" , __lowerCAmelCase : Optional[Any]="<mask>" , __lowerCAmelCase : Optional[Dict[str, Any]] = None , **__lowerCAmelCase : Tuple , ) -> None: # Mask token behave like a normal word, i.e. include the space before it _A = AddedToken(__lowerCAmelCase , lstrip=__lowerCAmelCase , rstrip=__lowerCAmelCase ) if isinstance(__lowerCAmelCase , __lowerCAmelCase ) else mask_token _A = {} if sp_model_kwargs is None else sp_model_kwargs super().__init__( bos_token=__lowerCAmelCase , eos_token=__lowerCAmelCase , unk_token=__lowerCAmelCase , sep_token=__lowerCAmelCase , cls_token=__lowerCAmelCase , pad_token=__lowerCAmelCase , mask_token=__lowerCAmelCase , sp_model_kwargs=self.sp_model_kwargs , **__lowerCAmelCase , ) _A = vocab_file _A = monolingual_vocab_file _A = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(str(__lowerCAmelCase ) ) # Load the reduced vocab # Keep order of special tokens for backward compatibility _A = {} _A = 0 for token in [bos_token, pad_token, eos_token, unk_token, sep_token, cls_token]: if str(__lowerCAmelCase ) not in self.fairseq_tokens_to_ids: _A = cnt cnt += 1 with open(__lowerCAmelCase , '''r''' , encoding='''utf-8''' ) as f: for line in f.readlines(): _A = line.strip().split()[0] _A = len(self.fairseq_tokens_to_ids ) if str(__lowerCAmelCase ) not in self.fairseq_tokens_to_ids: _A = len(self.fairseq_tokens_to_ids ) _A = {v: k for k, v in self.fairseq_tokens_to_ids.items()} def __getstate__( self : Any ) -> List[Any]: _A = self.__dict__.copy() _A = None _A = self.sp_model.serialized_model_proto() return state def __setstate__( self : Union[str, Any] , __lowerCAmelCase : Dict ) -> List[Any]: _A = d # for backward compatibility if not hasattr(self , '''sp_model_kwargs''' ): _A = {} _A = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.LoadFromSerializedProto(self.sp_model_proto ) def snake_case_ ( self : Optional[Any] , __lowerCAmelCase : List[int] , __lowerCAmelCase : Optional[List[int]] = None ) -> List[int]: if token_ids_a is None: return [self.cls_token_id] + token_ids_a + [self.sep_token_id] _A = [self.cls_token_id] _A = [self.sep_token_id] return cls + token_ids_a + sep + sep + token_ids_a + sep def snake_case_ ( self : List[Any] , __lowerCAmelCase : 
List[int] , __lowerCAmelCase : Optional[List[int]] = None , __lowerCAmelCase : bool = False ) -> List[int]: if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=__lowerCAmelCase , token_ids_a=__lowerCAmelCase , already_has_special_tokens=__lowerCAmelCase ) if token_ids_a is None: return [1] + ([0] * len(__lowerCAmelCase )) + [1] return [1] + ([0] * len(__lowerCAmelCase )) + [1, 1] + ([0] * len(__lowerCAmelCase )) + [1] def snake_case_ ( self : Any , __lowerCAmelCase : List[int] , __lowerCAmelCase : Optional[List[int]] = None ) -> List[int]: _A = [self.sep_token_id] _A = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0] @property def snake_case_ ( self : Optional[int] ) -> Union[str, Any]: return len(self.fairseq_ids_to_tokens ) def snake_case_ ( self : Dict ) -> Optional[Any]: _A = {self.convert_ids_to_tokens(__lowerCAmelCase ): i for i in range(self.vocab_size )} vocab.update(self.added_tokens_encoder ) return vocab def snake_case_ ( self : List[str] , __lowerCAmelCase : str ) -> List[str]: return self.sp_model.encode(__lowerCAmelCase , out_type=__lowerCAmelCase ) def snake_case_ ( self : str , __lowerCAmelCase : Optional[Any] ) -> Dict: if token in self.fairseq_tokens_to_ids: return self.fairseq_tokens_to_ids[token] else: return self.unk_token_id def snake_case_ ( self : int , __lowerCAmelCase : Optional[int] ) -> List[str]: return self.fairseq_ids_to_tokens[index] def snake_case_ ( self : List[str] , __lowerCAmelCase : Union[str, Any] ) -> Tuple: _A = ''''''.join(__lowerCAmelCase ).replace(__lowerCAmelCase , ''' ''' ).strip() return out_string def snake_case_ ( self : str , __lowerCAmelCase : str , __lowerCAmelCase : Optional[str] = None ) -> Tuple[str]: if not os.path.isdir(__lowerCAmelCase ): logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' ) return _A = os.path.join( __lowerCAmelCase , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] ) _A = os.path.join( __lowerCAmelCase , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''monolingual_vocab_file'''] , ) if os.path.abspath(self.vocab_file ) != os.path.abspath(__lowerCAmelCase ) and os.path.isfile(self.vocab_file ): copyfile(self.vocab_file , __lowerCAmelCase ) elif not os.path.isfile(self.vocab_file ): with open(__lowerCAmelCase , '''wb''' ) as fi: _A = self.sp_model.serialized_model_proto() fi.write(__lowerCAmelCase ) if os.path.abspath(self.monolingual_vocab_file ) != os.path.abspath( __lowerCAmelCase ) and os.path.isfile(self.monolingual_vocab_file ): copyfile(self.monolingual_vocab_file , __lowerCAmelCase ) elif not os.path.isfile(self.monolingual_vocab_file ): with open(__lowerCAmelCase , '''w''' , encoding='''utf-8''' ) as fp: for token in self.fairseq_tokens_to_ids: if token not in self.all_special_tokens: fp.write(f'''{str(__lowerCAmelCase )} \n''' ) return out_vocab_file, out_monolingual_vocab_file
2
1
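The `zero_mean_unit_var_norm` helper in the feature extractor above boils down to this numpy one-liner per utterance (padding and attention-mask handling omitted for brevity):

import numpy as np


def normalize(waveform: np.ndarray) -> np.ndarray:
    # Per-utterance zero-mean, unit-variance normalization.
    return (waveform - waveform.mean()) / np.sqrt(waveform.var() + 1e-7)


x = np.array([0.1, 0.4, -0.2, 0.3], dtype=np.float32)
y = normalize(x)
print(y.mean(), y.var())  # ~0.0, ~1.0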
import numpy as np import torch from torch.utils.data import Dataset, IterableDataset from ..utils.generic import ModelOutput class lowerCamelCase__ ( _A): """simple docstring""" def __init__( self : Tuple , __lowerCAmelCase : Optional[int] , __lowerCAmelCase : Optional[int] , __lowerCAmelCase : List[Any] ) -> str: _A = dataset _A = process _A = params def __len__( self : Any ) -> Union[str, Any]: return len(self.dataset ) def __getitem__( self : Dict , __lowerCAmelCase : Union[str, Any] ) -> Any: _A = self.dataset[i] _A = self.process(__lowerCAmelCase , **self.params ) return processed class lowerCamelCase__ ( _A): """simple docstring""" def __init__( self : Dict , __lowerCAmelCase : Tuple , __lowerCAmelCase : str , __lowerCAmelCase : Dict , __lowerCAmelCase : Optional[int]=None ) -> int: _A = loader _A = infer _A = params if loader_batch_size == 1: # Let's spare some time by deactivating altogether _A = None _A = loader_batch_size # Internal bookkeeping _A = None _A = None def __len__( self : List[Any] ) -> Optional[int]: return len(self.loader ) def __iter__( self : Any ) -> Optional[int]: _A = iter(self.loader ) return self def snake_case_ ( self : Union[str, Any] ) -> Optional[int]: if isinstance(self._loader_batch_data , torch.Tensor ): # Batch data is simple tensor, just fetch the slice _A = self._loader_batch_data[self._loader_batch_index] else: # Batch data is assumed to be BaseModelOutput (or dict) _A = {} for k, element in self._loader_batch_data.items(): if isinstance(__lowerCAmelCase , __lowerCAmelCase ): # Convert ModelOutput to tuple first _A = element.to_tuple() if isinstance(element[0] , torch.Tensor ): _A = tuple(el[self._loader_batch_index].unsqueeze(0 ) for el in element ) elif isinstance(element[0] , np.ndarray ): _A = tuple(np.expand_dims(el[self._loader_batch_index] , 0 ) for el in element ) continue if k in {"hidden_states", "past_key_values", "attentions"} and isinstance(__lowerCAmelCase , __lowerCAmelCase ): # Those are stored as lists of tensors so need specific unbatching. if isinstance(element[0] , torch.Tensor ): _A = tuple(el[self._loader_batch_index].unsqueeze(0 ) for el in element ) elif isinstance(element[0] , np.ndarray ): _A = tuple(np.expand_dims(el[self._loader_batch_index] , 0 ) for el in element ) continue if element is None: # This can happen for optional data that get passed around _A = None elif isinstance(element[self._loader_batch_index] , torch.Tensor ): # Take correct batch data, but make it looked like batch_size=1 # For compatibility with other methods within transformers _A = element[self._loader_batch_index].unsqueeze(0 ) elif isinstance(element[self._loader_batch_index] , np.ndarray ): # Take correct batch data, but make it looked like batch_size=1 # For compatibility with other methods within transformers _A = np.expand_dims(element[self._loader_batch_index] , 0 ) else: # This is typically a list, so no need to `unsqueeze`. 
_A = element[self._loader_batch_index] # Recreate the element by reusing the original class to make it look # batch_size=1 _A = self._loader_batch_data.__class__(__lowerCAmelCase ) self._loader_batch_index += 1 return result def snake_case_ ( self : str ) -> str: if self._loader_batch_index is not None and self._loader_batch_index < self.loader_batch_size: # We are currently unrolling a batch so we just need to return # the current item within a batch return self.loader_batch_item() # We're out of items within a batch _A = next(self.iterator ) _A = self.infer(__lowerCAmelCase , **self.params ) # We now have a batch of "inferred things". if self.loader_batch_size is not None: # Try to infer the size of the batch if isinstance(__lowerCAmelCase , torch.Tensor ): _A = processed else: _A = list(processed.keys() )[0] _A = processed[key] if isinstance(__lowerCAmelCase , __lowerCAmelCase ): _A = len(__lowerCAmelCase ) else: _A = first_tensor.shape[0] if 0 < observed_batch_size < self.loader_batch_size: # could be last batch so we can't unroll as many # elements. _A = observed_batch_size # Setting internal index to unwrap the batch _A = processed _A = 0 return self.loader_batch_item() else: # We're not unrolling batches return processed class lowerCamelCase__ ( _A): """simple docstring""" def __init__( self : Dict , __lowerCAmelCase : Optional[int] , __lowerCAmelCase : int , __lowerCAmelCase : int , __lowerCAmelCase : str=None ) -> int: super().__init__(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) def __iter__( self : Optional[Any] ) -> str: _A = iter(self.loader ) _A = None return self def snake_case_ ( self : List[str] ) -> int: if self.subiterator is None: _A = self.infer(next(self.iterator ) , **self.params ) try: # Try to return next item _A = next(self.subiterator ) except StopIteration: # When a preprocess iterator ends, we can start lookig at the next item # ChunkIterator will keep feeding until ALL elements of iterator # all have created their subiterator and have been iterating against. # # Another way to look at it, is we're basically flattening lists of lists # into a single list, but with generators _A = self.infer(next(self.iterator ) , **self.params ) _A = next(self.subiterator ) return processed class lowerCamelCase__ ( _A): """simple docstring""" def __iter__( self : Optional[int] ) -> Optional[int]: _A = iter(self.loader ) return self def snake_case_ ( self : List[Any] ) -> Dict: # Extremely similar to PipelineIterator in its unpacking mechanism # BUT, we have an extra required item which is the presence of `is_last` # That is because everything is flattened by `PipelineChunkIterator` we # need to keep track of how to regroup here in the original `process` # boundaries so that `process` and `postprocess` see the same data. # This iterator accumulates items (possibly while unbatching) until it # its a `is_last` and then just passes it on to the caller. 
_A = False _A = [] if self._loader_batch_index is not None and self._loader_batch_index < self.loader_batch_size: while self._loader_batch_index < self.loader_batch_size: _A = self.loader_batch_item() _A = item.pop('''is_last''' ) accumulator.append(__lowerCAmelCase ) if is_last: return accumulator while not is_last: _A = self.infer(next(self.iterator ) , **self.params ) if self.loader_batch_size is not None: if isinstance(__lowerCAmelCase , torch.Tensor ): _A = processed else: _A = list(processed.keys() )[0] _A = processed[key] if isinstance(__lowerCAmelCase , __lowerCAmelCase ): _A = len(__lowerCAmelCase ) else: _A = first_tensor.shape[0] if 0 < observed_batch_size < self.loader_batch_size: # could be last batch so we can't unroll as many # elements. _A = observed_batch_size _A = processed _A = 0 while self._loader_batch_index < self.loader_batch_size: _A = self.loader_batch_item() _A = item.pop('''is_last''' ) accumulator.append(__lowerCAmelCase ) if is_last: return accumulator else: _A = processed _A = item.pop('''is_last''' ) accumulator.append(__lowerCAmelCase ) return accumulator class lowerCamelCase__ ( _A): """simple docstring""" def __init__( self : Optional[int] , __lowerCAmelCase : Dataset , __lowerCAmelCase : str ) -> List[str]: _A = dataset _A = key def __len__( self : Optional[Any] ) -> str: return len(self.dataset ) def __getitem__( self : Optional[int] , __lowerCAmelCase : int ) -> Tuple: return self.dataset[i][self.key] class lowerCamelCase__ ( _A): """simple docstring""" def __init__( self : int , __lowerCAmelCase : Dataset , __lowerCAmelCase : str , __lowerCAmelCase : str ) -> List[str]: _A = dataset _A = keya _A = keya def __len__( self : Union[str, Any] ) -> Optional[int]: return len(self.dataset ) def __getitem__( self : Tuple , __lowerCAmelCase : Optional[Any] ) -> Any: return {"text": self.dataset[i][self.keya], "text_pair": self.dataset[i][self.keya]}
2
from __future__ import annotations


def depth_first_search(graph: dict, start: str) -> set[str]:
    explored, stack = set(start), [start]
    while stack:
        v = stack.pop()
        explored.add(v)
        # Differences from BFS:
        # 1) pop last element instead of first one
        # 2) add adjacent elements to stack without exploring them
        for adj in reversed(graph[v]):
            if adj not in explored:
                stack.append(adj)
    return explored


G = {
    """A""": ["""B""", """C""", """D"""],
    """B""": ["""A""", """D""", """E"""],
    """C""": ["""A""", """F"""],
    """D""": ["""B""", """D"""],
    """E""": ["""B""", """F"""],
    """F""": ["""C""", """E""", """G"""],
    """G""": ["""F"""],
}

if __name__ == "__main__":
    import doctest

    doctest.testmod()
    print(depth_first_search(G, """A"""))
2
1
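The iterative DFS above can also be written recursively; this equivalent sketch visits neighbours in their listed order instead of reversing them onto an explicit stack:

def depth_first_search_recursive(graph: dict, start: str, explored=None) -> set:
    if explored is None:
        explored = set()
    explored.add(start)
    for adj in graph[start]:
        if adj not in explored:
            depth_first_search_recursive(graph, adj, explored)
    return explored


G = {"A": ["B", "C"], "B": ["A", "C"], "C": ["A", "B"]}
print(depth_first_search_recursive(G, "A"))  # {'A', 'B', 'C'}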
from functools import reduce

N = (
    """73167176531330624919225119674426574742355349194934"""
    """96983520312774506326239578318016984801869478851843"""
    """85861560789112949495459501737958331952853208805511"""
    """12540698747158523863050715693290963295227443043557"""
    """66896648950445244523161731856403098711121722383113"""
    """62229893423380308135336276614282806444486645238749"""
    """30358907296290491560440772390713810515859307960866"""
    """70172427121883998797908792274921901699720888093776"""
    """65727333001053367881220235421809751254540594752243"""
    """52584907711670556013604839586446706324415722155397"""
    """53697817977846174064955149290862569321978468622482"""
    """83972241375657056057490261407972968652414535100474"""
    """82166370484403199890008895243450658541227588666881"""
    """16427171479924442928230863465674813919123162824586"""
    """17866458359124566529476545682848912883142607690042"""
    """24219022671055626321111109370544217506941658960408"""
    """07198403850962455444362981230987879927244284909188"""
    """84580156166097919133875499200524063689912560717606"""
    """05886116467109405077541002256983155200055935729725"""
    """71636269561882670428252483600823257530420752963450"""
)


def solution(n: str = N) -> int:
    return max(
        # mypy cannot properly interpret reduce
        int(reduce(lambda x, y: str(int(x) * int(y)), n[i : i + 13]))
        for i in range(len(n) - 12)
    )


if __name__ == "__main__":
    print(f'{solution() = }')
2
from .configuration_bert_masked import MaskedBertConfig from .modeling_bert_masked import ( MaskedBertForMultipleChoice, MaskedBertForQuestionAnswering, MaskedBertForSequenceClassification, MaskedBertForTokenClassification, MaskedBertModel, ) from .modules import *
2
1
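The reduce-based Project Euler solution above has a plain-loop equivalent that may be easier to follow; `digits` stands in for the 1000-digit constant `N`:

def largest_product_of_13(digits: str) -> int:
    best = 0
    for i in range(len(digits) - 12):
        product = 1
        for ch in digits[i : i + 13]:
            product *= int(ch)
        best = max(best, product)
    return best


print(largest_product_of_13("73167176531330624919225119674426574742355349"))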
import math

import numpy as np
import qiskit
from qiskit import Aer, ClassicalRegister, QuantumCircuit, QuantumRegister, execute


def quantum_fourier_transform(number_of_qubits: int = 3) -> qiskit.result.counts.Counts:
    if isinstance(number_of_qubits, str):
        raise TypeError('''number of qubits must be an integer.''')
    if number_of_qubits <= 0:
        raise ValueError('''number of qubits must be > 0.''')
    if math.floor(number_of_qubits) != number_of_qubits:
        raise ValueError('''number of qubits must be exact integer.''')
    if number_of_qubits > 10:
        raise ValueError('''number of qubits too large to simulate(>10).''')

    qr = QuantumRegister(number_of_qubits, '''qr''')
    cr = ClassicalRegister(number_of_qubits, '''cr''')

    quantum_circuit = QuantumCircuit(qr, cr)

    counter = number_of_qubits

    for i in range(counter):
        quantum_circuit.h(number_of_qubits - i - 1)
        counter -= 1
        for j in range(counter):
            quantum_circuit.cp(np.pi / 2 ** (counter - j), j, counter)

    for k in range(number_of_qubits // 2):
        quantum_circuit.swap(k, number_of_qubits - k - 1)

    # measure all the qubits
    quantum_circuit.measure(qr, cr)
    # simulate with 10000 shots
    backend = Aer.get_backend('''qasm_simulator''')
    job = execute(quantum_circuit, backend, shots=10_000)

    return job.result().get_counts(quantum_circuit)


if __name__ == "__main__":
    print(f'Total count for quantum fourier transform state is: {quantum_fourier_transform(3)}')
2
import warnings from ...configuration_utils import PretrainedConfig from ...utils import logging UpperCAmelCase_ = logging.get_logger(__name__) UpperCAmelCase_ = { """xlnet-base-cased""": """https://huggingface.co/xlnet-base-cased/resolve/main/config.json""", """xlnet-large-cased""": """https://huggingface.co/xlnet-large-cased/resolve/main/config.json""", } class lowerCamelCase__ ( _A): """simple docstring""" a__ : Any = "xlnet" a__ : Dict = ["mems"] a__ : List[str] = { "n_token": "vocab_size", # Backward compatibility "hidden_size": "d_model", "num_attention_heads": "n_head", "num_hidden_layers": "n_layer", } def __init__( self : int , __lowerCAmelCase : Dict=3_20_00 , __lowerCAmelCase : List[str]=10_24 , __lowerCAmelCase : Dict=24 , __lowerCAmelCase : Optional[Any]=16 , __lowerCAmelCase : Dict=40_96 , __lowerCAmelCase : Any="gelu" , __lowerCAmelCase : int=True , __lowerCAmelCase : List[str]="bi" , __lowerCAmelCase : Dict=0.02 , __lowerCAmelCase : Union[str, Any]=1E-12 , __lowerCAmelCase : Optional[Any]=0.1 , __lowerCAmelCase : Optional[Any]=5_12 , __lowerCAmelCase : List[str]=None , __lowerCAmelCase : Tuple=True , __lowerCAmelCase : Tuple=False , __lowerCAmelCase : Optional[Any]=False , __lowerCAmelCase : Union[str, Any]=-1 , __lowerCAmelCase : Optional[Any]=False , __lowerCAmelCase : Any="last" , __lowerCAmelCase : List[Any]=True , __lowerCAmelCase : Tuple="tanh" , __lowerCAmelCase : int=0.1 , __lowerCAmelCase : str=5 , __lowerCAmelCase : str=5 , __lowerCAmelCase : List[str]=5 , __lowerCAmelCase : List[str]=1 , __lowerCAmelCase : Optional[int]=2 , **__lowerCAmelCase : List[str] , ) -> Tuple: _A = vocab_size _A = d_model _A = n_layer _A = n_head if d_model % n_head != 0: raise ValueError(f'''\'d_model % n_head\' ({d_model % n_head}) should be equal to 0''' ) if "d_head" in kwargs: if kwargs["d_head"] != d_model // n_head: raise ValueError( f'''`d_head` ({kwargs['d_head']}) should be equal to `d_model // n_head` ({d_model // n_head})''' ) _A = d_model // n_head _A = ff_activation _A = d_inner _A = untie_r _A = attn_type _A = initializer_range _A = layer_norm_eps _A = dropout _A = mem_len _A = reuse_len _A = bi_data _A = clamp_len _A = same_length _A = summary_type _A = summary_use_proj _A = summary_activation _A = summary_last_dropout _A = start_n_top _A = end_n_top _A = bos_token_id _A = pad_token_id _A = eos_token_id if "use_cache" in kwargs: warnings.warn( '''The `use_cache` argument is deprecated and will be removed in a future version, use `use_mems_eval`''' ''' instead.''' , __lowerCAmelCase , ) _A = kwargs['''use_cache'''] _A = use_mems_eval _A = use_mems_train super().__init__(pad_token_id=__lowerCAmelCase , bos_token_id=__lowerCAmelCase , eos_token_id=__lowerCAmelCase , **__lowerCAmelCase ) @property def snake_case_ ( self : Optional[Any] ) -> Union[str, Any]: logger.info(f'''The model {self.model_type} is one of the few models that has no sequence length limit.''' ) return -1 @max_position_embeddings.setter def snake_case_ ( self : Tuple , __lowerCAmelCase : Optional[Any] ) -> Dict: # Message copied from Transformer-XL documentation raise NotImplementedError( f'''The model {self.model_type} is one of the few models that has no sequence length limit.''' )
2
1
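As a sanity check on what the circuit above computes: the QFT on n qubits is the unitary DFT matrix F[j, k] = omega**(j*k) / sqrt(N) with N = 2**n. A quick numpy verification of unitarity for n = 3:

import numpy as np

n = 3
N = 2 ** n
omega = np.exp(2j * np.pi / N)
# Build the DFT matrix and check F^dagger F = I.
F = np.array([[omega ** (j * k) for k in range(N)] for j in range(N)]) / np.sqrt(N)
print(np.allclose(F.conj().T @ F, np.eye(N)))  # True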
# Copyright 2023 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import copy import importlib.metadata import json import os from dataclasses import dataclass from typing import Any, Dict, Union from packaging import version from ..utils import is_torch_available, logging if is_torch_available(): import torch UpperCAmelCase_ = logging.get_logger(__name__) @dataclass class lowerCamelCase__ : """simple docstring""" def __init__( self : List[Any] , __lowerCAmelCase : Dict=False , __lowerCAmelCase : str=False , __lowerCAmelCase : Optional[int]=6.0 , __lowerCAmelCase : Dict=None , __lowerCAmelCase : Dict=False , __lowerCAmelCase : Any=False , __lowerCAmelCase : str=None , __lowerCAmelCase : Tuple="fp4" , __lowerCAmelCase : Dict=False , **__lowerCAmelCase : List[str] , ) -> List[Any]: _A = load_in_abit _A = load_in_abit _A = llm_inta_threshold _A = llm_inta_skip_modules _A = llm_inta_enable_fpaa_cpu_offload _A = llm_inta_has_fpaa_weight _A = bnb_abit_quant_type _A = bnb_abit_use_double_quant if bnb_abit_compute_dtype is None: _A = torch.floataa elif isinstance(__lowerCAmelCase , __lowerCAmelCase ): _A = getattr(__lowerCAmelCase , __lowerCAmelCase ) elif isinstance(__lowerCAmelCase , torch.dtype ): _A = bnb_abit_compute_dtype else: raise ValueError('''bnb_4bit_compute_dtype must be a string or a torch.dtype''' ) self.post_init() def snake_case_ ( self : Optional[Any] ) -> str: if not isinstance(self.llm_inta_threshold , __lowerCAmelCase ): raise ValueError('''llm_int8_threshold must be a float''' ) if self.llm_inta_skip_modules is not None and not isinstance(self.llm_inta_skip_modules , __lowerCAmelCase ): raise ValueError('''llm_int8_skip_modules must be a list of strings''' ) if not isinstance(self.llm_inta_enable_fpaa_cpu_offload , __lowerCAmelCase ): raise ValueError('''llm_int8_enable_fp32_cpu_offload must be a boolean''' ) if not isinstance(self.llm_inta_has_fpaa_weight , __lowerCAmelCase ): raise ValueError('''llm_int8_has_fp16_weight must be a boolean''' ) if self.bnb_abit_compute_dtype is not None and not isinstance(self.bnb_abit_compute_dtype , torch.dtype ): raise ValueError('''bnb_4bit_compute_dtype must be torch.dtype''' ) if not isinstance(self.bnb_abit_quant_type , __lowerCAmelCase ): raise ValueError('''bnb_4bit_quant_type must be a string''' ) if not isinstance(self.bnb_abit_use_double_quant , __lowerCAmelCase ): raise ValueError('''bnb_4bit_use_double_quant must be a boolean''' ) if self.load_in_abit and not version.parse(importlib.metadata.version('''bitsandbytes''' ) ) >= version.parse( '''0.39.0''' ): raise ValueError( '''4 bit quantization requires bitsandbytes>=0.39.0 - please upgrade your bitsandbytes version''' ) def snake_case_ ( self : Dict ) -> str: return self.load_in_abit or self.load_in_abit def snake_case_ ( self : Union[str, Any] ) -> Tuple: if self.load_in_abit: return "llm_int8" elif self.load_in_abit and self.bnb_abit_quant_type == "fp4": return "fp4" elif self.load_in_abit and self.bnb_abit_quant_type == "nf4": return "nf4" 
else: return None @classmethod def snake_case_ ( cls : List[Any] , __lowerCAmelCase : List[Any] , __lowerCAmelCase : Optional[Any] , **__lowerCAmelCase : Optional[Any] ) -> Optional[Any]: _A = cls(**__lowerCAmelCase ) _A = [] for key, value in kwargs.items(): if hasattr(__lowerCAmelCase , __lowerCAmelCase ): setattr(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) to_remove.append(__lowerCAmelCase ) for key in to_remove: kwargs.pop(__lowerCAmelCase , __lowerCAmelCase ) if return_unused_kwargs: return config, kwargs else: return config def snake_case_ ( self : Dict , __lowerCAmelCase : Union[str, os.PathLike] ) -> Optional[int]: with open(__lowerCAmelCase , '''w''' , encoding='''utf-8''' ) as writer: _A = self.to_dict() _A = json.dumps(__lowerCAmelCase , indent=2 , sort_keys=__lowerCAmelCase ) + '''\n''' writer.write(__lowerCAmelCase ) def snake_case_ ( self : Optional[int] ) -> Dict[str, Any]: _A = copy.deepcopy(self.__dict__ ) _A = str(output['''bnb_4bit_compute_dtype'''] ).split('''.''' )[1] return output def __repr__( self : Optional[int] ) -> List[str]: return f'''{self.__class__.__name__} {self.to_json_string()}''' def snake_case_ ( self : str , __lowerCAmelCase : bool = True ) -> str: if use_diff is True: _A = self.to_diff_dict() else: _A = self.to_dict() return json.dumps(__lowerCAmelCase , indent=2 , sort_keys=__lowerCAmelCase ) + "\n" def snake_case_ ( self : Tuple ) -> Dict[str, Any]: _A = self.to_dict() # get the default config dict _A = BitsAndBytesConfig().to_dict() _A = {} # only serialize values that differ from the default config for key, value in config_dict.items(): if value != default_config_dict[key]: _A = value return serializable_config_dict
2
def base16_encode(data: bytes) -> str:
    return "".join([hex(byte)[2:].zfill(2).upper() for byte in list(data)])


def base16_decode(data: str) -> bytes:
    # Check data validity, following RFC3548
    # https://www.ietf.org/rfc/rfc3548.txt
    if (len(data) % 2) != 0:
        raise ValueError(
            '''Base16 encoded data is invalid: Data does not have an even number of hex digits.''')
    # Check the character set - the standard base16 alphabet
    # is uppercase according to RFC3548 section 6
    if not set(data) <= set('''0123456789ABCDEF'''):
        raise ValueError(
            '''Base16 encoded data is invalid: Data is not uppercase hex or it contains invalid characters.''')
    # For every two hexadecimal digits (= a byte), turn it into an integer.
    # Then, string the result together into bytes, and return it.
    return bytes(int(data[i] + data[i + 1], 16) for i in range(0, len(data), 2))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
2
1
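The base16 helpers above should round-trip with the stdlib, which makes a handy cross-check (`bytes.hex()` is lowercase, hence the `.upper()`):

payload = b"Hello"
encoded = payload.hex().upper()
print(encoded)                 # 48656C6C6F
print(bytes.fromhex(encoded))  # b'Hello'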
red = 0  # The first color of the flag.
white = 1  # The second color of the flag.
blue = 2  # The third color of the flag.
colors = (red, white, blue)


def dutch_national_flag_sort(sequence: list) -> list:
    if not sequence:
        return []
    if len(sequence) == 1:
        return list(sequence)
    low = 0
    high = len(sequence) - 1
    mid = 0
    while mid <= high:
        if sequence[mid] == colors[0]:
            sequence[low], sequence[mid] = sequence[mid], sequence[low]
            low += 1
            mid += 1
        elif sequence[mid] == colors[1]:
            mid += 1
        elif sequence[mid] == colors[2]:
            sequence[mid], sequence[high] = sequence[high], sequence[mid]
            high -= 1
        else:
            msg = f'''The elements inside the sequence must contain only {colors} values'''
            raise ValueError(msg)
    return sequence


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    user_input = input("""Enter numbers separated by commas:\n""").strip()
    unsorted = [int(item.strip()) for item in user_input.split(""",""")]
    print(f'{dutch_national_flag_sort(unsorted)}')
2
def is_arithmetic_series(series: list) -> bool:
    if not isinstance(series, list):
        raise ValueError('''Input series is not valid, valid series - [2, 4, 6]''')
    if len(series) == 0:
        raise ValueError('''Input list must be a non empty list''')
    if len(series) == 1:
        return True
    common_diff = series[1] - series[0]
    for index in range(len(series) - 1):
        if series[index + 1] - series[index] != common_diff:
            return False
    return True


def arithmetic_mean(series: list) -> float:
    if not isinstance(series, list):
        raise ValueError('''Input series is not valid, valid series - [2, 4, 6]''')
    if len(series) == 0:
        raise ValueError('''Input list must be a non empty list''')
    answer = 0
    for val in series:
        answer += val
    return answer / len(series)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
2
1
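A quick property test for the flag sort above, assuming the fixed `dutch_national_flag_sort` name from the snippet; it compares against `sorted()` on random 0/1/2 lists:

import random

for _ in range(100):
    values = [random.choice((0, 1, 2)) for _ in range(20)]
    # Copy before sorting in place, then compare with the reference order.
    assert dutch_national_flag_sort(list(values)) == sorted(values)
print("all checks passed")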
class PrefixSum:
    def __init__(self, array: list[int]) -> None:
        len_array = len(array)
        self.prefix_sum = [0] * len_array

        if len_array > 0:
            self.prefix_sum[0] = array[0]

        for i in range(1, len_array):
            self.prefix_sum[i] = self.prefix_sum[i - 1] + array[i]

    def get_sum(self, start: int, end: int) -> int:
        if start == 0:
            return self.prefix_sum[end]
        return self.prefix_sum[end] - self.prefix_sum[start - 1]

    def contains_sum(self, target_sum: int) -> bool:
        sums = {0}
        for sum_item in self.prefix_sum:
            if sum_item - target_sum in sums:
                return True
            sums.add(sum_item)
        return False


if __name__ == "__main__":
    import doctest

    doctest.testmod()
2
import math

import numpy as np
import qiskit
from qiskit import Aer, ClassicalRegister, QuantumCircuit, QuantumRegister, execute


def quantum_fourier_transform(number_of_qubits: int = 3) -> qiskit.result.counts.Counts:
    if isinstance(number_of_qubits, str):
        raise TypeError('''number of qubits must be an integer.''')
    if number_of_qubits <= 0:
        raise ValueError('''number of qubits must be > 0.''')
    if math.floor(number_of_qubits) != number_of_qubits:
        raise ValueError('''number of qubits must be exact integer.''')
    if number_of_qubits > 10:
        raise ValueError('''number of qubits too large to simulate(>10).''')

    qr = QuantumRegister(number_of_qubits, '''qr''')
    cr = ClassicalRegister(number_of_qubits, '''cr''')

    quantum_circuit = QuantumCircuit(qr, cr)

    counter = number_of_qubits

    for i in range(counter):
        quantum_circuit.h(number_of_qubits - i - 1)
        counter -= 1
        for j in range(counter):
            quantum_circuit.cp(np.pi / 2 ** (counter - j), j, counter)

    for k in range(number_of_qubits // 2):
        quantum_circuit.swap(k, number_of_qubits - k - 1)

    # measure all the qubits
    quantum_circuit.measure(qr, cr)
    # simulate with 10000 shots
    backend = Aer.get_backend('''qasm_simulator''')
    job = execute(quantum_circuit, backend, shots=10_000)

    return job.result().get_counts(quantum_circuit)


if __name__ == "__main__":
    print(f'Total count for quantum fourier transform state is: {quantum_fourier_transform(3)}')
2
1
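The `PrefixSum` queries above can be reproduced with `itertools.accumulate`, a useful mental model for the class: store cumulative sums once, then answer each range query with one subtraction:

from itertools import accumulate

array = [3, 1, 4, 1, 5]
prefix = [0] + list(accumulate(array))


def range_sum(start: int, end: int) -> int:
    # Sum of array[start:end + 1].
    return prefix[end + 1] - prefix[start]


print(range_sum(1, 3))  # 1 + 4 + 1 = 6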
import os
from pathlib import Path


def load_cuda_kernels():
    from torch.utils.cpp_extension import load

    root = Path(__file__).resolve().parent.parent.parent / '''kernels''' / '''deformable_detr'''
    src_files = [
        root / filename
        for filename in [
            '''vision.cpp''',
            os.path.join('''cpu''', '''ms_deform_attn_cpu.cpp'''),
            os.path.join('''cuda''', '''ms_deform_attn_cuda.cu'''),
        ]
    ]

    load(
        '''MultiScaleDeformableAttention''',
        src_files,
        with_cuda=True,
        extra_include_paths=[str(root)],
        extra_cflags=['''-DWITH_CUDA=1'''],
        extra_cuda_cflags=[
            '''-DCUDA_HAS_FP16=1''',
            '''-D__CUDA_NO_HALF_OPERATORS__''',
            '''-D__CUDA_NO_HALF_CONVERSIONS__''',
            '''-D__CUDA_NO_HALF2_OPERATORS__''',
        ],
    )

    import MultiScaleDeformableAttention as MSDA

    return MSDA
2
import argparse import json import os import fairseq import torch from fairseq.data import Dictionary # Register SEW's fairseq modules from sew_asapp import tasks # noqa: F401 from transformers import ( SEWConfig, SEWForCTC, SEWModel, WavaVecaCTCTokenizer, WavaVecaFeatureExtractor, WavaVecaProcessor, logging, ) logging.set_verbosity_info() UpperCAmelCase_ = logging.get_logger(__name__) UpperCAmelCase_ = { """post_extract_proj""": """feature_projection""", """encoder.pos_conv.0""": """encoder.pos_conv_embed.conv""", """self_attn.k_proj""": """encoder.layers.*.attention.k_proj""", """self_attn.v_proj""": """encoder.layers.*.attention.v_proj""", """self_attn.q_proj""": """encoder.layers.*.attention.q_proj""", """self_attn.out_proj""": """encoder.layers.*.attention.out_proj""", """self_attn_layer_norm""": """encoder.layers.*.layer_norm""", """fc1""": """encoder.layers.*.feed_forward.intermediate_dense""", """fc2""": """encoder.layers.*.feed_forward.output_dense""", """final_layer_norm""": """encoder.layers.*.final_layer_norm""", """encoder.upsample.0""": """encoder.upsample.projection""", """encoder.layer_norm""": """encoder.layer_norm""", """w2v_model.layer_norm""": """layer_norm""", """w2v_encoder.proj""": """lm_head""", """mask_emb""": """masked_spec_embed""", } def SCREAMING_SNAKE_CASE_ ( _snake_case :Union[str, Any] , _snake_case :str , _snake_case :Any , _snake_case :int , _snake_case :List[Any] ) -> Optional[int]: for attribute in key.split('''.''' ): _A = getattr(_snake_case , _snake_case ) if weight_type is not None: _A = getattr(_snake_case , _snake_case ).shape else: _A = hf_pointer.shape assert hf_shape == value.shape, ( F'''Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be''' F''' {value.shape} for {full_name}''' ) if weight_type == "weight": _A = value elif weight_type == "weight_g": _A = value elif weight_type == "weight_v": _A = value elif weight_type == "bias": _A = value else: _A = value logger.info(F'''{key + '.' 
+ weight_type if weight_type is not None else ''} was initialized from {full_name}.''' ) def SCREAMING_SNAKE_CASE_ ( _snake_case :Union[str, Any] , _snake_case :Any , _snake_case :int ) -> Any: _A = [] _A = fairseq_model.state_dict() _A = hf_model.sew.feature_extractor if is_finetuned else hf_model.feature_extractor for name, value in fairseq_dict.items(): _A = False if "conv_layers" in name: load_conv_layer( _snake_case , _snake_case , _snake_case , _snake_case , hf_model.config.feat_extract_norm == '''group''' , ) _A = True else: for key, mapped_key in MAPPING.items(): _A = '''sew.''' + mapped_key if (is_finetuned and mapped_key != '''lm_head''') else mapped_key if key in name or key.split('''w2v_model.''' )[-1] == name.split('''.''' )[0]: _A = True if "*" in mapped_key: _A = name.split(_snake_case )[0].split('''.''' )[-2] _A = mapped_key.replace('''*''' , _snake_case ) if "weight_g" in name: _A = '''weight_g''' elif "weight_v" in name: _A = '''weight_v''' elif "weight" in name: _A = '''weight''' elif "bias" in name: _A = '''bias''' else: _A = None set_recursively(_snake_case , _snake_case , _snake_case , _snake_case , _snake_case ) continue if not is_used: unused_weights.append(_snake_case ) logger.warning(F'''Unused weights: {unused_weights}''' ) def SCREAMING_SNAKE_CASE_ ( _snake_case :Tuple , _snake_case :List[str] , _snake_case :List[str] , _snake_case :Optional[int] , _snake_case :List[Any] ) -> Any: _A = full_name.split('''conv_layers.''' )[-1] _A = name.split('''.''' ) _A = int(items[0] ) _A = int(items[1] ) if type_id == 0: if "bias" in name: assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, ( F'''{full_name} has size {value.shape}, but''' F''' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.''' ) _A = value logger.info(F'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' ) elif "weight" in name: assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, ( F'''{full_name} has size {value.shape}, but''' F''' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.''' ) _A = value logger.info(F'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' ) elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm): if "bias" in name: assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, ( F'''{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was''' " found." 
) _A = value logger.info(F'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' ) elif "weight" in name: assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, ( F'''{full_name} has size {value.shape}, but''' F''' {feature_extractor[layer_id].layer_norm.weight.data.shape} was found.''' ) _A = value logger.info(F'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' ) else: unused_weights.append(_snake_case ) def SCREAMING_SNAKE_CASE_ ( _snake_case :str , _snake_case :Dict ) -> Tuple: _A = SEWConfig() if is_finetuned: _A = model.wav_encoder.wav_model.cfg else: _A = model.cfg _A = fs_config.conv_bias _A = eval(fs_config.conv_feature_layers ) _A = [x[0] for x in conv_layers] _A = [x[1] for x in conv_layers] _A = [x[2] for x in conv_layers] _A = '''gelu''' _A = '''layer''' if fs_config.extractor_mode == '''layer_norm''' else '''group''' _A = 0.0 _A = fs_config.activation_fn.name _A = fs_config.encoder_embed_dim _A = 0.02 _A = fs_config.encoder_ffn_embed_dim _A = 1E-5 _A = fs_config.encoder_layerdrop _A = fs_config.encoder_attention_heads _A = fs_config.conv_pos_groups _A = fs_config.conv_pos _A = len(_snake_case ) _A = fs_config.encoder_layers _A = fs_config.squeeze_factor # take care of any params that are overridden by the Wav2VecCtc model if is_finetuned: _A = model.cfg _A = fs_config.final_dropout _A = fs_config.layerdrop _A = fs_config.activation_dropout _A = fs_config.mask_prob > 0 or fs_config.mask_channel_prob > 0 _A = fs_config.attention_dropout _A = fs_config.dropout_input _A = fs_config.dropout _A = fs_config.mask_channel_length _A = fs_config.mask_channel_prob _A = fs_config.mask_length _A = fs_config.mask_prob _A = '''Wav2Vec2FeatureExtractor''' _A = '''Wav2Vec2CTCTokenizer''' return config @torch.no_grad() def SCREAMING_SNAKE_CASE_ ( _snake_case :Optional[int] , _snake_case :Union[str, Any] , _snake_case :Optional[Any]=None , _snake_case :Optional[int]=None , _snake_case :Dict=True ) -> List[Any]: if is_finetuned: _A , _A , _A = fairseq.checkpoint_utils.load_model_ensemble_and_task( [checkpoint_path] , arg_overrides={'''data''': '''/'''.join(dict_path.split('''/''' )[:-1] )} ) else: _A , _A , _A = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] ) if config_path is not None: _A = SEWConfig.from_pretrained(_snake_case ) else: _A = convert_config(model[0] , _snake_case ) _A = model[0].eval() _A = True if config.feat_extract_norm == '''layer''' else False _A = WavaVecaFeatureExtractor( feature_size=1 , sampling_rate=16_000 , padding_value=0 , do_normalize=_snake_case , return_attention_mask=_snake_case , ) if is_finetuned: if dict_path: _A = Dictionary.load(_snake_case ) # important change bos & pad token id since CTC symbol is <pad> and # not <s> as in fairseq _A = target_dict.pad_index _A = target_dict.bos_index _A = target_dict.pad_index _A = target_dict.bos_index _A = target_dict.eos_index _A = len(target_dict.symbols ) _A = os.path.join(_snake_case , '''vocab.json''' ) if not os.path.isdir(_snake_case ): logger.error('''--pytorch_dump_folder_path ({}) should be a directory'''.format(_snake_case ) ) return os.makedirs(_snake_case , exist_ok=_snake_case ) with open(_snake_case , '''w''' , encoding='''utf-8''' ) as vocab_handle: json.dump(target_dict.indices , _snake_case ) _A = WavaVecaCTCTokenizer( _snake_case , unk_token=target_dict.unk_word , pad_token=target_dict.pad_word , bos_token=target_dict.bos_word , eos_token=target_dict.eos_word , 
word_delimiter_token='''|''' , do_lower_case=_snake_case , ) _A = WavaVecaProcessor(feature_extractor=_snake_case , tokenizer=_snake_case ) processor.save_pretrained(_snake_case ) _A = SEWForCTC(_snake_case ) else: _A = SEWModel(_snake_case ) feature_extractor.save_pretrained(_snake_case ) recursively_load_weights(_snake_case , _snake_case , _snake_case ) hf_model.save_pretrained(_snake_case ) if __name__ == "__main__": UpperCAmelCase_ = argparse.ArgumentParser() parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""") parser.add_argument("""--checkpoint_path""", default=None, type=str, help="""Path to fairseq checkpoint""") parser.add_argument("""--dict_path""", default=None, type=str, help="""Path to dict of fine-tuned model""") parser.add_argument("""--config_path""", default=None, type=str, help="""Path to hf config.json of model to convert""") parser.add_argument( """--is_finetuned""", action="""store_true""", help="""Whether the model to convert is a fine-tuned model or not""" ) UpperCAmelCase_ = parser.parse_args() convert_sew_checkpoint( args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, args.is_finetuned )
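A minimal usage sketch for the converter above. Every path below is a placeholder, not a file shipped with the script, and the script file name in the comment is assumed; the function name and argument order come from the script's own __main__ block.

# Hypothetical invocation; the `sew_asapp` package must be importable so the
# fairseq SEW modules register (see the import at the top of the script).
#
#   python convert_sew_checkpoint.py \
#       --checkpoint_path ./sew_checkpoint.pt \
#       --pytorch_dump_folder_path ./sew-hf \
#       --dict_path ./dict.ltr.txt \
#       --is_finetuned
#
# Programmatic equivalent, same positional order as the __main__ block:
convert_sew_checkpoint(
    "./sew_checkpoint.pt",  # fairseq checkpoint (placeholder path)
    "./sew-hf",             # output directory for the converted model (placeholder path)
    None,                   # config_path: derive the SEWConfig from the checkpoint instead
    "./dict.ltr.txt",       # fairseq dictionary, needed for the CTC vocab (placeholder path)
    True,                   # is_finetuned
)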
2
1
from __future__ import annotations

from typing import Any


class ContainsLoopError(Exception):
    pass


class Node:
    def __init__(self, data: Any) -> None:
        self.data: Any = data
        self.next_node: Node | None = None

    def __iter__(self) -> Any:
        node: Node | None = self
        visited = []
        while node:
            if node in visited:
                raise ContainsLoopError
            visited.append(node)
            yield node.data
            node = node.next_node

    @property
    def has_loop(self) -> bool:
        # Fully consuming the iterator either succeeds (no loop) or raises.
        try:
            list(self)
            return False
        except ContainsLoopError:
            return True


if __name__ == "__main__":
    root_node = Node(1)
    root_node.next_node = Node(2)
    root_node.next_node.next_node = Node(3)
    root_node.next_node.next_node.next_node = Node(4)
    print(root_node.has_loop)  # False

    root_node.next_node.next_node.next_node = root_node.next_node
    print(root_node.has_loop)  # True

    root_node = Node(5)
    root_node.next_node = Node(6)
    root_node.next_node.next_node = Node(5)
    root_node.next_node.next_node.next_node = Node(6)
    print(root_node.has_loop)  # False

    root_node = Node(1)
    print(root_node.has_loop)  # False
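The traversal above keeps every visited node in a list, so the loop check costs O(n) memory and, because of the linear `in visited` scan, O(n^2) time. A common constant-memory alternative, not part of the original file, is Floyd's tortoise-and-hare; the sketch below reuses the Node class defined above.

def has_loop_floyd(head: Node | None) -> bool:
    # Floyd's cycle detection: advance one pointer by one node and another by
    # two; the pointers can only meet again if the list contains a cycle.
    slow = fast = head
    while fast is not None and fast.next_node is not None:
        slow = slow.next_node            # one step
        fast = fast.next_node.next_node  # two steps
        if slow is fast:
            return True
    return False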
2
import unittest from transformers import is_vision_available from transformers.pipelines import pipeline from transformers.testing_utils import ( is_pipeline_test, nested_simplify, require_tf, require_torch, require_vision, slow, ) from .test_pipelines_common import ANY if is_vision_available(): from PIL import Image else: class lowerCamelCase__ : """simple docstring""" @staticmethod def snake_case_ ( *__lowerCAmelCase : Optional[Any] , **__lowerCAmelCase : Any ) -> Any: pass @is_pipeline_test @require_vision class lowerCamelCase__ ( unittest.TestCase): """simple docstring""" @require_torch def snake_case_ ( self : Tuple ) -> Tuple: _A = pipeline( model='''hf-internal-testing/tiny-random-clip-zero-shot-image-classification''' , ) _A = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' ) _A = image_classifier(__lowerCAmelCase , candidate_labels=['''a''', '''b''', '''c'''] ) # The floating scores are so close, we enter floating error approximation and the order is not guaranteed across # python and torch versions. self.assertIn( nested_simplify(__lowerCAmelCase ) , [ [{'''score''': 0.333, '''label''': '''a'''}, {'''score''': 0.333, '''label''': '''b'''}, {'''score''': 0.333, '''label''': '''c'''}], [{'''score''': 0.333, '''label''': '''a'''}, {'''score''': 0.333, '''label''': '''c'''}, {'''score''': 0.333, '''label''': '''b'''}], ] , ) _A = image_classifier([image] * 5 , candidate_labels=['''A''', '''B''', '''C'''] , batch_size=2 ) self.assertEqual( nested_simplify(__lowerCAmelCase ) , [ [ {'''score''': 0.333, '''label''': ANY(__lowerCAmelCase )}, {'''score''': 0.333, '''label''': ANY(__lowerCAmelCase )}, {'''score''': 0.333, '''label''': ANY(__lowerCAmelCase )}, ], [ {'''score''': 0.333, '''label''': ANY(__lowerCAmelCase )}, {'''score''': 0.333, '''label''': ANY(__lowerCAmelCase )}, {'''score''': 0.333, '''label''': ANY(__lowerCAmelCase )}, ], [ {'''score''': 0.333, '''label''': ANY(__lowerCAmelCase )}, {'''score''': 0.333, '''label''': ANY(__lowerCAmelCase )}, {'''score''': 0.333, '''label''': ANY(__lowerCAmelCase )}, ], [ {'''score''': 0.333, '''label''': ANY(__lowerCAmelCase )}, {'''score''': 0.333, '''label''': ANY(__lowerCAmelCase )}, {'''score''': 0.333, '''label''': ANY(__lowerCAmelCase )}, ], [ {'''score''': 0.333, '''label''': ANY(__lowerCAmelCase )}, {'''score''': 0.333, '''label''': ANY(__lowerCAmelCase )}, {'''score''': 0.333, '''label''': ANY(__lowerCAmelCase )}, ], ] , ) @require_tf def snake_case_ ( self : int ) -> Optional[int]: _A = pipeline( model='''hf-internal-testing/tiny-random-clip-zero-shot-image-classification''' , framework='''tf''' ) _A = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' ) _A = image_classifier(__lowerCAmelCase , candidate_labels=['''a''', '''b''', '''c'''] ) self.assertEqual( nested_simplify(__lowerCAmelCase ) , [{'''score''': 0.333, '''label''': '''a'''}, {'''score''': 0.333, '''label''': '''b'''}, {'''score''': 0.333, '''label''': '''c'''}] , ) _A = image_classifier([image] * 5 , candidate_labels=['''A''', '''B''', '''C'''] , batch_size=2 ) self.assertEqual( nested_simplify(__lowerCAmelCase ) , [ [ {'''score''': 0.333, '''label''': ANY(__lowerCAmelCase )}, {'''score''': 0.333, '''label''': ANY(__lowerCAmelCase )}, {'''score''': 0.333, '''label''': ANY(__lowerCAmelCase )}, ], [ {'''score''': 0.333, '''label''': ANY(__lowerCAmelCase )}, {'''score''': 0.333, '''label''': ANY(__lowerCAmelCase )}, {'''score''': 0.333, '''label''': ANY(__lowerCAmelCase )}, ], [ {'''score''': 0.333, '''label''': 
ANY(__lowerCAmelCase )}, {'''score''': 0.333, '''label''': ANY(__lowerCAmelCase )}, {'''score''': 0.333, '''label''': ANY(__lowerCAmelCase )}, ], [ {'''score''': 0.333, '''label''': ANY(__lowerCAmelCase )}, {'''score''': 0.333, '''label''': ANY(__lowerCAmelCase )}, {'''score''': 0.333, '''label''': ANY(__lowerCAmelCase )}, ], [ {'''score''': 0.333, '''label''': ANY(__lowerCAmelCase )}, {'''score''': 0.333, '''label''': ANY(__lowerCAmelCase )}, {'''score''': 0.333, '''label''': ANY(__lowerCAmelCase )}, ], ] , ) @slow @require_torch def snake_case_ ( self : Optional[int] ) -> int: _A = pipeline( task='''zero-shot-image-classification''' , model='''openai/clip-vit-base-patch32''' , ) # This is an image of 2 cats with remotes and no planes _A = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' ) _A = image_classifier(__lowerCAmelCase , candidate_labels=['''cat''', '''plane''', '''remote'''] ) self.assertEqual( nested_simplify(__lowerCAmelCase ) , [ {'''score''': 0.511, '''label''': '''remote'''}, {'''score''': 0.485, '''label''': '''cat'''}, {'''score''': 0.004, '''label''': '''plane'''}, ] , ) _A = image_classifier([image] * 5 , candidate_labels=['''cat''', '''plane''', '''remote'''] , batch_size=2 ) self.assertEqual( nested_simplify(__lowerCAmelCase ) , [ [ {'''score''': 0.511, '''label''': '''remote'''}, {'''score''': 0.485, '''label''': '''cat'''}, {'''score''': 0.004, '''label''': '''plane'''}, ], ] * 5 , ) @slow @require_tf def snake_case_ ( self : Optional[int] ) -> Dict: _A = pipeline( task='''zero-shot-image-classification''' , model='''openai/clip-vit-base-patch32''' , framework='''tf''' ) # This is an image of 2 cats with remotes and no planes _A = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' ) _A = image_classifier(__lowerCAmelCase , candidate_labels=['''cat''', '''plane''', '''remote'''] ) self.assertEqual( nested_simplify(__lowerCAmelCase ) , [ {'''score''': 0.511, '''label''': '''remote'''}, {'''score''': 0.485, '''label''': '''cat'''}, {'''score''': 0.004, '''label''': '''plane'''}, ] , ) _A = image_classifier([image] * 5 , candidate_labels=['''cat''', '''plane''', '''remote'''] , batch_size=2 ) self.assertEqual( nested_simplify(__lowerCAmelCase ) , [ [ {'''score''': 0.511, '''label''': '''remote'''}, {'''score''': 0.485, '''label''': '''cat'''}, {'''score''': 0.004, '''label''': '''plane'''}, ], ] * 5 , )
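A minimal sketch of the pipeline these tests exercise, outside the unittest harness; the model id, fixture path, and candidate labels are taken from the slow tests above, and Hub access is assumed.

from PIL import Image

from transformers import pipeline

classifier = pipeline(task="zero-shot-image-classification", model="openai/clip-vit-base-patch32")
image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")

# Each result is a dict with "score" and "label", sorted by descending score.
for result in classifier(image, candidate_labels=["cat", "plane", "remote"]):
    print(f"{result['label']}: {result['score']:.3f}")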
2
1
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices


logger = logging.get_logger(__name__)

FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/focalnet-tiny": "https://huggingface.co/microsoft/focalnet-tiny/resolve/main/config.json",
}


class FocalNetConfig(BackboneConfigMixin, PretrainedConfig):
    """Configuration class for a FocalNet model; the defaults yield a tiny-sized architecture."""

    model_type = "focalnet"

    def __init__(
        self,
        image_size=224,
        patch_size=4,
        num_channels=3,
        embed_dim=96,
        use_conv_embed=False,
        hidden_sizes=[192, 384, 768, 768],
        depths=[2, 2, 6, 2],
        focal_levels=[2, 2, 2, 2],
        focal_windows=[3, 3, 3, 3],
        hidden_act="gelu",
        mlp_ratio=4.0,
        hidden_dropout_prob=0.0,
        drop_path_rate=0.1,
        use_layerscale=False,
        layerscale_value=1e-4,
        use_post_layernorm=False,
        use_post_layernorm_in_modulation=False,
        normalize_modulator=False,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        encoder_stride=32,
        out_features=None,
        out_indices=None,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.use_conv_embed = use_conv_embed
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.focal_levels = focal_levels
        self.focal_windows = focal_windows
        self.hidden_act = hidden_act
        self.mlp_ratio = mlp_ratio
        self.hidden_dropout_prob = hidden_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.use_layerscale = use_layerscale
        self.layerscale_value = layerscale_value
        self.use_post_layernorm = use_post_layernorm
        self.use_post_layernorm_in_modulation = use_post_layernorm_in_modulation
        self.normalize_modulator = normalize_modulator
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.encoder_stride = encoder_stride
        self.stage_names = ["stem"] + [f"stage{idx}" for idx in range(1, len(self.depths) + 1)]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names
        )
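A short usage sketch for the config class above; one stage name is derived per entry in `depths`, and the printed values follow from the defaults.

from transformers import FocalNetConfig

config = FocalNetConfig()
print(config.stage_names)   # ['stem', 'stage1', 'stage2', 'stage3', 'stage4']
print(config.hidden_sizes)  # [192, 384, 768, 768]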
2
import json import os import shutil import tempfile import unittest import numpy as np import pytest from transformers import BertTokenizer, BertTokenizerFast from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES from transformers.testing_utils import require_vision from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available if is_vision_available(): from PIL import Image from transformers import AlignProcessor, EfficientNetImageProcessor @require_vision class lowerCamelCase__ ( unittest.TestCase): """simple docstring""" def snake_case_ ( self : Tuple ) -> Optional[int]: _A = tempfile.mkdtemp() _A = [ '''[UNK]''', '''[CLS]''', '''[SEP]''', '''[PAD]''', '''[MASK]''', '''want''', '''##want''', '''##ed''', '''wa''', '''un''', '''runn''', '''##ing''', ''',''', '''low''', '''lowest''', ] _A = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] ) with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer: vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) ) _A = { '''do_resize''': True, '''size''': 20, '''do_center_crop''': True, '''crop_size''': 18, '''do_normalize''': True, '''image_mean''': [0.4814_5466, 0.457_8275, 0.4082_1073], '''image_std''': [0.2686_2954, 0.2613_0258, 0.2757_7711], } _A = os.path.join(self.tmpdirname , __lowerCAmelCase ) with open(self.image_processor_file , '''w''' , encoding='''utf-8''' ) as fp: json.dump(__lowerCAmelCase , __lowerCAmelCase ) def snake_case_ ( self : Dict , **__lowerCAmelCase : int ) -> Optional[int]: return BertTokenizer.from_pretrained(self.tmpdirname , **__lowerCAmelCase ) def snake_case_ ( self : str , **__lowerCAmelCase : Optional[Any] ) -> Tuple: return BertTokenizerFast.from_pretrained(self.tmpdirname , **__lowerCAmelCase ) def snake_case_ ( self : Tuple , **__lowerCAmelCase : str ) -> Union[str, Any]: return EfficientNetImageProcessor.from_pretrained(self.tmpdirname , **__lowerCAmelCase ) def snake_case_ ( self : Optional[Any] ) -> Optional[int]: shutil.rmtree(self.tmpdirname ) def snake_case_ ( self : int ) -> Optional[Any]: _A = [np.random.randint(2_55 , size=(3, 30, 4_00) , dtype=np.uinta )] _A = [Image.fromarray(np.moveaxis(__lowerCAmelCase , 0 , -1 ) ) for x in image_inputs] return image_inputs def snake_case_ ( self : Dict ) -> List[str]: _A = self.get_tokenizer() _A = self.get_rust_tokenizer() _A = self.get_image_processor() _A = AlignProcessor(tokenizer=__lowerCAmelCase , image_processor=__lowerCAmelCase ) processor_slow.save_pretrained(self.tmpdirname ) _A = AlignProcessor.from_pretrained(self.tmpdirname , use_fast=__lowerCAmelCase ) _A = AlignProcessor(tokenizer=__lowerCAmelCase , image_processor=__lowerCAmelCase ) processor_fast.save_pretrained(self.tmpdirname ) _A = AlignProcessor.from_pretrained(self.tmpdirname ) self.assertEqual(processor_slow.tokenizer.get_vocab() , tokenizer_slow.get_vocab() ) self.assertEqual(processor_fast.tokenizer.get_vocab() , tokenizer_fast.get_vocab() ) self.assertEqual(tokenizer_slow.get_vocab() , tokenizer_fast.get_vocab() ) self.assertIsInstance(processor_slow.tokenizer , __lowerCAmelCase ) self.assertIsInstance(processor_fast.tokenizer , __lowerCAmelCase ) self.assertEqual(processor_slow.image_processor.to_json_string() , image_processor.to_json_string() ) self.assertEqual(processor_fast.image_processor.to_json_string() , image_processor.to_json_string() ) self.assertIsInstance(processor_slow.image_processor , __lowerCAmelCase ) self.assertIsInstance(processor_fast.image_processor , __lowerCAmelCase ) def snake_case_ ( 
self : List[Any] ) -> List[str]: _A = AlignProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() ) processor.save_pretrained(self.tmpdirname ) _A = self.get_tokenizer(bos_token='''(BOS)''' , eos_token='''(EOS)''' ) _A = self.get_image_processor(do_normalize=__lowerCAmelCase , padding_value=1.0 ) _A = AlignProcessor.from_pretrained( self.tmpdirname , bos_token='''(BOS)''' , eos_token='''(EOS)''' , do_normalize=__lowerCAmelCase , padding_value=1.0 ) self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() ) self.assertIsInstance(processor.tokenizer , __lowerCAmelCase ) self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() ) self.assertIsInstance(processor.image_processor , __lowerCAmelCase ) def snake_case_ ( self : str ) -> List[Any]: _A = self.get_image_processor() _A = self.get_tokenizer() _A = AlignProcessor(tokenizer=__lowerCAmelCase , image_processor=__lowerCAmelCase ) _A = self.prepare_image_inputs() _A = image_processor(__lowerCAmelCase , return_tensors='''np''' ) _A = processor(images=__lowerCAmelCase , return_tensors='''np''' ) for key in input_image_proc.keys(): self.assertAlmostEqual(input_image_proc[key].sum() , input_processor[key].sum() , delta=1E-2 ) def snake_case_ ( self : Union[str, Any] ) -> Dict: _A = self.get_image_processor() _A = self.get_tokenizer() _A = AlignProcessor(tokenizer=__lowerCAmelCase , image_processor=__lowerCAmelCase ) _A = '''lower newer''' _A = processor(text=__lowerCAmelCase ) _A = tokenizer(__lowerCAmelCase , padding='''max_length''' , max_length=64 ) for key in encoded_tok.keys(): self.assertListEqual(encoded_tok[key] , encoded_processor[key] ) def snake_case_ ( self : List[str] ) -> Any: _A = self.get_image_processor() _A = self.get_tokenizer() _A = AlignProcessor(tokenizer=__lowerCAmelCase , image_processor=__lowerCAmelCase ) _A = '''lower newer''' _A = self.prepare_image_inputs() _A = processor(text=__lowerCAmelCase , images=__lowerCAmelCase ) self.assertListEqual(list(inputs.keys() ) , ['''input_ids''', '''token_type_ids''', '''attention_mask''', '''pixel_values'''] ) # test if it raises when no input is passed with pytest.raises(__lowerCAmelCase ): processor() def snake_case_ ( self : Optional[Any] ) -> str: _A = self.get_image_processor() _A = self.get_tokenizer() _A = AlignProcessor(tokenizer=__lowerCAmelCase , image_processor=__lowerCAmelCase ) _A = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]] _A = processor.batch_decode(__lowerCAmelCase ) _A = tokenizer.batch_decode(__lowerCAmelCase ) self.assertListEqual(__lowerCAmelCase , __lowerCAmelCase ) def snake_case_ ( self : str ) -> str: _A = self.get_image_processor() _A = self.get_tokenizer() _A = AlignProcessor(tokenizer=__lowerCAmelCase , image_processor=__lowerCAmelCase ) _A = '''lower newer''' _A = self.prepare_image_inputs() _A = processor(text=__lowerCAmelCase , images=__lowerCAmelCase ) self.assertListEqual(list(inputs.keys() ) , processor.model_input_names )
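A condensed sketch of the pattern the tests above exercise; the `bert-base-uncased` checkpoint id is a placeholder (the tests build the tokenizer from a tiny local vocab instead), and the image processor is constructed with its library defaults.

import numpy as np
from PIL import Image

from transformers import AlignProcessor, BertTokenizer, EfficientNetImageProcessor

tokenizer = BertTokenizer.from_pretrained("bert-base-uncased")
image_processor = EfficientNetImageProcessor()
processor = AlignProcessor(tokenizer=tokenizer, image_processor=image_processor)

# A random RGB image standing in for the test fixtures.
image = Image.fromarray(np.random.randint(255, size=(30, 400, 3), dtype=np.uint8))
inputs = processor(text="lower newer", images=image)
print(sorted(inputs.keys()))  # ['attention_mask', 'input_ids', 'pixel_values', 'token_type_ids']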
2
1
import argparse

import torch
from huggingface_hub import hf_hub_download

from transformers import AutoTokenizer, RobertaPreLayerNormConfig, RobertaPreLayerNormForMaskedLM
from transformers.utils import logging


logging.set_verbosity_info()
logger = logging.get_logger(__name__)


def convert_roberta_prelayernorm_checkpoint_to_pytorch(checkpoint_repo: str, pytorch_dump_folder_path: str) -> None:
    """Copy/paste/tweak a roberta-prelayernorm checkpoint into the transformers structure."""
    config = RobertaPreLayerNormConfig.from_pretrained(
        checkpoint_repo, architectures=["RobertaPreLayerNormForMaskedLM"]
    )

    # convert state_dict
    original_state_dict = torch.load(hf_hub_download(repo_id=checkpoint_repo, filename="pytorch_model.bin"))
    state_dict = {}
    for tensor_key, tensor_value in original_state_dict.items():
        # The transformer implementation gives the model a unique name, rather than overwriting 'roberta'
        if tensor_key.startswith("roberta."):
            tensor_key = "roberta_prelayernorm." + tensor_key[len("roberta.") :]

        # The original implementation contains weights which are not used, remove them from the state_dict
        if tensor_key.endswith(".self.LayerNorm.weight") or tensor_key.endswith(".self.LayerNorm.bias"):
            continue

        state_dict[tensor_key] = tensor_value

    model = RobertaPreLayerNormForMaskedLM.from_pretrained(
        pretrained_model_name_or_path=checkpoint_repo, config=config, state_dict=state_dict
    )
    model.save_pretrained(pytorch_dump_folder_path)

    # convert tokenizer
    tokenizer = AutoTokenizer.from_pretrained(checkpoint_repo)
    tokenizer.save_pretrained(pytorch_dump_folder_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--checkpoint-repo",
        default=None,
        type=str,
        required=True,
        help="Path the official PyTorch dump, e.g. 'andreasmadsen/efficient_mlm_m0.40'.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    args = parser.parse_args()
    convert_roberta_prelayernorm_checkpoint_to_pytorch(args.checkpoint_repo, args.pytorch_dump_folder_path)
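A usage sketch for the converter above. The repo id comes from the script's own help text; the output directory is a placeholder.

# CLI form (mirrors the argparse definition above):
#   python convert_roberta_prelayernorm_checkpoint.py \
#       --checkpoint-repo andreasmadsen/efficient_mlm_m0.40 \
#       --pytorch_dump_folder_path ./roberta-prelayernorm-hf
#
# Programmatic equivalent:
convert_roberta_prelayernorm_checkpoint_to_pytorch(
    checkpoint_repo="andreasmadsen/efficient_mlm_m0.40",
    pytorch_dump_folder_path="./roberta-prelayernorm-hf",  # placeholder output dir
)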
2
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

OPENAI_GPT_PRETRAINED_CONFIG_ARCHIVE_MAP = {"openai-gpt": "https://huggingface.co/openai-gpt/resolve/main/config.json"}


class OpenAIGPTConfig(PretrainedConfig):
    """Configuration class for an OpenAI GPT model; the defaults match the original GPT architecture."""

    model_type = "openai-gpt"
    attribute_map = {
        "max_position_embeddings": "n_positions",
        "hidden_size": "n_embd",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(
        self,
        vocab_size=40478,
        n_positions=512,
        n_embd=768,
        n_layer=12,
        n_head=12,
        afn="gelu",
        resid_pdrop=0.1,
        embd_pdrop=0.1,
        attn_pdrop=0.1,
        layer_norm_epsilon=1e-5,
        initializer_range=0.02,
        summary_type="cls_index",
        summary_use_proj=True,
        summary_activation=None,
        summary_proj_to_labels=True,
        summary_first_dropout=0.1,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.afn = afn
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.summary_type = summary_type
        self.summary_use_proj = summary_use_proj
        self.summary_activation = summary_activation
        self.summary_first_dropout = summary_first_dropout
        self.summary_proj_to_labels = summary_proj_to_labels
        super().__init__(**kwargs)
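A brief sketch of how `attribute_map` behaves: reads of the generic config names are forwarded to the GPT-specific attributes, so downstream code can stay model-agnostic.

from transformers import OpenAIGPTConfig

config = OpenAIGPTConfig()
print(config.n_embd)             # 768
print(config.hidden_size)        # 768, forwarded to n_embd via attribute_map
print(config.num_hidden_layers)  # 12, forwarded to n_layer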
2
1
from __future__ import annotations

from sys import maxsize
from typing import Generic, TypeVar

T = TypeVar("T")


def get_parent_position(position: int) -> int:
    return (position - 1) // 2


def get_child_left_position(position: int) -> int:
    return (2 * position) + 1


def get_child_right_position(position: int) -> int:
    return (2 * position) + 2


class MinPriorityQueue(Generic[T]):
    def __init__(self) -> None:
        self.heap: list[tuple[T, int]] = []
        self.position_map: dict[T, int] = {}
        self.elements: int = 0

    def __len__(self) -> int:
        return self.elements

    def __repr__(self) -> str:
        return str(self.heap)

    def is_empty(self) -> bool:
        # Check if the priority queue is empty
        return self.elements == 0

    def push(self, elem: T, weight: int) -> None:
        # Add an element with given priority to the queue
        self.heap.append((elem, weight))
        self.position_map[elem] = self.elements
        self.elements += 1
        self._bubble_up(elem)

    def extract_min(self) -> T:
        # Remove and return the element with lowest weight (highest priority)
        if self.elements > 1:
            self._swap_nodes(0, self.elements - 1)
        elem, _ = self.heap.pop()
        del self.position_map[elem]
        self.elements -= 1
        if self.elements > 0:
            bubble_down_elem, _ = self.heap[0]
            self._bubble_down(bubble_down_elem)
        return elem

    def update_key(self, elem: T, weight: int) -> None:
        # Update the weight of the given key
        position = self.position_map[elem]
        self.heap[position] = (elem, weight)
        if position > 0:
            parent_position = get_parent_position(position)
            _, parent_weight = self.heap[parent_position]
            if parent_weight > weight:
                self._bubble_up(elem)
            else:
                self._bubble_down(elem)
        else:
            self._bubble_down(elem)

    def _bubble_up(self, elem: T) -> None:
        # Place a node at the proper position (upward movement) [to be used internally only]
        curr_pos = self.position_map[elem]
        if curr_pos == 0:
            return None
        parent_position = get_parent_position(curr_pos)
        _, weight = self.heap[curr_pos]
        _, parent_weight = self.heap[parent_position]
        if parent_weight > weight:
            self._swap_nodes(parent_position, curr_pos)
            return self._bubble_up(elem)
        return None

    def _bubble_down(self, elem: T) -> None:
        # Place a node at the proper position (downward movement) [to be used internally only]
        curr_pos = self.position_map[elem]
        _, weight = self.heap[curr_pos]
        child_left_position = get_child_left_position(curr_pos)
        child_right_position = get_child_right_position(curr_pos)
        if child_left_position < self.elements and child_right_position < self.elements:
            _, child_left_weight = self.heap[child_left_position]
            _, child_right_weight = self.heap[child_right_position]
            if child_right_weight < child_left_weight and child_right_weight < weight:
                self._swap_nodes(child_right_position, curr_pos)
                return self._bubble_down(elem)
        if child_left_position < self.elements:
            _, child_left_weight = self.heap[child_left_position]
            if child_left_weight < weight:
                self._swap_nodes(child_left_position, curr_pos)
                return self._bubble_down(elem)
        else:
            return None
        if child_right_position < self.elements:
            _, child_right_weight = self.heap[child_right_position]
            if child_right_weight < weight:
                self._swap_nodes(child_right_position, curr_pos)
                return self._bubble_down(elem)
        return None

    def _swap_nodes(self, node1_pos: int, node2_pos: int) -> None:
        # Swap the nodes at the given positions
        node1_elem = self.heap[node1_pos][0]
        node2_elem = self.heap[node2_pos][0]
        self.heap[node1_pos], self.heap[node2_pos] = (
            self.heap[node2_pos],
            self.heap[node1_pos],
        )
        self.position_map[node1_elem] = node2_pos
        self.position_map[node2_elem] = node1_pos


class GraphUndirectedWeighted(Generic[T]):
    def __init__(self) -> None:
        self.connections: dict[T, dict[T, int]] = {}
        self.nodes: int = 0

    def __repr__(self) -> str:
        return str(self.connections)

    def __len__(self) -> int:
        return self.nodes

    def add_node(self, node: T) -> None:
        # Add a node in the graph if it is not in the graph
        if node not in self.connections:
            self.connections[node] = {}
            self.nodes += 1

    def add_edge(self, node1: T, node2: T, weight: int) -> None:
        # Add an edge between 2 nodes in the graph
        self.add_node(node1)
        self.add_node(node2)
        self.connections[node1][node2] = weight
        self.connections[node2][node1] = weight


def prims_algo(
    graph: GraphUndirectedWeighted[T],
) -> tuple[dict[T, int], dict[T, T | None]]:
    dist: dict[T, int] = {node: maxsize for node in graph.connections}
    parent: dict[T, T | None] = {node: None for node in graph.connections}

    priority_queue: MinPriorityQueue[T] = MinPriorityQueue()
    for node, weight in dist.items():
        priority_queue.push(node, weight)

    if priority_queue.is_empty():
        return dist, parent

    # initialization
    node = priority_queue.extract_min()
    dist[node] = 0
    for neighbour in graph.connections[node]:
        if dist[neighbour] > dist[node] + graph.connections[node][neighbour]:
            dist[neighbour] = dist[node] + graph.connections[node][neighbour]
            priority_queue.update_key(neighbour, dist[neighbour])
            parent[neighbour] = node

    # running prim's algorithm
    while not priority_queue.is_empty():
        node = priority_queue.extract_min()
        for neighbour in graph.connections[node]:
            if dist[neighbour] > dist[node] + graph.connections[node][neighbour]:
                dist[neighbour] = dist[node] + graph.connections[node][neighbour]
                priority_queue.update_key(neighbour, dist[neighbour])
                parent[neighbour] = node
    return dist, parent
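A small end-to-end sketch for the structures above; the four-node graph is made up for illustration.

graph = GraphUndirectedWeighted()
graph.add_edge("a", "b", 3)
graph.add_edge("b", "c", 10)
graph.add_edge("c", "a", 5)
graph.add_edge("c", "d", 1)

dist, parent = prims_algo(graph)
# `parent` encodes the resulting spanning tree: every node except the start
# node has exactly one parent edge.
print(parent)  # {'a': None, 'b': 'a', 'c': 'a', 'd': 'c'}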
2
import json import pathlib import unittest import numpy as np from transformers.testing_utils import require_torch, require_vision, slow from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import DeformableDetrImageProcessor class lowerCamelCase__ ( unittest.TestCase): """simple docstring""" def __init__( self : Optional[int] , __lowerCAmelCase : List[Any] , __lowerCAmelCase : Dict=7 , __lowerCAmelCase : Tuple=3 , __lowerCAmelCase : int=30 , __lowerCAmelCase : Dict=4_00 , __lowerCAmelCase : Optional[Any]=True , __lowerCAmelCase : List[str]=None , __lowerCAmelCase : Dict=True , __lowerCAmelCase : Optional[Any]=[0.5, 0.5, 0.5] , __lowerCAmelCase : Dict=[0.5, 0.5, 0.5] , __lowerCAmelCase : List[str]=True , __lowerCAmelCase : List[str]=1 / 2_55 , __lowerCAmelCase : int=True , ) -> List[str]: # by setting size["longest_edge"] > max_resolution we're effectively not testing this :p _A = size if size is not None else {'''shortest_edge''': 18, '''longest_edge''': 13_33} _A = parent _A = batch_size _A = num_channels _A = min_resolution _A = max_resolution _A = do_resize _A = size _A = do_normalize _A = image_mean _A = image_std _A = do_rescale _A = rescale_factor _A = do_pad def snake_case_ ( self : Optional[int] ) -> Any: return { "do_resize": self.do_resize, "size": self.size, "do_normalize": self.do_normalize, "image_mean": self.image_mean, "image_std": self.image_std, "do_rescale": self.do_rescale, "rescale_factor": self.rescale_factor, "do_pad": self.do_pad, } def snake_case_ ( self : List[Any] , __lowerCAmelCase : Tuple , __lowerCAmelCase : str=False ) -> Dict: if not batched: _A = image_inputs[0] if isinstance(__lowerCAmelCase , Image.Image ): _A , _A = image.size else: _A , _A = image.shape[1], image.shape[2] if w < h: _A = int(self.size['''shortest_edge'''] * h / w ) _A = self.size['''shortest_edge'''] elif w > h: _A = self.size['''shortest_edge'''] _A = int(self.size['''shortest_edge'''] * w / h ) else: _A = self.size['''shortest_edge'''] _A = self.size['''shortest_edge'''] else: _A = [] for image in image_inputs: _A , _A = self.get_expected_values([image] ) expected_values.append((expected_height, expected_width) ) _A = max(__lowerCAmelCase , key=lambda __lowerCAmelCase : item[0] )[0] _A = max(__lowerCAmelCase , key=lambda __lowerCAmelCase : item[1] )[1] return expected_height, expected_width @require_torch @require_vision class lowerCamelCase__ ( _A , unittest.TestCase): """simple docstring""" a__ : Any = DeformableDetrImageProcessor if is_vision_available() else None def snake_case_ ( self : Optional[int] ) -> Any: _A = DeformableDetrImageProcessingTester(self ) @property def snake_case_ ( self : Union[str, Any] ) -> Dict: return self.image_processor_tester.prepare_image_processor_dict() def snake_case_ ( self : Optional[int] ) -> List[str]: _A = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(__lowerCAmelCase , '''image_mean''' ) ) self.assertTrue(hasattr(__lowerCAmelCase , '''image_std''' ) ) self.assertTrue(hasattr(__lowerCAmelCase , '''do_normalize''' ) ) self.assertTrue(hasattr(__lowerCAmelCase , '''do_resize''' ) ) self.assertTrue(hasattr(__lowerCAmelCase , '''do_rescale''' ) ) self.assertTrue(hasattr(__lowerCAmelCase , '''do_pad''' ) ) self.assertTrue(hasattr(__lowerCAmelCase , '''size''' ) ) def snake_case_ ( self : List[str] ) 
-> int: _A = self.image_processing_class.from_dict(self.image_processor_dict ) self.assertEqual(image_processor.size , {'''shortest_edge''': 18, '''longest_edge''': 13_33} ) self.assertEqual(image_processor.do_pad , __lowerCAmelCase ) _A = self.image_processing_class.from_dict( self.image_processor_dict , size=42 , max_size=84 , pad_and_return_pixel_mask=__lowerCAmelCase ) self.assertEqual(image_processor.size , {'''shortest_edge''': 42, '''longest_edge''': 84} ) self.assertEqual(image_processor.do_pad , __lowerCAmelCase ) def snake_case_ ( self : Any ) -> Union[str, Any]: pass def snake_case_ ( self : List[str] ) -> Optional[int]: # Initialize image_processing _A = self.image_processing_class(**self.image_processor_dict ) # create random PIL images _A = prepare_image_inputs(self.image_processor_tester , equal_resolution=__lowerCAmelCase ) for image in image_inputs: self.assertIsInstance(__lowerCAmelCase , Image.Image ) # Test not batched input _A = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values _A , _A = self.image_processor_tester.get_expected_values(__lowerCAmelCase ) self.assertEqual( encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , ) # Test batched _A , _A = self.image_processor_tester.get_expected_values(__lowerCAmelCase , batched=__lowerCAmelCase ) _A = image_processing(__lowerCAmelCase , return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, expected_height, expected_width, ) , ) def snake_case_ ( self : Tuple ) -> int: # Initialize image_processing _A = self.image_processing_class(**self.image_processor_dict ) # create random numpy tensors _A = prepare_image_inputs(self.image_processor_tester , equal_resolution=__lowerCAmelCase , numpify=__lowerCAmelCase ) for image in image_inputs: self.assertIsInstance(__lowerCAmelCase , np.ndarray ) # Test not batched input _A = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values _A , _A = self.image_processor_tester.get_expected_values(__lowerCAmelCase ) self.assertEqual( encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , ) # Test batched _A = image_processing(__lowerCAmelCase , return_tensors='''pt''' ).pixel_values _A , _A = self.image_processor_tester.get_expected_values(__lowerCAmelCase , batched=__lowerCAmelCase ) self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, expected_height, expected_width, ) , ) def snake_case_ ( self : Optional[Any] ) -> int: # Initialize image_processing _A = self.image_processing_class(**self.image_processor_dict ) # create random PyTorch tensors _A = prepare_image_inputs(self.image_processor_tester , equal_resolution=__lowerCAmelCase , torchify=__lowerCAmelCase ) for image in image_inputs: self.assertIsInstance(__lowerCAmelCase , torch.Tensor ) # Test not batched input _A = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values _A , _A = self.image_processor_tester.get_expected_values(__lowerCAmelCase ) self.assertEqual( encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , ) # Test batched _A = image_processing(__lowerCAmelCase , return_tensors='''pt''' ).pixel_values _A , _A = self.image_processor_tester.get_expected_values(__lowerCAmelCase , batched=__lowerCAmelCase ) self.assertEqual( encoded_images.shape 
, ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, expected_height, expected_width, ) , ) @slow def snake_case_ ( self : Optional[Any] ) -> Optional[int]: # prepare image and target _A = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' ) with open('''./tests/fixtures/tests_samples/COCO/coco_annotations.txt''' , '''r''' ) as f: _A = json.loads(f.read() ) _A = {'''image_id''': 3_97_69, '''annotations''': target} # encode them _A = DeformableDetrImageProcessor() _A = image_processing(images=__lowerCAmelCase , annotations=__lowerCAmelCase , return_tensors='''pt''' ) # verify pixel values _A = torch.Size([1, 3, 8_00, 10_66] ) self.assertEqual(encoding['''pixel_values'''].shape , __lowerCAmelCase ) _A = torch.tensor([0.2796, 0.3138, 0.3481] ) self.assertTrue(torch.allclose(encoding['''pixel_values'''][0, 0, 0, :3] , __lowerCAmelCase , atol=1E-4 ) ) # verify area _A = torch.tensor([5887.9600, 1_1250.2061, 48_9353.8438, 83_7122.7500, 14_7967.5156, 16_5732.3438] ) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''area'''] , __lowerCAmelCase ) ) # verify boxes _A = torch.Size([6, 4] ) self.assertEqual(encoding['''labels'''][0]['''boxes'''].shape , __lowerCAmelCase ) _A = torch.tensor([0.5503, 0.2765, 0.0604, 0.2215] ) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''boxes'''][0] , __lowerCAmelCase , atol=1E-3 ) ) # verify image_id _A = torch.tensor([3_97_69] ) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''image_id'''] , __lowerCAmelCase ) ) # verify is_crowd _A = torch.tensor([0, 0, 0, 0, 0, 0] ) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''iscrowd'''] , __lowerCAmelCase ) ) # verify class_labels _A = torch.tensor([75, 75, 63, 65, 17, 17] ) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''class_labels'''] , __lowerCAmelCase ) ) # verify orig_size _A = torch.tensor([4_80, 6_40] ) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''orig_size'''] , __lowerCAmelCase ) ) # verify size _A = torch.tensor([8_00, 10_66] ) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''size'''] , __lowerCAmelCase ) ) @slow def snake_case_ ( self : List[str] ) -> List[str]: # prepare image, target and masks_path _A = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' ) with open('''./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt''' , '''r''' ) as f: _A = json.loads(f.read() ) _A = {'''file_name''': '''000000039769.png''', '''image_id''': 3_97_69, '''segments_info''': target} _A = pathlib.Path('''./tests/fixtures/tests_samples/COCO/coco_panoptic''' ) # encode them _A = DeformableDetrImageProcessor(format='''coco_panoptic''' ) _A = image_processing(images=__lowerCAmelCase , annotations=__lowerCAmelCase , masks_path=__lowerCAmelCase , return_tensors='''pt''' ) # verify pixel values _A = torch.Size([1, 3, 8_00, 10_66] ) self.assertEqual(encoding['''pixel_values'''].shape , __lowerCAmelCase ) _A = torch.tensor([0.2796, 0.3138, 0.3481] ) self.assertTrue(torch.allclose(encoding['''pixel_values'''][0, 0, 0, :3] , __lowerCAmelCase , atol=1E-4 ) ) # verify area _A = torch.tensor([14_7979.6875, 16_5527.0469, 48_4638.5938, 1_1292.9375, 5879.6562, 7634.1147] ) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''area'''] , __lowerCAmelCase ) ) # verify boxes _A = torch.Size([6, 4] ) self.assertEqual(encoding['''labels'''][0]['''boxes'''].shape , __lowerCAmelCase ) _A = torch.tensor([0.2625, 0.5437, 0.4688, 0.8625] ) 
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''boxes'''][0] , __lowerCAmelCase , atol=1E-3 ) ) # verify image_id _A = torch.tensor([3_97_69] ) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''image_id'''] , __lowerCAmelCase ) ) # verify is_crowd _A = torch.tensor([0, 0, 0, 0, 0, 0] ) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''iscrowd'''] , __lowerCAmelCase ) ) # verify class_labels _A = torch.tensor([17, 17, 63, 75, 75, 93] ) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''class_labels'''] , __lowerCAmelCase ) ) # verify masks _A = 82_28_73 self.assertEqual(encoding['''labels'''][0]['''masks'''].sum().item() , __lowerCAmelCase ) # verify orig_size _A = torch.tensor([4_80, 6_40] ) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''orig_size'''] , __lowerCAmelCase ) ) # verify size _A = torch.tensor([8_00, 10_66] ) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''size'''] , __lowerCAmelCase ) )
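A condensed sketch of the preprocessing flow the slow tests above verify; the fixture paths are the ones the tests use and must exist locally for the sketch to run.

import json

from PIL import Image

from transformers import DeformableDetrImageProcessor

image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
with open("./tests/fixtures/tests_samples/COCO/coco_annotations.txt") as f:
    target = {"image_id": 39769, "annotations": json.loads(f.read())}

processor = DeformableDetrImageProcessor()
encoding = processor(images=image, annotations=target, return_tensors="pt")
print(encoding["pixel_values"].shape)        # torch.Size([1, 3, 800, 1066])
print(encoding["labels"][0]["boxes"].shape)  # torch.Size([6, 4])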
2
1