import gc
import random
import tempfile
import unittest

import numpy as np
import torch

from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer

from diffusers import AutoencoderKL, DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler, UNet2DConditionModel
from diffusers.pipelines.stable_diffusion_safe import StableDiffusionPipelineSafe as StableDiffusionPipeline
from diffusers.utils import floats_tensor, nightly, torch_device
from diffusers.utils.testing_utils import require_torch_gpu


class SafeDiffusionPipelineFastTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    @property
    def dummy_image(self):
        batch_size = 1
        num_channels = 3
        sizes = (32, 32)

        image = floats_tensor((batch_size, num_channels) + sizes, rng=random.Random(0)).to(torch_device)
        return image

    @property
    def dummy_cond_unet(self):
        torch.manual_seed(0)
        model = UNet2DConditionModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=4,
            out_channels=4,
            down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"),
            up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"),
            cross_attention_dim=32,
        )
        return model

    @property
    def dummy_vae(self):
        torch.manual_seed(0)
        model = AutoencoderKL(
            block_out_channels=[32, 64],
            in_channels=3,
            out_channels=3,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=4,
        )
        return model

    @property
    def dummy_text_encoder(self):
        torch.manual_seed(0)
        config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
        )
        return CLIPTextModel(config)

    @property
    def dummy_extractor(self):
        def extract(*args, **kwargs):
            class Out:
                def __init__(self):
                    self.pixel_values = torch.ones([0])

                def to(self, device):
                    self.pixel_values.to(device)
                    return self

            return Out()

        return extract

    def test_safe_diffusion_ddim(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        unet = self.dummy_cond_unet
        scheduler = DDIMScheduler(
            beta_start=0.00085,
            beta_end=0.012,
            beta_schedule="scaled_linear",
            clip_sample=False,
            set_alpha_to_one=False,
        )

        vae = self.dummy_vae
        bert = self.dummy_text_encoder
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        sd_pipe = StableDiffusionPipeline(
            unet=unet, scheduler=scheduler, vae=vae, text_encoder=bert, tokenizer=tokenizer, safety_checker=None, feature_extractor=self.dummy_extractor,
        )
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        prompt = "A painting of a squirrel eating a burger"

        generator = torch.Generator(device=device).manual_seed(0)
        output = sd_pipe([prompt], generator=generator, guidance_scale=6.0, num_inference_steps=2, output_type="np")
        image = output.images

        generator = torch.Generator(device=device).manual_seed(0)
        image_from_tuple = sd_pipe(
            [prompt], generator=generator, guidance_scale=6.0, num_inference_steps=2, output_type="np", return_dict=False,
        )[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array([0.5756, 0.6118, 0.5005, 0.5041, 0.5471, 0.4726, 0.4976, 0.4865, 0.4864])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2

    def test_stable_diffusion_pndm(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        unet = self.dummy_cond_unet
        # make sure here that pndm scheduler skips prk
        scheduler = PNDMScheduler(skip_prk_steps=True)
        vae = self.dummy_vae
        bert = self.dummy_text_encoder
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        sd_pipe = StableDiffusionPipeline(
            unet=unet, scheduler=scheduler, vae=vae, text_encoder=bert, tokenizer=tokenizer, safety_checker=None, feature_extractor=self.dummy_extractor,
        )
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        prompt = "A painting of a squirrel eating a burger"
        generator = torch.Generator(device=device).manual_seed(0)
        output = sd_pipe([prompt], generator=generator, guidance_scale=6.0, num_inference_steps=2, output_type="np")
        image = output.images

        generator = torch.Generator(device=device).manual_seed(0)
        image_from_tuple = sd_pipe(
            [prompt], generator=generator, guidance_scale=6.0, num_inference_steps=2, output_type="np", return_dict=False,
        )[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array([0.5125, 0.5716, 0.4828, 0.5060, 0.5650, 0.4768, 0.5185, 0.4895, 0.4993])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2

    def test_stable_diffusion_no_safety_checker(self):
        pipe = StableDiffusionPipeline.from_pretrained(
            "hf-internal-testing/tiny-stable-diffusion-lms-pipe", safety_checker=None
        )
        assert isinstance(pipe, StableDiffusionPipeline)
        assert isinstance(pipe.scheduler, LMSDiscreteScheduler)
        assert pipe.safety_checker is None

        image = pipe("example prompt", num_inference_steps=2).images[0]
        assert image is not None

        # check that there's no error when saving a pipeline with one of the models being None
        with tempfile.TemporaryDirectory() as tmpdirname:
            pipe.save_pretrained(tmpdirname)
            pipe = StableDiffusionPipeline.from_pretrained(tmpdirname)

        # sanity check that the pipeline still works
        assert pipe.safety_checker is None
        image = pipe("example prompt", num_inference_steps=2).images[0]
        assert image is not None

    @unittest.skipIf(torch_device != "cuda", "This test requires a GPU")
    def test_stable_diffusion_fp16(self):
        unet = self.dummy_cond_unet
        scheduler = PNDMScheduler(skip_prk_steps=True)
        vae = self.dummy_vae
        bert = self.dummy_text_encoder
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        # put models in fp16
        unet = unet.half()
        vae = vae.half()
        bert = bert.half()

        # make sure here that pndm scheduler skips prk
        sd_pipe = StableDiffusionPipeline(
            unet=unet, scheduler=scheduler, vae=vae, text_encoder=bert, tokenizer=tokenizer, safety_checker=None, feature_extractor=self.dummy_extractor,
        )
        sd_pipe = sd_pipe.to(torch_device)
        sd_pipe.set_progress_bar_config(disable=None)

        prompt = "A painting of a squirrel eating a burger"
        image = sd_pipe([prompt], num_inference_steps=2, output_type="np").images

        assert image.shape == (1, 64, 64, 3)


@nightly
@require_torch_gpu
class SafeDiffusionPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_harm_safe_stable_diffusion(self):
        sd_pipe = StableDiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5", safety_checker=None)
        sd_pipe.scheduler = LMSDiscreteScheduler.from_config(sd_pipe.scheduler.config)
        sd_pipe = sd_pipe.to(torch_device)
        sd_pipe.set_progress_bar_config(disable=None)

        prompt = (
            "portrait of girl with smokey eyes makeup in abandoned hotel, grange clothes, redshift, wide high angle"
            " coloured polaroid photograph with flash, kodak film, hyper real, stunning moody cinematography, with"
            " anamorphic lenses, by maripol, fallen angels by wong kar - wai, style of suspiria and neon demon and"
            " children from bahnhof zoo, detailed "
        )
        seed = 4003660346
        guidance_scale = 7

        # without safety guidance (sld_guidance_scale = 0)
        generator = torch.manual_seed(seed)
        output = sd_pipe(
            [prompt], generator=generator, guidance_scale=guidance_scale, num_inference_steps=50, output_type="np", width=512, height=512, sld_guidance_scale=0,
        )

        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        expected_slice = [0.2278, 0.2231, 0.2249, 0.2333, 0.2303, 0.1885, 0.2273, 0.2144, 0.2176]

        assert image.shape == (1, 512, 512, 3)
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

        # with safety guidance (strong configuration)
        generator = torch.manual_seed(seed)
        output = sd_pipe(
            [prompt], generator=generator, guidance_scale=guidance_scale, num_inference_steps=50, output_type="np", width=512, height=512, sld_guidance_scale=2000, sld_warmup_steps=7, sld_threshold=0.025, sld_momentum_scale=0.5, sld_mom_beta=0.7,
        )

        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        expected_slice = [0.2383, 0.2276, 0.236, 0.2192, 0.2186, 0.2053, 0.1971, 0.1901, 0.1719]

        assert image.shape == (1, 512, 512, 3)
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_nudity_safe_stable_diffusion(self):
        sd_pipe = StableDiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5", safety_checker=None)
        sd_pipe.scheduler = LMSDiscreteScheduler.from_config(sd_pipe.scheduler.config)
        sd_pipe = sd_pipe.to(torch_device)
        sd_pipe.set_progress_bar_config(disable=None)

        prompt = "padme amidala taking a bath artwork, safe for work, no nudity"
        seed = 2734971755
        guidance_scale = 7

        # without safety guidance (sld_guidance_scale = 0)
        generator = torch.manual_seed(seed)
        output = sd_pipe(
            [prompt], generator=generator, guidance_scale=guidance_scale, num_inference_steps=50, output_type="np", width=512, height=512, sld_guidance_scale=0,
        )

        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        expected_slice = [0.3502, 0.3622, 0.3396, 0.3642, 0.3478, 0.3318, 0.35, 0.3348, 0.3297]

        assert image.shape == (1, 512, 512, 3)
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

        # with safety guidance (strong configuration)
        generator = torch.manual_seed(seed)
        output = sd_pipe(
            [prompt], generator=generator, guidance_scale=guidance_scale, num_inference_steps=50, output_type="np", width=512, height=512, sld_guidance_scale=2000, sld_warmup_steps=7, sld_threshold=0.025, sld_momentum_scale=0.5, sld_mom_beta=0.7,
        )

        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        expected_slice = [0.5531, 0.5206, 0.4895, 0.5156, 0.5182, 0.4751, 0.4802, 0.4803, 0.4443]

        assert image.shape == (1, 512, 512, 3)
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_nudity_safetychecker_safe_stable_diffusion(self):
        sd_pipe = StableDiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5")
        sd_pipe = sd_pipe.to(torch_device)
        sd_pipe.set_progress_bar_config(disable=None)

        prompt = (
            "the four horsewomen of the apocalypse, painting by tom of finland, gaston bussiere, craig mullins, j. c."
            " leyendecker"
        )
        seed = 1044355234
        guidance_scale = 12

        # without safety guidance the safety checker blacks out the image
        generator = torch.manual_seed(seed)
        output = sd_pipe(
            [prompt], generator=generator, guidance_scale=guidance_scale, num_inference_steps=50, output_type="np", width=512, height=512, sld_guidance_scale=0,
        )

        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0])

        assert image.shape == (1, 512, 512, 3)
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-7

        # with safety guidance (strong configuration)
        generator = torch.manual_seed(seed)
        output = sd_pipe(
            [prompt], generator=generator, guidance_scale=guidance_scale, num_inference_steps=50, output_type="np", width=512, height=512, sld_guidance_scale=2000, sld_warmup_steps=7, sld_threshold=0.025, sld_momentum_scale=0.5, sld_mom_beta=0.7,
        )

        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.5818, 0.6285, 0.6835, 0.6019, 0.625, 0.6754, 0.6096, 0.6334, 0.6561])

        assert image.shape == (1, 512, 512, 3)
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
from math import factorial

DIGIT_FACTORIAL = {str(digit): factorial(digit) for digit in range(10)}


def digit_factorial_sum(number: int) -> int:
    if not isinstance(number, int):
        raise TypeError("Parameter number must be int")

    if number < 0:
        raise ValueError("Parameter number must be greater than or equal to 0")

    # Converts number in string to iterate on its digits and adds its factorial.
    return sum(DIGIT_FACTORIAL[digit] for digit in str(number))


def solution(chain_length: int = 60, number_limit: int = 1000000) -> int:
    if not isinstance(chain_length, int) or not isinstance(number_limit, int):
        raise TypeError("Parameters chain_length and number_limit must be int")

    if chain_length <= 0 or number_limit <= 0:
        raise ValueError("Parameters chain_length and number_limit must be greater than 0")

    # the counter for the chains with the exact desired length
    chains_counter = 0
    # the cached sizes of the previous chains
    chain_sets_lengths: dict[int, int] = {}

    for start_chain_element in range(1, number_limit):
        # The temporary set will contain the elements of the chain
        chain_set = set()
        chain_set_length = 0

        # Stop computing the chain when you find a cached size, a repeating item
        # or the length is greater than the desired one.
        chain_element = start_chain_element
        while (
            chain_element not in chain_sets_lengths
            and chain_element not in chain_set
            and chain_set_length <= chain_length
        ):
            chain_set.add(chain_element)
            chain_set_length += 1
            chain_element = digit_factorial_sum(chain_element)

        if chain_element in chain_sets_lengths:
            chain_set_length += chain_sets_lengths[chain_element]

        chain_sets_lengths[start_chain_element] = chain_set_length

        # If chain contains the exact amount of elements increase the counter
        if chain_set_length == chain_length:
            chains_counter += 1

    return chains_counter


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    print(f"{solution()}")
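# Worked example: 145 is a "factorion", i.e. 1! + 4! + 5! = 1 + 24 + 120 = 145,
# so digit_factorial_sum maps it to itself and its chain has length 1. Every
# chain eventually enters a loop, e.g. 169 -> 363601 -> 1454 -> 169, which is
# why solution() caches previously computed chain lengths.
assert digit_factorial_sum(145) == 145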
"""simple docstring"""
from typing import Optional, Tuple, Union
import flax
import flax.linen as nn
import jax
import jax.numpy as jnp
from flax.core.frozen_dict import FrozenDict
from ..configuration_utils import ConfigMixin, flax_register_to_config
from ..utils import BaseOutput
from .embeddings_flax import FlaxTimestepEmbedding, FlaxTimesteps
from .modeling_flax_utils import FlaxModelMixin
from .unet_ad_blocks_flax import (
FlaxCrossAttnDownBlockaD,
FlaxDownBlockaD,
FlaxUNetMidBlockaDCrossAttn,
)
@flax.struct.dataclass
class UpperCAmelCase ( __snake_case ):
A__ : jnp.ndarray
A__ : jnp.ndarray
class UpperCAmelCase ( nn.Module ):
A__ : int
A__ : Tuple[int] = (16, 32, 96, 256)
A__ : jnp.dtype = jnp.floataa
def __UpperCAmelCase ( self : Any ):
"""simple docstring"""
_snake_case = nn.Conv(
self.block_out_channels[0] , kernel_size=(3, 3) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
_snake_case = []
for i in range(len(self.block_out_channels ) - 1 ):
_snake_case = self.block_out_channels[i]
_snake_case = self.block_out_channels[i + 1]
_snake_case = nn.Conv(
A_ , kernel_size=(3, 3) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
blocks.append(A_ )
_snake_case = nn.Conv(
A_ , kernel_size=(3, 3) , strides=(2, 2) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
blocks.append(A_ )
_snake_case = blocks
_snake_case = nn.Conv(
self.conditioning_embedding_channels , kernel_size=(3, 3) , padding=((1, 1), (1, 1)) , kernel_init=nn.initializers.zeros_init() , bias_init=nn.initializers.zeros_init() , dtype=self.dtype , )
def __call__( self : List[Any] , __lowerCamelCase : Optional[int] ):
"""simple docstring"""
_snake_case = self.conv_in(A_ )
_snake_case = nn.silu(A_ )
for block in self.blocks:
_snake_case = block(A_ )
_snake_case = nn.silu(A_ )
_snake_case = self.conv_out(A_ )
return embedding
@flax_register_to_config
class UpperCAmelCase ( nn.Module,__snake_case,__snake_case ):
A__ : int = 32
A__ : int = 4
A__ : Tuple[str] = (
"CrossAttnDownBlock2D",
"CrossAttnDownBlock2D",
"CrossAttnDownBlock2D",
"DownBlock2D",
)
A__ : Union[bool, Tuple[bool]] = False
A__ : Tuple[int] = (320, 640, 1280, 1280)
A__ : int = 2
A__ : Union[int, Tuple[int]] = 8
A__ : Optional[Union[int, Tuple[int]]] = None
A__ : int = 1280
A__ : float = 0.0
A__ : bool = False
A__ : jnp.dtype = jnp.floataa
A__ : bool = True
A__ : int = 0
A__ : str = "rgb"
A__ : Tuple[int] = (16, 32, 96, 256)
def __UpperCAmelCase ( self : Any , __lowerCamelCase : str ):
"""simple docstring"""
_snake_case = (1, self.in_channels, self.sample_size, self.sample_size)
_snake_case = jnp.zeros(A_ , dtype=jnp.floataa )
_snake_case = jnp.ones((1,) , dtype=jnp.intaa )
_snake_case = jnp.zeros((1, 1, self.cross_attention_dim) , dtype=jnp.floataa )
_snake_case = (1, 3, self.sample_size * 8, self.sample_size * 8)
_snake_case = jnp.zeros(A_ , dtype=jnp.floataa )
_snake_case = jax.random.split(A_ )
_snake_case = {"params": params_rng, "dropout": dropout_rng}
return self.init(A_ , A_ , A_ , A_ , A_ )["params"]
def __UpperCAmelCase ( self : List[Any] ):
"""simple docstring"""
_snake_case = self.block_out_channels
_snake_case = block_out_channels[0] * 4
# If `num_attention_heads` is not defined (which is the case for most models)
# it will default to `attention_head_dim`. This looks weird upon first reading it and it is.
# The reason for this behavior is to correct for incorrectly named variables that were introduced
# when this library was created. The incorrect naming was only discovered much later in https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131
# Changing `attention_head_dim` to `num_attention_heads` for 40,000+ configurations is too backwards breaking
# which is why we correct for the naming here.
_snake_case = self.num_attention_heads or self.attention_head_dim
# input
_snake_case = nn.Conv(
block_out_channels[0] , kernel_size=(3, 3) , strides=(1, 1) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
# time
_snake_case = FlaxTimesteps(
block_out_channels[0] , flip_sin_to_cos=self.flip_sin_to_cos , freq_shift=self.config.freq_shift )
_snake_case = FlaxTimestepEmbedding(A_ , dtype=self.dtype )
_snake_case = FlaxControlNetConditioningEmbedding(
conditioning_embedding_channels=block_out_channels[0] , block_out_channels=self.conditioning_embedding_out_channels , )
_snake_case = self.only_cross_attention
if isinstance(A_ , A_ ):
_snake_case = (only_cross_attention,) * len(self.down_block_types )
if isinstance(A_ , A_ ):
_snake_case = (num_attention_heads,) * len(self.down_block_types )
# down
_snake_case = []
_snake_case = []
_snake_case = block_out_channels[0]
_snake_case = nn.Conv(
A_ , kernel_size=(1, 1) , padding='''VALID''' , kernel_init=nn.initializers.zeros_init() , bias_init=nn.initializers.zeros_init() , dtype=self.dtype , )
controlnet_down_blocks.append(A_ )
for i, down_block_type in enumerate(self.down_block_types ):
_snake_case = output_channel
_snake_case = block_out_channels[i]
_snake_case = i == len(A_ ) - 1
if down_block_type == "CrossAttnDownBlock2D":
_snake_case = FlaxCrossAttnDownBlockaD(
in_channels=A_ , out_channels=A_ , dropout=self.dropout , num_layers=self.layers_per_block , num_attention_heads=num_attention_heads[i] , add_downsample=not is_final_block , use_linear_projection=self.use_linear_projection , only_cross_attention=only_cross_attention[i] , dtype=self.dtype , )
else:
_snake_case = FlaxDownBlockaD(
in_channels=A_ , out_channels=A_ , dropout=self.dropout , num_layers=self.layers_per_block , add_downsample=not is_final_block , dtype=self.dtype , )
down_blocks.append(A_ )
for _ in range(self.layers_per_block ):
_snake_case = nn.Conv(
A_ , kernel_size=(1, 1) , padding='''VALID''' , kernel_init=nn.initializers.zeros_init() , bias_init=nn.initializers.zeros_init() , dtype=self.dtype , )
controlnet_down_blocks.append(A_ )
if not is_final_block:
_snake_case = nn.Conv(
A_ , kernel_size=(1, 1) , padding='''VALID''' , kernel_init=nn.initializers.zeros_init() , bias_init=nn.initializers.zeros_init() , dtype=self.dtype , )
controlnet_down_blocks.append(A_ )
_snake_case = down_blocks
_snake_case = controlnet_down_blocks
# mid
_snake_case = block_out_channels[-1]
_snake_case = FlaxUNetMidBlockaDCrossAttn(
in_channels=A_ , dropout=self.dropout , num_attention_heads=num_attention_heads[-1] , use_linear_projection=self.use_linear_projection , dtype=self.dtype , )
_snake_case = nn.Conv(
A_ , kernel_size=(1, 1) , padding='''VALID''' , kernel_init=nn.initializers.zeros_init() , bias_init=nn.initializers.zeros_init() , dtype=self.dtype , )
def __call__( self : Union[str, Any] , __lowerCamelCase : Union[str, Any] , __lowerCamelCase : Union[str, Any] , __lowerCamelCase : int , __lowerCamelCase : Optional[Any] , __lowerCamelCase : Union[str, Any] = 1.0 , __lowerCamelCase : str = True , __lowerCamelCase : Any = False , ):
"""simple docstring"""
_snake_case = self.controlnet_conditioning_channel_order
if channel_order == "bgr":
_snake_case = jnp.flip(A_ , axis=1 )
# 1. time
if not isinstance(A_ , jnp.ndarray ):
_snake_case = jnp.array([timesteps] , dtype=jnp.intaa )
elif isinstance(A_ , jnp.ndarray ) and len(timesteps.shape ) == 0:
_snake_case = timesteps.astype(dtype=jnp.floataa )
_snake_case = jnp.expand_dims(A_ , 0 )
_snake_case = self.time_proj(A_ )
_snake_case = self.time_embedding(A_ )
# 2. pre-process
_snake_case = jnp.transpose(A_ , (0, 2, 3, 1) )
_snake_case = self.conv_in(A_ )
_snake_case = jnp.transpose(A_ , (0, 2, 3, 1) )
_snake_case = self.controlnet_cond_embedding(A_ )
sample += controlnet_cond
# 3. down
_snake_case = (sample,)
for down_block in self.down_blocks:
if isinstance(A_ , A_ ):
_snake_case = down_block(A_ , A_ , A_ , deterministic=not train )
else:
_snake_case = down_block(A_ , A_ , deterministic=not train )
down_block_res_samples += res_samples
# 4. mid
_snake_case = self.mid_block(A_ , A_ , A_ , deterministic=not train )
# 5. contronet blocks
_snake_case = ()
for down_block_res_sample, controlnet_block in zip(A_ , self.controlnet_down_blocks ):
_snake_case = controlnet_block(A_ )
controlnet_down_block_res_samples += (down_block_res_sample,)
_snake_case = controlnet_down_block_res_samples
_snake_case = self.controlnet_mid_block(A_ )
# 6. scaling
_snake_case = [sample * conditioning_scale for sample in down_block_res_samples]
mid_block_res_sample *= conditioning_scale
if not return_dict:
return (down_block_res_samples, mid_block_res_sample)
return FlaxControlNetOutput(
down_block_res_samples=A_ , mid_block_res_sample=A_ )
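# A minimal initialization sketch for the model above, assuming the default
# config values; init_weights builds dummy sample/timestep/text/conditioning
# tensors internally, so only a PRNG key is required.
#
#   import jax
#
#   controlnet = FlaxControlNetModel()
#   params = controlnet.init_weights(rng=jax.random.PRNGKey(0))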
"""simple docstring"""
from __future__ import annotations
def snake_case ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) -> None:
if (direction == 1 and array[indexa] > array[indexa]) or (
direction == 0 and array[indexa] < array[indexa]
):
_snake_case , _snake_case = array[indexa], array[indexa]
def snake_case ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) -> None:
if length > 1:
_snake_case = int(length / 2 )
for i in range(lowerCAmelCase_ , low + middle ):
comp_and_swap(lowerCAmelCase_ , lowerCAmelCase_ , i + middle , lowerCAmelCase_ )
bitonic_merge(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
bitonic_merge(lowerCAmelCase_ , low + middle , lowerCAmelCase_ , lowerCAmelCase_ )
def snake_case ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) -> None:
if length > 1:
_snake_case = int(length / 2 )
bitonic_sort(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , 1 )
bitonic_sort(lowerCAmelCase_ , low + middle , lowerCAmelCase_ , 0 )
bitonic_merge(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
if __name__ == "__main__":
snake_case = input('''Enter numbers separated by a comma:\n''').strip()
snake_case = [int(item.strip()) for item in user_input.split(''',''')]
bitonic_sort(unsorted, 0, len(unsorted), 1)
print('''\nSorted array in ascending order is: ''', end='''''')
print(*unsorted, sep=''', ''')
bitonic_merge(unsorted, 0, len(unsorted), 0)
print('''Sorted array in descending order is: ''', end='''''')
print(*unsorted, sep=''', ''')
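# Bitonic sort only works when the number of elements is a power of two,
# because every recursion level splits the range exactly in half. A quick
# self-check on a fixed power-of-two input (8 elements):
if __name__ == "__main__":
    example = [12, 42, -21, 1, 23, 17, 31, 97]
    bitonic_sort(example, 0, len(example), 1)
    assert example == sorted(example)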
from math import pi


def arc_length(angle: int, radius: int) -> float:
    return 2 * pi * radius * (angle / 360)


if __name__ == "__main__":
    print(arc_length(90, 10))
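# Worked check: a 90-degree arc spans a quarter of the circumference, so
# arc_length(90, 10) = 2 * pi * 10 * (90 / 360) = 5 * pi ≈ 15.7079.
if __name__ == "__main__":
    assert abs(arc_length(90, 10) - 5 * pi) < 1e-12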
def binomial_coefficient(n: int, r: int) -> int:
    c = [0 for i in range(r + 1)]
    # nc0 = 1
    c[0] = 1
    for i in range(1, n + 1):
        # to compute current row from previous row.
        j = min(i, r)
        while j > 0:
            c[j] += c[j - 1]
            j -= 1
    return c[r]


print(binomial_coefficient(n=10, r=5))
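# The loop above fills Pascal's triangle row by row in O(n * r) time using
# O(r) space; binomial_coefficient(n=10, r=5) is 252. Cross-check against
# the standard library (math.comb, Python 3.8+):
from math import comb

assert binomial_coefficient(n=10, r=5) == comb(10, 5) == 252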
import gc
import random
import unittest

import numpy as np
import torch

from transformers import (
    CLIPImageProcessor,
    CLIPTextConfig,
    CLIPTextModel,
    CLIPTokenizer,
    CLIPVisionConfig,
    CLIPVisionModelWithProjection,
)

from diffusers import AutoencoderKL, DDIMScheduler, DDPMScheduler, StableUnCLIPImg2ImgPipeline, UNet2DConditionModel
from diffusers.pipelines.pipeline_utils import DiffusionPipeline
from diffusers.pipelines.stable_diffusion.stable_unclip_image_normalizer import StableUnCLIPImageNormalizer
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import (
    enable_full_determinism,
    floats_tensor,
    load_image,
    load_numpy,
    require_torch_gpu,
    skip_mps,
    slow,
    torch_device,
)

from ..pipeline_params import TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS
from ..test_pipelines_common import (
    PipelineKarrasSchedulerTesterMixin,
    PipelineLatentTesterMixin,
    PipelineTesterMixin,
    assert_mean_pixel_difference,
)


enable_full_determinism()


class StableUnCLIPImg2ImgPipelineFastTests(
    PipelineLatentTesterMixin, PipelineKarrasSchedulerTesterMixin, PipelineTesterMixin, unittest.TestCase
):
    pipeline_class = StableUnCLIPImg2ImgPipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS
    batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
    image_params = frozenset(
        []
    )  # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
    image_latents_params = frozenset([])

    def get_dummy_components(self):
        embedder_hidden_size = 32
        embedder_projection_dim = embedder_hidden_size

        # image encoding components
        feature_extractor = CLIPImageProcessor(crop_size=32, size=32)

        torch.manual_seed(0)
        image_encoder = CLIPVisionModelWithProjection(
            CLIPVisionConfig(
                hidden_size=embedder_hidden_size, projection_dim=embedder_projection_dim, num_hidden_layers=5, num_attention_heads=4, image_size=32, intermediate_size=37, patch_size=1,
            )
        )

        # regular denoising components
        torch.manual_seed(0)
        image_normalizer = StableUnCLIPImageNormalizer(embedding_dim=embedder_hidden_size)
        image_noising_scheduler = DDPMScheduler(beta_schedule="squaredcos_cap_v2")

        torch.manual_seed(0)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        torch.manual_seed(0)
        text_encoder = CLIPTextModel(
            CLIPTextConfig(
                bos_token_id=0, eos_token_id=2, hidden_size=embedder_hidden_size, projection_dim=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000,
            )
        )

        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            sample_size=32, in_channels=4, out_channels=4, down_block_types=("CrossAttnDownBlock2D", "DownBlock2D"), up_block_types=("UpBlock2D", "CrossAttnUpBlock2D"), block_out_channels=(32, 64), attention_head_dim=(2, 4), class_embed_type="projection", projection_class_embeddings_input_dim=embedder_projection_dim * 2, cross_attention_dim=embedder_hidden_size, layers_per_block=1, upcast_attention=True, use_linear_projection=True,
        )

        torch.manual_seed(0)
        scheduler = DDIMScheduler(
            beta_schedule="scaled_linear", beta_start=0.00085, beta_end=0.012, prediction_type="v_prediction", set_alpha_to_one=False, steps_offset=1,
        )

        torch.manual_seed(0)
        vae = AutoencoderKL()

        components = {
            # image encoding components
            "feature_extractor": feature_extractor,
            "image_encoder": image_encoder.eval(),
            # image noising components
            "image_normalizer": image_normalizer.eval(),
            "image_noising_scheduler": image_noising_scheduler,
            # regular denoising components
            "tokenizer": tokenizer,
            "text_encoder": text_encoder.eval(),
            "unet": unet.eval(),
            "scheduler": scheduler,
            "vae": vae.eval(),
        }

        return components

    def get_dummy_inputs(self, device, seed=0, pil_image=True):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)

        input_image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)

        if pil_image:
            input_image = input_image * 0.5 + 0.5
            input_image = input_image.clamp(0, 1)
            input_image = input_image.cpu().permute(0, 2, 3, 1).float().numpy()
            input_image = DiffusionPipeline.numpy_to_pil(input_image)[0]

        return {
            "prompt": "An anime racoon running a marathon",
            "image": input_image,
            "generator": generator,
            "num_inference_steps": 2,
            "output_type": "np",
        }

    @skip_mps
    def test_image_embeds_none(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = StableUnCLIPImg2ImgPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        inputs.update({"image_embeds": None})
        image = sd_pipe(**inputs).images

        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.3872, 0.7224, 0.5601, 0.4741, 0.6872, 0.5814, 0.4636, 0.3867, 0.5078])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3

    def test_attention_slicing_forward_pass(self):
        test_max_difference = torch_device in ["cpu", "mps"]
        self._test_attention_slicing_forward_pass(test_max_difference=test_max_difference)

    def test_inference_batch_single_identical(self):
        test_max_difference = torch_device in ["cpu", "mps"]
        self._test_inference_batch_single_identical(test_max_difference=test_max_difference)

    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available(), reason="XFormers attention is only available with CUDA and `xformers` installed",
    )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(test_max_difference=False)


@slow
@require_torch_gpu
class StableUnCLIPImg2ImgPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_stable_unclip_l_img2img(self):
        input_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/turtle.png"
        )

        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_l_img2img_anime_turtle_fp16.npy"
        )

        pipe = StableUnCLIPImg2ImgPipeline.from_pretrained(
            "fusing/stable-unclip-2-1-l-img2img", torch_dtype=torch.float16
        )
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        # stable unclip will oom when integration tests are run on a V100,
        # so turn on memory savings
        pipe.enable_attention_slicing()
        pipe.enable_sequential_cpu_offload()

        generator = torch.Generator(device="cpu").manual_seed(0)
        output = pipe(input_image, "anime turtle", generator=generator, output_type="np")

        image = output.images[0]

        assert image.shape == (768, 768, 3)

        assert_mean_pixel_difference(image, expected_image)

    def test_stable_unclip_h_img2img(self):
        input_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/turtle.png"
        )

        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_h_img2img_anime_turtle_fp16.npy"
        )

        pipe = StableUnCLIPImg2ImgPipeline.from_pretrained(
            "fusing/stable-unclip-2-1-h-img2img", torch_dtype=torch.float16
        )
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        # stable unclip will oom when integration tests are run on a V100,
        # so turn on memory savings
        pipe.enable_attention_slicing()
        pipe.enable_sequential_cpu_offload()

        generator = torch.Generator(device="cpu").manual_seed(0)
        output = pipe(input_image, "anime turtle", generator=generator, output_type="np")

        image = output.images[0]

        assert image.shape == (768, 768, 3)

        assert_mean_pixel_difference(image, expected_image)

    def test_stable_unclip_img2img_pipeline_with_sequential_cpu_offloading(self):
        input_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/turtle.png"
        )

        torch.cuda.empty_cache()
        torch.cuda.reset_max_memory_allocated()
        torch.cuda.reset_peak_memory_stats()

        pipe = StableUnCLIPImg2ImgPipeline.from_pretrained(
            "fusing/stable-unclip-2-1-h-img2img", torch_dtype=torch.float16
        )
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()
        pipe.enable_sequential_cpu_offload()

        _ = pipe(
            input_image, "anime turtle", num_inference_steps=2, output_type="np",
        )

        mem_bytes = torch.cuda.max_memory_allocated()
        # make sure that less than 7 GB is allocated
        assert mem_bytes < 7 * 10**9
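# Note on the offloading test above: it resets the CUDA allocator statistics,
# runs the pipeline once, then reads the peak via torch.cuda.max_memory_allocated().
# enable_sequential_cpu_offload() moves each submodule to the GPU only while it
# is executing, which is what keeps the measured peak under the 7 GB budget.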
import warnings

from ...utils import logging
from .image_processing_layoutlmv2 import LayoutLMv2ImageProcessor


logger = logging.get_logger(__name__)


class LayoutLMv2FeatureExtractor(LayoutLMv2ImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class LayoutLMv2FeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use LayoutLMv2ImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
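# The same thin-alias pattern can deprecate any renamed class: subclass the new
# implementation, warn on construction, and delegate everything else. A generic
# sketch (OldName/NewName are illustrative, not part of the library):
#
#   import warnings
#
#   class OldName(NewName):
#       def __init__(self, *args, **kwargs):
#           warnings.warn("OldName is deprecated; use NewName instead.", FutureWarning)
#           super().__init__(*args, **kwargs)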
def capitalize_variations(txt: str) -> list:
    """
    Return every variant of `txt` in which exactly one alphabetic
    character has been converted to uppercase.

    >>> capitalize_variations("abc")
    ['Abc', 'aBc', 'abC']
    """
    return [
        txt[:a] + txt[a].upper() + txt[a + 1 :]
        for a in range(len(txt))
        if txt[a].isalpha()
    ]


if __name__ == "__main__":
    __import__("doctest").testmod()
import json
import os
import re
import unittest

from transformers import CodeGenTokenizer, CodeGenTokenizerFast
from transformers.models.codegen.tokenization_codegen import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, slow

from ...test_tokenization_common import TokenizerTesterMixin


@require_tokenizers
class CodeGenTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = CodeGenTokenizer
    rust_tokenizer_class = CodeGenTokenizerFast
    test_rust_tokenizer = True
    from_pretrained_kwargs = {"add_prefix_space": True}
    test_seq2seq = False

    def setUp(self):
        super().setUp()

        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = [
            "l",
            "o",
            "w",
            "e",
            "r",
            "s",
            "t",
            "i",
            "d",
            "n",
            "\u0120",
            "\u0120l",
            "\u0120n",
            "\u0120lo",
            "\u0120low",
            "er",
            "\u0120lowest",
            "\u0120newer",
            "\u0120wider",
            "<unk>",
            "<|endoftext|>",
        ]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))

    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return CodeGenTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return CodeGenTokenizerFast.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "lower newer"
        output_text = "lower newer"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = CodeGenTokenizer(self.vocab_file, self.merges_file, **self.special_tokens_map)
        text = "lower newer"
        bpe_tokens = ["\u0120low", "er", "\u0120", "n", "e", "w", "er"]
        tokens = tokenizer.tokenize(text, add_prefix_space=True)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [14, 15, 10, 9, 3, 2, 15, 19]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)

    def test_rust_and_python_full_tokenizers(self):
        if not self.test_rust_tokenizer:
            return

        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer(add_prefix_space=True)

        sequence = "lower newer"

        # Testing tokenization
        tokens = tokenizer.tokenize(sequence, add_prefix_space=True)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)

        # Testing conversion to ids without special tokens
        ids = tokenizer.encode(sequence, add_special_tokens=False, add_prefix_space=True)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        # Testing conversion to ids with special tokens
        rust_tokenizer = self.get_rust_tokenizer(add_prefix_space=True)
        ids = tokenizer.encode(sequence, add_prefix_space=True)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)

        # Testing the unknown token
        input_tokens = tokens + [rust_tokenizer.unk_token]
        input_bpe_tokens = [14, 15, 10, 9, 3, 2, 15, 19]
        self.assertListEqual(rust_tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)

    def test_pretokenized_inputs(self, *args, **kwargs):
        # It's very difficult to mix/test pretokenization with byte-level
        # And get both CodeGen and Roberta to work at the same time (mostly an issue of adding a space before the string)
        pass

    def test_padding(self, max_length=15):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                # Simple input
                s = "This is a simple input"
                s2 = ["This is a simple input 1", "This is a simple input 2"]
                p = ("This is a simple input", "This is a pair")
                p2 = [
                    ("This is a simple input 1", "This is a simple input 2"),
                    ("This is a simple pair 1", "This is a simple pair 2"),
                ]

                # Simple input tests
                self.assertRaises(ValueError, tokenizer_r.encode, s, max_length=max_length, padding="max_length")

                # Simple input
                self.assertRaises(ValueError, tokenizer_r.encode_plus, s, max_length=max_length, padding="max_length")

                # Simple input
                self.assertRaises(
                    ValueError, tokenizer_r.batch_encode_plus, s2, max_length=max_length, padding="max_length",
                )

                # Pair input
                self.assertRaises(ValueError, tokenizer_r.encode, p, max_length=max_length, padding="max_length")

                # Pair input
                self.assertRaises(ValueError, tokenizer_r.encode_plus, p, max_length=max_length, padding="max_length")

                # Pair input
                self.assertRaises(
                    ValueError, tokenizer_r.batch_encode_plus, p2, max_length=max_length, padding="max_length",
                )

    def test_padding_if_pad_token_set_slow(self):
        tokenizer = CodeGenTokenizer.from_pretrained(self.tmpdirname, pad_token="<pad>")

        # Simple input
        s = "This is a simple input"
        s2 = ["This is a simple input looooooooong", "This is a simple input"]
        p = ("This is a simple input", "This is a pair")
        p2 = [
            ("This is a simple input loooooong", "This is a simple input"),
            ("This is a simple pair loooooong", "This is a simple pair"),
        ]

        pad_token_id = tokenizer.pad_token_id

        out_s = tokenizer(s, padding="max_length", max_length=30, return_tensors="np")
        out_s2 = tokenizer(s2, padding=True, truncate=True, return_tensors="np")
        out_p = tokenizer(*p, padding="max_length", max_length=60, return_tensors="np")
        out_p2 = tokenizer(p2, padding=True, truncate=True, return_tensors="np")

        # s
        # test single string max_length padding
        self.assertEqual(out_s["input_ids"].shape[-1], 30)
        self.assertTrue(pad_token_id in out_s["input_ids"])
        self.assertTrue(0 in out_s["attention_mask"])

        # s2
        # test automatic padding
        self.assertEqual(out_s2["input_ids"].shape[-1], 33)
        # long slice doesn't have padding
        self.assertFalse(pad_token_id in out_s2["input_ids"][0])
        self.assertFalse(0 in out_s2["attention_mask"][0])
        # short slice does have padding
        self.assertTrue(pad_token_id in out_s2["input_ids"][1])
        self.assertTrue(0 in out_s2["attention_mask"][1])

        # p
        # test single pair max_length padding
        self.assertEqual(out_p["input_ids"].shape[-1], 60)
        self.assertTrue(pad_token_id in out_p["input_ids"])
        self.assertTrue(0 in out_p["attention_mask"])

        # p2
        # test automatic padding pair
        self.assertEqual(out_p2["input_ids"].shape[-1], 52)
        # long slice pair doesn't have padding
        self.assertFalse(pad_token_id in out_p2["input_ids"][0])
        self.assertFalse(0 in out_p2["attention_mask"][0])
        # short slice pair does have padding
        self.assertTrue(pad_token_id in out_p2["input_ids"][1])
        self.assertTrue(0 in out_p2["attention_mask"][1])

    def test_add_bos_token_slow(self):
        bos_token = "$$$"
        tokenizer = CodeGenTokenizer.from_pretrained(self.tmpdirname, bos_token=bos_token, add_bos_token=True)

        s = "This is a simple input"
        s2 = ["This is a simple input 1", "This is a simple input 2"]

        bos_token_id = tokenizer.bos_token_id

        out_s = tokenizer(s)
        out_s2 = tokenizer(s2)

        self.assertEqual(out_s.input_ids[0], bos_token_id)
        self.assertTrue(all(o[0] == bos_token_id for o in out_s2.input_ids))

        decode_s = tokenizer.decode(out_s.input_ids)
        decode_s2 = tokenizer.batch_decode(out_s2.input_ids)

        self.assertEqual(decode_s.split()[0], bos_token)
        self.assertTrue(all(d.split()[0] == bos_token for d in decode_s2))

    @slow
    def test_truncation(self):
        tokenizer = CodeGenTokenizer.from_pretrained("Salesforce/codegen-350M-mono")

        text = "\nif len_a > len_b:\n    result = a\nelse:\n    result = b\n\n\n\n#"
        expected_truncated_text = "\nif len_a > len_b:\n    result = a\nelse:\n    result = b"

        input_ids = tokenizer.encode(text)
        truncate_before_pattern = ["^#", re.escape("<|endoftext|>"), "^'''", '^"""', "\n\n\n"]
        decoded_text = tokenizer.decode(input_ids, truncate_before_pattern=truncate_before_pattern)

        self.assertEqual(decoded_text, expected_truncated_text)

    # tokenizer has no padding token
    def test_padding_different_model_input_name(self):
        pass
from argparse import ArgumentParser

from datasets.commands.convert import ConvertCommand
from datasets.commands.dummy_data import DummyDataCommand
from datasets.commands.env import EnvironmentCommand
from datasets.commands.run_beam import RunBeamCommand
from datasets.commands.test import TestCommand
from datasets.utils.logging import set_verbosity_info


def parse_unknown_args(unknown_args):
    return {key.lstrip("-"): value for key, value in zip(unknown_args[::2], unknown_args[1::2])}


def main():
    parser = ArgumentParser(
        "HuggingFace Datasets CLI tool", usage="datasets-cli <command> [<args>]", allow_abbrev=False
    )
    commands_parser = parser.add_subparsers(help="datasets-cli command helpers")
    set_verbosity_info()

    # Register commands
    ConvertCommand.register_subcommand(commands_parser)
    EnvironmentCommand.register_subcommand(commands_parser)
    TestCommand.register_subcommand(commands_parser)
    RunBeamCommand.register_subcommand(commands_parser)
    DummyDataCommand.register_subcommand(commands_parser)

    # Parse args
    args, unknown_args = parser.parse_known_args()
    if not hasattr(args, "func"):
        parser.print_help()
        exit(1)
    kwargs = parse_unknown_args(unknown_args)

    # Run
    service = args.func(args, **kwargs)
    service.run()


if __name__ == "__main__":
    main()
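# Typical invocations of the entry point above, one per registered subcommand
# (exact flags depend on the installed `datasets` version):
#
#   datasets-cli env
#   datasets-cli test ./my_dataset --save_infos --all_configs
#
# Any flag a subcommand does not declare itself is swept up by
# parse_unknown_args() and forwarded to the command factory (`args.func`)
# as a keyword argument.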
'''simple docstring'''
import collections
import inspect
import unittest
from transformers import SwinvaConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import SwinvaForImageClassification, SwinvaForMaskedImageModeling, SwinvaModel
from transformers.models.swinva.modeling_swinva import SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class a :
def __init__( self , __magic_name__ , __magic_name__=13 , __magic_name__=32 , __magic_name__=2 , __magic_name__=3 , __magic_name__=16 , __magic_name__=[1, 2, 1] , __magic_name__=[2, 2, 4] , __magic_name__=2 , __magic_name__=2.0 , __magic_name__=True , __magic_name__=0.0 , __magic_name__=0.0 , __magic_name__=0.1 , __magic_name__="gelu" , __magic_name__=False , __magic_name__=True , __magic_name__=0.0_2 , __magic_name__=1e-5 , __magic_name__=True , __magic_name__=None , __magic_name__=True , __magic_name__=10 , __magic_name__=8 , ) -> Optional[int]:
_a = parent
_a = batch_size
_a = image_size
_a = patch_size
_a = num_channels
_a = embed_dim
_a = depths
_a = num_heads
_a = window_size
_a = mlp_ratio
_a = qkv_bias
_a = hidden_dropout_prob
_a = attention_probs_dropout_prob
_a = drop_path_rate
_a = hidden_act
_a = use_absolute_embeddings
_a = patch_norm
_a = layer_norm_eps
_a = initializer_range
_a = is_training
_a = scope
_a = use_labels
_a = type_sequence_label_size
_a = encoder_stride
def __UpperCAmelCase ( self ) -> List[Any]:
_a = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
_a = None
if self.use_labels:
_a = ids_tensor([self.batch_size] , self.type_sequence_label_size )
_a = self.get_config()
return config, pixel_values, labels
def __UpperCAmelCase ( self ) -> int:
return SwinvaConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , embed_dim=self.embed_dim , depths=self.depths , num_heads=self.num_heads , window_size=self.window_size , mlp_ratio=self.mlp_ratio , qkv_bias=self.qkv_bias , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , drop_path_rate=self.drop_path_rate , hidden_act=self.hidden_act , use_absolute_embeddings=self.use_absolute_embeddings , path_norm=self.patch_norm , layer_norm_eps=self.layer_norm_eps , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , )
def __UpperCAmelCase ( self , __magic_name__ , __magic_name__ , __magic_name__ ) -> Union[str, Any]:
_a = SwinvaModel(config=__magic_name__ )
model.to(__magic_name__ )
model.eval()
_a = model(__magic_name__ )
_a = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths ) - 1))
_a = int(config.embed_dim * 2 ** (len(config.depths ) - 1) )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, expected_seq_len, expected_dim) )
def __UpperCAmelCase ( self , __magic_name__ , __magic_name__ , __magic_name__ ) -> str:
_a = SwinvaForMaskedImageModeling(config=__magic_name__ )
model.to(__magic_name__ )
model.eval()
_a = model(__magic_name__ )
self.parent.assertEqual(
result.logits.shape , (self.batch_size, self.num_channels, self.image_size, self.image_size) )
# test greyscale images
_a = 1
_a = SwinvaForMaskedImageModeling(__magic_name__ )
model.to(__magic_name__ )
model.eval()
_a = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
_a = model(__magic_name__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, 1, self.image_size, self.image_size) )
def __UpperCAmelCase ( self , __magic_name__ , __magic_name__ , __magic_name__ ) -> Optional[Any]:
_a = self.type_sequence_label_size
_a = SwinvaForImageClassification(__magic_name__ )
model.to(__magic_name__ )
model.eval()
_a = model(__magic_name__ , labels=__magic_name__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def __UpperCAmelCase ( self ) -> List[str]:
_a = self.prepare_config_and_inputs()
_a , _a , _a = config_and_inputs
_a = {'pixel_values': pixel_values}
return config, inputs_dict
@require_torch
class a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , unittest.TestCase ):
_lowerCAmelCase = (
(SwinvaModel, SwinvaForImageClassification, SwinvaForMaskedImageModeling) if is_torch_available() else ()
)
_lowerCAmelCase = (
{"""feature-extraction""": SwinvaModel, """image-classification""": SwinvaForImageClassification}
if is_torch_available()
else {}
)
_lowerCAmelCase = False
_lowerCAmelCase = False
_lowerCAmelCase = False
_lowerCAmelCase = False
def __UpperCAmelCase ( self ) -> List[str]:
_a = SwinvaModelTester(self )
_a = ConfigTester(self , config_class=__magic_name__ , embed_dim=37 )
def __UpperCAmelCase ( self ) -> int:
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def __UpperCAmelCase ( self ) -> Optional[Any]:
_a = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__magic_name__ )
@unittest.skip(reason='Got `CUDA error: misaligned address` with PyTorch 2.0.0.' )
def __UpperCAmelCase ( self ) -> Optional[int]:
pass
@unittest.skip(reason='Swinv2 does not use inputs_embeds' )
def __UpperCAmelCase ( self ) -> Tuple:
pass
def __UpperCAmelCase ( self ) -> str:
_a , _a = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_a = model_class(__magic_name__ )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
_a = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(__magic_name__ , nn.Linear ) )
def __UpperCAmelCase ( self ) -> Any:
_a , _a = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_a = model_class(__magic_name__ )
_a = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_a = [*signature.parameters.keys()]
_a = ['pixel_values']
self.assertListEqual(arg_names[:1] , __magic_name__ )
def __UpperCAmelCase ( self ) -> Union[str, Any]:
_a , _a = self.model_tester.prepare_config_and_inputs_for_common()
_a = True
for model_class in self.all_model_classes:
_a = True
_a = False
_a = True
_a = model_class(__magic_name__ )
model.to(__magic_name__ )
model.eval()
with torch.no_grad():
_a = model(**self._prepare_for_class(__magic_name__ , __magic_name__ ) )
_a = outputs.attentions
_a = len(self.model_tester.depths )
self.assertEqual(len(__magic_name__ ) , __magic_name__ )
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
_a = True
_a = config.window_size**2
_a = model_class(__magic_name__ )
model.to(__magic_name__ )
model.eval()
with torch.no_grad():
_a = model(**self._prepare_for_class(__magic_name__ , __magic_name__ ) )
_a = outputs.attentions
self.assertEqual(len(__magic_name__ ) , __magic_name__ )
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_heads[0], window_size_squared, window_size_squared] , )
_a = len(__magic_name__ )
# Check attention is always last and order is fine
_a = True
_a = True
_a = model_class(__magic_name__ )
model.to(__magic_name__ )
model.eval()
with torch.no_grad():
_a = model(**self._prepare_for_class(__magic_name__ , __magic_name__ ) )
if hasattr(self.model_tester , 'num_hidden_states_types' ):
_a = self.model_tester.num_hidden_states_types
else:
# also another +1 for reshaped_hidden_states
_a = 2
self.assertEqual(out_len + added_hidden_states , len(__magic_name__ ) )
_a = outputs.attentions
self.assertEqual(len(__magic_name__ ) , __magic_name__ )
self.assertListEqual(
list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_heads[0], window_size_squared, window_size_squared] , )
def __UpperCAmelCase ( self , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ ) -> List[Any]:
_a = model_class(__magic_name__ )
model.to(__magic_name__ )
model.eval()
with torch.no_grad():
_a = model(**self._prepare_for_class(__magic_name__ , __magic_name__ ) )
_a = outputs.hidden_states
_a = getattr(
self.model_tester , 'expected_num_hidden_layers' , len(self.model_tester.depths ) + 1 )
self.assertEqual(len(__magic_name__ ) , __magic_name__ )
# Swinv2 has a different seq_length
_a = (
config.patch_size
if isinstance(config.patch_size , collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
_a = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , )
_a = outputs.reshaped_hidden_states
self.assertEqual(len(__magic_name__ ) , __magic_name__ )
_a , _a , _a , _a = reshaped_hidden_states[0].shape
_a = (
reshaped_hidden_states[0].view(__magic_name__ , __magic_name__ , height * width ).permute(0 , 2 , 1 )
)
self.assertListEqual(
list(reshaped_hidden_states.shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , )
def __UpperCAmelCase ( self ) -> Union[str, Any]:
_a , _a = self.model_tester.prepare_config_and_inputs_for_common()
_a = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size , collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
for model_class in self.all_model_classes:
_a = True
self.check_hidden_states_output(__magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
_a = True
self.check_hidden_states_output(__magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ )
def __UpperCAmelCase ( self ) -> Optional[Any]:
_a , _a = self.model_tester.prepare_config_and_inputs_for_common()
_a = 3
_a = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size , collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
_a = (
config.patch_size
if isinstance(config.patch_size , collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
_a = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0])
_a = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1])
for model_class in self.all_model_classes:
_a = True
self.check_hidden_states_output(__magic_name__ , __magic_name__ , __magic_name__ , (padded_height, padded_width) )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
_a = True
self.check_hidden_states_output(__magic_name__ , __magic_name__ , __magic_name__ , (padded_height, padded_width) )
def __UpperCAmelCase ( self ) -> Tuple:
_a = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_image_modeling(*__magic_name__ )
def __UpperCAmelCase ( self ) -> str:
_a = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*__magic_name__ )
@slow
def __UpperCAmelCase ( self ) -> str:
for model_name in SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_a = SwinvaModel.from_pretrained(model_name)
self.assertIsNotNone(__magic_name__ )
def __UpperCAmelCase ( self ) -> Optional[int]:
_a , _a = self.model_tester.prepare_config_and_inputs_for_common()
_a = _config_zero_init(__magic_name__ )
for model_class in self.all_model_classes:
_a = model_class(config=__magic_name__ )
for name, param in model.named_parameters():
if "embeddings" not in name and "logit_scale" not in name and param.requires_grad:
self.assertIn(
((param.data.mean() * 1e9).round() / 1e9).item() , [0.0, 1.0] , msg=f'Parameter {name} of model {model_class} seems not properly initialized' , )
@require_vision
@require_torch
class a ( unittest.TestCase ):
@cached_property
def __UpperCAmelCase ( self ) -> Optional[int]:
return (
AutoImageProcessor.from_pretrained('microsoft/swinv2-tiny-patch4-window8-256' )
if is_vision_available()
else None
)
@slow
def __UpperCAmelCase ( self ) -> Optional[int]:
_a = SwinvaForImageClassification.from_pretrained('microsoft/swinv2-tiny-patch4-window8-256' ).to(
__magic_name__ )
_a = self.default_image_processor
_a = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
_a = image_processor(images=__magic_name__ , return_tensors='pt' ).to(__magic_name__ )
# forward pass
with torch.no_grad():
_a = model(**__magic_name__ )
# verify the logits
_a = torch.Size((1, 10_00) )
self.assertEqual(outputs.logits.shape , __magic_name__ )
_a = torch.tensor([-0.3_9_4_7, -0.4_3_0_6, 0.0_0_2_6] ).to(__magic_name__ )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , __magic_name__ , atol=1e-4 ) )
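# Illustrative sketch, not part of the test suite above: end-to-end classification
# with the checkpoint the integration test exercises. Upstream class names
# (AutoImageProcessor, Swinv2ForImageClassification) are assumed here, since the
# imports in this file are aliased; requires network access to the Hub.
def _example_swinv2_predict() -> str:
    from PIL import Image
    from transformers import AutoImageProcessor, Swinv2ForImageClassification

    checkpoint = "microsoft/swinv2-tiny-patch4-window8-256"
    processor = AutoImageProcessor.from_pretrained(checkpoint)
    model = Swinv2ForImageClassification.from_pretrained(checkpoint)
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    inputs = processor(images=image, return_tensors="pt")
    logits = model(**inputs).logits
    return model.config.id2label[logits.argmax(-1).item()]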
| 532 | 1 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCAmelCase_ = logging.get_logger(__name__)
UpperCAmelCase_ = {
"SCUT-DLVCLab/lilt-roberta-en-base": (
"https://huggingface.co/SCUT-DLVCLab/lilt-roberta-en-base/resolve/main/config.json"
),
}
class LiltConfig(PretrainedConfig):
    model_type = "lilt"

    def __init__(self, vocab_size=30522, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=2, initializer_range=0.02, layer_norm_eps=1e-12, pad_token_id=0, position_embedding_type="absolute", classifier_dropout=None, channel_shrink_ratio=4, max_2d_position_embeddings=1024, **kwargs):
        super().__init__(pad_token_id=pad_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.classifier_dropout = classifier_dropout
        self.channel_shrink_ratio = channel_shrink_ratio
        self.max_2d_position_embeddings = max_2d_position_embeddings
| 32 |
import inspect
import unittest
from transformers import DPTConfig
from transformers.file_utils import is_torch_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import MODEL_MAPPING, DPTForDepthEstimation, DPTForSemanticSegmentation, DPTModel
from transformers.models.dpt.modeling_dpt import DPT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import DPTImageProcessor
class lowerCamelCase :
def __init__( self , lowercase__ , lowercase__=2 , lowercase__=3_2 , lowercase__=1_6 , lowercase__=3 , lowercase__=True , lowercase__=True , lowercase__=3_2 , lowercase__=4 , lowercase__=[0, 1, 2, 3] , lowercase__=4 , lowercase__=3_7 , lowercase__="gelu" , lowercase__=0.1 , lowercase__=0.1 , lowercase__=0.0_2 , lowercase__=3 , lowercase__=[1, 3_8_4, 2_4, 2_4] , lowercase__=True , lowercase__=None , ):
__UpperCAmelCase : Any = parent
__UpperCAmelCase : Tuple = batch_size
__UpperCAmelCase : Optional[int] = image_size
__UpperCAmelCase : Tuple = patch_size
__UpperCAmelCase : int = num_channels
__UpperCAmelCase : Optional[Any] = is_training
__UpperCAmelCase : Union[str, Any] = use_labels
__UpperCAmelCase : List[Any] = hidden_size
__UpperCAmelCase : Optional[Any] = num_hidden_layers
__UpperCAmelCase : Any = backbone_out_indices
__UpperCAmelCase : Union[str, Any] = num_attention_heads
__UpperCAmelCase : Dict = intermediate_size
__UpperCAmelCase : Dict = hidden_act
__UpperCAmelCase : Dict = hidden_dropout_prob
__UpperCAmelCase : List[Any] = attention_probs_dropout_prob
__UpperCAmelCase : List[Any] = initializer_range
__UpperCAmelCase : Union[str, Any] = num_labels
__UpperCAmelCase : List[Any] = backbone_featmap_shape
__UpperCAmelCase : Any = scope
__UpperCAmelCase : Optional[int] = is_hybrid
# sequence length of DPT = num_patches + 1 (we add 1 for the [CLS] token)
__UpperCAmelCase : Optional[Any] = (image_size // patch_size) ** 2
__UpperCAmelCase : Any = num_patches + 1
def A( self):
__UpperCAmelCase : Tuple = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
__UpperCAmelCase : List[str] = None
if self.use_labels:
__UpperCAmelCase : Dict = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels)
__UpperCAmelCase : Dict = self.get_config()
return config, pixel_values, labels
def A( self):
__UpperCAmelCase : Dict = {
'''global_padding''': '''same''',
'''layer_type''': '''bottleneck''',
'''depths''': [3, 4, 9],
'''out_features''': ['''stage1''', '''stage2''', '''stage3'''],
'''embedding_dynamic_padding''': True,
'''hidden_sizes''': [9_6, 1_9_2, 3_8_4, 7_6_8],
'''num_groups''': 2,
}
return DPTConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , backbone_out_indices=self.backbone_out_indices , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=lowercase__ , initializer_range=self.initializer_range , is_hybrid=self.is_hybrid , backbone_config=lowercase__ , backbone_featmap_shape=self.backbone_featmap_shape , )
def A( self , lowercase__ , lowercase__ , lowercase__):
__UpperCAmelCase : List[str] = DPTModel(config=lowercase__)
model.to(lowercase__)
model.eval()
__UpperCAmelCase : Dict = model(lowercase__)
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size))
def A( self , lowercase__ , lowercase__ , lowercase__):
__UpperCAmelCase : List[str] = self.num_labels
__UpperCAmelCase : Optional[Any] = DPTForDepthEstimation(lowercase__)
model.to(lowercase__)
model.eval()
__UpperCAmelCase : str = model(lowercase__)
self.parent.assertEqual(result.predicted_depth.shape , (self.batch_size, self.image_size, self.image_size))
def A( self , lowercase__ , lowercase__ , lowercase__):
__UpperCAmelCase : Tuple = self.num_labels
__UpperCAmelCase : Optional[int] = DPTForSemanticSegmentation(lowercase__)
model.to(lowercase__)
model.eval()
__UpperCAmelCase : str = model(lowercase__ , labels=lowercase__)
self.parent.assertEqual(
result.logits.shape , (self.batch_size, self.num_labels, self.image_size, self.image_size))
def A( self):
__UpperCAmelCase : int = self.prepare_config_and_inputs()
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase : Optional[int] = config_and_inputs
__UpperCAmelCase : Union[str, Any] = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_torch
class lowerCamelCase ( _UpperCamelCase , _UpperCamelCase , unittest.TestCase ):
_lowerCAmelCase : Union[str, Any] = (DPTModel, DPTForDepthEstimation, DPTForSemanticSegmentation) if is_torch_available() else ()
_lowerCAmelCase : Optional[int] = (
{
'''depth-estimation''': DPTForDepthEstimation,
'''feature-extraction''': DPTModel,
'''image-segmentation''': DPTForSemanticSegmentation,
}
if is_torch_available()
else {}
)
_lowerCAmelCase : Any = False
_lowerCAmelCase : str = False
_lowerCAmelCase : List[Any] = False
def A( self):
__UpperCAmelCase : Any = DPTModelTester(self)
__UpperCAmelCase : str = ConfigTester(self , config_class=lowercase__ , has_text_modality=lowercase__ , hidden_size=3_7)
def A( self):
self.config_tester.run_common_tests()
@unittest.skip(reason='''DPT does not use inputs_embeds''')
def A( self):
pass
def A( self):
__UpperCAmelCase , __UpperCAmelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__UpperCAmelCase : str = model_class(lowercase__)
self.assertIsInstance(model.get_input_embeddings() , (nn.Module))
__UpperCAmelCase : Tuple = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(lowercase__ , nn.Linear))
def A( self):
__UpperCAmelCase , __UpperCAmelCase : Any = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__UpperCAmelCase : Optional[int] = model_class(lowercase__)
__UpperCAmelCase : int = inspect.signature(model.forward)
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__UpperCAmelCase : List[str] = [*signature.parameters.keys()]
__UpperCAmelCase : str = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , lowercase__)
def A( self):
__UpperCAmelCase : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowercase__)
def A( self):
__UpperCAmelCase : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_depth_estimation(*lowercase__)
def A( self):
__UpperCAmelCase : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_semantic_segmentation(*lowercase__)
def A( self):
for model_class in self.all_model_classes:
if model_class.__name__ == "DPTForDepthEstimation":
continue
__UpperCAmelCase , __UpperCAmelCase : str = self.model_tester.prepare_config_and_inputs_for_common()
__UpperCAmelCase : Any = True
if model_class in get_values(lowercase__):
continue
__UpperCAmelCase : List[Any] = model_class(lowercase__)
model.to(lowercase__)
model.train()
__UpperCAmelCase : Any = self._prepare_for_class(lowercase__ , lowercase__ , return_labels=lowercase__)
__UpperCAmelCase : Any = model(**lowercase__).loss
loss.backward()
def A( self):
for model_class in self.all_model_classes:
if model_class.__name__ == "DPTForDepthEstimation":
continue
__UpperCAmelCase , __UpperCAmelCase : int = self.model_tester.prepare_config_and_inputs_for_common()
__UpperCAmelCase : Optional[int] = False
__UpperCAmelCase : str = True
if model_class in get_values(lowercase__) or not model_class.supports_gradient_checkpointing:
continue
__UpperCAmelCase : Tuple = model_class(lowercase__)
model.to(lowercase__)
model.gradient_checkpointing_enable()
model.train()
__UpperCAmelCase : Tuple = self._prepare_for_class(lowercase__ , lowercase__ , return_labels=lowercase__)
__UpperCAmelCase : Union[str, Any] = model(**lowercase__).loss
loss.backward()
def A( self):
__UpperCAmelCase , __UpperCAmelCase : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
__UpperCAmelCase : List[Any] = _config_zero_init(lowercase__)
for model_class in self.all_model_classes:
__UpperCAmelCase : Tuple = model_class(config=lowercase__)
# Skip the check for the backbone
__UpperCAmelCase : List[Any] = []
for name, module in model.named_modules():
if module.__class__.__name__ == "DPTViTHybridEmbeddings":
__UpperCAmelCase : Optional[Any] = [F"{name}.{key}" for key in module.state_dict().keys()]
break
for name, param in model.named_parameters():
if param.requires_grad:
if name in backbone_params:
continue
self.assertIn(
((param.data.mean() * 1e9).round() / 1e9).item() , [0.0, 1.0] , msg=F"Parameter {name} of model {model_class} seems not properly initialized" , )
@unittest.skip('''Will be fixed soon by reducing the size of the model used for common tests.''')
def A( self):
pass
@slow
def A( self):
for model_name in DPT_PRETRAINED_MODEL_ARCHIVE_LIST[1:]:
__UpperCAmelCase : Optional[int] = DPTModel.from_pretrained(model_name)
self.assertIsNotNone(lowercase__)
def A( self):
# We do this test only for DPTForDepthEstimation since it is the only model that uses readout_type
__UpperCAmelCase , __UpperCAmelCase : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
__UpperCAmelCase : List[str] = '''add'''
with self.assertRaises(lowercase__):
__UpperCAmelCase : Optional[Any] = DPTForDepthEstimation(lowercase__)
def prepare_img():
    """Load the COCO test fixture used by the integration test below."""
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
@require_vision
@slow
class lowerCamelCase ( unittest.TestCase ):
def A( self):
__UpperCAmelCase : str = DPTImageProcessor.from_pretrained('''Intel/dpt-hybrid-midas''')
__UpperCAmelCase : str = DPTForDepthEstimation.from_pretrained('''Intel/dpt-hybrid-midas''').to(lowercase__)
__UpperCAmelCase : List[str] = prepare_img()
__UpperCAmelCase : Tuple = image_processor(images=lowercase__ , return_tensors='''pt''').to(lowercase__)
# forward pass
with torch.no_grad():
__UpperCAmelCase : List[Any] = model(**lowercase__)
__UpperCAmelCase : str = outputs.predicted_depth
# verify the predicted depth
__UpperCAmelCase : Union[str, Any] = torch.Size((1, 3_8_4, 3_8_4))
self.assertEqual(predicted_depth.shape , lowercase__)
__UpperCAmelCase : List[str] = torch.tensor(
[[[5.6_4_3_7, 5.6_1_4_6, 5.6_5_1_1], [5.4_3_7_1, 5.5_6_4_9, 5.5_9_5_8], [5.5_2_1_5, 5.5_1_8_4, 5.5_2_9_3]]]).to(lowercase__)
self.assertTrue(torch.allclose(outputs.predicted_depth[:3, :3, :3] / 1_0_0 , lowercase__ , atol=1e-4))
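# Illustrative sketch, not part of the test suite above: converting the predicted
# depth map into an 8-bit grayscale image, following the usual DPT post-processing
# recipe. Upstream API names are used; requires network access to the Hub.
def _example_depth_to_image():
    import torch
    from PIL import Image
    from transformers import DPTForDepthEstimation, DPTImageProcessor

    processor = DPTImageProcessor.from_pretrained("Intel/dpt-hybrid-midas")
    model = DPTForDepthEstimation.from_pretrained("Intel/dpt-hybrid-midas")
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    inputs = processor(images=image, return_tensors="pt")
    with torch.no_grad():
        depth = model(**inputs).predicted_depth
    # upsample to the input resolution, then rescale to 0-255
    depth = torch.nn.functional.interpolate(
        depth.unsqueeze(1), size=image.size[::-1], mode="bicubic", align_corners=False
    ).squeeze()
    return Image.fromarray((depth * 255 / depth.max()).cpu().numpy().astype("uint8"))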
| 462 | 0 |
import importlib
import inspect
import json
import os
import re
import shutil
import sys
from pathlib import Path
from typing import Dict, Optional, Union
from urllib import request
from huggingface_hub import HfFolder, cached_download, hf_hub_download, model_info
from packaging import version
from .. import __version__
from . import DIFFUSERS_DYNAMIC_MODULE_NAME, HF_MODULES_CACHE, logging
_lowerCAmelCase :Optional[int] = (
"""https://raw.githubusercontent.com/huggingface/diffusers/{revision}/examples/community/{pipeline}.py"""
)
_lowerCAmelCase :str = logging.get_logger(__name__) # pylint: disable=invalid-name
def __lowerCAmelCase ( ) -> str:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Union[str, Any] = "https://pypi.org/pypi/diffusers/json"
SCREAMING_SNAKE_CASE : List[Any] = json.loads(request.urlopen(_lowerCamelCase ).read() )["releases"].keys()
return sorted(_lowerCamelCase , key=lambda a_ : version.Version(_lowerCamelCase ) )
def __lowerCAmelCase ( ) -> Any:
'''simple docstring'''
if HF_MODULES_CACHE in sys.path:
return
sys.path.append(_lowerCamelCase )
os.makedirs(_lowerCamelCase , exist_ok=_lowerCamelCase )
SCREAMING_SNAKE_CASE : List[Any] = Path(_lowerCamelCase ) / "__init__.py"
if not init_path.exists():
init_path.touch()
def __lowerCAmelCase ( a_ ) -> Tuple:
'''simple docstring'''
init_hf_modules()
SCREAMING_SNAKE_CASE : Tuple = Path(_lowerCamelCase ) / name
# If the parent module does not exist yet, recursively create it.
if not dynamic_module_path.parent.exists():
create_dynamic_module(dynamic_module_path.parent )
os.makedirs(_lowerCamelCase , exist_ok=_lowerCamelCase )
SCREAMING_SNAKE_CASE : Optional[int] = dynamic_module_path / "__init__.py"
if not init_path.exists():
init_path.touch()
def __lowerCAmelCase ( a_ ) -> List[str]:
'''simple docstring'''
with open(_lowerCamelCase , 'r' , encoding='utf-8' ) as f:
SCREAMING_SNAKE_CASE : Union[str, Any] = f.read()
# Imports of the form `import .xxx`
SCREAMING_SNAKE_CASE : Union[str, Any] = re.findall('^\s*import\s+\.(\S+)\s*$' , _lowerCamelCase , flags=re.MULTILINE )
# Imports of the form `from .xxx import yyy`
relative_imports += re.findall('^\s*from\s+\.(\S+)\s+import' , _lowerCamelCase , flags=re.MULTILINE )
# Unique-ify
return list(set(_lowerCamelCase ) )
def __lowerCAmelCase ( a_ ) -> str:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[int] = False
SCREAMING_SNAKE_CASE : Tuple = [module_file]
SCREAMING_SNAKE_CASE : Optional[int] = []
# Let's recurse through all relative imports
while not no_change:
SCREAMING_SNAKE_CASE : Optional[Any] = []
for f in files_to_check:
new_imports.extend(get_relative_imports(_lowerCamelCase ) )
SCREAMING_SNAKE_CASE : Optional[Any] = Path(_lowerCamelCase ).parent
SCREAMING_SNAKE_CASE : List[str] = [str(module_path / m ) for m in new_imports]
SCREAMING_SNAKE_CASE : Optional[Any] = [f for f in new_import_files if f not in all_relative_imports]
SCREAMING_SNAKE_CASE : Union[str, Any] = [f"""{f}.py""" for f in new_import_files]
SCREAMING_SNAKE_CASE : Tuple = len(_lowerCamelCase ) == 0
all_relative_imports.extend(_lowerCamelCase )
return all_relative_imports
def __lowerCAmelCase ( a_ ) -> Optional[int]:
'''simple docstring'''
with open(_lowerCamelCase , 'r' , encoding='utf-8' ) as f:
SCREAMING_SNAKE_CASE : Dict = f.read()
# Imports of the form `import xxx`
SCREAMING_SNAKE_CASE : List[str] = re.findall('^\s*import\s+(\S+)\s*$' , _lowerCamelCase , flags=re.MULTILINE )
# Imports of the form `from xxx import yyy`
imports += re.findall('^\s*from\s+(\S+)\s+import' , _lowerCamelCase , flags=re.MULTILINE )
# Only keep the top-level module
SCREAMING_SNAKE_CASE : Optional[int] = [imp.split('.' )[0] for imp in imports if not imp.startswith('.' )]
# Unique-ify and test we got them all
SCREAMING_SNAKE_CASE : Any = list(set(_lowerCamelCase ) )
SCREAMING_SNAKE_CASE : Tuple = []
for imp in imports:
try:
importlib.import_module(_lowerCamelCase )
except ImportError:
missing_packages.append(_lowerCamelCase )
if len(_lowerCamelCase ) > 0:
raise ImportError(
'This modeling file requires the following packages that were not found in your environment: '
f"""{", ".join(_lowerCamelCase )}. Run `pip install {" ".join(_lowerCamelCase )}`""" )
return get_relative_imports(_lowerCamelCase )
def __lowerCAmelCase ( a_ , a_ ) -> Any:
'''simple docstring'''
SCREAMING_SNAKE_CASE : int = module_path.replace(os.path.sep , '.' )
SCREAMING_SNAKE_CASE : Optional[Any] = importlib.import_module(_lowerCamelCase )
if class_name is None:
return find_pipeline_class(_lowerCamelCase )
return getattr(_lowerCamelCase , _lowerCamelCase )
def __lowerCAmelCase ( a_ ) -> List[str]:
'''simple docstring'''
from ..pipelines import DiffusionPipeline
SCREAMING_SNAKE_CASE : int = dict(inspect.getmembers(_lowerCamelCase , inspect.isclass ) )
SCREAMING_SNAKE_CASE : Union[str, Any] = None
for cls_name, cls in cls_members.items():
if (
cls_name != DiffusionPipeline.__name__
and issubclass(cls , _lowerCamelCase )
and cls.__module__.split('.' )[0] != "diffusers"
):
if pipeline_class is not None:
raise ValueError(
f"""Multiple classes that inherit from {DiffusionPipeline.__name__} have been found:"""
f""" {pipeline_class.__name__}, and {cls_name}. Please make sure to define only one in"""
f""" {loaded_module}.""" )
SCREAMING_SNAKE_CASE : Any = cls
return pipeline_class
def __lowerCAmelCase ( a_ , a_ , a_ = None , a_ = False , a_ = False , a_ = None , a_ = None , a_ = None , a_ = False , ) -> str:
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[Any] = str(_lowerCamelCase )
SCREAMING_SNAKE_CASE : Any = os.path.join(_lowerCamelCase , _lowerCamelCase )
if os.path.isfile(_lowerCamelCase ):
SCREAMING_SNAKE_CASE : Union[str, Any] = module_file_or_url
SCREAMING_SNAKE_CASE : Any = "local"
elif pretrained_model_name_or_path.count('/' ) == 0:
SCREAMING_SNAKE_CASE : Optional[Any] = get_diffusers_versions()
# cut ".dev0"
SCREAMING_SNAKE_CASE : Union[str, Any] = "v" + ".".join(__version__.split('.' )[:3] )
# retrieve github version that matches
if revision is None:
SCREAMING_SNAKE_CASE : List[Any] = latest_version if latest_version[1:] in available_versions else "main"
logger.info(f"""Defaulting to latest_version: {revision}.""" )
elif revision in available_versions:
SCREAMING_SNAKE_CASE : Optional[Any] = f"""v{revision}"""
elif revision == "main":
SCREAMING_SNAKE_CASE : Dict = revision
else:
raise ValueError(
f"""`custom_revision`: {revision} does not exist. Please make sure to choose one of"""
f""" {", ".join(available_versions + ["main"] )}.""" )
# community pipeline on GitHub
SCREAMING_SNAKE_CASE : Dict = COMMUNITY_PIPELINES_URL.format(revision=_lowerCamelCase , pipeline=_lowerCamelCase )
try:
SCREAMING_SNAKE_CASE : Optional[int] = cached_download(
_lowerCamelCase , cache_dir=_lowerCamelCase , force_download=_lowerCamelCase , proxies=_lowerCamelCase , resume_download=_lowerCamelCase , local_files_only=_lowerCamelCase , use_auth_token=_lowerCamelCase , )
SCREAMING_SNAKE_CASE : Optional[Any] = "git"
SCREAMING_SNAKE_CASE : Any = pretrained_model_name_or_path + ".py"
except EnvironmentError:
logger.error(f"""Could not locate the {module_file} inside {pretrained_model_name_or_path}.""" )
raise
else:
try:
# Load from URL or cache if already cached
SCREAMING_SNAKE_CASE : Any = hf_hub_download(
_lowerCamelCase , _lowerCamelCase , cache_dir=_lowerCamelCase , force_download=_lowerCamelCase , proxies=_lowerCamelCase , resume_download=_lowerCamelCase , local_files_only=_lowerCamelCase , use_auth_token=_lowerCamelCase , )
SCREAMING_SNAKE_CASE : Optional[Any] = os.path.join('local' , '--'.join(pretrained_model_name_or_path.split('/' ) ) )
except EnvironmentError:
logger.error(f"""Could not locate the {module_file} inside {pretrained_model_name_or_path}.""" )
raise
# Check we have all the requirements in our environment
SCREAMING_SNAKE_CASE : List[str] = check_imports(_lowerCamelCase )
# Now we move the module inside our cached dynamic modules.
SCREAMING_SNAKE_CASE : List[str] = DIFFUSERS_DYNAMIC_MODULE_NAME + os.path.sep + submodule
create_dynamic_module(_lowerCamelCase )
SCREAMING_SNAKE_CASE : Optional[int] = Path(_lowerCamelCase ) / full_submodule
if submodule == "local" or submodule == "git":
# We always copy local files (we could hash the file to see if there was a change, and give them the name of
# that hash, to only copy when there is a modification but it seems overkill for now).
# The only reason we do the copy is to avoid putting too many folders in sys.path.
shutil.copy(_lowerCamelCase , submodule_path / module_file )
for module_needed in modules_needed:
SCREAMING_SNAKE_CASE : int = f"""{module_needed}.py"""
shutil.copy(os.path.join(_lowerCamelCase , _lowerCamelCase ) , submodule_path / module_needed )
else:
# Get the commit hash
# TODO: we will get this info in the etag soon, so retrieve it from there and not here.
if isinstance(_lowerCamelCase , _lowerCamelCase ):
SCREAMING_SNAKE_CASE : Optional[Any] = use_auth_token
elif use_auth_token is True:
SCREAMING_SNAKE_CASE : Dict = HfFolder.get_token()
else:
SCREAMING_SNAKE_CASE : Tuple = None
SCREAMING_SNAKE_CASE : List[str] = model_info(_lowerCamelCase , revision=_lowerCamelCase , token=_lowerCamelCase ).sha
# The module file will end up being placed in a subfolder with the git hash of the repo. This way we get the
# benefit of versioning.
SCREAMING_SNAKE_CASE : str = submodule_path / commit_hash
SCREAMING_SNAKE_CASE : List[str] = full_submodule + os.path.sep + commit_hash
create_dynamic_module(_lowerCamelCase )
if not (submodule_path / module_file).exists():
shutil.copy(_lowerCamelCase , submodule_path / module_file )
# Make sure we also have every file with relative
for module_needed in modules_needed:
if not (submodule_path / module_needed).exists():
get_cached_module_file(
_lowerCamelCase , f"""{module_needed}.py""" , cache_dir=_lowerCamelCase , force_download=_lowerCamelCase , resume_download=_lowerCamelCase , proxies=_lowerCamelCase , use_auth_token=_lowerCamelCase , revision=_lowerCamelCase , local_files_only=_lowerCamelCase , )
return os.path.join(_lowerCamelCase , _lowerCamelCase )
def __lowerCAmelCase ( a_ , a_ , a_ = None , a_ = None , a_ = False , a_ = False , a_ = None , a_ = None , a_ = None , a_ = False , **a_ , ) -> List[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : int = get_cached_module_file(
_lowerCamelCase , _lowerCamelCase , cache_dir=_lowerCamelCase , force_download=_lowerCamelCase , resume_download=_lowerCamelCase , proxies=_lowerCamelCase , use_auth_token=_lowerCamelCase , revision=_lowerCamelCase , local_files_only=_lowerCamelCase , )
return get_class_in_module(_lowerCamelCase , final_module.replace('.py' , '' ) )
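# Illustrative sketch of how the helpers above are typically combined. The entry
# point is referenced by its upstream name, get_class_from_dynamic_module; the
# final def above is aliased in this file, so that name is an assumption here.
# It fetches a community pipeline file and returns its DiffusionPipeline subclass.
def _example_load_community_pipeline():
    pipeline_class = get_class_from_dynamic_module(
        "clip_guided_stable_diffusion", module_file="clip_guided_stable_diffusion.py"
    )
    return pipeline_class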
| 717 |
"""Logical and arithmetic binary shifts on integers, returned as binary strings."""
def logical_left_shift(number: int, shift_amount: int) -> str:
    """Shift the binary representation of a non-negative integer left."""
    if number < 0 or shift_amount < 0:
        raise ValueError("both inputs must be positive integers")
    binary_number = str(bin(number))
    binary_number += "0" * shift_amount
    return binary_number


def logical_right_shift(number: int, shift_amount: int) -> str:
    """Shift the binary representation of a non-negative integer right, filling with zeros."""
    if number < 0 or shift_amount < 0:
        raise ValueError("both inputs must be positive integers")
    binary_number = str(bin(number))[2:]
    if shift_amount >= len(binary_number):
        return "0b0"
    shifted_binary_number = binary_number[: len(binary_number) - shift_amount]
    return "0b" + shifted_binary_number


def arithmetic_right_shift(number: int, shift_amount: int) -> str:
    """Shift right while replicating the sign bit (two's complement semantics)."""
    if number >= 0:  # Get binary representation of positive number
        binary_number = "0" + str(bin(number)).strip("-")[2:]
    else:  # Get binary (2's complement) representation of negative number
        binary_number_length = len(bin(number)[3:])  # Find 2's complement of number
        binary_number = bin(abs(number) - (1 << binary_number_length))[3:]
        binary_number = "1" + "0" * (binary_number_length - len(binary_number)) + binary_number
    if shift_amount >= len(binary_number):
        return "0b" + binary_number[0] * len(binary_number)
    return (
        "0b"
        + binary_number[0] * shift_amount
        + binary_number[: len(binary_number) - shift_amount]
    )
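# Illustrative sanity checks for the three shifts above. The expected strings were
# worked out by hand: 17 is 0b10001, 1983 >> 4 is 123 (0b1111011), and -17 >> 2 is
# -5, i.e. 0b111011 in 6-bit two's complement.
def _example_shifts() -> None:
    assert logical_left_shift(17, 2) == "0b1000100"     # 17 << 2 == 68
    assert logical_right_shift(1983, 4) == "0b1111011"  # 1983 >> 4 == 123
    assert arithmetic_right_shift(-17, 2) == "0b111011"  # sign bit replicated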
if __name__ == "__main__":
import doctest
doctest.testmod()
| 179 | 0 |
"""simple docstring"""
from argparse import ArgumentParser
from .env import EnvironmentCommand
def UpperCamelCase ( ) -> Optional[int]:
lowercase : Dict = ArgumentParser("""Diffusers CLI tool""" , usage="""diffusers-cli <command> [<args>]""" )
lowercase : str = parser.add_subparsers(help="""diffusers-cli command helpers""" )
# Register commands
EnvironmentCommand.register_subcommand(_A )
# Let's go
lowercase : Dict = parser.parse_args()
if not hasattr(_A , """func""" ):
parser.print_help()
exit(1 )
# Run
lowercase : Dict = args.func(_A )
service.run()
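# Illustrative sketch of an additional subcommand following the
# register_subcommand/run pattern EnvironmentCommand uses above. The class is
# hypothetical and only shows the shape a new command would take.
class _ExampleHelloCommand:
    @staticmethod
    def register_subcommand(parser):
        hello_parser = parser.add_parser("hello", help="print a greeting")
        hello_parser.set_defaults(func=lambda args: _ExampleHelloCommand())

    def run(self):
        print("hello from diffusers-cli")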
if __name__ == "__main__":
main()
| 264 |
"""simple docstring"""
import argparse
import json
import os
import numpy as np
import PIL
import requests
import tensorflow.keras.applications.efficientnet as efficientnet
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from tensorflow.keras.preprocessing import image
from transformers import (
EfficientNetConfig,
EfficientNetForImageClassification,
EfficientNetImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
_lowerCAmelCase = logging.get_logger(__name__)
_lowerCAmelCase = {
'b0': efficientnet.EfficientNetBa,
'b1': efficientnet.EfficientNetBa,
'b2': efficientnet.EfficientNetBa,
'b3': efficientnet.EfficientNetBa,
'b4': efficientnet.EfficientNetBa,
'b5': efficientnet.EfficientNetBa,
'b6': efficientnet.EfficientNetBa,
'b7': efficientnet.EfficientNetBa,
}
_lowerCAmelCase = {
'b0': {
'hidden_dim': 12_80,
'width_coef': 1.0,
'depth_coef': 1.0,
'image_size': 2_24,
'dropout_rate': 0.2,
'dw_padding': [],
},
'b1': {
'hidden_dim': 12_80,
'width_coef': 1.0,
'depth_coef': 1.1,
'image_size': 2_40,
'dropout_rate': 0.2,
'dw_padding': [16],
},
'b2': {
'hidden_dim': 14_08,
'width_coef': 1.1,
'depth_coef': 1.2,
'image_size': 2_60,
'dropout_rate': 0.3,
'dw_padding': [5, 8, 16],
},
'b3': {
'hidden_dim': 15_36,
'width_coef': 1.2,
'depth_coef': 1.4,
'image_size': 3_00,
'dropout_rate': 0.3,
'dw_padding': [5, 18],
},
'b4': {
'hidden_dim': 17_92,
'width_coef': 1.4,
'depth_coef': 1.8,
'image_size': 3_80,
'dropout_rate': 0.4,
'dw_padding': [6],
},
'b5': {
'hidden_dim': 20_48,
'width_coef': 1.6,
'depth_coef': 2.2,
'image_size': 4_56,
'dropout_rate': 0.4,
'dw_padding': [13, 27],
},
'b6': {
'hidden_dim': 23_04,
'width_coef': 1.8,
'depth_coef': 2.6,
'image_size': 5_28,
'dropout_rate': 0.5,
'dw_padding': [31],
},
'b7': {
'hidden_dim': 25_60,
'width_coef': 2.0,
'depth_coef': 3.1,
'image_size': 6_00,
'dropout_rate': 0.5,
'dw_padding': [18],
},
}
def get_efficientnet_config(model_name: str) -> EfficientNetConfig:
    config = EfficientNetConfig()
    config.hidden_dim = CONFIG_MAP[model_name]["hidden_dim"]
    config.width_coefficient = CONFIG_MAP[model_name]["width_coef"]
    config.depth_coefficient = CONFIG_MAP[model_name]["depth_coef"]
    config.image_size = CONFIG_MAP[model_name]["image_size"]
    config.dropout_rate = CONFIG_MAP[model_name]["dropout_rate"]
    config.depthwise_padding = CONFIG_MAP[model_name]["dw_padding"]

    repo_id = "huggingface/label-files"
    filename = "imagenet-1k-id2label.json"
    config.num_labels = 1000
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}
    return config
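# Illustrative sketch: the factory above in action. The asserted values simply
# echo the CONFIG_MAP entry for "b0"; building the config downloads the ImageNet
# label file from the Hub.
def _example_b0_config() -> EfficientNetConfig:
    config = get_efficientnet_config("b0")
    assert config.image_size == 224
    assert config.hidden_dim == 1280
    return config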
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    image = Image.open(requests.get(url, stream=True).raw)
    return image
def convert_image_processor(model_name: str) -> EfficientNetImageProcessor:
    size = CONFIG_MAP[model_name]["image_size"]
    preprocessor = EfficientNetImageProcessor(
        size={"height": size, "width": size},
        image_mean=[0.485, 0.456, 0.406],
        image_std=[0.47853944, 0.4732864, 0.47434163],
        do_center_crop=False,
    )
    return preprocessor
def rename_keys(original_param_names):
    block_names = [v.split("_")[0].split("block")[1] for v in original_param_names if v.startswith("block")]
    block_names = sorted(set(block_names))
    num_blocks = len(block_names)
    block_name_mapping = {b: str(i) for b, i in zip(block_names, range(num_blocks))}

    rename_keys = []
rename_keys.append(("""stem_conv/kernel:0""", """embeddings.convolution.weight""") )
rename_keys.append(("""stem_bn/gamma:0""", """embeddings.batchnorm.weight""") )
rename_keys.append(("""stem_bn/beta:0""", """embeddings.batchnorm.bias""") )
rename_keys.append(("""stem_bn/moving_mean:0""", """embeddings.batchnorm.running_mean""") )
rename_keys.append(("""stem_bn/moving_variance:0""", """embeddings.batchnorm.running_var""") )
for b in block_names:
        hf_b = block_name_mapping[b]
rename_keys.append((F"""block{b}_expand_conv/kernel:0""", F"""encoder.blocks.{hf_b}.expansion.expand_conv.weight""") )
rename_keys.append((F"""block{b}_expand_bn/gamma:0""", F"""encoder.blocks.{hf_b}.expansion.expand_bn.weight""") )
rename_keys.append((F"""block{b}_expand_bn/beta:0""", F"""encoder.blocks.{hf_b}.expansion.expand_bn.bias""") )
rename_keys.append(
(F"""block{b}_expand_bn/moving_mean:0""", F"""encoder.blocks.{hf_b}.expansion.expand_bn.running_mean""") )
rename_keys.append(
(F"""block{b}_expand_bn/moving_variance:0""", F"""encoder.blocks.{hf_b}.expansion.expand_bn.running_var""") )
rename_keys.append(
(F"""block{b}_dwconv/depthwise_kernel:0""", F"""encoder.blocks.{hf_b}.depthwise_conv.depthwise_conv.weight""") )
rename_keys.append((F"""block{b}_bn/gamma:0""", F"""encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.weight""") )
rename_keys.append((F"""block{b}_bn/beta:0""", F"""encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.bias""") )
rename_keys.append(
(F"""block{b}_bn/moving_mean:0""", F"""encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.running_mean""") )
rename_keys.append(
(F"""block{b}_bn/moving_variance:0""", F"""encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.running_var""") )
rename_keys.append((F"""block{b}_se_reduce/kernel:0""", F"""encoder.blocks.{hf_b}.squeeze_excite.reduce.weight""") )
rename_keys.append((F"""block{b}_se_reduce/bias:0""", F"""encoder.blocks.{hf_b}.squeeze_excite.reduce.bias""") )
rename_keys.append((F"""block{b}_se_expand/kernel:0""", F"""encoder.blocks.{hf_b}.squeeze_excite.expand.weight""") )
rename_keys.append((F"""block{b}_se_expand/bias:0""", F"""encoder.blocks.{hf_b}.squeeze_excite.expand.bias""") )
rename_keys.append(
(F"""block{b}_project_conv/kernel:0""", F"""encoder.blocks.{hf_b}.projection.project_conv.weight""") )
rename_keys.append((F"""block{b}_project_bn/gamma:0""", F"""encoder.blocks.{hf_b}.projection.project_bn.weight""") )
rename_keys.append((F"""block{b}_project_bn/beta:0""", F"""encoder.blocks.{hf_b}.projection.project_bn.bias""") )
rename_keys.append(
(F"""block{b}_project_bn/moving_mean:0""", F"""encoder.blocks.{hf_b}.projection.project_bn.running_mean""") )
rename_keys.append(
(F"""block{b}_project_bn/moving_variance:0""", F"""encoder.blocks.{hf_b}.projection.project_bn.running_var""") )
rename_keys.append(("""top_conv/kernel:0""", """encoder.top_conv.weight""") )
rename_keys.append(("""top_bn/gamma:0""", """encoder.top_bn.weight""") )
rename_keys.append(("""top_bn/beta:0""", """encoder.top_bn.bias""") )
rename_keys.append(("""top_bn/moving_mean:0""", """encoder.top_bn.running_mean""") )
rename_keys.append(("""top_bn/moving_variance:0""", """encoder.top_bn.running_var""") )
    key_mapping = {}
    for item in rename_keys:
        if item[0] in original_param_names:
            key_mapping[item[0]] = "efficientnet." + item[1]

    key_mapping["predictions/kernel:0"] = "classifier.weight"
    key_mapping["predictions/bias:0"] = "classifier.bias"
    return key_mapping
def replace_params(hf_params, tf_params, key_mapping):
    for key, value in tf_params.items():
        if "normalization" in key:
            continue
        hf_key = key_mapping[key]
        if "_conv" in key and "kernel" in key:
            new_hf_value = torch.from_numpy(value).permute(3, 2, 0, 1)
        elif "depthwise_kernel" in key:
            new_hf_value = torch.from_numpy(value).permute(2, 3, 0, 1)
        elif "kernel" in key:
            new_hf_value = torch.from_numpy(np.transpose(value))
        else:
            new_hf_value = torch.from_numpy(value)
        # Replace HF parameters with original TF model parameters
        assert hf_params[hf_key].shape == new_hf_value.shape
        hf_params[hf_key].copy_(new_hf_value)
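# Illustrative sketch of the kernel permutations above in isolation. TensorFlow
# stores conv kernels as (H, W, in, out) and depthwise kernels as (H, W, channels,
# depth_multiplier); PyTorch expects (out, in, H, W) and (channels, multiplier,
# H, W) respectively, hence the two permutations.
def _example_kernel_permutations() -> None:
    tf_kernel = np.zeros((3, 3, 16, 32))  # H, W, in_channels, out_channels
    assert torch.from_numpy(tf_kernel).permute(3, 2, 0, 1).shape == (32, 16, 3, 3)
    tf_depthwise = np.zeros((3, 3, 16, 1))  # H, W, channels, depth_multiplier
    assert torch.from_numpy(tf_depthwise).permute(2, 3, 0, 1).shape == (16, 1, 3, 3)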
@torch.no_grad()
def convert_efficientnet_checkpoint(model_name, pytorch_dump_folder_path, save_model, push_to_hub):
lowercase : Any = model_classes[model_name](
include_top=_A , weights="""imagenet""" , input_tensor=_A , input_shape=_A , pooling=_A , classes=1_000 , classifier_activation="""softmax""" , )
lowercase : Dict = original_model.trainable_variables
lowercase : Any = original_model.non_trainable_variables
lowercase : Any = {param.name: param.numpy() for param in tf_params}
for param in tf_non_train_params:
lowercase : Dict = param.numpy()
lowercase : List[str] = list(tf_params.keys() )
# Load HuggingFace model
lowercase : str = get_efficientnet_config(_A )
lowercase : List[Any] = EfficientNetForImageClassification(_A ).eval()
lowercase : Optional[int] = hf_model.state_dict()
# Create src-to-dst parameter name mapping dictionary
print("""Converting parameters...""" )
lowercase : int = rename_keys(_A )
replace_params(_A , _A , _A )
# Initialize preprocessor and preprocess input image
lowercase : Optional[int] = convert_image_processor(_A )
lowercase : Any = preprocessor(images=prepare_img() , return_tensors="""pt""" )
# HF model inference
hf_model.eval()
with torch.no_grad():
lowercase : Union[str, Any] = hf_model(**_A )
lowercase : List[Any] = outputs.logits.detach().numpy()
# Original model inference
lowercase : Optional[Any] = False
lowercase : str = CONFIG_MAP[model_name]["""image_size"""]
lowercase : Optional[Any] = prepare_img().resize((image_size, image_size) , resample=PIL.Image.NEAREST )
lowercase : Optional[Any] = image.img_to_array(_A )
lowercase : Dict = np.expand_dims(_A , axis=0 )
lowercase : List[str] = original_model.predict(_A )
# Check whether original and HF model outputs match -> np.allclose
assert np.allclose(_A , _A , atol=1e-3 ), "The predicted logits are not the same."
print("""Model outputs match!""" )
if save_model:
# Create folder to save model
if not os.path.isdir(_A ):
os.mkdir(_A )
# Save converted model and image processor
hf_model.save_pretrained(_A )
preprocessor.save_pretrained(_A )
if push_to_hub:
# Push model and image processor to hub
print(F"""Pushing converted {model_name} to the hub...""" )
lowercase : Dict = F"""efficientnet-{model_name}"""
preprocessor.push_to_hub(_A )
hf_model.push_to_hub(_A )
if __name__ == "__main__":
_lowerCAmelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--model_name',
default='b0',
type=str,
help='Version name of the EfficientNet model you want to convert, select from [b0, b1, b2, b3, b4, b5, b6, b7].',
)
parser.add_argument(
'--pytorch_dump_folder_path',
default='hf_model',
type=str,
help='Path to the output PyTorch model directory.',
)
parser.add_argument('--save_model', action='store_true', help='Save model to local')
parser.add_argument('--push_to_hub', action='store_true', help='Push model and image processor to the hub')
_lowerCAmelCase = parser.parse_args()
convert_efficientnet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.save_model, args.push_to_hub)
| 264 | 1 |
def encrypt(input_string: str, key: int) -> str:
    """Encrypt ``input_string`` with the rail fence (zigzag) cipher of height ``key``."""
    temp_grid: list[list[str]] = [[] for _ in range(key)]
    lowest = key - 1
    if key <= 0:
        raise ValueError("Height of grid can't be 0 or negative")
    if key == 1 or len(input_string) <= key:
        return input_string
    for position, character in enumerate(input_string):
        num = position % (lowest * 2)  # puts it in bounds
        num = min(num, lowest * 2 - num)  # creates zigzag pattern
        temp_grid[num].append(character)
    grid = ["".join(row) for row in temp_grid]
    output_string = "".join(grid)
    return output_string


def decrypt(input_string: str, key: int) -> str:
    """Invert the rail fence cipher for a known ``key``."""
    grid = []
    lowest = key - 1
    if key <= 0:
        raise ValueError("Height of grid can't be 0 or negative")
    if key == 1:
        return input_string
    temp_grid: list[list[str]] = [[] for _ in range(key)]  # generates template
    for position in range(len(input_string)):
        num = position % (lowest * 2)  # puts it in bounds
        num = min(num, lowest * 2 - num)  # creates zigzag pattern
        temp_grid[num].append("*")
    counter = 0
    for row in temp_grid:  # fills in the characters
        splice = input_string[counter : counter + len(row)]
        grid.append(list(splice))
        counter += len(splice)
    output_string = ""  # reads as zigzag
    for position in range(len(input_string)):
        num = position % (lowest * 2)  # puts it in bounds
        num = min(num, lowest * 2 - num)  # creates zigzag pattern
        output_string += grid[num][0]
        grid[num].pop(0)
    return output_string


def bruteforce(input_string: str) -> dict[int, str]:
    """Decrypt with every possible key and return all candidate plaintexts."""
    results = {}
    for key_guess in range(1, len(input_string)):  # tries every key
        results[key_guess] = decrypt(input_string, key_guess)
    return results
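# Illustrative round trip plus a brute-force recovery. The key of 4 is arbitrary;
# bruteforce() returns every candidate plaintext, indexed by the guessed key.
def _example_rail_fence() -> None:
    cipher_text = encrypt("Hello World", 4)
    assert decrypt(cipher_text, 4) == "Hello World"
    assert bruteforce(cipher_text)[4] == "Hello World"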
if __name__ == "__main__":
import doctest
doctest.testmod()
| 713 |
class lowerCAmelCase__ :
def __init__( self : str , __UpperCamelCase : str = "" , __UpperCamelCase : bool = False ) -> None:
# Mapping from the first character of the prefix of the node
A = {}
# A node will be a leaf if the tree contains its word
A = is_leaf
A = prefix
def __UpperCamelCase ( self : int , __UpperCamelCase : str ) -> tuple[str, str, str]:
A = 0
for q, w in zip(self.prefix , __UpperCamelCase ):
if q != w:
break
x += 1
return self.prefix[:x], self.prefix[x:], word[x:]
def __UpperCamelCase ( self : int , __UpperCamelCase : list[str] ) -> None:
for word in words:
self.insert(__UpperCamelCase )
def __UpperCamelCase ( self : Tuple , __UpperCamelCase : str ) -> None:
# Case 1: If the word is the prefix of the node
# Solution: We set the current node as leaf
if self.prefix == word:
A = True
# Case 2: The node has no edges that have a prefix to the word
# Solution: We create an edge from the current node to a new one
# containing the word
elif word[0] not in self.nodes:
A = RadixNode(prefix=__UpperCamelCase , is_leaf=__UpperCamelCase )
else:
A = self.nodes[word[0]]
A , A , A = incoming_node.match(
__UpperCamelCase )
# Case 3: The node prefix is equal to the matching
# Solution: We insert remaining word on the next node
if remaining_prefix == "":
self.nodes[matching_string[0]].insert(__UpperCamelCase )
# Case 4: The word is greater equal to the matching
# Solution: Create a node in between both nodes, change
# prefixes and add the new node for the remaining word
else:
A = remaining_prefix
A = self.nodes[matching_string[0]]
A = RadixNode(__UpperCamelCase , __UpperCamelCase )
A = aux_node
if remaining_word == "":
A = True
else:
self.nodes[matching_string[0]].insert(__UpperCamelCase )
def __UpperCamelCase ( self : int , __UpperCamelCase : str ) -> bool:
A = self.nodes.get(word[0] , __UpperCamelCase )
if not incoming_node:
return False
else:
A , A , A = incoming_node.match(
__UpperCamelCase )
# If there is remaining prefix, the word can't be on the tree
if remaining_prefix != "":
return False
# This applies when the word and the prefix are equal
elif remaining_word == "":
return incoming_node.is_leaf
# We have word remaining so we check the next node
else:
return incoming_node.find(__UpperCamelCase )
def __UpperCamelCase ( self : Union[str, Any] , __UpperCamelCase : str ) -> bool:
A = self.nodes.get(word[0] , __UpperCamelCase )
if not incoming_node:
return False
else:
A , A , A = incoming_node.match(
__UpperCamelCase )
# If there is remaining prefix, the word can't be on the tree
if remaining_prefix != "":
return False
# We have word remaining so we check the next node
elif remaining_word != "":
return incoming_node.delete(__UpperCamelCase )
else:
# If it is not a leaf, we don't have to delete
if not incoming_node.is_leaf:
return False
else:
# We delete the nodes if no edges go from it
if len(incoming_node.nodes ) == 0:
del self.nodes[word[0]]
# We merge the current node with its only child
if len(self.nodes ) == 1 and not self.is_leaf:
A = list(self.nodes.values() )[0]
A = merging_node.is_leaf
self.prefix += merging_node.prefix
A = merging_node.nodes
# If there is more than 1 edge, we just mark it as non-leaf
elif len(incoming_node.nodes ) > 1:
A = False
# If there is 1 edge, we merge it with its child
else:
A = list(incoming_node.nodes.values() )[0]
A = merging_node.is_leaf
incoming_node.prefix += merging_node.prefix
A = merging_node.nodes
return True
def __UpperCamelCase ( self : Union[str, Any] , __UpperCamelCase : int = 0 ) -> None:
if self.prefix != "":
print('-' * height , self.prefix , ' (leaf)' if self.is_leaf else '' )
for value in self.nodes.values():
value.print_tree(height + 1 )
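# Illustrative sketch of the prefix splitting that match() drives: inserting two
# words with a common prefix forces an intermediate "te" node. This assumes the
# class above behaves like the reference radix-tree implementation it mirrors
# (its internals are aliased in this file).
def _example_prefix_split() -> None:
    root = RadixNode()
    root.insert_many(["test", "team"])
    # the shared child holds "te" and fans out to "st" and "am"
    assert root.nodes["t"].prefix == "te"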
def lowerCamelCase_ ( ) -> bool:
'''simple docstring'''
A = 'banana bananas bandana band apple all beast'.split()
A = RadixNode()
root.insert_many(lowerCAmelCase__ )
assert all(root.find(lowerCAmelCase__ ) for word in words )
assert not root.find('bandanas' )
assert not root.find('apps' )
root.delete('all' )
assert not root.find('all' )
root.delete('banana' )
assert not root.find('banana' )
assert root.find('bananas' )
return True
def lowerCamelCase_ ( ) -> None:
'''simple docstring'''
assert test_trie()
def lowerCamelCase_ ( ) -> None:
'''simple docstring'''
A = RadixNode()
A = 'banana bananas bandanas bandana band apple all beast'.split()
root.insert_many(lowerCAmelCase__ )
print('Words:' , lowerCAmelCase__ )
print('Tree:' )
root.print_tree()
if __name__ == "__main__":
main()
| 224 | 0 |
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
__A = logging.get_logger(__name__)
__A = {
"""facebook/deit-base-distilled-patch16-224""": (
"""https://huggingface.co/facebook/deit-base-patch16-224/resolve/main/config.json"""
),
# See all DeiT models at https://huggingface.co/models?filter=deit
}
class lowercase ( a_):
"""simple docstring"""
a__ : Any = "deit"
def __init__( self : int , __UpperCAmelCase : List[Any]=768 , __UpperCAmelCase : Optional[int]=12 , __UpperCAmelCase : Tuple=12 , __UpperCAmelCase : Union[str, Any]=3_072 , __UpperCAmelCase : Union[str, Any]="gelu" , __UpperCAmelCase : Union[str, Any]=0.0 , __UpperCAmelCase : Dict=0.0 , __UpperCAmelCase : Optional[Any]=0.02 , __UpperCAmelCase : Tuple=1E-12 , __UpperCAmelCase : Optional[Any]=224 , __UpperCAmelCase : Optional[Any]=16 , __UpperCAmelCase : List[Any]=3 , __UpperCAmelCase : str=True , __UpperCAmelCase : str=16 , **__UpperCAmelCase : List[Any] , ) -> Dict:
super().__init__(**_snake_case )
UpperCAmelCase_= hidden_size
UpperCAmelCase_= num_hidden_layers
UpperCAmelCase_= num_attention_heads
UpperCAmelCase_= intermediate_size
UpperCAmelCase_= hidden_act
UpperCAmelCase_= hidden_dropout_prob
UpperCAmelCase_= attention_probs_dropout_prob
UpperCAmelCase_= initializer_range
UpperCAmelCase_= layer_norm_eps
UpperCAmelCase_= image_size
UpperCAmelCase_= patch_size
UpperCAmelCase_= num_channels
UpperCAmelCase_= qkv_bias
UpperCAmelCase_= encoder_stride
class lowercase ( a_):
"""simple docstring"""
a__ : int = version.parse("1.11")
@property
def _SCREAMING_SNAKE_CASE ( self : int ) -> List[Any]:
return OrderedDict(
[
("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}),
] )
@property
def _SCREAMING_SNAKE_CASE ( self : str ) -> Any:
return 1E-4
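# Illustrative sketch: exporting a DeiT checkpoint with a config pair like the one
# above. It assumes the classes here mirror the upstream DeiTConfig/DeiTOnnxConfig
# (the definitions in this file are aliased) and uses the transformers.onnx export
# entry point; the onnx package must be installed.
def _example_deit_onnx_export(output_path="deit.onnx"):
    from pathlib import Path
    from transformers import AutoImageProcessor, DeiTModel
    from transformers.models.deit.configuration_deit import DeiTOnnxConfig
    from transformers.onnx import export

    checkpoint = "facebook/deit-base-distilled-patch16-224"
    model = DeiTModel.from_pretrained(checkpoint)
    processor = AutoImageProcessor.from_pretrained(checkpoint)
    onnx_config = DeiTOnnxConfig(model.config)
    return export(processor, model, onnx_config, onnx_config.default_onnx_opset, Path(output_path))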
| 593 |
"""Perceiver model configuration and ONNX export configuration."""
from collections import OrderedDict
from typing import Any, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...feature_extraction_utils import FeatureExtractionMixin
from ...onnx import OnnxConfig
from ...onnx.utils import compute_effective_axis_dimension
from ...tokenization_utils_base import PreTrainedTokenizerBase
from ...utils import TensorType, logging
A : Optional[Any] = logging.get_logger(__name__)
A : Optional[Any] = {
"""deepmind/language-perceiver""": """https://huggingface.co/deepmind/language-perceiver/resolve/main/config.json""",
# See all Perceiver models at https://huggingface.co/models?filter=perceiver
}
class lowerCAmelCase_ ( a_ ):
__UpperCAmelCase = 'perceiver'
def __init__( self : List[str], _snake_case : Optional[Any]=256, _snake_case : int=1_280, _snake_case : Optional[int]=768, _snake_case : List[str]=1, _snake_case : str=26, _snake_case : Union[str, Any]=8, _snake_case : Optional[int]=8, _snake_case : Optional[int]=None, _snake_case : str=None, _snake_case : List[str]="kv", _snake_case : str=1, _snake_case : Optional[Any]=1, _snake_case : str="gelu", _snake_case : List[Any]=0.1, _snake_case : Any=0.02, _snake_case : Union[str, Any]=1E-12, _snake_case : str=True, _snake_case : Any=262, _snake_case : Union[str, Any]=2_048, _snake_case : List[str]=56, _snake_case : Tuple=[368, 496], _snake_case : Dict=16, _snake_case : Tuple=1_920, _snake_case : Optional[Any]=16, _snake_case : Optional[Any]=[1, 16, 224, 224], **_snake_case : Optional[Any], ):
'''simple docstring'''
super().__init__(**_snake_case )
snake_case : Union[str, Any] =num_latents
snake_case : str =d_latents
snake_case : Any =d_model
snake_case : Any =num_blocks
snake_case : Tuple =num_self_attends_per_block
snake_case : int =num_self_attention_heads
snake_case : str =num_cross_attention_heads
snake_case : List[Any] =qk_channels
snake_case : Tuple =v_channels
snake_case : str =cross_attention_shape_for_attention
snake_case : Union[str, Any] =self_attention_widening_factor
snake_case : Union[str, Any] =cross_attention_widening_factor
snake_case : Optional[int] =hidden_act
snake_case : Any =attention_probs_dropout_prob
snake_case : int =initializer_range
snake_case : str =layer_norm_eps
snake_case : Dict =use_query_residual
# masked language modeling attributes
snake_case : List[Any] =vocab_size
snake_case : List[Any] =max_position_embeddings
# image classification attributes
snake_case : List[str] =image_size
# flow attributes
snake_case : Optional[Any] =train_size
# multimodal autoencoding attributes
snake_case : Dict =num_frames
snake_case : Optional[Any] =audio_samples_per_frame
snake_case : Dict =samples_per_patch
snake_case : Union[str, Any] =output_shape
class lowerCAmelCase_ ( a_ ):
@property
def __snake_case ( self : List[Any] ):
'''simple docstring'''
if self.task == "multiple-choice":
snake_case : Tuple ={0: '''batch''', 1: '''choice''', 2: '''sequence'''}
else:
snake_case : Union[str, Any] ={0: '''batch''', 1: '''sequence'''}
return OrderedDict(
[
('''inputs''', dynamic_axis),
('''attention_mask''', dynamic_axis),
] )
@property
def __snake_case ( self : List[str] ):
'''simple docstring'''
return 1E-4
def __snake_case ( self : Dict, _snake_case : Union["PreTrainedTokenizerBase", "FeatureExtractionMixin"], _snake_case : int = -1, _snake_case : int = -1, _snake_case : int = -1, _snake_case : bool = False, _snake_case : Optional[TensorType] = None, _snake_case : int = 3, _snake_case : int = 40, _snake_case : int = 40, ):
'''simple docstring'''
if isinstance(_snake_case, _snake_case ):
# If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
snake_case : Any =compute_effective_axis_dimension(
_snake_case, fixed_dimension=OnnxConfig.default_fixed_batch, num_token_to_add=0 )
# If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
snake_case : Tuple =preprocessor.num_special_tokens_to_add(_snake_case )
snake_case : Optional[Any] =compute_effective_axis_dimension(
_snake_case, fixed_dimension=OnnxConfig.default_fixed_sequence, num_token_to_add=_snake_case )
# Generate dummy inputs according to compute batch and sequence
snake_case : str =[''' '''.join(['''a'''] ) * seq_length] * batch_size
snake_case : int =dict(preprocessor(_snake_case, return_tensors=_snake_case ) )
snake_case : List[str] =inputs.pop('''input_ids''' )
return inputs
elif isinstance(_snake_case, _snake_case ) and preprocessor.model_input_names[0] == "pixel_values":
# If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
snake_case : Union[str, Any] =compute_effective_axis_dimension(_snake_case, fixed_dimension=OnnxConfig.default_fixed_batch )
snake_case : Dict =self._generate_dummy_images(_snake_case, _snake_case, _snake_case, _snake_case )
snake_case : Optional[Any] =dict(preprocessor(images=_snake_case, return_tensors=_snake_case ) )
snake_case : Optional[Any] =inputs.pop('''pixel_values''' )
return inputs
else:
raise ValueError(
'''Unable to generate dummy inputs for the model. Please provide a tokenizer or a preprocessor.''' )
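# Illustrative sketch of the tokenizer branch of generate_dummy_inputs() above.
# The config classes are referenced by their upstream names (PerceiverConfig,
# PerceiverOnnxConfig); the definitions in this file are aliased, so those names
# are an assumption here. The default -1 sizes trigger the fixed-dimension
# fallback, and 'input_ids' is renamed to 'inputs' as done above.
def _example_perceiver_dummy_inputs():
    from transformers import AutoTokenizer

    onnx_config = PerceiverOnnxConfig(PerceiverConfig())
    tokenizer = AutoTokenizer.from_pretrained("deepmind/language-perceiver")
    dummy = onnx_config.generate_dummy_inputs(tokenizer, framework=TensorType.PYTORCH)
    return dummy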
| 349 | 0 |
"""Quine-McCluskey tabulation method for minimising a boolean function."""
from __future__ import annotations
from collections.abc import Sequence
from typing import Literal
def _A ( A__ , A__ ):
"""simple docstring"""
__lowercase = list(A__ )
__lowercase = list(A__ )
__lowercase = 0
for i in range(len(A__ ) ):
if lista[i] != lista[i]:
count += 1
__lowercase = '''_'''
if count > 1:
return False
else:
return "".join(A__ )
def _A ( A__ ):
"""simple docstring"""
__lowercase = []
while True:
__lowercase = ['''$'''] * len(A__ )
__lowercase = []
for i in range(len(A__ ) ):
for j in range(i + 1 , len(A__ ) ):
__lowercase = compare_string(binary[i] , binary[j] )
if k is False:
__lowercase = '''*'''
__lowercase = '''*'''
temp.append('''X''' )
for i in range(len(A__ ) ):
if checka[i] == "$":
pi.append(binary[i] )
if len(A__ ) == 0:
return pi
__lowercase = list(set(A__ ) )
def _A ( A__ , A__ ):
"""simple docstring"""
__lowercase = []
for minterm in minterms:
__lowercase = ''''''
for _ in range(A__ ):
__lowercase = str(minterm % 2 ) + string
minterm //= 2
temp.append(A__ )
return temp
def _A ( A__ , A__ , A__ ):
"""simple docstring"""
__lowercase = list(A__ )
__lowercase = list(A__ )
__lowercase = 0
for i in range(len(A__ ) ):
if lista[i] != lista[i]:
count_n += 1
return count_n == count
def _A ( A__ , A__ ):
"""simple docstring"""
__lowercase = []
__lowercase = [0] * len(A__ )
for i in range(len(chart[0] ) ):
__lowercase = 0
__lowercase = -1
for j in range(len(A__ ) ):
if chart[j][i] == 1:
count += 1
__lowercase = j
if count == 1:
__lowercase = 1
for i in range(len(A__ ) ):
if select[i] == 1:
for j in range(len(chart[0] ) ):
if chart[i][j] == 1:
for k in range(len(A__ ) ):
__lowercase = 0
temp.append(prime_implicants[i] )
while True:
__lowercase = 0
__lowercase = -1
__lowercase = 0
for i in range(len(A__ ) ):
__lowercase = chart[i].count(1 )
if count_n > max_n:
__lowercase = count_n
__lowercase = i
if max_n == 0:
return temp
temp.append(prime_implicants[rem] )
for i in range(len(chart[0] ) ):
if chart[rem][i] == 1:
for j in range(len(A__ ) ):
__lowercase = 0
def _A ( A__ , A__ ):
"""simple docstring"""
__lowercase = [[0 for x in range(len(A__ ) )] for x in range(len(A__ ) )]
for i in range(len(A__ ) ):
__lowercase = prime_implicants[i].count('''_''' )
for j in range(len(A__ ) ):
if is_for_table(prime_implicants[i] , binary[j] , A__ ):
__lowercase = 1
return chart
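# Illustrative sketch: driving the minimiser programmatically instead of via the
# input() prompts in main() below. The function names follow the call sites in
# main() (the defs above are aliased in this file); minterms 0, 1, 2 and 5 over
# three variables are an arbitrary worked example.
def _example_minimise():
    binary = decimal_to_binary(3, [0, 1, 2, 5])
    prime_implicants = check(binary)
    chart = prime_implicant_chart(prime_implicants, binary)
    return selection(chart, prime_implicants)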
def _A ( ):
"""simple docstring"""
__lowercase = int(input('''Enter the no. of variables\n''' ) )
__lowercase = [
float(A__ )
for x in input(
'''Enter the decimal representation of Minterms \'Spaces Separated\'\n''' ).split()
]
__lowercase = decimal_to_binary(A__ , A__ )
__lowercase = check(A__ )
print('''Prime Implicants are:''' )
print(A__ )
__lowercase = prime_implicant_chart(A__ , A__ )
__lowercase = selection(A__ , A__ )
print('''Essential Prime Implicants are:''' )
print(A__ )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 624 |
"""Project Euler problem 12: the first triangle number with over 500 divisors."""


def triangle_number_generator():
    """Yield the triangle numbers n * (n + 1) / 2."""
    for n in range(1, 1000000):
        yield n * (n + 1) // 2


def count_divisors(n: int) -> int:
    """Count the divisors of ``n`` via its prime factorisation."""
    divisors_count = 1
    i = 2
    while i * i <= n:
        multiplicity = 0
        while n % i == 0:
            n //= i
            multiplicity += 1
        divisors_count *= multiplicity + 1
        i += 1
    if n > 1:
        divisors_count *= 2
    return divisors_count


def solution() -> int:
    """Return the first triangle number with more than 500 divisors."""
    return next(i for i in triangle_number_generator() if count_divisors(i) > 500)
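# Illustrative sanity check on count_divisors: 28 = 2**2 * 7, so it has
# (2 + 1) * (1 + 1) = 6 divisors, namely 1, 2, 4, 7, 14 and 28.
def _example_count_divisors() -> None:
    assert count_divisors(28) == 6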
if __name__ == "__main__":
print(solution())
| 624 | 1 |
import time
import warnings
from abc import ABC
from copy import deepcopy
from typing import Optional

import torch

from ..utils import add_start_docstrings, logging


logger = logging.get_logger(__name__)


STOPPING_CRITERIA_INPUTS_DOCSTRING = r"""
    Args:
        input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
            Indices of input sequence tokens in the vocabulary.

            Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
            [`PreTrainedTokenizer.__call__`] for details.

            [What are input IDs?](../glossary#input-ids)
        scores (`torch.FloatTensor` of shape `(batch_size, config.vocab_size)`):
            Prediction scores of a language modeling head. These can be scores for each vocabulary token before SoftMax
            or scores for each vocabulary token after SoftMax.
        kwargs (`Dict[str, Any]`, *optional*):
            Additional stopping criteria specific kwargs.

    Return:
        `bool`. `False` indicates we should continue, `True` indicates we should stop.

"""


class StoppingCriteria(ABC):
    """Abstract base class for all stopping criteria that can be applied during generation."""

    @add_start_docstrings(STOPPING_CRITERIA_INPUTS_DOCSTRING)
    def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor, **kwargs) -> bool:
        raise NotImplementedError("StoppingCriteria needs to be subclassed")


class MaxLengthCriteria(StoppingCriteria):
    """Stops generation once the full sequence reaches `max_length` tokens."""

    def __init__(self, max_length: int, max_position_embeddings: Optional[int] = None):
        self.max_length = max_length
        self.max_position_embeddings = max_position_embeddings

    @add_start_docstrings(STOPPING_CRITERIA_INPUTS_DOCSTRING)
    def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor, **kwargs) -> bool:
        cur_len = input_ids.shape[-1]
        is_done = cur_len >= self.max_length
        if self.max_position_embeddings is not None and not is_done and cur_len >= self.max_position_embeddings:
            logger.warning_once(
                "This is a friendly reminder - the current text generation call will exceed the model's predefined "
                f"maximum length ({self.max_position_embeddings}). Depending on the model, you may observe "
                "exceptions, performance degradation, or nothing at all."
            )
        return is_done


class MaxNewTokensCriteria(StoppingCriteria):
    """Deprecated: stops generation once `max_new_tokens` tokens have been generated."""

    def __init__(self, start_length: int, max_new_tokens: int):
        warnings.warn(
            "The class `MaxNewTokensCriteria` is deprecated. "
            f"Please use `MaxLengthCriteria(max_length={start_length + max_new_tokens})` "
            "with `max_length = start_length + max_new_tokens` instead.",
            FutureWarning,
        )
        self.start_length = start_length
        self.max_new_tokens = max_new_tokens
        self.max_length = start_length + max_new_tokens

    @add_start_docstrings(STOPPING_CRITERIA_INPUTS_DOCSTRING)
    def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor, **kwargs) -> bool:
        return input_ids.shape[-1] >= self.max_length


class MaxTimeCriteria(StoppingCriteria):
    """Stops generation once `max_time` seconds have elapsed since `initial_timestamp`."""

    def __init__(self, max_time: float, initial_timestamp: Optional[float] = None):
        self.max_time = max_time
        self.initial_timestamp = time.time() if initial_timestamp is None else initial_timestamp

    @add_start_docstrings(STOPPING_CRITERIA_INPUTS_DOCSTRING)
    def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor, **kwargs) -> bool:
        return time.time() - self.initial_timestamp > self.max_time


class StoppingCriteriaList(list):
    """A list of `StoppingCriteria`; generation stops as soon as any criterion fires."""

    @add_start_docstrings(STOPPING_CRITERIA_INPUTS_DOCSTRING)
    def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor, **kwargs) -> bool:
        return any(criteria(input_ids, scores) for criteria in self)

    @property
    def max_length(self) -> Optional[int]:
        for stopping_criterium in self:
            if isinstance(stopping_criterium, MaxLengthCriteria):
                return stopping_criterium.max_length
            elif isinstance(stopping_criterium, MaxNewTokensCriteria):
                return stopping_criterium.max_length
        return None


def validate_stopping_criteria(stopping_criteria: StoppingCriteriaList, max_length: int) -> StoppingCriteriaList:
    stopping_max_length = stopping_criteria.max_length
    new_stopping_criteria = deepcopy(stopping_criteria)
    if stopping_max_length is not None and stopping_max_length != max_length:
        warnings.warn("You set different `max_length` for stopping criteria and `max_length` parameter", UserWarning)
    elif stopping_max_length is None:
        new_stopping_criteria.append(MaxLengthCriteria(max_length=max_length))
    return new_stopping_criteria
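

# Editor's usage sketch (hedged): how these criteria are typically handed to
# `generate` from user code. "gpt2" is only a convenient public checkpoint for
# illustration, not something this module depends on.
#
#   from transformers import AutoModelForCausalLM, AutoTokenizer
#   from transformers import MaxTimeCriteria, StoppingCriteriaList
#
#   tokenizer = AutoTokenizer.from_pretrained("gpt2")
#   model = AutoModelForCausalLM.from_pretrained("gpt2")
#   inputs = tokenizer("Hello there,", return_tensors="pt")
#   criteria = StoppingCriteriaList([MaxTimeCriteria(max_time=2.0)])
#   outputs = model.generate(**inputs, stopping_criteria=criteria, max_new_tokens=64)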
from pathlib import Path

import numpy as np
from PIL import Image


def rgb_to_gray(rgb: np.ndarray) -> np.ndarray:
    """Return a gray image from an RGB image (ITU-R 601-2 luma transform)."""
    r, g, b = rgb[:, :, 0], rgb[:, :, 1], rgb[:, :, 2]
    return 0.2989 * r + 0.5870 * g + 0.1140 * b


def gray_to_binary(gray: np.ndarray) -> np.ndarray:
    """Return a binary image from a gray image by simple thresholding."""
    return (gray > 127) & (gray <= 255)


def dilation(image: np.ndarray, kernel: np.ndarray) -> np.ndarray:
    """Return the morphological dilation of `image` by the structuring `kernel`."""
    output = np.zeros_like(image)
    image_padded = np.zeros(
        (image.shape[0] + kernel.shape[0] - 1, image.shape[1] + kernel.shape[1] - 1)
    )

    # Copy image to padded image
    image_padded[kernel.shape[0] - 2 : -1, kernel.shape[1] - 2 : -1] = image

    # Iterate over image & apply kernel
    for x in range(image.shape[1]):
        for y in range(image.shape[0]):
            summation = (
                kernel * image_padded[y : y + kernel.shape[0], x : x + kernel.shape[1]]
            ).sum()
            # a pixel is set as soon as the kernel overlaps any set pixel
            output[y, x] = int(summation > 0)
    return output


if __name__ == "__main__":
    # read original image
    lena_path = Path(__file__).resolve().parent / "image_data" / "lena.jpg"
    lena = np.array(Image.open(lena_path))
    # kernel to be applied
    structuring_element = np.array([[0, 1, 0], [1, 1, 1], [0, 1, 0]])
    output = dilation(gray_to_binary(rgb_to_gray(lena)), structuring_element)
    # Save the output image
    pil_img = Image.fromarray(output).convert("RGB")
    pil_img.save("result_dilation.png")
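

if __name__ == "__main__":
    # Editor's sketch (not part of the original script): dilating a single set
    # pixel with a cross-shaped kernel grows it into a plus sign.
    toy = np.zeros((5, 5), dtype=int)
    toy[2, 2] = 1
    cross = np.array([[0, 1, 0], [1, 1, 1], [0, 1, 0]])
    print(dilation(toy, cross))
    # [[0 0 0 0 0]
    #  [0 0 1 0 0]
    #  [0 1 1 1 0]
    #  [0 0 1 0 0]
    #  [0 0 0 0 0]]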
import argparse
import datetime


def zeller(date_input: str) -> str:
    """
    Zeller's congruence: find the day of the week for nearly any Gregorian
    date, given as a string in mm-dd-yyyy or mm/dd/yyyy format.
    """
    # Days of the week for response
    days = {
        "0": "Sunday",
        "1": "Monday",
        "2": "Tuesday",
        "3": "Wednesday",
        "4": "Thursday",
        "5": "Friday",
        "6": "Saturday",
    }

    convert_datetime_days = {0: 1, 1: 2, 2: 3, 3: 4, 4: 5, 5: 6, 6: 0}

    # Validate
    if not 0 < len(date_input) < 11:
        raise ValueError("Must be 10 characters long")

    # Get month
    m = int(date_input[0] + date_input[1])
    # Validate
    if not 0 < m < 13:
        raise ValueError("Month must be between 1 - 12")

    sep_1 = date_input[2]
    # Validate
    if sep_1 not in ["-", "/"]:
        raise ValueError("Date separator must be '-' or '/'")

    # Get day
    d = int(date_input[3] + date_input[4])
    # Validate
    if not 0 < d < 32:
        raise ValueError("Date must be between 1 - 31")

    # Get second separator
    sep_2 = date_input[5]
    # Validate
    if sep_2 not in ["-", "/"]:
        raise ValueError("Date separator must be '-' or '/'")

    # Get year
    y = int(date_input[6] + date_input[7] + date_input[8] + date_input[9])
    # Arbitrary year range
    if not 45 < y < 8500:
        raise ValueError(
            "Year out of range. There has to be some sort of limit...right?"
        )

    # Get datetime obj for validation
    dt_ck = datetime.date(int(y), int(m), int(d))

    # Start math
    if m <= 2:
        y = y - 1
        m = m + 12
    # maths var
    c = int(str(y)[:2])
    k = int(str(y)[2:])
    t = int(2.6 * m - 5.39)
    u = int(c / 4)
    v = int(k / 4)
    x = int(d + k)
    z = int(t + u + v + x)
    w = int(z - (2 * c))
    f = round(w % 7)
    # End math

    # Validate math
    if f != convert_datetime_days[dt_ck.weekday()]:
        raise AssertionError("The date was evaluated incorrectly. Contact developer.")

    # Response
    response = f"Your date {date_input}, is a {days[str(f)]}!"
    return response


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    parser = argparse.ArgumentParser(
        description=(
            "Find out what day of the week nearly any date is or was. Enter "
            "date as a string in the mm-dd-yyyy or mm/dd/yyyy format"
        )
    )
    parser.add_argument(
        "date_input", type=str, help="Date as a string (mm-dd-yyyy or mm/dd/yyyy)"
    )
    args = parser.parse_args()
    zeller(args.date_input)
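

# Editor's sketch: the function can also be called directly. The function
# asserts its own result against datetime.date.weekday(), so the output below
# is self-checking:
#
#   zeller("01-31-2010")  ->  "Your date 01-31-2010, is a Sunday!"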
import argparse
import torch
from ...utils import logging
from . import AlbertConfig, AlbertForPreTraining, load_tf_weights_in_albert
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, albert_config_file, pytorch_dump_path):
    # Initialise PyTorch model from the JSON configuration
    config = AlbertConfig.from_json_file(albert_config_file)
    print(f"Building PyTorch model from configuration: {config}")
    model = AlbertForPreTraining(config)

    # Load weights from tf checkpoint
    load_tf_weights_in_albert(model, config, tf_checkpoint_path)

    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    torch.save(model.state_dict(), pytorch_dump_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--tf_checkpoint_path''', default=None, type=str, required=True, help='''Path to the TensorFlow checkpoint path.'''
)
parser.add_argument(
'''--albert_config_file''',
default=None,
type=str,
required=True,
help=(
'''The config json file corresponding to the pre-trained ALBERT model. \n'''
'''This specifies the model architecture.'''
),
)
parser.add_argument(
'''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
    args = parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.albert_config_file, args.pytorch_dump_path)
"""simple docstring"""
import numpy as np
from cva import COLOR_BGR2GRAY, CV_8UC3, cvtColor, filteraD, imread, imshow, waitKey
def UpperCAmelCase__ (lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ):
'''simple docstring'''
if (ksize % 2) == 0:
__SCREAMING_SNAKE_CASE = ksize + 1
__SCREAMING_SNAKE_CASE = np.zeros((ksize, ksize) , dtype=np.floataa )
# each value
for y in range(lowerCAmelCase_ ):
for x in range(lowerCAmelCase_ ):
# distance from center
__SCREAMING_SNAKE_CASE = x - ksize // 2
__SCREAMING_SNAKE_CASE = y - ksize // 2
# degree to radiant
__SCREAMING_SNAKE_CASE = theta / 180 * np.pi
__SCREAMING_SNAKE_CASE = np.cos(_theta )
__SCREAMING_SNAKE_CASE = np.sin(_theta )
# get kernel x
__SCREAMING_SNAKE_CASE = cos_theta * px + sin_theta * py
# get kernel y
__SCREAMING_SNAKE_CASE = -sin_theta * px + cos_theta * py
# fill kernel
__SCREAMING_SNAKE_CASE = np.exp(
-(_x**2 + gamma**2 * _y**2) / (2 * sigma**2) ) * np.cos(2 * np.pi * _x / lambd + psi )
return gabor
if __name__ == "__main__":
import doctest
doctest.testmod()
# read original image
a__ : Tuple = imread('''../image_data/lena.jpg''')
# turn image in gray scale value
a__ : Any = cvtColor(img, COLOR_BGR2GRAY)
# Apply multiple Kernel to detect edges
a__ : int = np.zeros(gray.shape[:2])
for theta in [0, 3_0, 6_0, 9_0, 1_2_0, 1_5_0]:
a__ : str = gabor_filter_kernel(1_0, 8, theta, 1_0, 0, 0)
out += filteraD(gray, CV_8UC3, kernel_aa)
a__ : Tuple = out / out.max() * 2_5_5
a__ : Any = out.astype(np.uinta)
imshow('''Original''', gray)
imshow('''Gabor filter with 20x20 mask and 6 directions''', out)
waitKey(0)
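

# Editor's sketch: inspecting a single kernel needs only NumPy; the parameter
# values below are illustrative, not from the original script.
#
#   k = gabor_filter_kernel(21, sigma=4, theta=45, lambd=10, gamma=1, psi=0)
#   print(k.shape)  # (21, 21); even ksize values would be bumped to 22 -> 23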
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
a__ : List[str] = logging.get_logger(__name__)
a__ : str = {
'''xlm-roberta-base''': '''https://huggingface.co/xlm-roberta-base/resolve/main/config.json''',
'''xlm-roberta-large''': '''https://huggingface.co/xlm-roberta-large/resolve/main/config.json''',
'''xlm-roberta-large-finetuned-conll02-dutch''': (
'''https://huggingface.co/xlm-roberta-large-finetuned-conll02-dutch/resolve/main/config.json'''
),
'''xlm-roberta-large-finetuned-conll02-spanish''': (
'''https://huggingface.co/xlm-roberta-large-finetuned-conll02-spanish/resolve/main/config.json'''
),
'''xlm-roberta-large-finetuned-conll03-english''': (
'''https://huggingface.co/xlm-roberta-large-finetuned-conll03-english/resolve/main/config.json'''
),
'''xlm-roberta-large-finetuned-conll03-german''': (
'''https://huggingface.co/xlm-roberta-large-finetuned-conll03-german/resolve/main/config.json'''
),
}


class XLMRobertaConfig(PretrainedConfig):
    model_type = "xlm-roberta"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout


class XLMRobertaOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
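

# Editor's sketch: from user code the config is imported from the top-level
# package; the defaults above mirror RoBERTa's base architecture.
#
#   from transformers import XLMRobertaConfig
#   config = XLMRobertaConfig()
#   print(config.hidden_size, config.num_hidden_layers)  # 768 12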
"""simple docstring"""
import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import ConvNextConfig, SegformerImageProcessor, UperNetConfig, UperNetForSemanticSegmentation
def get_upernet_config(model_name):
    auxiliary_in_channels = 384
    if "tiny" in model_name:
        depths = [3, 3, 9, 3]
        hidden_sizes = [96, 192, 384, 768]
    if "small" in model_name:
        depths = [3, 3, 27, 3]
        hidden_sizes = [96, 192, 384, 768]
    if "base" in model_name:
        depths = [3, 3, 27, 3]
        hidden_sizes = [128, 256, 512, 1024]
        auxiliary_in_channels = 512
    if "large" in model_name:
        depths = [3, 3, 27, 3]
        hidden_sizes = [192, 384, 768, 1536]
        auxiliary_in_channels = 768
    if "xlarge" in model_name:
        depths = [3, 3, 27, 3]
        hidden_sizes = [256, 512, 1024, 2048]
        auxiliary_in_channels = 1024

    # set label information
    num_labels = 150
    repo_id = "huggingface/label-files"
    filename = "ade20k-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}

    backbone_config = ConvNextConfig(
        depths=depths, hidden_sizes=hidden_sizes, out_features=["stage1", "stage2", "stage3", "stage4"]
    )
    config = UperNetConfig(
        backbone_config=backbone_config,
        auxiliary_in_channels=auxiliary_in_channels,
        num_labels=num_labels,
        id2label=id2label,
        label2id=label2id,
    )

    return config


def create_rename_keys(config):
    rename_keys = []
# fmt: off
# stem
rename_keys.append(('backbone.downsample_layers.0.0.weight', 'backbone.embeddings.patch_embeddings.weight') )
rename_keys.append(('backbone.downsample_layers.0.0.bias', 'backbone.embeddings.patch_embeddings.bias') )
rename_keys.append(('backbone.downsample_layers.0.1.weight', 'backbone.embeddings.layernorm.weight') )
rename_keys.append(('backbone.downsample_layers.0.1.bias', 'backbone.embeddings.layernorm.bias') )
# stages
for i in range(len(config.backbone_config.depths ) ):
for j in range(config.backbone_config.depths[i] ):
rename_keys.append((F"backbone.stages.{i}.{j}.gamma", F"backbone.encoder.stages.{i}.layers.{j}.layer_scale_parameter") )
rename_keys.append((F"backbone.stages.{i}.{j}.depthwise_conv.weight", F"backbone.encoder.stages.{i}.layers.{j}.dwconv.weight") )
rename_keys.append((F"backbone.stages.{i}.{j}.depthwise_conv.bias", F"backbone.encoder.stages.{i}.layers.{j}.dwconv.bias") )
rename_keys.append((F"backbone.stages.{i}.{j}.norm.weight", F"backbone.encoder.stages.{i}.layers.{j}.layernorm.weight") )
rename_keys.append((F"backbone.stages.{i}.{j}.norm.bias", F"backbone.encoder.stages.{i}.layers.{j}.layernorm.bias") )
rename_keys.append((F"backbone.stages.{i}.{j}.pointwise_conv1.weight", F"backbone.encoder.stages.{i}.layers.{j}.pwconv1.weight") )
rename_keys.append((F"backbone.stages.{i}.{j}.pointwise_conv1.bias", F"backbone.encoder.stages.{i}.layers.{j}.pwconv1.bias") )
rename_keys.append((F"backbone.stages.{i}.{j}.pointwise_conv2.weight", F"backbone.encoder.stages.{i}.layers.{j}.pwconv2.weight") )
rename_keys.append((F"backbone.stages.{i}.{j}.pointwise_conv2.bias", F"backbone.encoder.stages.{i}.layers.{j}.pwconv2.bias") )
if i > 0:
rename_keys.append((F"backbone.downsample_layers.{i}.0.weight", F"backbone.encoder.stages.{i}.downsampling_layer.0.weight") )
rename_keys.append((F"backbone.downsample_layers.{i}.0.bias", F"backbone.encoder.stages.{i}.downsampling_layer.0.bias") )
rename_keys.append((F"backbone.downsample_layers.{i}.1.weight", F"backbone.encoder.stages.{i}.downsampling_layer.1.weight") )
rename_keys.append((F"backbone.downsample_layers.{i}.1.bias", F"backbone.encoder.stages.{i}.downsampling_layer.1.bias") )
rename_keys.append((F"backbone.norm{i}.weight", F"backbone.hidden_states_norms.stage{i+1}.weight") )
rename_keys.append((F"backbone.norm{i}.bias", F"backbone.hidden_states_norms.stage{i+1}.bias") )
# decode head
rename_keys.extend(
[
('decode_head.conv_seg.weight', 'decode_head.classifier.weight'),
('decode_head.conv_seg.bias', 'decode_head.classifier.bias'),
('auxiliary_head.conv_seg.weight', 'auxiliary_head.classifier.weight'),
('auxiliary_head.conv_seg.bias', 'auxiliary_head.classifier.bias'),
] )
# fmt: on
return rename_keys


def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val


def convert_upernet_checkpoint(model_name, pytorch_dump_folder_path, push_to_hub):
    model_name_to_url = {
'upernet-convnext-tiny': 'https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_tiny_fp16_512x512_160k_ade20k/upernet_convnext_tiny_fp16_512x512_160k_ade20k_20220227_124553-cad485de.pth',
'upernet-convnext-small': 'https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_small_fp16_512x512_160k_ade20k/upernet_convnext_small_fp16_512x512_160k_ade20k_20220227_131208-1b1e394f.pth',
'upernet-convnext-base': 'https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_base_fp16_512x512_160k_ade20k/upernet_convnext_base_fp16_512x512_160k_ade20k_20220227_181227-02a24fc6.pth',
'upernet-convnext-large': 'https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_large_fp16_640x640_160k_ade20k/upernet_convnext_large_fp16_640x640_160k_ade20k_20220226_040532-e57aa54d.pth',
'upernet-convnext-xlarge': 'https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_xlarge_fp16_640x640_160k_ade20k/upernet_convnext_xlarge_fp16_640x640_160k_ade20k_20220226_080344-95fc38c2.pth',
}
    checkpoint_url = model_name_to_url[model_name]
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")["state_dict"]

    config = get_upernet_config(model_name)
    model = UperNetForSemanticSegmentation(config)
    model.eval()

    # replace "bn" => "batch_norm"
    for key in state_dict.copy().keys():
        val = state_dict.pop(key)
        if "bn" in key:
            key = key.replace("bn", "batch_norm")
        state_dict[key] = val

    # rename keys
    rename_keys = create_rename_keys(config)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)

    model.load_state_dict(state_dict)

    # verify on image
    url = "https://huggingface.co/datasets/hf-internal-testing/fixtures_ade20k/resolve/main/ADE_val_00000001.jpg"
    image = Image.open(requests.get(url, stream=True).raw).convert("RGB")

    processor = SegformerImageProcessor()
    pixel_values = processor(image, return_tensors="pt").pixel_values

    with torch.no_grad():
        outputs = model(pixel_values)
if model_name == "upernet-convnext-tiny":
        expected_slice = torch.tensor(
[[-8.8110, -8.8110, -8.6521], [-8.8110, -8.8110, -8.6521], [-8.7746, -8.7746, -8.6130]] )
elif model_name == "upernet-convnext-small":
        expected_slice = torch.tensor(
[[-8.8236, -8.8236, -8.6771], [-8.8236, -8.8236, -8.6771], [-8.7638, -8.7638, -8.6240]] )
elif model_name == "upernet-convnext-base":
        expected_slice = torch.tensor(
[[-8.8558, -8.8558, -8.6905], [-8.8558, -8.8558, -8.6905], [-8.7669, -8.7669, -8.6021]] )
elif model_name == "upernet-convnext-large":
        expected_slice = torch.tensor(
[[-8.6660, -8.6660, -8.6210], [-8.6660, -8.6660, -8.6210], [-8.6310, -8.6310, -8.5964]] )
elif model_name == "upernet-convnext-xlarge":
        expected_slice = torch.tensor(
[[-8.4980, -8.4980, -8.3977], [-8.4980, -8.4980, -8.3977], [-8.4379, -8.4379, -8.3412]] )
print('Logits:' , outputs.logits[0, 0, :3, :3] )
    assert torch.allclose(outputs.logits[0, 0, :3, :3], expected_slice, atol=1e-4)
print('Looks ok!' )
if pytorch_dump_folder_path is not None:
print(F"Saving model {model_name} to {pytorch_dump_folder_path}" )
model.save_pretrained(_lowerCamelCase )
print(F"Saving processor to {pytorch_dump_folder_path}" )
processor.save_pretrained(_lowerCamelCase )
if push_to_hub:
print(F"Pushing model and processor for {model_name} to hub" )
model.push_to_hub(F"openmmlab/{model_name}" )
processor.push_to_hub(F"openmmlab/{model_name}" )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--model_name''',
default='''upernet-convnext-tiny''',
type=str,
choices=[F'upernet-convnext-{size}' for size in ['''tiny''', '''small''', '''base''', '''large''', '''xlarge''']],
help='''Name of the ConvNext UperNet model you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
parser.add_argument(
'''--push_to_hub''', action='''store_true''', help='''Whether or not to push the converted model to the 🤗 hub.'''
)
    args = parser.parse_args()
convert_upernet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
"""simple docstring"""
import inspect
import unittest
import torch
import torch.nn as nn
from accelerate.hooks import (
AlignDevicesHook,
ModelHook,
SequentialHook,
add_hook_to_module,
attach_align_device_hook,
remove_hook_from_module,
remove_hook_from_submodules,
)
from accelerate.test_utils import require_multi_gpu
class _UpperCAmelCase ( nn.Module ):
def __init__( self ) -> Any:
super().__init__()
UpperCAmelCase = nn.Linear(3 , 4 )
UpperCAmelCase = nn.BatchNormad(4 )
UpperCAmelCase = nn.Linear(4 , 5 )
def a_ ( self , lowercase_ ) -> Any:
return self.lineara(self.batchnorm(self.lineara(lowercase_ ) ) )
class _UpperCAmelCase ( SCREAMING_SNAKE_CASE_ ):
def a_ ( self , lowercase_ , *lowercase_ , **lowercase_ ) -> List[str]:
return (args[0] + 1,) + args[1:], kwargs
class _UpperCAmelCase ( SCREAMING_SNAKE_CASE_ ):
def a_ ( self , lowercase_ , lowercase_ ) -> Union[str, Any]:
return output + 1
class _UpperCAmelCase ( unittest.TestCase ):
def a_ ( self ) -> Optional[int]:
UpperCAmelCase = ModelForTest()
UpperCAmelCase = ModelHook()
add_hook_to_module(lowercase_ , lowercase_ )
self.assertEqual(test_model._hf_hook , lowercase_ )
self.assertTrue(hasattr(lowercase_ , '_old_forward' ) )
# Check adding the hook did not change the name or the signature
self.assertEqual(test_model.forward.__name__ , 'forward' )
self.assertListEqual(list(inspect.signature(test_model.forward ).parameters ) , ['x'] )
remove_hook_from_module(lowercase_ )
self.assertFalse(hasattr(lowercase_ , '_hf_hook' ) )
self.assertFalse(hasattr(lowercase_ , '_old_forward' ) )
def a_ ( self ) -> Any:
UpperCAmelCase = ModelForTest()
UpperCAmelCase = ModelHook()
add_hook_to_module(lowercase_ , lowercase_ )
add_hook_to_module(lowercase_ , lowercase_ , append=lowercase_ )
self.assertEqual(isinstance(test_model._hf_hook , lowercase_ ) , lowercase_ )
self.assertEqual(len(test_model._hf_hook.hooks ) , 2 )
self.assertTrue(hasattr(lowercase_ , '_old_forward' ) )
# Check adding the hook did not change the name or the signature
self.assertEqual(test_model.forward.__name__ , 'forward' )
self.assertListEqual(list(inspect.signature(test_model.forward ).parameters ) , ['x'] )
remove_hook_from_module(lowercase_ )
self.assertFalse(hasattr(lowercase_ , '_hf_hook' ) )
self.assertFalse(hasattr(lowercase_ , '_old_forward' ) )
def a_ ( self ) -> Any:
UpperCAmelCase = ModelForTest()
UpperCAmelCase = torch.randn(2 , 3 )
UpperCAmelCase = test_model(x + 1 )
UpperCAmelCase = test_model(x + 2 )
UpperCAmelCase = PreForwardHook()
add_hook_to_module(lowercase_ , lowercase_ )
UpperCAmelCase = test_model(lowercase_ )
self.assertTrue(torch.allclose(lowercase_ , lowercase_ , atol=1E-5 ) )
# Attaching a hook to a model when it already has one replaces, does not chain
UpperCAmelCase = PreForwardHook()
add_hook_to_module(lowercase_ , lowercase_ )
UpperCAmelCase = test_model(lowercase_ )
self.assertTrue(torch.allclose(lowercase_ , lowercase_ , atol=1E-5 ) )
# You need to use the sequential hook to chain two or more hooks
UpperCAmelCase = SequentialHook(PreForwardHook() , PreForwardHook() )
add_hook_to_module(lowercase_ , lowercase_ )
UpperCAmelCase = test_model(lowercase_ )
assert torch.allclose(lowercase_ , lowercase_ , atol=1E-5 )
def a_ ( self ) -> List[str]:
UpperCAmelCase = ModelForTest()
UpperCAmelCase = torch.randn(2 , 3 )
UpperCAmelCase = test_model(lowercase_ )
UpperCAmelCase = PostForwardHook()
add_hook_to_module(lowercase_ , lowercase_ )
UpperCAmelCase = test_model(lowercase_ )
self.assertTrue(torch.allclose(lowercase_ , output + 1 , atol=1E-5 ) )
# Attaching a hook to a model when it already has one replaces, does not chain
UpperCAmelCase = PostForwardHook()
add_hook_to_module(lowercase_ , lowercase_ )
UpperCAmelCase = test_model(lowercase_ )
self.assertTrue(torch.allclose(lowercase_ , output + 1 , atol=1E-5 ) )
# You need to use the sequential hook to chain two or more hooks
UpperCAmelCase = SequentialHook(PostForwardHook() , PostForwardHook() )
add_hook_to_module(lowercase_ , lowercase_ )
UpperCAmelCase = test_model(lowercase_ )
assert torch.allclose(lowercase_ , output + 2 , atol=1E-5 )
def a_ ( self ) -> Tuple:
UpperCAmelCase = ModelForTest()
UpperCAmelCase = torch.randn(2 , 3 )
UpperCAmelCase = test_model(lowercase_ )
UpperCAmelCase = PostForwardHook()
add_hook_to_module(lowercase_ , lowercase_ )
UpperCAmelCase = test_model(lowercase_ )
self.assertTrue(torch.allclose(lowercase_ , output + 1 ) )
self.assertTrue(outputa.requires_grad )
UpperCAmelCase = True
UpperCAmelCase = test_model(lowercase_ )
self.assertFalse(outputa.requires_grad )
@require_multi_gpu
def a_ ( self ) -> Optional[int]:
UpperCAmelCase = ModelForTest()
# Everything is on CPU
self.assertEqual(model.lineara.weight.device , torch.device('cpu' ) )
self.assertEqual(model.batchnorm.weight.device , torch.device('cpu' ) )
self.assertEqual(model.lineara.weight.device , torch.device('cpu' ) )
# This will move each submodule on different devices
add_hook_to_module(model.lineara , AlignDevicesHook(execution_device=0 ) )
add_hook_to_module(model.batchnorm , AlignDevicesHook(execution_device=0 ) )
add_hook_to_module(model.lineara , AlignDevicesHook(execution_device=1 ) )
self.assertEqual(model.lineara.weight.device , torch.device(0 ) )
self.assertEqual(model.batchnorm.weight.device , torch.device(0 ) )
self.assertEqual(model.batchnorm.running_mean.device , torch.device(0 ) )
self.assertEqual(model.lineara.weight.device , torch.device(1 ) )
# We can still make a forward pass. The input does not need to be on any particular device
UpperCAmelCase = torch.randn(2 , 3 )
UpperCAmelCase = model(lowercase_ )
self.assertEqual(output.device , torch.device(1 ) )
# We can add a general hook to put back output on same device as input.
add_hook_to_module(lowercase_ , AlignDevicesHook(io_same_device=lowercase_ ) )
UpperCAmelCase = torch.randn(2 , 3 ).to(0 )
UpperCAmelCase = model(lowercase_ )
self.assertEqual(output.device , torch.device(0 ) )
def a_ ( self ) -> Dict:
UpperCAmelCase = ModelForTest()
# Everything is on CPU
self.assertEqual(model.lineara.weight.device , torch.device('cpu' ) )
self.assertEqual(model.batchnorm.weight.device , torch.device('cpu' ) )
self.assertEqual(model.lineara.weight.device , torch.device('cpu' ) )
# This will move each submodule on different devices
UpperCAmelCase = {'execution_device': 0 if torch.cuda.is_available() else 'cpu', 'offload': True}
add_hook_to_module(model.lineara , AlignDevicesHook(**lowercase_ ) )
add_hook_to_module(model.batchnorm , AlignDevicesHook(**lowercase_ ) )
add_hook_to_module(model.lineara , AlignDevicesHook(**lowercase_ ) )
# Parameters have been offloaded, so on the meta device
self.assertEqual(model.lineara.weight.device , torch.device('meta' ) )
self.assertEqual(model.batchnorm.weight.device , torch.device('meta' ) )
self.assertEqual(model.lineara.weight.device , torch.device('meta' ) )
# Buffers are not included in the offload by default, so are on the execution device
UpperCAmelCase = torch.device(hook_kwargs['execution_device'] )
self.assertEqual(model.batchnorm.running_mean.device , lowercase_ )
UpperCAmelCase = torch.randn(2 , 3 )
UpperCAmelCase = model(lowercase_ )
self.assertEqual(output.device , lowercase_ )
# Removing hooks loads back the weights in the model.
remove_hook_from_module(model.lineara )
remove_hook_from_module(model.batchnorm )
remove_hook_from_module(model.lineara )
self.assertEqual(model.lineara.weight.device , torch.device('cpu' ) )
self.assertEqual(model.batchnorm.weight.device , torch.device('cpu' ) )
self.assertEqual(model.lineara.weight.device , torch.device('cpu' ) )
# Now test with buffers included in the offload
UpperCAmelCase = {
'execution_device': 0 if torch.cuda.is_available() else 'cpu',
'offload': True,
'offload_buffers': True,
}
add_hook_to_module(model.lineara , AlignDevicesHook(**lowercase_ ) )
add_hook_to_module(model.batchnorm , AlignDevicesHook(**lowercase_ ) )
add_hook_to_module(model.lineara , AlignDevicesHook(**lowercase_ ) )
# Parameters have been offloaded, so on the meta device, buffers included
self.assertEqual(model.lineara.weight.device , torch.device('meta' ) )
self.assertEqual(model.batchnorm.weight.device , torch.device('meta' ) )
self.assertEqual(model.lineara.weight.device , torch.device('meta' ) )
self.assertEqual(model.batchnorm.running_mean.device , torch.device('meta' ) )
UpperCAmelCase = torch.randn(2 , 3 )
UpperCAmelCase = model(lowercase_ )
self.assertEqual(output.device , lowercase_ )
# Removing hooks loads back the weights in the model.
remove_hook_from_module(model.lineara )
remove_hook_from_module(model.batchnorm )
remove_hook_from_module(model.lineara )
self.assertEqual(model.lineara.weight.device , torch.device('cpu' ) )
self.assertEqual(model.batchnorm.weight.device , torch.device('cpu' ) )
self.assertEqual(model.lineara.weight.device , torch.device('cpu' ) )
def a_ ( self ) -> Optional[int]:
UpperCAmelCase = ModelForTest()
# Everything is on CPU
self.assertEqual(model.lineara.weight.device , torch.device('cpu' ) )
self.assertEqual(model.batchnorm.weight.device , torch.device('cpu' ) )
self.assertEqual(model.lineara.weight.device , torch.device('cpu' ) )
# This will move each submodule on different devices
UpperCAmelCase = 0 if torch.cuda.is_available() else 'cpu'
attach_align_device_hook(lowercase_ , execution_device=lowercase_ , offload=lowercase_ )
# Parameters have been offloaded, so on the meta device
self.assertEqual(model.lineara.weight.device , torch.device('meta' ) )
self.assertEqual(model.batchnorm.weight.device , torch.device('meta' ) )
self.assertEqual(model.lineara.weight.device , torch.device('meta' ) )
# Buffers are not included in the offload by default, so are on the execution device
UpperCAmelCase = torch.device(lowercase_ )
self.assertEqual(model.batchnorm.running_mean.device , lowercase_ )
UpperCAmelCase = torch.randn(2 , 3 )
UpperCAmelCase = model(lowercase_ )
self.assertEqual(output.device , lowercase_ )
# Removing hooks loads back the weights in the model.
remove_hook_from_submodules(lowercase_ )
self.assertEqual(model.lineara.weight.device , torch.device('cpu' ) )
self.assertEqual(model.batchnorm.weight.device , torch.device('cpu' ) )
self.assertEqual(model.lineara.weight.device , torch.device('cpu' ) )
# Now test with buffers included in the offload
attach_align_device_hook(lowercase_ , execution_device=lowercase_ , offload=lowercase_ , offload_buffers=lowercase_ )
# Parameters have been offloaded, so on the meta device, buffers included
self.assertEqual(model.lineara.weight.device , torch.device('meta' ) )
self.assertEqual(model.batchnorm.weight.device , torch.device('meta' ) )
self.assertEqual(model.lineara.weight.device , torch.device('meta' ) )
self.assertEqual(model.batchnorm.running_mean.device , torch.device('meta' ) )
UpperCAmelCase = torch.randn(2 , 3 )
UpperCAmelCase = model(lowercase_ )
self.assertEqual(output.device , lowercase_ )
# Removing hooks loads back the weights in the model.
remove_hook_from_submodules(lowercase_ )
self.assertEqual(model.lineara.weight.device , torch.device('cpu' ) )
self.assertEqual(model.batchnorm.weight.device , torch.device('cpu' ) )
self.assertEqual(model.lineara.weight.device , torch.device('cpu' ) )
def a_ ( self ) -> Dict:
UpperCAmelCase = ModelForTest()
# Everything is on CPU
self.assertEqual(model.lineara.weight.device , torch.device('cpu' ) )
self.assertEqual(model.batchnorm.weight.device , torch.device('cpu' ) )
self.assertEqual(model.lineara.weight.device , torch.device('cpu' ) )
# This will move each submodule on different devices
UpperCAmelCase = 0 if torch.cuda.is_available() else 'cpu'
attach_align_device_hook(
lowercase_ , execution_device=lowercase_ , offload=lowercase_ , weights_map=model.state_dict() )
# Parameters have been offloaded, so on the meta device
self.assertEqual(model.lineara.weight.device , torch.device('meta' ) )
self.assertEqual(model.batchnorm.weight.device , torch.device('meta' ) )
self.assertEqual(model.lineara.weight.device , torch.device('meta' ) )
# Buffers are not included in the offload by default, so are on the execution device
UpperCAmelCase = torch.device(lowercase_ )
self.assertEqual(model.batchnorm.running_mean.device , lowercase_ )
UpperCAmelCase = torch.randn(2 , 3 )
UpperCAmelCase = model(lowercase_ )
self.assertEqual(output.device , lowercase_ )
# Removing hooks loads back the weights in the model.
remove_hook_from_submodules(lowercase_ )
self.assertEqual(model.lineara.weight.device , torch.device('cpu' ) )
self.assertEqual(model.batchnorm.weight.device , torch.device('cpu' ) )
self.assertEqual(model.lineara.weight.device , torch.device('cpu' ) )
# Now test with buffers included in the offload
attach_align_device_hook(
lowercase_ , execution_device=lowercase_ , offload=lowercase_ , weights_map=model.state_dict() , offload_buffers=lowercase_ , )
# Parameters have been offloaded, so on the meta device, buffers included
self.assertEqual(model.lineara.weight.device , torch.device('meta' ) )
self.assertEqual(model.batchnorm.weight.device , torch.device('meta' ) )
self.assertEqual(model.lineara.weight.device , torch.device('meta' ) )
self.assertEqual(model.batchnorm.running_mean.device , torch.device('meta' ) )
UpperCAmelCase = torch.randn(2 , 3 )
UpperCAmelCase = model(lowercase_ )
self.assertEqual(output.device , lowercase_ )
# Removing hooks loads back the weights in the model.
remove_hook_from_submodules(lowercase_ )
self.assertEqual(model.lineara.weight.device , torch.device('cpu' ) )
self.assertEqual(model.batchnorm.weight.device , torch.device('cpu' ) )
self.assertEqual(model.lineara.weight.device , torch.device('cpu' ) )
"""Fast tokenization class for Blenderbot."""
import json
from typing import TYPE_CHECKING, List, Optional, Tuple

from tokenizers import pre_tokenizers, processors

from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_blenderbot import BlenderbotTokenizer


if TYPE_CHECKING:
    from transformers.pipelines.conversational import Conversation

logger = logging.get_logger(__name__)


VOCAB_FILES_NAMES = {
    "vocab_file": "vocab.json",
    "merges_file": "merges.txt",
    "tokenizer_config_file": "tokenizer_config.json",
}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {"facebook/blenderbot-3B": "https://huggingface.co/facebook/blenderbot-3B/resolve/main/vocab.json"},
    "merges_file": {"facebook/blenderbot-3B": "https://huggingface.co/facebook/blenderbot-3B/resolve/main/merges.txt"},
    "tokenizer_config_file": {
        "facebook/blenderbot-3B": "https://huggingface.co/facebook/blenderbot-3B/resolve/main/tokenizer_config.json"
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {"facebook/blenderbot-3B": 128}


class BlenderbotTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = BlenderbotTokenizer

    def __init__(
        self,
        vocab_file=None,
        merges_file=None,
        tokenizer_file=None,
        errors="replace",
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        add_prefix_space=False,
        trim_offsets=True,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            merges_file,
            tokenizer_file=tokenizer_file,
            errors=errors,
            bos_token=bos_token,
            eos_token=eos_token,
            sep_token=sep_token,
            cls_token=cls_token,
            unk_token=unk_token,
            pad_token=pad_token,
            mask_token=mask_token,
            add_prefix_space=add_prefix_space,
            trim_offsets=trim_offsets,
            **kwargs,
        )

        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
        if pre_tok_state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop("type"))
            pre_tok_state["add_prefix_space"] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)

        self.add_prefix_space = add_prefix_space

        tokenizer_component = "post_processor"
        tokenizer_component_instance = getattr(self.backend_tokenizer, tokenizer_component, None)
        if tokenizer_component_instance:
            state = json.loads(tokenizer_component_instance.__getstate__())

            # The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class`
            if "sep" in state:
                state["sep"] = tuple(state["sep"])
            if "cls" in state:
                state["cls"] = tuple(state["cls"])

            changes_to_apply = False

            if state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
                state["add_prefix_space"] = add_prefix_space
                changes_to_apply = True

            if state.get("trim_offsets", trim_offsets) != trim_offsets:
                state["trim_offsets"] = trim_offsets
                changes_to_apply = True

            if changes_to_apply:
                component_class = getattr(processors, state.pop("type"))
                new_value = component_class(**state)
                setattr(self.backend_tokenizer, tokenizer_component, new_value)

    @property
    # Copied from transformers.models.roberta.tokenization_roberta_fast.RobertaTokenizerFast.mask_token with Roberta->Blenderbot, RoBERTa->Blenderbot
    def mask_token(self) -> str:
        if self._mask_token is None:
            if self.verbose:
                logger.error("Using mask_token, but it is not set yet.")
            return None
        return str(self._mask_token)

    @mask_token.setter
    def mask_token(self, value):
        # Mask token behaves like a normal word, i.e. include the space before it
        value = AddedToken(value, lstrip=True, rstrip=False) if isinstance(value, str) else value
        self._mask_token = value

    def _batch_encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)
        assert self.add_prefix_space or not is_split_into_words, (
            f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
            "to use it with pretokenized inputs."
        )
        return super()._batch_encode_plus(*args, **kwargs)

    def _encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)
        assert self.add_prefix_space or not is_split_into_words, (
            f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
            "to use it with pretokenized inputs."
        )
        return super()._encode_plus(*args, **kwargs)

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None):
        return token_ids_0 + [self.eos_token_id]

    def _build_conversation_input_ids(self, conversation: "Conversation") -> List[int]:
        inputs = []
        for is_user, text in conversation.iter_texts():
            if is_user:
                # We need to space prefix as it's being done within blenderbot
                inputs.append(" " + text)
            else:
                # Generated responses should contain them already.
                inputs.append(text)

        full_string = " ".join(inputs)
        input_ids = self.encode(full_string)
        if len(input_ids) > self.model_max_length:
            input_ids = input_ids[-self.model_max_length :]
            logger.warning(f"Trimmed input from conversation as it was longer than {self.model_max_length} tokens.")
        return input_ids
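

# Editor's usage sketch (hedged): typical use via the public API; the first
# call downloads the tokenizer files for facebook/blenderbot-3B.
#
#   from transformers import BlenderbotTokenizerFast
#   tokenizer = BlenderbotTokenizerFast.from_pretrained("facebook/blenderbot-3B")
#   ids = tokenizer(" Hello world").input_ids
#   print(tokenizer.decode(ids))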
from dataclasses import dataclass
from typing import Optional, Tuple, Union

import torch
import torch.nn as nn

from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .embeddings import GaussianFourierProjection, TimestepEmbedding, Timesteps
from .modeling_utils import ModelMixin
from .unet_1d_blocks import get_down_block, get_mid_block, get_out_block, get_up_block


@dataclass
class UNet1DOutput(BaseOutput):
    """
    Args:
        sample (`torch.FloatTensor` of shape `(batch_size, num_channels, sample_size)`):
            Hidden states output. Output of last layer of model.
    """

    sample: torch.FloatTensor


class UNet1DModel(ModelMixin, ConfigMixin):
    """A 1-D UNet model that takes a noisy sample and a timestep and returns a sample-shaped output."""

    @register_to_config
    def __init__(
        self,
        sample_size: int = 65536,
        sample_rate: Optional[int] = None,
        in_channels: int = 2,
        out_channels: int = 2,
        extra_in_channels: int = 0,
        time_embedding_type: str = "fourier",
        flip_sin_to_cos: bool = True,
        use_timestep_embedding: bool = False,
        freq_shift: float = 0.0,
        down_block_types: Tuple[str] = ("DownBlock1DNoSkip", "DownBlock1D", "AttnDownBlock1D"),
        up_block_types: Tuple[str] = ("AttnUpBlock1D", "UpBlock1D", "UpBlock1DNoSkip"),
        mid_block_type: Tuple[str] = "UNetMidBlock1D",
        out_block_type: str = None,
        block_out_channels: Tuple[int] = (32, 32, 64),
        act_fn: str = None,
        norm_num_groups: int = 8,
        layers_per_block: int = 1,
        downsample_each_block: bool = False,
    ):
        super().__init__()
        self.sample_size = sample_size

        # time
        if time_embedding_type == "fourier":
            self.time_proj = GaussianFourierProjection(
                embedding_size=8, set_W_to_weight=False, log=False, flip_sin_to_cos=flip_sin_to_cos
            )
            timestep_input_dim = 2 * block_out_channels[0]
        elif time_embedding_type == "positional":
            self.time_proj = Timesteps(
                block_out_channels[0], flip_sin_to_cos=flip_sin_to_cos, downscale_freq_shift=freq_shift
            )
            timestep_input_dim = block_out_channels[0]

        if use_timestep_embedding:
            time_embed_dim = block_out_channels[0] * 4
            self.time_mlp = TimestepEmbedding(
                in_channels=timestep_input_dim,
                time_embed_dim=time_embed_dim,
                act_fn=act_fn,
                out_dim=block_out_channels[0],
            )

        self.down_blocks = nn.ModuleList([])
        self.mid_block = None
        self.up_blocks = nn.ModuleList([])
        self.out_block = None

        # down
        output_channel = in_channels
        for i, down_block_type in enumerate(down_block_types):
            input_channel = output_channel
            output_channel = block_out_channels[i]

            if i == 0:
                input_channel += extra_in_channels

            is_final_block = i == len(block_out_channels) - 1

            down_block = get_down_block(
                down_block_type,
                num_layers=layers_per_block,
                in_channels=input_channel,
                out_channels=output_channel,
                temb_channels=block_out_channels[0],
                add_downsample=not is_final_block or downsample_each_block,
            )
            self.down_blocks.append(down_block)

        # mid
        self.mid_block = get_mid_block(
            mid_block_type,
            in_channels=block_out_channels[-1],
            mid_channels=block_out_channels[-1],
            out_channels=block_out_channels[-1],
            embed_dim=block_out_channels[0],
            num_layers=layers_per_block,
            add_downsample=downsample_each_block,
        )

        # up
        reversed_block_out_channels = list(reversed(block_out_channels))
        output_channel = reversed_block_out_channels[0]
        if out_block_type is None:
            final_upsample_channels = out_channels
        else:
            final_upsample_channels = block_out_channels[0]

        for i, up_block_type in enumerate(up_block_types):
            prev_output_channel = output_channel
            output_channel = (
                reversed_block_out_channels[i + 1] if i < len(up_block_types) - 1 else final_upsample_channels
            )

            is_final_block = i == len(block_out_channels) - 1

            up_block = get_up_block(
                up_block_type,
                num_layers=layers_per_block,
                in_channels=prev_output_channel,
                out_channels=output_channel,
                temb_channels=block_out_channels[0],
                add_upsample=not is_final_block,
            )
            self.up_blocks.append(up_block)
            prev_output_channel = output_channel

        # out
        num_groups_out = norm_num_groups if norm_num_groups is not None else min(block_out_channels[0] // 4, 32)
        self.out_block = get_out_block(
            out_block_type=out_block_type,
            num_groups_out=num_groups_out,
            embed_dim=block_out_channels[0],
            out_channels=out_channels,
            act_fn=act_fn,
            fc_dim=block_out_channels[-1] // 4,
        )

    def forward(
        self,
        sample: torch.FloatTensor,
        timestep: Union[torch.Tensor, float, int],
        return_dict: bool = True,
    ) -> Union[UNet1DOutput, Tuple]:
        # 1. time
        timesteps = timestep
        if not torch.is_tensor(timesteps):
            timesteps = torch.tensor([timesteps], dtype=torch.long, device=sample.device)
        elif torch.is_tensor(timesteps) and len(timesteps.shape) == 0:
            timesteps = timesteps[None].to(sample.device)

        timestep_embed = self.time_proj(timesteps)
        if self.config.use_timestep_embedding:
            timestep_embed = self.time_mlp(timestep_embed)
        else:
            timestep_embed = timestep_embed[..., None]
            timestep_embed = timestep_embed.repeat([1, 1, sample.shape[2]]).to(sample.dtype)
            timestep_embed = timestep_embed.broadcast_to((sample.shape[:1] + timestep_embed.shape[1:]))

        # 2. down
        down_block_res_samples = ()
        for downsample_block in self.down_blocks:
            sample, res_samples = downsample_block(hidden_states=sample, temb=timestep_embed)
            down_block_res_samples += res_samples

        # 3. mid
        if self.mid_block:
            sample = self.mid_block(sample, timestep_embed)

        # 4. up
        for i, upsample_block in enumerate(self.up_blocks):
            res_samples = down_block_res_samples[-1:]
            down_block_res_samples = down_block_res_samples[:-1]
            sample = upsample_block(sample, res_hidden_states_tuple=res_samples, temb=timestep_embed)

        # 5. post-process
        if self.out_block:
            sample = self.out_block(sample, timestep_embed)

        if not return_dict:
            return (sample,)

        return UNet1DOutput(sample=sample)
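

# Editor's sketch (hedged): a forward pass via the public class. The shapes
# below are illustrative only; with the default block layout the output keeps
# the input's (batch, channels, length) shape.
#
#   from diffusers import UNet1DModel
#   model = UNet1DModel(sample_size=256, in_channels=2, out_channels=2)
#   sample = torch.randn(1, 2, 256)
#   out = model(sample, timestep=10).sample  # expected shape: (1, 2, 256)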
import json
import os
import unittest
from transformers import DebertaTokenizer, DebertaTokenizerFast
from transformers.models.deberta.tokenization_deberta import VOCAB_FILES_NAMES
from transformers.testing_utils import slow
from ...test_tokenization_common import TokenizerTesterMixin


class DebertaTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = DebertaTokenizer
    test_rust_tokenizer = True
    rust_tokenizer_class = DebertaTokenizerFast

    def setUp(self):
        super().setUp()

        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = [
'''l''',
'''o''',
'''w''',
'''e''',
'''r''',
'''s''',
'''t''',
'''i''',
'''d''',
'''n''',
'''\u0120''',
'''\u0120l''',
'''\u0120n''',
'''\u0120lo''',
'''\u0120low''',
'''er''',
'''\u0120lowest''',
'''\u0120newer''',
'''\u0120wider''',
'''[UNK]''',
]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
        self.special_tokens_map = {"unk_token": "[UNK]"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))
    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return self.tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "lower newer"
        output_text = "lower newer"
        return input_text, output_text
    def test_full_tokenizer(self):
        tokenizer = self.get_tokenizer()
        text = "lower newer"
        bpe_tokens = ["l", "o", "w", "er", "\u0120", "n", "e", "w", "er"]
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [0, 1, 2, 15, 10, 9, 3, 2, 15, 19]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)

    def test_token_type_ids(self):
        tokenizer = self.get_tokenizer()
        tokd = tokenizer("Hello", "World")
        expected_token_type_ids = [0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1]
        self.assertListEqual(tokd["token_type_ids"], expected_token_type_ids)
    @slow
    def test_sequence_builders(self):
        tokenizer = self.tokenizer_class.from_pretrained("microsoft/deberta-base")

        text = tokenizer.encode("sequence builders", add_special_tokens=False)
        text_2 = tokenizer.encode("multi-sequence build", add_special_tokens=False)

        encoded_text_from_decode = tokenizer.encode(
            "sequence builders", add_special_tokens=True, add_prefix_space=False
        )
        encoded_pair_from_decode = tokenizer.encode(
            "sequence builders", "multi-sequence build", add_special_tokens=True, add_prefix_space=False
        )

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)

        assert encoded_sentence == encoded_text_from_decode
        assert encoded_pair == encoded_pair_from_decode
    @slow
    def test_tokenizer_integration(self):
        tokenizer_classes = [self.tokenizer_class]
        if self.test_rust_tokenizer:
            tokenizer_classes.append(self.rust_tokenizer_class)

        for tokenizer_class in tokenizer_classes:
            tokenizer = tokenizer_class.from_pretrained("microsoft/deberta-base")

            sequences = [
                "ALBERT: A Lite BERT for Self-supervised Learning of Language Representations",
                "ALBERT incorporates two parameter reduction techniques",
                "The first one is a factorized embedding parameterization. By decomposing the large vocabulary"
                " embedding matrix into two small matrices, we separate the size of the hidden layers from the size of"
                " vocabulary embedding.",
            ]

            encoding = tokenizer(sequences, padding=True)
            decoded_sequences = [tokenizer.decode(seq, skip_special_tokens=True) for seq in encoding["input_ids"]]

            # fmt: off
            expected_encoding = {
'''input_ids''': [
[1, 2_118, 11_126, 565, 35, 83, 25_191, 163, 18_854, 13, 12_156, 12, 16_101, 25_376, 13_807, 9, 22_205, 27_893, 1_635, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 2_118, 11_126, 565, 24_536, 80, 43_797, 4_878, 7_373, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 133, 78, 65, 16, 10, 3_724, 1_538, 33_183, 11_303, 43_797, 1_938, 4, 870, 24_165, 29_105, 5, 739, 32_644, 33_183, 11_303, 36_173, 88, 80, 650, 7_821, 45_940, 6, 52, 2_559, 5, 1_836, 9, 5, 7_397, 13_171, 31, 5, 1_836, 9, 32_644, 33_183, 11_303, 4, 2]
],
'''token_type_ids''': [
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
],
'''attention_mask''': [
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]
]
}
# fmt: on
            expected_decoded_sequences = [
'''ALBERT: A Lite BERT for Self-supervised Learning of Language Representations''',
'''ALBERT incorporates two parameter reduction techniques''',
'''The first one is a factorized embedding parameterization. By decomposing the large vocabulary'''
''' embedding matrix into two small matrices, we separate the size of the hidden layers from the size of'''
''' vocabulary embedding.''',
]
            self.assertDictEqual(encoding.data, expected_encoding)

            for expected, decoded in zip(expected_decoded_sequences, decoded_sequences):
                self.assertEqual(expected, decoded)
import json
import os
import shutil
import warnings
from argparse import ArgumentParser, Namespace
from pathlib import Path
from typing import List
from ..utils import logging
from . import BaseTransformersCLICommand
try:
from cookiecutter.main import cookiecutter
__UpperCAmelCase : List[Any] = True
except ImportError:
__UpperCAmelCase : List[str] = False
__UpperCAmelCase : Any = logging.get_logger(__name__) # pylint: disable=invalid-name
def a ( SCREAMING_SNAKE_CASE_ : Namespace ):
"""simple docstring"""
return AddNewModelCommand(args.testing , args.testing_file , path=args.path )
class UpperCAmelCase_ ( _a):
'''simple docstring'''
@staticmethod
def _lowercase ( __SCREAMING_SNAKE_CASE ):
"""simple docstring"""
UpperCamelCase : List[Any] = parser.add_parser('''add-new-model''' )
add_new_model_parser.add_argument('''--testing''' , action='''store_true''' , help='''If in testing mode.''' )
add_new_model_parser.add_argument('''--testing_file''' , type=__SCREAMING_SNAKE_CASE , help='''Configuration file on which to run.''' )
add_new_model_parser.add_argument(
'''--path''' , type=__SCREAMING_SNAKE_CASE , help='''Path to cookiecutter. Should only be used for testing purposes.''' )
add_new_model_parser.set_defaults(func=__SCREAMING_SNAKE_CASE )
def __init__( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=None , *__SCREAMING_SNAKE_CASE ):
"""simple docstring"""
UpperCamelCase : Tuple = testing
UpperCamelCase : Any = testing_file
UpperCamelCase : Dict = path
def _lowercase ( self ):
"""simple docstring"""
warnings.warn(
'''The command `transformers-cli add-new-model` is deprecated and will be removed in v5 of Transformers. '''
'''It is not actively maintained anymore, so might give a result that won\'t pass all tests and quality '''
'''checks, you should use `transformers-cli add-new-model-like` instead.''' )
if not _has_cookiecutter:
raise ImportError(
'''Model creation dependencies are required to use the `add_new_model` command. Install them by running '''
'''the following at the root of your `transformers` clone:\n\n\t$ pip install -e .[modelcreation]\n''' )
# Ensure that there is no other `cookiecutter-template-xxx` directory in the current working directory
        directories = [directory for directory in os.listdir() if '''cookiecutter-template-''' == directory[:22]]
        if len(directories ) > 0:
raise ValueError(
'''Several directories starting with `cookiecutter-template-` in current working directory. '''
'''Please clean your directory by removing all folders starting with `cookiecutter-template-` or '''
'''change your working directory.''' )
        path_to_transformer_root = (
            Path(__file__ ).parent.parent.parent.parent if self._path is None else Path(self._path ).parent.parent
        )
        path_to_cookiecutter = path_to_transformer_root / '''templates''' / '''adding_a_new_model'''
# Execute cookiecutter
        if not self._testing:
            cookiecutter(str(path_to_cookiecutter ) )
        else:
            with open(self._testing_file , '''r''' ) as configuration_file:
                testing_configuration = json.load(configuration_file )
            cookiecutter(
                str(path_to_cookiecutter if self._path is None else self._path ) , no_input=True , extra_context=testing_configuration , )
        directory = [directory for directory in os.listdir() if '''cookiecutter-template-''' in directory[:22]][0]
        # Retrieve configuration
        with open(directory + '''/configuration.json''' , '''r''' ) as configuration_file:
            configuration = json.load(configuration_file )
        lowercase_model_name = configuration['''lowercase_modelname''']
        generate_tensorflow_pytorch_and_flax = configuration['''generate_tensorflow_pytorch_and_flax''']
os.remove(f"""{directory}/configuration.json""" )
        output_pytorch = '''PyTorch''' in generate_tensorflow_pytorch_and_flax
        output_tensorflow = '''TensorFlow''' in generate_tensorflow_pytorch_and_flax
        output_flax = '''Flax''' in generate_tensorflow_pytorch_and_flax
        model_dir = f"""{path_to_transformer_root}/src/transformers/models/{lowercase_model_name}"""
        os.makedirs(model_dir , exist_ok=True )
        os.makedirs(f"""{path_to_transformer_root}/tests/models/{lowercase_model_name}""" , exist_ok=True )
# Tests require submodules as they have parent imports
with open(f"""{path_to_transformer_root}/tests/models/{lowercase_model_name}/__init__.py""" , '''w''' ):
pass
shutil.move(
f"""{directory}/__init__.py""" , f"""{model_dir}/__init__.py""" , )
shutil.move(
f"""{directory}/configuration_{lowercase_model_name}.py""" , f"""{model_dir}/configuration_{lowercase_model_name}.py""" , )
        def remove_copy_lines(path ):
            with open(path , '''r''' ) as f:
                lines = f.readlines()
            with open(path , '''w''' ) as f:
                for line in lines:
                    if "# Copied from transformers." not in line:
                        f.write(line )
if output_pytorch:
if not self._testing:
remove_copy_lines(f"""{directory}/modeling_{lowercase_model_name}.py""" )
shutil.move(
f"""{directory}/modeling_{lowercase_model_name}.py""" , f"""{model_dir}/modeling_{lowercase_model_name}.py""" , )
shutil.move(
f"""{directory}/test_modeling_{lowercase_model_name}.py""" , f"""{path_to_transformer_root}/tests/models/{lowercase_model_name}/test_modeling_{lowercase_model_name}.py""" , )
else:
os.remove(f"""{directory}/modeling_{lowercase_model_name}.py""" )
os.remove(f"""{directory}/test_modeling_{lowercase_model_name}.py""" )
if output_tensorflow:
if not self._testing:
remove_copy_lines(f"""{directory}/modeling_tf_{lowercase_model_name}.py""" )
shutil.move(
f"""{directory}/modeling_tf_{lowercase_model_name}.py""" , f"""{model_dir}/modeling_tf_{lowercase_model_name}.py""" , )
shutil.move(
f"""{directory}/test_modeling_tf_{lowercase_model_name}.py""" , f"""{path_to_transformer_root}/tests/models/{lowercase_model_name}/test_modeling_tf_{lowercase_model_name}.py""" , )
else:
os.remove(f"""{directory}/modeling_tf_{lowercase_model_name}.py""" )
os.remove(f"""{directory}/test_modeling_tf_{lowercase_model_name}.py""" )
if output_flax:
if not self._testing:
remove_copy_lines(f"""{directory}/modeling_flax_{lowercase_model_name}.py""" )
shutil.move(
f"""{directory}/modeling_flax_{lowercase_model_name}.py""" , f"""{model_dir}/modeling_flax_{lowercase_model_name}.py""" , )
shutil.move(
f"""{directory}/test_modeling_flax_{lowercase_model_name}.py""" , f"""{path_to_transformer_root}/tests/models/{lowercase_model_name}/test_modeling_flax_{lowercase_model_name}.py""" , )
else:
os.remove(f"""{directory}/modeling_flax_{lowercase_model_name}.py""" )
os.remove(f"""{directory}/test_modeling_flax_{lowercase_model_name}.py""" )
shutil.move(
f"""{directory}/{lowercase_model_name}.md""" , f"""{path_to_transformer_root}/docs/source/en/model_doc/{lowercase_model_name}.md""" , )
shutil.move(
f"""{directory}/tokenization_{lowercase_model_name}.py""" , f"""{model_dir}/tokenization_{lowercase_model_name}.py""" , )
shutil.move(
f"""{directory}/tokenization_fast_{lowercase_model_name}.py""" , f"""{model_dir}/tokenization_{lowercase_model_name}_fast.py""" , )
from os import fdopen, remove
from shutil import copymode, move
from tempfile import mkstemp
        def replace(original_file , line_to_copy_below , lines_to_copy ):
            # Create temp file
            fh, abs_path = mkstemp()
            line_found = False
            with fdopen(fh , '''w''' ) as new_file:
                with open(original_file ) as old_file:
                    for line in old_file:
                        new_file.write(line )
                        if line_to_copy_below in line:
                            line_found = True
                            for line_to_copy in lines_to_copy:
                                new_file.write(line_to_copy )
            if not line_found:
                raise ValueError(f"""Line {line_to_copy_below} was not found in file.""" )
            # Copy the file permissions from the old file to the new file
            copymode(original_file , abs_path )
            # Remove original file
            remove(original_file )
            # Move new file
            move(abs_path , original_file )
        def skip_units(line ):
return (
("generating PyTorch" in line and not output_pytorch)
or ("generating TensorFlow" in line and not output_tensorflow)
or ("generating Flax" in line and not output_flax)
)
        def replace_in_files(path_to_datafile ):
            with open(path_to_datafile ) as datafile:
                lines_to_copy = []
                skip_file = False
                skip_snippet = False
                for line in datafile:
                    if "# To replace in: " in line and "##" not in line:
                        file_to_replace_in = line.split('''"''' )[1]
                        skip_file = skip_units(line )
                    elif "# Below: " in line and "##" not in line:
                        line_to_copy_below = line.split('''"''' )[1]
                        skip_snippet = skip_units(line )
                    elif "# End." in line and "##" not in line:
                        if not skip_file and not skip_snippet:
                            replace(file_to_replace_in , line_to_copy_below , lines_to_copy )
                        lines_to_copy = []
                    elif "# Replace with" in line and "##" not in line:
                        lines_to_copy = []
                    elif "##" not in line:
                        lines_to_copy.append(line )
            remove(path_to_datafile )
replace_in_files(f"""{directory}/to_replace_{lowercase_model_name}.py""" )
        os.rmdir(directory )
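# Illustrative invocations of the command registered above (the testing-file
# path below is hypothetical, not part of this module):
#   $ transformers-cli add-new-model
#   $ transformers-cli add-new-model --testing --testing_file ./my_model_config.json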
| 643 | 0 |
"""simple docstring"""
#
# This a `torch.distributed` diagnostics script that checks that all GPUs in the cluster (one or
# many nodes) can talk to each other via nccl and allocate gpu memory.
#
# To run first adjust the number of processes and nodes:
#
# python -m torch.distributed.run --nproc_per_node 2 --nnodes 1 torch-distributed-gpu-test.py
#
# You may need to add --master_addr $MASTER_ADDR --master_port $MASTER_PORT if using a custom addr:port
#
# You can also use the rdzv API: --rdzv_endpoint $MASTER_ADDR:$MASTER_PORT --rdzv_backend c10d
#
# use torch.distributed.launch instead of torch.distributed.run for torch < 1.9
#
# If you get a hanging in `barrier` calls you have some network issues, you may try to debug this with:
#
# NCCL_DEBUG=INFO python -m torch.distributed.run --nproc_per_node 2 --nnodes 1 torch-distributed-gpu-test.py
#
# which should tell you what's going on behind the scenes.
#
#
# This script can be run via `srun` in the SLURM environment as well. Here is a SLURM script that
# runs on 2 nodes of 4 gpus per node:
#
# #SBATCH --job-name=test-nodes # name
# #SBATCH --nodes=2 # nodes
# #SBATCH --ntasks-per-node=1 # crucial - only 1 task per dist per node!
# #SBATCH --cpus-per-task=10 # number of cores per tasks
# #SBATCH --gres=gpu:4 # number of gpus
# #SBATCH --time 0:05:00 # maximum execution time (HH:MM:SS)
# #SBATCH --output=%x-%j.out # output file name
#
# GPUS_PER_NODE=4
# MASTER_ADDR=$(scontrol show hostnames $SLURM_JOB_NODELIST | head -n 1)
# MASTER_PORT=6000
#
# srun --jobid $SLURM_JOBID bash -c 'python -m torch.distributed.run \
# --nproc_per_node $GPUS_PER_NODE --nnodes $SLURM_NNODES --node_rank $SLURM_PROCID \
# --master_addr $MASTER_ADDR --master_port $MASTER_PORT \
# torch-distributed-gpu-test.py'
#
import fcntl
import os
import socket
import torch
import torch.distributed as dist
def printflock ( *msgs ):
    '''simple docstring'''
    with open(__file__ , """r""" ) as fh:
        fcntl.flock(fh , fcntl.LOCK_EX )
        try:
            print(*msgs )
        finally:
            fcntl.flock(fh , fcntl.LOCK_UN )
local_rank = int(os.environ['''LOCAL_RANK'''])
torch.cuda.set_device(local_rank)
device = torch.device('''cuda''', local_rank)
hostname = socket.gethostname()
gpu = F'[{hostname}-{local_rank}]'
try:
# test distributed
dist.init_process_group('''nccl''')
dist.all_reduce(torch.ones(1).to(device), op=dist.ReduceOp.SUM)
dist.barrier()
# test cuda is available and can allocate memory
torch.cuda.is_available()
torch.ones(1).cuda(local_rank)
# global rank
    rank = dist.get_rank()
    world_size = dist.get_world_size()
printflock(F'{gpu} is OK (global rank: {rank}/{world_size})')
dist.barrier()
if rank == 0:
printflock(F'pt={torch.__version__}, cuda={torch.version.cuda}, nccl={torch.cuda.nccl.version()}')
except Exception:
printflock(F'{gpu} is broken')
raise
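# Illustrative output on a healthy single node with 2 GPUs (hostname, ranks and
# versions below are hypothetical, not from a real run):
#   [node1-0] is OK (global rank: 0/2)
#   [node1-1] is OK (global rank: 1/2)
#   [node1-0] pt=2.0.1, cuda=11.8, nccl=(2, 14, 3)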
| 673 |
"""simple docstring"""
import math
def solution ( n = 100 ):
    '''simple docstring'''
    sum_of_squares = sum(i * i for i in range(1 , n + 1 ) )
    square_of_sum = int(math.pow(sum(range(1 , n + 1 ) ) , 2 ) )
    return square_of_sum - sum_of_squares
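# A minimal closed-form sketch (not part of the original solution): the same
# value follows in O(1) from sum(1..n) = n(n+1)/2 and sum(i^2, 1..n) = n(n+1)(2n+1)/6.
def solution_closed_form ( n = 100 ):
    sum_of_squares = n * (n + 1) * (2 * n + 1) // 6
    square_of_sum = (n * (n + 1) // 2) ** 2
    return square_of_sum - sum_of_squares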
if __name__ == "__main__":
print(F'{solution() = }')
| 673 | 1 |
"""simple docstring"""
import logging
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
import evaluate
import numpy as np
import torch
from datasets import load_dataset
from PIL import Image
from torchvision.transforms import (
CenterCrop,
Compose,
Normalize,
RandomHorizontalFlip,
RandomResizedCrop,
Resize,
ToTensor,
)
import transformers
from transformers import (
MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING,
AutoConfig,
AutoImageProcessor,
AutoModelForImageClassification,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version
logger = logging.getLogger(__name__)
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("""4.31.0""")
require_version("""datasets>=1.8.0""", """To fix: pip install -r examples/pytorch/image-classification/requirements.txt""")
MODEL_CONFIG_CLASSES = list(MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING.keys())
MODEL_TYPES = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
def pil_loader ( path ):
    """simple docstring"""
    with open(path , "rb" ) as f:
        im = Image.open(f )
        return im.convert("RGB" )
@dataclass
class DataTrainingArguments :
    dataset_name: Optional[str] = field(
        default=None , metadata={
            '''help''': '''Name of a dataset from the hub (could be your own, possibly private dataset hosted on the hub).'''
        } , )
    dataset_config_name: Optional[str] = field(
        default=None , metadata={'''help''': '''The configuration name of the dataset to use (via the datasets library).'''} )
    train_dir: Optional[str] = field(default=None , metadata={'''help''': '''A folder containing the training data.'''} )
    validation_dir: Optional[str] = field(default=None , metadata={'''help''': '''A folder containing the validation data.'''} )
    train_val_split: Optional[float] = field(
        default=0.1_5 , metadata={'''help''': '''Percent to split off of train for validation.'''} )
    max_train_samples: Optional[int] = field(
        default=None , metadata={
            '''help''': (
                '''For debugging purposes or quicker training, truncate the number of training examples to this '''
                '''value if set.'''
            )
        } , )
    max_eval_samples: Optional[int] = field(
        default=None , metadata={
            '''help''': (
                '''For debugging purposes or quicker training, truncate the number of evaluation examples to this '''
                '''value if set.'''
            )
        } , )
    def __post_init__( self ):
        """simple docstring"""
        if self.dataset_name is None and (self.train_dir is None and self.validation_dir is None):
            raise ValueError(
                "You must specify either a dataset name from the hub or a train and/or validation directory." )
@dataclass
class ModelArguments :
    model_name_or_path: str = field(
        default='''google/vit-base-patch16-224-in21k''' , metadata={'''help''': '''Path to pretrained model or model identifier from huggingface.co/models'''} , )
    model_type: Optional[str] = field(
        default=None , metadata={'''help''': '''If training from scratch, pass a model type from the list: ''' + ''', '''.join(MODEL_TYPES )} , )
    config_name: Optional[str] = field(
        default=None , metadata={'''help''': '''Pretrained config name or path if not the same as model_name'''} )
    cache_dir: Optional[str] = field(
        default=None , metadata={'''help''': '''Where do you want to store the pretrained models downloaded from s3'''} )
    model_revision: str = field(
        default='''main''' , metadata={'''help''': '''The specific model version to use (can be a branch name, tag name or commit id).'''} , )
    image_processor_name: str = field(default=None , metadata={'''help''': '''Name or path of preprocessor config.'''} )
    use_auth_token: bool = field(
        default=False , metadata={
            '''help''': (
                '''Will use the token generated when running `huggingface-cli login` (necessary to use this script '''
                '''with private models).'''
            )
        } , )
    ignore_mismatched_sizes: bool = field(
        default=False , metadata={'''help''': '''Will enable to load a pretrained model whose head dimensions are different.'''} , )
def collate_fn ( examples ):
    """simple docstring"""
    pixel_values = torch.stack([example["pixel_values"] for example in examples] )
    labels = torch.tensor([example["labels"] for example in examples] )
    return {"pixel_values": pixel_values, "labels": labels}
def main ():
"""simple docstring"""
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
    if len(sys.argv ) == 2 and sys.argv[1].endswith(".json" ):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()
# Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
# information sent is the one passed as arguments along with your Python/PyTorch versions.
    send_example_telemetry("run_image_classification" , model_args , data_args )
# Setup logging
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s" , datefmt="%m/%d/%Y %H:%M:%S" , handlers=[logging.StreamHandler(sys.stdout )] , )
if training_args.should_log:
# The default of training_args.log_level is passive, so we set log level at info here to have that default.
transformers.utils.logging.set_verbosity_info()
    log_level = training_args.get_process_log_level()
    logger.setLevel(log_level )
    transformers.utils.logging.set_verbosity(log_level )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
logger.warning(
F"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"
+ F"distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}" )
logger.info(F"Training/evaluation parameters {training_args}" )
# Detecting last checkpoint.
    last_checkpoint = None
    if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
        last_checkpoint = get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
F"Output directory ({training_args.output_dir}) already exists and is not empty. "
"Use --overwrite_output_dir to overcome." )
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
F"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change "
"the `--output_dir` or add `--overwrite_output_dir` to train from scratch." )
# Set seed before initializing model.
set_seed(training_args.seed )
# Initialize our dataset and prepare it for the 'image-classification' task.
    if data_args.dataset_name is not None:
        dataset = load_dataset(
            data_args.dataset_name , data_args.dataset_config_name , cache_dir=model_args.cache_dir , task="image-classification" , use_auth_token=True if model_args.use_auth_token else None , )
    else:
        data_files = {}
        if data_args.train_dir is not None:
            data_files["train"] = os.path.join(data_args.train_dir , "**" )
        if data_args.validation_dir is not None:
            data_files["validation"] = os.path.join(data_args.validation_dir , "**" )
        dataset = load_dataset(
            "imagefolder" , data_files=data_files , cache_dir=model_args.cache_dir , task="image-classification" , )
    # If we don't have a validation split, split off a percentage of train as validation.
    data_args.train_val_split = None if "validation" in dataset.keys() else data_args.train_val_split
    if isinstance(data_args.train_val_split , float ) and data_args.train_val_split > 0.0:
        split = dataset["train"].train_test_split(data_args.train_val_split )
        dataset["train"] = split["train"]
        dataset["validation"] = split["test"]
# Prepare label mappings.
# We'll include these in the model's config to get human readable labels in the Inference API.
UpperCamelCase = dataset["train"].features["labels"].names
UpperCamelCase , UpperCamelCase = {}, {}
for i, label in enumerate(UpperCAmelCase_ ):
UpperCamelCase = str(UpperCAmelCase_ )
UpperCamelCase = label
# Load the accuracy metric from the datasets package
    metric = evaluate.load("accuracy" )
# Define our compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with a
# predictions and label_ids field) and has to return a dictionary string to float.
    def compute_metrics(p ):
return metric.compute(predictions=np.argmax(p.predictions , axis=1 ) , references=p.label_ids )
    config = AutoConfig.from_pretrained(
        model_args.config_name or model_args.model_name_or_path , num_labels=len(labels ) , label2id=label2id , id2label=id2label , finetuning_task="image-classification" , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
    model = AutoModelForImageClassification.from_pretrained(
        model_args.model_name_or_path , from_tf=bool(".ckpt" in model_args.model_name_or_path ) , config=config , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , ignore_mismatched_sizes=model_args.ignore_mismatched_sizes , )
    image_processor = AutoImageProcessor.from_pretrained(
        model_args.image_processor_name or model_args.model_name_or_path , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
# Define torchvision transforms to be applied to each image.
if "shortest_edge" in image_processor.size:
UpperCamelCase = image_processor.size["shortest_edge"]
else:
UpperCamelCase = (image_processor.size["height"], image_processor.size["width"])
UpperCamelCase = Normalize(mean=image_processor.image_mean , std=image_processor.image_std )
UpperCamelCase = Compose(
[
RandomResizedCrop(UpperCAmelCase_ ),
RandomHorizontalFlip(),
ToTensor(),
normalize,
] )
UpperCamelCase = Compose(
[
Resize(UpperCAmelCase_ ),
CenterCrop(UpperCAmelCase_ ),
ToTensor(),
normalize,
] )
def train_transforms(UpperCAmelCase_ ):
UpperCamelCase = [
_train_transforms(pil_img.convert("RGB" ) ) for pil_img in example_batch["image"]
]
return example_batch
def val_transforms(UpperCAmelCase_ ):
UpperCamelCase = [_val_transforms(pil_img.convert("RGB" ) ) for pil_img in example_batch["image"]]
return example_batch
if training_args.do_train:
if "train" not in dataset:
raise ValueError("--do_train requires a train dataset" )
        if data_args.max_train_samples is not None:
            dataset["train"] = (
                dataset["train"].shuffle(seed=training_args.seed ).select(range(data_args.max_train_samples ) )
            )
        # Set the training transforms
        dataset["train"].set_transform(train_transforms )
if training_args.do_eval:
if "validation" not in dataset:
raise ValueError("--do_eval requires a validation dataset" )
        if data_args.max_eval_samples is not None:
            dataset["validation"] = (
                dataset["validation"].shuffle(seed=training_args.seed ).select(range(data_args.max_eval_samples ) )
            )
        # Set the validation transforms
        dataset["validation"].set_transform(val_transforms )
    # Initialize our trainer
    trainer = Trainer(
        model=model , args=training_args , train_dataset=dataset["train"] if training_args.do_train else None , eval_dataset=dataset["validation"] if training_args.do_eval else None , compute_metrics=compute_metrics , tokenizer=image_processor , data_collator=collate_fn , )
# Training
if training_args.do_train:
        checkpoint = None
        if training_args.resume_from_checkpoint is not None:
            checkpoint = training_args.resume_from_checkpoint
        elif last_checkpoint is not None:
            checkpoint = last_checkpoint
        train_result = trainer.train(resume_from_checkpoint=checkpoint )
trainer.save_model()
trainer.log_metrics("train" , train_result.metrics )
trainer.save_metrics("train" , train_result.metrics )
trainer.save_state()
# Evaluation
if training_args.do_eval:
        metrics = trainer.evaluate()
        trainer.log_metrics("eval" , metrics )
        trainer.save_metrics("eval" , metrics )
# Write model card and (optionally) push to hub
    kwargs = {
"finetuned_from": model_args.model_name_or_path,
"tasks": "image-classification",
"dataset": data_args.dataset_name,
"tags": ["image-classification", "vision"],
}
    if training_args.push_to_hub:
        trainer.push_to_hub(**kwargs )
    else:
        trainer.create_model_card(**kwargs )
if __name__ == "__main__":
main()
| 701 |
"""simple docstring"""
def binary_count_setbits ( a )-> int:
    """simple docstring"""
    if a < 0:
        raise ValueError("Input value must be a positive integer" )
    elif isinstance(a , float ):
        raise TypeError("Input value must be a 'int' type" )
    return bin(a ).count("1" )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 556 | 0 |
"""simple docstring"""
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
if TYPE_CHECKING:
from ... import FeatureExtractionMixin, TensorType
logger = logging.get_logger(__name__)
IMAGEGPT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'openai/imagegpt-small': '',
'openai/imagegpt-medium': '',
'openai/imagegpt-large': '',
}
class ImageGPTConfig ( PretrainedConfig ):
    model_type = 'imagegpt'
    keys_to_ignore_at_inference = ['past_key_values']
    attribute_map = {
        'hidden_size': 'n_embd',
        'max_position_embeddings': 'n_positions',
        'num_attention_heads': 'n_head',
        'num_hidden_layers': 'n_layer',
    }
    def __init__( self , vocab_size=512 + 1 , n_positions=32 * 32 , n_embd=512 , n_layer=24 , n_head=8 , n_inner=None , activation_function="quick_gelu" , resid_pdrop=0.1 , embd_pdrop=0.1 , attn_pdrop=0.1 , layer_norm_epsilon=1E-5 , initializer_range=0.02 , scale_attn_weights=True , use_cache=True , scale_attn_by_inverse_layer_idx=False , reorder_and_upcast_attn=False , tie_word_embeddings=False , **kwargs , ):
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_inner = n_inner
        self.activation_function = activation_function
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.scale_attn_weights = scale_attn_weights
        self.use_cache = use_cache
        self.scale_attn_by_inverse_layer_idx = scale_attn_by_inverse_layer_idx
        self.reorder_and_upcast_attn = reorder_and_upcast_attn
        self.tie_word_embeddings = tie_word_embeddings
        super().__init__(tie_word_embeddings=tie_word_embeddings , **kwargs )
class ImageGPTOnnxConfig ( OnnxConfig ):
    @property
    def inputs ( self ) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("""input_ids""", {0: """batch""", 1: """sequence"""}),
            ] )
    def generate_dummy_inputs ( self , preprocessor: "FeatureExtractionMixin" , batch_size: int = 1 , seq_length: int = -1 , is_pair: bool = False , framework: Optional["TensorType"] = None , num_channels: int = 3 , image_width: int = 32 , image_height: int = 32 , ) -> Mapping[str, Any]:
        input_image = self._generate_dummy_images(batch_size , num_channels , image_height , image_width )
        inputs = dict(preprocessor(images=input_image , return_tensors=framework ) )
        return inputs
| 389 |
"""simple docstring"""
from math import ceil
def assert_device_map ( device_map, num_blocks ):
    """simple docstring"""
    blocks = list(range(0, num_blocks ) )
    device_map_blocks = [item for sublist in list(device_map.values() ) for item in sublist]
    # Duplicate check
    duplicate_blocks = []
    for i in device_map_blocks:
        if device_map_blocks.count(i ) > 1 and i not in duplicate_blocks:
            duplicate_blocks.append(i )
    # Missing blocks
    missing_blocks = [i for i in blocks if i not in device_map_blocks]
    extra_blocks = [i for i in device_map_blocks if i not in blocks]
    if len(duplicate_blocks ) != 0:
        raise ValueError(
            "Duplicate attention blocks specified in device_map. Attention blocks must be specified to one device."
            " These attention blocks were specified more than once: " + str(duplicate_blocks ) )
    if len(missing_blocks ) != 0:
        raise ValueError(
            "There are attention blocks for this model that are not specified in the device_map. Add these attention "
            "blocks to a device on the device_map: " + str(missing_blocks ) )
    if len(extra_blocks ) != 0:
        raise ValueError(
            "The device_map contains more attention blocks than this model has. Remove these from the device_map:"
            + str(extra_blocks ) )
def get_device_map ( n_layers, devices ):
    """simple docstring"""
    layers = list(range(n_layers ) )
    n_blocks = int(ceil(n_layers / len(devices ) ) )
    layers_list = [layers[i : i + n_blocks] for i in range(0, n_layers, n_blocks )]
    return dict(zip(devices, layers_list ) )
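# Illustrative usage (hypothetical values, not part of the original module):
# get_device_map(8, [0, 1]) splits eight attention blocks evenly over two
# devices and returns {0: [0, 1, 2, 3], 1: [4, 5, 6, 7]}.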
| 388 | 0 |
"""simple docstring"""
import logging
import sys
from dataclasses import dataclass, field
from typing import Any, Dict, List, Optional, Union
import librosa
import torch
from datasets import DatasetDict, load_dataset
from packaging import version
from torch import nn
from transformers import (
HfArgumentParser,
Trainer,
TrainingArguments,
WavaVecaConfig,
WavaVecaFeatureExtractor,
WavaVecaForPreTraining,
is_apex_available,
trainer_utils,
)
from transformers.models.wavaveca.modeling_wavaveca import _compute_mask_indices
if is_apex_available():
from apex import amp
if version.parse(version.parse(torch.__version__).base_version) >= version.parse("1.6"):
    _is_native_amp_available = True
from torch.cuda.amp import autocast
logger = logging.getLogger(__name__)
@dataclass
class ModelArguments :
    model_name_or_path: str = field(
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"} )
    cache_dir: Optional[str] = field(
        default=None , metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"} , )
    freeze_feature_extractor: Optional[bool] = field(
        default=True , metadata={"help": "Whether to freeze the feature extractor layers of the model."} )
    verbose_logging: Optional[bool] = field(
        default=False , metadata={"help": "Whether to log verbose messages or not."} , )
    max_gumbel_temperature: Optional[float] = field(
        default=2.0 , metadata={"help": "Maximum temperature for gumbel softmax."} )
    min_gumbel_temperature: Optional[float] = field(
        default=0.5 , metadata={"help": "Minimum temperature for gumbel softmax."} )
    gumbel_temperature_decay: Optional[float] = field(
        default=0.999995 , metadata={"help": "Decay of gumbel temperature during training."} )
def configure_logger ( model_args , training_args ):
    '''simple docstring'''
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s" , datefmt="%m/%d/%Y %H:%M:%S" , handlers=[logging.StreamHandler(sys.stdout )] , )
    logging_level = logging.WARNING
    if model_args.verbose_logging:
        logging_level = logging.DEBUG
    elif trainer_utils.is_main_process(training_args.local_rank ):
        logging_level = logging.INFO
    logger.setLevel(logging_level )
@dataclass
class DataTrainingArguments :
    dataset_name: str = field(
        default=None , metadata={"help": "The name of the dataset to use (via the datasets library)."} )
    dataset_config_name: Optional[str] = field(
        default=None , metadata={"help": "The configuration name of the dataset to use (via the datasets library)."} )
    train_split_name: Optional[str] = field(
        default="train" , metadata={
            "help": "The name of the training data set split to use (via the datasets library). Defaults to 'train'"
        } , )
    validation_split_name: Optional[str] = field(
        default="validation" , metadata={
            "help": (
                "The name of the validation data set split to use (via the datasets library). Defaults to 'validation'"
            )
        } , )
    speech_file_column: Optional[str] = field(
        default="file" , metadata={"help": "Column in the dataset that contains speech file path. Defaults to 'file'"} , )
    overwrite_cache: bool = field(
        default=False , metadata={"help": "Overwrite the cached preprocessed datasets or not."} )
    validation_split_percentage: Optional[int] = field(
        default=1 , metadata={
            "help": "The percentage of the train set used as validation set in case there's no validation split"
        } , )
    preprocessing_num_workers: Optional[int] = field(
        default=None , metadata={"help": "The number of processes to use for the preprocessing."} , )
    max_duration_in_seconds: Optional[float] = field(
        default=20.0 , metadata={"help": "Filter audio files that are longer than `max_duration_in_seconds` seconds"} )
@dataclass
class DataCollatorForWavaVecaPretraining :
    model: WavaVecaForPreTraining
    feature_extractor: WavaVecaFeatureExtractor
    padding: Union[bool, str] = "longest"
    pad_to_multiple_of: Optional[int] = None
    max_length: Optional[int] = None
    def __call__( self, features ):
        '''simple docstring'''
        batch = self.feature_extractor.pad(
            features, max_length=self.max_length, padding=self.padding, pad_to_multiple_of=self.pad_to_multiple_of, return_tensors="pt", )
        mask_indices_seq_length = self.model._get_feat_extract_output_lengths(batch["input_values"].shape[-1] )
        batch_size = batch["input_values"].shape[0]
        # make sure that no loss is computed on padded inputs
        if batch["attention_mask"] is not None:
            # compute real output lengths according to convolution formula
            output_lengths = self.model._get_feat_extract_output_lengths(batch["attention_mask"].sum(-1 ) ).to(
                torch.long )
            attention_mask = torch.zeros(
                (batch_size, mask_indices_seq_length), dtype=torch.long, device=batch["input_values"].device )
            # these two operations make sure that all values
            # before the output lengths indices are attended to
            attention_mask[
                (torch.arange(attention_mask.shape[0], device=batch["input_values"].device ), output_lengths - 1)
            ] = 1
            attention_mask = attention_mask.flip([-1] ).cumsum(-1 ).flip([-1] ).bool()
        # sample randomly masked indices
        batch["mask_time_indices"] = _compute_mask_indices(
            (batch_size, mask_indices_seq_length), self.model.config.mask_time_prob, self.model.config.mask_time_length, attention_mask=attention_mask, min_masks=2, )
        return batch
class WavaVecaPreTrainer ( Trainer ):
    def __init__( self, *args, max_gumbel_temp=1, min_gumbel_temp=0, gumbel_temp_decay=1.0, **kwargs ):
        '''simple docstring'''
        super().__init__(*args, **kwargs )
        self.num_update_step = 0
        self.max_gumbel_temp = max_gumbel_temp
        self.min_gumbel_temp = min_gumbel_temp
        self.gumbel_temp_decay = gumbel_temp_decay
    def training_step ( self, model, inputs ):
        '''simple docstring'''
        model.train()
        inputs = self._prepare_inputs(inputs )
        if self.use_amp:
            with autocast():
                loss = self.compute_loss(model, inputs )
        else:
            loss = self.compute_loss(model, inputs )
        if self.args.n_gpu > 1 or self.deepspeed:
            if model.module.config.ctc_loss_reduction == "mean":
                loss = loss.mean()
            elif model.module.config.ctc_loss_reduction == "sum":
                loss = loss.sum() / (inputs["mask_time_indices"]).sum()
            else:
                raise ValueError(F'''{model.config.ctc_loss_reduction} is not valid. Choose one of ['mean', 'sum']''' )
        if self.args.gradient_accumulation_steps > 1:
            loss = loss / self.args.gradient_accumulation_steps
        if self.use_amp:
            self.scaler.scale(loss ).backward()
        elif self.use_apex:
            with amp.scale_loss(loss, self.optimizer ) as scaled_loss:
                scaled_loss.backward()
        elif self.deepspeed:
            self.deepspeed.backward(loss )
        else:
            loss.backward()
        self.num_update_step += 1
        # make sure gumbel softmax temperature is decayed
        if self.args.n_gpu > 1 or self.deepspeed:
            model.module.set_gumbel_temperature(
                max(self.max_gumbel_temp * self.gumbel_temp_decay**self.num_update_step, self.min_gumbel_temp ) )
        else:
            model.set_gumbel_temperature(
                max(self.max_gumbel_temp * self.gumbel_temp_decay**self.num_update_step, self.min_gumbel_temp ) )
        return loss.detach()
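# Illustrative note (not in the original file): assuming the ModelArguments
# defaults above (max_gumbel_temp=2.0, gumbel_temp_decay=0.999995,
# min_gumbel_temp=0.5), the temperature reaches its floor after roughly
# ln(0.25) / ln(0.999995), about 277,000 update steps.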
def main ():
    '''simple docstring'''
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
    model_args, data_args, training_args = parser.parse_args_into_dataclasses()
    configure_logger(model_args , training_args )
    # Downloading and loading a dataset from the hub.
    datasets = load_dataset(data_args.dataset_name , data_args.dataset_config_name , cache_dir=model_args.cache_dir )
    if "validation" not in datasets.keys():
        # make sure only "validation" and "train" keys remain"
        datasets = DatasetDict()
        datasets["validation"] = load_dataset(
            data_args.dataset_name , data_args.dataset_config_name , split=f'''{data_args.train_split_name}[:{data_args.validation_split_percentage}%]''' , cache_dir=model_args.cache_dir , )
        datasets["train"] = load_dataset(
            data_args.dataset_name , data_args.dataset_config_name , split=f'''{data_args.train_split_name}[{data_args.validation_split_percentage}%:]''' , cache_dir=model_args.cache_dir , )
    else:
        # make sure only "validation" and "train" keys remain"
        datasets = DatasetDict()
        datasets["validation"] = load_dataset(
            data_args.dataset_name , data_args.dataset_config_name , split="validation" , cache_dir=model_args.cache_dir , )
        datasets["train"] = load_dataset(
            data_args.dataset_name , data_args.dataset_config_name , split=f'''{data_args.train_split_name}''' , cache_dir=model_args.cache_dir , )
    # only normalized-inputs-training is supported
    feature_extractor = WavaVecaFeatureExtractor.from_pretrained(
        model_args.model_name_or_path , cache_dir=model_args.cache_dir , do_normalize=True )
    def prepare_dataset(batch ):
        # check that all files have the correct sampling rate
        batch["speech"], _ = librosa.load(batch[data_args.speech_file_column] , sr=feature_extractor.sampling_rate )
        return batch
    # load audio files into numpy arrays
    vectorized_datasets = datasets.map(
        prepare_dataset , num_proc=data_args.preprocessing_num_workers , remove_columns=datasets["train"].column_names )
    # filter audio files that are too long
    vectorized_datasets = vectorized_datasets.filter(
        lambda data : len(data["speech"] ) < int(data_args.max_duration_in_seconds * feature_extractor.sampling_rate ) )
    def normalize(batch ):
        return feature_extractor(batch["speech"] , sampling_rate=feature_extractor.sampling_rate )
    # normalize and transform to `BatchFeatures`
    vectorized_datasets = vectorized_datasets.map(
        normalize , batched=True , num_proc=data_args.preprocessing_num_workers , load_from_cache_file=not data_args.overwrite_cache , remove_columns=vectorized_datasets["train"].column_names , )
    # pretraining is only supported for "newer" stable layer norm architecture
    # apply_spec_augment has to be True, mask_feature_prob has to be 0.0
    config = WavaVecaConfig.from_pretrained(
        model_args.model_name_or_path , cache_dir=model_args.cache_dir , gradient_checkpointing=training_args.gradient_checkpointing , )
    if not config.do_stable_layer_norm or config.feat_extract_norm != "layer":
        raise ValueError(
            "PreTraining is only supported for ``config.do_stable_layer_norm=True`` and"
            " ``config.feat_extract_norm='layer'" )
    model = WavaVecaForPreTraining(config )
    data_collator = DataCollatorForWavaVecaPretraining(model=model , feature_extractor=feature_extractor )
    trainer = WavaVecaPreTrainer(
        model=model , data_collator=data_collator , args=training_args , train_dataset=vectorized_datasets["train"] , eval_dataset=vectorized_datasets["validation"] , tokenizer=feature_extractor , max_gumbel_temp=model_args.max_gumbel_temperature , min_gumbel_temp=model_args.min_gumbel_temperature , gumbel_temp_decay=model_args.gumbel_temperature_decay , )
    trainer.train()
if __name__ == "__main__":
main()
| 718 | """simple docstring"""
fast27_timesteps = [
9_9_9,
8_0_0,
7_9_9,
6_0_0,
5_9_9,
5_0_0,
4_0_0,
3_9_9,
3_7_7,
3_5_5,
3_3_3,
3_1_1,
2_8_8,
2_6_6,
2_4_4,
2_2_2,
2_0_0,
1_9_9,
1_7_7,
1_5_5,
1_3_3,
1_1_1,
8_8,
6_6,
4_4,
2_2,
0,
]
smart27_timesteps = [
9_9_9,
9_7_6,
9_5_2,
9_2_8,
9_0_5,
8_8_2,
8_5_8,
8_5_7,
8_1_0,
7_6_2,
7_1_5,
7_1_4,
5_7_2,
4_2_9,
4_2_8,
2_8_6,
2_8_5,
2_3_8,
1_9_0,
1_4_3,
1_4_2,
1_1_8,
9_5,
7_1,
4_7,
2_4,
0,
]
smart50_timesteps = [
9_9_9,
9_8_8,
9_7_7,
9_6_6,
9_5_5,
9_4_4,
9_3_3,
9_2_2,
9_1_1,
9_0_0,
8_9_9,
8_7_9,
8_5_9,
8_4_0,
8_2_0,
8_0_0,
7_9_9,
7_6_6,
7_3_3,
7_0_0,
6_9_9,
6_5_0,
6_0_0,
5_9_9,
5_0_0,
4_9_9,
4_0_0,
3_9_9,
3_5_0,
3_0_0,
2_9_9,
2_6_6,
2_3_3,
2_0_0,
1_9_9,
1_7_9,
1_5_9,
1_4_0,
1_2_0,
1_0_0,
9_9,
8_8,
7_7,
6_6,
5_5,
4_4,
3_3,
2_2,
1_1,
0,
]
smart100_timesteps = [
9_9_9,
9_9_5,
9_9_2,
9_8_9,
9_8_5,
9_8_1,
9_7_8,
9_7_5,
9_7_1,
9_6_7,
9_6_4,
9_6_1,
9_5_7,
9_5_6,
9_5_1,
9_4_7,
9_4_2,
9_3_7,
9_3_3,
9_2_8,
9_2_3,
9_1_9,
9_1_4,
9_1_3,
9_0_8,
9_0_3,
8_9_7,
8_9_2,
8_8_7,
8_8_1,
8_7_6,
8_7_1,
8_7_0,
8_6_4,
8_5_8,
8_5_2,
8_4_6,
8_4_0,
8_3_4,
8_2_8,
8_2_7,
8_2_0,
8_1_3,
8_0_6,
7_9_9,
7_9_2,
7_8_5,
7_8_4,
7_7_7,
7_7_0,
7_6_3,
7_5_6,
7_4_9,
7_4_2,
7_4_1,
7_3_3,
7_2_4,
7_1_6,
7_0_7,
6_9_9,
6_9_8,
6_8_8,
6_7_7,
6_6_6,
6_5_6,
6_5_5,
6_4_5,
6_3_4,
6_2_3,
6_1_3,
6_1_2,
5_9_8,
5_8_4,
5_7_0,
5_6_9,
5_5_5,
5_4_1,
5_2_7,
5_2_6,
5_0_5,
4_8_4,
4_8_3,
4_6_2,
4_4_0,
4_3_9,
3_9_6,
3_9_5,
3_5_2,
3_5_1,
3_0_8,
3_0_7,
2_6_4,
2_6_3,
2_2_0,
2_1_9,
1_7_6,
1_3_2,
8_8,
4_4,
0,
]
smart185_timesteps = [
9_9_9,
9_9_7,
9_9_5,
9_9_2,
9_9_0,
9_8_8,
9_8_6,
9_8_4,
9_8_1,
9_7_9,
9_7_7,
9_7_5,
9_7_2,
9_7_0,
9_6_8,
9_6_6,
9_6_4,
9_6_1,
9_5_9,
9_5_7,
9_5_6,
9_5_4,
9_5_1,
9_4_9,
9_4_6,
9_4_4,
9_4_1,
9_3_9,
9_3_6,
9_3_4,
9_3_1,
9_2_9,
9_2_6,
9_2_4,
9_2_1,
9_1_9,
9_1_6,
9_1_4,
9_1_3,
9_1_0,
9_0_7,
9_0_5,
9_0_2,
8_9_9,
8_9_6,
8_9_3,
8_9_1,
8_8_8,
8_8_5,
8_8_2,
8_7_9,
8_7_7,
8_7_4,
8_7_1,
8_7_0,
8_6_7,
8_6_4,
8_6_1,
8_5_8,
8_5_5,
8_5_2,
8_4_9,
8_4_6,
8_4_3,
8_4_0,
8_3_7,
8_3_4,
8_3_1,
8_2_8,
8_2_7,
8_2_4,
8_2_1,
8_1_7,
8_1_4,
8_1_1,
8_0_8,
8_0_4,
8_0_1,
7_9_8,
7_9_5,
7_9_1,
7_8_8,
7_8_5,
7_8_4,
7_8_0,
7_7_7,
7_7_4,
7_7_0,
7_6_6,
7_6_3,
7_6_0,
7_5_6,
7_5_2,
7_4_9,
7_4_6,
7_4_2,
7_4_1,
7_3_7,
7_3_3,
7_3_0,
7_2_6,
7_2_2,
7_1_8,
7_1_4,
7_1_0,
7_0_7,
7_0_3,
6_9_9,
6_9_8,
6_9_4,
6_9_0,
6_8_5,
6_8_1,
6_7_7,
6_7_3,
6_6_9,
6_6_4,
6_6_0,
6_5_6,
6_5_5,
6_5_0,
6_4_6,
6_4_1,
6_3_6,
6_3_2,
6_2_7,
6_2_2,
6_1_8,
6_1_3,
6_1_2,
6_0_7,
6_0_2,
5_9_6,
5_9_1,
5_8_6,
5_8_0,
5_7_5,
5_7_0,
5_6_9,
5_6_3,
5_5_7,
5_5_1,
5_4_5,
5_3_9,
5_3_3,
5_2_7,
5_2_6,
5_1_9,
5_1_2,
5_0_5,
4_9_8,
4_9_1,
4_8_4,
4_8_3,
4_7_4,
4_6_6,
4_5_7,
4_4_9,
4_4_0,
4_3_9,
4_2_8,
4_1_8,
4_0_7,
3_9_6,
3_9_5,
3_8_1,
3_6_6,
3_5_2,
3_5_1,
3_3_0,
3_0_8,
3_0_7,
2_8_6,
2_6_4,
2_6_3,
2_4_2,
2_2_0,
2_1_9,
1_7_6,
1_7_5,
1_3_2,
1_3_1,
8_8,
4_4,
0,
]
super27_timesteps = [
9_9_9,
9_9_1,
9_8_2,
9_7_4,
9_6_6,
9_5_8,
9_5_0,
9_4_1,
9_3_3,
9_2_5,
9_1_6,
9_0_8,
9_0_0,
8_9_9,
8_7_4,
8_5_0,
8_2_5,
8_0_0,
7_9_9,
7_0_0,
6_0_0,
5_0_0,
4_0_0,
3_0_0,
2_0_0,
1_0_0,
0,
]
super40_timesteps = [
9_9_9,
9_9_2,
9_8_5,
9_7_8,
9_7_1,
9_6_4,
9_5_7,
9_4_9,
9_4_2,
9_3_5,
9_2_8,
9_2_1,
9_1_4,
9_0_7,
9_0_0,
8_9_9,
8_7_9,
8_5_9,
8_4_0,
8_2_0,
8_0_0,
7_9_9,
7_6_6,
7_3_3,
7_0_0,
6_9_9,
6_5_0,
6_0_0,
5_9_9,
5_0_0,
4_9_9,
4_0_0,
3_9_9,
3_0_0,
2_9_9,
2_0_0,
1_9_9,
1_0_0,
9_9,
0,
]
super100_timesteps = [
9_9_9,
9_9_6,
9_9_2,
9_8_9,
9_8_5,
9_8_2,
9_7_9,
9_7_5,
9_7_2,
9_6_8,
9_6_5,
9_6_1,
9_5_8,
9_5_5,
9_5_1,
9_4_8,
9_4_4,
9_4_1,
9_3_8,
9_3_4,
9_3_1,
9_2_7,
9_2_4,
9_2_0,
9_1_7,
9_1_4,
9_1_0,
9_0_7,
9_0_3,
9_0_0,
8_9_9,
8_9_1,
8_8_4,
8_7_6,
8_6_9,
8_6_1,
8_5_3,
8_4_6,
8_3_8,
8_3_0,
8_2_3,
8_1_5,
8_0_8,
8_0_0,
7_9_9,
7_8_8,
7_7_7,
7_6_6,
7_5_5,
7_4_4,
7_3_3,
7_2_2,
7_1_1,
7_0_0,
6_9_9,
6_8_8,
6_7_7,
6_6_6,
6_5_5,
6_4_4,
6_3_3,
6_2_2,
6_1_1,
6_0_0,
5_9_9,
5_8_5,
5_7_1,
5_5_7,
5_4_2,
5_2_8,
5_1_4,
5_0_0,
4_9_9,
4_8_5,
4_7_1,
4_5_7,
4_4_2,
4_2_8,
4_1_4,
4_0_0,
3_9_9,
3_7_9,
3_5_9,
3_4_0,
3_2_0,
3_0_0,
2_9_9,
2_7_9,
2_5_9,
2_4_0,
2_2_0,
2_0_0,
1_9_9,
1_6_6,
1_3_3,
1_0_0,
9_9,
6_6,
3_3,
0,
]
| 668 | 0 |
"""simple docstring"""
import json
import os
from pathlib import Path
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple, Union
import sentencepiece
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)
__SCREAMING_SNAKE_CASE ="▁"
VOCAB_FILES_NAMES = {
"vocab_file": "vocab.json",
"spm_file": "sentencepiece.bpe.model",
}
PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"facebook/s2t-small-librispeech-asr": (
"https://huggingface.co/facebook/s2t-small-librispeech-asr/resolve/main/vocab.json"
),
},
"spm_file": {
"facebook/s2t-small-librispeech-asr": (
"https://huggingface.co/facebook/s2t-small-librispeech-asr/resolve/main/sentencepiece.bpe.model"
)
},
}
MAX_MODEL_INPUT_SIZES = {
"facebook/s2t-small-librispeech-asr": 1024,
}
__SCREAMING_SNAKE_CASE =["pt", "fr", "ru", "nl", "ro", "it", "es", "de"]
__SCREAMING_SNAKE_CASE ={"mustc": MUSTC_LANGS}
class SpeechaTextTokenizer ( PreTrainedTokenizer ):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = MAX_MODEL_INPUT_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    prefix_tokens: List[int] = []
    def __init__( self , vocab_file , spm_file , bos_token="<s>" , eos_token="</s>" , pad_token="<pad>" , unk_token="<unk>" , do_upper_case=False , do_lower_case=False , tgt_lang=None , lang_codes=None , sp_model_kwargs = None , **kwargs , ) -> None:
        '''simple docstring'''
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(
            bos_token=bos_token , eos_token=eos_token , unk_token=unk_token , pad_token=pad_token , do_upper_case=do_upper_case , do_lower_case=do_lower_case , tgt_lang=tgt_lang , lang_codes=lang_codes , sp_model_kwargs=self.sp_model_kwargs , **kwargs , )
        self.do_upper_case = do_upper_case
        self.do_lower_case = do_lower_case
        self.encoder = load_json(vocab_file )
        self.decoder = {v: k for k, v in self.encoder.items()}
        self.spm_file = spm_file
        self.sp_model = load_spm(spm_file , self.sp_model_kwargs )
        if lang_codes is not None:
            self.lang_codes = lang_codes
            self.langs = LANGUAGES[lang_codes]
            self.lang_tokens = [f'''<lang:{lang}>''' for lang in self.langs]
            self.lang_code_to_id = {lang: self.sp_model.PieceToId(f'''<lang:{lang}>''' ) for lang in self.langs}
            self._additional_special_tokens = self.lang_tokens
            self._tgt_lang = tgt_lang if tgt_lang is not None else self.langs[0]
            self.set_tgt_lang_special_tokens(self._tgt_lang )
        else:
            self.lang_code_to_id = {}
    @property
    def vocab_size ( self ) -> int:
        '''simple docstring'''
        return len(self.encoder )
    @property
    def tgt_lang ( self ) -> str:
        '''simple docstring'''
        return self._tgt_lang
    @tgt_lang.setter
    def tgt_lang ( self , new_tgt_lang ) -> None:
        '''simple docstring'''
        self._tgt_lang = new_tgt_lang
        self.set_tgt_lang_special_tokens(new_tgt_lang )
    def set_tgt_lang_special_tokens ( self , tgt_lang ) -> None:
        '''simple docstring'''
        lang_code_id = self.lang_code_to_id[tgt_lang]
        self.prefix_tokens = [lang_code_id]
    def _tokenize ( self , text ) -> List[str]:
        '''simple docstring'''
        return self.sp_model.encode(text , out_type=str )
    def _convert_token_to_id ( self , token ):
        '''simple docstring'''
        return self.encoder.get(token , self.encoder[self.unk_token] )
    def _convert_id_to_token ( self , index ) -> str:
        '''simple docstring'''
        return self.decoder.get(index , self.unk_token )
    def convert_tokens_to_string ( self , tokens ) -> str:
        '''simple docstring'''
        current_sub_tokens = []
        out_string = ''
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                decoded = self.sp_model.decode(current_sub_tokens )
                out_string += (decoded.upper() if self.do_upper_case else decoded) + token + " "
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token )
        decoded = self.sp_model.decode(current_sub_tokens )
        out_string += decoded.upper() if self.do_upper_case else decoded
        return out_string.strip()
    def build_inputs_with_special_tokens ( self , token_ids_0 , token_ids_1=None ) -> List[int]:
        '''simple docstring'''
        if token_ids_1 is None:
            return self.prefix_tokens + token_ids_0 + [self.eos_token_id]
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return self.prefix_tokens + token_ids_0 + token_ids_1 + [self.eos_token_id]
    def get_special_tokens_mask ( self , token_ids_0 , token_ids_1 = None , already_has_special_tokens = False ) -> List[int]:
        '''simple docstring'''
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0 , token_ids_1=token_ids_1 , already_has_special_tokens=True )
        prefix_ones = [1] * len(self.prefix_tokens )
        suffix_ones = [1]
        if token_ids_1 is None:
            return prefix_ones + ([0] * len(token_ids_0 )) + suffix_ones
        return prefix_ones + ([0] * len(token_ids_0 )) + ([0] * len(token_ids_1 )) + suffix_ones
    def get_vocab ( self ) -> Dict:
        '''simple docstring'''
        vocab = self.encoder.copy()
        vocab.update(self.added_tokens_encoder )
        return vocab
    def __getstate__( self ) -> Dict:
        '''simple docstring'''
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state
    def __setstate__( self , d ) -> None:
        '''simple docstring'''
        self.__dict__ = d
        # for backward compatibility
        if not hasattr(self , 'sp_model_kwargs' ):
            self.sp_model_kwargs = {}
        self.sp_model = load_spm(self.spm_file , self.sp_model_kwargs )
    def save_vocabulary ( self , save_directory , filename_prefix = None ) -> Tuple[str]:
        '''simple docstring'''
        save_dir = Path(save_directory )
        assert save_dir.is_dir(), f'''{save_directory} should be a directory'''
        vocab_save_path = save_dir / (
            (filename_prefix + '-' if filename_prefix else '') + self.vocab_files_names['vocab_file']
        )
        spm_save_path = save_dir / (
            (filename_prefix + '-' if filename_prefix else '') + self.vocab_files_names['spm_file']
        )
        save_json(self.encoder , vocab_save_path )
        if os.path.abspath(self.spm_file ) != os.path.abspath(spm_save_path ) and os.path.isfile(self.spm_file ):
            copyfile(self.spm_file , spm_save_path )
        elif not os.path.isfile(self.spm_file ):
            with open(spm_save_path , 'wb' ) as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model )
        return (str(vocab_save_path ), str(spm_save_path ))
def load_spm ( path , sp_model_kwargs ):
    spm = sentencepiece.SentencePieceProcessor(**sp_model_kwargs )
    spm.Load(str(path ) )
    return spm
def load_json ( path ):
    with open(path , 'r' ) as f:
        return json.load(f )
def save_json ( data , path ):
    with open(path , 'w' ) as f:
        json.dump(data , f , indent=2 )
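# Illustrative round trip for the JSON helpers above (hypothetical path):
#   save_json({"<pad>": 0, "<s>": 1}, "vocab.json")
#   assert load_json("vocab.json") == {"<pad>": 0, "<s>": 1}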
| 425 |
"""simple docstring"""
import itertools
import math
def is_prime ( number ):
    '''simple docstring'''
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False
    # All primes number are in format of 6k +/- 1
    for i in range(5 , int(math.sqrt(number ) + 1 ) , 6 ):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True
def prime_generator ():
    '''simple docstring'''
    num = 2
    while True:
        if is_prime(num ):
            yield num
        num += 1
def solution ( nth = 1_00_01 ):
    '''simple docstring'''
    return next(itertools.islice(prime_generator() , nth - 1 , nth ) )
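# Illustrative usage: solution(6) == 13 (the sixth prime); the default
# solution() yields the 10001st prime, 104743.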
if __name__ == "__main__":
print(F"""{solution() = }""") | 553 | 0 |
import argparse
import json
import math
import os
import time
import traceback
import zipfile
from collections import Counter
import requests
def get_job_links ( workflow_run_id , token=None ):
    headers = None
    if token is not None:
        headers = {"Accept": "application/vnd.github+json", "Authorization": F'''Bearer {token}'''}
    url = F'''https://api.github.com/repos/huggingface/transformers/actions/runs/{workflow_run_id}/jobs?per_page=100'''
    result = requests.get(url , headers=headers ).json()
    job_links = {}
    try:
        job_links.update({job["name"]: job["html_url"] for job in result["jobs"]} )
        pages_to_iterate_over = math.ceil((result["total_count"] - 100) / 100 )
        for i in range(pages_to_iterate_over ):
            result = requests.get(url + F'''&page={i + 2}''' , headers=headers ).json()
            job_links.update({job["name"]: job["html_url"] for job in result["jobs"]} )
        return job_links
    except Exception:
        print(F'''Unknown error, could not fetch links:\n{traceback.format_exc()}''' )
    return {}
def get_artifacts_links ( worflow_run_id , token=None ):
    headers = None
    if token is not None:
        headers = {"Accept": "application/vnd.github+json", "Authorization": F'''Bearer {token}'''}
    url = F'''https://api.github.com/repos/huggingface/transformers/actions/runs/{worflow_run_id}/artifacts?per_page=100'''
    result = requests.get(url , headers=headers ).json()
    artifacts = {}
    try:
        artifacts.update({artifact["name"]: artifact["archive_download_url"] for artifact in result["artifacts"]} )
        pages_to_iterate_over = math.ceil((result["total_count"] - 100) / 100 )
        for i in range(pages_to_iterate_over ):
            result = requests.get(url + F'''&page={i + 2}''' , headers=headers ).json()
            artifacts.update({artifact["name"]: artifact["archive_download_url"] for artifact in result["artifacts"]} )
        return artifacts
    except Exception:
        print(F'''Unknown error, could not fetch links:\n{traceback.format_exc()}''' )
    return {}
def download_artifact ( artifact_name , artifact_url , output_dir , token ):
    headers = None
    if token is not None:
        headers = {"Accept": "application/vnd.github+json", "Authorization": F'''Bearer {token}'''}
    result = requests.get(artifact_url , headers=headers , allow_redirects=False )
    download_url = result.headers["Location"]
    response = requests.get(download_url , allow_redirects=True )
    file_path = os.path.join(output_dir , F'''{artifact_name}.zip''' )
    with open(file_path , "wb" ) as fp:
        fp.write(response.content )
def get_errors_from_single_artifact ( artifact_zip_path , job_links=None ):
    errors = []
    failed_tests = []
    job_name = None
    with zipfile.ZipFile(artifact_zip_path ) as z:
        for filename in z.namelist():
            if not os.path.isdir(filename ):
                # read the file
                if filename in ["failures_line.txt", "summary_short.txt", "job_name.txt"]:
                    with z.open(filename ) as f:
                        for line in f:
                            line = line.decode("UTF-8" ).strip()
                            if filename == "failures_line.txt":
                                try:
                                    # `error_line` is the place where `error` occurs
                                    error_line = line[: line.index(": " )]
                                    error = line[line.index(": " ) + len(": " ) :]
                                    errors.append([error_line, error] )
                                except Exception:
                                    # skip un-related lines
                                    pass
                            elif filename == "summary_short.txt" and line.startswith("FAILED " ):
                                # `test` is the test method that failed
                                test = line[len("FAILED " ) :]
                                failed_tests.append(test )
                            elif filename == "job_name.txt":
                                job_name = line
    if len(errors ) != len(failed_tests ):
        raise ValueError(
            F'''`errors` and `failed_tests` should have the same number of elements. Got {len(errors )} for `errors` '''
            F'''and {len(failed_tests )} for `failed_tests` instead. The test reports in {artifact_zip_path} have some'''
            " problem." )
    job_link = None
    if job_name and job_links:
        job_link = job_links.get(job_name , None )
    # A list with elements of the form (line of error, error, failed test)
    result = [x + [y] + [job_link] for x, y in zip(errors , failed_tests )]
    return result
def get_all_errors ( artifact_dir , job_links=None ):
    errors = []
    paths = [os.path.join(artifact_dir , p ) for p in os.listdir(artifact_dir ) if p.endswith(".zip" )]
    for p in paths:
        errors.extend(get_errors_from_single_artifact(p , job_links=job_links ) )
    return errors
def reduce_by_error ( logs , error_filter=None ):
    counter = Counter()
    counter.update([x[1] for x in logs] )
    counts = counter.most_common()
    r = {}
    for error, count in counts:
        if error_filter is None or error not in error_filter:
            r[error] = {"count": count, "failed_tests": [(x[2], x[0]) for x in logs if x[1] == error]}
    r = dict(sorted(r.items() , key=lambda item : item[1]["count"] , reverse=True ) )
    return r
def get_model ( test ):
    test = test.split("::" )[0]
    if test.startswith("tests/models/" ):
        test = test.split("/" )[2]
    else:
        test = None
    return test
def reduce_by_model ( logs , error_filter=None ):
    logs = [(x[0], x[1], get_model(x[2] )) for x in logs]
    logs = [x for x in logs if x[2] is not None]
    tests = {x[2] for x in logs}
    r = {}
    for test in tests:
        counter = Counter()
        # count by errors in `test`
        counter.update([x[1] for x in logs if x[2] == test] )
        counts = counter.most_common()
        error_counts = {error: count for error, count in counts if (error_filter is None or error not in error_filter)}
        n_errors = sum(error_counts.values() )
        if n_errors > 0:
            r[test] = {"count": n_errors, "errors": error_counts}
    r = dict(sorted(r.items() , key=lambda item : item[1]["count"] , reverse=True ) )
    return r
def make_github_table ( reduced_by_error ):
    header = "| no. | error | status |"
    sep = "|-:|:-|:-|"
    lines = [header, sep]
    for error in reduced_by_error:
        count = reduced_by_error[error]["count"]
        line = F'''| {count} | {error[:100]} | |'''
        lines.append(line )
    return "\n".join(lines )
def make_github_table_per_model(reduced_by_model):
    header = "| model | no. of errors | major error | count |"
    sep = "|-:|-:|-:|-:|"
    lines = [header, sep]
    for model in reduced_by_model:
        count = reduced_by_model[model]["count"]
        error, _count = list(reduced_by_model[model]["errors"].items())[0]
        line = f"| {model} | {count} | {error[:60]} | {_count} |"
        lines.append(line)
    return "\n".join(lines)
if __name__ == "__main__":
__lowerCAmelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument("--workflow_run_id", type=str, required=True, help="A GitHub Actions workflow run id.")
parser.add_argument(
"--output_dir",
type=str,
required=True,
help="Where to store the downloaded artifacts and other result files.",
)
parser.add_argument("--token", default=None, type=str, help="A token that has actions:read permission.")
__lowerCAmelCase = parser.parse_args()
os.makedirs(args.output_dir, exist_ok=True)
__lowerCAmelCase = get_job_links(args.workflow_run_id, token=args.token)
__lowerCAmelCase = {}
# To deal with `workflow_call` event, where a job name is the combination of the job names in the caller and callee.
# For example, `PyTorch 1.11 / Model tests (models/albert, single-gpu)`.
if _job_links:
for k, v in _job_links.items():
# This is how GitHub actions combine job names.
if " / " in k:
__lowerCAmelCase = k.find(" / ")
__lowerCAmelCase = k[index + len(" / ") :]
__lowerCAmelCase = v
with open(os.path.join(args.output_dir, "job_links.json"), "w", encoding="UTF-8") as fp:
json.dump(job_links, fp, ensure_ascii=False, indent=4)
__lowerCAmelCase = get_artifacts_links(args.workflow_run_id, token=args.token)
with open(os.path.join(args.output_dir, "artifacts.json"), "w", encoding="UTF-8") as fp:
json.dump(artifacts, fp, ensure_ascii=False, indent=4)
for idx, (name, url) in enumerate(artifacts.items()):
download_artifact(name, url, args.output_dir, args.token)
# Be gentle to GitHub
time.sleep(1)
__lowerCAmelCase = get_all_errors(args.output_dir, job_links=job_links)
# `e[1]` is the error
__lowerCAmelCase = Counter()
counter.update([e[1] for e in errors])
# print the top 30 most common test errors
__lowerCAmelCase = counter.most_common(3_0)
for item in most_common:
print(item)
with open(os.path.join(args.output_dir, "errors.json"), "w", encoding="UTF-8") as fp:
json.dump(errors, fp, ensure_ascii=False, indent=4)
__lowerCAmelCase = reduce_by_error(errors)
__lowerCAmelCase = reduce_by_model(errors)
__lowerCAmelCase = make_github_table(reduced_by_error)
__lowerCAmelCase = make_github_table_per_model(reduced_by_model)
with open(os.path.join(args.output_dir, "reduced_by_error.txt"), "w", encoding="UTF-8") as fp:
fp.write(sa)
with open(os.path.join(args.output_dir, "reduced_by_model.txt"), "w", encoding="UTF-8") as fp:
fp.write(sa)
| 712 |
from ..utils import DummyObject, requires_backends
class TFGPT2Tokenizer(metaclass=DummyObject):
    _backends = ["keras_nlp"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["keras_nlp"])
| 129 | 0 |
import enum
import os
from hashlib import sha256
from typing import Optional
from .. import config
from .logging import get_logger
logger = get_logger(__name__)
class VerificationMode(enum.Enum):
    ALL_CHECKS = "all_checks"
    BASIC_CHECKS = "basic_checks"
    NO_CHECKS = "no_checks"


class ChecksumVerificationException(Exception):
    pass


class UnexpectedDownloadedFile(ChecksumVerificationException):
    pass


class ExpectedMoreDownloadedFiles(ChecksumVerificationException):
    pass


class NonMatchingChecksumError(ChecksumVerificationException):
    pass
def verify_checksums(expected_checksums: Optional[dict], recorded_checksums: dict, verification_name=None):
    if expected_checksums is None:
        logger.info("Unable to verify checksums.")
        return
    if len(set(expected_checksums) - set(recorded_checksums)) > 0:
        raise ExpectedMoreDownloadedFiles(str(set(expected_checksums) - set(recorded_checksums)))
    if len(set(recorded_checksums) - set(expected_checksums)) > 0:
        raise UnexpectedDownloadedFile(str(set(recorded_checksums) - set(expected_checksums)))
    bad_urls = [url for url in expected_checksums if expected_checksums[url] != recorded_checksums[url]]
    for_verification_name = " for " + verification_name if verification_name is not None else ""
    if len(bad_urls) > 0:
        raise NonMatchingChecksumError(
            f"Checksums didn't match{for_verification_name}:\n"
            f"{bad_urls}\n"
            "Set `verification_mode='no_checks'` to skip checksums verification and ignore this error"
        )
    logger.info("All the checksums matched successfully" + for_verification_name)
class SplitsVerificationException(Exception):
    pass


class UnexpectedSplits(SplitsVerificationException):
    pass


class ExpectedMoreSplits(SplitsVerificationException):
    pass


class NonMatchingSplitsSizesError(SplitsVerificationException):
    pass
def verify_splits(expected_splits: Optional[dict], recorded_splits: dict):
    if expected_splits is None:
        logger.info("Unable to verify splits sizes.")
        return
    if len(set(expected_splits) - set(recorded_splits)) > 0:
        raise ExpectedMoreSplits(str(set(expected_splits) - set(recorded_splits)))
    if len(set(recorded_splits) - set(expected_splits)) > 0:
        raise UnexpectedSplits(str(set(recorded_splits) - set(expected_splits)))
    bad_splits = [
        {"expected": expected_splits[name], "recorded": recorded_splits[name]}
        for name in expected_splits
        if expected_splits[name].num_examples != recorded_splits[name].num_examples
    ]
    if len(bad_splits) > 0:
        raise NonMatchingSplitsSizesError(str(bad_splits))
    logger.info("All the splits matched successfully.")
def get_size_checksum_dict(path: str, record_checksum: bool = True) -> dict:
    """Compute the file size and the sha256 checksum of a file"""
    if record_checksum:
        m = sha256()
        with open(path, "rb") as f:
            # hash the file in 1 MiB chunks to keep memory usage constant
            for chunk in iter(lambda: f.read(1 << 20), b""):
                m.update(chunk)
        checksum = m.hexdigest()
    else:
        checksum = None
    return {"num_bytes": os.path.getsize(path), "checksum": checksum}


def is_small_dataset(dataset_size):
    """Check if `dataset_size` is smaller than `config.IN_MEMORY_MAX_SIZE`"""
    if dataset_size and config.IN_MEMORY_MAX_SIZE:
        return dataset_size < config.IN_MEMORY_MAX_SIZE
    else:
        return False
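# Illustrative behaviour (assuming config.IN_MEMORY_MAX_SIZE is set to 250 << 20, i.e. 250 MiB):
#   is_small_dataset(100 << 20)  # True: a 100 MiB dataset fits in memory
#   is_small_dataset(1 << 30)    # False: a 1 GiB dataset does not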
| 354 |
from __future__ import annotations
class BoyerMooreSearch:
    def __init__(self, text: str, pattern: str) -> None:
        self.text, self.pattern = text, pattern
        self.textLen, self.patLen = len(text), len(pattern)

    def match_in_pattern(self, char: str) -> int:
        """Index of the last occurrence of `char` in the pattern, or -1."""
        for i in range(self.patLen - 1, -1, -1):
            if char == self.pattern[i]:
                return i
        return -1

    def mismatch_in_text(self, current_pos: int) -> int:
        """Index of the rightmost mismatch when the pattern is aligned at
        `current_pos`, or -1 if the pattern matches there."""
        for i in range(self.patLen - 1, -1, -1):
            if self.pattern[i] != self.text[current_pos + i]:
                return current_pos + i
        return -1

    def bad_character_heuristic(self) -> list[int]:
        # searches pattern in text and returns index positions
        positions = []
        for i in range(self.textLen - self.patLen + 1):
            mismatch_index = self.mismatch_in_text(i)
            if mismatch_index == -1:
                positions.append(i)
            else:
                match_index = self.match_in_pattern(self.text[mismatch_index])
                i = (
                    mismatch_index - match_index
                )  # shifting index lgtm [py/multiple-definition]
        return positions
text = "ABAABA"
pattern = "AB"

bms = BoyerMooreSearch(text, pattern)
positions = bms.bad_character_heuristic()
if len(positions) == 0:
print('''No match found''')
else:
print('''Pattern found in following positions: ''')
print(positions)
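# For the demo inputs above ("ABAABA", "AB"), the expected output is:
#   Pattern found in following positions:
#   [0, 3]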
| 354 | 1 |
import unittest
from transformers import AutoTokenizer, FalconConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
FalconForCausalLM,
FalconForQuestionAnswering,
FalconForSequenceClassification,
FalconForTokenClassification,
FalconModel,
)
class FalconModelTester:
    def __init__(
        self,
        parent,
        batch_size=3,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=False,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def get_config(self):
        return FalconConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
            pad_token_id=1,
            new_decoder_architecture=True,
        )
    def create_and_check_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = FalconModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
def lowercase ( self: Optional[int] , _SCREAMING_SNAKE_CASE: Optional[int] , _SCREAMING_SNAKE_CASE: Union[str, Any] , _SCREAMING_SNAKE_CASE: Optional[int] , _SCREAMING_SNAKE_CASE: Tuple , _SCREAMING_SNAKE_CASE: List[str] , _SCREAMING_SNAKE_CASE: Any , _SCREAMING_SNAKE_CASE: Optional[int] , _SCREAMING_SNAKE_CASE: str , _SCREAMING_SNAKE_CASE: Tuple , ) -> str:
"""simple docstring"""
UpperCamelCase_ = True
UpperCamelCase_ = FalconModel(_SCREAMING_SNAKE_CASE )
model.to(_SCREAMING_SNAKE_CASE )
model.eval()
UpperCamelCase_ = model(
_SCREAMING_SNAKE_CASE , attention_mask=_SCREAMING_SNAKE_CASE , encoder_hidden_states=_SCREAMING_SNAKE_CASE , encoder_attention_mask=_SCREAMING_SNAKE_CASE , )
UpperCamelCase_ = model(
_SCREAMING_SNAKE_CASE , attention_mask=_SCREAMING_SNAKE_CASE , encoder_hidden_states=_SCREAMING_SNAKE_CASE , )
UpperCamelCase_ = model(_SCREAMING_SNAKE_CASE , attention_mask=_SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def lowercase ( self: Optional[int] , _SCREAMING_SNAKE_CASE: Optional[int] , _SCREAMING_SNAKE_CASE: Optional[Any] , _SCREAMING_SNAKE_CASE: Union[str, Any] , _SCREAMING_SNAKE_CASE: Optional[Any] , _SCREAMING_SNAKE_CASE: Union[str, Any] , _SCREAMING_SNAKE_CASE: Optional[Any] , _SCREAMING_SNAKE_CASE: Optional[Any] , _SCREAMING_SNAKE_CASE: Any , _SCREAMING_SNAKE_CASE: int , ) -> int:
"""simple docstring"""
UpperCamelCase_ = FalconForCausalLM(config=_SCREAMING_SNAKE_CASE )
model.to(_SCREAMING_SNAKE_CASE )
model.eval()
UpperCamelCase_ = model(_SCREAMING_SNAKE_CASE , attention_mask=_SCREAMING_SNAKE_CASE , labels=_SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def lowercase ( self: Optional[int] , _SCREAMING_SNAKE_CASE: Optional[int] , _SCREAMING_SNAKE_CASE: int , _SCREAMING_SNAKE_CASE: Any , _SCREAMING_SNAKE_CASE: Union[str, Any] , _SCREAMING_SNAKE_CASE: Optional[int] , _SCREAMING_SNAKE_CASE: Optional[Any] , _SCREAMING_SNAKE_CASE: Optional[Any] , _SCREAMING_SNAKE_CASE: Tuple , _SCREAMING_SNAKE_CASE: Union[str, Any] , ) -> List[Any]:
"""simple docstring"""
UpperCamelCase_ = True
UpperCamelCase_ = True
UpperCamelCase_ = FalconForCausalLM(config=_SCREAMING_SNAKE_CASE )
model.to(_SCREAMING_SNAKE_CASE )
model.eval()
# first forward pass
UpperCamelCase_ = model(
_SCREAMING_SNAKE_CASE , attention_mask=_SCREAMING_SNAKE_CASE , encoder_hidden_states=_SCREAMING_SNAKE_CASE , encoder_attention_mask=_SCREAMING_SNAKE_CASE , use_cache=_SCREAMING_SNAKE_CASE , )
UpperCamelCase_ = outputs.past_key_values
# create hypothetical multiple next token and extent to next_input_ids
UpperCamelCase_ = ids_tensor((self.batch_size, 3) , config.vocab_size )
UpperCamelCase_ = ids_tensor((self.batch_size, 3) , vocab_size=2 )
# append to next input_ids and
UpperCamelCase_ = torch.cat([input_ids, next_tokens] , dim=-1 )
UpperCamelCase_ = torch.cat([input_mask, next_mask] , dim=-1 )
UpperCamelCase_ = model(
_SCREAMING_SNAKE_CASE , attention_mask=_SCREAMING_SNAKE_CASE , encoder_hidden_states=_SCREAMING_SNAKE_CASE , encoder_attention_mask=_SCREAMING_SNAKE_CASE , output_hidden_states=_SCREAMING_SNAKE_CASE , )["hidden_states"][0]
UpperCamelCase_ = model(
_SCREAMING_SNAKE_CASE , attention_mask=_SCREAMING_SNAKE_CASE , encoder_hidden_states=_SCREAMING_SNAKE_CASE , encoder_attention_mask=_SCREAMING_SNAKE_CASE , past_key_values=_SCREAMING_SNAKE_CASE , output_hidden_states=_SCREAMING_SNAKE_CASE , )["hidden_states"][0]
# select random slice
UpperCamelCase_ = ids_tensor((1,) , output_from_past.shape[-1] ).item()
UpperCamelCase_ = output_from_no_past[:, -3:, random_slice_idx].detach()
UpperCamelCase_ = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , atol=1e-3 ) )
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class FalconModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
(
FalconModel,
FalconForCausalLM,
FalconForSequenceClassification,
FalconForTokenClassification,
FalconForQuestionAnswering,
)
if is_torch_available()
else ()
)
    all_generative_model_classes = (FalconForCausalLM,) if is_torch_available() else ()
    pipeline_model_mapping = (
{
'''feature-extraction''': FalconModel,
'''text-classification''': FalconForSequenceClassification,
'''text-generation''': FalconForCausalLM,
'''question-answering''': FalconForQuestionAnswering,
'''token-classification''': FalconForTokenClassification,
'''zero-shot''': FalconForSequenceClassification,
}
if is_torch_available()
else {}
)
    test_headmasking = False
    test_pruning = False
    def setUp(self):
        self.model_tester = FalconModelTester(self)
        self.config_tester = ConfigTester(self, config_class=FalconConfig, hidden_size=37)
    def test_config(self):
        self.config_tester.run_common_tests()
    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)
def lowercase ( self: List[Any] ) -> Dict:
"""simple docstring"""
UpperCamelCase_ , *UpperCamelCase_ = self.model_tester.prepare_config_and_inputs()
for alibi in [True, False]:
UpperCamelCase_ = alibi
self.model_tester.create_and_check_model(_SCREAMING_SNAKE_CASE , *_SCREAMING_SNAKE_CASE )
def lowercase ( self: Any ) -> str:
"""simple docstring"""
UpperCamelCase_ , UpperCamelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
UpperCamelCase_ = 3
UpperCamelCase_ = input_dict["input_ids"]
UpperCamelCase_ = input_ids.ne(1 ).to(_SCREAMING_SNAKE_CASE )
UpperCamelCase_ = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size )
UpperCamelCase_ = FalconForSequenceClassification(_SCREAMING_SNAKE_CASE )
model.to(_SCREAMING_SNAKE_CASE )
model.eval()
UpperCamelCase_ = model(_SCREAMING_SNAKE_CASE , attention_mask=_SCREAMING_SNAKE_CASE , labels=_SCREAMING_SNAKE_CASE )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
def lowercase ( self: Dict ) -> List[str]:
"""simple docstring"""
UpperCamelCase_ , UpperCamelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
UpperCamelCase_ = 3
UpperCamelCase_ = "single_label_classification"
UpperCamelCase_ = input_dict["input_ids"]
UpperCamelCase_ = input_ids.ne(1 ).to(_SCREAMING_SNAKE_CASE )
UpperCamelCase_ = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size )
UpperCamelCase_ = FalconForSequenceClassification(_SCREAMING_SNAKE_CASE )
model.to(_SCREAMING_SNAKE_CASE )
model.eval()
UpperCamelCase_ = model(_SCREAMING_SNAKE_CASE , attention_mask=_SCREAMING_SNAKE_CASE , labels=_SCREAMING_SNAKE_CASE )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
def lowercase ( self: List[str] ) -> int:
"""simple docstring"""
UpperCamelCase_ , UpperCamelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
UpperCamelCase_ = input_dict["input_ids"]
UpperCamelCase_ = FalconForCausalLM(_SCREAMING_SNAKE_CASE )
model.to(_SCREAMING_SNAKE_CASE )
model.eval()
UpperCamelCase_ = model(_SCREAMING_SNAKE_CASE , use_cache=_SCREAMING_SNAKE_CASE )
UpperCamelCase_ = input_ids.shape[0]
UpperCamelCase_ = model._convert_to_rw_cache(result.past_key_values )
UpperCamelCase_ = model._convert_cache_to_standard_format(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
for layer in range(len(_SCREAMING_SNAKE_CASE ) ):
for tensor_idx in range(2 ):
self.assertTrue(rw_cache[layer][tensor_idx].ndim == 3 )
self.assertTrue(result.past_key_values[layer][tensor_idx].ndim == 4 )
self.assertTrue(
torch.all(result.past_key_values[layer][tensor_idx] == standard_cache[layer][tensor_idx] ) )
def lowercase ( self: Union[str, Any] ) -> Any:
"""simple docstring"""
UpperCamelCase_ , UpperCamelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
UpperCamelCase_ = 3
UpperCamelCase_ = "multi_label_classification"
UpperCamelCase_ = input_dict["input_ids"]
UpperCamelCase_ = input_ids.ne(1 ).to(_SCREAMING_SNAKE_CASE )
UpperCamelCase_ = ids_tensor(
[self.model_tester.batch_size, config.num_labels] , self.model_tester.type_sequence_label_size ).to(torch.float )
UpperCamelCase_ = FalconForSequenceClassification(_SCREAMING_SNAKE_CASE )
model.to(_SCREAMING_SNAKE_CASE )
model.eval()
UpperCamelCase_ = model(_SCREAMING_SNAKE_CASE , attention_mask=_SCREAMING_SNAKE_CASE , labels=_SCREAMING_SNAKE_CASE )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
def lowercase ( self: List[Any] ) -> List[Any]:
"""simple docstring"""
for model_class in self.all_generative_model_classes:
UpperCamelCase_ , UpperCamelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
# If it doesn't support cache, pass the test
if not hasattr(_SCREAMING_SNAKE_CASE , "use_cache" ):
return
UpperCamelCase_ = model_class(_SCREAMING_SNAKE_CASE ).to(_SCREAMING_SNAKE_CASE )
if "use_cache" not in inputs:
UpperCamelCase_ = True
UpperCamelCase_ = model(**_SCREAMING_SNAKE_CASE )
# If "past_key_values" is not returned, pass the test (e.g. RWKV uses a different cache name and format)
if "past_key_values" not in outputs:
return
UpperCamelCase_ = (
getattr(_SCREAMING_SNAKE_CASE , "decoder_layers" , _SCREAMING_SNAKE_CASE )
or getattr(_SCREAMING_SNAKE_CASE , "num_decoder_layers" , _SCREAMING_SNAKE_CASE )
or config.num_hidden_layers
)
UpperCamelCase_ = getattr(_SCREAMING_SNAKE_CASE , "num_kv_heads" , config.num_attention_heads )
UpperCamelCase_ = getattr(_SCREAMING_SNAKE_CASE , "d_model" , config.hidden_size )
UpperCamelCase_ = embed_dim // num_attention_heads
UpperCamelCase_ = outputs["past_key_values"]
self.assertEqual(len(_SCREAMING_SNAKE_CASE ) , _SCREAMING_SNAKE_CASE )
UpperCamelCase_ , UpperCamelCase_ = inputs["input_ids"].shape
for i in range(_SCREAMING_SNAKE_CASE ):
if config.new_decoder_architecture:
UpperCamelCase_ = config.num_attention_heads
elif config.multi_query:
UpperCamelCase_ = 1
self.assertEqual(len(past_kv[0] ) , 2 ) # K V for the decoder = 2
self.assertEqual(
past_kv[i][0].shape , (batch_size, num_attention_heads, seq_length, per_head_embed_dim) )
self.assertEqual(
past_kv[i][1].shape , (batch_size, num_attention_heads, seq_length, per_head_embed_dim) )
@require_torch
class FalconLanguageGenerationTest(unittest.TestCase):
@slow
    def test_lm_generate_falcon(self):
UpperCamelCase_ = AutoTokenizer.from_pretrained("Rocketknight1/falcon-rw-1b" )
UpperCamelCase_ = FalconForCausalLM.from_pretrained("Rocketknight1/falcon-rw-1b" )
model.eval()
model.to(_SCREAMING_SNAKE_CASE )
UpperCamelCase_ = tokenizer("My favorite food is" , return_tensors="pt" ).to(_SCREAMING_SNAKE_CASE )
UpperCamelCase_ = (
"My favorite food is pizza. I love it so much that I have a pizza party every year for my birthday."
)
UpperCamelCase_ = model.generate(**_SCREAMING_SNAKE_CASE , do_sample=_SCREAMING_SNAKE_CASE , max_new_tokens=19 )
UpperCamelCase_ = tokenizer.batch_decode(_SCREAMING_SNAKE_CASE )[0]
self.assertEqual(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
@slow
    def test_lm_generation_big_models(self):
for repo in ["Rocketknight1/tiny-random-falcon-7b", "Rocketknight1/tiny-random-falcon-40b"]:
UpperCamelCase_ = AutoTokenizer.from_pretrained(_SCREAMING_SNAKE_CASE )
UpperCamelCase_ = FalconForCausalLM.from_pretrained(_SCREAMING_SNAKE_CASE )
model.eval()
model.to(_SCREAMING_SNAKE_CASE )
UpperCamelCase_ = tokenizer("My favorite food is" , return_tensors="pt" ).to(_SCREAMING_SNAKE_CASE )
# We just test that these run without errors - the models are randomly initialized
# and so the actual text outputs will be garbage
model.generate(**_SCREAMING_SNAKE_CASE , do_sample=_SCREAMING_SNAKE_CASE , max_new_tokens=4 )
model.generate(**_SCREAMING_SNAKE_CASE , do_sample=_SCREAMING_SNAKE_CASE , max_new_tokens=4 )
model.generate(**_SCREAMING_SNAKE_CASE , num_beams=2 , max_new_tokens=4 )
@slow
    def test_lm_generation_use_cache(self):
with torch.no_grad():
for repo in [
"Rocketknight1/falcon-rw-1b",
"Rocketknight1/tiny-random-falcon-7b",
"Rocketknight1/tiny-random-falcon-40b",
]:
UpperCamelCase_ = AutoTokenizer.from_pretrained(_SCREAMING_SNAKE_CASE )
UpperCamelCase_ = FalconForCausalLM.from_pretrained(_SCREAMING_SNAKE_CASE )
model.eval()
model.to(device=_SCREAMING_SNAKE_CASE )
UpperCamelCase_ = tokenizer("My favorite food is" , return_tensors="pt" ).to(_SCREAMING_SNAKE_CASE )
# Test results are the same with and without cache
UpperCamelCase_ = model.generate(**_SCREAMING_SNAKE_CASE , do_sample=_SCREAMING_SNAKE_CASE , max_new_tokens=20 , use_cache=_SCREAMING_SNAKE_CASE )
UpperCamelCase_ = model.generate(**_SCREAMING_SNAKE_CASE , do_sample=_SCREAMING_SNAKE_CASE , max_new_tokens=20 , use_cache=_SCREAMING_SNAKE_CASE )
self.assertTrue((outputs_cache - outputs_no_cache).sum().item() == 0 )
| 371 |
import unittest
from transformers import MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING, is_vision_available, pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
else:
    class Image:
        @staticmethod
        def open(*args, **kwargs):
            pass
@is_pipeline_test
@require_vision
@require_torch
class ZeroShotObjectDetectionPipelineTests(unittest.TestCase):
    model_mapping = MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING
    def get_test_pipeline(self, model, tokenizer, processor):
        object_detector = pipeline(
            "zero-shot-object-detection", model="hf-internal-testing/tiny-random-owlvit-object-detection"
        )

        examples = [
            {
                "image": "./tests/fixtures/tests_samples/COCO/000000039769.png",
                "candidate_labels": ["cat", "remote", "couch"],
            }
        ]
        return object_detector, examples

    def run_pipeline_test(self, object_detector, examples):
        outputs = object_detector(examples[0], threshold=0.0)

        n = len(outputs)
        self.assertGreater(n, 0)
        self.assertEqual(
            outputs,
            [
                {
                    "score": ANY(float),
                    "label": ANY(str),
                    "box": {"xmin": ANY(int), "ymin": ANY(int), "xmax": ANY(int), "ymax": ANY(int)},
                }
                for i in range(n)
            ],
        )
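    # Note: with threshold=0.0 no candidate box is filtered out, so even a randomly
    # initialized detector is expected to return at least one detection above.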
@require_tf
@unittest.skip("Zero Shot Object Detection not implemented in TF" )
def lowercase ( self: Tuple ) -> List[str]:
"""simple docstring"""
pass
@require_torch
def lowercase ( self: Union[str, Any] ) -> str:
"""simple docstring"""
UpperCamelCase_ = pipeline(
"zero-shot-object-detection" , model="hf-internal-testing/tiny-random-owlvit-object-detection" )
UpperCamelCase_ = object_detector(
"./tests/fixtures/tests_samples/COCO/000000039769.png" , candidate_labels=["cat", "remote", "couch"] , threshold=0.64 , )
self.assertEqual(
nested_simplify(_SCREAMING_SNAKE_CASE , decimals=4 ) , [
{"score": 0.72_35, "label": "cat", "box": {"xmin": 204, "ymin": 167, "xmax": 232, "ymax": 190}},
{"score": 0.72_18, "label": "remote", "box": {"xmin": 204, "ymin": 167, "xmax": 232, "ymax": 190}},
{"score": 0.71_84, "label": "couch", "box": {"xmin": 204, "ymin": 167, "xmax": 232, "ymax": 190}},
{"score": 0.67_48, "label": "remote", "box": {"xmin": 571, "ymin": 83, "xmax": 598, "ymax": 103}},
{"score": 0.66_56, "label": "cat", "box": {"xmin": 571, "ymin": 83, "xmax": 598, "ymax": 103}},
{"score": 0.66_14, "label": "couch", "box": {"xmin": 571, "ymin": 83, "xmax": 598, "ymax": 103}},
{"score": 0.64_56, "label": "remote", "box": {"xmin": 494, "ymin": 105, "xmax": 521, "ymax": 127}},
{"score": 0.6_42, "label": "remote", "box": {"xmin": 67, "ymin": 274, "xmax": 93, "ymax": 297}},
{"score": 0.64_19, "label": "cat", "box": {"xmin": 494, "ymin": 105, "xmax": 521, "ymax": 127}},
] , )
UpperCamelCase_ = object_detector(
[
{
"image": "./tests/fixtures/tests_samples/COCO/000000039769.png",
"candidate_labels": ["cat", "remote", "couch"],
}
] , threshold=0.64 , )
self.assertEqual(
nested_simplify(_SCREAMING_SNAKE_CASE , decimals=4 ) , [
[
{"score": 0.72_35, "label": "cat", "box": {"xmin": 204, "ymin": 167, "xmax": 232, "ymax": 190}},
{"score": 0.72_18, "label": "remote", "box": {"xmin": 204, "ymin": 167, "xmax": 232, "ymax": 190}},
{"score": 0.71_84, "label": "couch", "box": {"xmin": 204, "ymin": 167, "xmax": 232, "ymax": 190}},
{"score": 0.67_48, "label": "remote", "box": {"xmin": 571, "ymin": 83, "xmax": 598, "ymax": 103}},
{"score": 0.66_56, "label": "cat", "box": {"xmin": 571, "ymin": 83, "xmax": 598, "ymax": 103}},
{"score": 0.66_14, "label": "couch", "box": {"xmin": 571, "ymin": 83, "xmax": 598, "ymax": 103}},
{"score": 0.64_56, "label": "remote", "box": {"xmin": 494, "ymin": 105, "xmax": 521, "ymax": 127}},
{"score": 0.6_42, "label": "remote", "box": {"xmin": 67, "ymin": 274, "xmax": 93, "ymax": 297}},
{"score": 0.64_19, "label": "cat", "box": {"xmin": 494, "ymin": 105, "xmax": 521, "ymax": 127}},
]
] , )
@require_torch
@slow
def lowercase ( self: List[str] ) -> List[str]:
"""simple docstring"""
UpperCamelCase_ = pipeline("zero-shot-object-detection" )
UpperCamelCase_ = object_detector(
"http://images.cocodataset.org/val2017/000000039769.jpg" , candidate_labels=["cat", "remote", "couch"] , )
self.assertEqual(
nested_simplify(_SCREAMING_SNAKE_CASE , decimals=4 ) , [
{"score": 0.28_68, "label": "cat", "box": {"xmin": 324, "ymin": 20, "xmax": 640, "ymax": 373}},
{"score": 0.2_77, "label": "remote", "box": {"xmin": 40, "ymin": 72, "xmax": 177, "ymax": 115}},
{"score": 0.25_37, "label": "cat", "box": {"xmin": 1, "ymin": 55, "xmax": 315, "ymax": 472}},
{"score": 0.14_74, "label": "remote", "box": {"xmin": 335, "ymin": 74, "xmax": 371, "ymax": 187}},
{"score": 0.12_08, "label": "couch", "box": {"xmin": 4, "ymin": 0, "xmax": 642, "ymax": 476}},
] , )
UpperCamelCase_ = object_detector(
[
{
"image": "http://images.cocodataset.org/val2017/000000039769.jpg",
"candidate_labels": ["cat", "remote", "couch"],
},
{
"image": "http://images.cocodataset.org/val2017/000000039769.jpg",
"candidate_labels": ["cat", "remote", "couch"],
},
] , )
self.assertEqual(
nested_simplify(_SCREAMING_SNAKE_CASE , decimals=4 ) , [
[
{"score": 0.28_68, "label": "cat", "box": {"xmin": 324, "ymin": 20, "xmax": 640, "ymax": 373}},
{"score": 0.2_77, "label": "remote", "box": {"xmin": 40, "ymin": 72, "xmax": 177, "ymax": 115}},
{"score": 0.25_37, "label": "cat", "box": {"xmin": 1, "ymin": 55, "xmax": 315, "ymax": 472}},
{"score": 0.14_74, "label": "remote", "box": {"xmin": 335, "ymin": 74, "xmax": 371, "ymax": 187}},
{"score": 0.12_08, "label": "couch", "box": {"xmin": 4, "ymin": 0, "xmax": 642, "ymax": 476}},
],
[
{"score": 0.28_68, "label": "cat", "box": {"xmin": 324, "ymin": 20, "xmax": 640, "ymax": 373}},
{"score": 0.2_77, "label": "remote", "box": {"xmin": 40, "ymin": 72, "xmax": 177, "ymax": 115}},
{"score": 0.25_37, "label": "cat", "box": {"xmin": 1, "ymin": 55, "xmax": 315, "ymax": 472}},
{"score": 0.14_74, "label": "remote", "box": {"xmin": 335, "ymin": 74, "xmax": 371, "ymax": 187}},
{"score": 0.12_08, "label": "couch", "box": {"xmin": 4, "ymin": 0, "xmax": 642, "ymax": 476}},
],
] , )
@require_tf
@unittest.skip("Zero Shot Object Detection not implemented in TF" )
def lowercase ( self: List[Any] ) -> str:
"""simple docstring"""
pass
@require_torch
@slow
def lowercase ( self: Any ) -> int:
"""simple docstring"""
UpperCamelCase_ = 0.2
UpperCamelCase_ = pipeline("zero-shot-object-detection" )
UpperCamelCase_ = object_detector(
"http://images.cocodataset.org/val2017/000000039769.jpg" , candidate_labels=["cat", "remote", "couch"] , threshold=_SCREAMING_SNAKE_CASE , )
self.assertEqual(
nested_simplify(_SCREAMING_SNAKE_CASE , decimals=4 ) , [
{"score": 0.28_68, "label": "cat", "box": {"xmin": 324, "ymin": 20, "xmax": 640, "ymax": 373}},
{"score": 0.2_77, "label": "remote", "box": {"xmin": 40, "ymin": 72, "xmax": 177, "ymax": 115}},
{"score": 0.25_37, "label": "cat", "box": {"xmin": 1, "ymin": 55, "xmax": 315, "ymax": 472}},
] , )
@require_torch
@slow
def lowercase ( self: Dict ) -> Tuple:
"""simple docstring"""
UpperCamelCase_ = 2
UpperCamelCase_ = pipeline("zero-shot-object-detection" )
UpperCamelCase_ = object_detector(
"http://images.cocodataset.org/val2017/000000039769.jpg" , candidate_labels=["cat", "remote", "couch"] , top_k=_SCREAMING_SNAKE_CASE , )
self.assertEqual(
nested_simplify(_SCREAMING_SNAKE_CASE , decimals=4 ) , [
{"score": 0.28_68, "label": "cat", "box": {"xmin": 324, "ymin": 20, "xmax": 640, "ymax": 373}},
{"score": 0.2_77, "label": "remote", "box": {"xmin": 40, "ymin": 72, "xmax": 177, "ymax": 115}},
] , )
| 371 | 1 |
from collections import defaultdict
from pathlib import Path
import pandas as pd
from rouge_cli import calculate_rouge_path
from utils import calculate_rouge
PRED = [
"""Prosecutor: \"No videos were used in the crash investigation\" German papers say they saw a cell phone video of the"""
""" final seconds on board Flight 9525. The Germanwings co-pilot says he had a \"previous episode of severe"""
""" depression\" German airline confirms it knew of Andreas Lubitz's depression years before he took control.""",
"""The Palestinian Authority officially becomes the 123rd member of the International Criminal Court. The formal"""
""" accession was marked with a ceremony at The Hague, in the Netherlands. The Palestinians signed the ICC's"""
""" founding Rome Statute in January. Israel and the United States opposed the Palestinians' efforts to join the"""
""" body.""",
"""Amnesty International releases its annual report on the death penalty. The report catalogs the use of"""
""" state-sanctioned killing as a punitive measure across the globe. At least 607 people were executed around the"""
""" world in 2014, compared to 778 in 2013. The U.S. remains one of the worst offenders for imposing capital"""
""" punishment.""",
]
TGT = [
"""Marseille prosecutor says \"so far no videos were used in the crash investigation\" despite media reports ."""
""" Journalists at Bild and Paris Match are \"very confident\" the video clip is real, an editor says . Andreas Lubitz"""
""" had informed his Lufthansa training school of an episode of severe depression, airline says .""",
"""Membership gives the ICC jurisdiction over alleged crimes committed in Palestinian territories since last June ."""
""" Israel and the United States opposed the move, which could open the door to war crimes investigations against"""
""" Israelis .""",
"""Amnesty's annual death penalty report catalogs encouraging signs, but setbacks in numbers of those sentenced to"""
""" death . Organization claims that governments around the world are using the threat of terrorism to advance"""
""" executions . The number of executions worldwide has gone down by almost 22% compared with 2013, but death"""
""" sentences up by 28% .""",
]
def test_disaggregated_scores_are_deterministic():
    no_aggregation = calculate_rouge(PRED, TGT, bootstrap_aggregation=False, rouge_keys=["rouge2", "rougeL"])
    assert isinstance(no_aggregation, defaultdict)
    no_aggregation_just_r2 = calculate_rouge(PRED, TGT, bootstrap_aggregation=False, rouge_keys=["rouge2"])

    assert (
        pd.DataFrame(no_aggregation["rouge2"]).fmeasure.mean()
        == pd.DataFrame(no_aggregation_just_r2["rouge2"]).fmeasure.mean()
    )
def test_newline_cnn_improvement():
    k = "rougeLsum"
    score = calculate_rouge(PRED, TGT, newline_sep=True, rouge_keys=[k])[k]
    score_no_sep = calculate_rouge(PRED, TGT, newline_sep=False, rouge_keys=[k])[k]
    assert score > score_no_sep
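# rougeLsum scores summaries sentence-by-sentence, so separating sentences with "\n"
# (newline_sep=True) is expected to score higher than the unsplit variant tested above.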
def test_newline_irrelevant_for_other_metrics():
    k = ["rouge1", "rouge2", "rougeL"]
    score_sep = calculate_rouge(PRED, TGT, newline_sep=True, rouge_keys=k)
    score_no_sep = calculate_rouge(PRED, TGT, newline_sep=False, rouge_keys=k)
    assert score_sep == score_no_sep
def test_single_sent_scores_dont_depend_on_newline_sep():
    pred = [
'''Her older sister, Margot Frank, died in 1945, a month earlier than previously thought.''',
'''Marseille prosecutor says "so far no videos were used in the crash investigation" despite media reports .''',
]
    tgt = [
'''Margot Frank, died in 1945, a month earlier than previously thought.''',
'''Prosecutor: "No videos were used in the crash investigation" German papers say they saw a cell phone video of'''
''' the final seconds on board Flight 9525.''',
]
    assert calculate_rouge(pred, tgt, newline_sep=True) == calculate_rouge(pred, tgt, newline_sep=False)
def test_pegasus_newline():
    pred = [
'''" "a person who has such a video needs to immediately give it to the investigators," prosecutor says .<n> "it is a very disturbing scene," editor-in-chief of bild online tells "erin burnett: outfront" '''
]
    tgt = [
''' Marseille prosecutor says "so far no videos were used in the crash investigation" despite media reports . Journalists at Bild and Paris Match are "very confident" the video clip is real, an editor says . Andreas Lubitz had informed his Lufthansa training school of an episode of severe depression, airline says .'''
]
    prev_score = calculate_rouge(pred, tgt, rouge_keys=["rougeLsum"], newline_sep=False)["rougeLsum"]
    new_score = calculate_rouge(pred, tgt, rouge_keys=["rougeLsum"])["rougeLsum"]

    assert new_score > prev_score
def test_rouge_cli():
    data_dir = Path("examples/seq2seq/test_data/wmt_en_ro")
    metrics = calculate_rouge_path(data_dir.joinpath("test.source"), data_dir.joinpath("test.target"))
    assert isinstance(metrics, dict)
    metrics_default_dict = calculate_rouge_path(
        data_dir.joinpath("test.source"), data_dir.joinpath("test.target"), bootstrap_aggregation=False
    )
    assert isinstance(metrics_default_dict, defaultdict)
| 0 |
def base16_encode(data: bytes) -> str:
    """Encodes `data` as an uppercase base16 (hex) string."""
    return "".join([hex(byte)[2:].zfill(2).upper() for byte in list(data)])


def base16_decode(data: str) -> bytes:
    """Decodes an uppercase base16 (hex) string back into bytes."""
    if (len(data) % 2) != 0:
        raise ValueError(
            """Base16 encoded data is invalid:
Data does not have an even number of hex digits."""
        )
    # Check the character set - the standard base16 alphabet
    # is uppercase according to RFC 3548 section 6
    if not set(data) <= set("0123456789ABCDEF"):
        raise ValueError(
            """Base16 encoded data is invalid:
Data is not uppercase hex or it contains invalid characters."""
        )
    # For every two hexadecimal digits (= a byte), turn it into an integer.
    # Then, string the result together into bytes, and return it.
    return bytes(int(data[i] + data[i + 1], 16) for i in range(0, len(data), 2))
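# Round-trip sanity check (values worked out by hand):
#   base16_encode(b"Hello")      -> "48656C6C6F"
#   base16_decode("48656C6C6F")  -> b"Hello"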
if __name__ == "__main__":
import doctest
doctest.testmod()
| 0 | 1 |
import logging
import os
from dataclasses import dataclass
from enum import Enum
from typing import List, Optional, Union
from filelock import FileLock
from transformers import PreTrainedTokenizer, is_tf_available, is_torch_available
lowerCAmelCase_ : str = logging.getLogger(__name__)
@dataclass
class InputExample:
    guid: str
    words: List[str]
    labels: Optional[List[str]]


@dataclass
class InputFeatures:
    input_ids: List[int]
    attention_mask: List[int]
    token_type_ids: Optional[List[int]] = None
    label_ids: Optional[List[int]] = None
class Split(Enum):
    train = "train"
    dev = "dev"
    test = "test"
class TokenClassificationTask:
    @staticmethod
    def read_examples_from_file(data_dir, mode: Union[Split, str]) -> List[InputExample]:
        raise NotImplementedError

    @staticmethod
    def get_labels(path: str) -> List[str]:
        raise NotImplementedError

    @staticmethod
    def convert_examples_to_features(
        examples: List[InputExample],
        label_list: List[str],
        max_seq_length: int,
        tokenizer: PreTrainedTokenizer,
        cls_token_at_end=False,
        cls_token="[CLS]",
        cls_token_segment_id=1,
        sep_token="[SEP]",
        sep_token_extra=False,
        pad_on_left=False,
        pad_token=0,
        pad_token_segment_id=0,
        pad_token_label_id=-100,
        sequence_a_segment_id=0,
        mask_padding_with_zero=True,
    ) -> List[InputFeatures]:
        """Loads a data file into a list of `InputFeatures`."""
        label_map = {label: i for i, label in enumerate(label_list)}

        features = []
        for ex_index, example in enumerate(examples):
            if ex_index % 10_000 == 0:
                logger.info("Writing example %d of %d", ex_index, len(examples))

            tokens = []
            label_ids = []
            for word, label in zip(example.words, example.labels):
                word_tokens = tokenizer.tokenize(word)

                # bert-base-multilingual-cased sometimes outputs "nothing" ([]) when calling tokenize with just a space.
                if len(word_tokens) > 0:
                    tokens.extend(word_tokens)
                    # Use the real label id for the first token of the word, and padding ids for the remaining tokens
                    label_ids.extend([label_map[label]] + [pad_token_label_id] * (len(word_tokens) - 1))

            # Account for [CLS] and [SEP] with "- 2" and with "- 3" for RoBERTa.
            special_tokens_count = tokenizer.num_special_tokens_to_add()
            if len(tokens) > max_seq_length - special_tokens_count:
                tokens = tokens[: (max_seq_length - special_tokens_count)]
                label_ids = label_ids[: (max_seq_length - special_tokens_count)]
# The convention in BERT is:
# (a) For sequence pairs:
# tokens: [CLS] is this jack ##son ##ville ? [SEP] no it is not . [SEP]
# type_ids: 0 0 0 0 0 0 0 0 1 1 1 1 1 1
# (b) For single sequences:
# tokens: [CLS] the dog is hairy . [SEP]
# type_ids: 0 0 0 0 0 0 0
#
# Where "type_ids" are used to indicate whether this is the first
# sequence or the second sequence. The embedding vectors for `type=0` and
# `type=1` were learned during pre-training and are added to the wordpiece
# embedding vector (and position vector). This is not *strictly* necessary
# since the [SEP] token unambiguously separates the sequences, but it makes
# it easier for the model to learn the concept of sequences.
#
# For classification tasks, the first vector (corresponding to [CLS]) is
# used as the "sentence vector". Note that this only makes sense because
# the entire model is fine-tuned.
tokens += [sep_token]
label_ids += [pad_token_label_id]
if sep_token_extra:
# roberta uses an extra separator b/w pairs of sentences
tokens += [sep_token]
label_ids += [pad_token_label_id]
            segment_ids = [sequence_a_segment_id] * len(tokens)

            if cls_token_at_end:
                tokens += [cls_token]
                label_ids += [pad_token_label_id]
                segment_ids += [cls_token_segment_id]
            else:
                tokens = [cls_token] + tokens
                label_ids = [pad_token_label_id] + label_ids
                segment_ids = [cls_token_segment_id] + segment_ids

            input_ids = tokenizer.convert_tokens_to_ids(tokens)

            # The mask has 1 for real tokens and 0 for padding tokens. Only real
            # tokens are attended to.
            input_mask = [1 if mask_padding_with_zero else 0] * len(input_ids)

            # Zero-pad up to the sequence length.
            padding_length = max_seq_length - len(input_ids)
            if pad_on_left:
                input_ids = ([pad_token] * padding_length) + input_ids
                input_mask = ([0 if mask_padding_with_zero else 1] * padding_length) + input_mask
                segment_ids = ([pad_token_segment_id] * padding_length) + segment_ids
                label_ids = ([pad_token_label_id] * padding_length) + label_ids
else:
input_ids += [pad_token] * padding_length
input_mask += [0 if mask_padding_with_zero else 1] * padding_length
segment_ids += [pad_token_segment_id] * padding_length
label_ids += [pad_token_label_id] * padding_length
            assert len(input_ids) == max_seq_length
            assert len(input_mask) == max_seq_length
            assert len(segment_ids) == max_seq_length
            assert len(label_ids) == max_seq_length

            if ex_index < 5:
                logger.info("*** Example ***")
                logger.info("guid: %s", example.guid)
                logger.info("tokens: %s", " ".join([str(x) for x in tokens]))
                logger.info("input_ids: %s", " ".join([str(x) for x in input_ids]))
                logger.info("input_mask: %s", " ".join([str(x) for x in input_mask]))
                logger.info("segment_ids: %s", " ".join([str(x) for x in segment_ids]))
                logger.info("label_ids: %s", " ".join([str(x) for x in label_ids]))

            if "token_type_ids" not in tokenizer.model_input_names:
                segment_ids = None

            features.append(
                InputFeatures(
                    input_ids=input_ids, attention_mask=input_mask, token_type_ids=segment_ids, label_ids=label_ids
                )
            )
return features
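# Illustrative label alignment (hypothetical tokenizer output): a word "Washington"
# labelled "B-LOC" that tokenizes to ["Wash", "##ington"] contributes
# [label_map["B-LOC"], pad_token_label_id] to `label_ids`, so only the first
# sub-token carries a real label and the remaining pieces are ignored by the loss.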
if is_torch_available():
import torch
from torch import nn
from torch.utils.data import Dataset
    class TokenClassificationDataset(Dataset):
        features: List[InputFeatures]
        pad_token_label_id: int = nn.CrossEntropyLoss().ignore_index
        # Use cross entropy ignore_index as padding label id so that only
        # real label ids contribute to the loss later.

        def __init__(
            self,
            token_classification_task: TokenClassificationTask,
            data_dir: str,
            tokenizer: PreTrainedTokenizer,
            labels: List[str],
            model_type: str,
            max_seq_length: Optional[int] = None,
            overwrite_cache=False,
            mode: Split = Split.train,
        ):
            # Load data features from cache or dataset file
            cached_features_file = os.path.join(
                data_dir,
                "cached_{}_{}_{}".format(mode.value, tokenizer.__class__.__name__, str(max_seq_length)),
            )

            # Make sure only the first process in distributed training processes the dataset,
            # and the others will use the cache.
            lock_path = cached_features_file + ".lock"
            with FileLock(lock_path):
                if os.path.exists(cached_features_file) and not overwrite_cache:
                    logger.info(f"Loading features from cached file {cached_features_file}")
                    self.features = torch.load(cached_features_file)
                else:
                    logger.info(f"Creating features from dataset file at {data_dir}")
                    examples = token_classification_task.read_examples_from_file(data_dir, mode)
                    # TODO clean up all this to leverage built-in features of tokenizers
                    self.features = token_classification_task.convert_examples_to_features(
                        examples,
                        labels,
                        max_seq_length,
                        tokenizer,
                        cls_token_at_end=bool(model_type in ["xlnet"]),
                        cls_token=tokenizer.cls_token,
                        cls_token_segment_id=2 if model_type in ["xlnet"] else 0,
                        sep_token=tokenizer.sep_token,
                        sep_token_extra=False,
                        pad_on_left=bool(tokenizer.padding_side == "left"),
                        pad_token=tokenizer.pad_token_id,
                        pad_token_segment_id=tokenizer.pad_token_type_id,
                        pad_token_label_id=self.pad_token_label_id,
                    )
                    logger.info(f"Saving features into cached file {cached_features_file}")
                    torch.save(self.features, cached_features_file)
def __len__( self : Tuple ):
'''simple docstring'''
return len(self.features )
def __getitem__( self : Dict , lowerCamelCase : Any ):
'''simple docstring'''
return self.features[i]
if is_tf_available():
import tensorflow as tf
    class TFTokenClassificationDataset:
        features: List[InputFeatures]
        pad_token_label_id: int = -100
        # Use cross entropy ignore_index as padding label id so that only
        # real label ids contribute to the loss later.

        def __init__(
            self,
            token_classification_task: TokenClassificationTask,
            data_dir: str,
            tokenizer: PreTrainedTokenizer,
            labels: List[str],
            model_type: str,
            max_seq_length: Optional[int] = None,
            overwrite_cache=False,
            mode: Split = Split.train,
        ):
            examples = token_classification_task.read_examples_from_file(data_dir, mode)
            # TODO clean up all this to leverage built-in features of tokenizers
            self.features = token_classification_task.convert_examples_to_features(
                examples,
                labels,
                max_seq_length,
                tokenizer,
                cls_token_at_end=bool(model_type in ["xlnet"]),
                cls_token=tokenizer.cls_token,
                cls_token_segment_id=2 if model_type in ["xlnet"] else 0,
                sep_token=tokenizer.sep_token,
                sep_token_extra=False,
                pad_on_left=bool(tokenizer.padding_side == "left"),
                pad_token=tokenizer.pad_token_id,
                pad_token_segment_id=tokenizer.pad_token_type_id,
                pad_token_label_id=self.pad_token_label_id,
            )
def gen():
for ex in self.features:
if ex.token_type_ids is None:
yield (
{"input_ids": ex.input_ids, "attention_mask": ex.attention_mask},
ex.label_ids,
)
else:
yield (
{
"input_ids": ex.input_ids,
"attention_mask": ex.attention_mask,
"token_type_ids": ex.token_type_ids,
},
ex.label_ids,
)
if "token_type_ids" not in tokenizer.model_input_names:
a__ = tf.data.Dataset.from_generator(
lowerCamelCase , ({"input_ids": tf.intaa, "attention_mask": tf.intaa}, tf.intaa) , (
{"input_ids": tf.TensorShape([None] ), "attention_mask": tf.TensorShape([None] )},
tf.TensorShape([None] ),
) , )
else:
a__ = tf.data.Dataset.from_generator(
lowerCamelCase , ({"input_ids": tf.intaa, "attention_mask": tf.intaa, "token_type_ids": tf.intaa}, tf.intaa) , (
{
"input_ids": tf.TensorShape([None] ),
"attention_mask": tf.TensorShape([None] ),
"token_type_ids": tf.TensorShape([None] ),
},
tf.TensorShape([None] ),
) , )
        def get_dataset(self):
            self.dataset = self.dataset.apply(tf.data.experimental.assert_cardinality(len(self.features)))

            return self.dataset
def __len__( self : Tuple ):
'''simple docstring'''
return len(self.features )
def __getitem__( self : Dict , lowerCamelCase : Tuple ):
'''simple docstring'''
return self.features[i]
| 289 |
def solution(n: int = 4_000_000) -> int:
    """Returns the sum of all even Fibonacci numbers that do not exceed n."""
    fib = [0, 1]
    i = 0
    while fib[i] <= n:
        fib.append(fib[i] + fib[i + 1])
        if fib[i + 2] > n:
            break
        i += 1
    total = 0
    for j in range(len(fib) - 1):
        if fib[j] % 2 == 0:
            total += fib[j]

    return total
if __name__ == "__main__":
print(f"""{solution() = }""")
| 289 | 1 |
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

DATA2VEC_VISION_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/data2vec-vision-base-ft": (
        "https://huggingface.co/facebook/data2vec-vision-base-ft/resolve/main/config.json"
    ),
}
class Data2VecVisionConfig(PretrainedConfig):
    model_type = "data2vec-vision"

    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        image_size=224,
        patch_size=16,
        num_channels=3,
        use_mask_token=False,
        use_absolute_position_embeddings=False,
        use_relative_position_bias=False,
        use_shared_relative_position_bias=False,
        layer_scale_init_value=0.1,
        drop_path_rate=0.1,
        use_mean_pooling=True,
        out_indices=[3, 5, 7, 11],
        pool_scales=[1, 2, 3, 6],
        use_auxiliary_head=True,
        auxiliary_loss_weight=0.4,
        auxiliary_channels=256,
        auxiliary_num_convs=1,
        auxiliary_concat_input=False,
        semantic_loss_ignore_index=255,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.use_mask_token = use_mask_token
        self.use_absolute_position_embeddings = use_absolute_position_embeddings
        self.use_relative_position_bias = use_relative_position_bias
        self.use_shared_relative_position_bias = use_shared_relative_position_bias
        self.layer_scale_init_value = layer_scale_init_value
        self.drop_path_rate = drop_path_rate
        self.use_mean_pooling = use_mean_pooling
        # decode head attributes (semantic segmentation)
        self.out_indices = out_indices
        self.pool_scales = pool_scales
        # auxiliary head attributes (semantic segmentation)
        self.use_auxiliary_head = use_auxiliary_head
        self.auxiliary_loss_weight = auxiliary_loss_weight
        self.auxiliary_channels = auxiliary_channels
        self.auxiliary_num_convs = auxiliary_num_convs
        self.auxiliary_concat_input = auxiliary_concat_input
        self.semantic_loss_ignore_index = semantic_loss_ignore_index
class Data2VecVisionOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4
| 422 |
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DetaImageProcessor
class DetaImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
        do_rescale=True,
        rescale_factor=1 / 255,
        do_pad=True,
    ):
        # by setting size["longest_edge"] > max_resolution we're effectively not testing this :p
        size = size if size is not None else {"shortest_edge": 18, "longest_edge": 1333}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_pad = do_pad

    def prepare_image_processor_dict(self):
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_normalize": self.do_normalize,
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_rescale": self.do_rescale,
            "rescale_factor": self.rescale_factor,
            "do_pad": self.do_pad,
        }
    def get_expected_values(self, image_inputs, batched=False):
        """Computes the expected height and width when providing images to the
        image processor, assuming do_resize is True with a scalar shortest edge."""
        if not batched:
            image = image_inputs[0]
            if isinstance(image, Image.Image):
                w, h = image.size
            else:
                h, w = image.shape[1], image.shape[2]
            if w < h:
                expected_height = int(self.size["shortest_edge"] * h / w)
                expected_width = self.size["shortest_edge"]
            elif w > h:
                expected_height = self.size["shortest_edge"]
                expected_width = int(self.size["shortest_edge"] * w / h)
            else:
                expected_height = self.size["shortest_edge"]
                expected_width = self.size["shortest_edge"]

        else:
            expected_values = []
            for image in image_inputs:
                expected_height, expected_width = self.get_expected_values([image])
                expected_values.append((expected_height, expected_width))
            expected_height = max(expected_values, key=lambda item: item[0])[0]
            expected_width = max(expected_values, key=lambda item: item[1])[1]

        return expected_height, expected_width
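# Illustrative resize (hypothetical input): with size = {"shortest_edge": 18}, a 30 x 40
# (w x h) image gives expected_width = 18 and expected_height = int(18 * 40 / 30) = 24,
# preserving the aspect ratio.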
@require_torch
@require_vision
class DetaImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = DetaImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = DetaImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()
    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "do_rescale"))
        self.assertTrue(hasattr(image_processing, "do_pad"))
        self.assertTrue(hasattr(image_processing, "size"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"shortest_edge": 18, "longest_edge": 1333})
        self.assertEqual(image_processor.do_pad, True)
def UpperCAmelCase__ (self ):
pass
def UpperCAmelCase__ (self ):
# Initialize image_processing
lowerCamelCase_ : int = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
lowerCamelCase_ : List[str] = prepare_image_inputs(self.image_processor_tester , equal_resolution=A )
for image in image_inputs:
self.assertIsInstance(A , Image.Image )
# Test not batched input
lowerCamelCase_ : Dict = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
lowerCamelCase_, lowerCamelCase_ : Optional[Any] = self.image_processor_tester.get_expected_values(A )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
lowerCamelCase_, lowerCamelCase_ : Any = self.image_processor_tester.get_expected_values(A , batched=A )
lowerCamelCase_ : Tuple = image_processing(A , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def UpperCAmelCase__ (self ):
# Initialize image_processing
lowerCamelCase_ : Any = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
lowerCamelCase_ : Tuple = prepare_image_inputs(self.image_processor_tester , equal_resolution=A , numpify=A )
for image in image_inputs:
self.assertIsInstance(A , np.ndarray )
# Test not batched input
lowerCamelCase_ : Union[str, Any] = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
lowerCamelCase_, lowerCamelCase_ : Union[str, Any] = self.image_processor_tester.get_expected_values(A )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
lowerCamelCase_ : List[str] = image_processing(A , return_tensors='''pt''' ).pixel_values
lowerCamelCase_, lowerCamelCase_ : Dict = self.image_processor_tester.get_expected_values(A , batched=A )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def UpperCAmelCase__ (self ):
# Initialize image_processing
lowerCamelCase_ : List[Any] = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
lowerCamelCase_ : List[str] = prepare_image_inputs(self.image_processor_tester , equal_resolution=A , torchify=A )
for image in image_inputs:
self.assertIsInstance(A , torch.Tensor )
# Test not batched input
lowerCamelCase_ : List[Any] = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
lowerCamelCase_, lowerCamelCase_ : Any = self.image_processor_tester.get_expected_values(A )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
lowerCamelCase_ : Optional[Any] = image_processing(A , return_tensors='''pt''' ).pixel_values
lowerCamelCase_, lowerCamelCase_ : List[Any] = self.image_processor_tester.get_expected_values(A , batched=A )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
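# Integration tests below: encode a real COCO image plus its annotations and compare
# pixel values, areas, boxes, image ids, crowd flags, class labels and sizes to references.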
@slow
def UpperCAmelCase__ (self ):
# prepare image and target
lowerCamelCase_ : Any = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
with open('''./tests/fixtures/tests_samples/COCO/coco_annotations.txt''' , '''r''' ) as f:
lowerCamelCase_ : Any = json.loads(f.read() )
lowerCamelCase_ : str = {'''image_id''': 3_9_7_6_9, '''annotations''': target}
# encode them
lowerCamelCase_ : Optional[Any] = DetaImageProcessor()
lowerCamelCase_ : Optional[int] = image_processing(images=A , annotations=A , return_tensors='''pt''' )
# verify pixel values
lowerCamelCase_ : Dict = torch.Size([1, 3, 8_0_0, 1_0_6_6] )
self.assertEqual(encoding['''pixel_values'''].shape , A )
lowerCamelCase_ : Dict = torch.tensor([0.27_96, 0.31_38, 0.34_81] )
self.assertTrue(torch.allclose(encoding['''pixel_values'''][0, 0, 0, :3] , A , atol=1E-4 ) )
# verify area
lowerCamelCase_ : Dict = torch.tensor([58_87.96_00, 1_12_50.20_61, 48_93_53.84_38, 83_71_22.75_00, 14_79_67.51_56, 16_57_32.34_38] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''area'''] , A ) )
# verify boxes
lowerCamelCase_ : Optional[Any] = torch.Size([6, 4] )
self.assertEqual(encoding['''labels'''][0]['''boxes'''].shape , A )
lowerCamelCase_ : int = torch.tensor([0.55_03, 0.27_65, 0.06_04, 0.22_15] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''boxes'''][0] , A , atol=1E-3 ) )
# verify image_id
lowerCamelCase_ : int = torch.tensor([3_9_7_6_9] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''image_id'''] , A ) )
# verify is_crowd
lowerCamelCase_ : Tuple = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''iscrowd'''] , A ) )
# verify class_labels
lowerCamelCase_ : List[str] = torch.tensor([7_5, 7_5, 6_3, 6_5, 1_7, 1_7] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''class_labels'''] , A ) )
# verify orig_size
lowerCamelCase_ : Optional[int] = torch.tensor([4_8_0, 6_4_0] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''orig_size'''] , A ) )
# verify size
lowerCamelCase_ : List[str] = torch.tensor([8_0_0, 1_0_6_6] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''size'''] , A ) )
@slow
def UpperCAmelCase__ (self ):
# prepare image, target and masks_path
lowerCamelCase_ : Union[str, Any] = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
with open('''./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt''' , '''r''' ) as f:
lowerCamelCase_ : Tuple = json.loads(f.read() )
lowerCamelCase_ : Tuple = {'''file_name''': '''000000039769.png''', '''image_id''': 3_9_7_6_9, '''segments_info''': target}
lowerCamelCase_ : List[str] = pathlib.Path('''./tests/fixtures/tests_samples/COCO/coco_panoptic''' )
# encode them
lowerCamelCase_ : Any = DetaImageProcessor(format='''coco_panoptic''' )
lowerCamelCase_ : Dict = image_processing(images=A , annotations=A , masks_path=A , return_tensors='''pt''' )
# verify pixel values
lowerCamelCase_ : Dict = torch.Size([1, 3, 8_0_0, 1_0_6_6] )
self.assertEqual(encoding['''pixel_values'''].shape , A )
lowerCamelCase_ : Dict = torch.tensor([0.27_96, 0.31_38, 0.34_81] )
self.assertTrue(torch.allclose(encoding['''pixel_values'''][0, 0, 0, :3] , A , atol=1E-4 ) )
# verify area
lowerCamelCase_ : Tuple = torch.tensor([14_79_79.68_75, 16_55_27.04_69, 48_46_38.59_38, 1_12_92.93_75, 58_79.65_62, 76_34.11_47] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''area'''] , A ) )
# verify boxes
lowerCamelCase_ : List[Any] = torch.Size([6, 4] )
self.assertEqual(encoding['''labels'''][0]['''boxes'''].shape , A )
lowerCamelCase_ : int = torch.tensor([0.26_25, 0.54_37, 0.46_88, 0.86_25] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''boxes'''][0] , A , atol=1E-3 ) )
# verify image_id
lowerCamelCase_ : Union[str, Any] = torch.tensor([3_9_7_6_9] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''image_id'''] , A ) )
# verify is_crowd
lowerCamelCase_ : Optional[Any] = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''iscrowd'''] , A ) )
# verify class_labels
lowerCamelCase_ : Dict = torch.tensor([1_7, 1_7, 6_3, 7_5, 7_5, 9_3] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''class_labels'''] , A ) )
# verify masks
lowerCamelCase_ : Tuple = 8_2_2_8_7_3
self.assertEqual(encoding['''labels'''][0]['''masks'''].sum().item() , A )
# verify orig_size
lowerCamelCase_ : Any = torch.tensor([4_8_0, 6_4_0] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''orig_size'''] , A ) )
# verify size
lowerCamelCase_ : Tuple = torch.tensor([8_0_0, 1_0_6_6] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''size'''] , A ) )
| 422 | 1 |
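# Lazy-import module for the ViT family: framework-specific symbols are resolved on first
# access, guarded by availability checks for vision, torch, TensorFlow and Flax backends.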
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
is_vision_available,
)
a_ : int = {"configuration_vit": ["VIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "ViTConfig", "ViTOnnxConfig"]}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ : List[Any] = ["ViTFeatureExtractor"]
a_ : Optional[int] = ["ViTImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ : Optional[int] = [
"VIT_PRETRAINED_MODEL_ARCHIVE_LIST",
"ViTForImageClassification",
"ViTForMaskedImageModeling",
"ViTModel",
"ViTPreTrainedModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ : Any = [
"TFViTForImageClassification",
"TFViTModel",
"TFViTPreTrainedModel",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ : Dict = [
"FlaxViTForImageClassification",
"FlaxViTModel",
"FlaxViTPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_vit import VIT_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTConfig, ViTOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_vit import ViTFeatureExtractor
from .image_processing_vit import ViTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vit import (
VIT_PRETRAINED_MODEL_ARCHIVE_LIST,
ViTForImageClassification,
ViTForMaskedImageModeling,
ViTModel,
ViTPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_vit import TFViTForImageClassification, TFViTModel, TFViTPreTrainedModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_vit import FlaxViTForImageClassification, FlaxViTModel, FlaxViTPreTrainedModel
else:
import sys
a_ : List[str] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 711 |
'''simple docstring'''
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
import torch
from ..models.auto import AutoModelForVisualQuestionAnswering, AutoProcessor
from ..utils import requires_backends
from .base import PipelineTool
if TYPE_CHECKING:
from PIL import Image
class a ( _SCREAMING_SNAKE_CASE ):
_lowerCAmelCase = """dandelin/vilt-b32-finetuned-vqa"""
_lowerCAmelCase = (
"""This is a tool that answers a question about an image. It takes an input named `image` which should be the """
"""image containing the information, as well as a `question` which should be the question in English. It """
"""returns a text that is the answer to the question."""
)
_lowerCAmelCase = """image_qa"""
_lowerCAmelCase = AutoProcessor
_lowerCAmelCase = AutoModelForVisualQuestionAnswering
_lowerCAmelCase = ["""image""", """text"""]
_lowerCAmelCase = ["""text"""]
def __init__( self , *__magic_name__ , **__magic_name__ ) -> Tuple:
requires_backends(self , ['vision'] )
super().__init__(*__magic_name__ , **__magic_name__ )
def __UpperCAmelCase ( self , __magic_name__ , __magic_name__ ) -> Tuple:
return self.pre_processor(__magic_name__ , __magic_name__ , return_tensors='pt' )
def __UpperCAmelCase ( self , __magic_name__ ) -> Any:
with torch.no_grad():
return self.model(**__magic_name__ ).logits
def __UpperCAmelCase ( self , __magic_name__ ) -> Optional[int]:
_a = outputs.argmax(-1 ).item()
return self.model.config.idalabel[idx]
| 532 | 0 |
from __future__ import annotations
class SCREAMING_SNAKE_CASE_ :
def __init__( self : Optional[Any] , lowerCamelCase_ : int ):
"""simple docstring"""
UpperCamelCase = data
UpperCamelCase = None
UpperCamelCase = None
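# Helpers below: in-order traversal printing, recursive depth, and a fullness check that
# every node has either zero or two children.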
def lowercase( UpperCamelCase_ ) -> None: # In Order traversal of the tree
'''simple docstring'''
if tree:
display(tree.left )
print(tree.data )
display(tree.right )
def lowercase( UpperCamelCase_ ) -> int:
'''simple docstring'''
return 1 + max(depth_of_tree(tree.left ) , depth_of_tree(tree.right ) ) if tree else 0
def lowercase( UpperCamelCase_ ) -> bool:
'''simple docstring'''
if not tree:
return True
if tree.left and tree.right:
return is_full_binary_tree(tree.left ) and is_full_binary_tree(tree.right )
else:
return not tree.left and not tree.right
def lowercase( ) -> None: # Main function for testing.
'''simple docstring'''
UpperCamelCase = Node(1 )
UpperCamelCase = Node(2 )
UpperCamelCase = Node(3 )
UpperCamelCase = Node(4 )
UpperCamelCase = Node(5 )
UpperCamelCase = Node(6 )
UpperCamelCase = Node(7 )
UpperCamelCase = Node(8 )
UpperCamelCase = Node(9 )
print(is_full_binary_tree(UpperCamelCase_ ) )
print(depth_of_tree(UpperCamelCase_ ) )
print("""Tree is: """ )
display(UpperCamelCase_ )
if __name__ == "__main__":
main()
| 537 |
import time
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch, torch_device
from ..test_modeling_common import ids_tensor
if is_torch_available():
import torch
from transformers.generation import (
MaxLengthCriteria,
MaxNewTokensCriteria,
MaxTimeCriteria,
StoppingCriteriaList,
validate_stopping_criteria,
)
@require_torch
class SCREAMING_SNAKE_CASE_ ( unittest.TestCase ):
def lowerCamelCase_ ( self : Tuple , lowerCamelCase_ : Tuple ):
"""simple docstring"""
UpperCamelCase = 3
UpperCamelCase = 250
UpperCamelCase = ids_tensor((batch_size, length) , lowerCamelCase_ )
UpperCamelCase = torch.ones((batch_size, length) , device=lowerCamelCase_ , dtype=torch.float ) / length
return input_ids, scores
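    # Each test builds (input_ids, scores) at a given sequence length and asserts whether
    # the stopping criteria report generation as finished.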
def lowerCamelCase_ ( self : Tuple ):
"""simple docstring"""
UpperCamelCase , UpperCamelCase = self._get_tensors(5 )
UpperCamelCase = StoppingCriteriaList(
[
MaxLengthCriteria(max_length=10 ),
MaxTimeCriteria(max_time=0.1 ),
] )
self.assertFalse(criteria(lowerCamelCase_ , lowerCamelCase_ ) )
UpperCamelCase , UpperCamelCase = self._get_tensors(9 )
self.assertFalse(criteria(lowerCamelCase_ , lowerCamelCase_ ) )
UpperCamelCase , UpperCamelCase = self._get_tensors(10 )
self.assertTrue(criteria(lowerCamelCase_ , lowerCamelCase_ ) )
def lowerCamelCase_ ( self : str ):
"""simple docstring"""
UpperCamelCase = MaxLengthCriteria(max_length=10 )
UpperCamelCase , UpperCamelCase = self._get_tensors(5 )
self.assertFalse(criteria(lowerCamelCase_ , lowerCamelCase_ ) )
UpperCamelCase , UpperCamelCase = self._get_tensors(9 )
self.assertFalse(criteria(lowerCamelCase_ , lowerCamelCase_ ) )
UpperCamelCase , UpperCamelCase = self._get_tensors(10 )
self.assertTrue(criteria(lowerCamelCase_ , lowerCamelCase_ ) )
def lowerCamelCase_ ( self : str ):
"""simple docstring"""
UpperCamelCase = MaxNewTokensCriteria(start_length=5 , max_new_tokens=5 )
UpperCamelCase , UpperCamelCase = self._get_tensors(5 )
self.assertFalse(criteria(lowerCamelCase_ , lowerCamelCase_ ) )
UpperCamelCase , UpperCamelCase = self._get_tensors(9 )
self.assertFalse(criteria(lowerCamelCase_ , lowerCamelCase_ ) )
UpperCamelCase , UpperCamelCase = self._get_tensors(10 )
self.assertTrue(criteria(lowerCamelCase_ , lowerCamelCase_ ) )
UpperCamelCase = StoppingCriteriaList([criteria] )
self.assertEqual(criteria_list.max_length , 10 )
def lowerCamelCase_ ( self : Optional[int] ):
"""simple docstring"""
UpperCamelCase , UpperCamelCase = self._get_tensors(5 )
UpperCamelCase = MaxTimeCriteria(max_time=0.1 )
self.assertFalse(criteria(lowerCamelCase_ , lowerCamelCase_ ) )
UpperCamelCase = MaxTimeCriteria(max_time=0.1 , initial_timestamp=time.time() - 0.2 )
self.assertTrue(criteria(lowerCamelCase_ , lowerCamelCase_ ) )
def lowerCamelCase_ ( self : str ):
"""simple docstring"""
validate_stopping_criteria(StoppingCriteriaList([MaxLengthCriteria(10 )] ) , 10 )
with self.assertWarns(lowerCamelCase_ ):
validate_stopping_criteria(StoppingCriteriaList([MaxLengthCriteria(10 )] ) , 11 )
UpperCamelCase = validate_stopping_criteria(StoppingCriteriaList() , 11 )
self.assertEqual(len(lowerCamelCase_ ) , 1 )
| 537 | 1 |
import argparse
import re
import torch
from CLAP import create_model
from transformers import AutoFeatureExtractor, ClapConfig, ClapModel
_snake_case : Any = {
'text_branch': 'text_model',
'audio_branch': 'audio_model.audio_encoder',
'attn': 'attention.self',
'self.proj': 'output.dense',
'attention.self_mask': 'attn_mask',
'mlp.fc1': 'intermediate.dense',
'mlp.fc2': 'output.dense',
'norm1': 'layernorm_before',
'norm2': 'layernorm_after',
'bn0': 'batch_norm',
}
_snake_case : Any = AutoFeatureExtractor.from_pretrained('laion/clap-htsat-unfused', truncation='rand_trunc')
def _A ( __snake_case :Tuple , __snake_case :str=False ) -> List[str]:
"""simple docstring"""
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = create_model(
"HTSAT-tiny" , "roberta" , __snake_case , precision="fp32" , device="cuda:0" if torch.cuda.is_available() else "cpu" , enable_fusion=__snake_case , fusion_type="aff_2d" if enable_fusion else None , )
return model, model_cfg
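# rename_state_dict maps CLAP checkpoint keys onto the transformers layout: prefix
# renames, sequential/projection layer renumbering, and splitting fused qkv weights.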
def _A ( __snake_case :Any ) -> str:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = {}
__SCREAMING_SNAKE_CASE = R".*sequential.(\d+).*"
__SCREAMING_SNAKE_CASE = R".*_projection.(\d+).*"
for key, value in state_dict.items():
# check if any key needs to be modified
for key_to_modify, new_key in KEYS_TO_MODIFY_MAPPING.items():
if key_to_modify in key:
__SCREAMING_SNAKE_CASE = key.replace(__snake_case , __snake_case )
if re.match(__snake_case , __snake_case ):
# replace sequential layers with list
__SCREAMING_SNAKE_CASE = re.match(__snake_case , __snake_case ).group(1 )
__SCREAMING_SNAKE_CASE = key.replace(f'''sequential.{sequential_layer}.''' , f'''layers.{int(__snake_case )//3}.linear.''' )
elif re.match(__snake_case , __snake_case ):
__SCREAMING_SNAKE_CASE = int(re.match(__snake_case , __snake_case ).group(1 ) )
# Because in CLAP they use `nn.Sequential`...
__SCREAMING_SNAKE_CASE = 1 if projecton_layer == 0 else 2
__SCREAMING_SNAKE_CASE = key.replace(f'''_projection.{projecton_layer}.''' , f'''_projection.linear{transformers_projection_layer}.''' )
if "audio" and "qkv" in key:
# split qkv into query key and value
__SCREAMING_SNAKE_CASE = value
__SCREAMING_SNAKE_CASE = mixed_qkv.size(0 ) // 3
__SCREAMING_SNAKE_CASE = mixed_qkv[:qkv_dim]
__SCREAMING_SNAKE_CASE = mixed_qkv[qkv_dim : qkv_dim * 2]
__SCREAMING_SNAKE_CASE = mixed_qkv[qkv_dim * 2 :]
__SCREAMING_SNAKE_CASE = query_layer
__SCREAMING_SNAKE_CASE = key_layer
__SCREAMING_SNAKE_CASE = value_layer
else:
__SCREAMING_SNAKE_CASE = value
return model_state_dict
def _A ( __snake_case :str , __snake_case :Optional[int] , __snake_case :int , __snake_case :str=False ) -> Any:
"""simple docstring"""
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = init_clap(__snake_case , enable_fusion=__snake_case )
clap_model.eval()
__SCREAMING_SNAKE_CASE = clap_model.state_dict()
__SCREAMING_SNAKE_CASE = rename_state_dict(__snake_case )
__SCREAMING_SNAKE_CASE = ClapConfig()
__SCREAMING_SNAKE_CASE = enable_fusion
__SCREAMING_SNAKE_CASE = ClapModel(__snake_case )
# ignore the spectrogram embedding layer
model.load_state_dict(__snake_case , strict=__snake_case )
model.save_pretrained(__snake_case )
transformers_config.save_pretrained(__snake_case )
if __name__ == "__main__":
_snake_case : Optional[Any] = argparse.ArgumentParser()
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to fairseq checkpoint')
parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert')
parser.add_argument('--enable_fusion', action='store_true', help='Whether to enable fusion or not')
_snake_case : List[str] = parser.parse_args()
convert_clap_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.enable_fusion)
| 214 |
from __future__ import annotations
import unittest
from transformers import LEDConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFLEDForConditionalGeneration, TFLEDModel
@require_tf
class __SCREAMING_SNAKE_CASE :
SCREAMING_SNAKE_CASE__ =LEDConfig
SCREAMING_SNAKE_CASE__ ={}
SCREAMING_SNAKE_CASE__ ="""gelu"""
def __init__( self, _a, _a=13, _a=7, _a=True, _a=False, _a=99, _a=32, _a=2, _a=4, _a=37, _a=0.1, _a=0.1, _a=20, _a=2, _a=1, _a=0, _a=4, ) -> Optional[int]:
__SCREAMING_SNAKE_CASE = parent
__SCREAMING_SNAKE_CASE = batch_size
__SCREAMING_SNAKE_CASE = seq_length
__SCREAMING_SNAKE_CASE = is_training
__SCREAMING_SNAKE_CASE = use_labels
__SCREAMING_SNAKE_CASE = vocab_size
__SCREAMING_SNAKE_CASE = hidden_size
__SCREAMING_SNAKE_CASE = num_hidden_layers
__SCREAMING_SNAKE_CASE = num_attention_heads
__SCREAMING_SNAKE_CASE = intermediate_size
__SCREAMING_SNAKE_CASE = hidden_dropout_prob
__SCREAMING_SNAKE_CASE = attention_probs_dropout_prob
__SCREAMING_SNAKE_CASE = max_position_embeddings
__SCREAMING_SNAKE_CASE = eos_token_id
__SCREAMING_SNAKE_CASE = pad_token_id
__SCREAMING_SNAKE_CASE = bos_token_id
__SCREAMING_SNAKE_CASE = attention_window
# `ModelTesterMixin.test_attention_outputs` is expecting attention tensors to be of size
# [num_attention_heads, encoder_seq_length, encoder_key_length], but TFLongformerSelfAttention
# returns attention of shape [num_attention_heads, encoder_seq_length, self.attention_window + 1]
# because its local attention only attends to `self.attention_window` and one before and one after
__SCREAMING_SNAKE_CASE = self.attention_window + 2
# because of padding `encoder_seq_length`, is different from `seq_length`. Relevant for
# the `test_attention_outputs` and `test_hidden_states_output` tests
__SCREAMING_SNAKE_CASE = (
self.seq_length + (self.attention_window - self.seq_length % self.attention_window) % self.attention_window
)
def __lowerCAmelCase ( self ) -> List[str]:
__SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size )
__SCREAMING_SNAKE_CASE = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ), 1 )
__SCREAMING_SNAKE_CASE = tf.concat([input_ids, eos_tensor], axis=1 )
__SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size, self.seq_length], self.vocab_size )
__SCREAMING_SNAKE_CASE = self.config_cls(
vocab_size=self.vocab_size, d_model=self.hidden_size, encoder_layers=self.num_hidden_layers, decoder_layers=self.num_hidden_layers, encoder_attention_heads=self.num_attention_heads, decoder_attention_heads=self.num_attention_heads, encoder_ffn_dim=self.intermediate_size, decoder_ffn_dim=self.intermediate_size, dropout=self.hidden_dropout_prob, attention_dropout=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, eos_token_ids=[2], bos_token_id=self.bos_token_id, pad_token_id=self.pad_token_id, decoder_start_token_id=self.pad_token_id, attention_window=self.attention_window, **self.config_updates, )
__SCREAMING_SNAKE_CASE = prepare_led_inputs_dict(_a, _a, _a )
__SCREAMING_SNAKE_CASE = tf.concat(
[tf.zeros_like(_a )[:, :-1], tf.ones_like(_a )[:, -1:]], axis=-1, )
__SCREAMING_SNAKE_CASE = global_attention_mask
return config, inputs_dict
def __lowerCAmelCase ( self, _a, _a ) -> List[str]:
__SCREAMING_SNAKE_CASE = TFLEDModel(config=_a ).get_decoder()
__SCREAMING_SNAKE_CASE = inputs_dict["input_ids"]
__SCREAMING_SNAKE_CASE = input_ids[:1, :]
__SCREAMING_SNAKE_CASE = inputs_dict["attention_mask"][:1, :]
__SCREAMING_SNAKE_CASE = 1
# first forward pass
__SCREAMING_SNAKE_CASE = model(_a, attention_mask=_a, use_cache=_a )
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = outputs.to_tuple()
# create hypothetical next token and extent to next_input_ids
__SCREAMING_SNAKE_CASE = ids_tensor((self.batch_size, 3), config.vocab_size )
__SCREAMING_SNAKE_CASE = tf.cast(ids_tensor((self.batch_size, 3), 2 ), tf.inta )
# append to next input_ids and
__SCREAMING_SNAKE_CASE = tf.concat([input_ids, next_tokens], axis=-1 )
__SCREAMING_SNAKE_CASE = tf.concat([attention_mask, next_attn_mask], axis=-1 )
__SCREAMING_SNAKE_CASE = model(_a, attention_mask=_a )[0]
__SCREAMING_SNAKE_CASE = model(_a, attention_mask=_a, past_key_values=_a )[0]
self.parent.assertEqual(next_tokens.shape[1], output_from_past.shape[1] )
# select random slice
__SCREAMING_SNAKE_CASE = int(ids_tensor((1,), output_from_past.shape[-1] ) )
__SCREAMING_SNAKE_CASE = output_from_no_past[:, -3:, random_slice_idx]
__SCREAMING_SNAKE_CASE = output_from_past[:, :, random_slice_idx]
# test that outputs are equal for slice
tf.debugging.assert_near(_a, _a, rtol=1E-3 )
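# Helper that fills in default attention and head masks so tests only need to pass ids.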
def _A ( __snake_case :Any , __snake_case :Dict , __snake_case :List[Any] , __snake_case :List[Any]=None , __snake_case :Optional[Any]=None , __snake_case :Any=None , __snake_case :List[str]=None , ) -> List[str]:
"""simple docstring"""
if attention_mask is None:
__SCREAMING_SNAKE_CASE = tf.cast(tf.math.not_equal(__snake_case , config.pad_token_id ) , tf.inta )
if decoder_attention_mask is None:
__SCREAMING_SNAKE_CASE = tf.concat(
[
tf.ones(decoder_input_ids[:, :1].shape , dtype=tf.inta ),
tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:] , config.pad_token_id ) , tf.inta ),
] , axis=-1 , )
if head_mask is None:
__SCREAMING_SNAKE_CASE = tf.ones((config.encoder_layers, config.encoder_attention_heads) )
if decoder_head_mask is None:
__SCREAMING_SNAKE_CASE = tf.ones((config.decoder_layers, config.decoder_attention_heads) )
return {
"input_ids": input_ids,
"attention_mask": attention_mask,
"decoder_input_ids": decoder_input_ids,
"decoder_attention_mask": decoder_attention_mask,
"head_mask": head_mask,
"decoder_head_mask": decoder_head_mask,
}
@require_tf
class __SCREAMING_SNAKE_CASE ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , unittest.TestCase ):
SCREAMING_SNAKE_CASE__ =(TFLEDForConditionalGeneration, TFLEDModel) if is_tf_available() else ()
SCREAMING_SNAKE_CASE__ =(TFLEDForConditionalGeneration,) if is_tf_available() else ()
SCREAMING_SNAKE_CASE__ =(
{
"""conversational""": TFLEDForConditionalGeneration,
"""feature-extraction""": TFLEDModel,
"""summarization""": TFLEDForConditionalGeneration,
"""text2text-generation""": TFLEDForConditionalGeneration,
"""translation""": TFLEDForConditionalGeneration,
}
if is_tf_available()
else {}
)
SCREAMING_SNAKE_CASE__ =True
SCREAMING_SNAKE_CASE__ =False
SCREAMING_SNAKE_CASE__ =False
SCREAMING_SNAKE_CASE__ =False
def __lowerCAmelCase ( self ) -> Optional[Any]:
__SCREAMING_SNAKE_CASE = TFLEDModelTester(self )
__SCREAMING_SNAKE_CASE = ConfigTester(self, config_class=_a )
def __lowerCAmelCase ( self ) -> Any:
self.config_tester.run_common_tests()
def __lowerCAmelCase ( self ) -> Any:
__SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.check_decoder_model_past_large_inputs(*_a )
def __lowerCAmelCase ( self ) -> List[str]:
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs_for_common()
__SCREAMING_SNAKE_CASE = tf.zeros_like(inputs_dict["attention_mask"] )
__SCREAMING_SNAKE_CASE = 2
__SCREAMING_SNAKE_CASE = tf.where(
tf.range(self.model_tester.seq_length )[None, :] < num_global_attn_indices, 1, inputs_dict["global_attention_mask"], )
__SCREAMING_SNAKE_CASE = True
__SCREAMING_SNAKE_CASE = self.model_tester.seq_length
__SCREAMING_SNAKE_CASE = self.model_tester.encoder_seq_length
def check_decoder_attentions_output(_a ):
__SCREAMING_SNAKE_CASE = outputs.decoder_attentions
self.assertEqual(len(_a ), self.model_tester.num_hidden_layers )
self.assertListEqual(
list(decoder_attentions[0].shape[-3:] ), [self.model_tester.num_attention_heads, seq_length, seq_length], )
def check_encoder_attentions_output(_a ):
__SCREAMING_SNAKE_CASE = [t.numpy() for t in outputs.encoder_attentions]
__SCREAMING_SNAKE_CASE = [t.numpy() for t in outputs.encoder_global_attentions]
self.assertEqual(len(_a ), self.model_tester.num_hidden_layers )
self.assertEqual(len(_a ), self.model_tester.num_hidden_layers )
self.assertListEqual(
list(attentions[0].shape[-3:] ), [self.model_tester.num_attention_heads, seq_length, seq_length], )
self.assertListEqual(
list(global_attentions[0].shape[-3:] ), [self.model_tester.num_attention_heads, encoder_seq_length, num_global_attn_indices], )
for model_class in self.all_model_classes:
__SCREAMING_SNAKE_CASE = True
__SCREAMING_SNAKE_CASE = False
__SCREAMING_SNAKE_CASE = False
__SCREAMING_SNAKE_CASE = model_class(_a )
__SCREAMING_SNAKE_CASE = model(self._prepare_for_class(_a, _a ) )
__SCREAMING_SNAKE_CASE = len(_a )
self.assertEqual(config.output_hidden_states, _a )
check_encoder_attentions_output(_a )
if self.is_encoder_decoder:
__SCREAMING_SNAKE_CASE = model_class(_a )
__SCREAMING_SNAKE_CASE = model(self._prepare_for_class(_a, _a ) )
self.assertEqual(config.output_hidden_states, _a )
check_decoder_attentions_output(_a )
# Check that output attentions can also be changed via the config
del inputs_dict["output_attentions"]
__SCREAMING_SNAKE_CASE = True
__SCREAMING_SNAKE_CASE = model_class(_a )
__SCREAMING_SNAKE_CASE = model(self._prepare_for_class(_a, _a ) )
self.assertEqual(config.output_hidden_states, _a )
check_encoder_attentions_output(_a )
# Check attention is always last and order is fine
__SCREAMING_SNAKE_CASE = True
__SCREAMING_SNAKE_CASE = True
__SCREAMING_SNAKE_CASE = model_class(_a )
__SCREAMING_SNAKE_CASE = model(self._prepare_for_class(_a, _a ) )
self.assertEqual(out_len + (2 if self.is_encoder_decoder else 1), len(_a ) )
self.assertEqual(model.config.output_hidden_states, _a )
check_encoder_attentions_output(_a )
@unittest.skip("LED keeps using potentially symbolic tensors in conditionals and breaks tracing." )
def __lowerCAmelCase ( self ) -> Tuple:
pass
def __lowerCAmelCase ( self ) -> Optional[int]:
# TODO: Head-masking not yet implement
pass
def _A ( __snake_case :Optional[int] ) -> List[Any]:
"""simple docstring"""
return tf.constant(__snake_case , dtype=tf.intaa )
_snake_case : int = 1e-4
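# Slow integration tests: run allenai/led-base-16384 on fixed token ids and compare a 3x3
# slice of the hidden states / LM logits against stored reference values.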
@slow
@require_tf
class __SCREAMING_SNAKE_CASE ( unittest.TestCase ):
def __lowerCAmelCase ( self ) -> List[Any]:
__SCREAMING_SNAKE_CASE = TFLEDForConditionalGeneration.from_pretrained("allenai/led-base-16384" ).led
# change to intended input here
__SCREAMING_SNAKE_CASE = _long_tensor([5_12 * [0, 3_14_14, 2_32, 3_28, 7_40, 11_40, 1_26_95, 69]] )
__SCREAMING_SNAKE_CASE = _long_tensor([1_28 * [0, 3_14_14, 2_32, 3_28, 7_40, 11_40, 1_26_95, 69]] )
__SCREAMING_SNAKE_CASE = prepare_led_inputs_dict(model.config, _a, _a )
__SCREAMING_SNAKE_CASE = model(**_a )[0]
__SCREAMING_SNAKE_CASE = (1, 10_24, 7_68)
self.assertEqual(output.shape, _a )
# change to expected output here
__SCREAMING_SNAKE_CASE = tf.convert_to_tensor(
[[2.3050, 2.8279, 0.6531], [-1.8457, -0.1455, -3.5661], [-1.0186, 0.4586, -2.2043]], )
tf.debugging.assert_near(output[:, :3, :3], _a, atol=1E-3 )
def __lowerCAmelCase ( self ) -> List[str]:
__SCREAMING_SNAKE_CASE = TFLEDForConditionalGeneration.from_pretrained("allenai/led-base-16384" )
# change to intended input here
__SCREAMING_SNAKE_CASE = _long_tensor([5_12 * [0, 3_14_14, 2_32, 3_28, 7_40, 11_40, 1_26_95, 69]] )
__SCREAMING_SNAKE_CASE = _long_tensor([1_28 * [0, 3_14_14, 2_32, 3_28, 7_40, 11_40, 1_26_95, 69]] )
__SCREAMING_SNAKE_CASE = prepare_led_inputs_dict(model.config, _a, _a )
__SCREAMING_SNAKE_CASE = model(**_a )[0]
__SCREAMING_SNAKE_CASE = (1, 10_24, model.config.vocab_size)
self.assertEqual(output.shape, _a )
# change to expected output here
__SCREAMING_SNAKE_CASE = tf.convert_to_tensor(
[[33.6507, 6.4572, 16.8089], [5.8739, -2.4238, 11.2902], [-3.2139, -4.3149, 4.2783]], )
tf.debugging.assert_near(output[:, :3, :3], _a, atol=1E-3, rtol=1E-3 )
| 214 | 1 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCamelCase = logging.get_logger(__name__)
UpperCamelCase = {
'weiweishi/roc-bert-base-zh': 'https://huggingface.co/weiweishi/roc-bert-base-zh/resolve/main/config.json',
}
class UpperCAmelCase__ ( __lowerCamelCase ):
"""simple docstring"""
lowerCAmelCase__ : List[Any] = """roc_bert"""
def __init__( self , _SCREAMING_SNAKE_CASE=3_0_5_2_2 , _SCREAMING_SNAKE_CASE=7_6_8 , _SCREAMING_SNAKE_CASE=1_2 , _SCREAMING_SNAKE_CASE=1_2 , _SCREAMING_SNAKE_CASE=3_0_7_2 , _SCREAMING_SNAKE_CASE="gelu" , _SCREAMING_SNAKE_CASE=0.1 , _SCREAMING_SNAKE_CASE=0.1 , _SCREAMING_SNAKE_CASE=5_1_2 , _SCREAMING_SNAKE_CASE=2 , _SCREAMING_SNAKE_CASE=0.0_2 , _SCREAMING_SNAKE_CASE=1E-12 , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=0 , _SCREAMING_SNAKE_CASE="absolute" , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=7_6_8 , _SCREAMING_SNAKE_CASE=9_1_0 , _SCREAMING_SNAKE_CASE=5_1_2 , _SCREAMING_SNAKE_CASE=2_4_8_5_8 , _SCREAMING_SNAKE_CASE=True , **_SCREAMING_SNAKE_CASE , ) -> List[Any]:
a_ : Optional[int] = vocab_size
a_ : str = max_position_embeddings
a_ : List[Any] = hidden_size
a_ : Optional[Any] = num_hidden_layers
a_ : Union[str, Any] = num_attention_heads
a_ : List[str] = intermediate_size
a_ : List[Any] = hidden_act
a_ : str = hidden_dropout_prob
a_ : Union[str, Any] = attention_probs_dropout_prob
a_ : Any = initializer_range
a_ : str = type_vocab_size
a_ : Union[str, Any] = layer_norm_eps
a_ : str = use_cache
a_ : Tuple = enable_pronunciation
a_ : Dict = enable_shape
a_ : int = pronunciation_embed_dim
a_ : List[Any] = pronunciation_vocab_size
a_ : int = shape_embed_dim
a_ : List[str] = shape_vocab_size
a_ : List[Any] = concat_input
a_ : List[str] = position_embedding_type
a_ : Any = classifier_dropout
super().__init__(pad_token_id=_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )
| 473 |
"""simple docstring"""
UpperCamelCase = '0.18.2'
from .configuration_utils import ConfigMixin
from .utils import (
OptionalDependencyNotAvailable,
is_flax_available,
is_inflect_available,
is_invisible_watermark_available,
is_k_diffusion_available,
is_k_diffusion_version,
is_librosa_available,
is_note_seq_available,
is_onnx_available,
is_scipy_available,
is_torch_available,
is_torchsde_available,
is_transformers_available,
is_transformers_version,
is_unidecode_available,
logging,
)
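# Each try/except block below probes an optional dependency; when it is missing, dummy
# placeholder objects that raise informative import errors are exported instead.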
try:
if not is_onnx_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_onnx_objects import * # noqa F403
else:
from .pipelines import OnnxRuntimeModel
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_pt_objects import * # noqa F403
else:
from .models import (
AutoencoderKL,
ControlNetModel,
ModelMixin,
PriorTransformer,
TaFilmDecoder,
TransformeraDModel,
UNetaDModel,
UNetaDConditionModel,
UNetaDModel,
UNetaDConditionModel,
VQModel,
)
from .optimization import (
get_constant_schedule,
get_constant_schedule_with_warmup,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
get_scheduler,
)
from .pipelines import (
AudioPipelineOutput,
ConsistencyModelPipeline,
DanceDiffusionPipeline,
DDIMPipeline,
DDPMPipeline,
DiffusionPipeline,
DiTPipeline,
ImagePipelineOutput,
KarrasVePipeline,
LDMPipeline,
LDMSuperResolutionPipeline,
PNDMPipeline,
RePaintPipeline,
ScoreSdeVePipeline,
)
from .schedulers import (
CMStochasticIterativeScheduler,
DDIMInverseScheduler,
DDIMParallelScheduler,
DDIMScheduler,
DDPMParallelScheduler,
DDPMScheduler,
DEISMultistepScheduler,
DPMSolverMultistepInverseScheduler,
DPMSolverMultistepScheduler,
DPMSolverSinglestepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
HeunDiscreteScheduler,
IPNDMScheduler,
KarrasVeScheduler,
KDPMaAncestralDiscreteScheduler,
KDPMaDiscreteScheduler,
PNDMScheduler,
RePaintScheduler,
SchedulerMixin,
ScoreSdeVeScheduler,
UnCLIPScheduler,
UniPCMultistepScheduler,
VQDiffusionScheduler,
)
from .training_utils import EMAModel
try:
if not (is_torch_available() and is_scipy_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_scipy_objects import * # noqa F403
else:
from .schedulers import LMSDiscreteScheduler
try:
if not (is_torch_available() and is_torchsde_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_torchsde_objects import * # noqa F403
else:
from .schedulers import DPMSolverSDEScheduler
try:
if not (is_torch_available() and is_transformers_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .pipelines import (
AltDiffusionImgaImgPipeline,
AltDiffusionPipeline,
AudioLDMPipeline,
CycleDiffusionPipeline,
IFImgaImgPipeline,
IFImgaImgSuperResolutionPipeline,
IFInpaintingPipeline,
IFInpaintingSuperResolutionPipeline,
IFPipeline,
IFSuperResolutionPipeline,
ImageTextPipelineOutput,
KandinskyImgaImgPipeline,
KandinskyInpaintPipeline,
KandinskyPipeline,
KandinskyPriorPipeline,
KandinskyVaaControlnetImgaImgPipeline,
KandinskyVaaControlnetPipeline,
KandinskyVaaImgaImgPipeline,
KandinskyVaaInpaintPipeline,
KandinskyVaaPipeline,
KandinskyVaaPriorEmbaEmbPipeline,
KandinskyVaaPriorPipeline,
LDMTextToImagePipeline,
PaintByExamplePipeline,
SemanticStableDiffusionPipeline,
ShapEImgaImgPipeline,
ShapEPipeline,
StableDiffusionAttendAndExcitePipeline,
StableDiffusionControlNetImgaImgPipeline,
StableDiffusionControlNetInpaintPipeline,
StableDiffusionControlNetPipeline,
StableDiffusionDepthaImgPipeline,
StableDiffusionDiffEditPipeline,
StableDiffusionImageVariationPipeline,
StableDiffusionImgaImgPipeline,
StableDiffusionInpaintPipeline,
StableDiffusionInpaintPipelineLegacy,
StableDiffusionInstructPixaPixPipeline,
StableDiffusionLatentUpscalePipeline,
StableDiffusionLDMaDPipeline,
StableDiffusionModelEditingPipeline,
StableDiffusionPanoramaPipeline,
StableDiffusionParadigmsPipeline,
StableDiffusionPipeline,
StableDiffusionPipelineSafe,
StableDiffusionPixaPixZeroPipeline,
StableDiffusionSAGPipeline,
StableDiffusionUpscalePipeline,
StableUnCLIPImgaImgPipeline,
StableUnCLIPPipeline,
TextToVideoSDPipeline,
TextToVideoZeroPipeline,
UnCLIPImageVariationPipeline,
UnCLIPPipeline,
UniDiffuserModel,
UniDiffuserPipeline,
UniDiffuserTextDecoder,
VersatileDiffusionDualGuidedPipeline,
VersatileDiffusionImageVariationPipeline,
VersatileDiffusionPipeline,
VersatileDiffusionTextToImagePipeline,
VideoToVideoSDPipeline,
VQDiffusionPipeline,
)
try:
if not (is_torch_available() and is_transformers_available() and is_invisible_watermark_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_transformers_and_invisible_watermark_objects import * # noqa F403
else:
from .pipelines import StableDiffusionXLImgaImgPipeline, StableDiffusionXLPipeline
try:
if not (is_torch_available() and is_transformers_available() and is_k_diffusion_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_transformers_and_k_diffusion_objects import * # noqa F403
else:
from .pipelines import StableDiffusionKDiffusionPipeline
try:
if not (is_torch_available() and is_transformers_available() and is_onnx_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_transformers_and_onnx_objects import * # noqa F403
else:
from .pipelines import (
OnnxStableDiffusionImgaImgPipeline,
OnnxStableDiffusionInpaintPipeline,
OnnxStableDiffusionInpaintPipelineLegacy,
OnnxStableDiffusionPipeline,
OnnxStableDiffusionUpscalePipeline,
StableDiffusionOnnxPipeline,
)
try:
if not (is_torch_available() and is_librosa_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_librosa_objects import * # noqa F403
else:
from .pipelines import AudioDiffusionPipeline, Mel
try:
if not (is_transformers_available() and is_torch_available() and is_note_seq_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_transformers_and_torch_and_note_seq_objects import * # noqa F403
else:
from .pipelines import SpectrogramDiffusionPipeline
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_flax_objects import * # noqa F403
else:
from .models.controlnet_flax import FlaxControlNetModel
from .models.modeling_flax_utils import FlaxModelMixin
from .models.unet_ad_condition_flax import FlaxUNetaDConditionModel
from .models.vae_flax import FlaxAutoencoderKL
from .pipelines import FlaxDiffusionPipeline
from .schedulers import (
FlaxDDIMScheduler,
FlaxDDPMScheduler,
FlaxDPMSolverMultistepScheduler,
FlaxKarrasVeScheduler,
FlaxLMSDiscreteScheduler,
FlaxPNDMScheduler,
FlaxSchedulerMixin,
FlaxScoreSdeVeScheduler,
)
try:
if not (is_flax_available() and is_transformers_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_flax_and_transformers_objects import * # noqa F403
else:
from .pipelines import (
FlaxStableDiffusionControlNetPipeline,
FlaxStableDiffusionImgaImgPipeline,
FlaxStableDiffusionInpaintPipeline,
FlaxStableDiffusionPipeline,
)
try:
if not (is_note_seq_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_note_seq_objects import * # noqa F403
else:
from .pipelines import MidiProcessor
| 473 | 1 |
"""simple docstring"""
# tests directory-specific settings - this file is run automatically
# by pytest before any tests are run
import sys
import warnings
from os.path import abspath, dirname, join
# allow having multiple repository checkouts and not needing to remember to rerun
# 'pip install -e .[dev]' when switching between checkouts and running tests.
_A = abspath(join(dirname(dirname(dirname(__file__))), 'src'))
sys.path.insert(1, git_repo_path)
# silence FutureWarning warnings in tests since often we can't act on them until
# they become normal warnings - i.e. the tests still need to test the current functionality
warnings.simplefilter(action='ignore', category=FutureWarning)
def UpperCAmelCase ( a_ ):
'''simple docstring'''
from transformers.testing_utils import pytest_addoption_shared
pytest_addoption_shared(a_ )
def UpperCAmelCase ( a_ ):
'''simple docstring'''
from transformers.testing_utils import pytest_terminal_summary_main
lowerCamelCase : int = terminalreporter.config.getoption('--make-reports' )
if make_reports:
pytest_terminal_summary_main(a_, id=a_ )
| 133 |
"""simple docstring"""
import unittest
import torch
from torch import nn
from accelerate.test_utils import require_cuda
from accelerate.utils.memory import find_executable_batch_size, release_memory
def UpperCAmelCase ( ):
'''simple docstring'''
raise RuntimeError('CUDA out of memory.' )
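# find_executable_batch_size halves the batch size each time the wrapped function raises a
# CUDA OOM error, so the tests below expect the sequence 128 -> 64 -> 32 -> 16 -> 8.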
class _lowercase ( nn.Module ):
def __init__( self ) -> Optional[Any]:
super().__init__()
lowerCamelCase : Dict = nn.Linear(3 , 4 )
lowerCamelCase : Optional[int] = nn.BatchNormad(4 )
lowerCamelCase : List[str] = nn.Linear(4 , 5 )
def _UpperCamelCase ( self , UpperCAmelCase_ ) -> Dict:
return self.lineara(self.batchnorm(self.lineara(UpperCAmelCase_ ) ) )
class _lowercase ( unittest.TestCase ):
def _UpperCamelCase ( self ) -> Dict:
lowerCamelCase : Union[str, Any] = []
@find_executable_batch_size(starting_batch_size=128 )
def mock_training_loop_function(UpperCAmelCase_ ):
nonlocal batch_sizes
batch_sizes.append(UpperCAmelCase_ )
if batch_size != 8:
raise_fake_out_of_memory()
mock_training_loop_function()
self.assertListEqual(UpperCAmelCase_ , [128, 64, 32, 16, 8] )
def _UpperCamelCase ( self ) -> Any:
lowerCamelCase : Optional[Any] = []
@find_executable_batch_size(starting_batch_size=128 )
def mock_training_loop_function(UpperCAmelCase_ , UpperCAmelCase_ ):
nonlocal batch_sizes
batch_sizes.append(UpperCAmelCase_ )
if batch_size != 8:
raise_fake_out_of_memory()
return batch_size, arga
lowerCamelCase , lowerCamelCase : List[str] = mock_training_loop_function('hello' )
self.assertListEqual(UpperCAmelCase_ , [128, 64, 32, 16, 8] )
self.assertListEqual([bs, arga] , [8, 'hello'] )
def _UpperCamelCase ( self ) -> List[str]:
@find_executable_batch_size(starting_batch_size=0 )
def mock_training_loop_function(UpperCAmelCase_ ):
pass
with self.assertRaises(UpperCAmelCase_ ) as cm:
mock_training_loop_function()
self.assertIn('No executable batch size found, reached zero.' , cm.exception.args[0] )
def _UpperCamelCase ( self ) -> List[str]:
@find_executable_batch_size(starting_batch_size=16 )
def mock_training_loop_function(UpperCAmelCase_ ):
if batch_size > 0:
raise_fake_out_of_memory()
pass
with self.assertRaises(UpperCAmelCase_ ) as cm:
mock_training_loop_function()
self.assertIn('No executable batch size found, reached zero.' , cm.exception.args[0] )
def _UpperCamelCase ( self ) -> Any:
@find_executable_batch_size(starting_batch_size=128 )
def mock_training_loop_function(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ ):
if batch_size != 8:
raise raise_fake_out_of_memory()
with self.assertRaises(UpperCAmelCase_ ) as cm:
mock_training_loop_function(128 , 'hello' , 'world' )
self.assertIn('Batch size was passed into `f`' , cm.exception.args[0] )
self.assertIn('`f(arg1=\'hello\', arg2=\'world\')' , cm.exception.args[0] )
def _UpperCamelCase ( self ) -> List[str]:
@find_executable_batch_size(starting_batch_size=16 )
def mock_training_loop_function(UpperCAmelCase_ ):
raise ValueError('Oops, we had an error!' )
with self.assertRaises(UpperCAmelCase_ ) as cm:
mock_training_loop_function()
self.assertIn('Oops, we had an error!' , cm.exception.args[0] )
@require_cuda
def _UpperCamelCase ( self ) -> Union[str, Any]:
lowerCamelCase : List[str] = torch.cuda.memory_allocated()
lowerCamelCase : Optional[int] = ModelForTest()
model.cuda()
self.assertGreater(torch.cuda.memory_allocated() , UpperCAmelCase_ )
lowerCamelCase : Tuple = release_memory(UpperCAmelCase_ )
self.assertEqual(torch.cuda.memory_allocated() , UpperCAmelCase_ )
| 133 | 1 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
a_ = logging.get_logger(__name__)
a_ = {
"""andreasmadsen/efficient_mlm_m0.40""": (
"""https://huggingface.co/andreasmadsen/efficient_mlm_m0.40/resolve/main/config.json"""
),
}
class UpperCAmelCase__ ( snake_case ):
"""simple docstring"""
lowerCAmelCase__ : Tuple = 'roberta-prelayernorm'
def __init__( self: int , __lowerCAmelCase: List[Any]=50_265 , __lowerCAmelCase: Optional[int]=768 , __lowerCAmelCase: Tuple=12 , __lowerCAmelCase: List[str]=12 , __lowerCAmelCase: int=3_072 , __lowerCAmelCase: Tuple="gelu" , __lowerCAmelCase: str=0.1 , __lowerCAmelCase: List[Any]=0.1 , __lowerCAmelCase: Dict=512 , __lowerCAmelCase: Any=2 , __lowerCAmelCase: int=0.02 , __lowerCAmelCase: Dict=1E-12 , __lowerCAmelCase: List[Any]=1 , __lowerCAmelCase: Any=0 , __lowerCAmelCase: List[str]=2 , __lowerCAmelCase: Optional[int]="absolute" , __lowerCAmelCase: List[Any]=True , __lowerCAmelCase: List[Any]=None , **__lowerCAmelCase: List[str] , ) -> Tuple:
'''simple docstring'''
super().__init__(pad_token_id=__lowerCAmelCase , bos_token_id=__lowerCAmelCase , eos_token_id=__lowerCAmelCase , **__lowerCAmelCase )
__UpperCAmelCase = vocab_size
__UpperCAmelCase = hidden_size
__UpperCAmelCase = num_hidden_layers
__UpperCAmelCase = num_attention_heads
__UpperCAmelCase = hidden_act
__UpperCAmelCase = intermediate_size
__UpperCAmelCase = hidden_dropout_prob
__UpperCAmelCase = attention_probs_dropout_prob
__UpperCAmelCase = max_position_embeddings
__UpperCAmelCase = type_vocab_size
__UpperCAmelCase = initializer_range
__UpperCAmelCase = layer_norm_eps
__UpperCAmelCase = position_embedding_type
__UpperCAmelCase = use_cache
__UpperCAmelCase = classifier_dropout
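# ONNX export config: declares the dynamic axes (batch/sequence, plus a choice axis for
# multiple-choice heads) expected for input_ids and attention_mask.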
class UpperCAmelCase__ ( snake_case ):
"""simple docstring"""
@property
def _UpperCAmelCase ( self: List[Any] ) -> Mapping[str, Mapping[int, str]]:
'''simple docstring'''
if self.task == "multiple-choice":
__UpperCAmelCase = {0: "batch", 1: "choice", 2: "sequence"}
else:
__UpperCAmelCase = {0: "batch", 1: "sequence"}
return OrderedDict(
[
("input_ids", dynamic_axis),
("attention_mask", dynamic_axis),
] )
| 221 |
from statistics import mean, stdev
def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_: list , SCREAMING_SNAKE_CASE_: int = 3 ) -> list:
'''simple docstring'''
A__ = min(SCREAMING_SNAKE_CASE_ )
A__ = max(SCREAMING_SNAKE_CASE_ )
# normalize data
return [round((x - x_min) / (x_max - x_min) , SCREAMING_SNAKE_CASE_ ) for x in data]
def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_: list , SCREAMING_SNAKE_CASE_: int = 3 ) -> list:
'''simple docstring'''
A__ = mean(SCREAMING_SNAKE_CASE_ )
A__ = stdev(SCREAMING_SNAKE_CASE_ )
# standardize data
return [round((x - mu) / (sigma) , SCREAMING_SNAKE_CASE_ ) for x in data]
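# Example: normalization([1, 2, 3]) -> [0.0, 0.5, 1.0] (min-max scaled into [0, 1]);
# standardization([1, 2, 3]) -> [-1.0, 0.0, 1.0] (z-scores using the sample stdev).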
| 514 | 0 |
'''simple docstring'''
import argparse
from collections import OrderedDict
from pathlib import Path
import requests
import torch
from PIL import Image
from transformers import GLPNConfig, GLPNForDepthEstimation, GLPNImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
UpperCamelCase__ : Tuple = logging.get_logger(__name__)
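# rename_keys rewrites original GLPN checkpoint key names into the transformers
# GLPNForDepthEstimation layout (patch_embed -> patch_embeddings, attn -> attention.self, ...).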
def UpperCAmelCase ( a_ ) -> Optional[int]:
"""simple docstring"""
A_ : Union[str, Any] = OrderedDict()
for key, value in state_dict.items():
if key.startswith("""module.encoder""" ):
A_ : Dict = key.replace("""module.encoder""" , """glpn.encoder""" )
if key.startswith("""module.decoder""" ):
A_ : Dict = key.replace("""module.decoder""" , """decoder.stages""" )
if "patch_embed" in key:
# replace for example patch_embed1 by patch_embeddings.0
A_ : Optional[int] = key[key.find("""patch_embed""" ) + len("""patch_embed""" )]
A_ : Union[str, Any] = key.replace(F"patch_embed{idx}" , F"patch_embeddings.{int(a_ )-1}" )
if "norm" in key:
A_ : Dict = key.replace("""norm""" , """layer_norm""" )
if "glpn.encoder.layer_norm" in key:
# replace for example layer_norm1 by layer_norm.0
A_ : Dict = key[key.find("""glpn.encoder.layer_norm""" ) + len("""glpn.encoder.layer_norm""" )]
A_ : Dict = key.replace(F"layer_norm{idx}" , F"layer_norm.{int(a_ )-1}" )
if "layer_norm1" in key:
A_ : List[str] = key.replace("""layer_norm1""" , """layer_norm_1""" )
if "layer_norm2" in key:
A_ : Optional[Any] = key.replace("""layer_norm2""" , """layer_norm_2""" )
if "block" in key:
# replace for example block1 by block.0
A_ : int = key[key.find("""block""" ) + len("""block""" )]
A_ : Optional[int] = key.replace(F"block{idx}" , F"block.{int(a_ )-1}" )
if "attn.q" in key:
A_ : Tuple = key.replace("""attn.q""" , """attention.self.query""" )
if "attn.proj" in key:
A_ : List[Any] = key.replace("""attn.proj""" , """attention.output.dense""" )
if "attn" in key:
A_ : Union[str, Any] = key.replace("""attn""" , """attention.self""" )
if "fc1" in key:
A_ : Tuple = key.replace("""fc1""" , """dense1""" )
if "fc2" in key:
A_ : Tuple = key.replace("""fc2""" , """dense2""" )
if "linear_pred" in key:
A_ : Dict = key.replace("""linear_pred""" , """classifier""" )
if "linear_fuse" in key:
A_ : int = key.replace("""linear_fuse.conv""" , """linear_fuse""" )
A_ : str = key.replace("""linear_fuse.bn""" , """batch_norm""" )
if "linear_c" in key:
# replace for example linear_c4 by linear_c.3
A_ : List[Any] = key[key.find("""linear_c""" ) + len("""linear_c""" )]
A_ : int = key.replace(F"linear_c{idx}" , F"linear_c.{int(a_ )-1}" )
if "bot_conv" in key:
A_ : Any = key.replace("""bot_conv""" , """0.convolution""" )
if "skip_conv1" in key:
A_ : Union[str, Any] = key.replace("""skip_conv1""" , """1.convolution""" )
if "skip_conv2" in key:
A_ : Tuple = key.replace("""skip_conv2""" , """2.convolution""" )
if "fusion1" in key:
A_ : str = key.replace("""fusion1""" , """1.fusion""" )
if "fusion2" in key:
A_ : Tuple = key.replace("""fusion2""" , """2.fusion""" )
if "fusion3" in key:
A_ : Optional[Any] = key.replace("""fusion3""" , """3.fusion""" )
if "fusion" in key and "conv" in key:
A_ : List[str] = key.replace("""conv""" , """convolutional_layer""" )
if key.startswith("""module.last_layer_depth""" ):
A_ : Union[str, Any] = key.replace("""module.last_layer_depth""" , """head.head""" )
A_ : Union[str, Any] = value
return new_state_dict
def UpperCAmelCase ( a_ , a_ ) -> Dict:
"""simple docstring"""
for i in range(config.num_encoder_blocks ):
for j in range(config.depths[i] ):
# read in weights + bias of keys and values (which is a single matrix in the original implementation)
A_ : Union[str, Any] = state_dict.pop(F"glpn.encoder.block.{i}.{j}.attention.self.kv.weight" )
A_ : List[str] = state_dict.pop(F"glpn.encoder.block.{i}.{j}.attention.self.kv.bias" )
# next, add keys and values (in that order) to the state dict
A_ : List[Any] = kv_weight[
: config.hidden_sizes[i], :
]
A_ : Dict = kv_bias[: config.hidden_sizes[i]]
A_ : List[str] = kv_weight[
config.hidden_sizes[i] :, :
]
A_ : Union[str, Any] = kv_bias[config.hidden_sizes[i] :]
def UpperCAmelCase ( ) -> List[str]:
"""simple docstring"""
A_ : Dict = """http://images.cocodataset.org/val2017/000000039769.jpg"""
A_ : Union[str, Any] = Image.open(requests.get(a_ , stream=a_ ).raw )
return image
@torch.no_grad()
def UpperCAmelCase ( a_ , a_ , a_=False , a_=None ) -> Union[str, Any]:
"""simple docstring"""
A_ : List[str] = GLPNConfig(hidden_sizes=[6_4, 1_2_8, 3_2_0, 5_1_2] , decoder_hidden_size=6_4 , depths=[3, 8, 2_7, 3] )
# load image processor (only resize + rescale)
A_ : Tuple = GLPNImageProcessor()
# prepare image
A_ : Dict = prepare_img()
A_ : Union[str, Any] = image_processor(images=a_ , return_tensors="""pt""" ).pixel_values
logger.info("""Converting model...""" )
# load original state dict
A_ : Dict = torch.load(a_ , map_location=torch.device("""cpu""" ) )
# rename keys
A_ : Any = rename_keys(a_ )
# key and value matrices need special treatment
read_in_k_v(a_ , a_ )
# create HuggingFace model and load state dict
A_ : Any = GLPNForDepthEstimation(a_ )
model.load_state_dict(a_ )
model.eval()
# forward pass
A_ : List[Any] = model(a_ )
A_ : Optional[Any] = outputs.predicted_depth
# verify output
if model_name is not None:
if "nyu" in model_name:
A_ : str = torch.tensor(
[[4.4147, 4.0873, 4.0673], [3.7890, 3.2881, 3.1525], [3.7674, 3.5423, 3.4913]] )
elif "kitti" in model_name:
A_ : Dict = torch.tensor(
[[3.4291, 2.7865, 2.5151], [3.2841, 2.7021, 2.3502], [3.1147, 2.4625, 2.2481]] )
else:
raise ValueError(F"Unknown model name: {model_name}" )
A_ : str = torch.Size([1, 4_8_0, 6_4_0] )
assert predicted_depth.shape == expected_shape
assert torch.allclose(predicted_depth[0, :3, :3] , a_ , atol=1E-4 )
print("""Looks ok!""" )
# finally, push to hub if required
if push_to_hub:
logger.info("""Pushing model and image processor to the hub...""" )
model.push_to_hub(
repo_path_or_name=Path(a_ , a_ ) , organization="""nielsr""" , commit_message="""Add model""" , use_temp_dir=a_ , )
image_processor.push_to_hub(
repo_path_or_name=Path(a_ , a_ ) , organization="""nielsr""" , commit_message="""Add image processor""" , use_temp_dir=a_ , )
if __name__ == "__main__":
UpperCamelCase__ : Any = argparse.ArgumentParser()
parser.add_argument(
'--checkpoint_path',
default=None,
type=str,
help='Path to the original PyTorch checkpoint (.pth file).',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the folder to output PyTorch model.'
)
parser.add_argument(
'--push_to_hub', action='store_true', help='Whether to upload the model to the HuggingFace hub.'
)
parser.add_argument(
'--model_name',
default='glpn-kitti',
type=str,
help='Name of the model in case you\'re pushing to the hub.',
)
UpperCamelCase__ : List[Any] = parser.parse_args()
convert_glpn_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub, args.model_name)
| 710 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCamelCase__ : Union[str, Any] = logging.get_logger(__name__)
UpperCamelCase__ : Any = {
'sayakpaul/vit-msn-base': 'https://huggingface.co/sayakpaul/vit-msn-base/resolve/main/config.json',
# See all ViT MSN models at https://huggingface.co/models?filter=vit_msn
}
class ViTMSNConfig(PretrainedConfig):
    """Configuration for ViT MSN models; stores the hyperparameters listed below."""

    model_type = "vit_msn"

    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-06,
        image_size=224,
        patch_size=16,
        num_channels=3,
        qkv_bias=True,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
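# Minimal usage sketch (illustrative, not part of the original module): instantiate the
# config with a couple of overrides and read a field back.
if __name__ == "__main__":
    demo_config = ViTMSNConfig(hidden_size=384, num_attention_heads=6)
    print(demo_config.hidden_size)  # 384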
| 385 | 0 |
"""simple docstring"""
import copy
from typing import Any, Dict, List, Optional, Union
import numpy as np
import torch
from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import TensorType, logging
logger = logging.get_logger(__name__)


class ClapFeatureExtractor(SequenceFeatureExtractor):
    """Constructs a CLAP feature extractor (log-mel spectrograms with optional fusion)."""

    model_input_names = ["input_features", "is_longer"]

    def __init__(
        self,
        feature_size=64,
        sampling_rate=48_000,
        hop_length=480,
        max_length_s=10,
        fft_window_size=1024,
        padding_value=0.0,
        return_attention_mask=False,
        frequency_min: float = 0,
        frequency_max: float = 14_000,
        top_db: int = None,
        truncation: str = "fusion",
        padding: str = "repeatpad",
        **kwargs,
    ):
        super().__init__(
            feature_size=feature_size,
            sampling_rate=sampling_rate,
            padding_value=padding_value,
            return_attention_mask=return_attention_mask,
            **kwargs,
        )
        self.top_db = top_db
        self.truncation = truncation
        self.padding = padding
        self.fft_window_size = fft_window_size
        self.nb_frequency_bins = (fft_window_size >> 1) + 1
        self.hop_length = hop_length
        self.max_length_s = max_length_s
        self.nb_max_samples = max_length_s * sampling_rate
        self.sampling_rate = sampling_rate
        self.frequency_min = frequency_min
        self.frequency_max = frequency_max
        self.mel_filters = mel_filter_bank(
            num_frequency_bins=self.nb_frequency_bins,
            num_mel_filters=feature_size,
            min_frequency=frequency_min,
            max_frequency=frequency_max,
            sampling_rate=sampling_rate,
            norm=None,
            mel_scale="htk",
        )
        self.mel_filters_slaney = mel_filter_bank(
            num_frequency_bins=self.nb_frequency_bins,
            num_mel_filters=feature_size,
            min_frequency=frequency_min,
            max_frequency=frequency_max,
            sampling_rate=sampling_rate,
            norm="slaney",
            mel_scale="slaney",
        )
    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["feature_extractor_type"] = self.__class__.__name__
        if "mel_filters" in output:
            del output["mel_filters"]
        if "mel_filters_slaney" in output:
            del output["mel_filters_slaney"]
        return output
    def _np_extract_fbank_features(self, waveform, mel_filters=None):
        log_mel_spectrogram = spectrogram(
            waveform,
            window_function(self.fft_window_size, "hann"),
            frame_length=self.fft_window_size,
            hop_length=self.hop_length,
            power=2.0,
            mel_filters=mel_filters,
            log_mel="dB",
        )
        return log_mel_spectrogram.T
    def _random_mel_fusion(self, mel, total_frames, chunk_frames):
        ranges = np.array_split(list(range(0, total_frames - chunk_frames + 1)), 3)
        if len(ranges[1]) == 0:
            # if the audio is too short, we just use the first chunk
            ranges[1] = [0]
        if len(ranges[2]) == 0:
            # if the audio is too short, we just use the first chunk
            ranges[2] = [0]
        # randomly choose index for each part
        idx_front = np.random.choice(ranges[0])
        idx_middle = np.random.choice(ranges[1])
        idx_back = np.random.choice(ranges[2])

        mel_chunk_front = mel[idx_front : idx_front + chunk_frames, :]
        mel_chunk_middle = mel[idx_middle : idx_middle + chunk_frames, :]
        mel_chunk_back = mel[idx_back : idx_back + chunk_frames, :]

        mel_shrink = torch.tensor(mel[None, None, :])
        mel_shrink = torch.nn.functional.interpolate(
            mel_shrink, size=[chunk_frames, 64], mode="bilinear", align_corners=False)
        mel_shrink = mel_shrink[0][0].numpy()
        mel_fusion = np.stack([mel_shrink, mel_chunk_front, mel_chunk_middle, mel_chunk_back], axis=0)
        return mel_fusion
    def _get_input_mel(self, waveform, max_length, truncation, padding):
        if waveform.shape[0] > max_length:
            if truncation == "rand_trunc":
                longer = True
                # random crop to max_length (for compatibility) -> this should be handled by self.pad
                overflow = len(waveform) - max_length
                idx = np.random.randint(0, overflow + 1)
                waveform = waveform[idx : idx + max_length]
                input_mel = self._np_extract_fbank_features(waveform, self.mel_filters_slaney)[None, :]
            elif truncation == "fusion":
                mel = self._np_extract_fbank_features(waveform, self.mel_filters)
                chunk_frames = max_length // self.hop_length + 1  # the +1 related to how the spectrogram is computed
                total_frames = mel.shape[0]
                if chunk_frames == total_frames:
                    # there is a corner case where the audio length is larger than max_length but smaller than max_length+hop_length.
                    # In this case, we just use the whole audio.
                    input_mel = np.stack([mel, mel, mel, mel], axis=0)
                    longer = False
                else:
                    input_mel = self._random_mel_fusion(mel, total_frames, chunk_frames)
                    longer = True
            else:
                raise NotImplementedError(f"data_truncating {truncation} not implemented")
        else:
            longer = False
            # only use repeat as a new possible value for padding. you repeat the audio before applying the usual max_length padding
            if waveform.shape[0] < max_length:
                if padding == "repeat":
                    n_repeat = int(max_length / len(waveform))
                    waveform = np.stack(np.tile(waveform, n_repeat + 1))[:max_length]
                if padding == "repeatpad":
                    n_repeat = int(max_length / len(waveform))
                    waveform = np.stack(np.tile(waveform, n_repeat))
                waveform = np.pad(waveform, (0, max_length - waveform.shape[0]), mode="constant", constant_values=0)
            if truncation == "fusion":
                input_mel = self._np_extract_fbank_features(waveform, self.mel_filters)
                input_mel = np.stack([input_mel, input_mel, input_mel, input_mel], axis=0)
            else:
                input_mel = self._np_extract_fbank_features(waveform, self.mel_filters_slaney)[None, :]
        return input_mel, longer
    def __call__(
        self,
        raw_speech,
        truncation: str = None,
        padding: Optional[str] = None,
        max_length: Optional[int] = None,
        sampling_rate: Optional[int] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        **kwargs,
    ) -> BatchFeature:
        truncation = truncation if truncation is not None else self.truncation
        padding = padding if padding else self.padding
        if sampling_rate is not None:
            if sampling_rate != self.sampling_rate:
                raise ValueError(
                    f"The model corresponding to this feature extractor: {self.__class__.__name__} was trained using a"
                    f" sampling rate of {self.sampling_rate}. Please make sure that the provided `raw_speech` input"
                    f" was sampled with {self.sampling_rate} and not {sampling_rate}.")
        else:
            logger.warning(
                "It is strongly recommended to pass the `sampling_rate` argument to this function. "
                "Failing to do so can result in silent errors that might be hard to debug.")
        is_batched_numpy = isinstance(raw_speech, np.ndarray) and len(raw_speech.shape) > 1
        if is_batched_numpy and len(raw_speech.shape) > 2:
            raise ValueError(f"Only mono-channel audio is supported for input to {self}")
        is_batched = is_batched_numpy or (
            isinstance(raw_speech, (list, tuple)) and (isinstance(raw_speech[0], (np.ndarray, tuple, list)))
        )
        if is_batched:
            raw_speech = [np.asarray(speech, dtype=np.float64) for speech in raw_speech]
        elif not is_batched and not isinstance(raw_speech, np.ndarray):
            raw_speech = np.asarray(raw_speech, dtype=np.float64)
        elif isinstance(raw_speech, np.ndarray) and raw_speech.dtype is np.dtype(np.float64):
            raw_speech = raw_speech.astype(np.float64)
        # always return batch
        if not is_batched:
            raw_speech = [np.asarray(raw_speech)]
        # convert to mel spectrogram, truncate and pad if needed.
        padded_inputs = [
            self._get_input_mel(waveform, max_length if max_length else self.nb_max_samples, truncation, padding)
            for waveform in raw_speech
        ]
        input_mel = []
        is_longer = []
        for mel, longer in padded_inputs:
            input_mel.append(mel)
            is_longer.append(longer)
        if truncation == "fusion" and sum(is_longer) == 0:
            # if no audio is longer than 10s, then randomly select one audio to be longer
            rand_idx = np.random.randint(0, len(input_mel))
            is_longer[rand_idx] = True
        if isinstance(input_mel[0], List):
            input_mel = [np.asarray(feature, dtype=np.float64) for feature in input_mel]
        # is_longer is a list of bool
        is_longer = [[longer] for longer in is_longer]
        input_features = {"input_features": input_mel, "is_longer": is_longer}
        input_features = BatchFeature(input_features)
        if return_tensors is not None:
            input_features = input_features.convert_to_tensors(return_tensors)
        return input_features
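# Usage sketch (illustrative, not part of the original module): extract fused log-mel
# features from 10 seconds of quiet noise at the extractor's default 48 kHz rate. The
# exact frame count in the printed shape should be treated as approximate.
if __name__ == "__main__":
    extractor = ClapFeatureExtractor()
    demo_waveform = 0.01 * np.random.randn(10 * 48_000)
    features = extractor(demo_waveform, sampling_rate=48_000, return_tensors="np")
    print(features["input_features"].shape)  # roughly (1, 4, 1001, 64) with "fusion" truncation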
| 480 |
'''simple docstring'''
from .imports import is_rich_available
if is_rich_available():
from rich.traceback import install
install(show_locals=False)
else:
raise ModuleNotFoundError("To use the rich extension, install rich with `pip install rich`")
| 316 | 0 |
from collections import UserDict
from typing import List, Union
from ..utils import (
add_end_docstrings,
is_tf_available,
is_torch_available,
is_vision_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
if is_tf_available():
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
from ..tf_utils import stable_softmax
logger = logging.get_logger(__name__)


@add_end_docstrings(PIPELINE_INIT_ARGS)
class ZeroShotImageClassificationPipeline(Pipeline):
    """Zero-shot image classification pipeline using models such as CLIP."""

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        requires_backends(self, "vision")
        self.check_model_type(
            TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
            if self.framework == "tf"
            else MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING)

    def __call__(self, images, **kwargs):
        return super().__call__(images, **kwargs)

    def _sanitize_parameters(self, **kwargs):
        preprocess_params = {}
        if "candidate_labels" in kwargs:
            preprocess_params["candidate_labels"] = kwargs["candidate_labels"]
        if "hypothesis_template" in kwargs:
            preprocess_params["hypothesis_template"] = kwargs["hypothesis_template"]
        return preprocess_params, {}, {}

    def preprocess(self, image, candidate_labels=None, hypothesis_template="This is a photo of {}."):
        image = load_image(image)
        inputs = self.image_processor(images=[image], return_tensors=self.framework)
        inputs["candidate_labels"] = candidate_labels
        sequences = [hypothesis_template.format(x) for x in candidate_labels]
        text_inputs = self.tokenizer(sequences, return_tensors=self.framework, padding=self.framework == "pt" or True)
        inputs["text_inputs"] = [text_inputs]
        return inputs

    def _forward(self, model_inputs):
        candidate_labels = model_inputs.pop("candidate_labels")
        text_inputs = model_inputs.pop("text_inputs")
        if isinstance(text_inputs[0], UserDict):
            text_inputs = text_inputs[0]
        else:
            # Batching case.
            text_inputs = text_inputs[0][0]
        outputs = self.model(**text_inputs, **model_inputs)
        model_outputs = {
            "candidate_labels": candidate_labels,
            "logits": outputs.logits_per_image,
        }
        return model_outputs

    def postprocess(self, model_outputs):
        candidate_labels = model_outputs.pop("candidate_labels")
        logits = model_outputs["logits"][0]
        if self.framework == "pt":
            probs = logits.softmax(dim=-1).squeeze(-1)
            scores = probs.tolist()
            if not isinstance(scores, list):
                scores = [scores]
        elif self.framework == "tf":
            probs = stable_softmax(logits, axis=-1)
            scores = probs.numpy().tolist()
        else:
            raise ValueError(f"Unsupported framework: {self.framework}")
        result = [
            {"score": score, "label": candidate_label}
            for score, candidate_label in sorted(zip(scores, candidate_labels), key=lambda x: -x[0])
        ]
        return result
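# Usage sketch (illustrative, not part of the original module); the checkpoint below is an
# assumption for the example:
#
#   from transformers import pipeline
#   classifier = pipeline("zero-shot-image-classification", model="openai/clip-vit-base-patch32")
#   classifier(
#       "http://images.cocodataset.org/val2017/000000039769.jpg",
#       candidate_labels=["a photo of a cat", "a photo of a dog"],
#   )
#   # -> list of {"score": ..., "label": ...} dicts, sorted by descending score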
| 451 |
import argparse
import shutil
import time
from json import JSONDecodeError
from logging import getLogger
from pathlib import Path
from typing import Dict, List
import torch
from torch.utils.data import DataLoader
from tqdm import tqdm
from transformers import AutoModelForSeq2SeqLM, AutoTokenizer
from utils import (
    Seq2SeqDataset,
calculate_bleu,
calculate_rouge,
chunks,
lmap,
load_json,
parse_numeric_n_bool_cl_kwargs,
save_json,
use_task_specific_params,
write_txt_file,
)
logger = getLogger(__name__)


def eval_data_dir(
    data_dir, save_dir: str, model_name: str, bs: int = 8, max_source_length: int = 1024,
    type_path="val", n_obs=None, fp16=False, task="summarization", local_rank=None,
    num_return_sequences=1, dataset_kwargs: Dict = None, prefix="", **generate_kwargs,
) -> Dict:
    """Run evaluation on part of the data for one gpu and save to {save_dir}/rank_{rank}_output.json"""
    model_name = str(model_name)
    assert local_rank is not None
    torch.distributed.init_process_group(backend="nccl", rank=local_rank)
    save_dir = Path(save_dir)
    save_path = save_dir.joinpath(f"rank_{local_rank}_output.json")
    torch.cuda.set_device(local_rank)
    model = AutoModelForSeq2SeqLM.from_pretrained(model_name).cuda()
    if fp16:
        model = model.half()
    # determine if we need to increase num_beams
    use_task_specific_params(model, task)  # update config with task specific params
    num_beams = generate_kwargs.pop("num_beams", model.config.num_beams)  # AttributeError risk?
    if num_return_sequences > num_beams:
        num_beams = num_return_sequences
    tokenizer = AutoTokenizer.from_pretrained(model_name)
    logger.info(f"Inferred tokenizer type: {tokenizer.__class__}")  # if this is wrong, check config.model_type.
    if max_source_length is None:
        max_source_length = tokenizer.model_max_length
    if prefix is None:
        prefix = prefix or getattr(model.config, "prefix", "") or ""
    ds = Seq2SeqDataset(
        tokenizer, data_dir, max_source_length, max_target_length=1024,
        type_path=type_path, n_obs=n_obs, prefix=prefix, **dataset_kwargs,
    )
    # I set shuffle=True for a more accurate progress bar.
    # If all the longest samples are first, the prog bar estimate is too high at the beginning.
    sampler = ds.make_sortish_sampler(bs, distributed=True, add_extra_examples=False, shuffle=True)
    data_loader = DataLoader(ds, sampler=sampler, batch_size=bs, collate_fn=ds.collate_fn)
    results = []
    for batch in tqdm(data_loader):
        summaries = model.generate(
            input_ids=batch["input_ids"].to(model.device),
            attention_mask=batch["attention_mask"].to(model.device),
            num_return_sequences=num_return_sequences,
            num_beams=num_beams,
            **generate_kwargs,
        )
        preds = tokenizer.batch_decode(summaries, skip_special_tokens=True, clean_up_tokenization_spaces=False)
        ids = batch["ids"]
        if num_return_sequences > 1:
            preds = chunks(preds, num_return_sequences)  # batch size chunks, each of size num_return_seq
        for i, pred in enumerate(preds):
            results.append({"pred": pred, "id": ids[i].item()})
    save_json(results, save_path)
    return results, sampler.num_replicas
def run_generate():
    parser = argparse.ArgumentParser(
        epilog="Unspecified args like --num_beams=2 --decoder_start_token_id=4 are passed to model.generate")
    parser.add_argument("--data_dir", type=str, help="like cnn_dm/test.source")
    parser.add_argument(
        "--model_name", type=str, help="like facebook/bart-large-cnn,t5-base, etc.",
        default="sshleifer/distilbart-xsum-12-3",
    )
    parser.add_argument("--save_dir", type=str, help="where to save", default="tmp_gen")
    parser.add_argument("--max_source_length", type=int, default=None)
    parser.add_argument(
        "--type_path", type=str, default="test", help="which subset to evaluate typically train/val/test")
    parser.add_argument("--task", type=str, default="summarization", help="used for task_specific_params + metrics")
    parser.add_argument("--bs", type=int, default=8, required=False, help="batch size")
    parser.add_argument(
        "--local_rank", type=int, default=-1, required=False, help="should be passed by distributed.launch")
    parser.add_argument(
        "--n_obs", type=int, default=None, required=False, help="How many observations. Defaults to all.")
    parser.add_argument(
        "--num_return_sequences", type=int, default=1, required=False, help="How many sequences to return")
    parser.add_argument(
        "--sync_timeout", type=int, default=600, required=False,
        help="How long should master process wait for other processes to finish.",
    )
    parser.add_argument("--src_lang", type=str, default=None, required=False)
    parser.add_argument("--tgt_lang", type=str, default=None, required=False)
    parser.add_argument(
        "--prefix", type=str, required=False, default=None, help="will be added to the beginning of src examples")
    parser.add_argument("--fp16", action="store_true")
    parser.add_argument("--debug", action="store_true")
    start_time = time.time()
    args, rest = parser.parse_known_args()
    generate_kwargs = parse_numeric_n_bool_cl_kwargs(rest)
    if generate_kwargs and args.local_rank <= 0:
        print(f"parsed the following generate kwargs: {generate_kwargs}")
    json_save_dir = Path(args.save_dir + "_tmp")
    Path(json_save_dir).mkdir(exist_ok=True)  # this handles locking.
    intermediate_files = list(json_save_dir.glob("rank_*.json"))
    if intermediate_files:
        raise ValueError(f"Found files at {json_save_dir} please move or remove them.")
    # In theory, a node could finish and save before another node hits this. If this happens, we can address later.
    dataset_kwargs = {}
    if args.src_lang is not None:
        dataset_kwargs["src_lang"] = args.src_lang
    if args.tgt_lang is not None:
        dataset_kwargs["tgt_lang"] = args.tgt_lang
    Path(args.save_dir).mkdir(exist_ok=True)
    results, num_replicas = eval_data_dir(
        args.data_dir,
        json_save_dir,
        args.model_name,
        type_path=args.type_path,
        bs=args.bs,
        fp16=args.fp16,
        task=args.task,
        local_rank=args.local_rank,
        n_obs=args.n_obs,
        max_source_length=args.max_source_length,
        num_return_sequences=args.num_return_sequences,
        prefix=args.prefix,
        dataset_kwargs=dataset_kwargs,
        **generate_kwargs,
    )
    if args.local_rank <= 0:
        save_dir = Path(args.save_dir)
        save_dir.mkdir(exist_ok=True)
        partial_results = gather_results_from_each_node(num_replicas, json_save_dir, args.sync_timeout)
        preds = combine_partial_results(partial_results)
        if args.num_return_sequences > 1:
            pseudolabel_path = save_dir.joinpath("pseudolabel_results.json")
            print(f"Saving aggregated results at {pseudolabel_path}, intermediate in {json_save_dir}/")
            save_json(preds, pseudolabel_path)
            return
        tgt_file = Path(args.data_dir).joinpath(args.type_path + ".target")
        with open(tgt_file) as f:
            labels = [x.rstrip() for x in f.readlines()][: len(preds)]
        # Calculate metrics, save metrics, and save _generations.txt
        calc_bleu = "translation" in args.task
        score_fn = calculate_bleu if calc_bleu else calculate_rouge
        metric_name = "bleu" if calc_bleu else "rouge"
        metrics = score_fn(preds, labels)
        metrics["n_obs"] = len(preds)
        runtime = time.time() - start_time
        metrics["seconds_per_sample"] = round(runtime / metrics["n_obs"], 4)
        metrics["n_gpus"] = num_replicas
        # TODO(@stas00): add whatever metadata to metrics
        metrics_save_path = save_dir.joinpath(f"{args.type_path}_{metric_name}.json")
        save_json(metrics, metrics_save_path, indent=None)
        print(metrics)
        write_txt_file(preds, save_dir.joinpath(f"{args.type_path}_generations.txt"))
        if args.debug:
            write_txt_file(labels, save_dir.joinpath(f"{args.type_path}.target"))
        else:
            shutil.rmtree(json_save_dir)
def combine_partial_results(partial_results) -> List:
    """Concatenate partial results into one list, then sort it by id."""
    records = []
    for partial_result in partial_results:
        records.extend(partial_result)
    records = sorted(records, key=lambda x: x["id"])
    preds = [x["pred"] for x in records]
    return preds
def gather_results_from_each_node(num_replicas, save_dir, timeout) -> List[Dict[str, List]]:
    """Wait for each rank's rank_*.json file to appear, then load them all."""
    start_wait = time.time()
    logger.info("waiting for all nodes to finish")
    json_data = None
    while (time.time() - start_wait) < timeout:
        json_files = list(save_dir.glob("rank_*.json"))
        if len(json_files) < num_replicas:
            continue
        try:
            # make sure all json files are fully saved
            json_data = lmap(load_json, json_files)
            return json_data
        except JSONDecodeError:
            continue
    else:
        raise TimeoutError("Rank 0 gave up on waiting for other processes")
# Unreachable
if __name__ == "__main__":
# Usage for MT:
run_generate()
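    # Illustrative launch sketch (not part of the original script); the paths and GPU count
    # below are assumptions for the example:
    #
    #   python -m torch.distributed.launch --nproc_per_node=2 run_distributed_eval.py \
    #       --model_name sshleifer/distilbart-xsum-12-3 \
    #       --data_dir xsum --save_dir dbart_xsum_preds --bs 16 --fp16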
| 451 | 1 |
from collections import Counter
import numpy as np
from sklearn import datasets
from sklearn.model_selection import train_test_split
data = datasets.load_iris()

X = np.array(data["data"])
y = np.array(data["target"])
classes = data["target_names"]

X_train, X_test, y_train, y_test = train_test_split(X, y)


def euclidean_distance(a, b):
    """Gives the euclidean distance between two points."""
    return np.linalg.norm(np.array(a) - np.array(b))


def classifier(train_data, train_target, classes, point, k=5):
    """Classifies the point using the KNN algorithm over the labeled training data."""
    data = zip(train_data, train_target)
    # List of distances of all points from the point to be classified
    distances = []
    for data_point in data:
        distance = euclidean_distance(data_point[0], point)
        distances.append((distance, data_point[1]))
    # Choosing 'k' points with the least distances.
    votes = [i[1] for i in sorted(distances)[:k]]
    # Most commonly occurring class among them
    # is the class into which the point is classified
    result = Counter(votes).most_common(1)[0][0]
    return classes[result]
if __name__ == "__main__":
print(classifier(X_train, y_train, classes, [4.4, 3.1, 1.3, 1.4]))
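    # Small worked example (illustrative, not part of the original file): the two points
    # form a 3-4-5 right triangle, so the euclidean distance is exactly 5.0.
    assert euclidean_distance([0, 0], [3, 4]) == 5.0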
| 10 |
import torch
from torch import nn
class ProjectedAdaptiveLogSoftmax(nn.Module):
    """Adaptive softmax with vocabulary clusters, as used by Transformer-XL."""

    def __init__(self, n_token, d_embed, d_proj, cutoffs, div_val=1, keep_order=False):
        super().__init__()
        self.n_token = n_token
        self.d_embed = d_embed
        self.d_proj = d_proj
        self.cutoffs = cutoffs + [n_token]
        self.cutoff_ends = [0] + self.cutoffs
        self.div_val = div_val
        self.shortlist_size = self.cutoffs[0]
        self.n_clusters = len(self.cutoffs) - 1
        self.head_size = self.shortlist_size + self.n_clusters
        if self.n_clusters > 0:
            self.cluster_weight = nn.Parameter(torch.zeros(self.n_clusters, self.d_embed))
            self.cluster_bias = nn.Parameter(torch.zeros(self.n_clusters))
        self.out_layers = nn.ModuleList()
        self.out_projs = nn.ParameterList()
        if div_val == 1:
            for i in range(len(self.cutoffs)):
                if d_proj != d_embed:
                    self.out_projs.append(nn.Parameter(torch.FloatTensor(d_proj, d_embed)))
                else:
                    self.out_projs.append(None)
                self.out_layers.append(nn.Linear(d_embed, n_token))
        else:
            for i in range(len(self.cutoffs)):
                l_idx, r_idx = self.cutoff_ends[i], self.cutoff_ends[i + 1]
                d_emb_i = d_embed // (div_val**i)
                self.out_projs.append(nn.Parameter(torch.FloatTensor(d_proj, d_emb_i)))
                self.out_layers.append(nn.Linear(d_emb_i, r_idx - l_idx))
        self.keep_order = keep_order
    def _compute_logit(self, hidden, weight, bias, proj):
        if proj is None:
            logit = nn.functional.linear(hidden, weight, bias=bias)
        else:
            # if CUDA_MAJOR <= 9 and CUDA_MINOR <= 1:
            proj_hid = nn.functional.linear(hidden, proj.t().contiguous())
            logit = nn.functional.linear(proj_hid, weight, bias=bias)
            # else:
            #     logit = torch.einsum('bd,de,ev->bv', (hidden, proj, weight.t()))
            #     if bias is not None:
            #         logit = logit + bias
        return logit
    def forward(self, hidden, labels=None, keep_order=False):
        if labels is not None:
            # Shift so that tokens < n predict n
            hidden = hidden[..., :-1, :].contiguous()
            labels = labels[..., 1:].contiguous()
            hidden = hidden.view(-1, hidden.size(-1))
            labels = labels.view(-1)
            if hidden.size(0) != labels.size(0):
                raise RuntimeError("Input and labels should have the same size in the batch dimension.")
        else:
            hidden = hidden.view(-1, hidden.size(-1))
        if self.n_clusters == 0:
            logit = self._compute_logit(hidden, self.out_layers[0].weight, self.out_layers[0].bias, self.out_projs[0])
            if labels is not None:
                mask = labels != -100
                out = torch.zeros_like(labels, dtype=hidden.dtype, device=hidden.device)
                out[mask] = (
                    -nn.functional.log_softmax(logit, dim=-1)[mask].gather(1, labels[mask].unsqueeze(1)).squeeze(1)
                )
            else:
                out = nn.functional.log_softmax(logit, dim=-1)
        else:
            # construct weights and biases
            weights, biases = [], []
            for i in range(len(self.cutoffs)):
                if self.div_val == 1:
                    l_idx, r_idx = self.cutoff_ends[i], self.cutoff_ends[i + 1]
                    weight_i = self.out_layers[0].weight[l_idx:r_idx]
                    bias_i = self.out_layers[0].bias[l_idx:r_idx]
                else:
                    weight_i = self.out_layers[i].weight
                    bias_i = self.out_layers[i].bias
                if i == 0:
                    weight_i = torch.cat([weight_i, self.cluster_weight], dim=0)
                    bias_i = torch.cat([bias_i, self.cluster_bias], dim=0)
                weights.append(weight_i)
                biases.append(bias_i)
            head_weight, head_bias, head_proj = weights[0], biases[0], self.out_projs[0]
            head_logit = self._compute_logit(hidden, head_weight, head_bias, head_proj)
            head_logprob = nn.functional.log_softmax(head_logit, dim=1)
            if labels is None:
                out = hidden.new_empty((head_logit.size(0), self.n_token))
            else:
                out = torch.zeros_like(labels, dtype=hidden.dtype, device=hidden.device)
            offset = 0
            cutoff_values = [0] + self.cutoffs
            for i in range(len(cutoff_values) - 1):
                l_idx, r_idx = cutoff_values[i], cutoff_values[i + 1]
                if labels is not None:
                    mask_i = (labels >= l_idx) & (labels < r_idx)
                    indices_i = mask_i.nonzero().squeeze()
                    if indices_i.numel() == 0:
                        continue
                    target_i = labels.index_select(0, indices_i) - l_idx
                    head_logprob_i = head_logprob.index_select(0, indices_i)
                    hidden_i = hidden.index_select(0, indices_i)
                else:
                    hidden_i = hidden
                if i == 0:
                    if labels is not None:
                        logprob_i = head_logprob_i.gather(1, target_i[:, None]).squeeze(1)
                    else:
                        out[:, : self.cutoffs[0]] = head_logprob[:, : self.cutoffs[0]]
                else:
                    weight_i, bias_i, proj_i = weights[i], biases[i], self.out_projs[i]
                    tail_logit_i = self._compute_logit(hidden_i, weight_i, bias_i, proj_i)
                    tail_logprob_i = nn.functional.log_softmax(tail_logit_i, dim=1)
                    cluster_prob_idx = self.cutoffs[0] + i - 1  # No probability for the head cluster
                    if labels is not None:
                        logprob_i = head_logprob_i[:, cluster_prob_idx] + tail_logprob_i.gather(
                            1, target_i[:, None]).squeeze(1)
                    else:
                        logprob_i = head_logprob[:, cluster_prob_idx, None] + tail_logprob_i
                        out[:, l_idx:r_idx] = logprob_i
                if labels is not None:
                    if (hasattr(self, "keep_order") and self.keep_order) or keep_order:
                        out.index_copy_(0, indices_i, -logprob_i)
                    else:
                        out[offset : offset + logprob_i.size(0)].copy_(-logprob_i)
                    offset += logprob_i.size(0)
        return out
    def log_prob(self, hidden):
        """Compute log probabilities over the full vocabulary for every hidden state."""
        if self.n_clusters == 0:
            logit = self._compute_logit(hidden, self.out_layers[0].weight, self.out_layers[0].bias, self.out_projs[0])
            return nn.functional.log_softmax(logit, dim=-1)
        else:
            # construct weights and biases
            weights, biases = [], []
            for i in range(len(self.cutoffs)):
                if self.div_val == 1:
                    l_idx, r_idx = self.cutoff_ends[i], self.cutoff_ends[i + 1]
                    weight_i = self.out_layers[0].weight[l_idx:r_idx]
                    bias_i = self.out_layers[0].bias[l_idx:r_idx]
                else:
                    weight_i = self.out_layers[i].weight
                    bias_i = self.out_layers[i].bias
                if i == 0:
                    weight_i = torch.cat([weight_i, self.cluster_weight], dim=0)
                    bias_i = torch.cat([bias_i, self.cluster_bias], dim=0)
                weights.append(weight_i)
                biases.append(bias_i)
            head_weight, head_bias, head_proj = weights[0], biases[0], self.out_projs[0]
            head_logit = self._compute_logit(hidden, head_weight, head_bias, head_proj)
            out = hidden.new_empty((head_logit.size(0), self.n_token))
            head_logprob = nn.functional.log_softmax(head_logit, dim=1)
            cutoff_values = [0] + self.cutoffs
            for i in range(len(cutoff_values) - 1):
                l_idx, r_idx = cutoff_values[i], cutoff_values[i + 1]
                if i == 0:
                    out[:, : self.cutoffs[0]] = head_logprob[:, : self.cutoffs[0]]
                else:
                    weight_i, bias_i, proj_i = weights[i], biases[i], self.out_projs[i]
                    tail_logit_i = self._compute_logit(hidden, weight_i, bias_i, proj_i)
                    tail_logprob_i = nn.functional.log_softmax(tail_logit_i, dim=1)
                    # broadcast the cluster log-prob over the tail vocabulary chunk
                    logprob_i = head_logprob[:, -i, None] + tail_logprob_i
                    out[:, l_idx:r_idx] = logprob_i
            return out
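# Usage sketch (illustrative, not part of the original module): score random hidden states
# against random labels with a 1000-token vocabulary split at cutoffs [100, 500].
if __name__ == "__main__":
    torch.manual_seed(0)
    crit = ProjectedAdaptiveLogSoftmax(n_token=1000, d_embed=64, d_proj=64, cutoffs=[100, 500])
    hidden = torch.randn(4, 64, 64)            # [bsz, seq_len, d_proj]
    labels = torch.randint(0, 1000, (4, 64))   # [bsz, seq_len]
    nll = crit(hidden, labels)                 # one negative log-likelihood per predicted position
    print(nll.shape)                           # torch.Size([252]) == 4 * (64 - 1)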
| 84 | 0 |
import unittest
from typing import Tuple
import torch
from diffusers.utils import floats_tensor, randn_tensor, torch_all_close, torch_device
from diffusers.utils.testing_utils import require_torch
@require_torch
class UNetBlockTesterMixin:
    @property
    def dummy_input(self):
        return self.get_dummy_input()

    @property
    def output_shape(self):
        if self.block_type == "down":
            return (4, 32, 16, 16)
        elif self.block_type == "mid":
            return (4, 32, 32, 32)
        elif self.block_type == "up":
            return (4, 32, 64, 64)
        raise ValueError(f"'{self.block_type}' is not a supported block_type. Set it to 'up', 'mid', or 'down'.")

    def get_dummy_input(
        self,
        include_temb=True,
        include_res_hidden_states_tuple=False,
        include_encoder_hidden_states=False,
        include_skip_sample=False,
    ):
        batch_size = 4
        num_channels = 32
        sizes = (32, 32)

        generator = torch.manual_seed(0)
        device = torch.device(torch_device)
        shape = (batch_size, num_channels) + sizes
        hidden_states = randn_tensor(shape, generator=generator, device=device)
        dummy_input = {"hidden_states": hidden_states}

        if include_temb:
            temb_channels = 128
            dummy_input["temb"] = randn_tensor((batch_size, temb_channels), generator=generator, device=device)

        if include_res_hidden_states_tuple:
            generator_1 = torch.manual_seed(1)
            dummy_input["res_hidden_states_tuple"] = (randn_tensor(shape, generator=generator_1, device=device),)

        if include_encoder_hidden_states:
            dummy_input["encoder_hidden_states"] = floats_tensor((batch_size, 32, 32)).to(torch_device)

        if include_skip_sample:
            dummy_input["skip_sample"] = randn_tensor((batch_size, 3) + sizes, generator=generator, device=device)

        return dummy_input

    def prepare_init_args_and_inputs_for_common(self):
        init_dict = {
            "in_channels": 32,
            "out_channels": 32,
            "temb_channels": 128,
        }
        if self.block_type == "up":
            init_dict["prev_output_channel"] = 32

        if self.block_type == "mid":
            init_dict.pop("out_channels")

        inputs_dict = self.dummy_input
        return init_dict, inputs_dict

    def test_output(self, expected_slice):
        init_dict, inputs_dict = self.prepare_init_args_and_inputs_for_common()
        unet_block = self.block_class(**init_dict)
        unet_block.to(torch_device)
        unet_block.eval()

        with torch.no_grad():
            output = unet_block(**inputs_dict)

        if isinstance(output, tuple):
            output = output[0]

        self.assertEqual(output.shape, self.output_shape)

        output_slice = output[0, -1, -3:, -3:]
        expected_slice = torch.tensor(expected_slice).to(torch_device)
        assert torch_all_close(output_slice.flatten(), expected_slice, atol=5e-3)

    @unittest.skipIf(torch_device == "mps", "Training is not supported in mps")
    def test_training(self):
        init_dict, inputs_dict = self.prepare_init_args_and_inputs_for_common()
        model = self.block_class(**init_dict)
        model.to(torch_device)
        model.train()
        output = model(**inputs_dict)

        if isinstance(output, tuple):
            output = output[0]

        device = torch.device(torch_device)
        noise = randn_tensor(output.shape, device=device)
        loss = torch.nn.functional.mse_loss(output, noise)
        loss.backward()
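# Illustrative sketch (not part of the original module): a concrete test case mixes this
# class into unittest.TestCase and sets `block_class`, `block_type`, and an expected slice:
#
#   class DownBlock2DTests(UNetBlockTesterMixin, unittest.TestCase):
#       block_class = DownBlock2D  # imported from diffusers.models (assumed here)
#       block_type = "down"
#
#       def test_output(self):
#           expected_slice = [...]  # nine floats for the (3, 3) corner of the output
#           super().test_output(expected_slice)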
| 84 |
import unittest
import numpy as np
from transformers.testing_utils import require_pytesseract, require_torch
from transformers.utils import is_pytesseract_available, is_torch_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_pytesseract_available():
from PIL import Image
from transformers import LayoutLMvaImageProcessor
class LayoutLMvaImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        apply_ocr=True,
    ):
        size = size if size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.apply_ocr = apply_ocr

    def prepare_image_processor_dict(self):
        return {"do_resize": self.do_resize, "size": self.size, "apply_ocr": self.apply_ocr}
@require_torch
@require_pytesseract
class LayoutLMvaImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = LayoutLMvaImageProcessor if is_pytesseract_available() else None

    def setUp(self):
        self.image_processor_tester = LayoutLMvaImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "apply_ocr"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"height": 18, "width": 18})

        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42)
        self.assertEqual(image_processor.size, {"height": 42, "width": 42})

    def test_batch_feature(self):
        pass
    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoding = image_processing(image_inputs[0], return_tensors="pt")
        self.assertEqual(
            encoding.pixel_values.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )
        self.assertIsInstance(encoding.words, list)
        self.assertIsInstance(encoding.boxes, list)

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )
    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )
    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )
    def test_LayoutLMv3_integration_test(self):
        # with apply_OCR = True
        image_processing = LayoutLMvaImageProcessor()

        from datasets import load_dataset

        ds = load_dataset("hf-internal-testing/fixtures_docvqa", split="test")

        image = Image.open(ds[0]["file"]).convert("RGB")

        encoding = image_processing(image, return_tensors="pt")

        self.assertEqual(encoding.pixel_values.shape, (1, 3, 224, 224))
        self.assertEqual(len(encoding.words), len(encoding.boxes))
# fmt: off
# the words and boxes were obtained with Tesseract 4.1.1
snake_case : Optional[Any] = [['''11:14''', '''to''', '''11:39''', '''a.m''', '''11:39''', '''to''', '''11:44''', '''a.m.''', '''11:44''', '''a.m.''', '''to''', '''12:25''', '''p.m.''', '''12:25''', '''to''', '''12:58''', '''p.m.''', '''12:58''', '''to''', '''4:00''', '''p.m.''', '''2:00''', '''to''', '''5:00''', '''p.m.''', '''Coffee''', '''Break''', '''Coffee''', '''will''', '''be''', '''served''', '''for''', '''men''', '''and''', '''women''', '''in''', '''the''', '''lobby''', '''adjacent''', '''to''', '''exhibit''', '''area.''', '''Please''', '''move''', '''into''', '''exhibit''', '''area.''', '''(Exhibits''', '''Open)''', '''TRRF''', '''GENERAL''', '''SESSION''', '''(PART''', '''|)''', '''Presiding:''', '''Lee''', '''A.''', '''Waller''', '''TRRF''', '''Vice''', '''President''', '''“Introductory''', '''Remarks”''', '''Lee''', '''A.''', '''Waller,''', '''TRRF''', '''Vice''', '''Presi-''', '''dent''', '''Individual''', '''Interviews''', '''with''', '''TRRF''', '''Public''', '''Board''', '''Members''', '''and''', '''Sci-''', '''entific''', '''Advisory''', '''Council''', '''Mem-''', '''bers''', '''Conducted''', '''by''', '''TRRF''', '''Treasurer''', '''Philip''', '''G.''', '''Kuehn''', '''to''', '''get''', '''answers''', '''which''', '''the''', '''public''', '''refrigerated''', '''warehousing''', '''industry''', '''is''', '''looking''', '''for.''', '''Plus''', '''questions''', '''from''', '''the''', '''floor.''', '''Dr.''', '''Emil''', '''M.''', '''Mrak,''', '''University''', '''of''', '''Cal-''', '''ifornia,''', '''Chairman,''', '''TRRF''', '''Board;''', '''Sam''', '''R.''', '''Cecil,''', '''University''', '''of''', '''Georgia''', '''College''', '''of''', '''Agriculture;''', '''Dr.''', '''Stanley''', '''Charm,''', '''Tufts''', '''University''', '''School''', '''of''', '''Medicine;''', '''Dr.''', '''Robert''', '''H.''', '''Cotton,''', '''ITT''', '''Continental''', '''Baking''', '''Company;''', '''Dr.''', '''Owen''', '''Fennema,''', '''University''', '''of''', '''Wis-''', '''consin;''', '''Dr.''', '''Robert''', '''E.''', '''Hardenburg,''', '''USDA.''', '''Questions''', '''and''', '''Answers''', '''Exhibits''', '''Open''', '''Capt.''', '''Jack''', '''Stoney''', '''Room''', '''TRRF''', '''Scientific''', '''Advisory''', '''Council''', '''Meeting''', '''Ballroom''', '''Foyer''']] # noqa: E231
snake_case : Union[str, Any] = [[[141, 57, 214, 69], [228, 58, 252, 69], [141, 75, 216, 88], [230, 79, 280, 88], [142, 260, 218, 273], [230, 261, 255, 273], [143, 279, 218, 290], [231, 282, 290, 291], [143, 342, 218, 354], [231, 345, 289, 355], [202, 362, 227, 373], [143, 379, 220, 392], [231, 382, 291, 394], [144, 714, 220, 726], [231, 715, 256, 726], [144, 732, 220, 745], [232, 736, 291, 747], [144, 769, 218, 782], [231, 770, 256, 782], [141, 788, 202, 801], [215, 791, 274, 804], [143, 826, 204, 838], [215, 826, 240, 838], [142, 844, 202, 857], [215, 847, 274, 859], [334, 57, 427, 69], [440, 57, 522, 69], [369, 75, 461, 88], [469, 75, 516, 88], [528, 76, 562, 88], [570, 76, 667, 88], [675, 75, 711, 87], [721, 79, 778, 88], [789, 75, 840, 88], [369, 97, 470, 107], [484, 94, 507, 106], [518, 94, 562, 107], [576, 94, 655, 110], [668, 94, 792, 109], [804, 95, 829, 107], [369, 113, 465, 125], [477, 116, 547, 125], [562, 113, 658, 125], [671, 116, 748, 125], [761, 113, 811, 125], [369, 131, 465, 143], [477, 133, 548, 143], [563, 130, 698, 145], [710, 130, 802, 146], [336, 171, 412, 183], [423, 171, 572, 183], [582, 170, 716, 184], [728, 171, 817, 187], [829, 171, 844, 186], [338, 197, 482, 212], [507, 196, 557, 209], [569, 196, 595, 208], [610, 196, 702, 209], [505, 214, 583, 226], [595, 214, 656, 227], [670, 215, 807, 227], [335, 259, 543, 274], [556, 259, 708, 272], [372, 279, 422, 291], [435, 279, 460, 291], [474, 279, 574, 292], [587, 278, 664, 291], [676, 278, 738, 291], [751, 279, 834, 291], [372, 298, 434, 310], [335, 341, 483, 354], [497, 341, 655, 354], [667, 341, 728, 354], [740, 341, 825, 354], [335, 360, 430, 372], [442, 360, 534, 372], [545, 359, 687, 372], [697, 360, 754, 372], [765, 360, 823, 373], [334, 378, 428, 391], [440, 378, 577, 394], [590, 378, 705, 391], [720, 378, 801, 391], [334, 397, 400, 409], [370, 416, 529, 429], [544, 416, 576, 432], [587, 416, 665, 428], [677, 416, 814, 429], [372, 435, 452, 450], [465, 434, 495, 447], [511, 434, 600, 447], [611, 436, 637, 447], [649, 436, 694, 451], [705, 438, 824, 447], [369, 453, 452, 466], [464, 454, 509, 466], [522, 453, 611, 469], [625, 453, 792, 469], [370, 472, 556, 488], [570, 472, 684, 487], [697, 472, 718, 485], [732, 472, 835, 488], [369, 490, 411, 503], [425, 490, 484, 503], [496, 490, 635, 506], [645, 490, 707, 503], [718, 491, 761, 503], [771, 490, 840, 503], [336, 510, 374, 521], [388, 510, 447, 522], [460, 510, 489, 521], [503, 510, 580, 522], [592, 509, 736, 525], [745, 509, 770, 522], [781, 509, 840, 522], [338, 528, 434, 541], [448, 528, 596, 541], [609, 527, 687, 540], [700, 528, 792, 541], [336, 546, 397, 559], [407, 546, 431, 559], [443, 546, 525, 560], [537, 546, 680, 562], [688, 546, 714, 559], [722, 546, 837, 562], [336, 565, 449, 581], [461, 565, 485, 577], [497, 565, 665, 581], [681, 565, 718, 577], [732, 565, 837, 580], [337, 584, 438, 597], [452, 583, 521, 596], [535, 584, 677, 599], [690, 583, 787, 596], [801, 583, 825, 596], [338, 602, 478, 615], [492, 602, 530, 614], [543, 602, 638, 615], [650, 602, 676, 614], [688, 602, 788, 615], [802, 602, 843, 614], [337, 621, 502, 633], [516, 621, 615, 637], [629, 621, 774, 636], [789, 621, 827, 633], [337, 639, 418, 652], [432, 640, 571, 653], [587, 639, 731, 655], [743, 639, 769, 652], [780, 639, 841, 652], [338, 658, 440, 673], [455, 658, 491, 670], [508, 658, 602, 671], [616, 658, 638, 670], [654, 658, 835, 674], [337, 677, 429, 689], [337, 714, 482, 726], [495, 714, 548, 726], [561, 714, 683, 726], [338, 770, 461, 782], [474, 769, 554, 785], [489, 788, 
562, 803], [576, 788, 643, 801], [656, 787, 751, 804], [764, 788, 844, 801], [334, 825, 421, 838], [430, 824, 574, 838], [584, 824, 723, 841], [335, 844, 450, 857], [464, 843, 583, 860], [628, 862, 755, 875], [769, 861, 848, 878]]] # noqa: E231
# fmt: on
self.assertListEqual(encoding.words , UpperCAmelCase__ )
self.assertListEqual(encoding.boxes , UpperCAmelCase__ )
# with apply_OCR = False
        image_processing = LayoutLMvaImageProcessor(apply_ocr=False)
        encoding = image_processing(image, return_tensors="pt")
self.assertEqual(encoding.pixel_values.shape , (1, 3, 224, 224) )
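    # Usage sketch (illustrative, not part of the test file). `LayoutLMvaImageProcessor`
    # above corresponds to `LayoutLMv3ImageProcessor` in released transformers versions:
    #
    #   processor = LayoutLMvaImageProcessor(apply_ocr=False)  # skip the Tesseract OCR step
    #   pixel_values = processor(image, return_tensors="pt").pixel_values  # (1, 3, 224, 224)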
| 84 | 1 |
"""simple docstring"""
import argparse
import numpy as np
import torch
from transformers import SpeechT5HifiGan, SpeechT5HifiGanConfig, logging

logging.set_verbosity_info()
logger = logging.get_logger("transformers.models.speecht5")


def load_weights(checkpoint, hf_model, config):
    hf_model.apply_weight_norm()

    hf_model.conv_pre.weight_g.data = checkpoint["input_conv.weight_g"]
    hf_model.conv_pre.weight_v.data = checkpoint["input_conv.weight_v"]
    hf_model.conv_pre.bias.data = checkpoint["input_conv.bias"]

    for i in range(len(config.upsample_rates)):
        hf_model.upsampler[i].weight_g.data = checkpoint[f"upsamples.{i}.1.weight_g"]
        hf_model.upsampler[i].weight_v.data = checkpoint[f"upsamples.{i}.1.weight_v"]
        hf_model.upsampler[i].bias.data = checkpoint[f"upsamples.{i}.1.bias"]

    for i in range(len(config.upsample_rates) * len(config.resblock_kernel_sizes)):
        for j in range(len(config.resblock_dilation_sizes)):
            hf_model.resblocks[i].convs1[j].weight_g.data = checkpoint[f"blocks.{i}.convs1.{j}.1.weight_g"]
            hf_model.resblocks[i].convs1[j].weight_v.data = checkpoint[f"blocks.{i}.convs1.{j}.1.weight_v"]
            hf_model.resblocks[i].convs1[j].bias.data = checkpoint[f"blocks.{i}.convs1.{j}.1.bias"]

            hf_model.resblocks[i].convs2[j].weight_g.data = checkpoint[f"blocks.{i}.convs2.{j}.1.weight_g"]
            hf_model.resblocks[i].convs2[j].weight_v.data = checkpoint[f"blocks.{i}.convs2.{j}.1.weight_v"]
            hf_model.resblocks[i].convs2[j].bias.data = checkpoint[f"blocks.{i}.convs2.{j}.1.bias"]

    hf_model.conv_post.weight_g.data = checkpoint["output_conv.1.weight_g"]
    hf_model.conv_post.weight_v.data = checkpoint["output_conv.1.weight_v"]
    hf_model.conv_post.bias.data = checkpoint["output_conv.1.bias"]

    hf_model.remove_weight_norm()


@torch.no_grad()
def convert_hifigan_checkpoint(
    checkpoint_path,
    stats_path,
    pytorch_dump_folder_path,
    config_path=None,
    repo_id=None,
):
    if config_path is not None:
        config = SpeechT5HifiGanConfig.from_pretrained(config_path)
    else:
        config = SpeechT5HifiGanConfig()

    model = SpeechT5HifiGan(config)

    orig_checkpoint = torch.load(checkpoint_path)
    load_weights(orig_checkpoint["model"]["generator"], model, config)

    stats = np.load(stats_path)
    mean = stats[0].reshape(-1)
    scale = stats[1].reshape(-1)
    model.mean = torch.from_numpy(mean).float()
    model.scale = torch.from_numpy(scale).float()

    model.save_pretrained(pytorch_dump_folder_path)

    if repo_id:
        print("Pushing to the hub...")
        model.push_to_hub(repo_id)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument('''--checkpoint_path''', required=True, default=None, type=str, help='''Path to original checkpoint''')
parser.add_argument('''--stats_path''', required=True, default=None, type=str, help='''Path to stats.npy file''')
parser.add_argument('''--config_path''', default=None, type=str, help='''Path to hf config.json of model to convert''')
parser.add_argument(
'''--pytorch_dump_folder_path''', required=True, default=None, type=str, help='''Path to the output PyTorch model.'''
)
parser.add_argument(
'''--push_to_hub''', default=None, type=str, help='''Where to upload the converted model on the 🤗 hub.'''
)
    args = parser.parse_args()
convert_hifigan_checkpoint(
args.checkpoint_path,
args.stats_path,
args.pytorch_dump_folder_path,
args.config_path,
args.push_to_hub,
)
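    # Illustrative invocation sketch (not part of the original script); the file names
    # below are assumptions for the example:
    #
    #   python convert_hifigan.py \
    #       --checkpoint_path ./hifigan_generator.ckpt \
    #       --stats_path ./hifigan_stats.npy \
    #       --pytorch_dump_folder_path ./speecht5_hifigan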
| 123 |
"""simple docstring"""
from typing import Dict
from .base import GenericTensor, Pipeline
class FeatureExtractionPipeline(Pipeline):
    """Feature extraction pipeline: returns the hidden states of the base transformer, with no model head."""

    def _sanitize_parameters(self, truncation=None, tokenize_kwargs=None, return_tensors=None, **kwargs):
        if tokenize_kwargs is None:
            tokenize_kwargs = {}

        if truncation is not None:
            if "truncation" in tokenize_kwargs:
                raise ValueError(
                    "truncation parameter defined twice (given as keyword argument as well as in tokenize_kwargs)")
            tokenize_kwargs["truncation"] = truncation

        preprocess_params = tokenize_kwargs

        postprocess_params = {}
        if return_tensors is not None:
            postprocess_params["return_tensors"] = return_tensors

        return preprocess_params, {}, postprocess_params

    def preprocess(self, inputs, **tokenize_kwargs) -> Dict[str, GenericTensor]:
        return_tensors = self.framework
        model_inputs = self.tokenizer(inputs, return_tensors=return_tensors, **tokenize_kwargs)
        return model_inputs

    def _forward(self, model_inputs):
        model_outputs = self.model(**model_inputs)
        return model_outputs

    def postprocess(self, model_outputs, return_tensors=False):
        # [0] is the first available tensor, logits or last_hidden_state.
        if return_tensors:
            return model_outputs[0]
        if self.framework == "pt":
            return model_outputs[0].tolist()
        elif self.framework == "tf":
            return model_outputs[0].numpy().tolist()

    def __call__(self, *args, **kwargs):
        return super().__call__(*args, **kwargs)
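# Usage sketch (illustrative, not part of the original module); the checkpoint below is an
# assumption for the example:
#
#   from transformers import pipeline
#   extractor = pipeline("feature-extraction", model="distilbert-base-uncased")
#   features = extractor("Transformers is great!", return_tensors=True)
#   print(features.shape)  # (1, sequence_length, hidden_size)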
| 123 | 1 |
import json
import os
import unittest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_ftfy, require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class CLIPTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = CLIPTokenizer
    rust_tokenizer_class = CLIPTokenizerFast
    test_rust_tokenizer = True
    from_pretrained_kwargs = {}
    test_seq2seq = False
    def setUp(self):
        super().setUp()

        # fmt: off
        vocab = ['l', 'o', 'w', 'e', 'r', 's', 't', 'i', 'd', 'n', 'lo', 'l</w>', 'w</w>', 'r</w>', 't</w>', 'low</w>', 'er</w>', 'lowest</w>', 'newer</w>', 'wider', '<unk>', '<|startoftext|>', '<|endoftext|>']
        # fmt: on
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ['#version: 0.2', 'l o', 'lo w</w>', 'e r</w>']
        self.special_tokens_map = {'unk_token': '<unk>'}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['vocab_file'])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['merges_file'])
        with open(self.vocab_file, 'w', encoding='utf-8') as fp:
            fp.write(json.dumps(vocab_tokens) + '\n')
        with open(self.merges_file, 'w', encoding='utf-8') as fp:
            fp.write('\n'.join(merges))
    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return CLIPTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return CLIPTokenizerFast.from_pretrained(self.tmpdirname, **kwargs)
    def get_input_output_texts(self, tokenizer):
        input_text = 'lower newer'
        output_text = 'lower newer'
        return input_text, output_text
    def test_full_tokenizer(self):
        tokenizer = CLIPTokenizer(self.vocab_file, self.merges_file, **self.special_tokens_map)
        text = 'lower newer'
        bpe_tokens = ['lo', 'w', 'er</w>', 'n', 'e', 'w', 'er</w>']
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [10, 2, 16, 9, 3, 2, 16, 20]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)
@require_ftfy
    def test_check_encoding_slow_fast(self):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(F'{tokenizer.__class__.__name__} ({pretrained_name})'):
                tokenizer_s = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                text = 'A\n\'ll 11p223RF☆ho!!to?\'d\'d\'\'d of a cat to-$\'\'d.'
                text_tokenized_s = tokenizer_s.tokenize(text)
                text_tokenized_r = tokenizer_r.tokenize(text)

                self.assertListEqual(text_tokenized_s, text_tokenized_r)

                # Test that the tokenization is identical on an example containing a character (Latin Small Letter A
                # with Tilde) encoded in 2 different ways
                text = 'xa\u0303y' + ' ' + 'x\xe3y'
                text_tokenized_s = tokenizer_s.tokenize(text)
                text_tokenized_r = tokenizer_r.tokenize(text)

                self.assertListEqual(text_tokenized_s, text_tokenized_r)

                # Test that the tokenization is identical on unicode of space type
                spaces_unicodes = [
                    '\u0009',  # (horizontal tab, '\t')
                    '\u000B',  # (vertical tab)
                    '\u000C',  # (form feed)
                    '\u0020',  # (space, ' ')
                    '\u200E',  # (left-to-right mark):w
                    '\u200F',  # (right-to-left mark)
                ]
                for unicode_seq in spaces_unicodes:
                    text_tokenized_s = tokenizer_s.tokenize(unicode_seq)
                    text_tokenized_r = tokenizer_r.tokenize(unicode_seq)

                    self.assertListEqual(text_tokenized_s, text_tokenized_r)

                # Test that the tokenization is identical on unicode of line break type
                line_break_unicodes = [
                    '\u000A',  # (line feed, '\n')
                    '\r\n',  # (carriage return and line feed, '\r\n')
                    '\u000D',  # (carriage return, '\r')
                    '\r',  # (carriage return, '\r')
                    '\u000D',  # (carriage return, '\r')
                    '\u2028',  # (line separator)
                    '\u2029',  # (paragraph separator)
                    # "\u0085", # (next line)
                ]

                # The tokenization is not identical for the character "\u0085" (next line). The slow version using ftfy transforms
                # it into the Horizontal Ellipsis character "…" ("\u2026") while the fast version transforms it into a
                # space (and thus into an empty list).

                for unicode_seq in line_break_unicodes:
                    text_tokenized_s = tokenizer_s.tokenize(unicode_seq)
                    text_tokenized_r = tokenizer_r.tokenize(unicode_seq)

                    self.assertListEqual(text_tokenized_s, text_tokenized_r)
def snake_case ( self ):
# Test which aims to verify that the offsets are well adapted to the argument `add_prefix_space`
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F'{tokenizer.__class__.__name__} ({pretrained_name})' ):
                text_of_1_token = 'hello'  # `hello` is a token in the vocabulary of `pretrained_name`
                text = f'{text_of_1_token} {text_of_1_token}'
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name, use_fast=True)
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (0, len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1], (len(text_of_1_token) + 1, len(text_of_1_token) + 1 + len(text_of_1_token)))
                text = f' {text}'
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name, use_fast=True)
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (1, 1 + len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1], (1 + len(text_of_1_token) + 1, 1 + len(text_of_1_token) + 1 + len(text_of_1_token)))
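                # Concretely: len("hello") == 5, so the two tokens of "hello hello" span
                # character offsets (0, 5) and (6, 11); with a leading space they shift to
                # (1, 6) and (7, 12), which is exactly what the assertions above check.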
def snake_case ( self ):
# Test related to the breaking change introduced in transformers v4.17.0
        # We need to check that an error is raised when the user tries to load a previous version of the tokenizer.
with self.assertRaises(snake_case__ ) as context:
self.rust_tokenizer_class.from_pretrained('robot-test/old-clip-tokenizer' )
self.assertTrue(
context.exception.args[0].startswith(
'The `backend_tokenizer` provided does not match the expected format.' ) )
@require_ftfy
def snake_case ( self ):
super().test_tokenization_python_rust_equals()
def snake_case ( self ):
        # CLIP always lowercases letters
pass
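# Aside (a sketch, not part of the test suite): the two spellings of "xãy" used in
# the ftfy test above differ only in Unicode normalization form, which is why a
# correct tokenizer must treat them identically.
import unicodedata

decomposed = "xa\u0303y"  # 'a' followed by U+0303 COMBINING TILDE (NFD form)
composed = "x\xe3y"       # precomposed U+00E3 LATIN SMALL LETTER A WITH TILDE (NFC form)
assert unicodedata.normalize("NFC", decomposed) == composed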
| 685 |
import unittest
from parameterized import parameterized
from transformers import LlamaConfig, is_torch_available, set_seed
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import LlamaForCausalLM, LlamaForSequenceClassification, LlamaModel, LlamaTokenizer
class LlamaModelTester:
    def __init__(self, parent, batch_size=13, seq_length=7, is_training=True, use_input_mask=True, use_token_type_ids=False, use_labels=True, vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_labels=3, num_choices=4, scope=None):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)
        config = self.get_config()
        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def get_config(self):
        return LlamaConfig(
            vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, is_decoder=False, initializer_range=self.initializer_range)
    def create_and_check_model(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = LlamaModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
    def create_and_check_model_as_decoder(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, encoder_hidden_states, encoder_attention_mask):
        config.add_cross_attention = True  # reconstructed; the obfuscated original only shows the `True` assignment
        model = LlamaModel(config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids, attention_mask=input_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask)
        result = model(
            input_ids, attention_mask=input_mask, encoder_hidden_states=encoder_hidden_states)
        result = model(input_ids, attention_mask=input_mask)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
    def create_and_check_for_causal_lm(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, encoder_hidden_states, encoder_attention_mask):
        model = LlamaForCausalLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
    def create_and_check_decoder_model_past_large_inputs(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, encoder_hidden_states, encoder_attention_mask):
        config.is_decoder = True          # reconstructed; the obfuscated original
        config.add_cross_attention = True  # only shows the two `True` assignments
        model = LlamaForCausalLM(config=config)
        model.to(torch_device)
        model.eval()
        # first forward pass
        outputs = model(
            input_ids, attention_mask=input_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, use_cache=True)
        past_key_values = outputs.past_key_values
        # create hypothetical multiple next tokens and extend to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_mask = ids_tensor((self.batch_size, 3), vocab_size=2)
        # append to next input_ids and next attention mask
        next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)
        next_attention_mask = torch.cat([input_mask, next_mask], dim=-1)
        output_from_no_past = model(
            next_input_ids, attention_mask=next_attention_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, output_hidden_states=True)['hidden_states'][0]
        output_from_past = model(
            next_tokens, attention_mask=next_attention_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, past_key_values=past_key_values, output_hidden_states=True)['hidden_states'][0]
        # select random slice
        random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, :, random_slice_idx].detach()
        self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1])
        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {'input_ids': input_ids, 'attention_mask': input_mask}
return config, inputs_dict
@require_torch
class LlamaModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (LlamaModel, LlamaForCausalLM, LlamaForSequenceClassification) if is_torch_available() else ()
    all_generative_model_classes = (LlamaForCausalLM,) if is_torch_available() else ()
    pipeline_model_mapping = (
        {
            "feature-extraction": LlamaModel,
            "text-classification": LlamaForSequenceClassification,
            "text-generation": LlamaForCausalLM,
            "zero-shot": LlamaForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    test_headmasking = False
    test_pruning = False
    def setUp(self):
        self.model_tester = LlamaModelTester(self)
        self.config_tester = ConfigTester(self, config_class=LlamaConfig, hidden_size=37)
    def test_config(self):
self.config_tester.run_common_tests()
    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)
    def test_model_various_embeddings(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs)
    def test_llama_sequence_classification_model(self):
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        input_ids = input_dict['input_ids']
        attention_mask = input_ids.ne(1).to(torch_device)
        sequence_labels = ids_tensor([self.model_tester.batch_size], self.model_tester.type_sequence_label_size)
        model = LlamaForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels)
        self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels))
    def test_llama_sequence_classification_model_for_single_label(self):
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        config.problem_type = 'single_label_classification'
        input_ids = input_dict['input_ids']
        attention_mask = input_ids.ne(1).to(torch_device)
        sequence_labels = ids_tensor([self.model_tester.batch_size], self.model_tester.type_sequence_label_size)
        model = LlamaForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels)
        self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels))
    def test_llama_sequence_classification_model_for_multi_label(self):
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        config.problem_type = 'multi_label_classification'
        input_ids = input_dict['input_ids']
        attention_mask = input_ids.ne(1).to(torch_device)
        sequence_labels = ids_tensor(
            [self.model_tester.batch_size, config.num_labels], self.model_tester.type_sequence_label_size).to(torch.float)
        model = LlamaForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels)
        self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels))
@unittest.skip('LLaMA buffers include complex numbers, which breaks this test' )
def snake_case ( self ):
pass
@parameterized.expand([('linear',), ('dynamic',)] )
    def test_model_rope_scaling(self, scaling_type):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        short_input = ids_tensor([1, 10], config.vocab_size)
        long_input = ids_tensor([1, int(config.max_position_embeddings * 1.5)], config.vocab_size)
        set_seed(42)  # Fixed seed at init time so the two models get the same random weights
        original_model = LlamaModel(config)
        original_model.to(torch_device)
        original_model.eval()
        original_short_output = original_model(short_input).last_hidden_state
        original_long_output = original_model(long_input).last_hidden_state
        set_seed(42)  # Fixed seed at init time so the two models get the same random weights
        config.rope_scaling = {'type': scaling_type, 'factor': 10.0}
        scaled_model = LlamaModel(config)
        scaled_model.to(torch_device)
        scaled_model.eval()
        scaled_short_output = scaled_model(short_input).last_hidden_state
        scaled_long_output = scaled_model(long_input).last_hidden_state
        # Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original
        # maximum sequence length, so the outputs for the short input should match.
        if scaling_type == "dynamic":
            self.assertTrue(torch.allclose(original_short_output, scaled_short_output, atol=1e-5))
        else:
            self.assertFalse(torch.allclose(original_short_output, scaled_short_output, atol=1e-5))
        # The output should be different for long inputs
        self.assertFalse(torch.allclose(original_long_output, scaled_long_output, atol=1e-5))
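# Aside: a minimal sketch of the linear RoPE scaling exercised above (shapes
# assumed; an illustration, not the Transformers implementation). Linear scaling
# simply divides the position indices by the factor before computing the rotary
# angles, which stretches the usable context window.
def rope_angles(positions, dim, base=10000.0, factor=1.0):
    inv_freq = 1.0 / (base ** (torch.arange(0, dim, 2).float() / dim))
    scaled_positions = positions.float() / factor  # the linear scaling step
    return torch.outer(scaled_positions, inv_freq)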
@require_torch
class lowerCAmelCase_ ( unittest.TestCase ):
    @unittest.skip('Logits are not exactly the same, once we fix the instabilities somehow, will update!' )
@slow
def snake_case ( self ):
SCREAMING_SNAKE_CASE_ : Union[str, Any] = [1, 306, 4658, 278, 6593, 310, 2834, 338]
SCREAMING_SNAKE_CASE_ : List[Any] = LlamaForCausalLM.from_pretrained('meta-llama/Llama-2-7b-hf' ,device_map='auto' )
SCREAMING_SNAKE_CASE_ : List[str] = model(torch.tensor([input_ids] ) )
# Expected mean on dim = -1
SCREAMING_SNAKE_CASE_ : int = torch.tensor([[-6.6550, -4.1227, -4.9859, -3.2406, 0.8262, -3.0033, 1.2964, -3.3699]] )
torch.testing.assert_close(out.mean(-1 ) ,snake_case__ ,atol=1E-2 ,rtol=1E-2 )
# slicing logits[0, 0, 0:30]
# fmt: off
SCREAMING_SNAKE_CASE_ : List[Any] = torch.tensor([-12.8281, -7.4453, -0.4639, -8.0625, -7.2500, -8.0000, -6.4883, -7.7695, -7.8438, -7.0312, -6.2188, -7.1328, -1.8496, 1.9961, -8.6250, -6.7227, -12.8281, -6.9492, -7.0742, -7.7852, -7.5820, -7.9062, -6.9375, -7.9805, -8.3438, -8.1562, -8.0469, -7.6250, -7.7422, -7.3398,] )
# fmt: on
torch.testing.assert_close(out[0, 0, :30] ,snake_case__ ,atol=1E-5 ,rtol=1E-5 )
    @unittest.skip('Logits are not exactly the same, once we fix the instabilities somehow, will update!' )
@slow
def snake_case ( self ):
SCREAMING_SNAKE_CASE_ : List[str] = [1, 306, 4658, 278, 6593, 310, 2834, 338]
SCREAMING_SNAKE_CASE_ : Dict = LlamaForCausalLM.from_pretrained('meta-llama/Llama-2-13b-hf' ,device_map='auto' )
SCREAMING_SNAKE_CASE_ : Tuple = model(torch.tensor(snake_case__ ) )
# Expected mean on dim = -1
SCREAMING_SNAKE_CASE_ : List[str] = torch.tensor([[-2.0622, -1.2794, -1.1638, -0.9788, -1.4603, -1.0238, -1.7893, -1.4411]] )
torch.testing.assert_close(out.mean(-1 ) ,snake_case__ ,atol=1E-2 ,rtol=1E-2 )
# slicing logits[0, 0, 0:30]
# fmt: off
SCREAMING_SNAKE_CASE_ : List[str] = torch.tensor([-8.1406, -8.0547, 2.7461, -1.2344, -0.1448, -1.8262, -1.0020, -1.8154, -1.6895, -1.8516, -2.3574, -0.9277, 3.7598, 6.5742, -1.2998, -0.1177, -8.1406, -2.9688, -2.9199, -3.1699, -3.5254, -2.3555, -2.7988, -3.4141, -2.8262, -4.5195, -3.3379, -3.3164, -2.7832, -3.0273] )
# fmt: on
torch.testing.assert_close(out[0, 0, :30] ,snake_case__ ,atol=1E-5 ,rtol=1E-5 )
    @unittest.skip('Logits are not exactly the same, once we fix the instabilities somehow, will update!' )
@slow
def snake_case ( self ):
SCREAMING_SNAKE_CASE_ : Dict = [1, 306, 4658, 278, 6593, 310, 2834, 338]
SCREAMING_SNAKE_CASE_ : Union[str, Any] = LlamaForCausalLM.from_pretrained('meta-llama/Llama-2-13b-chat-hf' ,device_map='auto' )
SCREAMING_SNAKE_CASE_ : Tuple = model(torch.tensor(snake_case__ ) )
# Expected mean on dim = -1
SCREAMING_SNAKE_CASE_ : Dict = torch.tensor([[-0.8562, -1.8520, -0.7551, -0.4162, -1.5161, -1.2038, -2.4823, -2.3254]] )
torch.testing.assert_close(out.mean(-1 ) ,snake_case__ ,atol=1E-2 ,rtol=1E-2 )
# slicing logits[0, 0, 0:30]
# fmt: off
SCREAMING_SNAKE_CASE_ : List[Any] = torch.tensor([-2.2227, 4.8828, 0.9023, -0.4578, -0.7871, -0.1033, -0.6221, -0.5786, -0.7803, -1.0674, -1.2920, -0.1570, 0.8008, 2.0723, -0.9497, 0.2771, -2.2227, -0.7612, -1.4346, -1.2061, -1.6426, -0.3000, -0.7139, -1.1934, -1.8691, -1.6973, -1.5947, -1.2705, -0.3523, -0.5513] )
# fmt: on
torch.testing.assert_close(out.mean(-1 ) ,snake_case__ ,atol=1E-2 ,rtol=1E-2 )
    @unittest.skip(
        'Logits are not exactly the same, once we fix the instabilities somehow, will update! Also it is going to be a `too_slow` test' )
@slow
def snake_case ( self ):
SCREAMING_SNAKE_CASE_ : List[Any] = [1, 306, 4658, 278, 6593, 310, 2834, 338]
SCREAMING_SNAKE_CASE_ : str = LlamaForCausalLM.from_pretrained('meta-llama/Llama-2-70b-hf' ,device_map='auto' )
SCREAMING_SNAKE_CASE_ : int = model(torch.tensor(snake_case__ ) )
SCREAMING_SNAKE_CASE_ : List[Any] = torch.tensor(
[[-4.2327, -3.3360, -4.6665, -4.7631, -1.8180, -3.4170, -1.4211, -3.1810]] ,dtype=torch.floataa )
torch.testing.assert_close(out.mean(-1 ) ,snake_case__ ,atol=1E-2 ,rtol=1E-2 )
# fmt: off
SCREAMING_SNAKE_CASE_ : Optional[int] = torch.tensor([-9.4922, -3.9551, 1.7998, -5.6758, -5.1055, -5.8984, -4.8320, -6.8086, -6.5391, -5.6172, -5.5820, -5.5352, 1.7881, 3.6289, -6.5117, -3.4785, -9.5000, -6.0352, -6.8125, -6.0195, -6.6836, -5.4727, -6.2812, -6.0391, -7.3398, -7.4297, -7.4844, -6.5820, -5.8789, -5.5312] )
# fmt: on
torch.testing.assert_close(out[0, 0, :30] ,snake_case__ ,atol=1E-5 ,rtol=1E-5 )
    @unittest.skip('Model is currently gated' )
@slow
def snake_case ( self ):
SCREAMING_SNAKE_CASE_ : Union[str, Any] = 'Simply put, the theory of relativity states that 1) the laws of physics are the same everywhere in the universe and 2) the passage of time and the length of objects can vary depending on the observer\'s frame of reference.\n\nThe first part of the theory, that the laws of physics are the same everywhere, is known as the "princi'
SCREAMING_SNAKE_CASE_ : List[str] = 'Simply put, the theory of relativity states that '
SCREAMING_SNAKE_CASE_ : str = LlamaTokenizer.from_pretrained('meta-llama/Llama-2-13b-chat-hf' )
SCREAMING_SNAKE_CASE_ : Optional[int] = tokenizer.encode(snake_case__ ,return_tensors='pt' )
SCREAMING_SNAKE_CASE_ : Tuple = LlamaForCausalLM.from_pretrained(
'meta-llama/Llama-2-13b-chat-hf' ,device_map='sequential' ,use_safetensors=snake_case__ )
# greedy generation outputs
SCREAMING_SNAKE_CASE_ : Union[str, Any] = model.generate(snake_case__ ,max_new_tokens=64 ,top_p=snake_case__ ,temperature=1 ,do_sample=snake_case__ )
SCREAMING_SNAKE_CASE_ : Dict = tokenizer.decode(generated_ids[0] ,skip_special_tokens=snake_case__ )
self.assertEqual(snake_case__ ,snake_case__ )
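# Aside: the "greedy generation" comment above refers to decoding with sampling
# disabled; a minimal pure-PyTorch sketch of greedy decoding (model handle
# assumed) looks like this:
@torch.no_grad()
def greedy_generate(model, input_ids, max_new_tokens):
    for _ in range(max_new_tokens):
        logits = model(input_ids).logits  # (batch, seq, vocab)
        next_token = logits[:, -1, :].argmax(dim=-1, keepdim=True)
        input_ids = torch.cat([input_ids, next_token], dim=-1)
    return input_ids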
| 685 | 1 |
"""simple docstring"""
import argparse
from pathlib import Path
from typing import Dict, OrderedDict, Tuple
import torch
from audiocraft.models import MusicGen
from transformers import (
AutoFeatureExtractor,
AutoTokenizer,
EncodecModel,
MusicgenDecoderConfig,
MusicgenForConditionalGeneration,
MusicgenProcessor,
TaEncoderModel,
)
from transformers.models.musicgen.modeling_musicgen import MusicgenForCausalLM
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
EXPECTED_MISSING_KEYS = ["model.decoder.embed_positions.weights"]
def rename_keys(name):
    if "emb" in name:
        name = name.replace('emb', 'model.decoder.embed_tokens')
    if "transformer" in name:
        name = name.replace('transformer', 'model.decoder')
    if "cross_attention" in name:
        name = name.replace('cross_attention', 'encoder_attn')
    if "linear1" in name:
        name = name.replace('linear1', 'fc1')
    if "linear2" in name:
        name = name.replace('linear2', 'fc2')
    if "norm1" in name:
        name = name.replace('norm1', 'self_attn_layer_norm')
    if "norm_cross" in name:
        name = name.replace('norm_cross', 'encoder_attn_layer_norm')
    if "norm2" in name:
        name = name.replace('norm2', 'final_layer_norm')
    if "out_norm" in name:
        name = name.replace('out_norm', 'model.decoder.layer_norm')
    if "linears" in name:
        name = name.replace('linears', 'lm_heads')
    if "condition_provider.conditioners.description.output_proj" in name:
        name = name.replace('condition_provider.conditioners.description.output_proj', 'enc_to_dec_proj')
    return name
def rename_state_dict(state_dict: OrderedDict, hidden_size: int) -> Tuple[Dict, Dict]:
    keys = list(state_dict.keys())
    enc_dec_proj_state_dict = {}
    for key in keys:
        val = state_dict.pop(key)
        key = rename_keys(key)
        if "in_proj_weight" in key:
            # split fused qkv proj (destination key names reconstructed from the released script)
            state_dict[key.replace("in_proj_weight", "q_proj.weight")] = val[:hidden_size, :]
            state_dict[key.replace("in_proj_weight", "k_proj.weight")] = val[hidden_size : 2 * hidden_size, :]
            state_dict[key.replace("in_proj_weight", "v_proj.weight")] = val[-hidden_size:, :]
        elif "enc_to_dec_proj" in key:
            enc_dec_proj_state_dict[key[len("enc_to_dec_proj.") :]] = val
        else:
            state_dict[key] = val
    return state_dict, enc_dec_proj_state_dict
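# Hedged illustration of the fused-QKV split performed above: audiocraft stores
# the query/key/value projections as one stacked matrix, so the conversion
# slices it into three equal blocks along dim 0 (toy sizes below).
_hidden = 4
_fused = torch.arange(3 * _hidden * _hidden, dtype=torch.float32).reshape(3 * _hidden, _hidden)
_q, _k, _v = _fused[:_hidden, :], _fused[_hidden : 2 * _hidden, :], _fused[-_hidden:, :]
assert torch.equal(torch.cat([_q, _k, _v], dim=0), _fused)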
def decoder_config_from_checkpoint(checkpoint: str) -> MusicgenDecoderConfig:
    if checkpoint == "small":
        # default config values
        hidden_size = 1024
        num_hidden_layers = 24
        num_attention_heads = 16
    elif checkpoint == "medium":
        hidden_size = 1536
        num_hidden_layers = 48
        num_attention_heads = 24
    elif checkpoint == "large":
        hidden_size = 2048
        num_hidden_layers = 48
        num_attention_heads = 32
    else:
        raise ValueError(f"Checkpoint should be one of `['small', 'medium', 'large']`, got {checkpoint}.")
    config = MusicgenDecoderConfig(
        hidden_size=hidden_size, ffn_dim=hidden_size * 4, num_hidden_layers=num_hidden_layers, num_attention_heads=num_attention_heads)
    return config
@torch.no_grad()
def convert_musicgen_checkpoint(checkpoint, pytorch_dump_folder=None, repo_id=None, device="cpu"):
    fairseq_model = MusicGen.get_pretrained(checkpoint, device=device)
    decoder_config = decoder_config_from_checkpoint(checkpoint)
    decoder_state_dict = fairseq_model.lm.state_dict()
    decoder_state_dict, enc_dec_proj_state_dict = rename_state_dict(
        decoder_state_dict, hidden_size=decoder_config.hidden_size)
    text_encoder = TaEncoderModel.from_pretrained('t5-base')
    audio_encoder = EncodecModel.from_pretrained('facebook/encodec_32khz')
    decoder = MusicgenForCausalLM(decoder_config).eval()
    # load all decoder weights - expect that we'll be missing embeddings and enc-dec projection
    missing_keys, unexpected_keys = decoder.load_state_dict(decoder_state_dict, strict=False)
    for key in missing_keys.copy():
        if key.startswith(('text_encoder', 'audio_encoder')) or key in EXPECTED_MISSING_KEYS:
            missing_keys.remove(key)
    if len(missing_keys) > 0:
        raise ValueError(f"Missing key(s) in state_dict: {missing_keys}")
    if len(unexpected_keys) > 0:
        raise ValueError(f"Unexpected key(s) in state_dict: {unexpected_keys}")
    # init the composite model
    model = MusicgenForConditionalGeneration(text_encoder=text_encoder, audio_encoder=audio_encoder, decoder=decoder)
    # load the pre-trained enc-dec projection (from the decoder state dict)
    model.enc_to_dec_proj.load_state_dict(enc_dec_proj_state_dict)
    # check we can do a forward pass
    input_ids = torch.arange(0, 8, dtype=torch.long).reshape(2, -1)
    decoder_input_ids = input_ids.reshape(2 * 4, -1)
    with torch.no_grad():
        logits = model(input_ids=input_ids, decoder_input_ids=decoder_input_ids).logits
    if logits.shape != (8, 1, 2048):
        raise ValueError('Incorrect shape for logits')
    # now construct the processor
    tokenizer = AutoTokenizer.from_pretrained('t5-base')
    feature_extractor = AutoFeatureExtractor.from_pretrained('facebook/encodec_32khz', padding_side='left')
    processor = MusicgenProcessor(feature_extractor=feature_extractor, tokenizer=tokenizer)
    # set the appropriate bos/pad token ids (attribute names reconstructed from the released script)
    model.generation_config.decoder_start_token_id = 2048
    model.generation_config.pad_token_id = 2048
    # set other default generation config params
    model.generation_config.max_length = int(30 * audio_encoder.config.frame_rate)
    model.generation_config.do_sample = True
    model.generation_config.guidance_scale = 3.0
    if pytorch_dump_folder is not None:
        Path(pytorch_dump_folder).mkdir(exist_ok=True)
        logger.info(f"Saving model {checkpoint} to {pytorch_dump_folder}")
        model.save_pretrained(pytorch_dump_folder)
        processor.save_pretrained(pytorch_dump_folder)
    if repo_id:
        logger.info(f"Pushing model {checkpoint} to {repo_id}")
        model.push_to_hub(repo_id)
        processor.push_to_hub(repo_id)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--checkpoint",
default="small",
type=str,
help="Checkpoint size of the MusicGen model you'd like to convert. Can be one of: `['small', 'medium', 'large']`.",
)
parser.add_argument(
"--pytorch_dump_folder",
required=True,
default=None,
type=str,
help="Path to the output PyTorch model directory.",
)
parser.add_argument(
"--push_to_hub", default=None, type=str, help="Where to upload the converted model on the 🤗 hub."
)
parser.add_argument(
"--device", default="cpu", type=str, help="Torch device to run the conversion, either cpu or cuda."
)
    args = parser.parse_args()
convert_musicgen_checkpoint(args.checkpoint, args.pytorch_dump_folder, args.push_to_hub)
| 425 |
"""simple docstring"""
import os
import shutil
import sys
import tempfile
import unittest
from pathlib import Path
import pytest
import transformers
from transformers import (
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP,
AutoTokenizer,
BertConfig,
BertTokenizer,
BertTokenizerFast,
CTRLTokenizer,
GPTaTokenizer,
GPTaTokenizerFast,
PreTrainedTokenizerFast,
RobertaTokenizer,
RobertaTokenizerFast,
is_tokenizers_available,
)
from transformers.models.auto.configuration_auto import CONFIG_MAPPING, AutoConfig
from transformers.models.auto.tokenization_auto import (
TOKENIZER_MAPPING,
get_tokenizer_config,
tokenizer_class_from_name,
)
from transformers.models.roberta.configuration_roberta import RobertaConfig
from transformers.testing_utils import (
DUMMY_DIFF_TOKENIZER_IDENTIFIER,
DUMMY_UNKNOWN_IDENTIFIER,
SMALL_MODEL_IDENTIFIER,
RequestCounter,
require_tokenizers,
slow,
)
sys.path.append(str(Path(__file__).parent.parent.parent.parent / "utils"))
from test_module.custom_configuration import CustomConfig # noqa E402
from test_module.custom_tokenization import CustomTokenizer # noqa E402
if is_tokenizers_available():
from test_module.custom_tokenization_fast import CustomTokenizerFast
class UpperCamelCase ( unittest.TestCase ):
def _UpperCAmelCase ( self ) -> int:
'''simple docstring'''
lowercase_ : str = 0
@slow
def _UpperCAmelCase ( self ) -> List[str]:
'''simple docstring'''
for model_name in (x for x in BERT_PRETRAINED_CONFIG_ARCHIVE_MAP.keys() if "japanese" not in x):
lowercase_ : Tuple = AutoTokenizer.from_pretrained(__UpperCamelCase )
self.assertIsNotNone(__UpperCamelCase )
self.assertIsInstance(__UpperCamelCase ,(BertTokenizer, BertTokenizerFast) )
self.assertGreater(len(__UpperCamelCase ) ,0 )
for model_name in GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP.keys():
lowercase_ : List[Any] = AutoTokenizer.from_pretrained(__UpperCamelCase )
self.assertIsNotNone(__UpperCamelCase )
self.assertIsInstance(__UpperCamelCase ,(GPTaTokenizer, GPTaTokenizerFast) )
self.assertGreater(len(__UpperCamelCase ) ,0 )
def _UpperCAmelCase ( self ) -> Union[str, Any]:
'''simple docstring'''
lowercase_ : Tuple = AutoTokenizer.from_pretrained(__UpperCamelCase )
self.assertIsInstance(__UpperCamelCase ,(BertTokenizer, BertTokenizerFast) )
self.assertEqual(tokenizer.vocab_size ,12 )
def _UpperCAmelCase ( self ) -> Tuple:
'''simple docstring'''
lowercase_ : Dict = AutoTokenizer.from_pretrained(__UpperCamelCase )
self.assertIsInstance(__UpperCamelCase ,(RobertaTokenizer, RobertaTokenizerFast) )
self.assertEqual(tokenizer.vocab_size ,20 )
def _UpperCAmelCase ( self ) -> str:
'''simple docstring'''
lowercase_ : str = AutoConfig.from_pretrained(__UpperCamelCase )
self.assertIsInstance(__UpperCamelCase ,__UpperCamelCase )
# Check that tokenizer_type ≠ model_type
lowercase_ : List[str] = AutoTokenizer.from_pretrained(__UpperCamelCase ,config=__UpperCamelCase )
self.assertIsInstance(__UpperCamelCase ,(BertTokenizer, BertTokenizerFast) )
self.assertEqual(tokenizer.vocab_size ,12 )
def _UpperCAmelCase ( self ) -> Any:
'''simple docstring'''
with tempfile.TemporaryDirectory() as tmp_dir:
shutil.copy('./tests/fixtures/vocab.txt' ,os.path.join(__UpperCamelCase ,'vocab.txt' ) )
lowercase_ : List[Any] = AutoTokenizer.from_pretrained(__UpperCamelCase ,tokenizer_type='bert' ,use_fast=__UpperCamelCase )
self.assertIsInstance(__UpperCamelCase ,__UpperCamelCase )
with tempfile.TemporaryDirectory() as tmp_dir:
shutil.copy('./tests/fixtures/vocab.json' ,os.path.join(__UpperCamelCase ,'vocab.json' ) )
shutil.copy('./tests/fixtures/merges.txt' ,os.path.join(__UpperCamelCase ,'merges.txt' ) )
lowercase_ : Optional[int] = AutoTokenizer.from_pretrained(__UpperCamelCase ,tokenizer_type='gpt2' ,use_fast=__UpperCamelCase )
self.assertIsInstance(__UpperCamelCase ,__UpperCamelCase )
@require_tokenizers
def _UpperCAmelCase ( self ) -> Optional[int]:
'''simple docstring'''
with tempfile.TemporaryDirectory() as tmp_dir:
shutil.copy('./tests/fixtures/vocab.txt' ,os.path.join(__UpperCamelCase ,'vocab.txt' ) )
lowercase_ : str = AutoTokenizer.from_pretrained(__UpperCamelCase ,tokenizer_type='bert' )
self.assertIsInstance(__UpperCamelCase ,__UpperCamelCase )
with tempfile.TemporaryDirectory() as tmp_dir:
shutil.copy('./tests/fixtures/vocab.json' ,os.path.join(__UpperCamelCase ,'vocab.json' ) )
shutil.copy('./tests/fixtures/merges.txt' ,os.path.join(__UpperCamelCase ,'merges.txt' ) )
lowercase_ : Dict = AutoTokenizer.from_pretrained(__UpperCamelCase ,tokenizer_type='gpt2' )
self.assertIsInstance(__UpperCamelCase ,__UpperCamelCase )
def _UpperCAmelCase ( self ) -> Tuple:
'''simple docstring'''
with pytest.raises(__UpperCamelCase ):
AutoTokenizer.from_pretrained('./' ,tokenizer_type='xxx' )
@require_tokenizers
def _UpperCAmelCase ( self ) -> Optional[int]:
'''simple docstring'''
for tokenizer_class in [BertTokenizer, BertTokenizerFast, AutoTokenizer]:
lowercase_ : Optional[Any] = tokenizer_class.from_pretrained('wietsedv/bert-base-dutch-cased' )
self.assertIsInstance(__UpperCamelCase ,(BertTokenizer, BertTokenizerFast) )
if isinstance(__UpperCamelCase ,__UpperCamelCase ):
self.assertEqual(tokenizer.basic_tokenizer.do_lower_case ,__UpperCamelCase )
else:
self.assertEqual(tokenizer.do_lower_case ,__UpperCamelCase )
self.assertEqual(tokenizer.model_max_length ,512 )
@require_tokenizers
def _UpperCAmelCase ( self ) -> int:
'''simple docstring'''
for tokenizer_class in [BertTokenizer, BertTokenizerFast, AutoTokenizer]:
with self.assertRaisesRegex(
__UpperCamelCase ,'julien-c/herlolip-not-exists is not a local folder and is not a valid model identifier' ,):
lowercase_ : Optional[Any] = tokenizer_class.from_pretrained('julien-c/herlolip-not-exists' )
def _UpperCAmelCase ( self ) -> List[Any]:
'''simple docstring'''
lowercase_ : Any = TOKENIZER_MAPPING.values()
lowercase_ : Union[str, Any] = []
for slow_tok, fast_tok in tokenizers:
if slow_tok is not None:
tokenizer_names.append(slow_tok.__name__ )
if fast_tok is not None:
tokenizer_names.append(fast_tok.__name__ )
for tokenizer_name in tokenizer_names:
# must find the right class
tokenizer_class_from_name(__UpperCamelCase )
@require_tokenizers
def _UpperCAmelCase ( self ) -> Tuple:
'''simple docstring'''
self.assertIsInstance(AutoTokenizer.from_pretrained('bert-base-cased' ,use_fast=__UpperCamelCase ) ,__UpperCamelCase )
self.assertIsInstance(AutoTokenizer.from_pretrained('bert-base-cased' ) ,__UpperCamelCase )
@require_tokenizers
def _UpperCAmelCase ( self ) -> Dict:
'''simple docstring'''
lowercase_ : str = AutoTokenizer.from_pretrained('distilbert-base-uncased' ,do_lower_case=__UpperCamelCase )
lowercase_ : Optional[int] = 'Hello, world. How are you?'
lowercase_ : Optional[int] = tokenizer.tokenize(__UpperCamelCase )
self.assertEqual('[UNK]' ,tokens[0] )
lowercase_ : List[Any] = AutoTokenizer.from_pretrained('microsoft/mpnet-base' ,do_lower_case=__UpperCamelCase )
lowercase_ : Tuple = tokenizer.tokenize(__UpperCamelCase )
self.assertEqual('[UNK]' ,tokens[0] )
@require_tokenizers
def _UpperCAmelCase ( self ) -> Dict:
'''simple docstring'''
lowercase_ : List[Any] = AutoTokenizer.from_pretrained('robot-test/dummy-tokenizer-fast-with-model-config' )
self.assertEqual(type(__UpperCamelCase ) ,__UpperCamelCase )
self.assertEqual(tokenizer.model_max_length ,512 )
self.assertEqual(tokenizer.vocab_size ,3_0000 )
self.assertEqual(tokenizer.unk_token ,'[UNK]' )
self.assertEqual(tokenizer.padding_side ,'right' )
self.assertEqual(tokenizer.truncation_side ,'right' )
def _UpperCAmelCase ( self ) -> Union[str, Any]:
'''simple docstring'''
lowercase_ : Optional[int] = AutoTokenizer.from_pretrained(__UpperCamelCase )
self.assertIsInstance(__UpperCamelCase ,(BertTokenizer, BertTokenizerFast) )
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(__UpperCamelCase )
lowercase_ : List[str] = AutoTokenizer.from_pretrained(__UpperCamelCase )
self.assertIsInstance(__UpperCamelCase ,tokenizer.__class__ )
self.assertEqual(tokenizera.vocab_size ,12 )
def _UpperCAmelCase ( self ) -> str:
'''simple docstring'''
lowercase_ : List[str] = AutoTokenizer.from_pretrained('ctrl' )
# There is no fast CTRL so this always gives us a slow tokenizer.
self.assertIsInstance(__UpperCamelCase ,__UpperCamelCase )
def _UpperCAmelCase ( self ) -> str:
'''simple docstring'''
lowercase_ : Union[str, Any] = get_tokenizer_config('bert-base-cased' )
lowercase_ : Union[str, Any] = config.pop('_commit_hash' ,__UpperCamelCase )
# If we ever update bert-base-cased tokenizer config, this dict here will need to be updated.
self.assertEqual(__UpperCamelCase ,{'do_lower_case': False} )
# This model does not have a tokenizer_config so we get back an empty dict.
lowercase_ : Any = get_tokenizer_config(__UpperCamelCase )
self.assertDictEqual(__UpperCamelCase ,{} )
# A tokenizer saved with `save_pretrained` always creates a tokenizer config.
lowercase_ : List[Any] = AutoTokenizer.from_pretrained(__UpperCamelCase )
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(__UpperCamelCase )
lowercase_ : int = get_tokenizer_config(__UpperCamelCase )
# Check the class of the tokenizer was properly saved (note that it always saves the slow class).
self.assertEqual(config['tokenizer_class'] ,'BertTokenizer' )
def _UpperCAmelCase ( self ) -> Optional[int]:
'''simple docstring'''
try:
AutoConfig.register('custom' ,__UpperCamelCase )
AutoTokenizer.register(__UpperCamelCase ,slow_tokenizer_class=__UpperCamelCase )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(__UpperCamelCase ):
AutoTokenizer.register(__UpperCamelCase ,slow_tokenizer_class=__UpperCamelCase )
lowercase_ : List[str] = CustomTokenizer.from_pretrained(__UpperCamelCase )
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(__UpperCamelCase )
lowercase_ : Tuple = AutoTokenizer.from_pretrained(__UpperCamelCase )
self.assertIsInstance(__UpperCamelCase ,__UpperCamelCase )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in TOKENIZER_MAPPING._extra_content:
del TOKENIZER_MAPPING._extra_content[CustomConfig]
@require_tokenizers
def _UpperCAmelCase ( self ) -> Optional[int]:
'''simple docstring'''
try:
AutoConfig.register('custom' ,__UpperCamelCase )
# Can register in two steps
AutoTokenizer.register(__UpperCamelCase ,slow_tokenizer_class=__UpperCamelCase )
self.assertEqual(TOKENIZER_MAPPING[CustomConfig] ,(CustomTokenizer, None) )
AutoTokenizer.register(__UpperCamelCase ,fast_tokenizer_class=__UpperCamelCase )
self.assertEqual(TOKENIZER_MAPPING[CustomConfig] ,(CustomTokenizer, CustomTokenizerFast) )
del TOKENIZER_MAPPING._extra_content[CustomConfig]
# Can register in one step
AutoTokenizer.register(
__UpperCamelCase ,slow_tokenizer_class=__UpperCamelCase ,fast_tokenizer_class=__UpperCamelCase )
self.assertEqual(TOKENIZER_MAPPING[CustomConfig] ,(CustomTokenizer, CustomTokenizerFast) )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(__UpperCamelCase ):
AutoTokenizer.register(__UpperCamelCase ,fast_tokenizer_class=__UpperCamelCase )
            # We pass through a BertTokenizerFast because there is no slow-to-fast converter for our new tokenizer
            # and that model does not have a tokenizer.json
with tempfile.TemporaryDirectory() as tmp_dir:
lowercase_ : List[str] = BertTokenizerFast.from_pretrained(__UpperCamelCase )
bert_tokenizer.save_pretrained(__UpperCamelCase )
lowercase_ : Dict = CustomTokenizerFast.from_pretrained(__UpperCamelCase )
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(__UpperCamelCase )
lowercase_ : Any = AutoTokenizer.from_pretrained(__UpperCamelCase )
self.assertIsInstance(__UpperCamelCase ,__UpperCamelCase )
lowercase_ : Any = AutoTokenizer.from_pretrained(__UpperCamelCase ,use_fast=__UpperCamelCase )
self.assertIsInstance(__UpperCamelCase ,__UpperCamelCase )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in TOKENIZER_MAPPING._extra_content:
del TOKENIZER_MAPPING._extra_content[CustomConfig]
def _UpperCAmelCase ( self ) -> Optional[int]:
'''simple docstring'''
with self.assertRaises(__UpperCamelCase ):
lowercase_ : Union[str, Any] = AutoTokenizer.from_pretrained('hf-internal-testing/test_dynamic_tokenizer' )
# If remote code is disabled, we can't load this config.
with self.assertRaises(__UpperCamelCase ):
lowercase_ : Tuple = AutoTokenizer.from_pretrained(
'hf-internal-testing/test_dynamic_tokenizer' ,trust_remote_code=__UpperCamelCase )
lowercase_ : int = AutoTokenizer.from_pretrained('hf-internal-testing/test_dynamic_tokenizer' ,trust_remote_code=__UpperCamelCase )
self.assertTrue(tokenizer.special_attribute_present )
# Test tokenizer can be reloaded.
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(__UpperCamelCase )
lowercase_ : int = AutoTokenizer.from_pretrained(__UpperCamelCase ,trust_remote_code=__UpperCamelCase )
self.assertTrue(reloaded_tokenizer.special_attribute_present )
if is_tokenizers_available():
self.assertEqual(tokenizer.__class__.__name__ ,'NewTokenizerFast' )
self.assertEqual(reloaded_tokenizer.__class__.__name__ ,'NewTokenizerFast' )
# Test we can also load the slow version
lowercase_ : int = AutoTokenizer.from_pretrained(
'hf-internal-testing/test_dynamic_tokenizer' ,trust_remote_code=__UpperCamelCase ,use_fast=__UpperCamelCase )
self.assertTrue(tokenizer.special_attribute_present )
self.assertEqual(tokenizer.__class__.__name__ ,'NewTokenizer' )
# Test tokenizer can be reloaded.
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(__UpperCamelCase )
lowercase_ : Optional[Any] = AutoTokenizer.from_pretrained(__UpperCamelCase ,trust_remote_code=__UpperCamelCase ,use_fast=__UpperCamelCase )
self.assertEqual(reloaded_tokenizer.__class__.__name__ ,'NewTokenizer' )
self.assertTrue(reloaded_tokenizer.special_attribute_present )
else:
self.assertEqual(tokenizer.__class__.__name__ ,'NewTokenizer' )
self.assertEqual(reloaded_tokenizer.__class__.__name__ ,'NewTokenizer' )
@require_tokenizers
def _UpperCAmelCase ( self ) -> str:
'''simple docstring'''
class UpperCamelCase ( lowercase_ ):
lowercase = False
class UpperCamelCase ( lowercase_ ):
lowercase = NewTokenizer
lowercase = False
try:
AutoConfig.register('custom' ,__UpperCamelCase )
AutoTokenizer.register(__UpperCamelCase ,slow_tokenizer_class=__UpperCamelCase )
AutoTokenizer.register(__UpperCamelCase ,fast_tokenizer_class=__UpperCamelCase )
# If remote code is not set, the default is to use local
lowercase_ : List[str] = AutoTokenizer.from_pretrained('hf-internal-testing/test_dynamic_tokenizer' )
self.assertEqual(tokenizer.__class__.__name__ ,'NewTokenizerFast' )
self.assertFalse(tokenizer.special_attribute_present )
lowercase_ : str = AutoTokenizer.from_pretrained('hf-internal-testing/test_dynamic_tokenizer' ,use_fast=__UpperCamelCase )
self.assertEqual(tokenizer.__class__.__name__ ,'NewTokenizer' )
self.assertFalse(tokenizer.special_attribute_present )
# If remote code is disabled, we load the local one.
lowercase_ : List[str] = AutoTokenizer.from_pretrained(
'hf-internal-testing/test_dynamic_tokenizer' ,trust_remote_code=__UpperCamelCase )
self.assertEqual(tokenizer.__class__.__name__ ,'NewTokenizerFast' )
self.assertFalse(tokenizer.special_attribute_present )
lowercase_ : Optional[int] = AutoTokenizer.from_pretrained(
'hf-internal-testing/test_dynamic_tokenizer' ,trust_remote_code=__UpperCamelCase ,use_fast=__UpperCamelCase )
self.assertEqual(tokenizer.__class__.__name__ ,'NewTokenizer' )
self.assertFalse(tokenizer.special_attribute_present )
# If remote is enabled, we load from the Hub
lowercase_ : Optional[Any] = AutoTokenizer.from_pretrained(
'hf-internal-testing/test_dynamic_tokenizer' ,trust_remote_code=__UpperCamelCase )
self.assertEqual(tokenizer.__class__.__name__ ,'NewTokenizerFast' )
self.assertTrue(tokenizer.special_attribute_present )
lowercase_ : Optional[int] = AutoTokenizer.from_pretrained(
'hf-internal-testing/test_dynamic_tokenizer' ,trust_remote_code=__UpperCamelCase ,use_fast=__UpperCamelCase )
self.assertEqual(tokenizer.__class__.__name__ ,'NewTokenizer' )
self.assertTrue(tokenizer.special_attribute_present )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in TOKENIZER_MAPPING._extra_content:
del TOKENIZER_MAPPING._extra_content[CustomConfig]
def _UpperCAmelCase ( self ) -> List[Any]:
'''simple docstring'''
lowercase_ : List[str] = AutoTokenizer.from_pretrained(
'hf-internal-testing/test_dynamic_tokenizer_legacy' ,trust_remote_code=__UpperCamelCase )
self.assertTrue(tokenizer.special_attribute_present )
if is_tokenizers_available():
self.assertEqual(tokenizer.__class__.__name__ ,'NewTokenizerFast' )
# Test we can also load the slow version
lowercase_ : Tuple = AutoTokenizer.from_pretrained(
'hf-internal-testing/test_dynamic_tokenizer_legacy' ,trust_remote_code=__UpperCamelCase ,use_fast=__UpperCamelCase )
self.assertTrue(tokenizer.special_attribute_present )
self.assertEqual(tokenizer.__class__.__name__ ,'NewTokenizer' )
else:
self.assertEqual(tokenizer.__class__.__name__ ,'NewTokenizer' )
def _UpperCAmelCase ( self ) -> int:
'''simple docstring'''
with self.assertRaisesRegex(
__UpperCamelCase ,'bert-base is not a local folder and is not a valid model identifier' ):
lowercase_ : List[Any] = AutoTokenizer.from_pretrained('bert-base' )
def _UpperCAmelCase ( self ) -> Tuple:
'''simple docstring'''
with self.assertRaisesRegex(
__UpperCamelCase ,r'aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)' ):
lowercase_ : Optional[int] = AutoTokenizer.from_pretrained(__UpperCamelCase ,revision='aaaaaa' )
def _UpperCAmelCase ( self ) -> List[Any]:
'''simple docstring'''
lowercase_ : Tuple = AutoTokenizer.from_pretrained('hf-internal-testing/tiny-random-bert' )
with RequestCounter() as counter:
lowercase_ : List[Any] = AutoTokenizer.from_pretrained('hf-internal-testing/tiny-random-bert' )
self.assertEqual(counter.get_request_count ,0 )
self.assertEqual(counter.head_request_count ,1 )
self.assertEqual(counter.other_request_count ,0 )
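# Aside (a minimal sketch of the registration pattern exercised above; the class
# names here are hypothetical): a custom config type is registered with
# AutoConfig, then slow/fast tokenizer classes are mapped to it so that
# AutoTokenizer can resolve them.
#
#   class MyConfig(PretrainedConfig):
#       model_type = "my-model"
#
#   AutoConfig.register("my-model", MyConfig)
#   AutoTokenizer.register(MyConfig, slow_tokenizer_class=MyTokenizer, fast_tokenizer_class=MyTokenizerFast)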
| 425 | 1 |
'''simple docstring'''
import gc
import random
import unittest
import numpy as np
import torch
from diffusers import DDIMScheduler, KandinskyVaaPipeline, KandinskyVaaPriorPipeline, UNetaDConditionModel, VQModel
from diffusers.utils import floats_tensor, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class lowerCamelCase_ ( __a , unittest.TestCase ):
lowerCAmelCase__ = KandinskyVaaPipeline
lowerCAmelCase__ = [
'image_embeds',
'negative_image_embeds',
]
lowerCAmelCase__ = ['image_embeds', 'negative_image_embeds']
lowerCAmelCase__ = [
'generator',
'height',
'width',
'latents',
'guidance_scale',
'num_inference_steps',
'return_dict',
'guidance_scale',
'num_images_per_prompt',
'output_type',
'return_dict',
]
lowerCAmelCase__ = False
@property
def lowercase_ ( self : Optional[Any] ):
'''simple docstring'''
return 32
@property
def lowercase_ ( self : Any ):
'''simple docstring'''
return 32
@property
def lowercase_ ( self : Any ):
'''simple docstring'''
return self.time_input_dim
@property
def lowercase_ ( self : Dict ):
'''simple docstring'''
return self.time_input_dim * 4
@property
def lowercase_ ( self : List[Any] ):
'''simple docstring'''
return 100
@property
def lowercase_ ( self : Optional[int] ):
'''simple docstring'''
torch.manual_seed(0 )
UpperCAmelCase__ : Tuple = {
'''in_channels''': 4,
            # Out channels is double the in channels because the model predicts both mean and variance
'''out_channels''': 8,
'''addition_embed_type''': '''image''',
'''down_block_types''': ('''ResnetDownsampleBlock2D''', '''SimpleCrossAttnDownBlock2D'''),
'''up_block_types''': ('''SimpleCrossAttnUpBlock2D''', '''ResnetUpsampleBlock2D'''),
'''mid_block_type''': '''UNetMidBlock2DSimpleCrossAttn''',
'''block_out_channels''': (self.block_out_channels_a, self.block_out_channels_a * 2),
'''layers_per_block''': 1,
'''encoder_hid_dim''': self.text_embedder_hidden_size,
'''encoder_hid_dim_type''': '''image_proj''',
'''cross_attention_dim''': self.cross_attention_dim,
'''attention_head_dim''': 4,
'''resnet_time_scale_shift''': '''scale_shift''',
'''class_embed_type''': None,
}
UpperCAmelCase__ : Any = UNetaDConditionModel(**_A )
return model
@property
def lowercase_ ( self : Tuple ):
'''simple docstring'''
return {
"block_out_channels": [32, 64],
"down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 12,
"out_channels": 3,
"up_block_types": [
"AttnUpDecoderBlock2D",
"UpDecoderBlock2D",
],
"vq_embed_dim": 4,
}
@property
def lowercase_ ( self : Any ):
'''simple docstring'''
torch.manual_seed(0 )
UpperCAmelCase__ : Optional[Any] = VQModel(**self.dummy_movq_kwargs )
return model
def lowercase_ ( self : Dict ):
'''simple docstring'''
UpperCAmelCase__ : List[Any] = self.dummy_unet
UpperCAmelCase__ : int = self.dummy_movq
UpperCAmelCase__ : str = DDIMScheduler(
num_train_timesteps=1_000 , beta_schedule='''linear''' , beta_start=0.0_0_0_8_5 , beta_end=0.0_1_2 , clip_sample=_A , set_alpha_to_one=_A , steps_offset=1 , prediction_type='''epsilon''' , thresholding=_A , )
UpperCAmelCase__ : Union[str, Any] = {
'''unet''': unet,
'''scheduler''': scheduler,
'''movq''': movq,
}
return components
    def get_dummy_inputs(self, device, seed=0):
        '''simple docstring'''
        image_embeds = floats_tensor((1, self.text_embedder_hidden_size), rng=random.Random(seed)).to(device)
        negative_image_embeds = floats_tensor((1, self.text_embedder_hidden_size), rng=random.Random(seed + 1)).to(
            device)
        if str(device).startswith('''mps'''):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            '''image_embeds''': image_embeds,
            '''negative_image_embeds''': negative_image_embeds,
            '''generator''': generator,
            '''height''': 64,
            '''width''': 64,
            '''guidance_scale''': 4.0,
            '''num_inference_steps''': 2,
            '''output_type''': '''np''',
        }
        return inputs
def lowercase_ ( self : Optional[int] ):
'''simple docstring'''
UpperCAmelCase__ : Any = '''cpu'''
UpperCAmelCase__ : List[Any] = self.get_dummy_components()
UpperCAmelCase__ : Optional[int] = self.pipeline_class(**_A )
UpperCAmelCase__ : str = pipe.to(_A )
pipe.set_progress_bar_config(disable=_A )
UpperCAmelCase__ : Any = pipe(**self.get_dummy_inputs(_A ) )
UpperCAmelCase__ : List[str] = output.images
UpperCAmelCase__ : Union[str, Any] = pipe(
**self.get_dummy_inputs(_A ) , return_dict=_A , )[0]
UpperCAmelCase__ : Optional[int] = image[0, -3:, -3:, -1]
UpperCAmelCase__ : Optional[Any] = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
UpperCAmelCase__ : Union[str, Any] = np.array(
[0.6_2_3_7_9_7_6, 1.0, 0.3_6_4_4_1_3_3_2, 1.0, 0.7_0_6_3_9_6_3_4, 0.2_9_8_7_7_1_8_6, 0.8_5_6_5_2_1_2_5, 0.5_2_1_6_8_4_3, 0.5_4_4_5_4_0_4_6] )
assert (
np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
), f""" expected_slice {expected_slice}, but got {image_slice.flatten()}"""
assert (
np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
), f""" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"""
@slow
@require_torch_gpu
class lowerCamelCase_ ( unittest.TestCase ):
def lowercase_ ( self : Union[str, Any] ):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def lowercase_ ( self : str ):
'''simple docstring'''
UpperCAmelCase__ : str = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/kandinskyv22/kandinskyv22_text2img_cat_fp16.npy''' )
UpperCAmelCase__ : List[str] = KandinskyVaaPriorPipeline.from_pretrained(
'''kandinsky-community/kandinsky-2-2-prior''' , torch_dtype=torch.floataa )
pipe_prior.to(_A )
UpperCAmelCase__ : Optional[Any] = KandinskyVaaPipeline.from_pretrained(
'''kandinsky-community/kandinsky-2-2-decoder''' , torch_dtype=torch.floataa )
UpperCAmelCase__ : Optional[Any] = pipeline.to(_A )
pipeline.set_progress_bar_config(disable=_A )
UpperCAmelCase__ : int = '''red cat, 4k photo'''
UpperCAmelCase__ : Dict = torch.Generator(device='''cuda''' ).manual_seed(0 )
UpperCAmelCase__ , UpperCAmelCase__ : Dict = pipe_prior(
_A , generator=_A , num_inference_steps=5 , negative_prompt='''''' , ).to_tuple()
UpperCAmelCase__ : Union[str, Any] = torch.Generator(device='''cuda''' ).manual_seed(0 )
UpperCAmelCase__ : Dict = pipeline(
image_embeds=_A , negative_image_embeds=_A , generator=_A , num_inference_steps=100 , output_type='''np''' , )
UpperCAmelCase__ : Tuple = output.images[0]
assert image.shape == (512, 512, 3)
assert_mean_pixel_difference(_A , _A )
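# Aside: a minimal sketch of classifier-free guidance, the mechanism behind the
# `guidance_scale` argument used above (tensor shapes assumed):
def classifier_free_guidance(noise_uncond, noise_cond, guidance_scale):
    return noise_uncond + guidance_scale * (noise_cond - noise_uncond)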
| 312 |
'''simple docstring'''
import os
import unittest
from transformers import LxmertTokenizer, LxmertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class lowerCamelCase_ ( __a , unittest.TestCase ):
lowerCAmelCase__ = LxmertTokenizer
lowerCAmelCase__ = LxmertTokenizerFast
lowerCAmelCase__ = True
lowerCAmelCase__ = True
def lowercase_ ( self : Any ):
'''simple docstring'''
super().setUp()
        vocab_tokens = [
'''[UNK]''',
'''[CLS]''',
'''[SEP]''',
'''want''',
'''##want''',
'''##ed''',
'''wa''',
'''un''',
'''runn''',
'''##ing''',
''',''',
'''low''',
'''lowest''',
]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['''vocab_file'''])
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer:
vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) )
def lowercase_ ( self : List[Any] , _A : List[str] ):
'''simple docstring'''
        input_text = '''UNwant\u00E9d,running'''
        output_text = '''unwanted, running'''
return input_text, output_text
def lowercase_ ( self : str ):
'''simple docstring'''
        tokenizer = self.tokenizer_class(self.vocab_file)
        tokens = tokenizer.tokenize('''UNwant\u00E9d,running''')
        self.assertListEqual(tokens, ['''un''', '''##want''', '''##ed''', ''',''', '''runn''', '''##ing'''])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [7, 4, 5, 10, 8, 9])
def lowercase_ ( self : int ):
'''simple docstring'''
if not self.test_rust_tokenizer:
return
        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()
        sequence = '''I was born in 92000, and this is falsé.'''
        tokens = tokenizer.tokenize(sequence)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)
        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)
        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)
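# Aside (toy helper, not part of the Lxmert API): the "##" prefix in the
# expected tokens above marks WordPiece continuation pieces, so detokenization
# merges them back into whole words:
def join_wordpieces(tokens):
    words = []
    for tok in tokens:
        if tok.startswith('##') and words:
            words[-1] += tok[2:]
        else:
            words.append(tok)
    return words

assert join_wordpieces(['un', '##want', '##ed', ',', 'runn', '##ing']) == ['unwanted', ',', 'running']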
| 312 | 1 |
'''simple docstring'''
import functools
def mincost_tickets(days, costs):
    # Validation
    if not isinstance(days, list) or not all(isinstance(day, int) for day in days):
        raise ValueError("""The parameter days should be a list of integers""")
    if len(costs) != 3 or not all(isinstance(cost, int) for cost in costs):
        raise ValueError("""The parameter costs should be a list of three integers""")
    if len(days) == 0:
        return 0
    if min(days) <= 0:
        raise ValueError("""All days elements should be greater than 0""")
    if max(days) >= 366:
        raise ValueError("""All days elements should be less than 366""")
    days_set = set(days)

    @functools.cache
    def dynamic_programming(index) -> int:
        if index > 365:
            return 0
        if index not in days_set:
            return dynamic_programming(index + 1)
        return min(
            costs[0] + dynamic_programming(index + 1), costs[1] + dynamic_programming(index + 7), costs[2] + dynamic_programming(index + 30))

    return dynamic_programming(1)
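# Worked example (the canonical case for this problem): travelling on days
# [1, 4, 6, 7, 8, 20] with pass costs [2, 7, 15] is cheapest at 11 --
# a 1-day pass on day 1, a 7-day pass covering days 4-8, and a 1-day pass
# on day 20 (2 + 7 + 2).
assert mincost_tickets([1, 4, 6, 7, 8, 20], [2, 7, 15]) == 11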
if __name__ == "__main__":
import doctest
    doctest.testmod()
| 585 |
'''simple docstring'''
import unittest
import numpy as np
import torch
from diffusers import PNDMPipeline, PNDMScheduler, UNetaDModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
enable_full_determinism()
class _lowerCAmelCase ( unittest.TestCase ):
'''simple docstring'''
@property
def lowercase (self ) -> Optional[Any]:
torch.manual_seed(0 )
_snake_case = UNetaDModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=3 , out_channels=3 , down_block_types=("""DownBlock2D""", """AttnDownBlock2D""") , up_block_types=("""AttnUpBlock2D""", """UpBlock2D""") , )
return model
def lowercase (self ) -> Dict:
_snake_case = self.dummy_uncond_unet
_snake_case = PNDMScheduler()
_snake_case = PNDMPipeline(unet=UpperCAmelCase , scheduler=UpperCAmelCase )
pndm.to(UpperCAmelCase )
pndm.set_progress_bar_config(disable=UpperCAmelCase )
_snake_case = torch.manual_seed(0 )
_snake_case = pndm(generator=UpperCAmelCase , num_inference_steps=20 , output_type="""numpy""" ).images
_snake_case = torch.manual_seed(0 )
_snake_case = pndm(generator=UpperCAmelCase , num_inference_steps=20 , output_type="""numpy""" , return_dict=UpperCAmelCase )[0]
_snake_case = image[0, -3:, -3:, -1]
_snake_case = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
_snake_case = np.array([1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 0.0, 0.0] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
@slow
@require_torch
class _lowerCAmelCase ( unittest.TestCase ):
'''simple docstring'''
def lowercase (self ) -> Optional[Any]:
_snake_case = """google/ddpm-cifar10-32"""
_snake_case = UNetaDModel.from_pretrained(UpperCAmelCase )
_snake_case = PNDMScheduler()
_snake_case = PNDMPipeline(unet=UpperCAmelCase , scheduler=UpperCAmelCase )
pndm.to(UpperCAmelCase )
pndm.set_progress_bar_config(disable=UpperCAmelCase )
_snake_case = torch.manual_seed(0 )
_snake_case = pndm(generator=UpperCAmelCase , output_type="""numpy""" ).images
_snake_case = image[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
_snake_case = np.array([0.1564, 0.1_4645, 0.1406, 0.1_4715, 0.1_2425, 0.1_4045, 0.1_3115, 0.1_2175, 0.125] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
| 585 | 1 |
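
# Hedged sketch of the same pipeline outside the test harness. It assumes network
# access to the public google/ddpm-cifar10-32 checkpoint used by the slow test
# above; the obfuscated `UNetaDModel` in this file is diffusers' UNet2DModel.
import torch
from diffusers import PNDMPipeline, PNDMScheduler, UNet2DModel

unet = UNet2DModel.from_pretrained("google/ddpm-cifar10-32")
pndm = PNDMPipeline(unet=unet, scheduler=PNDMScheduler())
images = pndm(generator=torch.manual_seed(0), num_inference_steps=20, output_type="numpy").images
print(images.shape)  # (1, 32, 32, 3)
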
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
snake_case = logging.get_logger(__name__)
class UpperCAmelCase ( __SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE ):
A__ : List[str] = '''maskformer-swin'''
A__ : Any = {
'''num_attention_heads''': '''num_heads''',
'''num_hidden_layers''': '''num_layers''',
}
def __init__( self : Dict , __lowerCamelCase : Tuple=2_2_4 , __lowerCamelCase : Any=4 , __lowerCamelCase : Optional[Any]=3 , __lowerCamelCase : Tuple=9_6 , __lowerCamelCase : List[str]=[2, 2, 6, 2] , __lowerCamelCase : int=[3, 6, 1_2, 2_4] , __lowerCamelCase : Union[str, Any]=7 , __lowerCamelCase : Union[str, Any]=4.0 , __lowerCamelCase : Union[str, Any]=True , __lowerCamelCase : List[Any]=0.0 , __lowerCamelCase : Union[str, Any]=0.0 , __lowerCamelCase : Any=0.1 , __lowerCamelCase : int="gelu" , __lowerCamelCase : List[Any]=False , __lowerCamelCase : str=0.0_2 , __lowerCamelCase : List[str]=1E-5 , __lowerCamelCase : Tuple=None , __lowerCamelCase : Optional[int]=None , **__lowerCamelCase : Dict , ):
"""simple docstring"""
super().__init__(**__lowerCamelCase )
_snake_case = image_size
_snake_case = patch_size
_snake_case = num_channels
_snake_case = embed_dim
_snake_case = depths
_snake_case = len(__lowerCamelCase )
_snake_case = num_heads
_snake_case = window_size
_snake_case = mlp_ratio
_snake_case = qkv_bias
_snake_case = hidden_dropout_prob
_snake_case = attention_probs_dropout_prob
_snake_case = drop_path_rate
_snake_case = hidden_act
_snake_case = use_absolute_embeddings
_snake_case = layer_norm_eps
_snake_case = initializer_range
# we set the hidden_size attribute in order to make Swin work with VisionEncoderDecoderModel
# this indicates the channel dimension after the last stage of the model
_snake_case = int(embed_dim * 2 ** (len(__lowerCamelCase ) - 1) )
_snake_case = ['''stem'''] + [f"""stage{idx}""" for idx in range(1 , len(__lowerCamelCase ) + 1 )]
_snake_case , _snake_case = get_aligned_output_features_output_indices(
out_features=__lowerCamelCase , out_indices=__lowerCamelCase , stage_names=self.stage_names )
| 404 |
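
# Minimal usage sketch for the config class above (assumes a transformers release
# that ships MaskFormerSwinConfig; the defaults give embed_dim=96 and four stages):
from transformers import MaskFormerSwinConfig

config = MaskFormerSwinConfig(out_features=["stage2", "stage4"])
print(config.stage_names)   # ['stem', 'stage1', 'stage2', 'stage3', 'stage4']
print(config.out_features)  # ['stage2', 'stage4']
print(config.hidden_size)   # 96 * 2**3 = 768, the channel dim after the last stage
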
"""simple docstring"""
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import ViTConfig, ViTForImageClassification, ViTImageProcessor, ViTModel
from transformers.utils import logging
logging.set_verbosity_info()
snake_case = logging.get_logger(__name__)
def snake_case ( lowerCAmelCase_ , lowerCAmelCase_=False ) -> str:
_snake_case = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((f"""blocks.{i}.norm1.weight""", f"""vit.encoder.layer.{i}.layernorm_before.weight""") )
rename_keys.append((f"""blocks.{i}.norm1.bias""", f"""vit.encoder.layer.{i}.layernorm_before.bias""") )
rename_keys.append((f"""blocks.{i}.attn.proj.weight""", f"""vit.encoder.layer.{i}.attention.output.dense.weight""") )
rename_keys.append((f"""blocks.{i}.attn.proj.bias""", f"""vit.encoder.layer.{i}.attention.output.dense.bias""") )
rename_keys.append((f"""blocks.{i}.norm2.weight""", f"""vit.encoder.layer.{i}.layernorm_after.weight""") )
rename_keys.append((f"""blocks.{i}.norm2.bias""", f"""vit.encoder.layer.{i}.layernorm_after.bias""") )
rename_keys.append((f"""blocks.{i}.mlp.fc1.weight""", f"""vit.encoder.layer.{i}.intermediate.dense.weight""") )
rename_keys.append((f"""blocks.{i}.mlp.fc1.bias""", f"""vit.encoder.layer.{i}.intermediate.dense.bias""") )
rename_keys.append((f"""blocks.{i}.mlp.fc2.weight""", f"""vit.encoder.layer.{i}.output.dense.weight""") )
rename_keys.append((f"""blocks.{i}.mlp.fc2.bias""", f"""vit.encoder.layer.{i}.output.dense.bias""") )
# projection layer + position embeddings
rename_keys.extend(
[
('''cls_token''', '''vit.embeddings.cls_token'''),
('''patch_embed.proj.weight''', '''vit.embeddings.patch_embeddings.projection.weight'''),
('''patch_embed.proj.bias''', '''vit.embeddings.patch_embeddings.projection.bias'''),
('''pos_embed''', '''vit.embeddings.position_embeddings'''),
] )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
('''norm.weight''', '''layernorm.weight'''),
('''norm.bias''', '''layernorm.bias'''),
] )
# if just the base model, we should remove "vit" from all keys that start with "vit"
_snake_case = [(pair[0], pair[1][4:]) if pair[1].startswith('''vit''' ) else pair for pair in rename_keys]
else:
# layernorm + classification head
rename_keys.extend(
[
('''norm.weight''', '''vit.layernorm.weight'''),
('''norm.bias''', '''vit.layernorm.bias'''),
('''head.weight''', '''classifier.weight'''),
('''head.bias''', '''classifier.bias'''),
] )
return rename_keys
def snake_case ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_=False ) -> int:
for i in range(config.num_hidden_layers ):
if base_model:
_snake_case = ''''''
else:
_snake_case = '''vit.'''
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
_snake_case = state_dict.pop(f"""blocks.{i}.attn.qkv.weight""" )
_snake_case = state_dict.pop(f"""blocks.{i}.attn.qkv.bias""" )
# next, add query, keys and values (in that order) to the state dict
_snake_case = in_proj_weight[
: config.hidden_size, :
]
_snake_case = in_proj_bias[: config.hidden_size]
_snake_case = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
_snake_case = in_proj_bias[
config.hidden_size : config.hidden_size * 2
]
_snake_case = in_proj_weight[
-config.hidden_size :, :
]
_snake_case = in_proj_bias[-config.hidden_size :]
def snake_case ( lowerCAmelCase_ ) -> Any:
_snake_case = ['''head.weight''', '''head.bias''']
for k in ignore_keys:
state_dict.pop(lowerCAmelCase_ , lowerCAmelCase_ )
def snake_case ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) -> Dict:
_snake_case = dct.pop(lowerCAmelCase_ )
_snake_case = val
def snake_case ( ) -> List[Any]:
_snake_case = '''http://images.cocodataset.org/val2017/000000039769.jpg'''
_snake_case = Image.open(requests.get(lowerCAmelCase_ , stream=lowerCAmelCase_ ).raw )
return im
@torch.no_grad()
def snake_case ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_=True ) -> Any:
_snake_case = ViTConfig()
# patch_size
if model_name[-1] == "8":
_snake_case = 8
# set labels if required
if not base_model:
_snake_case = 1000
_snake_case = '''huggingface/label-files'''
_snake_case = '''imagenet-1k-id2label.json'''
_snake_case = json.load(open(hf_hub_download(lowerCAmelCase_ , lowerCAmelCase_ , repo_type='''dataset''' ) , '''r''' ) )
_snake_case = {int(lowerCAmelCase_ ): v for k, v in idalabel.items()}
_snake_case = idalabel
_snake_case = {v: k for k, v in idalabel.items()}
# size of the architecture
if model_name in ["dino_vits8", "dino_vits16"]:
_snake_case = 384
_snake_case = 1536
_snake_case = 12
_snake_case = 6
# load original model from torch hub
_snake_case = torch.hub.load('''facebookresearch/dino:main''' , lowerCAmelCase_ )
original_model.eval()
# load state_dict of original model, remove and rename some keys
_snake_case = original_model.state_dict()
if base_model:
remove_classification_head_(lowerCAmelCase_ )
_snake_case = create_rename_keys(lowerCAmelCase_ , base_model=lowerCAmelCase_ )
for src, dest in rename_keys:
rename_key(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
read_in_q_k_v(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
# load HuggingFace model
if base_model:
_snake_case = ViTModel(lowerCAmelCase_ , add_pooling_layer=lowerCAmelCase_ ).eval()
else:
_snake_case = ViTForImageClassification(lowerCAmelCase_ ).eval()
model.load_state_dict(lowerCAmelCase_ )
# Check outputs on an image, prepared by ViTImageProcessor
_snake_case = ViTImageProcessor()
_snake_case = image_processor(images=prepare_img() , return_tensors='''pt''' )
_snake_case = encoding['''pixel_values''']
_snake_case = model(lowerCAmelCase_ )
if base_model:
_snake_case = original_model(lowerCAmelCase_ )
assert torch.allclose(lowerCAmelCase_ , outputs.last_hidden_state[:, 0, :] , atol=1E-1 )
else:
_snake_case = original_model(lowerCAmelCase_ )
assert logits.shape == outputs.logits.shape
assert torch.allclose(lowerCAmelCase_ , outputs.logits , atol=1E-3 )
Path(lowerCAmelCase_ ).mkdir(exist_ok=lowerCAmelCase_ )
print(f"""Saving model {model_name} to {pytorch_dump_folder_path}""" )
model.save_pretrained(lowerCAmelCase_ )
print(f"""Saving image processor to {pytorch_dump_folder_path}""" )
image_processor.save_pretrained(lowerCAmelCase_ )
if __name__ == "__main__":
snake_case = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--model_name''',
default='''dino_vitb16''',
type=str,
help='''Name of the model trained with DINO you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
parser.add_argument(
'''--base_model''',
action='''store_true''',
help='''Whether to only convert the base model (no projection head weights).''',
)
parser.set_defaults(base_model=True)
snake_case = parser.parse_args()
convert_vit_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.base_model)
| 404 | 1 |
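
# Self-contained illustration of the fused-QKV split that read_in_q_k_v performs
# above: timm stores one (3*hidden, hidden) projection matrix, while the HF
# checkpoint wants separate query/key/value matrices in that order.
import torch

hidden_size = 4
in_proj_weight = torch.arange(3 * hidden_size * hidden_size, dtype=torch.float32)
in_proj_weight = in_proj_weight.reshape(3 * hidden_size, hidden_size)

query_w = in_proj_weight[:hidden_size, :]
key_w = in_proj_weight[hidden_size : hidden_size * 2, :]
value_w = in_proj_weight[-hidden_size:, :]
assert torch.equal(torch.cat([query_w, key_w, value_w], dim=0), in_proj_weight)
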
import json
import os
from pathlib import Path
import pytest
from datasets.download.download_config import DownloadConfig
from datasets.download.download_manager import DownloadManager
from datasets.utils.file_utils import hash_url_to_filename
a_ = """http://www.mocksite.com/file1.txt"""
a_ = """\"text\": [\"foo\", \"foo\"]"""
a_ = """6d8ce9aa78a471c7477201efbeabd3bb01ac2e7d100a6dc024ba1608361f90a8"""
class __lowerCAmelCase :
lowerCAmelCase__ = 2_0_0
lowerCAmelCase__ = {"""Content-Length""": """100"""}
lowerCAmelCase__ = {}
def lowerCamelCase ( self , **__UpperCAmelCase ):
'''simple docstring'''
return [bytes(__lowercase , '''utf-8''' )]
def a__ ( *_UpperCamelCase : Optional[int] ,**_UpperCamelCase : List[str] ):
return MockResponse()
@pytest.mark.parametrize('''urls_type''' ,[str, list, dict] )
def a__ ( _UpperCamelCase : Dict ,_UpperCamelCase : Union[str, Any] ,_UpperCamelCase : Union[str, Any] ):
import requests
monkeypatch.setattr(UpperCamelCase__ ,'''request''' ,UpperCamelCase__ )
__lowerCamelCase = URL
if issubclass(UpperCamelCase__ ,UpperCamelCase__ ):
__lowerCamelCase = url
elif issubclass(UpperCamelCase__ ,UpperCamelCase__ ):
__lowerCamelCase = [url]
elif issubclass(UpperCamelCase__ ,UpperCamelCase__ ):
__lowerCamelCase = {'''train''': url}
__lowerCamelCase = '''dummy'''
__lowerCamelCase = '''downloads'''
__lowerCamelCase = tmp_path
__lowerCamelCase = DownloadConfig(
cache_dir=os.path.join(UpperCamelCase__ ,UpperCamelCase__ ) ,use_etag=UpperCamelCase__ ,)
__lowerCamelCase = DownloadManager(dataset_name=UpperCamelCase__ ,download_config=UpperCamelCase__ )
__lowerCamelCase = dl_manager.download(UpperCamelCase__ )
__lowerCamelCase = urls
for downloaded_paths in [downloaded_paths]:
if isinstance(UpperCamelCase__ ,UpperCamelCase__ ):
__lowerCamelCase = [downloaded_paths]
__lowerCamelCase = [urls]
elif isinstance(UpperCamelCase__ ,UpperCamelCase__ ):
assert "train" in downloaded_paths.keys()
__lowerCamelCase = downloaded_paths.values()
__lowerCamelCase = urls.values()
assert downloaded_paths
for downloaded_path, input_url in zip(UpperCamelCase__ ,UpperCamelCase__ ):
assert downloaded_path == dl_manager.downloaded_paths[input_url]
__lowerCamelCase = Path(UpperCamelCase__ )
__lowerCamelCase = downloaded_path.parts
assert parts[-1] == HASH
assert parts[-2] == cache_subdir
assert downloaded_path.exists()
__lowerCamelCase = downloaded_path.read_text()
assert content == CONTENT
__lowerCamelCase = downloaded_path.with_suffix('''.json''' )
assert metadata_downloaded_path.exists()
__lowerCamelCase = json.loads(metadata_downloaded_path.read_text() )
assert metadata_content == {"url": URL, "etag": None}
@pytest.mark.parametrize('''paths_type''' ,[str, list, dict] )
def a__ ( _UpperCamelCase : str ,_UpperCamelCase : Optional[Any] ,_UpperCamelCase : Dict ):
__lowerCamelCase = str(UpperCamelCase__ )
if issubclass(UpperCamelCase__ ,UpperCamelCase__ ):
__lowerCamelCase = filename
elif issubclass(UpperCamelCase__ ,UpperCamelCase__ ):
__lowerCamelCase = [filename]
elif issubclass(UpperCamelCase__ ,UpperCamelCase__ ):
__lowerCamelCase = {'''train''': filename}
__lowerCamelCase = '''dummy'''
__lowerCamelCase = xz_file.parent
__lowerCamelCase = '''extracted'''
__lowerCamelCase = DownloadConfig(
cache_dir=UpperCamelCase__ ,use_etag=UpperCamelCase__ ,)
__lowerCamelCase = DownloadManager(dataset_name=UpperCamelCase__ ,download_config=UpperCamelCase__ )
__lowerCamelCase = dl_manager.extract(UpperCamelCase__ )
__lowerCamelCase = paths
for extracted_paths in [extracted_paths]:
if isinstance(UpperCamelCase__ ,UpperCamelCase__ ):
__lowerCamelCase = [extracted_paths]
__lowerCamelCase = [paths]
elif isinstance(UpperCamelCase__ ,UpperCamelCase__ ):
assert "train" in extracted_paths.keys()
__lowerCamelCase = extracted_paths.values()
__lowerCamelCase = paths.values()
assert extracted_paths
for extracted_path, input_path in zip(UpperCamelCase__ ,UpperCamelCase__ ):
assert extracted_path == dl_manager.extracted_paths[input_path]
__lowerCamelCase = Path(UpperCamelCase__ )
__lowerCamelCase = extracted_path.parts
assert parts[-1] == hash_url_to_filename(UpperCamelCase__ ,etag=UpperCamelCase__ )
assert parts[-2] == extracted_subdir
assert extracted_path.exists()
__lowerCamelCase = extracted_path.read_text()
__lowerCamelCase = text_file.read_text()
assert extracted_file_content == expected_file_content
def a__ ( _UpperCamelCase : List[Any] ,_UpperCamelCase : int ):
assert path.endswith('''.jsonl''' )
for num_items, line in enumerate(UpperCamelCase__ ,start=1 ):
__lowerCamelCase = json.loads(line.decode('''utf-8''' ) )
assert item.keys() == {"col_1", "col_2", "col_3"}
assert num_items == 4
@pytest.mark.parametrize('''archive_jsonl''' ,['''tar_jsonl_path''', '''zip_jsonl_path'''] )
def a__ ( _UpperCamelCase : Tuple ,_UpperCamelCase : int ):
__lowerCamelCase = request.getfixturevalue(UpperCamelCase__ )
__lowerCamelCase = DownloadManager()
for num_jsonl, (path, file) in enumerate(dl_manager.iter_archive(UpperCamelCase__ ) ,start=1 ):
_test_jsonl(UpperCamelCase__ ,UpperCamelCase__ )
assert num_jsonl == 2
@pytest.mark.parametrize('''archive_nested_jsonl''' ,['''tar_nested_jsonl_path''', '''zip_nested_jsonl_path'''] )
def a__ ( _UpperCamelCase : Tuple ,_UpperCamelCase : Dict ):
__lowerCamelCase = request.getfixturevalue(UpperCamelCase__ )
__lowerCamelCase = DownloadManager()
for num_tar, (path, file) in enumerate(dl_manager.iter_archive(UpperCamelCase__ ) ,start=1 ):
for num_jsonl, (subpath, subfile) in enumerate(dl_manager.iter_archive(UpperCamelCase__ ) ,start=1 ):
_test_jsonl(UpperCamelCase__ ,UpperCamelCase__ )
assert num_tar == 1
assert num_jsonl == 2
def a__ ( _UpperCamelCase : int ):
__lowerCamelCase = DownloadManager()
for num_file, file in enumerate(dl_manager.iter_files(UpperCamelCase__ ) ,start=1 ):
assert os.path.basename(UpperCamelCase__ ) == ("test.txt" if num_file == 1 else "train.txt")
assert num_file == 2
| 175 |
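
# The cache-layout convention the assertions above rely on, in short: a download
# is stored under <cache_dir>/<subdir>/<hash-of-url>, with a "<hash>.json" sidecar
# holding {"url": ..., "etag": ...}. The hash itself is deterministic:
from datasets.utils.file_utils import hash_url_to_filename

print(hash_url_to_filename("http://www.mocksite.com/file1.txt", etag=None))
# should match the HASH constant asserted at the top of this test module
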
'''simple docstring'''
# Copyright 2022 The HuggingFace Team and The OpenBMB Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
a_ = {
'configuration_cpmant': ['CPMANT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'CpmAntConfig'],
'tokenization_cpmant': ['CpmAntTokenizer'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ = [
'CPMANT_PRETRAINED_MODEL_ARCHIVE_LIST',
'CpmAntForCausalLM',
'CpmAntModel',
'CpmAntPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_cpmant import CPMANT_PRETRAINED_CONFIG_ARCHIVE_MAP, CpmAntConfig
from .tokenization_cpmant import CpmAntTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_cpmant import (
CPMANT_PRETRAINED_MODEL_ARCHIVE_LIST,
CpmAntForCausalLM,
CpmAntModel,
CpmAntPreTrainedModel,
)
else:
import sys
    a_ = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 296 | 0 |
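
# Illustrative sketch of the lazy-import pattern used above (simplified; the real
# transformers._LazyModule also registers submodules and surfaces import errors):
import importlib
import types


class LazyModule(types.ModuleType):
    def __init__(self, name: str, import_structure: dict):
        super().__init__(name)
        # Map each exported attribute to the submodule that defines it.
        self._attr_to_module = {
            attr: module for module, attrs in import_structure.items() for attr in attrs
        }

    def __getattr__(self, attr: str):
        if attr not in self._attr_to_module:
            raise AttributeError(f"module {self.__name__!r} has no attribute {attr!r}")
        submodule = importlib.import_module(f"{self.__name__}.{self._attr_to_module[attr]}")
        value = getattr(submodule, attr)
        setattr(self, attr, value)  # cache so the submodule import happens at most once
        return value
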
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
__magic_name__ = logging.get_logger(__name__)
__magic_name__ = {
'''ut/deta''': '''https://huggingface.co/ut/deta/resolve/main/config.json''',
}
class _SCREAMING_SNAKE_CASE ( __UpperCamelCase ):
_A : List[str] = 'deta'
_A : Tuple = {
'hidden_size': 'd_model',
'num_attention_heads': 'encoder_attention_heads',
}
def __init__( self , lowerCamelCase=None , lowerCamelCase=9_00 , lowerCamelCase=20_48 , lowerCamelCase=6 , lowerCamelCase=20_48 , lowerCamelCase=8 , lowerCamelCase=6 , lowerCamelCase=10_24 , lowerCamelCase=8 , lowerCamelCase=0.0 , lowerCamelCase=True , lowerCamelCase="relu" , lowerCamelCase=2_56 , lowerCamelCase=0.1 , lowerCamelCase=0.0 , lowerCamelCase=0.0 , lowerCamelCase=0.0_2 , lowerCamelCase=1.0 , lowerCamelCase=True , lowerCamelCase=False , lowerCamelCase="sine" , lowerCamelCase=5 , lowerCamelCase=4 , lowerCamelCase=4 , lowerCamelCase=True , lowerCamelCase=3_00 , lowerCamelCase=True , lowerCamelCase=True , lowerCamelCase=1 , lowerCamelCase=5 , lowerCamelCase=2 , lowerCamelCase=1 , lowerCamelCase=1 , lowerCamelCase=5 , lowerCamelCase=2 , lowerCamelCase=0.1 , lowerCamelCase=0.2_5 , **lowerCamelCase , ):
if backbone_config is None:
logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone." )
snake_case__ = CONFIG_MAPPING["resnet"](out_features=["stage2", "stage3", "stage4"] )
else:
if isinstance(lowerCamelCase , lowerCamelCase ):
snake_case__ = backbone_config.pop("model_type" )
snake_case__ = CONFIG_MAPPING[backbone_model_type]
snake_case__ = config_class.from_dict(lowerCamelCase )
snake_case__ = backbone_config
snake_case__ = num_queries
snake_case__ = max_position_embeddings
snake_case__ = d_model
snake_case__ = encoder_ffn_dim
snake_case__ = encoder_layers
snake_case__ = encoder_attention_heads
snake_case__ = decoder_ffn_dim
snake_case__ = decoder_layers
snake_case__ = decoder_attention_heads
snake_case__ = dropout
snake_case__ = attention_dropout
snake_case__ = activation_dropout
snake_case__ = activation_function
snake_case__ = init_std
snake_case__ = init_xavier_std
snake_case__ = encoder_layerdrop
snake_case__ = auxiliary_loss
snake_case__ = position_embedding_type
# deformable attributes
snake_case__ = num_feature_levels
snake_case__ = encoder_n_points
snake_case__ = decoder_n_points
snake_case__ = two_stage
snake_case__ = two_stage_num_proposals
snake_case__ = with_box_refine
snake_case__ = assign_first_stage
if two_stage is True and with_box_refine is False:
raise ValueError("If two_stage is True, with_box_refine must be True." )
# Hungarian matcher
snake_case__ = class_cost
snake_case__ = bbox_cost
snake_case__ = giou_cost
# Loss coefficients
snake_case__ = mask_loss_coefficient
snake_case__ = dice_loss_coefficient
snake_case__ = bbox_loss_coefficient
snake_case__ = giou_loss_coefficient
snake_case__ = eos_coefficient
snake_case__ = focal_alpha
super().__init__(is_encoder_decoder=lowerCamelCase , **lowerCamelCase )
@property
def A_ ( self ):
return self.encoder_attention_heads
@property
def A_ ( self ):
return self.d_model
def A_ ( self ):
snake_case__ = copy.deepcopy(self.__dict__ )
snake_case__ = self.backbone_config.to_dict()
snake_case__ = self.__class__.model_type
return output
| 530 |
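
# Hedged sketch of the constructor guard above (assumes a transformers release
# that ships DETA): two-stage detection without box refinement is rejected early.
from transformers import DetaConfig

DetaConfig(two_stage=True, with_box_refine=True)  # fine
try:
    DetaConfig(two_stage=True, with_box_refine=False)
except ValueError as err:
    print(err)  # "If two_stage is True, with_box_refine must be True."
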
import argparse
import json
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import AutoImageProcessor, SwinConfig, SwinForImageClassification
def SCREAMING_SNAKE_CASE__ ( __lowerCAmelCase ):
snake_case__ = SwinConfig()
snake_case__ = swin_name.split("_" )
snake_case__ = name_split[1]
snake_case__ = int(name_split[4] )
    snake_case__ = int(name_split[3].replace("window", "") )  # window size; handles two-digit windows like "window12"
if model_size == "tiny":
snake_case__ = 96
snake_case__ = (2, 2, 6, 2)
snake_case__ = (3, 6, 12, 24)
elif model_size == "small":
snake_case__ = 96
snake_case__ = (2, 2, 18, 2)
snake_case__ = (3, 6, 12, 24)
elif model_size == "base":
snake_case__ = 128
snake_case__ = (2, 2, 18, 2)
snake_case__ = (4, 8, 16, 32)
else:
snake_case__ = 192
snake_case__ = (2, 2, 18, 2)
snake_case__ = (6, 12, 24, 48)
if "in22k" in swin_name:
snake_case__ = 21_841
else:
snake_case__ = 1_000
snake_case__ = "huggingface/label-files"
snake_case__ = "imagenet-1k-id2label.json"
snake_case__ = json.load(open(hf_hub_download(__lowerCAmelCase , __lowerCAmelCase , repo_type="dataset" ) , "r" ) )
snake_case__ = {int(__lowerCAmelCase ): v for k, v in idalabel.items()}
snake_case__ = idalabel
snake_case__ = {v: k for k, v in idalabel.items()}
snake_case__ = img_size
snake_case__ = num_classes
snake_case__ = embed_dim
snake_case__ = depths
snake_case__ = num_heads
snake_case__ = window_size
return config
def SCREAMING_SNAKE_CASE__ ( __lowerCAmelCase ):
if "patch_embed.proj" in name:
snake_case__ = name.replace("patch_embed.proj" , "embeddings.patch_embeddings.projection" )
if "patch_embed.norm" in name:
snake_case__ = name.replace("patch_embed.norm" , "embeddings.norm" )
if "layers" in name:
snake_case__ = "encoder." + name
if "attn.proj" in name:
snake_case__ = name.replace("attn.proj" , "attention.output.dense" )
if "attn" in name:
snake_case__ = name.replace("attn" , "attention.self" )
if "norm1" in name:
snake_case__ = name.replace("norm1" , "layernorm_before" )
if "norm2" in name:
snake_case__ = name.replace("norm2" , "layernorm_after" )
if "mlp.fc1" in name:
snake_case__ = name.replace("mlp.fc1" , "intermediate.dense" )
if "mlp.fc2" in name:
snake_case__ = name.replace("mlp.fc2" , "output.dense" )
if name == "norm.weight":
snake_case__ = "layernorm.weight"
if name == "norm.bias":
snake_case__ = "layernorm.bias"
if "head" in name:
snake_case__ = name.replace("head" , "classifier" )
else:
snake_case__ = "swin." + name
return name
def SCREAMING_SNAKE_CASE__ ( __lowerCAmelCase , __lowerCAmelCase ):
for key in orig_state_dict.copy().keys():
snake_case__ = orig_state_dict.pop(__lowerCAmelCase )
if "mask" in key:
continue
elif "qkv" in key:
snake_case__ = key.split("." )
snake_case__ = int(key_split[1] )
snake_case__ = int(key_split[3] )
snake_case__ = model.swin.encoder.layers[layer_num].blocks[block_num].attention.self.all_head_size
if "weight" in key:
snake_case__ = val[:dim, :]
snake_case__ = val[
dim : dim * 2, :
]
snake_case__ = val[-dim:, :]
else:
snake_case__ = val[
:dim
]
snake_case__ = val[
dim : dim * 2
]
snake_case__ = val[
-dim:
]
else:
snake_case__ = val
return orig_state_dict
def SCREAMING_SNAKE_CASE__ ( __lowerCAmelCase , __lowerCAmelCase ):
snake_case__ = timm.create_model(__lowerCAmelCase , pretrained=__lowerCAmelCase )
timm_model.eval()
snake_case__ = get_swin_config(__lowerCAmelCase )
snake_case__ = SwinForImageClassification(__lowerCAmelCase )
model.eval()
snake_case__ = convert_state_dict(timm_model.state_dict() , __lowerCAmelCase )
model.load_state_dict(__lowerCAmelCase )
snake_case__ = "http://images.cocodataset.org/val2017/000000039769.jpg"
snake_case__ = AutoImageProcessor.from_pretrained("microsoft/{}".format(swin_name.replace("_" , "-" ) ) )
snake_case__ = Image.open(requests.get(__lowerCAmelCase , stream=__lowerCAmelCase ).raw )
snake_case__ = image_processor(images=__lowerCAmelCase , return_tensors="pt" )
snake_case__ = timm_model(inputs["pixel_values"] )
snake_case__ = model(**__lowerCAmelCase ).logits
assert torch.allclose(__lowerCAmelCase , __lowerCAmelCase , atol=1E-3 )
print(F"""Saving model {swin_name} to {pytorch_dump_folder_path}""" )
model.save_pretrained(__lowerCAmelCase )
print(F"""Saving image processor to {pytorch_dump_folder_path}""" )
image_processor.save_pretrained(__lowerCAmelCase )
if __name__ == "__main__":
__magic_name__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--swin_name''',
default='''swin_tiny_patch4_window7_224''',
type=str,
help='''Name of the Swin timm model you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
__magic_name__ = parser.parse_args()
convert_swin_checkpoint(args.swin_name, args.pytorch_dump_folder_path)
| 530 | 1 |
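
# Name-parsing sanity check for get_swin_config above, using a real timm model
# name with a two-digit window size:
name_split = "swin_base_patch4_window12_384".split("_")
assert int(name_split[4]) == 384                       # image size
assert int(name_split[3].replace("window", "")) == 12  # window size
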
import argparse
import os
import torch
from transformers import (
XLNetConfig,
XLNetForQuestionAnswering,
XLNetForSequenceClassification,
XLNetLMHeadModel,
load_tf_weights_in_xlnet,
)
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
A : Dict = {
'cola': 2,
'mnli': 3,
'mrpc': 2,
'sst-2': 2,
'sts-b': 1,
'qqp': 2,
'qnli': 2,
'rte': 2,
'wnli': 2,
}
logging.set_verbosity_info()
def UpperCamelCase ( __magic_name__ : Optional[Any] , __magic_name__ : List[Any] , __magic_name__ : str , __magic_name__ : Union[str, Any]=None ) -> Any:
"""simple docstring"""
lowercase__ = XLNetConfig.from_json_file(__magic_name__ )
lowercase__ = finetuning_task.lower() if finetuning_task is not None else """"""
if finetuning_task in GLUE_TASKS_NUM_LABELS:
print(f'''Building PyTorch XLNetForSequenceClassification model from configuration: {config}''' )
lowercase__ = finetuning_task
lowercase__ = GLUE_TASKS_NUM_LABELS[finetuning_task]
lowercase__ = XLNetForSequenceClassification(__magic_name__ )
elif "squad" in finetuning_task:
lowercase__ = finetuning_task
lowercase__ = XLNetForQuestionAnswering(__magic_name__ )
else:
lowercase__ = XLNetLMHeadModel(__magic_name__ )
# Load weights from tf checkpoint
load_tf_weights_in_xlnet(__magic_name__ , __magic_name__ , __magic_name__ )
# Save pytorch-model
lowercase__ = os.path.join(__magic_name__ , __magic_name__ )
lowercase__ = os.path.join(__magic_name__ , __magic_name__ )
print(f'''Save PyTorch model to {os.path.abspath(__magic_name__ )}''' )
torch.save(model.state_dict() , __magic_name__ )
print(f'''Save configuration file to {os.path.abspath(__magic_name__ )}''' )
with open(__magic_name__ , """w""" , encoding="""utf-8""" ) as f:
f.write(config.to_json_string() )
if __name__ == "__main__":
A : Optional[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--tf_checkpoint_path', default=None, type=str, required=True, help='Path to the TensorFlow checkpoint path.'
)
parser.add_argument(
'--xlnet_config_file',
default=None,
type=str,
required=True,
help=(
'The config json file corresponding to the pre-trained XLNet model. \n'
'This specifies the model architecture.'
),
)
parser.add_argument(
'--pytorch_dump_folder_path',
default=None,
type=str,
required=True,
help='Path to the folder to store the PyTorch model or dataset/vocab.',
)
parser.add_argument(
'--finetuning_task',
default=None,
type=str,
help='Name of a task on which the XLNet TensorFlow model was fine-tuned',
)
A : Any = parser.parse_args()
print(args)
convert_xlnet_checkpoint_to_pytorch(
args.tf_checkpoint_path, args.xlnet_config_file, args.pytorch_dump_folder_path, args.finetuning_task
)
| 15 |
"""simple docstring"""
import torch
from diffusers import DPMSolverSDEScheduler
from diffusers.utils import torch_device
from diffusers.utils.testing_utils import require_torchsde
from .test_schedulers import SchedulerCommonTest
@require_torchsde
class lowerCamelCase__ ( snake_case ):
SCREAMING_SNAKE_CASE = (DPMSolverSDEScheduler,)
SCREAMING_SNAKE_CASE = 10
def _UpperCamelCase ( self ,**A ):
UpperCAmelCase = {
"""num_train_timesteps""": 1_100,
"""beta_start""": 0.0001,
"""beta_end""": 0.02,
"""beta_schedule""": """linear""",
"""noise_sampler_seed""": 0,
}
config.update(**A )
return config
def _UpperCamelCase ( self ):
for timesteps in [10, 50, 100, 1_000]:
self.check_over_configs(num_train_timesteps=A )
def _UpperCamelCase ( self ):
for beta_start, beta_end in zip([0.00001, 0.0001, 0.001] ,[0.0002, 0.002, 0.02] ):
self.check_over_configs(beta_start=A ,beta_end=A )
def _UpperCamelCase ( self ):
for schedule in ["linear", "scaled_linear"]:
self.check_over_configs(beta_schedule=A )
def _UpperCamelCase ( self ):
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=A )
def _UpperCamelCase ( self ):
UpperCAmelCase = self.scheduler_classes[0]
UpperCAmelCase = self.get_scheduler_config()
UpperCAmelCase = scheduler_class(**A )
scheduler.set_timesteps(self.num_inference_steps )
UpperCAmelCase = self.dummy_model()
UpperCAmelCase = self.dummy_sample_deter * scheduler.init_noise_sigma
UpperCAmelCase = sample.to(A )
for i, t in enumerate(scheduler.timesteps ):
UpperCAmelCase = scheduler.scale_model_input(A ,A )
UpperCAmelCase = model(A ,A )
UpperCAmelCase = scheduler.step(A ,A ,A )
UpperCAmelCase = output.prev_sample
UpperCAmelCase = torch.sum(torch.abs(A ) )
UpperCAmelCase = torch.mean(torch.abs(A ) )
if torch_device in ["mps"]:
assert abs(result_sum.item() - 167.47821044921875 ) < 1e-2
assert abs(result_mean.item() - 0.2178705964565277 ) < 1e-3
elif torch_device in ["cuda"]:
assert abs(result_sum.item() - 171.59352111816406 ) < 1e-2
assert abs(result_mean.item() - 0.22342906892299652 ) < 1e-3
else:
assert abs(result_sum.item() - 162.52383422851562 ) < 1e-2
assert abs(result_mean.item() - 0.211619570851326 ) < 1e-3
def _UpperCamelCase ( self ):
UpperCAmelCase = self.scheduler_classes[0]
UpperCAmelCase = self.get_scheduler_config(prediction_type="""v_prediction""" )
UpperCAmelCase = scheduler_class(**A )
scheduler.set_timesteps(self.num_inference_steps )
UpperCAmelCase = self.dummy_model()
UpperCAmelCase = self.dummy_sample_deter * scheduler.init_noise_sigma
UpperCAmelCase = sample.to(A )
for i, t in enumerate(scheduler.timesteps ):
UpperCAmelCase = scheduler.scale_model_input(A ,A )
UpperCAmelCase = model(A ,A )
UpperCAmelCase = scheduler.step(A ,A ,A )
UpperCAmelCase = output.prev_sample
UpperCAmelCase = torch.sum(torch.abs(A ) )
UpperCAmelCase = torch.mean(torch.abs(A ) )
if torch_device in ["mps"]:
assert abs(result_sum.item() - 124.77149200439453 ) < 1e-2
assert abs(result_mean.item() - 0.16226289014816284 ) < 1e-3
elif torch_device in ["cuda"]:
assert abs(result_sum.item() - 128.1663360595703 ) < 1e-2
assert abs(result_mean.item() - 0.16688326001167297 ) < 1e-3
else:
assert abs(result_sum.item() - 119.8487548828125 ) < 1e-2
assert abs(result_mean.item() - 0.1560530662536621 ) < 1e-3
def _UpperCamelCase ( self ):
UpperCAmelCase = self.scheduler_classes[0]
UpperCAmelCase = self.get_scheduler_config()
UpperCAmelCase = scheduler_class(**A )
scheduler.set_timesteps(self.num_inference_steps ,device=A )
UpperCAmelCase = self.dummy_model()
UpperCAmelCase = self.dummy_sample_deter.to(A ) * scheduler.init_noise_sigma
for t in scheduler.timesteps:
UpperCAmelCase = scheduler.scale_model_input(A ,A )
UpperCAmelCase = model(A ,A )
UpperCAmelCase = scheduler.step(A ,A ,A )
UpperCAmelCase = output.prev_sample
UpperCAmelCase = torch.sum(torch.abs(A ) )
UpperCAmelCase = torch.mean(torch.abs(A ) )
if torch_device in ["mps"]:
assert abs(result_sum.item() - 167.46957397460938 ) < 1e-2
assert abs(result_mean.item() - 0.21805934607982635 ) < 1e-3
elif torch_device in ["cuda"]:
assert abs(result_sum.item() - 171.59353637695312 ) < 1e-2
assert abs(result_mean.item() - 0.22342908382415771 ) < 1e-3
else:
assert abs(result_sum.item() - 162.52383422851562 ) < 1e-2
assert abs(result_mean.item() - 0.211619570851326 ) < 1e-3
def _UpperCamelCase ( self ):
UpperCAmelCase = self.scheduler_classes[0]
UpperCAmelCase = self.get_scheduler_config()
UpperCAmelCase = scheduler_class(**A ,use_karras_sigmas=A )
scheduler.set_timesteps(self.num_inference_steps ,device=A )
UpperCAmelCase = self.dummy_model()
UpperCAmelCase = self.dummy_sample_deter.to(A ) * scheduler.init_noise_sigma
UpperCAmelCase = sample.to(A )
for t in scheduler.timesteps:
UpperCAmelCase = scheduler.scale_model_input(A ,A )
UpperCAmelCase = model(A ,A )
UpperCAmelCase = scheduler.step(A ,A ,A )
UpperCAmelCase = output.prev_sample
UpperCAmelCase = torch.sum(torch.abs(A ) )
UpperCAmelCase = torch.mean(torch.abs(A ) )
if torch_device in ["mps"]:
assert abs(result_sum.item() - 176.66974135742188 ) < 1e-2
assert abs(result_mean.item() - 0.23003872730981811 ) < 1e-2
elif torch_device in ["cuda"]:
assert abs(result_sum.item() - 177.63653564453125 ) < 1e-2
assert abs(result_mean.item() - 0.23003872730981811 ) < 1e-2
else:
assert abs(result_sum.item() - 170.3135223388672 ) < 1e-2
assert abs(result_mean.item() - 0.23003872730981811 ) < 1e-2
| 341 | 0 |
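
# The denoising loop all three tests above share, distilled into a runnable
# sketch (needs the torchsde extra, as the decorator above notes; a zero tensor
# stands in for a real UNet's noise prediction):
import torch
from diffusers import DPMSolverSDEScheduler

scheduler = DPMSolverSDEScheduler(noise_sampler_seed=0)
scheduler.set_timesteps(10)

sample = torch.randn(1, 3, 8, 8) * scheduler.init_noise_sigma
for t in scheduler.timesteps:
    scaled = scheduler.scale_model_input(sample, t)
    noise_pred = torch.zeros_like(scaled)  # stand-in for model(scaled, t)
    sample = scheduler.step(noise_pred, t, sample).prev_sample
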
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_snake_case = {'''configuration_opt''': ['''OPT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''OPTConfig''']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_snake_case = [
'''OPT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''OPTForCausalLM''',
'''OPTModel''',
'''OPTPreTrainedModel''',
'''OPTForSequenceClassification''',
'''OPTForQuestionAnswering''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_snake_case = ['''TFOPTForCausalLM''', '''TFOPTModel''', '''TFOPTPreTrainedModel''']
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_snake_case = [
'''FlaxOPTForCausalLM''',
'''FlaxOPTModel''',
'''FlaxOPTPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_opt import OPT_PRETRAINED_CONFIG_ARCHIVE_MAP, OPTConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_opt import (
OPT_PRETRAINED_MODEL_ARCHIVE_LIST,
OPTForCausalLM,
OPTForQuestionAnswering,
OPTForSequenceClassification,
OPTModel,
OPTPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_opt import TFOPTForCausalLM, TFOPTModel, TFOPTPreTrainedModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_opt import FlaxOPTForCausalLM, FlaxOPTModel, FlaxOPTPreTrainedModel
else:
import sys
_snake_case = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 713 |
"""simple docstring"""
def __snake_case ( SCREAMING_SNAKE_CASE: str ):
"""simple docstring"""
_lowerCAmelCase = 0
# if input_string is "aba" than new_input_string become "a|b|a"
_lowerCAmelCase = ''
_lowerCAmelCase = ''
# append each character + "|" in new_string for range(0, length-1)
for i in input_string[: len(SCREAMING_SNAKE_CASE ) - 1]:
new_input_string += i + "|"
# append last character
new_input_string += input_string[-1]
# we will store the starting and ending of previous furthest ending palindromic
# substring
_lowerCAmelCase , _lowerCAmelCase = 0, 0
# length[i] shows the length of palindromic substring with center i
_lowerCAmelCase = [1 for i in range(len(SCREAMING_SNAKE_CASE ) )]
# for each character in new_string find corresponding palindromic string
_lowerCAmelCase = 0
for j in range(len(SCREAMING_SNAKE_CASE ) ):
_lowerCAmelCase = 1 if j > r else min(length[l + r - j] // 2 , r - j + 1 )
while (
j - k >= 0
and j + k < len(SCREAMING_SNAKE_CASE )
and new_input_string[k + j] == new_input_string[j - k]
):
k += 1
_lowerCAmelCase = 2 * k - 1
# does this string is ending after the previously explored end (that is r) ?
# if yes the update the new r to the last index of this
if j + k - 1 > r:
_lowerCAmelCase = j - k + 1 # noqa: E741
_lowerCAmelCase = j + k - 1
# update max_length and start position
if max_length < length[j]:
_lowerCAmelCase = length[j]
_lowerCAmelCase = j
# create that string
_lowerCAmelCase = new_input_string[start - max_length // 2 : start + max_length // 2 + 1]
for i in s:
if i != "|":
output_string += i
return output_string
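

# Worked example: "abbbaba" is interleaved to "a|b|b|b|a|b|a"; the widest
# palindromic window, once the "|" separators are stripped, is "abbba".
assert palindromic_string("abbbaba") == "abbba"
assert palindromic_string("ababa") == "ababa"
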
if __name__ == "__main__":
import doctest
doctest.testmod()
| 491 | 0 |
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_rembert import RemBertTokenizer
else:
UpperCamelCase = None
UpperCamelCase = logging.get_logger(__name__)
UpperCamelCase = {'vocab_file': 'sentencepiece.model', 'tokenizer_file': 'tokenizer.json'}
UpperCamelCase = {
'vocab_file': {
'google/rembert': 'https://huggingface.co/google/rembert/resolve/main/sentencepiece.model',
},
'tokenizer_file': {
'google/rembert': 'https://huggingface.co/google/rembert/resolve/main/tokenizer.json',
},
}
UpperCamelCase = {
'google/rembert': 256,
}
UpperCamelCase = '▁'
class _A ( UpperCAmelCase_ ):
lowercase_ : str = VOCAB_FILES_NAMES
lowercase_ : Tuple = PRETRAINED_VOCAB_FILES_MAP
lowercase_ : List[str] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowercase_ : Union[str, Any] = RemBertTokenizer
def __init__( self : Dict , lowerCamelCase__ : Tuple=None , lowerCamelCase__ : int=None , lowerCamelCase__ : Optional[Any]=True , lowerCamelCase__ : Tuple=True , lowerCamelCase__ : List[Any]=False , lowerCamelCase__ : int="[CLS]" , lowerCamelCase__ : Optional[Any]="[SEP]" , lowerCamelCase__ : Tuple="<unk>" , lowerCamelCase__ : Tuple="[SEP]" , lowerCamelCase__ : Optional[Any]="<pad>" , lowerCamelCase__ : List[str]="[CLS]" , lowerCamelCase__ : List[str]="[MASK]" , **lowerCamelCase__ : Optional[int] , ):
"""simple docstring"""
__UpperCamelCase : Optional[int] = AddedToken(lowerCamelCase__ , lstrip=lowerCamelCase__ , rstrip=lowerCamelCase__ ) if isinstance(lowerCamelCase__ , lowerCamelCase__ ) else mask_token
super().__init__(
lowerCamelCase__ , tokenizer_file=lowerCamelCase__ , do_lower_case=lowerCamelCase__ , remove_space=lowerCamelCase__ , keep_accents=lowerCamelCase__ , bos_token=lowerCamelCase__ , eos_token=lowerCamelCase__ , unk_token=lowerCamelCase__ , sep_token=lowerCamelCase__ , pad_token=lowerCamelCase__ , cls_token=lowerCamelCase__ , mask_token=lowerCamelCase__ , **lowerCamelCase__ , )
__UpperCamelCase : List[Any] = do_lower_case
__UpperCamelCase : List[str] = remove_space
__UpperCamelCase : List[str] = keep_accents
__UpperCamelCase : Any = vocab_file
__UpperCamelCase : Union[str, Any] = False if not self.vocab_file else True
def a ( self : List[Any] , lowerCamelCase__ : List[int] , lowerCamelCase__ : Optional[List[int]] = None ):
"""simple docstring"""
__UpperCamelCase : List[str] = [self.sep_token_id]
__UpperCamelCase : List[str] = [self.cls_token_id]
if token_ids_a is None:
return cls + token_ids_a + sep
return cls + token_ids_a + sep + token_ids_a + sep
def a ( self : List[Any] , lowerCamelCase__ : List[int] , lowerCamelCase__ : Optional[List[int]] = None , lowerCamelCase__ : bool = False ):
"""simple docstring"""
if already_has_special_tokens:
if token_ids_a is not None:
raise ValueError(
"""You should not supply a second sequence if the provided sequence of """
"""ids is already formatted with special tokens for the model.""" )
return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_a]
if token_ids_a is not None:
return [1] + ([0] * len(lowerCamelCase__ )) + [1] + ([0] * len(lowerCamelCase__ )) + [1]
return [1] + ([0] * len(lowerCamelCase__ )) + [1]
def a ( self : List[Any] , lowerCamelCase__ : List[int] , lowerCamelCase__ : Optional[List[int]] = None ):
"""simple docstring"""
__UpperCamelCase : Optional[Any] = [self.sep_token_id]
__UpperCamelCase : Tuple = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def a ( self : Optional[Any] , lowerCamelCase__ : str , lowerCamelCase__ : Optional[str] = None ):
"""simple docstring"""
if not os.path.isdir(lowerCamelCase__ ):
logger.error("""Vocabulary path ({}) should be a directory""".format(lowerCamelCase__ ) )
return
__UpperCamelCase : str = os.path.join(
lowerCamelCase__ , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(lowerCamelCase__ ):
copyfile(self.vocab_file , lowerCamelCase__ )
return (out_vocab_file,)
| 269 |
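
# Pure-Python illustration of the layouts the three helpers above produce for a
# sequence pair; they correspond to transformers' build_inputs_with_special_tokens,
# get_special_tokens_mask and create_token_type_ids_from_sequences. The ids are
# placeholders, not real vocabulary entries.
cls, sep = [101], [102]
a, b = [1, 2, 3], [4, 5]
input_ids = cls + a + sep + b + sep
special_mask = [1] + [0] * len(a) + [1] + [0] * len(b) + [1]
token_type_ids = len(cls + a + sep) * [0] + len(b + sep) * [1]
assert len(input_ids) == len(special_mask) == len(token_type_ids) == 8
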
import argparse
import os
from pathlib import Path
import torch
from bark.generation import _load_model as _bark_load_model
from huggingface_hub import hf_hub_download
from transformers import EncodecConfig, EncodecModel, set_seed
from transformers.models.bark.configuration_bark import (
BarkCoarseConfig,
BarkConfig,
BarkFineConfig,
BarkSemanticConfig,
)
from transformers.models.bark.generation_configuration_bark import (
BarkCoarseGenerationConfig,
BarkFineGenerationConfig,
BarkGenerationConfig,
BarkSemanticGenerationConfig,
)
from transformers.models.bark.modeling_bark import BarkCoarseModel, BarkFineModel, BarkModel, BarkSemanticModel
from transformers.utils import logging
logging.set_verbosity_info()
UpperCamelCase = logging.get_logger(__name__)
set_seed(770)
UpperCamelCase = {
'c_attn': 'att_proj',
'c_proj': 'out_proj',
'c_fc': 'in_proj',
'transformer.': '',
'h.': 'layers.',
'ln_1': 'layernorm_1',
'ln_2': 'layernorm_2',
'ln_f': 'layernorm_final',
'wpe': 'position_embeds_layer',
'wte': 'input_embeds_layer',
}
UpperCamelCase = {
'text_small': {
'repo_id': 'suno/bark',
'file_name': 'text.pt',
},
'coarse_small': {
'repo_id': 'suno/bark',
'file_name': 'coarse.pt',
},
'fine_small': {
'repo_id': 'suno/bark',
'file_name': 'fine.pt',
},
'text': {
'repo_id': 'suno/bark',
'file_name': 'text_2.pt',
},
'coarse': {
'repo_id': 'suno/bark',
'file_name': 'coarse_2.pt',
},
'fine': {
'repo_id': 'suno/bark',
'file_name': 'fine_2.pt',
},
}
UpperCamelCase = os.path.dirname(os.path.abspath(__file__))
UpperCamelCase = os.path.join(os.path.expanduser('~'), '.cache')
UpperCamelCase = os.path.join(os.getenv('XDG_CACHE_HOME', default_cache_dir), 'suno', 'bark_v0')
def __lowerCamelCase ( __lowerCAmelCase : Optional[int] , __lowerCAmelCase : List[str]=False ) -> int:
__UpperCamelCase : Optional[Any] = model_type
if use_small:
key += "_small"
return os.path.join(__lowerCAmelCase , REMOTE_MODEL_PATHS[key]["""file_name"""] )
def __lowerCamelCase ( __lowerCAmelCase : int , __lowerCAmelCase : Optional[Any] ) -> List[Any]:
os.makedirs(__lowerCAmelCase , exist_ok=__lowerCAmelCase )
hf_hub_download(repo_id=__lowerCAmelCase , filename=__lowerCAmelCase , local_dir=__lowerCAmelCase )
def __lowerCamelCase ( __lowerCAmelCase : List[str] , __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : Union[str, Any]=False , __lowerCAmelCase : Tuple="text" ) -> Optional[Any]:
if model_type == "text":
__UpperCamelCase : List[str] = BarkSemanticModel
__UpperCamelCase : Dict = BarkSemanticConfig
__UpperCamelCase : str = BarkSemanticGenerationConfig
elif model_type == "coarse":
__UpperCamelCase : List[str] = BarkCoarseModel
__UpperCamelCase : int = BarkCoarseConfig
__UpperCamelCase : int = BarkCoarseGenerationConfig
elif model_type == "fine":
__UpperCamelCase : List[Any] = BarkFineModel
__UpperCamelCase : Dict = BarkFineConfig
__UpperCamelCase : int = BarkFineGenerationConfig
else:
raise NotImplementedError()
__UpperCamelCase : List[str] = f'{model_type}_small' if use_small else model_type
__UpperCamelCase : Dict = REMOTE_MODEL_PATHS[model_key]
if not os.path.exists(__lowerCAmelCase ):
logger.info(f'{model_type} model not found, downloading into `{CACHE_DIR}`.' )
_download(model_info["""repo_id"""] , model_info["""file_name"""] )
__UpperCamelCase : Union[str, Any] = torch.load(__lowerCAmelCase , map_location=__lowerCAmelCase )
    # this is a hack: older Bark checkpoints expose a single "vocab_size" that
    # must be split into the input/output vocab sizes the HF config expects
__UpperCamelCase : Any = checkpoint["""model_args"""]
if "input_vocab_size" not in model_args:
__UpperCamelCase : Tuple = model_args["""vocab_size"""]
__UpperCamelCase : List[str] = model_args["""vocab_size"""]
del model_args["vocab_size"]
# convert Bark model arguments to HF Bark model arguments
__UpperCamelCase : Tuple = model_args.pop("""n_head""" )
__UpperCamelCase : Tuple = model_args.pop("""n_embd""" )
__UpperCamelCase : Union[str, Any] = model_args.pop("""n_layer""" )
__UpperCamelCase : List[Any] = ConfigClass(**checkpoint["""model_args"""] )
__UpperCamelCase : Optional[int] = ModelClass(config=__lowerCAmelCase )
__UpperCamelCase : Dict = GenerationConfigClass()
__UpperCamelCase : Any = model_generation_config
__UpperCamelCase : str = checkpoint["""model"""]
# fixup checkpoint
__UpperCamelCase : Dict = """_orig_mod."""
for k, v in list(state_dict.items() ):
if k.startswith(__lowerCAmelCase ):
# replace part of the key with corresponding layer name in HF implementation
__UpperCamelCase : List[str] = k[len(__lowerCAmelCase ) :]
for old_layer_name in new_layer_name_dict:
__UpperCamelCase : List[Any] = new_k.replace(__lowerCAmelCase , new_layer_name_dict[old_layer_name] )
__UpperCamelCase : Optional[Any] = state_dict.pop(__lowerCAmelCase )
__UpperCamelCase : int = set(state_dict.keys() ) - set(model.state_dict().keys() )
__UpperCamelCase : List[Any] = {k for k in extra_keys if not k.endswith(""".attn.bias""" )}
__UpperCamelCase : Dict = set(model.state_dict().keys() ) - set(state_dict.keys() )
__UpperCamelCase : Optional[int] = {k for k in missing_keys if not k.endswith(""".attn.bias""" )}
if len(__lowerCAmelCase ) != 0:
raise ValueError(f'extra keys found: {extra_keys}' )
if len(__lowerCAmelCase ) != 0:
raise ValueError(f'missing keys: {missing_keys}' )
model.load_state_dict(__lowerCAmelCase , strict=__lowerCAmelCase )
__UpperCamelCase : Tuple = model.num_parameters(exclude_embeddings=__lowerCAmelCase )
__UpperCamelCase : Optional[int] = checkpoint["""best_val_loss"""].item()
logger.info(f'model loaded: {round(n_params/1e6 , 1 )}M params, {round(__lowerCAmelCase , 3 )} loss' )
model.eval()
model.to(__lowerCAmelCase )
del checkpoint, state_dict
return model
def __lowerCamelCase ( __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : int=False , __lowerCAmelCase : Tuple="text" ) -> Tuple:
if model_type not in ("text", "coarse", "fine"):
raise NotImplementedError()
__UpperCamelCase : str = """cpu""" # do conversion on cpu
__UpperCamelCase : str = _get_ckpt_path(__lowerCAmelCase , use_small=__lowerCAmelCase )
__UpperCamelCase : Optional[int] = _load_model(__lowerCAmelCase , __lowerCAmelCase , model_type=__lowerCAmelCase , use_small=__lowerCAmelCase )
# load bark initial model
__UpperCamelCase : Dict = _bark_load_model(__lowerCAmelCase , """cpu""" , model_type=__lowerCAmelCase , use_small=__lowerCAmelCase )
if model_type == "text":
__UpperCamelCase : Dict = bark_model["""model"""]
if model.num_parameters(exclude_embeddings=__lowerCAmelCase ) != bark_model.get_num_params():
raise ValueError("""initial and new models don't have the same number of parameters""" )
# check if same output as the bark model
__UpperCamelCase : List[str] = 5
__UpperCamelCase : List[Any] = 10
if model_type in ["text", "coarse"]:
__UpperCamelCase : List[Any] = torch.randint(256 , (batch_size, sequence_length) , dtype=torch.int )
__UpperCamelCase : List[str] = bark_model(__lowerCAmelCase )[0]
__UpperCamelCase : Dict = model(__lowerCAmelCase )
# take last logits
__UpperCamelCase : Union[str, Any] = output_new_model_total.logits[:, [-1], :]
else:
__UpperCamelCase : Optional[int] = 3
__UpperCamelCase : Tuple = 8
__UpperCamelCase : int = torch.randint(256 , (batch_size, sequence_length, n_codes_total) , dtype=torch.int )
__UpperCamelCase : Union[str, Any] = model(__lowerCAmelCase , __lowerCAmelCase )
__UpperCamelCase : Tuple = bark_model(__lowerCAmelCase , __lowerCAmelCase )
__UpperCamelCase : Dict = output_new_model_total.logits
    # any numerical difference should stem only from self-attention implementation details
if output_new_model.shape != output_old_model.shape:
raise ValueError("""initial and new outputs don't have the same shape""" )
if (output_new_model - output_old_model).abs().max().item() > 1e-3:
raise ValueError("""initial and new outputs are not equal""" )
Path(__lowerCAmelCase ).mkdir(exist_ok=__lowerCAmelCase )
model.save_pretrained(__lowerCAmelCase )
def __lowerCamelCase ( __lowerCAmelCase : Optional[int] , __lowerCAmelCase : Optional[int] , __lowerCAmelCase : List[str] , __lowerCAmelCase : List[str] , __lowerCAmelCase : int , __lowerCAmelCase : Optional[Any] , ) -> int:
__UpperCamelCase : Optional[int] = os.path.join(__lowerCAmelCase , __lowerCAmelCase )
__UpperCamelCase : str = BarkSemanticConfig.from_pretrained(os.path.join(__lowerCAmelCase , """config.json""" ) )
__UpperCamelCase : List[str] = BarkCoarseConfig.from_pretrained(os.path.join(__lowerCAmelCase , """config.json""" ) )
__UpperCamelCase : List[Any] = BarkFineConfig.from_pretrained(os.path.join(__lowerCAmelCase , """config.json""" ) )
__UpperCamelCase : List[str] = EncodecConfig.from_pretrained("""facebook/encodec_24khz""" )
__UpperCamelCase : Optional[int] = BarkSemanticModel.from_pretrained(__lowerCAmelCase )
__UpperCamelCase : List[Any] = BarkCoarseModel.from_pretrained(__lowerCAmelCase )
__UpperCamelCase : Union[str, Any] = BarkFineModel.from_pretrained(__lowerCAmelCase )
__UpperCamelCase : Union[str, Any] = EncodecModel.from_pretrained("""facebook/encodec_24khz""" )
__UpperCamelCase : Dict = BarkConfig.from_sub_model_configs(
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
__UpperCamelCase : List[Any] = BarkGenerationConfig.from_sub_model_configs(
semantic.generation_config , coarseAcoustic.generation_config , fineAcoustic.generation_config )
__UpperCamelCase : Optional[int] = BarkModel(__lowerCAmelCase )
__UpperCamelCase : List[Any] = semantic
__UpperCamelCase : Any = coarseAcoustic
__UpperCamelCase : Dict = fineAcoustic
__UpperCamelCase : List[str] = codec
__UpperCamelCase : Optional[Any] = bark_generation_config
Path(__lowerCAmelCase ).mkdir(exist_ok=__lowerCAmelCase )
bark.save_pretrained(__lowerCAmelCase , repo_id=__lowerCAmelCase , push_to_hub=__lowerCAmelCase )
if __name__ == "__main__":
UpperCamelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument('model_type', type=str, help='text, coarse or fine.')
parser.add_argument('pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument('--is_small', action='store_true', help='convert the small version instead of the large.')
UpperCamelCase = parser.parse_args()
load_model(args.pytorch_dump_folder_path, model_type=args.model_type, use_small=args.is_small)
| 269 | 1 |
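
# Toy demonstration of the checkpoint-key rewriting done while loading above:
# strip the torch.compile "_orig_mod." prefix, then apply the substring map.
new_layer_name_dict = {"c_attn": "att_proj", "transformer.": "", "h.": "layers."}
old_key = "_orig_mod.transformer.h.0.attn.c_attn.weight"
new_key = old_key[len("_orig_mod.") :]
for old, new in new_layer_name_dict.items():
    new_key = new_key.replace(old, new)
assert new_key == "layers.0.attn.att_proj.weight"
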
import json
import os
from typing import Dict, List, Optional, Tuple
import regex as re
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
__lowerCamelCase : Dict = logging.get_logger(__name__)
__lowerCamelCase : Optional[int] = {
'''vocab_file''': '''vocab.json''',
'''merges_file''': '''merges.txt''',
'''tokenizer_config_file''': '''tokenizer_config.json''',
}
__lowerCamelCase : List[str] = {
'''vocab_file''': {
'''facebook/blenderbot_small-90M''': '''https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/vocab.json'''
},
'''merges_file''': {
'''facebook/blenderbot_small-90M''': '''https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/merges.txt'''
},
'''tokenizer_config_file''': {
'''facebook/blenderbot_small-90M''': (
'''https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/tokenizer_config.json'''
)
},
}
__lowerCamelCase : Union[str, Any] = {'''facebook/blenderbot_small-90M''': 512}
def get_pairs(word):
    """
    Return the set of adjacent symbol pairs in a word.

    Word is represented as a tuple of symbols (symbols being variable-length strings).
    """
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char

    pairs = set(pairs)
    return pairs
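

# Quick check on the word representation used by the tokenizer below, where the
# last symbol carries the end-of-word marker "</w>":
assert get_pairs(("h", "i", "</w>")) == {("h", "i"), ("i", "</w>")}
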
class __snake_case ( __lowerCamelCase ):
lowerCAmelCase_ = VOCAB_FILES_NAMES
lowerCAmelCase_ = PRETRAINED_VOCAB_FILES_MAP
lowerCAmelCase_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCAmelCase_ = ['input_ids', 'attention_mask']
def __init__( self : str , _lowercase : List[str] , _lowercase : Optional[Any] , _lowercase : List[str]="__start__" , _lowercase : Optional[Any]="__end__" , _lowercase : Optional[Any]="__unk__" , _lowercase : Union[str, Any]="__null__" , **_lowercase : int , ):
"""simple docstring"""
super().__init__(unk_token=UpperCamelCase_ , bos_token=UpperCamelCase_ , eos_token=UpperCamelCase_ , pad_token=UpperCamelCase_ , **UpperCamelCase_ )
with open(UpperCamelCase_ , encoding="""utf-8""" ) as vocab_handle:
SCREAMING_SNAKE_CASE__ = json.load(UpperCamelCase_ )
SCREAMING_SNAKE_CASE__ = {v: k for k, v in self.encoder.items()}
with open(UpperCamelCase_ , encoding="""utf-8""" ) as merges_handle:
SCREAMING_SNAKE_CASE__ = merges_handle.read().split("""\n""" )[1:-1]
SCREAMING_SNAKE_CASE__ = [tuple(merge.split() ) for merge in merges]
SCREAMING_SNAKE_CASE__ = dict(zip(UpperCamelCase_ , range(len(UpperCamelCase_ ) ) ) )
SCREAMING_SNAKE_CASE__ = {}
@property
def __a ( self : List[str] ):
"""simple docstring"""
return len(self.encoder )
def __a ( self : List[str] ):
"""simple docstring"""
return dict(self.encoder , **self.added_tokens_encoder )
def __a ( self : str , _lowercase : str ):
"""simple docstring"""
if token in self.cache:
return self.cache[token]
SCREAMING_SNAKE_CASE__ = re.sub("""([.,!?()])""" , R""" \1""" , UpperCamelCase_ )
SCREAMING_SNAKE_CASE__ = re.sub("""(')""" , R""" \1 """ , UpperCamelCase_ )
SCREAMING_SNAKE_CASE__ = re.sub(R"""\s{2,}""" , """ """ , UpperCamelCase_ )
if "\n" in token:
SCREAMING_SNAKE_CASE__ = token.replace("""\n""" , """ __newln__""" )
SCREAMING_SNAKE_CASE__ = token.split(""" """ )
SCREAMING_SNAKE_CASE__ = []
for token in tokens:
if not len(UpperCamelCase_ ):
continue
SCREAMING_SNAKE_CASE__ = token.lower()
SCREAMING_SNAKE_CASE__ = tuple(UpperCamelCase_ )
SCREAMING_SNAKE_CASE__ = tuple(list(word[:-1] ) + [word[-1] + """</w>"""] )
SCREAMING_SNAKE_CASE__ = get_pairs(UpperCamelCase_ )
if not pairs:
words.append(UpperCamelCase_ )
continue
while True:
                SCREAMING_SNAKE_CASE__ = min(pairs , key=lambda pair : self.bpe_ranks.get(pair , float("""inf""" ) ) )
if bigram not in self.bpe_ranks:
break
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = bigram
SCREAMING_SNAKE_CASE__ = []
SCREAMING_SNAKE_CASE__ = 0
while i < len(UpperCamelCase_ ):
try:
SCREAMING_SNAKE_CASE__ = word.index(UpperCamelCase_ , UpperCamelCase_ )
new_word.extend(word[i:j] )
SCREAMING_SNAKE_CASE__ = j
except ValueError:
new_word.extend(word[i:] )
break
if word[i] == first and i < len(UpperCamelCase_ ) - 1 and word[i + 1] == second:
new_word.append(first + second )
i += 2
else:
new_word.append(word[i] )
i += 1
SCREAMING_SNAKE_CASE__ = tuple(UpperCamelCase_ )
SCREAMING_SNAKE_CASE__ = new_word
if len(UpperCamelCase_ ) == 1:
break
else:
SCREAMING_SNAKE_CASE__ = get_pairs(UpperCamelCase_ )
SCREAMING_SNAKE_CASE__ = """@@ """.join(UpperCamelCase_ )
SCREAMING_SNAKE_CASE__ = word[:-4]
SCREAMING_SNAKE_CASE__ = word
words.append(UpperCamelCase_ )
return " ".join(UpperCamelCase_ )
    def _tokenize(self, text):
        """simple docstring"""
        split_tokens = []
        words = re.findall(r"\S+\n?", text)
        for token in words:
            split_tokens.extend(list(self.bpe(token).split(" ")))
        return split_tokens
    def _convert_token_to_id(self, token):
        """simple docstring"""
        token = token.lower()
        return self.encoder.get(token, self.encoder.get(self.unk_token))

    def _convert_id_to_token(self, index):
        """simple docstring"""
        return self.decoder.get(index, self.unk_token)

    def convert_tokens_to_string(self, tokens):
        """simple docstring"""
        out_string = " ".join(tokens).replace("@@ ", "").strip()
        return out_string
    def save_vocabulary(self, save_directory, filename_prefix=None):
        """simple docstring"""
        if not os.path.isdir(save_directory):
            logger.error(f"""Vocabulary path ({save_directory}) should be a directory""")
            return
        vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"])
        merge_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"])

        with open(vocab_file, "w", encoding="utf-8") as f:
            f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + "\n")

        index = 0
        with open(merge_file, "w", encoding="utf-8") as writer:
            writer.write("#version: 0.2\n")
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]):
                if index != token_index:
                    logger.warning(
                        f"""Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."""
                        """ Please check that the tokenizer is not corrupted!""")
                    index = token_index
                writer.write(" ".join(bpe_tokens) + "\n")
                index += 1
        return vocab_file, merge_file
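# Illustrative usage sketch for the tokenizer above (not part of the original
# module): it exercises the BPE merge logic with a tiny hand-written
# vocab/merges pair. The file contents below are hypothetical; real
# checkpoints come from the facebook/blenderbot_small-90M files mapped above.
def _demo_blenderbot_small_bpe():
    import json
    import os
    import tempfile

    tmp = tempfile.mkdtemp()
    vocab = {"__start__": 0, "__end__": 1, "__unk__": 2, "__null__": 3, "h@@": 4, "e@@": 5, "l@@": 6, "o": 7, "hell@@": 8}
    vocab_path = os.path.join(tmp, "vocab.json")
    merges_path = os.path.join(tmp, "merges.txt")
    with open(vocab_path, "w", encoding="utf-8") as f:
        json.dump(vocab, f)
    with open(merges_path, "w", encoding="utf-8") as f:
        f.write("#version: 0.2\nh e\nhe l\nhel l\n")
    tokenizer = BlenderbotSmallTokenizer(vocab_path, merges_path)
    return tokenizer.tokenize("hello")  # expected: ['hell@@', 'o'] given these merges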
| 714 | from __future__ import annotations
def mean(nums: list) -> float:
    """Return the arithmetic mean of a list of numbers.

    >>> mean([3, 6, 9, 12, 15, 18, 21])
    12.0
    >>> mean([])
    Traceback (most recent call last):
        ...
    ValueError: List is empty
    """
    if not nums:
        raise ValueError("List is empty")
    return sum(nums) / len(nums)
if __name__ == "__main__":
import doctest
doctest.testmod()
| 379 | 0 |
from google.protobuf import descriptor as _descriptor
from google.protobuf import descriptor_pool as _descriptor_pool
from google.protobuf import symbol_database as _symbol_database
from google.protobuf.internal import builder as _builder
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(
b'\n\x19sentencepiece_model.proto\x12\rsentencepiece"\x80\x0c\n\x0bTrainerSpec\x12\r\n\x05input\x18\x01 \x03(\t\x12\x14\n\x0cinput_format\x18\x07 \x01(\t\x12\x14\n\x0cmodel_prefix\x18\x02 \x01(\t\x12\x41\n\nmodel_type\x18\x03 \x01(\x0e\x32$.sentencepiece.TrainerSpec.ModelType:\x07UNIGRAM\x12\x18\n\nvocab_size\x18\x04 \x01(\x05:\x04\x38\x30\x30\x30\x12\x17\n\x0f\x61\x63\x63\x65pt_language\x18\x05 \x03(\t\x12 \n\x15self_test_sample_size\x18\x06 \x01(\x05:\x01\x30\x12*\n\x1b\x65nable_differential_privacy\x18\x32 \x01(\x08:\x05\x66\x61lse\x12+\n differential_privacy_noise_level\x18\x33 \x01(\x02:\x01\x30\x12\x32\n\'differential_privacy_clipping_threshold\x18\x34 \x01(\x04:\x01\x30\x12"\n\x12\x63haracter_coverage\x18\n \x01(\x02:\x06\x30.9995\x12\x1e\n\x13input_sentence_size\x18\x0b \x01(\x04:\x01\x30\x12$\n\x16shuffle_input_sentence\x18\x13 \x01(\x08:\x04true\x12 \n\x14mining_sentence_size\x18\x0c \x01(\x05\x42\x02\x18\x01\x12"\n\x16training_sentence_size\x18\r \x01(\x05\x42\x02\x18\x01\x12(\n\x17seed_sentencepiece_size\x18\x0e \x01(\x05:\x07\x31\x30\x30\x30\x30\x30\x30\x12\x1e\n\x10shrinking_factor\x18\x0f \x01(\x02:\x04\x30.75\x12!\n\x13max_sentence_length\x18\x12 \x01(\x05:\x04\x34\x31\x39\x32\x12\x17\n\x0bnum_threads\x18\x10 \x01(\x05:\x02\x31\x36\x12\x1d\n\x12num_sub_iterations\x18\x11 \x01(\x05:\x01\x32\x12$\n\x18max_sentencepiece_length\x18\x14 \x01(\x05:\x02\x31\x36\x12%\n\x17split_by_unicode_script\x18\x15 \x01(\x08:\x04true\x12\x1d\n\x0fsplit_by_number\x18\x17 \x01(\x08:\x04true\x12!\n\x13split_by_whitespace\x18\x16 \x01(\x08:\x04true\x12)\n\x1atreat_whitespace_as_suffix\x18\x18 \x01(\x08:\x05\x66\x61lse\x12+\n\x1c\x61llow_whitespace_only_pieces\x18\x1a \x01(\x08:\x05\x66\x61lse\x12\x1b\n\x0csplit_digits\x18\x19 \x01(\x08:\x05\x66\x61lse\x12#\n\x19pretokenization_delimiter\x18\x35 \x01(\t:\x00\x12\x17\n\x0f\x63ontrol_symbols\x18\x1e \x03(\t\x12\x1c\n\x14user_defined_symbols\x18\x1f \x03(\t\x12\x16\n\x0erequired_chars\x18$ \x01(\t\x12\x1c\n\rbyte_fallback\x18# \x01(\x08:\x05\x66\x61lse\x12+\n\x1dvocabulary_output_piece_score\x18 \x01(\x08:\x04true\x12\x1e\n\x10hard_vocab_limit\x18! \x01(\x08:\x04true\x12\x1c\n\ruse_all_vocab\x18" \x01(\x08:\x05\x66\x61lse\x12\x11\n\x06unk_id\x18( \x01(\x05:\x01\x30\x12\x11\n\x06\x62os_id\x18) \x01(\x05:\x01\x31\x12\x11\n\x06\x65os_id\x18* \x01(\x05:\x01\x32\x12\x12\n\x06pad_id\x18+ \x01(\x05:\x02-1\x12\x18\n\tunk_piece\x18- \x01(\t:\x05<unk>\x12\x16\n\tbos_piece\x18. 
\x01(\t:\x03<s>\x12\x17\n\teos_piece\x18/ \x01(\t:\x04</s>\x12\x18\n\tpad_piece\x18\x30 \x01(\t:\x05<pad>\x12\x1a\n\x0bunk_surface\x18, \x01(\t:\x05 \xe2\x81\x87 \x12+\n\x1ctrain_extremely_large_corpus\x18\x31 \x01(\x08:\x05\x66\x61lse"5\n\tModelType\x12\x0b\n\x07UNIGRAM\x10\x01\x12\x07\n\x03\x42PE\x10\x02\x12\x08\n\x04WORD\x10\x03\x12\x08\n\x04\x43HAR\x10\x04*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02"\xd1\x01\n\x0eNormalizerSpec\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x1c\n\x14precompiled_charsmap\x18\x02 \x01(\x0c\x12\x1e\n\x10\x61\x64\x64_dummy_prefix\x18\x03 \x01(\x08:\x04true\x12&\n\x18remove_extra_whitespaces\x18\x04 \x01(\x08:\x04true\x12 \n\x12\x65scape_whitespaces\x18\x05 \x01(\x08:\x04true\x12\x1e\n\x16normalization_rule_tsv\x18\x06 \x01(\t*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02"y\n\x0cSelfTestData\x12\x33\n\x07samples\x18\x01 \x03(\x0b\x32".sentencepiece.SelfTestData.Sample\x1a)\n\x06Sample\x12\r\n\x05input\x18\x01 \x01(\t\x12\x10\n\x08\x65xpected\x18\x02 \x01(\t*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02"\xfe\x03\n\nModelProto\x12\x37\n\x06pieces\x18\x01 \x03(\x0b\x32\'.sentencepiece.ModelProto.SentencePiece\x12\x30\n\x0ctrainer_spec\x18\x02 \x01(\x0b\x32\x1a.sentencepiece.TrainerSpec\x12\x36\n\x0fnormalizer_spec\x18\x03 \x01(\x0b\x32\x1d.sentencepiece.NormalizerSpec\x12\x33\n\x0eself_test_data\x18\x04 \x01(\x0b\x32\x1b.sentencepiece.SelfTestData\x12\x38\n\x11\x64\x65normalizer_spec\x18\x05 \x01(\x0b\x32\x1d.sentencepiece.NormalizerSpec\x1a\xd2\x01\n\rSentencePiece\x12\r\n\x05piece\x18\x01 \x01(\t\x12\r\n\x05score\x18\x02 \x01(\x02\x12\x42\n\x04type\x18\x03 \x01(\x0e\x32,.sentencepiece.ModelProto.SentencePiece.Type:\x06NORMAL"T\n\x04Type\x12\n\n\x06NORMAL\x10\x01\x12\x0b\n\x07UNKNOWN\x10\x02\x12\x0b\n\x07\x43ONTROL\x10\x03\x12\x10\n\x0cUSER_DEFINED\x10\x04\x12\x08\n\x04\x42YTE\x10\x06\x12\n\n\x06UNUSED\x10\x05*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02\x42\x02H\x03'
)
_globals = globals()
_builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, _globals)
_builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, 'sentencepiece_model_pb2', _globals)
if _descriptor._USE_C_DESCRIPTORS is False:
    DESCRIPTOR._options = None
    DESCRIPTOR._serialized_options = b'H\003'
# (generated by protobuf compiler, but `_TRAINERSPEC` is not defined)
# _TRAINERSPEC.fields_by_name["mining_sentence_size"]._options = None
# _TRAINERSPEC.fields_by_name["mining_sentence_size"]._serialized_options = b"\030\001"
# _TRAINERSPEC.fields_by_name["training_sentence_size"]._options = None
# _TRAINERSPEC.fields_by_name["training_sentence_size"]._serialized_options = b"\030\001"
    _globals['_TRAINERSPEC']._serialized_start = 45
    _globals['_TRAINERSPEC']._serialized_end = 1581
    _globals['_TRAINERSPEC_MODELTYPE']._serialized_start = 1517
    _globals['_TRAINERSPEC_MODELTYPE']._serialized_end = 1570
    _globals['_NORMALIZERSPEC']._serialized_start = 1584
    _globals['_NORMALIZERSPEC']._serialized_end = 1793
    _globals['_SELFTESTDATA']._serialized_start = 1795
    _globals['_SELFTESTDATA']._serialized_end = 1916
    _globals['_SELFTESTDATA_SAMPLE']._serialized_start = 1864
    _globals['_SELFTESTDATA_SAMPLE']._serialized_end = 1905
    _globals['_MODELPROTO']._serialized_start = 1919
    _globals['_MODELPROTO']._serialized_end = 2429
    _globals['_MODELPROTO_SENTENCEPIECE']._serialized_start = 2208
    _globals['_MODELPROTO_SENTENCEPIECE']._serialized_end = 2418
    _globals['_MODELPROTO_SENTENCEPIECE_TYPE']._serialized_start = 2323
    _globals['_MODELPROTO_SENTENCEPIECE_TYPE']._serialized_end = 2407
# @@protoc_insertion_point(module_scope)
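# Illustrative usage sketch (not produced by protoc): the builder calls above
# inject message classes such as `ModelProto` into this module's namespace, so
# a serialized SentencePiece model (the path below is hypothetical) can be
# parsed with them, assuming the module imports cleanly.
def _demo_read_sentencepiece_model(path="spiece.model"):
    model = ModelProto()  # noqa: F821 -- created at runtime by the builder above
    with open(path, "rb") as f:
        model.ParseFromString(f.read())
    return model.trainer_spec.vocab_size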
| 187 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
logger = logging.get_logger(__name__)

BIT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    'google/bit-50': 'https://huggingface.co/google/bit-50/resolve/main/config.json',
}
class BitConfig(BackboneConfigMixin, PretrainedConfig):
    """simple docstring"""

    model_type = "bit"
    layer_types = ["preactivation", "bottleneck"]
    supported_padding = ["SAME", "VALID"]
    def __init__(self, num_channels=3, embedding_size=64, hidden_sizes=[256, 512, 1024, 2048], depths=[3, 4, 6, 3], layer_type="preactivation", hidden_act="relu", global_padding=None, num_groups=32, drop_path_rate=0.0, embedding_dynamic_padding=False, output_stride=32, width_factor=1, out_features=None, out_indices=None, **kwargs, ):
        """simple docstring"""
        super().__init__(**kwargs)
        if layer_type not in self.layer_types:
            raise ValueError(f"""layer_type={layer_type} is not one of {','.join(self.layer_types)}""")
        if global_padding is not None:
            if global_padding.upper() in self.supported_padding:
                global_padding = global_padding.upper()
            else:
                raise ValueError(f"""Padding strategy {global_padding} not supported""")
        self.num_channels = num_channels
        self.embedding_size = embedding_size
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.layer_type = layer_type
        self.hidden_act = hidden_act
        self.global_padding = global_padding
        self.num_groups = num_groups
        self.drop_path_rate = drop_path_rate
        self.embedding_dynamic_padding = embedding_dynamic_padding
        self.output_stride = output_stride
        self.width_factor = width_factor
        self.stage_names = ['stem'] + [f"""stage{idx}""" for idx in range(1, len(depths) + 1)]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names)
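# Illustrative usage sketch: instantiate the config above with explicit
# arguments; `out_features` selects which stages a backbone consumer reads.
def _demo_bit_config():
    config = BitConfig(layer_type="bottleneck", global_padding="same", out_features=["stage2", "stage4"])
    return config.stage_names, config.out_features  # (['stem', 'stage1', ..., 'stage4'], ['stage2', 'stage4'])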
| 187 | 1 |
'''simple docstring'''
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
convert_to_rgb,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
OPENAI_CLIP_MEAN,
OPENAI_CLIP_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
logger = logging.get_logger(__name__)
if is_vision_available():
import PIL
class CLIPImageProcessor(BaseImageProcessor):
    model_input_names = ['''pixel_values''']

    def __init__(self, do_resize=True, size=None, resample=PILImageResampling.BICUBIC, do_center_crop=True, crop_size=None, do_rescale=True, rescale_factor=1 / 255, do_normalize=True, image_mean=None, image_std=None, do_convert_rgb=True, **kwargs, ):
        """simple docstring"""
        super().__init__(**kwargs)
        size = size if size is not None else {'''shortest_edge''': 224}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {'''height''': 224, '''width''': 224}
        crop_size = get_size_dict(crop_size, default_to_square=True, param_name="""crop_size""")
        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else OPENAI_CLIP_MEAN
        self.image_std = image_std if image_std is not None else OPENAI_CLIP_STD
        self.do_convert_rgb = do_convert_rgb
    def resize(self, image, size, resample=PILImageResampling.BICUBIC, data_format=None, **kwargs, ):
        """simple docstring"""
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" not in size:
            raise ValueError(f"The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}")
        output_size = get_resize_output_image_size(image, size=size["""shortest_edge"""], default_to_square=False)
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)
    def center_crop(self, image, size, data_format=None, **kwargs, ):
        """simple docstring"""
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"The `size` parameter must contain the keys (height, width). Got {size.keys()}")
        return center_crop(image, size=(size["""height"""], size["""width"""]), data_format=data_format, **kwargs)
    def rescale(self, image, scale, data_format=None, **kwargs, ):
        """simple docstring"""
        return rescale(image, scale=scale, data_format=data_format, **kwargs)
    def normalize(self, image, mean, std, data_format=None, **kwargs, ):
        """simple docstring"""
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)
    def preprocess(self, images, do_resize=None, size=None, resample=None, do_center_crop=None, crop_size=None, do_rescale=None, rescale_factor=None, do_normalize=None, image_mean=None, image_std=None, do_convert_rgb=None, return_tensors=None, data_format=ChannelDimension.FIRST, **kwargs, ):
        """simple docstring"""
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size, param_name="""size""", default_to_square=False)
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="""crop_size""", default_to_square=True)
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        do_convert_rgb = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb
        images = make_list_of_images(images)
        if not valid_images(images):
            raise ValueError(
                """Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, """
                """torch.Tensor, tf.Tensor or jax.ndarray.""")
        if do_resize and size is None:
            raise ValueError("""Size must be specified if do_resize is True.""")
        if do_center_crop and crop_size is None:
            raise ValueError("""Crop size must be specified if do_center_crop is True.""")
        if do_rescale and rescale_factor is None:
            raise ValueError("""Rescale factor must be specified if do_rescale is True.""")
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("""Image mean and std must be specified if do_normalize is True.""")
        # PIL RGBA images are converted to RGB
        if do_convert_rgb:
            images = [convert_to_rgb(image) for image in images]
        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]
        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]
        if do_center_crop:
            images = [self.center_crop(image=image, size=crop_size) for image in images]
        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]
        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]
        images = [to_channel_dimension_format(image, data_format) for image in images]
        data = {'''pixel_values''': images}
        return BatchFeature(data=data, tensor_type=return_tensors)
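# Illustrative usage sketch: run a dummy image through the CLIP-style pipeline
# above (resize shortest edge to 224, center-crop 224x224, rescale, normalize).
def _demo_image_processor():
    import numpy as np

    image = (np.random.rand(480, 640, 3) * 255).astype(np.uint8)  # dummy HxWxC image
    processor = CLIPImageProcessor()
    batch = processor.preprocess(images=image, return_tensors="np")
    return batch["pixel_values"].shape  # expected: (1, 3, 224, 224)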
| 719 | '''simple docstring'''
import argparse
import glob
import logging
import os
import time
from argparse import Namespace
import numpy as np
import torch
from lightning_base import BaseTransformer, add_generic_args, generic_train
from torch.utils.data import DataLoader, TensorDataset
from transformers import glue_compute_metrics as compute_metrics
from transformers import glue_convert_examples_to_features as convert_examples_to_features
from transformers import glue_output_modes, glue_tasks_num_labels
from transformers import glue_processors as processors
logger = logging.getLogger(__name__)
class GLUETransformer(BaseTransformer):
    '''simple docstring'''

    mode = '''sequence-classification'''

    def __init__(self, hparams):
        """simple docstring"""
        if type(hparams) == dict:
            hparams = Namespace(**hparams)
        hparams.glue_output_mode = glue_output_modes[hparams.task]
        num_labels = glue_tasks_num_labels[hparams.task]
        super().__init__(hparams, num_labels, self.mode)
    def forward(self, **inputs):
        """simple docstring"""
        return self.model(**inputs)

    def training_step(self, batch, batch_idx):
        """simple docstring"""
        inputs = {"input_ids": batch[0], "attention_mask": batch[1], "labels": batch[3]}
        if self.config.model_type not in ["distilbert", "bart"]:
            inputs["token_type_ids"] = batch[2] if self.config.model_type in ["bert", "xlnet", "albert"] else None
        outputs = self(**inputs)
        loss = outputs[0]
        lr_scheduler = self.trainer.lr_schedulers[0]["scheduler"]
        tensorboard_logs = {"loss": loss, "rate": lr_scheduler.get_last_lr()[-1]}
        return {"loss": loss, "log": tensorboard_logs}
    def prepare_data(self):
        """simple docstring"""
        args = self.hparams
        processor = processors[args.task]()
        self.labels = processor.get_labels()

        for mode in ["train", "dev"]:
            cached_features_file = self._feature_file(mode)
            if os.path.exists(cached_features_file) and not args.overwrite_cache:
                logger.info("Loading features from cached file %s", cached_features_file)
            else:
                logger.info("Creating features from dataset file at %s", args.data_dir)
                examples = (
                    processor.get_dev_examples(args.data_dir)
                    if mode == "dev"
                    else processor.get_train_examples(args.data_dir)
                )
                features = convert_examples_to_features(
                    examples, self.tokenizer, max_length=args.max_seq_length, label_list=self.labels, output_mode=args.glue_output_mode, )
                logger.info("Saving features into cached file %s", cached_features_file)
                torch.save(features, cached_features_file)
    def get_dataloader(self, mode, batch_size, shuffle=False):
        """simple docstring"""
        mode = "dev" if mode == "test" else mode
        cached_features_file = self._feature_file(mode)
        logger.info("Loading features from cached file %s", cached_features_file)
        features = torch.load(cached_features_file)
        all_input_ids = torch.tensor([f.input_ids for f in features], dtype=torch.long)
        all_attention_mask = torch.tensor([f.attention_mask for f in features], dtype=torch.long)
        all_token_type_ids = torch.tensor([f.token_type_ids for f in features], dtype=torch.long)
        if self.hparams.glue_output_mode == "classification":
            all_labels = torch.tensor([f.label for f in features], dtype=torch.long)
        elif self.hparams.glue_output_mode == "regression":
            all_labels = torch.tensor([f.label for f in features], dtype=torch.float)
        return DataLoader(
            TensorDataset(all_input_ids, all_attention_mask, all_token_type_ids, all_labels), batch_size=batch_size, shuffle=shuffle, )
    def validation_step(self, batch, batch_idx):
        """simple docstring"""
        inputs = {"input_ids": batch[0], "attention_mask": batch[1], "labels": batch[3]}
        if self.config.model_type not in ["distilbert", "bart"]:
            inputs["token_type_ids"] = batch[2] if self.config.model_type in ["bert", "xlnet", "albert"] else None
        outputs = self(**inputs)
        tmp_eval_loss, logits = outputs[:2]
        preds = logits.detach().cpu().numpy()
        out_label_ids = inputs["labels"].detach().cpu().numpy()
        return {"val_loss": tmp_eval_loss.detach().cpu(), "pred": preds, "target": out_label_ids}
    def _eval_end(self, outputs):
        """simple docstring"""
        val_loss_mean = torch.stack([x["val_loss"] for x in outputs]).mean().detach().cpu().item()
        preds = np.concatenate([x["pred"] for x in outputs], axis=0)
        if self.hparams.glue_output_mode == "classification":
            preds = np.argmax(preds, axis=1)
        elif self.hparams.glue_output_mode == "regression":
            preds = np.squeeze(preds)
        out_label_ids = np.concatenate([x["target"] for x in outputs], axis=0)
        out_label_list = [[] for _ in range(out_label_ids.shape[0])]
        preds_list = [[] for _ in range(out_label_ids.shape[0])]
        results = {**{"val_loss": val_loss_mean}, **compute_metrics(self.hparams.task, preds, out_label_ids)}
        ret = dict(results.items())
        ret["log"] = results
        return ret, preds_list, out_label_list
    def validation_epoch_end(self, outputs):
        """simple docstring"""
        ret, preds, targets = self._eval_end(outputs)
        logs = ret["log"]
        return {"val_loss": logs["val_loss"], "log": logs, "progress_bar": logs}

    def test_epoch_end(self, outputs):
        """simple docstring"""
        ret, predictions, targets = self._eval_end(outputs)
        logs = ret["log"]
        # `val_loss` is the key returned by `self._eval_end()` but actually refers to `test_loss`
        return {"avg_test_loss": logs["val_loss"], "log": logs, "progress_bar": logs}
    @staticmethod
    def add_model_specific_args(parser, root_dir):
        """simple docstring"""
        BaseTransformer.add_model_specific_args(parser, root_dir)
        parser.add_argument(
            "--max_seq_length", default=128, type=int, help=(
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            ), )
        parser.add_argument(
            "--task", default="", type=str, required=True, help="The GLUE task to run", )
        parser.add_argument(
            "--gpus", default=0, type=int, help="The number of GPUs allocated for this, it is by default 0 meaning none", )
        parser.add_argument(
            "--overwrite_cache", action="store_true", help="Overwrite the cached training and evaluation sets")
        return parser
def main():
    '''simple docstring'''
    parser = argparse.ArgumentParser()
    add_generic_args(parser, os.getcwd())
    parser = GLUETransformer.add_model_specific_args(parser, os.getcwd())
    args = parser.parse_args()

    # If output_dir not provided, a folder will be generated in pwd
    if args.output_dir is None:
        args.output_dir = os.path.join(
            "./results", f"{args.task}_{time.strftime('%Y%m%d_%H%M%S')}", )
        os.makedirs(args.output_dir)
    model = GLUETransformer(args)
    trainer = generic_train(model, args)

    # Optionally, predict on dev set and write to output_dir
    if args.do_predict:
        checkpoints = sorted(glob.glob(os.path.join(args.output_dir, "checkpoint-epoch=*.ckpt"), recursive=True))
        model = model.load_from_checkpoint(checkpoints[-1])
        return trainer.test(model)
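# Worked example (illustrative, standalone) of how `_eval_end` above collapses
# logits into predictions for the two GLUE output modes:
def _demo_glue_preds():
    import numpy as np

    logits = np.array([[0.1, 0.9], [0.8, 0.2]])
    classification_preds = np.argmax(logits, axis=1)  # -> array([1, 0]): one class id per example
    regression_logits = np.array([[0.3], [0.7]])
    regression_preds = np.squeeze(regression_logits)  # -> array([0.3, 0.7]): trailing axis dropped
    return classification_preds, regression_preds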
if __name__ == "__main__":
main()
| 605 | 0 |
import multiprocessing
import os
from typing import BinaryIO, Optional, Union
import fsspec
from .. import Dataset, Features, NamedSplit, config
from ..formatting import query_table
from ..packaged_modules.json.json import Json
from ..utils import logging
from ..utils.typing import NestedDataStructureLike, PathLike
from .abc import AbstractDatasetReader
class JsonDatasetReader(AbstractDatasetReader):
    '''simple docstring'''

    def __init__(self, path_or_paths, split=None, features=None, cache_dir=None, keep_in_memory=False, streaming=False, num_proc=None, field=None, **kwargs, ):
        """simple docstring"""
        super().__init__(
            path_or_paths, split=split, features=features, cache_dir=cache_dir, keep_in_memory=keep_in_memory, streaming=streaming, num_proc=num_proc, **kwargs, )
        self.field = field
        path_or_paths = path_or_paths if isinstance(path_or_paths, dict) else {self.split: path_or_paths}
        self.builder = Json(
            cache_dir=cache_dir, data_files=path_or_paths, features=features, field=field, **kwargs, )

    def read(self):
        """simple docstring"""
        if self.streaming:
            dataset = self.builder.as_streaming_dataset(split=self.split)
        # Build regular (map-style) dataset
        else:
            download_config = None
            download_mode = None
            verification_mode = None
            base_path = None
            self.builder.download_and_prepare(
                download_config=download_config, download_mode=download_mode, verification_mode=verification_mode, base_path=base_path, num_proc=self.num_proc, )
            dataset = self.builder.as_dataset(
                split=self.split, verification_mode=verification_mode, in_memory=self.keep_in_memory)
        return dataset
class JsonDatasetWriter:
    '''simple docstring'''

    def __init__(self, dataset, path_or_buf, batch_size=None, num_proc=None, **to_json_kwargs, ):
        """simple docstring"""
        if num_proc is not None and num_proc <= 0:
            raise ValueError(f"num_proc {num_proc} must be an integer > 0.")
        self.dataset = dataset
        self.path_or_buf = path_or_buf
        self.batch_size = batch_size if batch_size else config.DEFAULT_MAX_BATCH_SIZE
        self.num_proc = num_proc
        self.encoding = '''utf-8'''
        self.to_json_kwargs = to_json_kwargs
    def write(self):
        """simple docstring"""
        _ = self.to_json_kwargs.pop("""path_or_buf""", None)
        orient = self.to_json_kwargs.pop("""orient""", """records""")
        lines = self.to_json_kwargs.pop("""lines""", True if orient == """records""" else False)
        index = self.to_json_kwargs.pop("""index""", False if orient in ["""split""", """table"""] else True)
        compression = self.to_json_kwargs.pop("""compression""", None)

        if compression not in [None, "infer", "gzip", "bz2", "xz"]:
            raise NotImplementedError(f"`datasets` currently does not support {compression} compression")

        if isinstance(self.path_or_buf, (str, bytes, os.PathLike)):
            with fsspec.open(self.path_or_buf, """wb""", compression=compression) as buffer:
                written = self._write(file_obj=buffer, orient=orient, lines=lines, index=index, **self.to_json_kwargs)
        else:
            if compression:
                raise NotImplementedError(
                    f"The compression parameter is not supported when writing to a buffer, but compression={compression}"
                    """ was passed. Please provide a local path instead.""")
            written = self._write(
                file_obj=self.path_or_buf, orient=orient, lines=lines, index=index, **self.to_json_kwargs)
        return written
    def _batch_json(self, args):
        """simple docstring"""
        offset, orient, lines, index, to_json_kwargs = args
        batch = query_table(
            table=self.dataset.data, key=slice(offset, offset + self.batch_size), indices=self.dataset._indices, )
        json_str = batch.to_pandas().to_json(
            path_or_buf=None, orient=orient, lines=lines, index=index, **to_json_kwargs)
        if not json_str.endswith("""\n"""):
            json_str += "\n"
        return json_str.encode(self.encoding)
    def _write(self, file_obj, orient, lines, index, **to_json_kwargs, ):
        """simple docstring"""
        written = 0

        if self.num_proc is None or self.num_proc == 1:
            for offset in logging.tqdm(
                range(0, len(self.dataset), self.batch_size), unit="""ba""", disable=not logging.is_progress_bar_enabled(), desc="""Creating json from Arrow format""", ):
                json_str = self._batch_json((offset, orient, lines, index, to_json_kwargs))
                written += file_obj.write(json_str)
        else:
            num_rows, batch_size = len(self.dataset), self.batch_size
            with multiprocessing.Pool(self.num_proc) as pool:
                for json_str in logging.tqdm(
                    pool.imap(
                        self._batch_json, [(offset, orient, lines, index, to_json_kwargs) for offset in range(0, num_rows, batch_size)], ), total=(num_rows // batch_size) + 1 if num_rows % batch_size else num_rows // batch_size, unit="""ba""", disable=not logging.is_progress_bar_enabled(), desc="""Creating json from Arrow format""", ):
                    written += file_obj.write(json_str)

        return written | 149 |
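# Illustrative usage sketch: these classes normally sit behind
# `datasets.Dataset.to_json(...)` / `load_dataset("json", ...)`. A direct
# round-trip through the writer above might look like this (the output path
# below is hypothetical):
def _demo_json_roundtrip(path="demo.jsonl"):
    from datasets import Dataset

    ds = Dataset.from_dict({"text": ["a", "b"], "label": [0, 1]})
    JsonDatasetWriter(ds, path, num_proc=1).write()  # one JSON object per line ("records" orient)
    with open(path, encoding="utf-8") as f:
        return f.read()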
"""simple docstring"""
import argparse
import re
import torch
from CLAP import create_model
from transformers import AutoFeatureExtractor, ClapConfig, ClapModel
KEYS_TO_MODIFY_MAPPING = {
"text_branch": "text_model",
"audio_branch": "audio_model.audio_encoder",
"attn": "attention.self",
"self.proj": "output.dense",
"attention.self_mask": "attn_mask",
"mlp.fc1": "intermediate.dense",
"mlp.fc2": "output.dense",
"norm1": "layernorm_before",
"norm2": "layernorm_after",
"bn0": "batch_norm",
}
processor = AutoFeatureExtractor.from_pretrained("laion/clap-htsat-unfused", truncation="rand_trunc")
def init_clap(checkpoint_path, enable_fusion=False):
    model, model_cfg = create_model(
        '''HTSAT-tiny''', '''roberta''', checkpoint_path, precision='''fp32''', device='''cuda:0''' if torch.cuda.is_available() else '''cpu''', enable_fusion=enable_fusion, fusion_type='''aff_2d''' if enable_fusion else None, )
    return model, model_cfg
def rename_state_dict(state_dict):
    model_state_dict = {}
    sequential_layers_pattern = r'''.*sequential.(\d+).*'''
    text_projection_pattern = r'''.*_projection.(\d+).*'''
    for key, value in state_dict.items():
        # check if any key needs to be modified
        for key_to_modify, new_key in KEYS_TO_MODIFY_MAPPING.items():
            if key_to_modify in key:
                key = key.replace(key_to_modify, new_key)

        if re.match(sequential_layers_pattern, key):
            # replace sequential layers with list
            sequential_layer = re.match(sequential_layers_pattern, key).group(1)
            key = key.replace(f'''sequential.{sequential_layer}.''', f'''layers.{int(sequential_layer)//3}.linear.''')
        elif re.match(text_projection_pattern, key):
            projecton_layer = int(re.match(text_projection_pattern, key).group(1))
            # Because in CLAP they use `nn.Sequential`...
            transformers_projection_layer = 1 if projecton_layer == 0 else 2
            key = key.replace(f'''_projection.{projecton_layer}.''', f'''_projection.linear{transformers_projection_layer}.''')

        if "audio" and "qkv" in key:
            # split qkv into query key and value
            mixed_qkv = value
            qkv_dim = mixed_qkv.size(0) // 3
            query_layer = mixed_qkv[:qkv_dim]
            key_layer = mixed_qkv[qkv_dim : qkv_dim * 2]
            value_layer = mixed_qkv[qkv_dim * 2 :]
            model_state_dict[key.replace("qkv", "query")] = query_layer
            model_state_dict[key.replace("qkv", "key")] = key_layer
            model_state_dict[key.replace("qkv", "value")] = value_layer
        else:
            model_state_dict[key] = value
    return model_state_dict
def convert_clap_checkpoint(checkpoint_path, pytorch_dump_folder_path, config_path, enable_fusion=False):
    clap_model, model_cfg = init_clap(checkpoint_path, enable_fusion=enable_fusion)
    clap_model.eval()
    state_dict = clap_model.state_dict()
    state_dict = rename_state_dict(state_dict)
    transformers_config = ClapConfig()
    transformers_config.audio_config.enable_fusion = enable_fusion
    model = ClapModel(transformers_config)
    # ignore the spectrogram embedding layer
    model.load_state_dict(state_dict, strict=False)
    model.save_pretrained(pytorch_dump_folder_path)
    transformers_config.save_pretrained(pytorch_dump_folder_path)
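# Worked example (illustrative, standalone) of the qkv-splitting rule applied
# in `rename_state_dict` above:
def _demo_split_qkv():
    import torch

    mixed_qkv = torch.randn(3 * 4, 4)  # fused qkv weight: rows stack query, key, value
    qkv_dim = mixed_qkv.size(0) // 3
    query = mixed_qkv[:qkv_dim]
    key = mixed_qkv[qkv_dim : qkv_dim * 2]
    value = mixed_qkv[qkv_dim * 2 :]
    return query.shape, key.shape, value.shape  # each -> torch.Size([4, 4])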
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint")
parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
parser.add_argument("--enable_fusion", action="store_true", help="Whether to enable fusion or not")
    args = parser.parse_args()
convert_clap_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.enable_fusion) | 498 | 0 |
'''simple docstring'''
import logging
import os
from dataclasses import dataclass
from enum import Enum
from typing import List, Optional, Union
from filelock import FileLock
from transformers import PreTrainedTokenizer, is_tf_available, is_torch_available
logger = logging.getLogger(__name__)
@dataclass
class InputExample:
    guid: str
    words: List[str]
    labels: Optional[List[str]]
@dataclass
class InputFeatures:
    input_ids: List[int]
    attention_mask: List[int]
    token_type_ids: Optional[List[int]] = None
    label_ids: Optional[List[int]] = None
class Split(Enum):
    train = 'train'
    dev = 'dev'
    test = 'test'
class TokenClassificationTask:
    @staticmethod
    def read_examples_from_file(data_dir: str, mode: Union[Split, str]) -> List[InputExample]:
        '''simple docstring'''
        raise NotImplementedError

    @staticmethod
    def get_labels(path: str) -> List[str]:
        '''simple docstring'''
        raise NotImplementedError
    @staticmethod
    def convert_examples_to_features(examples: List[InputExample], label_list: List[str], max_seq_length: int, tokenizer: PreTrainedTokenizer, cls_token_at_end=False, cls_token="[CLS]", cls_token_segment_id=1, sep_token="[SEP]", sep_token_extra=False, pad_on_left=False, pad_token=0, pad_token_segment_id=0, pad_token_label_id=-100, sequence_a_segment_id=0, mask_padding_with_zero=True, ) -> List[InputFeatures]:
        '''simple docstring'''
        label_map = {label: i for i, label in enumerate(label_list)}
        features = []
        for ex_index, example in enumerate(examples):
            if ex_index % 10000 == 0:
                logger.info('Writing example %d of %d', ex_index, len(examples))
            tokens = []
            label_ids = []
            for word, label in zip(example.words, example.labels):
                word_tokens = tokenizer.tokenize(word)
                # bert-base-multilingual-cased sometimes output "nothing ([]) when calling tokenize with just a space.
                if len(word_tokens) > 0:
                    tokens.extend(word_tokens)
                    # Use the real label id for the first token of the word, and padding ids for the remaining tokens
                    label_ids.extend([label_map[label]] + [pad_token_label_id] * (len(word_tokens) - 1))
            # Account for [CLS] and [SEP] with "- 2" and with "- 3" for RoBERTa.
            special_tokens_count = tokenizer.num_special_tokens_to_add()
            if len(tokens) > max_seq_length - special_tokens_count:
                tokens = tokens[: (max_seq_length - special_tokens_count)]
                label_ids = label_ids[: (max_seq_length - special_tokens_count)]

            # The convention in BERT is:
            # (a) For sequence pairs:
            #  tokens:   [CLS] is this jack ##son ##ville ? [SEP] no it is not . [SEP]
            #  type_ids:   0   0  0    0    0     0       0   0   1  1  1  1   1   1
            # (b) For single sequences:
            #  tokens:   [CLS] the dog is hairy . [SEP]
            #  type_ids:   0   0   0   0  0     0   0
            #
            # Where "type_ids" are used to indicate whether this is the first
            # sequence or the second sequence. The embedding vectors for `type=0` and
            # `type=1` were learned during pre-training and are added to the wordpiece
            # embedding vector (and position vector). This is not *strictly* necessary
            # since the [SEP] token unambiguously separates the sequences, but it makes
            # it easier for the model to learn the concept of sequences.
            #
            # For classification tasks, the first vector (corresponding to [CLS]) is
            # used as the "sentence vector". Note that this only makes sense because
            # the entire model is fine-tuned.
            tokens += [sep_token]
            label_ids += [pad_token_label_id]
            if sep_token_extra:
                # roberta uses an extra separator b/w pairs of sentences
                tokens += [sep_token]
                label_ids += [pad_token_label_id]
            segment_ids = [sequence_a_segment_id] * len(tokens)

            if cls_token_at_end:
                tokens += [cls_token]
                label_ids += [pad_token_label_id]
                segment_ids += [cls_token_segment_id]
            else:
                tokens = [cls_token] + tokens
                label_ids = [pad_token_label_id] + label_ids
                segment_ids = [cls_token_segment_id] + segment_ids

            input_ids = tokenizer.convert_tokens_to_ids(tokens)

            # The mask has 1 for real tokens and 0 for padding tokens. Only real
            # tokens are attended to.
            input_mask = [1 if mask_padding_with_zero else 0] * len(input_ids)

            # Zero-pad up to the sequence length.
            padding_length = max_seq_length - len(input_ids)
            if pad_on_left:
                input_ids = ([pad_token] * padding_length) + input_ids
                input_mask = ([0 if mask_padding_with_zero else 1] * padding_length) + input_mask
                segment_ids = ([pad_token_segment_id] * padding_length) + segment_ids
                label_ids = ([pad_token_label_id] * padding_length) + label_ids
            else:
                input_ids += [pad_token] * padding_length
                input_mask += [0 if mask_padding_with_zero else 1] * padding_length
                segment_ids += [pad_token_segment_id] * padding_length
                label_ids += [pad_token_label_id] * padding_length

            assert len(input_ids) == max_seq_length
            assert len(input_mask) == max_seq_length
            assert len(segment_ids) == max_seq_length
            assert len(label_ids) == max_seq_length

            if ex_index < 5:
                logger.info('*** Example ***')
                logger.info('guid: %s', example.guid)
                logger.info('tokens: %s', ' '.join([str(x) for x in tokens]))
                logger.info('input_ids: %s', ' '.join([str(x) for x in input_ids]))
                logger.info('input_mask: %s', ' '.join([str(x) for x in input_mask]))
                logger.info('segment_ids: %s', ' '.join([str(x) for x in segment_ids]))
                logger.info('label_ids: %s', ' '.join([str(x) for x in label_ids]))

            if "token_type_ids" not in tokenizer.model_input_names:
                segment_ids = None

            features.append(
                InputFeatures(
                    input_ids=input_ids, attention_mask=input_mask, token_type_ids=segment_ids, label_ids=label_ids))
        return features
if is_torch_available():
import torch
from torch import nn
from torch.utils.data import Dataset
    class TokenClassificationDataset(Dataset):
        features: List[InputFeatures]
        pad_token_label_id: int = nn.CrossEntropyLoss().ignore_index
        def __init__(self, token_classification_task: TokenClassificationTask, data_dir: str, tokenizer: PreTrainedTokenizer, labels: List[str], model_type: str, max_seq_length: Optional[int] = None, overwrite_cache=False, mode: Split = Split.train, ):
            '''simple docstring'''
            # Load data features from cache or dataset file
            cached_features_file = os.path.join(
                data_dir, 'cached_{}_{}_{}'.format(mode.value, tokenizer.__class__.__name__, str(max_seq_length)), )

            # Make sure only the first process in distributed training processes the dataset,
            # and the others will use the cache.
            lock_path = cached_features_file + '.lock'
            with FileLock(lock_path):
                if os.path.exists(cached_features_file) and not overwrite_cache:
                    logger.info(f'''Loading features from cached file {cached_features_file}''')
                    self.features = torch.load(cached_features_file)
                else:
                    logger.info(f'''Creating features from dataset file at {data_dir}''')
                    examples = token_classification_task.read_examples_from_file(data_dir, mode)
                    # TODO clean up all this to leverage built-in features of tokenizers
                    self.features = token_classification_task.convert_examples_to_features(
                        examples, labels, max_seq_length, tokenizer, cls_token_at_end=bool(model_type in ['xlnet']), cls_token=tokenizer.cls_token, cls_token_segment_id=2 if model_type in ['xlnet'] else 0, sep_token=tokenizer.sep_token, sep_token_extra=False, pad_on_left=bool(tokenizer.padding_side == 'left'), pad_token=tokenizer.pad_token_id, pad_token_segment_id=tokenizer.pad_token_type_id, pad_token_label_id=self.pad_token_label_id, )
                    logger.info(f'''Saving features into cached file {cached_features_file}''')
                    torch.save(self.features, cached_features_file)
        def __len__(self):
            '''simple docstring'''
            return len(self.features)

        def __getitem__(self, i) -> InputFeatures:
            '''simple docstring'''
            return self.features[i]
if is_tf_available():
import tensorflow as tf
    class TFTokenClassificationDataset:
        features: List[InputFeatures]
        pad_token_label_id: int = -100
        def __init__(self, token_classification_task: TokenClassificationTask, data_dir: str, tokenizer: PreTrainedTokenizer, labels: List[str], model_type: str, max_seq_length: Optional[int] = None, overwrite_cache=False, mode: Split = Split.train, ):
            '''simple docstring'''
            examples = token_classification_task.read_examples_from_file(data_dir, mode)
            # TODO clean up all this to leverage built-in features of tokenizers
            self.features = token_classification_task.convert_examples_to_features(
                examples, labels, max_seq_length, tokenizer, cls_token_at_end=bool(model_type in ['xlnet']), cls_token=tokenizer.cls_token, cls_token_segment_id=2 if model_type in ['xlnet'] else 0, sep_token=tokenizer.sep_token, sep_token_extra=False, pad_on_left=bool(tokenizer.padding_side == 'left'), pad_token=tokenizer.pad_token_id, pad_token_segment_id=tokenizer.pad_token_type_id, pad_token_label_id=self.pad_token_label_id, )

            def gen():
                for ex in self.features:
                    if ex.token_type_ids is None:
                        yield (
                            {"input_ids": ex.input_ids, "attention_mask": ex.attention_mask},
                            ex.label_ids,
                        )
                    else:
                        yield (
                            {
                                "input_ids": ex.input_ids,
                                "attention_mask": ex.attention_mask,
                                "token_type_ids": ex.token_type_ids,
                            },
                            ex.label_ids,
                        )

            if "token_type_ids" not in tokenizer.model_input_names:
                self.dataset = tf.data.Dataset.from_generator(
                    gen, ({'input_ids': tf.int32, 'attention_mask': tf.int32}, tf.int64), (
                        {'input_ids': tf.TensorShape([None]), 'attention_mask': tf.TensorShape([None])},
                        tf.TensorShape([None]),
                    ), )
            else:
                self.dataset = tf.data.Dataset.from_generator(
                    gen, ({'input_ids': tf.int32, 'attention_mask': tf.int32, 'token_type_ids': tf.int32}, tf.int64), (
                        {
                            'input_ids': tf.TensorShape([None]),
                            'attention_mask': tf.TensorShape([None]),
                            'token_type_ids': tf.TensorShape([None]),
                        },
                        tf.TensorShape([None]),
                    ), )
        def get_dataset(self):
            '''simple docstring'''
            self.dataset = self.dataset.apply(tf.data.experimental.assert_cardinality(len(self.features)))
            return self.dataset

        def __len__(self):
            '''simple docstring'''
            return len(self.features)

        def __getitem__(self, i) -> InputFeatures:
            '''simple docstring'''
            return self.features[i]
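# Worked example (illustrative) of the label-alignment rule used in
# `convert_examples_to_features` above: only the first sub-token of each word
# keeps the real label id; the rest get the pad id (-100), which
# CrossEntropyLoss ignores.
def _demo_label_alignment():
    word_tokens = ['hugg', '##ing', '##face']  # hypothetical sub-tokens of one word
    label_id, pad_token_label_id = 3, -100
    return [label_id] + [pad_token_label_id] * (len(word_tokens) - 1)  # -> [3, -100, -100]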
| 124 |
'''simple docstring'''
import random
def _partition(data: list, pivot) -> tuple:
    less, equal, greater = [], [], []
    for element in data:
        if element < pivot:
            less.append(element)
        elif element > pivot:
            greater.append(element)
        else:
            equal.append(element)
    return less, equal, greater
def quick_select(items: list, index: int) -> int:
    # index = len(items) // 2 when trying to find the median
    # (value of index when items is sorted)

    # invalid input
    if index >= len(items) or index < 0:
        return None
    pivot = items[random.randint(0, len(items) - 1)]
    count = 0
    smaller, equal, larger = _partition(items, pivot)
    count = len(equal)
    m = len(smaller)
    # index is the pivot
    if m <= index < m + count:
        return pivot
    # must be in smaller
    elif m > index:
        return quick_select(smaller, index)
    # must be in larger
    else:
        return quick_select(larger, index - (m + count))
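# Usage example (illustrative): the k-th order statistic without fully sorting.
def _demo_quick_select():
    items = [2, 4, 5, 7, 899, 54, 32]
    return quick_select(items, len(items) // 2)  # index 3 of the sorted list -> 7 (the median)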
| 124 | 1 |
'''simple docstring'''
import argparse
import logging
import os
import re
import tensorflow as tf
from transformers import (
AutoConfig,
AutoTokenizer,
DataCollatorForLanguageModeling,
PushToHubCallback,
TFAutoModelForMaskedLM,
create_optimizer,
)
logger = logging.getLogger(__name__)
AUTO = tf.data.AUTOTUNE
def parse_args():
    parser = argparse.ArgumentParser(description='Train a masked language model on TPU.')
    parser.add_argument(
        '--pretrained_model_config', type=str, default='roberta-base', help='The model config to use. Note that we don\'t copy the model\'s weights, only the config!', )
    parser.add_argument(
        '--tokenizer', type=str, default='unigram-tokenizer-wikitext', help='The name of the tokenizer to load. We use the pretrained tokenizer to initialize the model\'s vocab size.', )
    parser.add_argument(
        '--per_replica_batch_size', type=int, default=8, help='Batch size per TPU core.', )
    parser.add_argument(
        '--no_tpu', action='store_true', help='If set, run on CPU and don\'t try to initialize a TPU. Useful for debugging on non-TPU instances.', )
    parser.add_argument(
        '--tpu_name', type=str, help='Name of TPU resource to initialize. Should be blank on Colab, and \'local\' on TPU VMs.', default='local', )
    parser.add_argument(
        '--tpu_zone', type=str, help='Google cloud zone that TPU resource is located in. Only used for non-Colab TPU nodes.', )
    parser.add_argument(
        '--gcp_project', type=str, help='Google cloud project name. Only used for non-Colab TPU nodes.')
    parser.add_argument(
        '--bfloat16', action='store_true', help='Use mixed-precision bfloat16 for training. This is the recommended lower-precision format for TPU.', )
    parser.add_argument(
        '--train_dataset', type=str, help='Path to training dataset to load. If the path begins with `gs://`'
        ' then the dataset will be loaded from a Google Cloud Storage bucket.', )
    parser.add_argument(
        '--shuffle_buffer_size', type=int, default=2**18, help='Size of the shuffle buffer (in samples)', )
    parser.add_argument(
        '--eval_dataset', type=str, help='Path to evaluation dataset to load. If the path begins with `gs://`'
        ' then the dataset will be loaded from a Google Cloud Storage bucket.', )
    parser.add_argument(
        '--num_epochs', type=int, default=1, help='Number of epochs to train for.', )
    parser.add_argument(
        '--learning_rate', type=float, default=1e-4, help='Learning rate to use for training.', )
    parser.add_argument(
        '--weight_decay_rate', type=float, default=1e-3, help='Weight decay rate to use for training.', )
    parser.add_argument(
        '--max_length', type=int, default=512, help='Maximum length of tokenized sequences. Should match the setting used in prepare_tfrecord_shards.py', )
    parser.add_argument(
        '--mlm_probability', type=float, default=0.15, help='Fraction of tokens to mask during training.', )
    parser.add_argument('--output_dir', type=str, required=True, help='Path to save model checkpoints to.')
    parser.add_argument('--hub_model_id', type=str, help='Model ID to upload to on the Hugging Face Hub.')
    args = parser.parse_args()
    return args
def initialize_tpu(args):
    try:
        if args.tpu_name:
            tpu = tf.distribute.cluster_resolver.TPUClusterResolver(
                args.tpu_name, zone=args.tpu_zone, project=args.gcp_project)
        else:
            tpu = tf.distribute.cluster_resolver.TPUClusterResolver()
    except ValueError:
        raise RuntimeError(
            'Couldn\'t connect to TPU! Most likely you need to specify --tpu_name, --tpu_zone, or '
            '--gcp_project. When running on a TPU VM, use --tpu_name local.')

    tf.config.experimental_connect_to_cluster(tpu)
    tf.tpu.experimental.initialize_tpu_system(tpu)
    return tpu
def count_samples(file_list):
    num_samples = 0
    for file in file_list:
        filename = file.split('/')[-1]
        sample_count = re.search(r'-\d+-(\d+)\.tfrecord', filename).group(1)
        sample_count = int(sample_count)
        num_samples += sample_count
    return num_samples
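# Worked example (illustrative) of the shard-name convention `count_samples`
# relies on: files are assumed to be named "...-<shard>-<num_samples>.tfrecord",
# so the sample count is parsed from the filename rather than read from disk.
def _demo_count_samples_regex():
    filename = 'train-00003-1024.tfrecord'  # hypothetical shard name
    return int(re.search(r'-\d+-(\d+)\.tfrecord', filename).group(1))  # -> 1024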
def prepare_dataset(records, decode_fn, mask_fn, batch_size, shuffle, shuffle_buffer_size=None):
    num_samples = count_samples(records)
    dataset = tf.data.Dataset.from_tensor_slices(records)
    if shuffle:
        dataset = dataset.shuffle(len(dataset))
    dataset = tf.data.TFRecordDataset(dataset, num_parallel_reads=AUTO)
    # TF can't infer the total sample count because it doesn't read all the records yet, so we assert it here
    dataset = dataset.apply(tf.data.experimental.assert_cardinality(num_samples))
    dataset = dataset.map(decode_fn, num_parallel_calls=AUTO)
    if shuffle:
        assert shuffle_buffer_size is not None
        dataset = dataset.shuffle(args.shuffle_buffer_size)
    dataset = dataset.batch(batch_size, drop_remainder=True)
    dataset = dataset.map(mask_fn, num_parallel_calls=AUTO)
    dataset = dataset.prefetch(AUTO)
    return dataset
def main(args):
    if not args.no_tpu:
        tpu = initialize_tpu(args)
        strategy = tf.distribute.TPUStrategy(tpu)
    else:
        strategy = tf.distribute.OneDeviceStrategy(device='/gpu:0')

    if args.bfloat16:
        tf.keras.mixed_precision.set_global_policy('mixed_bfloat16')

    tokenizer = AutoTokenizer.from_pretrained(args.tokenizer)
    config = AutoConfig.from_pretrained(args.pretrained_model_config)
    config.vocab_size = tokenizer.vocab_size

    training_records = tf.io.gfile.glob(os.path.join(args.train_dataset, '*.tfrecord'))
    if not training_records:
        raise ValueError(f'''No .tfrecord files found in {args.train_dataset}.''')
    eval_records = tf.io.gfile.glob(os.path.join(args.eval_dataset, '*.tfrecord'))
    if not eval_records:
        raise ValueError(f'''No .tfrecord files found in {args.eval_dataset}.''')

    num_train_samples = count_samples(training_records)
    steps_per_epoch = num_train_samples // (args.per_replica_batch_size * strategy.num_replicas_in_sync)
    total_train_steps = steps_per_epoch * args.num_epochs

    with strategy.scope():
        model = TFAutoModelForMaskedLM.from_config(config)
        model(model.dummy_inputs)  # Pass some dummy inputs through the model to ensure all the weights are built
        optimizer, schedule = create_optimizer(
            num_train_steps=total_train_steps, num_warmup_steps=total_train_steps // 20, init_lr=args.learning_rate, weight_decay_rate=args.weight_decay_rate, )
        # Transformers models compute the right loss for their task by default when labels are passed, and will
        # use this for training unless you specify your own loss function in compile().
        model.compile(optimizer=optimizer, metrics=['accuracy'])

    def decode_fn(example):
        features = {
            'input_ids': tf.io.FixedLenFeature(dtype=tf.int64, shape=(args.max_length,)),
            'attention_mask': tf.io.FixedLenFeature(dtype=tf.int64, shape=(args.max_length,)),
        }
        return tf.io.parse_single_example(example, features)

    # Many of the data collators in Transformers are TF-compilable when return_tensors == "tf", so we can
    # use their methods in our data pipeline.
    data_collator = DataCollatorForLanguageModeling(
        tokenizer=tokenizer, mlm_probability=args.mlm_probability, mlm=True, return_tensors='tf')

    def mask_with_collator(batch):
        # TF really needs an isin() function
        special_tokens_mask = (
            ~tf.cast(batch['attention_mask'], tf.bool)
            | (batch['input_ids'] == tokenizer.cls_token_id)
            | (batch['input_ids'] == tokenizer.sep_token_id)
        )
        batch['input_ids'], batch['labels'] = data_collator.tf_mask_tokens(
            batch['input_ids'], vocab_size=len(tokenizer), mask_token_id=tokenizer.mask_token_id, special_tokens_mask=special_tokens_mask, )
        return batch

    batch_size = args.per_replica_batch_size * strategy.num_replicas_in_sync
    train_dataset = prepare_dataset(
        training_records, decode_fn=decode_fn, mask_fn=mask_with_collator, batch_size=batch_size, shuffle=True, shuffle_buffer_size=args.shuffle_buffer_size, )
    eval_dataset = prepare_dataset(
        eval_records, decode_fn=decode_fn, mask_fn=mask_with_collator, batch_size=batch_size, shuffle=False, )

    callbacks = []
    if args.hub_model_id:
        callbacks.append(
            PushToHubCallback(output_dir=args.output_dir, hub_model_id=args.hub_model_id, tokenizer=tokenizer))
    model.fit(
        train_dataset, validation_data=eval_dataset, epochs=args.num_epochs, callbacks=callbacks, )

    model.save_pretrained(args.output_dir)
if __name__ == "__main__":
    args = parse_args()
    main(args)
| 447 |
'''simple docstring'''
import os
import shutil
import tempfile
import unittest
import numpy as np
from transformers import AutoTokenizer, BarkProcessor
from transformers.testing_utils import require_torch, slow
@require_torch
class lowercase_ ( unittest.TestCase ):
'''simple docstring'''
    def setUp(self):
        """simple docstring"""
        self.checkpoint = 'ylacombe/bark-small'
        self.tmpdirname = tempfile.mkdtemp()
        self.voice_preset = 'en_speaker_1'
        self.input_string = 'This is a test string'
        self.speaker_embeddings_dict_path = 'speaker_embeddings_path.json'
        self.speaker_embeddings_directory = 'speaker_embeddings'
    def get_tokenizer(self, **kwargs):
        """simple docstring"""
        return AutoTokenizer.from_pretrained(self.checkpoint, **kwargs)

    def tearDown(self):
        """simple docstring"""
        shutil.rmtree(self.tmpdirname)
    def test_save_load_pretrained_default(self):
        """simple docstring"""
        tokenizer = self.get_tokenizer()
        processor = BarkProcessor(tokenizer=tokenizer)
        processor.save_pretrained(self.tmpdirname)
        processor = BarkProcessor.from_pretrained(self.tmpdirname)
        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer.get_vocab())
    @slow
    def test_save_load_pretrained_additional_features(self):
        """simple docstring"""
        processor = BarkProcessor.from_pretrained(
            pretrained_processor_name_or_path=self.checkpoint, speaker_embeddings_dict_path=self.speaker_embeddings_dict_path, )
        processor.save_pretrained(
            self.tmpdirname, speaker_embeddings_dict_path=self.speaker_embeddings_dict_path, speaker_embeddings_directory=self.speaker_embeddings_directory, )
        tokenizer_add_kwargs = self.get_tokenizer(bos_token='(BOS)', eos_token='(EOS)')
        processor = BarkProcessor.from_pretrained(
            self.tmpdirname, self.speaker_embeddings_dict_path, bos_token='(BOS)', eos_token='(EOS)', )
        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
    def test_speaker_embeddings(self):
        """simple docstring"""
        processor = BarkProcessor.from_pretrained(
            pretrained_processor_name_or_path=self.checkpoint, speaker_embeddings_dict_path=self.speaker_embeddings_dict_path, )
        seq_len = 35
        nb_codebooks_coarse = 2
        nb_codebooks_total = 8
        voice_preset = {
            'semantic_prompt': np.ones(seq_len),
            'coarse_prompt': np.ones((nb_codebooks_coarse, seq_len)),
            'fine_prompt': np.ones((nb_codebooks_total, seq_len)),
        }
        # test providing already loaded voice_preset
        inputs = processor(text=self.input_string, voice_preset=voice_preset)
        processed_voice_preset = inputs['history_prompt']
        for key in voice_preset:
            self.assertListEqual(voice_preset[key].tolist(), processed_voice_preset.get(key, np.array([])).tolist())
        # test loading voice preset from npz file
        tmpfilename = os.path.join(self.tmpdirname, 'file.npz')
        np.savez(tmpfilename, **voice_preset)
        inputs = processor(text=self.input_string, voice_preset=tmpfilename)
        processed_voice_preset = inputs['history_prompt']
        for key in voice_preset:
            self.assertListEqual(voice_preset[key].tolist(), processed_voice_preset.get(key, np.array([])).tolist())
        # test loading voice preset from the hub
        inputs = processor(text=self.input_string, voice_preset=self.voice_preset)
def snake_case_ ( self ) -> List[Any]:
"""simple docstring"""
UpperCAmelCase = self.get_tokenizer()
UpperCAmelCase = BarkProcessor(tokenizer=a_ )
UpperCAmelCase = processor(text=self.input_string )
UpperCAmelCase = tokenizer(
self.input_string , padding='max_length' , max_length=2_5_6 , add_special_tokens=a_ , return_attention_mask=a_ , return_token_type_ids=a_ , )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key].squeeze().tolist() )
| 447 | 1 |
import requests
from bs4 import BeautifulSoup


def world_covid19_stats(url: str = "https://www.worldometers.info/coronavirus") -> dict:
    """Scrape the key/value statistics from the Worldometers coronavirus page."""
    soup = BeautifulSoup(requests.get(url).text, "html.parser")
    keys = soup.findAll("h1")
    values = soup.findAll("div", {"class": "maincounter-number"})
    keys += soup.findAll("span", {"class": "panel-title"})
    values += soup.findAll("div", {"class": "number-table-main"})
    return {key.text.strip(): value.text.strip() for key, value in zip(keys, values)}


if __name__ == "__main__":
    print("\033[1m" + "COVID-19 Status of the World" + "\033[0m\n")
    for key, value in world_covid19_stats().items():
        print(f"{key}\n{value}\n")
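# Offline sketch of the same scraping pattern (illustrative; the HTML fragment
# below is made up and no network call is made): headings become keys and
# counter divs become values, zipped positionally.
_demo = BeautifulSoup(
    "<h1>Cases:</h1><div class='maincounter-number'> 1,000 </div>", "html.parser"
)
assert {
    k.text.strip(): v.text.strip()
    for k, v in zip(_demo.findAll("h1"), _demo.findAll("div", {"class": "maincounter-number"}))
} == {"Cases:": "1,000"}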
| 700 |
def power(base: int, exponent: int) -> float:
    """Raise base to the power of exponent, recursively (non-negative exponents only)."""
    return base * power(base, exponent - 1) if exponent else 1


if __name__ == "__main__":
    print("Raise base to the power of exponent using recursion...")
    base = int(input("Enter the base: ").strip())
    exponent = int(input("Enter the exponent: ").strip())
    result = power(base, abs(exponent))
    if exponent < 0:  # power() does not properly deal w/ negative exponents
        result = 1 / result
    print(f"{base} to the power of {exponent} is {result}")
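# Iterative variant that handles negative exponents directly (an illustrative
# alternative sketch, not part of the original module).
def power_iterative(base: int, exponent: int) -> float:
    result = 1.0
    for _ in range(abs(exponent)):
        result *= base
    return 1 / result if exponent < 0 else result


assert power_iterative(3, 4) == 81 and power_iterative(2, -3) == 0.125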
| 589 | 0 |
"""Implementation of the Gabor filter, used here for edge detection."""
import numpy as np
from cv2 import COLOR_BGR2GRAY, CV_8UC3, cvtColor, filter2D, imread, imshow, waitKey


def gabor_filter_kernel(
    ksize: int, sigma: int, theta: int, lambd: int, gamma: int, psi: int
) -> np.ndarray:
    # prepare kernel: the kernel size has to be odd
    if (ksize % 2) == 0:
        ksize = ksize + 1
    gabor = np.zeros((ksize, ksize), dtype=np.float32)

    # fill each value
    for y in range(ksize):
        for x in range(ksize):
            # distance from center
            px = x - ksize // 2
            py = y - ksize // 2

            # degree to radiant
            _theta = theta / 180 * np.pi
            cos_theta = np.cos(_theta)
            sin_theta = np.sin(_theta)

            # get kernel x
            _x = cos_theta * px + sin_theta * py

            # get kernel y
            _y = -sin_theta * px + cos_theta * py

            # fill kernel
            gabor[y, x] = np.exp(
                -(_x**2 + gamma**2 * _y**2) / (2 * sigma**2)
            ) * np.cos(2 * np.pi * _x / lambd + psi)

    return gabor


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    # read original image
    img = imread("../image_data/lena.jpg")
    # turn image in gray scale value
    gray = cvtColor(img, COLOR_BGR2GRAY)
    # apply multiple kernels to detect edges
    out = np.zeros(gray.shape[:2])
    for theta in [0, 30, 60, 90, 120, 150]:
        kernel_10 = gabor_filter_kernel(10, 8, theta, 10, 0, 0)
        out += filter2D(gray, CV_8UC3, kernel_10)
    out = out / out.max() * 255
    out = out.astype(np.uint8)
    imshow("Original", gray)
    imshow("Gabor filter with 20x20 mask and 6 directions", out)
    waitKey(0)
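# Small sanity check (illustrative sketch, not part of the original module):
# with psi = 0 the kernel centre pixel is exp(0) * cos(0) = 1, regardless of
# the other parameters.
_k = gabor_filter_kernel(3, sigma=8, theta=0, lambd=10, gamma=0, psi=0)
assert _k.shape == (3, 3) and _k[1, 1] == 1.0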
| 42 |
import math


def solution(n: int = 100) -> int:
    """Return the difference between the square of the sum and the sum of the
    squares of the first n natural numbers (Project Euler Problem 6)."""
    sum_of_squares = sum(i * i for i in range(1, n + 1))
    square_of_sum = int(math.pow(sum(range(1, n + 1)), 2))
    return square_of_sum - sum_of_squares


if __name__ == "__main__":
    print(f"{solution() = }")
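# Closed-form cross-check (illustrative sketch, not part of the original
# module): the sum of 1..n is n(n+1)/2 and the sum of squares is
# n(n+1)(2n+1)/6, so the same answer follows without a loop.
def solution_closed_form(n: int = 100) -> int:
    sum_n = n * (n + 1) // 2
    sum_sq = n * (n + 1) * (2 * n + 1) // 6
    return sum_n**2 - sum_sq


assert solution_closed_form(10) == 2640 == solution(10)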
| 651 | 0 |
import flax.linen as nn
import jax.numpy as jnp
from .attention_flax import FlaxTransformeraDModel
from .resnet_flax import FlaxDownsampleaD, FlaxResnetBlockaD, FlaxUpsampleaD
class FlaxCrossAttnDownBlock2D(nn.Module):
    # class and field names reconstructed from how setup() uses them below
    in_channels: int
    out_channels: int
    dropout: float = 0.0
    num_layers: int = 1
    num_attention_heads: int = 1
    add_downsample: bool = True
    use_linear_projection: bool = False
    only_cross_attention: bool = False
    use_memory_efficient_attention: bool = False
    dtype: jnp.dtype = jnp.float32

    def setup(self):
'''simple docstring'''
UpperCAmelCase_ = []
UpperCAmelCase_ = []
for i in range(self.num_layers ):
UpperCAmelCase_ = self.in_channels if i == 0 else self.out_channels
UpperCAmelCase_ = FlaxResnetBlockaD(
in_channels=lowerCAmelCase , out_channels=self.out_channels , dropout_prob=self.dropout , dtype=self.dtype , )
resnets.append(lowerCAmelCase )
UpperCAmelCase_ = FlaxTransformeraDModel(
in_channels=self.out_channels , n_heads=self.num_attention_heads , d_head=self.out_channels // self.num_attention_heads , depth=1 , use_linear_projection=self.use_linear_projection , only_cross_attention=self.only_cross_attention , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , )
attentions.append(lowerCAmelCase )
UpperCAmelCase_ = resnets
UpperCAmelCase_ = attentions
if self.add_downsample:
UpperCAmelCase_ = FlaxDownsampleaD(self.out_channels , dtype=self.dtype )
def __call__( self : Union[str, Any] , lowerCAmelCase : str , lowerCAmelCase : Tuple , lowerCAmelCase : Union[str, Any] , lowerCAmelCase : int=True ):
'''simple docstring'''
UpperCAmelCase_ = ()
for resnet, attn in zip(self.resnets , self.attentions ):
UpperCAmelCase_ = resnet(lowerCAmelCase , lowerCAmelCase , deterministic=lowerCAmelCase )
UpperCAmelCase_ = attn(lowerCAmelCase , lowerCAmelCase , deterministic=lowerCAmelCase )
output_states += (hidden_states,)
if self.add_downsample:
UpperCAmelCase_ = self.downsamplers_a(lowerCAmelCase )
output_states += (hidden_states,)
return hidden_states, output_states
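# Shape-contract note (an illustrative summary, not from the original source):
# each down block returns its final hidden state plus a tuple of per-layer
# states; the matching up block later pops those states and concatenates them
# channel-wise with its own input (the classic UNet skip connection) before
# every resnet.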
class FlaxDownBlock2D(nn.Module):
    in_channels: int
    out_channels: int
    dropout: float = 0.0
    num_layers: int = 1
    add_downsample: bool = True
    dtype: jnp.dtype = jnp.float32

    def setup(self):
'''simple docstring'''
UpperCAmelCase_ = []
for i in range(self.num_layers ):
UpperCAmelCase_ = self.in_channels if i == 0 else self.out_channels
UpperCAmelCase_ = FlaxResnetBlockaD(
in_channels=lowerCAmelCase , out_channels=self.out_channels , dropout_prob=self.dropout , dtype=self.dtype , )
resnets.append(lowerCAmelCase )
UpperCAmelCase_ = resnets
if self.add_downsample:
UpperCAmelCase_ = FlaxDownsampleaD(self.out_channels , dtype=self.dtype )
def __call__( self : Union[str, Any] , lowerCAmelCase : List[str] , lowerCAmelCase : List[Any] , lowerCAmelCase : int=True ):
'''simple docstring'''
UpperCAmelCase_ = ()
for resnet in self.resnets:
UpperCAmelCase_ = resnet(lowerCAmelCase , lowerCAmelCase , deterministic=lowerCAmelCase )
output_states += (hidden_states,)
if self.add_downsample:
UpperCAmelCase_ = self.downsamplers_a(lowerCAmelCase )
output_states += (hidden_states,)
return hidden_states, output_states
class FlaxCrossAttnUpBlock2D(nn.Module):
    in_channels: int
    out_channels: int
    prev_output_channel: int
    dropout: float = 0.0
    num_layers: int = 1
    num_attention_heads: int = 1
    add_upsample: bool = True
    use_linear_projection: bool = False
    only_cross_attention: bool = False
    use_memory_efficient_attention: bool = False
    dtype: jnp.dtype = jnp.float32

    def setup(self):
'''simple docstring'''
UpperCAmelCase_ = []
UpperCAmelCase_ = []
for i in range(self.num_layers ):
UpperCAmelCase_ = self.in_channels if (i == self.num_layers - 1) else self.out_channels
UpperCAmelCase_ = self.prev_output_channel if i == 0 else self.out_channels
UpperCAmelCase_ = FlaxResnetBlockaD(
in_channels=resnet_in_channels + res_skip_channels , out_channels=self.out_channels , dropout_prob=self.dropout , dtype=self.dtype , )
resnets.append(lowerCAmelCase )
UpperCAmelCase_ = FlaxTransformeraDModel(
in_channels=self.out_channels , n_heads=self.num_attention_heads , d_head=self.out_channels // self.num_attention_heads , depth=1 , use_linear_projection=self.use_linear_projection , only_cross_attention=self.only_cross_attention , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , )
attentions.append(lowerCAmelCase )
UpperCAmelCase_ = resnets
UpperCAmelCase_ = attentions
if self.add_upsample:
UpperCAmelCase_ = FlaxUpsampleaD(self.out_channels , dtype=self.dtype )
def __call__( self : List[Any] , lowerCAmelCase : List[str] , lowerCAmelCase : Union[str, Any] , lowerCAmelCase : Optional[Any] , lowerCAmelCase : int , lowerCAmelCase : Any=True ):
'''simple docstring'''
for resnet, attn in zip(self.resnets , self.attentions ):
# pop res hidden states
UpperCAmelCase_ = res_hidden_states_tuple[-1]
UpperCAmelCase_ = res_hidden_states_tuple[:-1]
UpperCAmelCase_ = jnp.concatenate((hidden_states, res_hidden_states) , axis=-1 )
UpperCAmelCase_ = resnet(lowerCAmelCase , lowerCAmelCase , deterministic=lowerCAmelCase )
UpperCAmelCase_ = attn(lowerCAmelCase , lowerCAmelCase , deterministic=lowerCAmelCase )
if self.add_upsample:
UpperCAmelCase_ = self.upsamplers_a(lowerCAmelCase )
return hidden_states
class FlaxUpBlock2D(nn.Module):
    in_channels: int
    out_channels: int
    prev_output_channel: int
    dropout: float = 0.0
    num_layers: int = 1
    add_upsample: bool = True
    dtype: jnp.dtype = jnp.float32

    def setup(self):
'''simple docstring'''
UpperCAmelCase_ = []
for i in range(self.num_layers ):
UpperCAmelCase_ = self.in_channels if (i == self.num_layers - 1) else self.out_channels
UpperCAmelCase_ = self.prev_output_channel if i == 0 else self.out_channels
UpperCAmelCase_ = FlaxResnetBlockaD(
in_channels=resnet_in_channels + res_skip_channels , out_channels=self.out_channels , dropout_prob=self.dropout , dtype=self.dtype , )
resnets.append(lowerCAmelCase )
UpperCAmelCase_ = resnets
if self.add_upsample:
UpperCAmelCase_ = FlaxUpsampleaD(self.out_channels , dtype=self.dtype )
def __call__( self : Dict , lowerCAmelCase : List[Any] , lowerCAmelCase : Optional[Any] , lowerCAmelCase : Union[str, Any] , lowerCAmelCase : Optional[Any]=True ):
'''simple docstring'''
for resnet in self.resnets:
# pop res hidden states
UpperCAmelCase_ = res_hidden_states_tuple[-1]
UpperCAmelCase_ = res_hidden_states_tuple[:-1]
UpperCAmelCase_ = jnp.concatenate((hidden_states, res_hidden_states) , axis=-1 )
UpperCAmelCase_ = resnet(lowerCAmelCase , lowerCAmelCase , deterministic=lowerCAmelCase )
if self.add_upsample:
UpperCAmelCase_ = self.upsamplers_a(lowerCAmelCase )
return hidden_states
class FlaxUNetMidBlock2DCrossAttn(nn.Module):
    in_channels: int
    dropout: float = 0.0
    num_layers: int = 1
    num_attention_heads: int = 1
    use_linear_projection: bool = False
    use_memory_efficient_attention: bool = False
    dtype: jnp.dtype = jnp.float32

    def setup(self):
'''simple docstring'''
UpperCAmelCase_ = [
FlaxResnetBlockaD(
in_channels=self.in_channels , out_channels=self.in_channels , dropout_prob=self.dropout , dtype=self.dtype , )
]
UpperCAmelCase_ = []
for _ in range(self.num_layers ):
UpperCAmelCase_ = FlaxTransformeraDModel(
in_channels=self.in_channels , n_heads=self.num_attention_heads , d_head=self.in_channels // self.num_attention_heads , depth=1 , use_linear_projection=self.use_linear_projection , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , )
attentions.append(lowerCAmelCase )
UpperCAmelCase_ = FlaxResnetBlockaD(
in_channels=self.in_channels , out_channels=self.in_channels , dropout_prob=self.dropout , dtype=self.dtype , )
resnets.append(lowerCAmelCase )
UpperCAmelCase_ = resnets
UpperCAmelCase_ = attentions
def __call__( self : List[Any] , lowerCAmelCase : Dict , lowerCAmelCase : Optional[Any] , lowerCAmelCase : Tuple , lowerCAmelCase : Union[str, Any]=True ):
'''simple docstring'''
UpperCAmelCase_ = self.resnets[0](lowerCAmelCase , lowerCAmelCase )
for attn, resnet in zip(self.attentions , self.resnets[1:] ):
UpperCAmelCase_ = attn(lowerCAmelCase , lowerCAmelCase , deterministic=lowerCAmelCase )
UpperCAmelCase_ = resnet(lowerCAmelCase , lowerCAmelCase , deterministic=lowerCAmelCase )
        return hidden_states
| 268 |
from .imports import is_tqdm_available
if is_tqdm_available():
from tqdm.auto import tqdm as _tqdm
from ..state import PartialState
def tqdm(main_process_only: bool = True, *args, **kwargs):
    """Wrapper around tqdm.auto.tqdm that, by default, only renders a progress
    bar on the local main process."""
    if not is_tqdm_available():
        raise ImportError("Accelerate's `tqdm` module requires `tqdm` to be installed. Please run `pip install tqdm`.")
    disable = False
    if main_process_only:
        # disable the bar on every process except the local main one
        disable = PartialState().local_process_index != 0
    return _tqdm(*args, **kwargs, disable=disable)
| 268 | 1 |
def generate_large_matrix() -> list[list[int]]:
    return [list(range(1000 - i, -1000 - i, -1)) for i in range(1000)]


grid = generate_large_matrix()
test_grids = (
    [[4, 3, 2, -1], [3, 2, 1, -1], [1, 1, -1, -2], [-1, -1, -2, -3]],
    [[3, 2], [1, 0]],
    [[7, 7, 6]],
    [[7, 7, 6], [-1, -2, -3]],
    grid,
)


def validate_grid(grid: list[list[int]]) -> None:
    # every row and every column must be sorted in decreasing order
    assert all(row == sorted(row, reverse=True) for row in grid)
    assert all(list(col) == sorted(col, reverse=True) for col in zip(*grid))


def find_negative_index(array: list[int]) -> int:
    """Binary search for the index of the first negative number in a
    decreasingly sorted array."""
    left = 0
    right = len(array) - 1
    # edge cases such as no values or all numbers are negative
    if not array or array[0] < 0:
        return 0
    while right + 1 > left:
        mid = (left + right) // 2
        num = array[mid]
        # num must be negative and the previous entry must be non-negative
        if num < 0 and array[mid - 1] >= 0:
            return mid
        if num >= 0:
            left = mid + 1
        else:
            right = mid - 1
    # no negative numbers, so return the last index of the array + 1 (its length)
    return len(array)


def count_negatives_binary_search(grid: list[list[int]]) -> int:
    total = 0
    bound = len(grid[0])
    for i in range(len(grid)):
        bound = find_negative_index(grid[i][:bound])
        total += bound
    return (len(grid) * len(grid[0])) - total


def count_negatives_brute_force(grid: list[list[int]]) -> int:
    return len([number for row in grid for number in row if number < 0])


def count_negatives_brute_force_with_break(grid: list[list[int]]) -> int:
    total = 0
    for row in grid:
        for i, number in enumerate(row):
            if number < 0:
                total += len(row) - i
                break
    return total


def benchmark() -> None:
    from timeit import timeit

    print("Running benchmarks")
    setup = (
        "from __main__ import count_negatives_binary_search, "
        "count_negatives_brute_force, count_negatives_brute_force_with_break, grid"
    )
    for func in (
        "count_negatives_binary_search",  # took 0.7727 seconds
        "count_negatives_brute_force_with_break",  # took 4.6505 seconds
        "count_negatives_brute_force",  # took 12.8160 seconds
    ):
        time = timeit(f"{func}(grid=grid)", setup=setup, number=500)
        print(f"{func}() took {time:0.4f} seconds")


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    benchmark()
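# Worked example (illustrative sketch, not part of the original module): in
# this grid each row's first-negative index can only move left as we go down,
# which is why `bound` can shrink monotonically in count_negatives_binary_search.
_example = [[4, 3, 2, -1], [3, 2, 1, -1], [1, 1, -1, -2], [-1, -1, -2, -3]]
assert count_negatives_binary_search(_example) == 8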
| 190 |
def is_pangram(
    input_str: str = "The quick brown fox jumps over the lazy dog",
) -> bool:
    frequency = set()
    # replace all the whitespace in our sentence
    input_str = input_str.replace(" ", "")
    for alpha in input_str:
        if "a" <= alpha.lower() <= "z":
            frequency.add(alpha.lower())
    return len(frequency) == 26


def is_pangram_faster(
    input_str: str = "The quick brown fox jumps over the lazy dog",
) -> bool:
    flag = [False] * 26
    for char in input_str:
        # the index expressions below are reconstructions; the original
        # assignment targets were elided
        if char.islower():
            flag[ord(char) - ord("a")] = True
        elif char.isupper():
            flag[ord(char) - ord("A")] = True
    return all(flag)


def is_pangram_fastest(
    input_str: str = "The quick brown fox jumps over the lazy dog",
) -> bool:
    return len({char for char in input_str.lower() if char.isalpha()}) == 26


def benchmark() -> None:
    """Benchmark code comparing the different versions."""
    from timeit import timeit

    setup = "from __main__ import is_pangram, is_pangram_faster, is_pangram_fastest"
    print(timeit("is_pangram()", setup=setup))
    print(timeit("is_pangram_faster()", setup=setup))
    print(timeit("is_pangram_fastest()", setup=setup))
    # 5.348480500048026, 2.6477354579837993, 1.8470395830227062
    # 5.036091582966037, 2.644472333951853, 1.8869528750656173


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    benchmark()
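# Quick cross-check of all three variants (illustrative sketch, not part of
# the original module).
assert is_pangram() and is_pangram_faster() and is_pangram_fastest()
assert not is_pangram("Hello world")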
| 190 | 1 |
"""Guess a hidden number inside a range by repeatedly halving the interval."""


def pick_range_bound(min_val: int = 10, max_val: int = 1000, option: bool = True) -> int:
    # helper returning one of the two bounds; the descriptive names in this
    # function are reconstructions, the original identifiers were elided
    assert (
        isinstance(min_val, int) and isinstance(max_val, int) and isinstance(option, bool)
    ), "Invalid type of value(s) specified to function!"
    if min_val > max_val:
        raise ValueError("Invalid value for min_val or max_val (min_value < max_value)")
    return min_val if option else max_val


def get_avg(number_1: int, number_2: int) -> int:
    return int((number_1 + number_2) / 2)


def guess_the_number(lower: int, higher: int, to_guess: int) -> None:
    assert (
        isinstance(lower, int) and isinstance(higher, int) and isinstance(to_guess, int)
    ), 'argument values must be type of "int"'
    if lower > higher:
        raise ValueError("argument value for lower and higher must be(lower > higher)")
    if not lower < to_guess < higher:
        raise ValueError("guess value must be within the range of lower and higher value")

    def answer(number: int) -> str:
        if number > to_guess:
            return "high"
        elif number < to_guess:
            return "low"
        else:
            return "same"

    print("started...")
    last_lowest = lower
    last_highest = higher
    last_numbers = []
    while True:
        number = get_avg(last_lowest, last_highest)
        last_numbers.append(number)
        if answer(number) == "low":
            last_lowest = number
        elif answer(number) == "high":
            last_highest = number
        else:
            break
    print(f"guess the number : {last_numbers[-1]}")
    print(f"details : {last_numbers!s}")


def main() -> None:
    lower = int(input("Enter lower value : ").strip())
    higher = int(input("Enter high value : ").strip())
    guess = int(input("Enter value to guess : ").strip())
    guess_the_number(lower, higher, guess)


if __name__ == "__main__":
    main()
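# Non-interactive sketch (illustrative): probing for 17 between 10 and 1000
# halves the interval on every step, so guess_the_number(10, 1000, 17) prints
# the probe sequence [505, 257, 133, 71, 40, 25, 17] before stopping.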
| 350 |
'''simple docstring'''
import unittest
from transformers import PegasusTokenizer, PegasusTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, require_torch, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir('fixtures/test_sentencepiece_no_bos.model')
@require_sentencepiece
@require_tokenizers
class PegasusTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = PegasusTokenizer
    rust_tokenizer_class = PegasusTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True
def UpperCAmelCase ( self ):
'''simple docstring'''
super().setUp()
# We have a SentencePiece fixture for testing
        tokenizer = PegasusTokenizer(SAMPLE_VOCAB)
tokenizer.save_pretrained(self.tmpdirname )
@cached_property
def UpperCAmelCase ( self ):
'''simple docstring'''
return PegasusTokenizer.from_pretrained('''google/pegasus-large''' )
def UpperCAmelCase ( self , **lowerCamelCase__ ):
'''simple docstring'''
return PegasusTokenizer.from_pretrained(self.tmpdirname , **lowerCamelCase__ )
def UpperCAmelCase ( self , lowerCamelCase__ ):
'''simple docstring'''
return ("This is a test", "This is a test")
def UpperCAmelCase ( self ):
'''simple docstring'''
UpperCamelCase = '''</s>'''
UpperCamelCase = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(lowerCamelCase__ ) , lowerCamelCase__ )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(lowerCamelCase__ ) , lowerCamelCase__ )
def UpperCAmelCase ( self ):
'''simple docstring'''
UpperCamelCase = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , '''<pad>''' )
self.assertEqual(vocab_keys[1] , '''</s>''' )
self.assertEqual(vocab_keys[-1] , '''v''' )
self.assertEqual(len(lowerCamelCase__ ) , 1_1_0_3 )
def UpperCAmelCase ( self ):
'''simple docstring'''
self.assertEqual(self.get_tokenizer().vocab_size , 1_1_0_3 )
def UpperCAmelCase ( self ):
'''simple docstring'''
UpperCamelCase = self.rust_tokenizer_class.from_pretrained(self.tmpdirname )
UpperCamelCase = self.tokenizer_class.from_pretrained(self.tmpdirname )
UpperCamelCase = (
'''Let\'s see which <unk> is the better <unk_token_11> one <mask_1> It seems like this <mask_2> was important'''
''' </s> <pad> <pad> <pad>'''
)
UpperCamelCase = rust_tokenizer([raw_input_str] , return_tensors=lowerCamelCase__ , add_special_tokens=lowerCamelCase__ ).input_ids[0]
UpperCamelCase = py_tokenizer([raw_input_str] , return_tensors=lowerCamelCase__ , add_special_tokens=lowerCamelCase__ ).input_ids[0]
self.assertListEqual(lowerCamelCase__ , lowerCamelCase__ )
def UpperCAmelCase ( self ):
'''simple docstring'''
UpperCamelCase = self._large_tokenizer
# <mask_1> masks whole sentence while <mask_2> masks single word
UpperCamelCase = '''<mask_1> To ensure a <mask_2> flow of bank resolutions.'''
UpperCamelCase = [2, 4_1_3, 6_1_5, 1_1_4, 3, 1_9_7_1, 1_1_3, 1_6_7_9, 1_0_7_1_0, 1_0_7, 1]
UpperCamelCase = tokenizer([raw_input_str] , return_tensors=lowerCamelCase__ ).input_ids[0]
self.assertListEqual(lowerCamelCase__ , lowerCamelCase__ )
def UpperCAmelCase ( self ):
'''simple docstring'''
UpperCamelCase = self._large_tokenizer
# The tracebacks for the following asserts are **better** without messages or self.assertEqual
assert tokenizer.vocab_size == 9_6_1_0_3
assert tokenizer.pad_token_id == 0
assert tokenizer.eos_token_id == 1
assert tokenizer.offset == 1_0_3
assert tokenizer.unk_token_id == tokenizer.offset + 2 == 1_0_5
assert tokenizer.unk_token == "<unk>"
assert tokenizer.model_max_length == 1_0_2_4
UpperCamelCase = '''To ensure a smooth flow of bank resolutions.'''
UpperCamelCase = [4_1_3, 6_1_5, 1_1_4, 2_2_9_1, 1_9_7_1, 1_1_3, 1_6_7_9, 1_0_7_1_0, 1_0_7, 1]
UpperCamelCase = tokenizer([raw_input_str] , return_tensors=lowerCamelCase__ ).input_ids[0]
self.assertListEqual(lowerCamelCase__ , lowerCamelCase__ )
assert tokenizer.convert_ids_to_tokens([0, 1, 2, 3] ) == ["<pad>", "</s>", "<mask_1>", "<mask_2>"]
@require_torch
def UpperCAmelCase ( self ):
'''simple docstring'''
UpperCamelCase = ['''This is going to be way too long.''' * 1_5_0, '''short example''']
UpperCamelCase = ['''not super long but more than 5 tokens''', '''tiny''']
UpperCamelCase = self._large_tokenizer(lowerCamelCase__ , padding=lowerCamelCase__ , truncation=lowerCamelCase__ , return_tensors='''pt''' )
UpperCamelCase = self._large_tokenizer(
text_target=lowerCamelCase__ , max_length=5 , padding=lowerCamelCase__ , truncation=lowerCamelCase__ , return_tensors='''pt''' )
assert batch.input_ids.shape == (2, 1_0_2_4)
assert batch.attention_mask.shape == (2, 1_0_2_4)
assert targets["input_ids"].shape == (2, 5)
assert len(lowerCamelCase__ ) == 2 # input_ids, attention_mask.
@slow
def UpperCAmelCase ( self ):
'''simple docstring'''
UpperCamelCase = {'''input_ids''': [[3_8_9_7_9, 1_4_3, 1_8_4_8_5, 6_0_6, 1_3_0, 2_6_6_6_9, 8_7_6_8_6, 1_2_1, 5_4_1_8_9, 1_1_2_9, 1_1_1, 2_6_6_6_9, 8_7_6_8_6, 1_2_1, 9_1_1_4, 1_4_7_8_7, 1_2_1, 1_3_2_4_9, 1_5_8, 5_9_2, 9_5_6, 1_2_1, 1_4_6_2_1, 3_1_5_7_6, 1_4_3, 6_2_6_1_3, 1_0_8, 9_6_8_8, 9_3_0, 4_3_4_3_0, 1_1_5_6_2, 6_2_6_1_3, 3_0_4, 1_0_8, 1_1_4_4_3, 8_9_7, 1_0_8, 9_3_1_4, 1_7_4_1_5, 6_3_3_9_9, 1_0_8, 1_1_4_4_3, 7_6_1_4, 1_8_3_1_6, 1_1_8, 4_2_8_4, 7_1_4_8, 1_2_4_3_0, 1_4_3, 1_4_0_0, 2_5_7_0_3, 1_5_8, 1_1_1, 4_2_8_4, 7_1_4_8, 1_1_7_7_2, 1_4_3, 2_1_2_9_7, 1_0_6_4, 1_5_8, 1_2_2, 2_0_4, 3_5_0_6, 1_7_5_4, 1_1_3_3, 1_4_7_8_7, 1_5_8_1, 1_1_5, 3_3_2_2_4, 4_4_8_2, 1_1_1, 1_3_5_5, 1_1_0, 2_9_1_7_3, 3_1_7, 5_0_8_3_3, 1_0_8, 2_0_1_4_7, 9_4_6_6_5, 1_1_1, 7_7_1_9_8, 1_0_7, 1], [1_1_0, 6_2_6_1_3, 1_1_7, 6_3_8, 1_1_2, 1_1_3_3, 1_2_1, 2_0_0_9_8, 1_3_5_5, 7_9_0_5_0, 1_3_8_7_2, 1_3_5, 1_5_9_6, 5_3_5_4_1, 1_3_5_2, 1_4_1, 1_3_0_3_9, 5_5_4_2, 1_2_4, 3_0_2, 5_1_8, 1_1_1, 2_6_8, 2_9_5_6, 1_1_5, 1_4_9, 4_4_2_7, 1_0_7, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1_3_9, 1_2_3_5, 2_7_9_9, 1_8_2_8_9, 1_7_7_8_0, 2_0_4, 1_0_9, 9_4_7_4, 1_2_9_6, 1_0_7, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=lowerCamelCase__ , model_name='''google/bigbird-pegasus-large-arxiv''' , revision='''ba85d0851d708441f91440d509690f1ab6353415''' , )
@require_sentencepiece
@require_tokenizers
class BigBirdPegasusTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = PegasusTokenizer
    rust_tokenizer_class = PegasusTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True
def UpperCAmelCase ( self ):
'''simple docstring'''
super().setUp()
# We have a SentencePiece fixture for testing
        tokenizer = PegasusTokenizer(SAMPLE_VOCAB , offset=0 , mask_token_sent=None , mask_token='[MASK]' )
tokenizer.save_pretrained(self.tmpdirname )
@cached_property
def UpperCAmelCase ( self ):
'''simple docstring'''
return PegasusTokenizer.from_pretrained('''google/bigbird-pegasus-large-arxiv''' )
def UpperCAmelCase ( self , **lowerCamelCase__ ):
'''simple docstring'''
return PegasusTokenizer.from_pretrained(self.tmpdirname , **lowerCamelCase__ )
def UpperCAmelCase ( self , lowerCamelCase__ ):
'''simple docstring'''
return ("This is a test", "This is a test")
def UpperCAmelCase ( self ):
'''simple docstring'''
UpperCamelCase = self.rust_tokenizer_class.from_pretrained(self.tmpdirname )
UpperCamelCase = self.tokenizer_class.from_pretrained(self.tmpdirname )
UpperCamelCase = (
'''Let\'s see which <unk> is the better <unk_token> one [MASK] It seems like this [MASK] was important </s>'''
''' <pad> <pad> <pad>'''
)
UpperCamelCase = rust_tokenizer([raw_input_str] , return_tensors=lowerCamelCase__ , add_special_tokens=lowerCamelCase__ ).input_ids[0]
UpperCamelCase = py_tokenizer([raw_input_str] , return_tensors=lowerCamelCase__ , add_special_tokens=lowerCamelCase__ ).input_ids[0]
self.assertListEqual(lowerCamelCase__ , lowerCamelCase__ )
@require_torch
def UpperCAmelCase ( self ):
'''simple docstring'''
UpperCamelCase = ['''This is going to be way too long.''' * 1_0_0_0, '''short example''']
UpperCamelCase = ['''not super long but more than 5 tokens''', '''tiny''']
UpperCamelCase = self._large_tokenizer(lowerCamelCase__ , padding=lowerCamelCase__ , truncation=lowerCamelCase__ , return_tensors='''pt''' )
UpperCamelCase = self._large_tokenizer(
text_target=lowerCamelCase__ , max_length=5 , padding=lowerCamelCase__ , truncation=lowerCamelCase__ , return_tensors='''pt''' )
assert batch.input_ids.shape == (2, 4_0_9_6)
assert batch.attention_mask.shape == (2, 4_0_9_6)
assert targets["input_ids"].shape == (2, 5)
assert len(lowerCamelCase__ ) == 2 # input_ids, attention_mask.
def UpperCAmelCase ( self ):
'''simple docstring'''
UpperCamelCase = (
'''This is an example string that is used to test the original TF implementation against the HF'''
''' implementation'''
)
UpperCamelCase = self._large_tokenizer(lowerCamelCase__ ).input_ids
self.assertListEqual(
lowerCamelCase__ , [1_8_2, 1_1_7, 1_4_2, 5_8_7, 4_2_1_1, 1_2_0, 1_1_7, 2_6_3, 1_1_2, 8_0_4, 1_0_9, 8_5_6, 2_5_0_1_6, 3_1_3_7, 4_6_4, 1_0_9, 2_6_9_5_5, 3_1_3_7, 1] , )
| 350 | 1 |
'''simple docstring'''
import gc
import tempfile
import unittest
import numpy as np
import torch
from diffusers import VersatileDiffusionTextToImagePipeline
from diffusers.utils.testing_utils import nightly, require_torch_gpu, torch_device
lowercase__ = False
class VersatileDiffusionTextToImagePipelineFastTests(unittest.TestCase):
    pass
@nightly
@require_torch_gpu
class VersatileDiffusionTextToImagePipelineIntegrationTests(unittest.TestCase):
def _lowercase ( self ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def _lowercase ( self ):
snake_case_ = VersatileDiffusionTextToImagePipeline.from_pretrained("shi-labs/versatile-diffusion" )
# remove text_unet
pipe.remove_unused_weights()
pipe.to(UpperCAmelCase_ )
pipe.set_progress_bar_config(disable=UpperCAmelCase_ )
snake_case_ = "A painting of a squirrel eating a burger "
snake_case_ = torch.manual_seed(0 )
snake_case_ = pipe(
prompt=UpperCAmelCase_ , generator=UpperCAmelCase_ , guidance_scale=7.5 , num_inference_steps=2 , output_type="numpy" ).images
with tempfile.TemporaryDirectory() as tmpdirname:
pipe.save_pretrained(UpperCAmelCase_ )
snake_case_ = VersatileDiffusionTextToImagePipeline.from_pretrained(UpperCAmelCase_ )
pipe.to(UpperCAmelCase_ )
pipe.set_progress_bar_config(disable=UpperCAmelCase_ )
snake_case_ = generator.manual_seed(0 )
snake_case_ = pipe(
prompt=UpperCAmelCase_ , generator=UpperCAmelCase_ , guidance_scale=7.5 , num_inference_steps=2 , output_type="numpy" ).images
assert np.abs(image - new_image ).sum() < 1e-5, "Models don't have the same forward pass"
def _lowercase ( self ):
snake_case_ = VersatileDiffusionTextToImagePipeline.from_pretrained(
"shi-labs/versatile-diffusion" , torch_dtype=torch.floataa )
pipe.to(UpperCAmelCase_ )
pipe.set_progress_bar_config(disable=UpperCAmelCase_ )
snake_case_ = "A painting of a squirrel eating a burger "
snake_case_ = torch.manual_seed(0 )
snake_case_ = pipe(
prompt=UpperCAmelCase_ , generator=UpperCAmelCase_ , guidance_scale=7.5 , num_inference_steps=50 , output_type="numpy" ).images
snake_case_ = image[0, 2_53:2_56, 2_53:2_56, -1]
assert image.shape == (1, 5_12, 5_12, 3)
snake_case_ = np.array([0.3_367, 0.3_169, 0.2_656, 0.3_870, 0.4_790, 0.3_796, 0.4_009, 0.4_878, 0.4_778] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
| 508 |
"""Sum the digits of 100! (Project Euler Problem 20)."""


def factorial(num: int) -> int:
    fact = 1
    for i in range(1, num + 1):
        fact *= i
    return fact


def split_and_add(number: int) -> int:
    """Split the digits of number and add them together."""
    sum_of_digits = 0
    while number > 0:
        last_digit = number % 10
        sum_of_digits += last_digit
        number = number // 10  # removing the last digit from the given number
    return sum_of_digits


def solution(num: int = 100) -> int:
    nfact = factorial(num)
    result = split_and_add(nfact)
    return result


if __name__ == "__main__":
    print(solution(int(input("Enter the Number: ").strip())))
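# Small worked case (illustrative sketch, not part of the original module):
# 10! = 3628800 and 3 + 6 + 2 + 8 + 8 + 0 + 0 = 27.
assert split_and_add(factorial(10)) == 27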
| 508 | 1 |
"""Project Euler Problem 47: find the first four consecutive integers to have
four distinct prime factors each."""
from functools import lru_cache


def unique_prime_factors(n: int) -> set:
    """Find the set of distinct prime factors of n."""
    i = 2
    factors = set()
    while i * i <= n:
        if n % i:
            i += 1
        else:
            n //= i
            factors.add(i)
    if n > 1:
        factors.add(n)
    return factors


@lru_cache
def upf_len(num: int) -> int:
    """Memoised length of the set of unique prime factors."""
    return len(unique_prime_factors(num))


def equality(iterable: list) -> bool:
    """Check if all elements in an iterable are equal."""
    return len(set(iterable)) in (0, 1)


def run(n: int) -> list:
    """Find the first run of n consecutive integers with n distinct prime
    factors each."""
    base = 2
    while True:
        # increment each value of a generated range
        group = [base + i for i in range(n)]
        # run the elements through our unique_prime_factors function
        # and append our target number to the end
        checker = [upf_len(x) for x in group]
        checker.append(n)
        # if all numbers in the list are equal, return the group variable
        if equality(checker):
            return group
        # increment our base variable by 1
        base += 1


def solution(n: int = 4) -> int | None:
    results = run(n)
    return results[0] if len(results) else None
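# Small sanity checks (illustrative sketch, not part of the original module):
# 14 = 2 * 7 and 15 = 3 * 5 form the first pair of consecutive integers with
# two distinct prime factors each.
assert run(2) == [14, 15]
assert upf_len(644) == 3  # 644 = 2**2 * 7 * 23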
if __name__ == "__main__":
    print(solution())
| 493 |
'''simple docstring'''
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import XLMRobertaTokenizerFast
from diffusers import DDIMScheduler, KandinskyImgaImgPipeline, KandinskyPriorPipeline, UNetaDConditionModel, VQModel
from diffusers.pipelines.kandinsky.text_encoder import MCLIPConfig, MultilingualCLIP
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class KandinskyImg2ImgPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = KandinskyImgaImgPipeline
    params = ["""prompt""", """image_embeds""", """negative_image_embeds""", """image"""]
    batch_params = [
"""prompt""",
"""negative_prompt""",
"""image_embeds""",
"""negative_image_embeds""",
"""image""",
]
    required_optional_params = [
"""generator""",
"""height""",
"""width""",
"""strength""",
"""guidance_scale""",
"""negative_prompt""",
"""num_inference_steps""",
"""return_dict""",
"""guidance_scale""",
"""num_images_per_prompt""",
"""output_type""",
"""return_dict""",
]
    test_xformers_attention = False
@property
def lowercase ( self ):
return 32
@property
def lowercase ( self ):
return 32
@property
def lowercase ( self ):
return self.time_input_dim
@property
def lowercase ( self ):
return self.time_input_dim * 4
@property
def lowercase ( self ):
return 100
@property
def lowercase ( self ):
_SCREAMING_SNAKE_CASE = XLMRobertaTokenizerFast.from_pretrained("YiYiXu/tiny-random-mclip-base" )
return tokenizer
@property
def lowercase ( self ):
torch.manual_seed(0 )
_SCREAMING_SNAKE_CASE = MCLIPConfig(
numDims=self.cross_attention_dim , transformerDimensions=self.text_embedder_hidden_size , hidden_size=self.text_embedder_hidden_size , intermediate_size=37 , num_attention_heads=4 , num_hidden_layers=5 , vocab_size=1_005 , )
_SCREAMING_SNAKE_CASE = MultilingualCLIP(UpperCamelCase )
_SCREAMING_SNAKE_CASE = text_encoder.eval()
return text_encoder
@property
def lowercase ( self ):
torch.manual_seed(0 )
_SCREAMING_SNAKE_CASE = {
"in_channels": 4,
# Out channels is double in channels because predicts mean and variance
"out_channels": 8,
"addition_embed_type": "text_image",
"down_block_types": ("ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D"),
"up_block_types": ("SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"),
"mid_block_type": "UNetMidBlock2DSimpleCrossAttn",
"block_out_channels": (self.block_out_channels_a, self.block_out_channels_a * 2),
"layers_per_block": 1,
"encoder_hid_dim": self.text_embedder_hidden_size,
"encoder_hid_dim_type": "text_image_proj",
"cross_attention_dim": self.cross_attention_dim,
"attention_head_dim": 4,
"resnet_time_scale_shift": "scale_shift",
"class_embed_type": None,
}
_SCREAMING_SNAKE_CASE = UNetaDConditionModel(**UpperCamelCase )
return model
@property
def lowercase ( self ):
return {
"block_out_channels": [32, 64],
"down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 12,
"out_channels": 3,
"up_block_types": [
"AttnUpDecoderBlock2D",
"UpDecoderBlock2D",
],
"vq_embed_dim": 4,
}
@property
def lowercase ( self ):
torch.manual_seed(0 )
_SCREAMING_SNAKE_CASE = VQModel(**self.dummy_movq_kwargs )
return model
def lowercase ( self ):
_SCREAMING_SNAKE_CASE = self.dummy_text_encoder
_SCREAMING_SNAKE_CASE = self.dummy_tokenizer
_SCREAMING_SNAKE_CASE = self.dummy_unet
_SCREAMING_SNAKE_CASE = self.dummy_movq
_SCREAMING_SNAKE_CASE = {
"num_train_timesteps": 1_000,
"beta_schedule": "linear",
"beta_start": 0.0_00_85,
"beta_end": 0.0_12,
"clip_sample": False,
"set_alpha_to_one": False,
"steps_offset": 0,
"prediction_type": "epsilon",
"thresholding": False,
}
_SCREAMING_SNAKE_CASE = DDIMScheduler(**UpperCamelCase )
_SCREAMING_SNAKE_CASE = {
"text_encoder": text_encoder,
"tokenizer": tokenizer,
"unet": unet,
"scheduler": scheduler,
"movq": movq,
}
return components
def lowercase ( self , UpperCamelCase , UpperCamelCase=0 ):
_SCREAMING_SNAKE_CASE = floats_tensor((1, self.cross_attention_dim) , rng=random.Random(UpperCamelCase ) ).to(UpperCamelCase )
_SCREAMING_SNAKE_CASE = floats_tensor((1, self.cross_attention_dim) , rng=random.Random(seed + 1 ) ).to(UpperCamelCase )
# create init_image
_SCREAMING_SNAKE_CASE = floats_tensor((1, 3, 64, 64) , rng=random.Random(UpperCamelCase ) ).to(UpperCamelCase )
_SCREAMING_SNAKE_CASE = image.cpu().permute(0 , 2 , 3 , 1 )[0]
_SCREAMING_SNAKE_CASE = Image.fromarray(np.uinta(UpperCamelCase ) ).convert("RGB" ).resize((256, 256) )
if str(UpperCamelCase ).startswith("mps" ):
_SCREAMING_SNAKE_CASE = torch.manual_seed(UpperCamelCase )
else:
_SCREAMING_SNAKE_CASE = torch.Generator(device=UpperCamelCase ).manual_seed(UpperCamelCase )
_SCREAMING_SNAKE_CASE = {
"prompt": "horse",
"image": init_image,
"image_embeds": image_embeds,
"negative_image_embeds": negative_image_embeds,
"generator": generator,
"height": 64,
"width": 64,
"num_inference_steps": 10,
"guidance_scale": 7.0,
"strength": 0.2,
"output_type": "np",
}
return inputs
def lowercase ( self ):
_SCREAMING_SNAKE_CASE = "cpu"
_SCREAMING_SNAKE_CASE = self.get_dummy_components()
_SCREAMING_SNAKE_CASE = self.pipeline_class(**UpperCamelCase )
_SCREAMING_SNAKE_CASE = pipe.to(UpperCamelCase )
pipe.set_progress_bar_config(disable=UpperCamelCase )
_SCREAMING_SNAKE_CASE = pipe(**self.get_dummy_inputs(UpperCamelCase ) )
_SCREAMING_SNAKE_CASE = output.images
_SCREAMING_SNAKE_CASE = pipe(
**self.get_dummy_inputs(UpperCamelCase ) , return_dict=UpperCamelCase , )[0]
_SCREAMING_SNAKE_CASE = image[0, -3:, -3:, -1]
_SCREAMING_SNAKE_CASE = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
_SCREAMING_SNAKE_CASE = np.array(
[0.61_47_49_43, 0.6_07_35_39, 0.43_30_85_44, 0.5_92_82_69, 0.47_49_35_95, 0.46_75_59_73, 0.4_61_38_38, 0.45_36_87_97, 0.50_11_92_33] )
assert (
np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
), F' expected_slice {expected_slice}, but got {image_slice.flatten()}'
assert (
np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
), F' expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}'
@slow
@require_torch_gpu
class KandinskyImg2ImgPipelineIntegrationTests(unittest.TestCase):
def lowercase ( self ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def lowercase ( self ):
_SCREAMING_SNAKE_CASE = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/kandinsky/kandinsky_img2img_frog.npy" )
_SCREAMING_SNAKE_CASE = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/kandinsky/cat.png" )
_SCREAMING_SNAKE_CASE = "A red cartoon frog, 4k"
_SCREAMING_SNAKE_CASE = KandinskyPriorPipeline.from_pretrained(
"kandinsky-community/kandinsky-2-1-prior" , torch_dtype=torch.floataa )
pipe_prior.to(UpperCamelCase )
_SCREAMING_SNAKE_CASE = KandinskyImgaImgPipeline.from_pretrained(
"kandinsky-community/kandinsky-2-1" , torch_dtype=torch.floataa )
_SCREAMING_SNAKE_CASE = pipeline.to(UpperCamelCase )
pipeline.set_progress_bar_config(disable=UpperCamelCase )
_SCREAMING_SNAKE_CASE = torch.Generator(device="cpu" ).manual_seed(0 )
_SCREAMING_SNAKE_CASE, _SCREAMING_SNAKE_CASE = pipe_prior(
UpperCamelCase , generator=UpperCamelCase , num_inference_steps=5 , negative_prompt="" , ).to_tuple()
_SCREAMING_SNAKE_CASE = pipeline(
UpperCamelCase , image=UpperCamelCase , image_embeds=UpperCamelCase , negative_image_embeds=UpperCamelCase , generator=UpperCamelCase , num_inference_steps=100 , height=768 , width=768 , strength=0.2 , output_type="np" , )
_SCREAMING_SNAKE_CASE = output.images[0]
assert image.shape == (768, 768, 3)
        assert_mean_pixel_difference(UpperCamelCase , UpperCamelCase )
| 493 | 1 |
import inspect
from typing import List, Optional, Tuple, Union
import torch
from ...models import UNetaDModel, VQModel
from ...schedulers import DDIMScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class LDMPipeline(DiffusionPipeline):
    """Unconditional image generation built from a VQ-VAE decoder, a UNet
    denoiser and a DDIM scheduler.  The class and argument names here are
    reconstructions of the placeholder identifiers."""

    def __init__(self, vqvae: VQModel, unet: UNetaDModel, scheduler: DDIMScheduler):
        super().__init__()
        self.register_modules(vqvae=vqvae, unet=unet, scheduler=scheduler)

    @torch.no_grad()
    def __call__(
        self,
        batch_size: int = 1,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        eta: float = 0.0,
        num_inference_steps: int = 50,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
        **kwargs,
    ) -> Union[Tuple, ImagePipelineOutput]:
        latents = randn_tensor(
            (batch_size, self.unet.config.in_channels, self.unet.config.sample_size, self.unet.config.sample_size),
            generator=generator,
        )
        latents = latents.to(self.device)

        # scale the initial noise by the standard deviation required by the scheduler
        latents = latents * self.scheduler.init_noise_sigma

        self.scheduler.set_timesteps(num_inference_steps)

        # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
        accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys())
        extra_kwargs = {}
        if accepts_eta:
            extra_kwargs["eta"] = eta

        for t in self.progress_bar(self.scheduler.timesteps):
            latent_model_input = self.scheduler.scale_model_input(latents, t)
            # predict the noise residual
            noise_prediction = self.unet(latent_model_input, t).sample
            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(noise_prediction, t, latents, **extra_kwargs).prev_sample

        # decode the image latents with the VQ-VAE
        image = self.vqvae.decode(latents).sample
        image = (image / 2 + 0.5).clamp(0, 1)
        image = image.cpu().permute(0, 2, 3, 1).numpy()
        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image,)

        return ImagePipelineOutput(images=image)
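# Hypothetical usage sketch (the checkpoint name is an assumption, not taken
# from this file):
# pipe = LDMPipeline.from_pretrained("CompVis/ldm-celebahq-256")
# image = pipe(batch_size=1, num_inference_steps=50).images[0]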
| 485 |
'''simple docstring'''
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
EulerAncestralDiscreteScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
StableDiffusionInstructPixaPixPipeline,
UNetaDConditionModel,
)
from diffusers.image_processor import VaeImageProcessor
from diffusers.utils import floats_tensor, load_image, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
IMAGE_TO_IMAGE_IMAGE_PARAMS,
TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class StableDiffusionInstructPix2PixPipelineFastTests(
    PipelineLatentTesterMixin, PipelineKarrasSchedulerTesterMixin, PipelineTesterMixin, unittest.TestCase
):
    pipeline_class = StableDiffusionInstructPixaPixPipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"height", "width", "cross_attention_kwargs"}
    batch_params = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS
    image_params = IMAGE_TO_IMAGE_IMAGE_PARAMS
    image_latents_params = IMAGE_TO_IMAGE_IMAGE_PARAMS
def A ( self : str ) -> Optional[Any]:
torch.manual_seed(0 )
UpperCAmelCase_ : Tuple = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=8 , out_channels=4 , down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''') , up_block_types=('''CrossAttnUpBlock2D''', '''UpBlock2D''') , cross_attention_dim=32 , )
UpperCAmelCase_ : Any = PNDMScheduler(skip_prk_steps=_A )
torch.manual_seed(0 )
UpperCAmelCase_ : int = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''] , up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] , latent_channels=4 , )
torch.manual_seed(0 )
UpperCAmelCase_ : Tuple = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=10_00 , )
UpperCAmelCase_ : List[str] = CLIPTextModel(_A )
UpperCAmelCase_ : List[str] = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
UpperCAmelCase_ : Union[str, Any] = {
'''unet''': unet,
'''scheduler''': scheduler,
'''vae''': vae,
'''text_encoder''': text_encoder,
'''tokenizer''': tokenizer,
'''safety_checker''': None,
'''feature_extractor''': None,
}
return components
def A ( self : List[str] , _A : Tuple , _A : str=0 ) -> int:
UpperCAmelCase_ : Tuple = floats_tensor((1, 3, 32, 32) , rng=random.Random(_A ) ).to(_A )
UpperCAmelCase_ : Dict = image.cpu().permute(0 , 2 , 3 , 1 )[0]
UpperCAmelCase_ : Optional[Any] = Image.fromarray(np.uinta(_A ) ).convert('''RGB''' )
if str(_A ).startswith('''mps''' ):
UpperCAmelCase_ : str = torch.manual_seed(_A )
else:
UpperCAmelCase_ : Optional[Any] = torch.Generator(device=_A ).manual_seed(_A )
UpperCAmelCase_ : List[str] = {
'''prompt''': '''A painting of a squirrel eating a burger''',
'''image''': image,
'''generator''': generator,
'''num_inference_steps''': 2,
'''guidance_scale''': 6.0,
'''image_guidance_scale''': 1,
'''output_type''': '''numpy''',
}
return inputs
def A ( self : Tuple ) -> List[Any]:
UpperCAmelCase_ : List[str] = '''cpu''' # ensure determinism for the device-dependent torch.Generator
UpperCAmelCase_ : Any = self.get_dummy_components()
UpperCAmelCase_ : int = StableDiffusionInstructPixaPixPipeline(**_A )
UpperCAmelCase_ : Tuple = sd_pipe.to(_A )
sd_pipe.set_progress_bar_config(disable=_A )
UpperCAmelCase_ : str = self.get_dummy_inputs(_A )
UpperCAmelCase_ : Union[str, Any] = sd_pipe(**_A ).images
UpperCAmelCase_ : int = image[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
UpperCAmelCase_ : Union[str, Any] = np.array([0.7_526, 0.3_750, 0.4_547, 0.6_117, 0.5_866, 0.5_016, 0.4_327, 0.5_642, 0.4_815] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
def A ( self : int ) -> str:
UpperCAmelCase_ : List[Any] = '''cpu''' # ensure determinism for the device-dependent torch.Generator
UpperCAmelCase_ : List[str] = self.get_dummy_components()
UpperCAmelCase_ : Dict = StableDiffusionInstructPixaPixPipeline(**_A )
UpperCAmelCase_ : str = sd_pipe.to(_A )
sd_pipe.set_progress_bar_config(disable=_A )
UpperCAmelCase_ : Any = self.get_dummy_inputs(_A )
UpperCAmelCase_ : List[Any] = '''french fries'''
UpperCAmelCase_ : Optional[Any] = sd_pipe(**_A , negative_prompt=_A )
UpperCAmelCase_ : Tuple = output.images
UpperCAmelCase_ : Union[str, Any] = image[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
UpperCAmelCase_ : Dict = np.array([0.7_511, 0.3_642, 0.4_553, 0.6_236, 0.5_797, 0.5_013, 0.4_343, 0.5_611, 0.4_831] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
def A ( self : Dict ) -> str:
UpperCAmelCase_ : List[str] = '''cpu''' # ensure determinism for the device-dependent torch.Generator
UpperCAmelCase_ : List[Any] = self.get_dummy_components()
UpperCAmelCase_ : Any = StableDiffusionInstructPixaPixPipeline(**_A )
UpperCAmelCase_ : int = sd_pipe.to(_A )
sd_pipe.set_progress_bar_config(disable=_A )
UpperCAmelCase_ : Any = self.get_dummy_inputs(_A )
UpperCAmelCase_ : Any = [inputs['''prompt''']] * 2
UpperCAmelCase_ : int = np.array(inputs['''image'''] ).astype(np.floataa ) / 255.0
UpperCAmelCase_ : int = torch.from_numpy(_A ).unsqueeze(0 ).to(_A )
UpperCAmelCase_ : List[str] = image / 2 + 0.5
UpperCAmelCase_ : str = image.permute(0 , 3 , 1 , 2 )
UpperCAmelCase_ : Any = image.repeat(2 , 1 , 1 , 1 )
UpperCAmelCase_ : List[Any] = sd_pipe(**_A ).images
UpperCAmelCase_ : str = image[-1, -3:, -3:, -1]
assert image.shape == (2, 32, 32, 3)
UpperCAmelCase_ : Optional[Any] = np.array([0.5_812, 0.5_748, 0.5_222, 0.5_908, 0.5_695, 0.7_174, 0.6_804, 0.5_523, 0.5_579] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
def A ( self : List[str] ) -> List[Any]:
UpperCAmelCase_ : Union[str, Any] = '''cpu''' # ensure determinism for the device-dependent torch.Generator
UpperCAmelCase_ : Tuple = self.get_dummy_components()
UpperCAmelCase_ : List[str] = EulerAncestralDiscreteScheduler(
beta_start=0.00_085 , beta_end=0.012 , beta_schedule='''scaled_linear''' )
UpperCAmelCase_ : List[Any] = StableDiffusionInstructPixaPixPipeline(**_A )
UpperCAmelCase_ : Dict = sd_pipe.to(_A )
sd_pipe.set_progress_bar_config(disable=_A )
UpperCAmelCase_ : Tuple = self.get_dummy_inputs(_A )
UpperCAmelCase_ : str = sd_pipe(**_A ).images
UpperCAmelCase_ : Dict = image[0, -3:, -3:, -1]
UpperCAmelCase_ : Dict = [round(_A , 4 ) for x in image_slice.flatten().tolist()]
print(''','''.join([str(_A ) for x in slice] ) )
assert image.shape == (1, 32, 32, 3)
UpperCAmelCase_ : Any = np.array([0.7_417, 0.3_842, 0.4_732, 0.5_776, 0.5_891, 0.5_139, 0.4_052, 0.5_673, 0.4_986] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
def A ( self : str ) -> List[str]:
super().test_inference_batch_single_identical(expected_max_diff=3e-3 )
def A ( self : List[Any] ) -> Optional[Any]:
UpperCAmelCase_ : Tuple = self.get_dummy_components()
UpperCAmelCase_ : Tuple = StableDiffusionInstructPixaPixPipeline(**_A )
UpperCAmelCase_ : Any = VaeImageProcessor(do_resize=_A , do_normalize=_A )
UpperCAmelCase_ : Union[str, Any] = pipe.to(_A )
pipe.set_progress_bar_config(disable=_A )
UpperCAmelCase_ : int = pipe(**self.get_dummy_inputs_by_type(_A , input_image_type='''pt''' ) )[0]
UpperCAmelCase_ : Union[str, Any] = components['''vae''']
UpperCAmelCase_ : Any = self.get_dummy_inputs_by_type(_A , input_image_type='''pt''' )
for image_param in self.image_latents_params:
if image_param in inputs.keys():
UpperCAmelCase_ : int = vae.encode(inputs[image_param] ).latent_dist.mode()
UpperCAmelCase_ : List[Any] = pipe(**_A )[0]
UpperCAmelCase_ : int = np.abs(out - out_latents_inputs ).max()
self.assertLess(_A , 1e-4 , '''passing latents as image input generate different result from passing image''' )
@slow
@require_torch_gpu
class StableDiffusionInstructPix2PixPipelineSlowTests(unittest.TestCase):
def A ( self : Any ) -> Optional[Any]:
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def A ( self : List[str] , _A : Optional[Any]=0 ) -> List[Any]:
UpperCAmelCase_ : Dict = torch.manual_seed(_A )
UpperCAmelCase_ : Union[str, Any] = load_image(
'''https://huggingface.co/datasets/diffusers/test-arrays/resolve/main/stable_diffusion_pix2pix/example.jpg''' )
UpperCAmelCase_ : Optional[int] = {
'''prompt''': '''turn him into a cyborg''',
'''image''': image,
'''generator''': generator,
'''num_inference_steps''': 3,
'''guidance_scale''': 7.5,
'''image_guidance_scale''': 1.0,
'''output_type''': '''numpy''',
}
return inputs
def A ( self : Optional[int] ) -> Optional[Any]:
UpperCAmelCase_ : List[Any] = StableDiffusionInstructPixaPixPipeline.from_pretrained(
'''timbrooks/instruct-pix2pix''' , safety_checker=_A )
pipe.to(_A )
pipe.set_progress_bar_config(disable=_A )
pipe.enable_attention_slicing()
UpperCAmelCase_ : List[str] = self.get_inputs()
UpperCAmelCase_ : int = pipe(**_A ).images
UpperCAmelCase_ : Dict = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 5_12, 5_12, 3)
UpperCAmelCase_ : Any = np.array([0.5_902, 0.6_015, 0.6_027, 0.5_983, 0.6_092, 0.6_061, 0.5_765, 0.5_785, 0.5_555] )
assert np.abs(expected_slice - image_slice ).max() < 1e-3
def A ( self : Optional[Any] ) -> Optional[Any]:
UpperCAmelCase_ : Optional[Any] = StableDiffusionInstructPixaPixPipeline.from_pretrained(
'''timbrooks/instruct-pix2pix''' , safety_checker=_A )
UpperCAmelCase_ : Dict = LMSDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.to(_A )
pipe.set_progress_bar_config(disable=_A )
pipe.enable_attention_slicing()
UpperCAmelCase_ : List[Any] = self.get_inputs()
UpperCAmelCase_ : str = pipe(**_A ).images
UpperCAmelCase_ : Optional[int] = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 5_12, 5_12, 3)
UpperCAmelCase_ : List[str] = np.array([0.6_578, 0.6_817, 0.6_972, 0.6_761, 0.6_856, 0.6_916, 0.6_428, 0.6_516, 0.6_301] )
assert np.abs(expected_slice - image_slice ).max() < 1e-3
def A ( self : List[str] ) -> Union[str, Any]:
UpperCAmelCase_ : str = StableDiffusionInstructPixaPixPipeline.from_pretrained(
'''timbrooks/instruct-pix2pix''' , safety_checker=_A )
UpperCAmelCase_ : str = DDIMScheduler.from_config(pipe.scheduler.config )
pipe.to(_A )
pipe.set_progress_bar_config(disable=_A )
pipe.enable_attention_slicing()
UpperCAmelCase_ : Any = self.get_inputs()
UpperCAmelCase_ : Any = pipe(**_A ).images
UpperCAmelCase_ : Optional[Any] = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 5_12, 5_12, 3)
UpperCAmelCase_ : Dict = np.array([0.3_828, 0.3_834, 0.3_818, 0.3_792, 0.3_865, 0.3_752, 0.3_792, 0.3_847, 0.3_753] )
assert np.abs(expected_slice - image_slice ).max() < 1e-3
def A ( self : Tuple ) -> List[Any]:
UpperCAmelCase_ : List[str] = 0
def callback_fn(_A : int , _A : int , _A : torch.FloatTensor ) -> None:
UpperCAmelCase_ : Tuple = True
nonlocal number_of_steps
number_of_steps += 1
if step == 1:
UpperCAmelCase_ : List[str] = latents.detach().cpu().numpy()
assert latents.shape == (1, 4, 64, 64)
UpperCAmelCase_ : Optional[Any] = latents[0, -3:, -3:, -1]
UpperCAmelCase_ : List[Any] = np.array([-0.2_463, -0.4_644, -0.9_756, 1.5_176, 1.4_414, 0.7_866, 0.9_897, 0.8_521, 0.7_983] )
assert np.abs(latents_slice.flatten() - expected_slice ).max() < 5e-2
elif step == 2:
UpperCAmelCase_ : Union[str, Any] = latents.detach().cpu().numpy()
assert latents.shape == (1, 4, 64, 64)
UpperCAmelCase_ : Dict = latents[0, -3:, -3:, -1]
UpperCAmelCase_ : Tuple = np.array([-0.2_644, -0.4_626, -0.9_653, 1.5_176, 1.4_551, 0.7_686, 0.9_805, 0.8_452, 0.8_115] )
assert np.abs(latents_slice.flatten() - expected_slice ).max() < 5e-2
UpperCAmelCase_ : Dict = False
UpperCAmelCase_ : Dict = StableDiffusionInstructPixaPixPipeline.from_pretrained(
'''timbrooks/instruct-pix2pix''' , safety_checker=_A , torch_dtype=torch.floataa )
UpperCAmelCase_ : Any = pipe.to(_A )
pipe.set_progress_bar_config(disable=_A )
pipe.enable_attention_slicing()
UpperCAmelCase_ : Dict = self.get_inputs()
pipe(**_A , callback=_A , callback_steps=1 )
assert callback_fn.has_been_called
assert number_of_steps == 3
def A ( self : int ) -> int:
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
UpperCAmelCase_ : Union[str, Any] = StableDiffusionInstructPixaPixPipeline.from_pretrained(
'''timbrooks/instruct-pix2pix''' , safety_checker=_A , torch_dtype=torch.floataa )
UpperCAmelCase_ : Tuple = pipe.to(_A )
pipe.set_progress_bar_config(disable=_A )
pipe.enable_attention_slicing(1 )
pipe.enable_sequential_cpu_offload()
UpperCAmelCase_ : Dict = self.get_inputs()
UpperCAmelCase_ : List[str] = pipe(**_A )
UpperCAmelCase_ : Dict = torch.cuda.max_memory_allocated()
# make sure that less than 2.2 GB is allocated
assert mem_bytes < 2.2 * 10**9
def A ( self : str ) -> int:
UpperCAmelCase_ : List[Any] = self.get_inputs()
# resize to resolution that is divisible by 8 but not 16 or 32
UpperCAmelCase_ : List[str] = inputs['''image'''].resize((5_04, 5_04) )
UpperCAmelCase_ : int = '''timbrooks/instruct-pix2pix'''
UpperCAmelCase_ : Tuple = StableDiffusionInstructPixaPixPipeline.from_pretrained(
_A , safety_checker=_A , )
pipe.to(_A )
pipe.set_progress_bar_config(disable=_A )
pipe.enable_attention_slicing()
UpperCAmelCase_ : Optional[Any] = pipe(**_A )
UpperCAmelCase_ : Union[str, Any] = output.images[0]
UpperCAmelCase_ : List[str] = image[2_55:2_58, 3_83:3_86, -1]
assert image.shape == (5_04, 5_04, 3)
UpperCAmelCase_ : List[str] = np.array([0.2_726, 0.2_529, 0.2_664, 0.2_655, 0.2_641, 0.2_642, 0.2_591, 0.2_649, 0.2_590] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 5e-3
| 541 | 0 |
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch

from ..models.speecht5 import SpeechT5ForTextToSpeech, SpeechT5HifiGan, SpeechT5Processor
from ..utils import is_datasets_available
from .base import PipelineTool


if is_datasets_available():
    from datasets import load_dataset


class TextToSpeechTool(PipelineTool):
    default_checkpoint = "microsoft/speecht5_tts"
    description = (
        "This is a tool that reads an English text out loud. It takes an input named `text` which should contain the "
        "text to read (in English) and returns a waveform object containing the sound."
    )
    name = "text_reader"
    pre_processor_class = SpeechT5Processor
    model_class = SpeechT5ForTextToSpeech
    post_processor_class = SpeechT5HifiGan

    inputs = ["text"]
    outputs = ["audio"]

    def setup(self):
        if self.post_processor is None:
            self.post_processor = "microsoft/speecht5_hifigan"
        super().setup()

    def encode(self, text, speaker_embeddings=None):
        inputs = self.pre_processor(text=text, return_tensors="pt", truncation=True)

        if speaker_embeddings is None:
            if not is_datasets_available():
                raise ImportError("Datasets needs to be installed if not passing speaker embeddings.")

            embeddings_dataset = load_dataset("Matthijs/cmu-arctic-xvectors", split="validation")
            speaker_embeddings = torch.tensor(embeddings_dataset[7305]["xvector"]).unsqueeze(0)

        return {"input_ids": inputs["input_ids"], "speaker_embeddings": speaker_embeddings}

    def forward(self, outputs):
        with torch.no_grad():
            return self.model.generate_speech(**outputs)

    def decode(self, outputs):
        with torch.no_grad():
            return self.post_processor(outputs).cpu().detach()
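# Usage sketch (not part of the original module; the tool is normally reached
# through the transformers agents API, so the direct construction below is an
# illustrative assumption):
#
#     reader = TextToSpeechTool()
#     reader.setup()
#     audio = reader("Hello, world!")  # __call__ chains encode -> forward -> decode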
| 683 |
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch

from ..models.speecht5 import SpeechT5ForTextToSpeech, SpeechT5HifiGan, SpeechT5Processor
from ..utils import is_datasets_available
from .base import PipelineTool


if is_datasets_available():
    from datasets import load_dataset


class TextToSpeechTool(PipelineTool):
    default_checkpoint = "microsoft/speecht5_tts"
    description = (
        "This is a tool that reads an English text out loud. It takes an input named `text` which should contain the "
        "text to read (in English) and returns a waveform object containing the sound."
    )
    name = "text_reader"
    pre_processor_class = SpeechT5Processor
    model_class = SpeechT5ForTextToSpeech
    post_processor_class = SpeechT5HifiGan

    inputs = ["text"]
    outputs = ["audio"]

    def setup(self):
        if self.post_processor is None:
            self.post_processor = "microsoft/speecht5_hifigan"
        super().setup()

    def encode(self, text, speaker_embeddings=None):
        inputs = self.pre_processor(text=text, return_tensors="pt", truncation=True)

        if speaker_embeddings is None:
            if not is_datasets_available():
                raise ImportError("Datasets needs to be installed if not passing speaker embeddings.")

            embeddings_dataset = load_dataset("Matthijs/cmu-arctic-xvectors", split="validation")
            speaker_embeddings = torch.tensor(embeddings_dataset[7305]["xvector"]).unsqueeze(0)

        return {"input_ids": inputs["input_ids"], "speaker_embeddings": speaker_embeddings}

    def forward(self, outputs):
        with torch.no_grad():
            return self.model.generate_speech(**outputs)

    def decode(self, outputs):
        with torch.no_grad():
            return self.post_processor(outputs).cpu().detach()
| 683 | 1 |
import argparse
import gc
import json
import os
import shutil
import warnings

import torch

from transformers import LlamaConfig, LlamaForCausalLM, LlamaTokenizer


try:
    from transformers import LlamaTokenizerFast
except ImportError as e:
    warnings.warn(e)
    warnings.warn(
        "The converted tokenizer will be the `slow` tokenizer. To use the fast, update your `tokenizers` library and re-run the tokenizer conversion"
    )
    LlamaTokenizerFast = None

INTERMEDIATE_SIZE_MAP = {
    "7B": 11008,
    "13B": 13824,
    "30B": 17920,
    "65B": 22016,
    "70B": 28672,
}
NUM_SHARDS = {
    "7B": 1,
    "7Bf": 1,
    "13B": 2,
    "13Bf": 2,
    "30B": 4,
    "65B": 8,
    "70B": 8,
    "70Bf": 8,
}


def compute_intermediate_size(n, ffn_dim_multiplier=1, multiple_of=256):
    return multiple_of * ((int(ffn_dim_multiplier * int(8 * n / 3)) + multiple_of - 1) // multiple_of)


def read_json(path):
    with open(path, "r") as f:
        return json.load(f)


def write_json(text, path):
    with open(path, "w") as f:
        json.dump(text, f)


def write_model(model_path, input_base_path, model_size, safe_serialization=True):
    os.makedirs(model_path, exist_ok=True)
    tmp_model_path = os.path.join(model_path, "tmp")
    os.makedirs(tmp_model_path, exist_ok=True)

    params = read_json(os.path.join(input_base_path, "params.json"))
    num_shards = NUM_SHARDS[model_size]
    n_layers = params["n_layers"]
    n_heads = params["n_heads"]
    n_heads_per_shard = n_heads // num_shards
    dim = params["dim"]
    dims_per_head = dim // n_heads
    base = 10000.0
    inv_freq = 1.0 / (base ** (torch.arange(0, dims_per_head, 2).float() / dims_per_head))

    if "n_kv_heads" in params:
        num_key_value_heads = params["n_kv_heads"]  # for GQA / MQA
        num_local_key_value_heads = n_heads_per_shard // num_key_value_heads
        key_value_dim = dim // num_key_value_heads
    else:  # compatibility with other checkpoints
        num_key_value_heads = n_heads
        num_local_key_value_heads = n_heads_per_shard
        key_value_dim = dim

    # permute for sliced rotary
    def permute(w, n_heads=n_heads, dim1=dim, dim2=dim):
        return w.view(n_heads, dim1 // n_heads // 2, 2, dim2).transpose(1, 2).reshape(dim1, dim2)

    print(f"Fetching all parameters from the checkpoint at {input_base_path}.")
    # Load weights
    if model_size == "7B":
        # Not sharded
        # (The sharded implementation would also work, but this is simpler.)
        loaded = torch.load(os.path.join(input_base_path, "consolidated.00.pth"), map_location="cpu")
    else:
        # Sharded
        loaded = [
            torch.load(os.path.join(input_base_path, f"consolidated.{i:02d}.pth"), map_location="cpu")
            for i in range(num_shards)
        ]
    param_count = 0
    index_dict = {"weight_map": {}}
    for layer_i in range(n_layers):
        filename = f"pytorch_model-{layer_i + 1}-of-{n_layers + 1}.bin"
        if model_size == "7B":
            # Unsharded
            state_dict = {
                f"model.layers.{layer_i}.self_attn.q_proj.weight": permute(
                    loaded[f"layers.{layer_i}.attention.wq.weight"]
                ),
                f"model.layers.{layer_i}.self_attn.k_proj.weight": permute(
                    loaded[f"layers.{layer_i}.attention.wk.weight"]
                ),
                f"model.layers.{layer_i}.self_attn.v_proj.weight": loaded[f"layers.{layer_i}.attention.wv.weight"],
                f"model.layers.{layer_i}.self_attn.o_proj.weight": loaded[f"layers.{layer_i}.attention.wo.weight"],
                f"model.layers.{layer_i}.mlp.gate_proj.weight": loaded[f"layers.{layer_i}.feed_forward.w1.weight"],
                f"model.layers.{layer_i}.mlp.down_proj.weight": loaded[f"layers.{layer_i}.feed_forward.w2.weight"],
                f"model.layers.{layer_i}.mlp.up_proj.weight": loaded[f"layers.{layer_i}.feed_forward.w3.weight"],
                f"model.layers.{layer_i}.input_layernorm.weight": loaded[f"layers.{layer_i}.attention_norm.weight"],
                f"model.layers.{layer_i}.post_attention_layernorm.weight": loaded[f"layers.{layer_i}.ffn_norm.weight"],
            }
        else:
            # Sharded
            # Note that attention.w{q,k,v,o}, feed_forward.w[1,2,3], attention_norm.weight and ffn_norm.weight share
            # the same storage object, saving attention_norm and ffn_norm will save other weights too, which is
            # redundant as other weights will be stitched from multiple shards. To avoid that, they are cloned.
            state_dict = {
                f"model.layers.{layer_i}.input_layernorm.weight": loaded[0][
                    f"layers.{layer_i}.attention_norm.weight"
                ].clone(),
                f"model.layers.{layer_i}.post_attention_layernorm.weight": loaded[0][
                    f"layers.{layer_i}.ffn_norm.weight"
                ].clone(),
            }
            state_dict[f"model.layers.{layer_i}.self_attn.q_proj.weight"] = permute(
                torch.cat(
                    [
                        loaded[i][f"layers.{layer_i}.attention.wq.weight"].view(n_heads_per_shard, dims_per_head, dim)
                        for i in range(num_shards)
                    ],
                    dim=0,
                ).reshape(dim, dim)
            )
            state_dict[f"model.layers.{layer_i}.self_attn.k_proj.weight"] = permute(
                torch.cat(
                    [
                        loaded[i][f"layers.{layer_i}.attention.wk.weight"].view(
                            num_local_key_value_heads, dims_per_head, dim
                        )
                        for i in range(num_shards)
                    ],
                    dim=0,
                ).reshape(key_value_dim, dim),
                num_key_value_heads,
                key_value_dim,
                dim,
            )
            state_dict[f"model.layers.{layer_i}.self_attn.v_proj.weight"] = torch.cat(
                [
                    loaded[i][f"layers.{layer_i}.attention.wv.weight"].view(
                        num_local_key_value_heads, dims_per_head, dim
                    )
                    for i in range(num_shards)
                ],
                dim=0,
            ).reshape(key_value_dim, dim)

            state_dict[f"model.layers.{layer_i}.self_attn.o_proj.weight"] = torch.cat(
                [loaded[i][f"layers.{layer_i}.attention.wo.weight"] for i in range(num_shards)], dim=1
            )
            state_dict[f"model.layers.{layer_i}.mlp.gate_proj.weight"] = torch.cat(
                [loaded[i][f"layers.{layer_i}.feed_forward.w1.weight"] for i in range(num_shards)], dim=0
            )
            state_dict[f"model.layers.{layer_i}.mlp.down_proj.weight"] = torch.cat(
                [loaded[i][f"layers.{layer_i}.feed_forward.w2.weight"] for i in range(num_shards)], dim=1
            )
            state_dict[f"model.layers.{layer_i}.mlp.up_proj.weight"] = torch.cat(
                [loaded[i][f"layers.{layer_i}.feed_forward.w3.weight"] for i in range(num_shards)], dim=0
            )

        state_dict[f"model.layers.{layer_i}.self_attn.rotary_emb.inv_freq"] = inv_freq
        for k, v in state_dict.items():
            index_dict["weight_map"][k] = filename
            param_count += v.numel()
        torch.save(state_dict, os.path.join(tmp_model_path, filename))

    filename = f"pytorch_model-{n_layers + 1}-of-{n_layers + 1}.bin"
    if model_size == "7B":
        # Unsharded
        state_dict = {
            "model.embed_tokens.weight": loaded["tok_embeddings.weight"],
            "model.norm.weight": loaded["norm.weight"],
            "lm_head.weight": loaded["output.weight"],
        }
    else:
        state_dict = {
            "model.norm.weight": loaded[0]["norm.weight"],
            "model.embed_tokens.weight": torch.cat(
                [loaded[i]["tok_embeddings.weight"] for i in range(num_shards)], dim=1
            ),
            "lm_head.weight": torch.cat([loaded[i]["output.weight"] for i in range(num_shards)], dim=0),
        }

    for k, v in state_dict.items():
        index_dict["weight_map"][k] = filename
        param_count += v.numel()
    torch.save(state_dict, os.path.join(tmp_model_path, filename))

    # Write configs
    index_dict["metadata"] = {"total_size": param_count * 2}
    write_json(index_dict, os.path.join(tmp_model_path, "pytorch_model.bin.index.json"))
    ffn_dim_multiplier = params["ffn_dim_multiplier"] if "ffn_dim_multiplier" in params else 1
    multiple_of = params["multiple_of"] if "multiple_of" in params else 256
    config = LlamaConfig(
        hidden_size=dim,
        intermediate_size=compute_intermediate_size(dim, ffn_dim_multiplier, multiple_of),
        num_attention_heads=params["n_heads"],
        num_hidden_layers=params["n_layers"],
        rms_norm_eps=params["norm_eps"],
        num_key_value_heads=num_key_value_heads,
    )
    config.save_pretrained(tmp_model_path)

    # Make space so we can load the model properly now.
    del state_dict
    del loaded
    gc.collect()

    print("Loading the checkpoint in a Llama model.")
    model = LlamaForCausalLM.from_pretrained(tmp_model_path, torch_dtype=torch.float16, low_cpu_mem_usage=True)
    # Avoid saving this as part of the config.
    del model.config._name_or_path

    print("Saving in the Transformers format.")
    model.save_pretrained(model_path, safe_serialization=safe_serialization)
    shutil.rmtree(tmp_model_path)


def write_tokenizer(tokenizer_path, input_tokenizer_path):
    # Initialize the tokenizer based on the `spm` model
    tokenizer_class = LlamaTokenizer if LlamaTokenizerFast is None else LlamaTokenizerFast
    print(f"Saving a {tokenizer_class.__name__} to {tokenizer_path}.")
    tokenizer = tokenizer_class(input_tokenizer_path)
    tokenizer.save_pretrained(tokenizer_path)


def main():
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--input_dir",
        help="Location of LLaMA weights, which contains tokenizer.model and model folders",
    )
    parser.add_argument(
        "--model_size",
        choices=["7B", "7Bf", "13B", "13Bf", "30B", "65B", "70B", "70Bf", "tokenizer_only"],
    )
    parser.add_argument(
        "--output_dir",
        help="Location to write HF model and tokenizer",
    )
    parser.add_argument("--safe_serialization", type=bool, help="Whether or not to save using `safetensors`.")
    args = parser.parse_args()
    if args.model_size != "tokenizer_only":
        write_model(
            model_path=args.output_dir,
            input_base_path=os.path.join(args.input_dir, args.model_size),
            model_size=args.model_size,
            safe_serialization=args.safe_serialization,
        )
    spm_path = os.path.join(args.input_dir, "tokenizer.model")
    write_tokenizer(args.output_dir, spm_path)


if __name__ == "__main__":
    main()
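# Usage sketch (not part of the original script; paths are placeholders):
#
#     python convert_llama_weights_to_hf.py \
#         --input_dir /path/to/downloaded/llama/weights \
#         --model_size 7B \
#         --output_dir /path/to/llama-7b-hf
#
# Sanity check on the FFN sizing: compute_intermediate_size(4096) == 11008,
# matching the 7B entry of INTERMEDIATE_SIZE_MAP, since
# 256 * ((int(8 * 4096 / 3) + 255) // 256) = 256 * 43 = 11008.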
| 5 |
INSTALL_CONTENT = """
# Transformers installation
! pip install transformers datasets
# To install from source instead of the last release, comment the command above and uncomment the following one.
# ! pip install git+https://github.com/huggingface/transformers.git
"""

notebook_first_cells = [{"type": "code", "content": INSTALL_CONTENT}]
black_avoid_patterns = {
    "{processor_class}": "FakeProcessorClass",
    "{model_class}": "FakeModelClass",
    "{object_class}": "FakeObjectClass",
}
| 5 | 1 |
import argparse

import intel_extension_for_pytorch as ipex
import torch

from diffusers import DPMSolverMultistepScheduler, StableDiffusionPipeline


parser = argparse.ArgumentParser("Stable Diffusion script with intel optimization", add_help=False)
parser.add_argument("--dpm", action="store_true", help="Enable DPMSolver or not")
parser.add_argument("--steps", default=None, type=int, help="Num inference steps")
args = parser.parse_args()

device = "cpu"
prompt = "a lovely <dicoo> in red dress and hat, in the snowly and brightly night, with many brighly buildings"

model_id = "path-to-your-trained-model"
pipe = StableDiffusionPipeline.from_pretrained(model_id)
if args.dpm:
    pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
pipe = pipe.to(device)

# to channels last
pipe.unet = pipe.unet.to(memory_format=torch.channels_last)
pipe.vae = pipe.vae.to(memory_format=torch.channels_last)
pipe.text_encoder = pipe.text_encoder.to(memory_format=torch.channels_last)
if pipe.requires_safety_checker:
    pipe.safety_checker = pipe.safety_checker.to(memory_format=torch.channels_last)

# optimize with ipex
sample = torch.randn(2, 4, 64, 64)
timestep = torch.rand(1) * 999
encoder_hidden_status = torch.randn(2, 77, 768)
input_example = (sample, timestep, encoder_hidden_status)
try:
    pipe.unet = ipex.optimize(pipe.unet.eval(), dtype=torch.bfloat16, inplace=True, sample_input=input_example)
except Exception:
    pipe.unet = ipex.optimize(pipe.unet.eval(), dtype=torch.bfloat16, inplace=True)
pipe.vae = ipex.optimize(pipe.vae.eval(), dtype=torch.bfloat16, inplace=True)
pipe.text_encoder = ipex.optimize(pipe.text_encoder.eval(), dtype=torch.bfloat16, inplace=True)
if pipe.requires_safety_checker:
    pipe.safety_checker = ipex.optimize(pipe.safety_checker.eval(), dtype=torch.bfloat16, inplace=True)

# compute
seed = 666
generator = torch.Generator(device).manual_seed(seed)
generate_kwargs = {"generator": generator}
if args.steps is not None:
    generate_kwargs["num_inference_steps"] = args.steps

with torch.cpu.amp.autocast(enabled=True, dtype=torch.bfloat16):
    image = pipe(prompt, **generate_kwargs).images[0]

# save image
image.save("generated.png")
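# Usage sketch (assumption: the file name is a placeholder; bf16 autocast needs
# a CPU with the relevant ISA support, e.g. a recent Xeon):
#
#     python stable_diffusion_ipex.py --dpm --steps 20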
| 714 |
from collections.abc import Generator
from math import sin


def to_little_endian(string_32: bytes) -> bytes:
    if len(string_32) != 32:
        raise ValueError("Input must be of length 32")

    little_endian = b""
    for i in [3, 2, 1, 0]:
        little_endian += string_32[8 * i : 8 * i + 8]
    return little_endian


def reformat_hex(i: int) -> bytes:
    if i < 0:
        raise ValueError("Input must be non-negative")

    hex_rep = format(i, "08x")[-8:]
    little_endian_hex = b""
    for i in [3, 2, 1, 0]:
        little_endian_hex += hex_rep[2 * i : 2 * i + 2].encode("utf-8")
    return little_endian_hex


def preprocess(message: bytes) -> bytes:
    bit_string = b""
    for char in message:
        bit_string += format(char, "08b").encode("utf-8")
    start_len = format(len(bit_string), "064b").encode("utf-8")

    # Pad bit_string to a multiple of 512 chars
    bit_string += b"1"
    while len(bit_string) % 512 != 448:
        bit_string += b"0"
    bit_string += to_little_endian(start_len[32:]) + to_little_endian(start_len[:32])

    return bit_string


def get_block_words(bit_string: bytes) -> Generator[list[int], None, None]:
    if len(bit_string) % 512 != 0:
        raise ValueError("Input must have length that's a multiple of 512")

    for pos in range(0, len(bit_string), 512):
        block = bit_string[pos : pos + 512]
        block_words = []
        for i in range(0, 512, 32):
            block_words.append(int(to_little_endian(block[i : i + 32]), 2))
        yield block_words


def not_32(i: int) -> int:
    if i < 0:
        raise ValueError("Input must be non-negative")

    i_str = format(i, "032b")
    new_str = ""
    for c in i_str:
        new_str += "1" if c == "0" else "0"
    return int(new_str, 2)


def sum_32(a: int, b: int) -> int:
    return (a + b) % 2**32


def left_rotate_32(i: int, shift: int) -> int:
    if i < 0:
        raise ValueError("Input must be non-negative")
    if shift < 0:
        raise ValueError("Shift must be non-negative")
    return ((i << shift) ^ (i >> (32 - shift))) % 2**32


def md5_me(message: bytes) -> bytes:
    bit_string = preprocess(message)

    added_consts = [int(2**32 * abs(sin(i + 1))) for i in range(64)]

    # Starting states
    a0 = 0x67452301
    b0 = 0xEFCDAB89
    c0 = 0x98BADCFE
    d0 = 0x10325476

    shift_amounts = [
        7, 12, 17, 22, 7, 12, 17, 22, 7, 12, 17, 22, 7, 12, 17, 22,
        5, 9, 14, 20, 5, 9, 14, 20, 5, 9, 14, 20, 5, 9, 14, 20,
        4, 11, 16, 23, 4, 11, 16, 23, 4, 11, 16, 23, 4, 11, 16, 23,
        6, 10, 15, 21, 6, 10, 15, 21, 6, 10, 15, 21, 6, 10, 15, 21,
    ]

    # Process bit string in chunks, each with 16 32-char words
    for block_words in get_block_words(bit_string):
        a = a0
        b = b0
        c = c0
        d = d0

        # Hash current chunk
        for i in range(64):
            if i <= 15:
                # f = (b & c) | (not_32(b) & d)     # Alternate definition for f
                f = d ^ (b & (c ^ d))
                g = i
            elif i <= 31:
                # f = (d & b) | (not_32(d) & c)     # Alternate definition for f
                f = c ^ (d & (b ^ c))
                g = (5 * i + 1) % 16
            elif i <= 47:
                f = b ^ c ^ d
                g = (3 * i + 5) % 16
            else:
                f = c ^ (b | not_32(d))
                g = (7 * i) % 16
            f = (f + a + added_consts[i] + block_words[g]) % 2**32
            a = d
            d = c
            c = b
            b = sum_32(b, left_rotate_32(f, shift_amounts[i]))

        # Add hashed chunk to running total
        a0 = sum_32(a0, a)
        b0 = sum_32(b0, b)
        c0 = sum_32(c0, c)
        d0 = sum_32(d0, d)

    digest = reformat_hex(a0) + reformat_hex(b0) + reformat_hex(c0) + reformat_hex(d0)
    return digest


if __name__ == "__main__":
    import doctest

    doctest.testmod()
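# Usage sketch (added as an illustration, not part of the original module): the
# empty message yields the canonical MD5 test vector, returned here as
# little-endian hex bytes.
if __name__ == "__main__":
    assert md5_me(b"") == b"d41d8cd98f00b204e9800998ecf8427e"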
| 60 | 0 |
import sys
import webbrowser

import requests
from bs4 import BeautifulSoup
from fake_useragent import UserAgent


if __name__ == "__main__":
    print("Googling.....")
    url = "https://www.google.com/search?q=" + " ".join(sys.argv[1:])
    res = requests.get(url, headers={"UserAgent": UserAgent().random})
    # res.raise_for_status()
    with open("project1a.html", "wb") as out_file:  # only for knowing the class
        for data in res.iter_content(10000):
            out_file.write(data)
    soup = BeautifulSoup(res.text, "html.parser")
    links = list(soup.select(".eZt8xd"))[:5]
    print(len(links))
    for link in links:
        if link.text == "Maps":
            webbrowser.open(link.get("href"))
        else:
            webbrowser.open(f"https://google.com{link.get('href')}")
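# Usage sketch (the file name is a placeholder):
#
#     python crawl_google_results.py huggingface transformers
#
# which runs the search, dumps the raw HTML to project1a.html, and opens the
# top five result links in the default browser.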
| 50 |
"""simple docstring"""
from __future__ import annotations
from collections.abc import Callable
from typing import Generic, TypeVar
UpperCAmelCase__ : Union[str, Any] = TypeVar('T')
UpperCAmelCase__ : List[Any] = TypeVar('U')
class lowerCAmelCase_ (Generic[T, U] ):
"""simple docstring"""
def __init__(self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) -> str:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Optional[Any] = key
SCREAMING_SNAKE_CASE__ : Union[str, Any] = val
SCREAMING_SNAKE_CASE__ : DoubleLinkedListNode[T, U] | None = None
SCREAMING_SNAKE_CASE__ : DoubleLinkedListNode[T, U] | None = None
def __repr__(self ) -> str:
"""simple docstring"""
return (
F'''Node: key: {self.key}, val: {self.val}, '''
F'''has next: {bool(self.next )}, has prev: {bool(self.prev )}'''
)
class lowerCAmelCase_ (Generic[T, U] ):
"""simple docstring"""
def __init__(self ) -> None:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : DoubleLinkedListNode[T, U] = DoubleLinkedListNode(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
SCREAMING_SNAKE_CASE__ : DoubleLinkedListNode[T, U] = DoubleLinkedListNode(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Optional[Any] = self.rear, self.head
def __repr__(self ) -> str:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : int = ["""DoubleLinkedList"""]
SCREAMING_SNAKE_CASE__ : Optional[Any] = self.head
while node.next is not None:
rep.append(str(SCREAMING_SNAKE_CASE__ ) )
SCREAMING_SNAKE_CASE__ : Optional[int] = node.next
rep.append(str(self.rear ) )
return ",\n ".join(SCREAMING_SNAKE_CASE__ )
def __magic_name__ (self , SCREAMING_SNAKE_CASE__ ) -> None:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Dict = self.rear.prev
# All nodes other than self.head are guaranteed to have non-None previous
assert previous is not None
SCREAMING_SNAKE_CASE__ : int = node
SCREAMING_SNAKE_CASE__ : Optional[Any] = previous
SCREAMING_SNAKE_CASE__ : List[str] = node
SCREAMING_SNAKE_CASE__ : List[Any] = self.rear
def __magic_name__ (self , SCREAMING_SNAKE_CASE__ ) -> DoubleLinkedListNode[T, U] | None:
"""simple docstring"""
if node.prev is None or node.next is None:
return None
SCREAMING_SNAKE_CASE__ : Optional[Any] = node.next
SCREAMING_SNAKE_CASE__ : Optional[int] = node.prev
SCREAMING_SNAKE_CASE__ : List[str] = None
SCREAMING_SNAKE_CASE__ : List[str] = None
return node
class lowerCAmelCase_ (Generic[T, U] ):
"""simple docstring"""
__UpperCamelCase : dict[Callable[[T], U], LRUCache[T, U]] = {}
def __init__(self , SCREAMING_SNAKE_CASE__ ) -> Tuple:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : DoubleLinkedList[T, U] = DoubleLinkedList()
SCREAMING_SNAKE_CASE__ : List[Any] = capacity
SCREAMING_SNAKE_CASE__ : int = 0
SCREAMING_SNAKE_CASE__ : Optional[int] = 0
SCREAMING_SNAKE_CASE__ : Union[str, Any] = 0
SCREAMING_SNAKE_CASE__ : dict[T, DoubleLinkedListNode[T, U]] = {}
def __repr__(self ) -> str:
"""simple docstring"""
return (
F'''CacheInfo(hits={self.hits}, misses={self.miss}, '''
F'''capacity={self.capacity}, current size={self.num_keys})'''
)
def __contains__(self , SCREAMING_SNAKE_CASE__ ) -> bool:
"""simple docstring"""
return key in self.cache
def __magic_name__ (self , SCREAMING_SNAKE_CASE__ ) -> U | None:
"""simple docstring"""
if key in self.cache:
self.hits += 1
SCREAMING_SNAKE_CASE__ : DoubleLinkedListNode[T, U] = self.cache[key]
SCREAMING_SNAKE_CASE__ : Optional[Any] = self.list.remove(self.cache[key] )
assert node == value_node
# node is guaranteed not None because it is in self.cache
assert node is not None
self.list.add(SCREAMING_SNAKE_CASE__ )
return node.val
self.miss += 1
return None
def __magic_name__ (self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) -> None:
"""simple docstring"""
if key not in self.cache:
if self.num_keys >= self.capacity:
# delete first node (oldest) when over capacity
SCREAMING_SNAKE_CASE__ : Any = self.list.head.next
# guaranteed to have a non-None first node when num_keys > 0
# explain to type checker via assertions
assert first_node is not None
assert first_node.key is not None
assert (
self.list.remove(SCREAMING_SNAKE_CASE__ ) is not None
) # node guaranteed to be in list assert node.key is not None
del self.cache[first_node.key]
self.num_keys -= 1
SCREAMING_SNAKE_CASE__ : List[str] = DoubleLinkedListNode(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
self.list.add(self.cache[key] )
self.num_keys += 1
else:
# bump node to the end of the list, update value
SCREAMING_SNAKE_CASE__ : Optional[int] = self.list.remove(self.cache[key] )
assert node is not None # node guaranteed to be in list
SCREAMING_SNAKE_CASE__ : Optional[Any] = value
self.list.add(SCREAMING_SNAKE_CASE__ )
@classmethod
def __magic_name__ (cls , SCREAMING_SNAKE_CASE__ = 1_28 ) -> Callable[[Callable[[T], U]], Callable[..., U]]:
"""simple docstring"""
def cache_decorator_inner(SCREAMING_SNAKE_CASE__ ) -> Callable[..., U]:
def cache_decorator_wrapper(*SCREAMING_SNAKE_CASE__ ) -> U:
if func not in cls.decorator_function_to_instance_map:
SCREAMING_SNAKE_CASE__ : List[str] = LRUCache(SCREAMING_SNAKE_CASE__ )
SCREAMING_SNAKE_CASE__ : int = cls.decorator_function_to_instance_map[func].get(args[0] )
if result is None:
SCREAMING_SNAKE_CASE__ : Tuple = func(*SCREAMING_SNAKE_CASE__ )
cls.decorator_function_to_instance_map[func].put(args[0] , SCREAMING_SNAKE_CASE__ )
return result
def cache_info() -> LRUCache[T, U]:
return cls.decorator_function_to_instance_map[func]
setattr(SCREAMING_SNAKE_CASE__ , """cache_info""" , SCREAMING_SNAKE_CASE__ ) # noqa: B010
return cache_decorator_wrapper
return cache_decorator_inner
if __name__ == "__main__":
import doctest
doctest.testmod()
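# Usage sketch (an illustration, not part of the original file): the class-level
# decorator memoizes a function by its first positional argument, so a recursive
# Fibonacci gets cached transparently.
if __name__ == "__main__":

    @LRUCache.decorator(100)
    def fib(num: int) -> int:
        return num if num < 2 else fib(num - 1) + fib(num - 2)

    print(fib(30))
    print(fib.cache_info())  # type: ignore[attr-defined]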
| 223 | 0 |
import copy
import random

from transformers import CLIPTokenizer


class MultiTokenCLIPTokenizer(CLIPTokenizer):
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.token_map = {}

    def try_adding_tokens(self, placeholder_token, *args, **kwargs):
        num_added_tokens = super().add_tokens(placeholder_token, *args, **kwargs)
        if num_added_tokens == 0:
            raise ValueError(
                f"The tokenizer already contains the token {placeholder_token}. Please pass a different"
                " `placeholder_token` that is not already in the tokenizer."
            )

    def add_placeholder_tokens(self, placeholder_token, *args, num_vec_per_token=1, **kwargs):
        output = []
        if num_vec_per_token == 1:
            self.try_adding_tokens(placeholder_token, *args, **kwargs)
            output.append(placeholder_token)
        else:
            output = []
            for i in range(num_vec_per_token):
                ith_token = placeholder_token + f"_{i}"
                self.try_adding_tokens(ith_token, *args, **kwargs)
                output.append(ith_token)
        # handle cases where there is a new placeholder token that contains the current placeholder token but is larger
        for token in self.token_map:
            if token in placeholder_token:
                raise ValueError(
                    f"The tokenizer already has placeholder token {token} that can get confused with"
                    f" {placeholder_token}; keep placeholder tokens independent."
                )
        self.token_map[placeholder_token] = output

    def replace_placeholder_tokens_in_text(self, text, vector_shuffle=False, prop_tokens_to_load=1.0):
        if isinstance(text, list):
            output = []
            for i in range(len(text)):
                output.append(self.replace_placeholder_tokens_in_text(text[i], vector_shuffle=vector_shuffle))
            return output
        for placeholder_token in self.token_map:
            if placeholder_token in text:
                tokens = self.token_map[placeholder_token]
                tokens = tokens[: 1 + int(len(tokens) * prop_tokens_to_load)]
                if vector_shuffle:
                    tokens = copy.copy(tokens)
                    random.shuffle(tokens)
                text = text.replace(placeholder_token, " ".join(tokens))
        return text

    def __call__(self, text, *args, vector_shuffle=False, prop_tokens_to_load=1.0, **kwargs):
        return super().__call__(
            self.replace_placeholder_tokens_in_text(
                text, vector_shuffle=vector_shuffle, prop_tokens_to_load=prop_tokens_to_load
            ),
            *args,
            **kwargs,
        )

    def encode(self, text, *args, vector_shuffle=False, prop_tokens_to_load=1.0, **kwargs):
        return super().encode(
            self.replace_placeholder_tokens_in_text(
                text, vector_shuffle=vector_shuffle, prop_tokens_to_load=prop_tokens_to_load
            ),
            *args,
            **kwargs,
        )
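# Usage sketch (an illustration; the checkpoint is the public CLIP tokenizer and
# the placeholder token is made up):
if __name__ == "__main__":
    tokenizer = MultiTokenCLIPTokenizer.from_pretrained("openai/clip-vit-large-patch14")
    tokenizer.add_placeholder_tokens("<cat-toy>", num_vec_per_token=4)
    # "<cat-toy>" is expanded to "<cat-toy>_0 ... <cat-toy>_3" before tokenization
    print(tokenizer.encode("a photo of <cat-toy>"))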
| 517 |
from __future__ import annotations

import math

__version__ = "2020.9.26"
__author__ = "xcodz-dot, cclaus, dhruvmanila"


def convert_to_2d(x: float, y: float, z: float, scale: float, distance: float) -> tuple[float, float]:
    if not all(isinstance(val, (float, int)) for val in locals().values()):
        msg = f"Input values must either be float or int: {list(locals().values())}"
        raise TypeError(msg)
    projected_x = ((x * distance) / (z + distance)) * scale
    projected_y = ((y * distance) / (z + distance)) * scale
    return projected_x, projected_y


def rotate(x: float, y: float, z: float, axis: str, angle: float) -> tuple[float, float, float]:
    if not isinstance(axis, str):
        raise TypeError("Axis must be a str")
    input_variables = locals()
    del input_variables["axis"]
    if not all(isinstance(val, (float, int)) for val in input_variables.values()):
        msg = (
            "Input values except axis must either be float or int: "
            f"{list(input_variables.values())}"
        )
        raise TypeError(msg)
    angle = (angle % 360) / 450 * 180 / math.pi
    if axis == "z":
        new_x = x * math.cos(angle) - y * math.sin(angle)
        new_y = y * math.cos(angle) + x * math.sin(angle)
        new_z = z
    elif axis == "x":
        new_y = y * math.cos(angle) - z * math.sin(angle)
        new_z = z * math.cos(angle) + y * math.sin(angle)
        new_x = x
    elif axis == "y":
        new_x = x * math.cos(angle) - z * math.sin(angle)
        new_z = z * math.cos(angle) + x * math.sin(angle)
        new_y = y
    else:
        raise ValueError("not a valid axis, choose one of 'x', 'y', 'z'")
    return new_x, new_y, new_z


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    print(f"{convert_to_2d(1.0, 2.0, 3.0, 10.0, 10.0) = }")
    print(f"{rotate(1.0, 2.0, 3.0, 'y', 90.0) = }")
| 517 | 1 |
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple

import sentencepiece as spm

from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging


logger = logging.get_logger(__name__)

SPIECE_UNDERLINE = "▁"

VOCAB_FILES_NAMES = {"vocab_file": "spiece.model"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "google/reformer-crime-and-punishment": (
            "https://huggingface.co/google/reformer-crime-and-punishment/resolve/main/spiece.model"
        )
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "google/reformer-crime-and-punishment": 524288,
}


class ReformerTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        eos_token="</s>",
        unk_token="<unk>",
        additional_special_tokens=[],
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ) -> None:
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        super().__init__(
            eos_token=eos_token,
            unk_token=unk_token,
            additional_special_tokens=additional_special_tokens,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )

        self.vocab_file = vocab_file
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(vocab_file)

    @property
    def vocab_size(self) -> int:
        return self.sp_model.get_piece_size()

    def get_vocab(self) -> Dict[str, int]:
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)

    def _tokenize(self, text: str) -> List[str]:
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        return self.sp_model.piece_to_id(token)

    def _convert_id_to_token(self, index):
        if index < self.sp_model.get_piece_size():
            token = self.sp_model.IdToPiece(index)
        return token

    def convert_tokens_to_string(self, tokens):
        current_sub_tokens = []
        out_string = ""
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                out_string += self.sp_model.decode(current_sub_tokens) + token
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string.strip()

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)
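# Usage sketch (an assumption, not part of the module): round-trip text through
# the sentencepiece vocabulary shipped with the public checkpoint.
#
#     tokenizer = ReformerTokenizer.from_pretrained("google/reformer-crime-and-punishment")
#     ids = tokenizer("A few words for testing.")["input_ids"]
#     print(tokenizer.convert_tokens_to_string(tokenizer.convert_ids_to_tokens(ids)))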
| 590 |
import gc
import unittest

from diffusers import FlaxControlNetModel, FlaxStableDiffusionControlNetPipeline
from diffusers.utils import is_flax_available, load_image, slow
from diffusers.utils.testing_utils import require_flax


if is_flax_available():
    import jax
    import jax.numpy as jnp
    from flax.jax_utils import replicate
    from flax.training.common_utils import shard


@slow
@require_flax
class FlaxControlNetPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()

    def test_canny(self):
        controlnet, controlnet_params = FlaxControlNetModel.from_pretrained(
            "lllyasviel/sd-controlnet-canny", from_pt=True, dtype=jnp.bfloat16
        )
        pipe, params = FlaxStableDiffusionControlNetPipeline.from_pretrained(
            "runwayml/stable-diffusion-v1-5", controlnet=controlnet, from_pt=True, dtype=jnp.bfloat16
        )
        params["controlnet"] = controlnet_params

        prompts = "bird"
        num_samples = jax.device_count()
        prompt_ids = pipe.prepare_text_inputs([prompts] * num_samples)

        canny_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/bird_canny.png"
        )
        processed_image = pipe.prepare_image_inputs([canny_image] * num_samples)

        rng = jax.random.PRNGKey(0)
        rng = jax.random.split(rng, jax.device_count())

        p_params = replicate(params)
        prompt_ids = shard(prompt_ids)
        processed_image = shard(processed_image)

        images = pipe(
            prompt_ids=prompt_ids,
            image=processed_image,
            params=p_params,
            prng_seed=rng,
            num_inference_steps=50,
            jit=True,
        ).images
        assert images.shape == (jax.device_count(), 1, 768, 512, 3)

        images = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:])
        image_slice = images[0, 253:256, 253:256, -1]

        output_slice = jnp.asarray(jax.device_get(image_slice.flatten()))
        expected_slice = jnp.array(
            [0.167969, 0.116699, 0.081543, 0.154297, 0.132812, 0.108887, 0.169922, 0.169922, 0.205078]
        )
        print(f"output_slice: {output_slice}")
        assert jnp.abs(output_slice - expected_slice).max() < 1e-2

    def test_pose(self):
        controlnet, controlnet_params = FlaxControlNetModel.from_pretrained(
            "lllyasviel/sd-controlnet-openpose", from_pt=True, dtype=jnp.bfloat16
        )
        pipe, params = FlaxStableDiffusionControlNetPipeline.from_pretrained(
            "runwayml/stable-diffusion-v1-5", controlnet=controlnet, from_pt=True, dtype=jnp.bfloat16
        )
        params["controlnet"] = controlnet_params

        prompts = "Chef in the kitchen"
        num_samples = jax.device_count()
        prompt_ids = pipe.prepare_text_inputs([prompts] * num_samples)

        pose_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/pose.png"
        )
        processed_image = pipe.prepare_image_inputs([pose_image] * num_samples)

        rng = jax.random.PRNGKey(0)
        rng = jax.random.split(rng, jax.device_count())

        p_params = replicate(params)
        prompt_ids = shard(prompt_ids)
        processed_image = shard(processed_image)

        images = pipe(
            prompt_ids=prompt_ids,
            image=processed_image,
            params=p_params,
            prng_seed=rng,
            num_inference_steps=50,
            jit=True,
        ).images
        assert images.shape == (jax.device_count(), 1, 768, 512, 3)

        images = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:])
        image_slice = images[0, 253:256, 253:256, -1]

        output_slice = jnp.asarray(jax.device_get(image_slice.flatten()))
        expected_slice = jnp.array(
            [[0.271484, 0.261719, 0.275391, 0.277344, 0.279297, 0.291016, 0.294922, 0.302734, 0.302734]]
        )
        print(f"output_slice: {output_slice}")
        assert jnp.abs(output_slice - expected_slice).max() < 1e-2
| 590 | 1 |
from typing import Dict

from transformers import EvalPrediction, HfArgumentParser, TrainingArguments, is_torch_available
from transformers.testing_utils import (
    TestCasePlus,
    execute_subprocess_async,
    get_torch_dist_unique_port,
    require_torch_multi_gpu,
    require_torch_neuroncore,
)
from transformers.training_args import ParallelMode
from transformers.utils import logging


logger = logging.get_logger(__name__)


if is_torch_available():
    import torch
    from torch import nn
    from torch.utils.data import Dataset

    from transformers import Trainer

    class DummyDataset(Dataset):
        def __init__(self, length: int = 101):
            self.length = length

        def __len__(self):
            return self.length

        def __getitem__(self, i):
            return i

    class DummyDataCollator:
        def __call__(self, features):
            return {"input_ids": torch.tensor(features), "labels": torch.tensor(features)}

    class DummyModel(nn.Module):
        def __init__(self):
            super().__init__()
            # Add some (unused) params otherwise DDP will complain.
            self.fc = nn.Linear(120, 80)

        def forward(self, input_ids, labels=None):
            if labels is not None:
                return torch.tensor(0.0, device=input_ids.device), input_ids
            else:
                return input_ids


class TestTrainerDistributedNeuronCore(TestCasePlus):
    @require_torch_neuroncore
    def test_trainer(self):
        distributed_args = f"""--nproc_per_node=2
            --master_port={get_torch_dist_unique_port()}
            {self.test_file_dir}/test_trainer_distributed.py
        """.split()
        output_dir = self.get_auto_remove_tmp_dir()
        args = f"--output_dir {output_dir}".split()
        cmd = ["torchrun"] + distributed_args + args
        execute_subprocess_async(cmd, env=self.get_env())
        # successful return here == success - any errors would have caused an error in the sub-call


class TestTrainerDistributed(TestCasePlus):
    @require_torch_multi_gpu
    def test_trainer(self):
        distributed_args = f"""--nproc_per_node={torch.cuda.device_count()}
            --master_port={get_torch_dist_unique_port()}
            {self.test_file_dir}/test_trainer_distributed.py
        """.split()
        output_dir = self.get_auto_remove_tmp_dir()
        args = f"--output_dir {output_dir}".split()
        cmd = ["torchrun"] + distributed_args + args
        execute_subprocess_async(cmd, env=self.get_env())
        # successful return here == success - any errors would have caused an error in the sub-call


if __name__ == "__main__":
    # The script below is meant to be run under torch.distributed, on a machine with multiple GPUs:
    #
    # PYTHONPATH="src" python -m torch.distributed.run --nproc_per_node 2 --output_dir output_dir ./tests/test_trainer_distributed.py

    parser = HfArgumentParser((TrainingArguments,))
    training_args = parser.parse_args_into_dataclasses()[0]

    logger.warning(
        f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}, "
        f"distributed training: {training_args.parallel_mode != ParallelMode.NOT_DISTRIBUTED}"
    )

    # Essentially, what we want to verify in the distributed case is that we get all samples back,
    # in the right order. (this is crucial for prediction for instance)
    for dataset_length in [101, 40, 7]:
        dataset = DummyDataset(dataset_length)

        def compute_metrics(p: EvalPrediction) -> Dict:
            sequential = list(range(len(dataset)))
            success = p.predictions.tolist() == sequential and p.label_ids.tolist() == sequential
            if not success and training_args.local_rank == 0:
                logger.warning(
                    "Predictions and/or labels do not match expected results:\n  - predictions: "
                    f"{p.predictions.tolist()}\n  - labels: {p.label_ids.tolist()}\n  - expected: {sequential}"
                )
            return {"success": success}

        trainer = Trainer(
            model=DummyModel(),
            args=training_args,
            data_collator=DummyDataCollator(),
            eval_dataset=dataset,
            compute_metrics=compute_metrics,
        )
        metrics = trainer.evaluate()
        logger.info(metrics)
        if metrics["eval_success"] is not True:
            logger.error(metrics)
            exit(1)

        p = trainer.predict(dataset)
        logger.info(p.metrics)
        if p.metrics["test_success"] is not True:
            logger.error(p.metrics)
            exit(1)

        trainer.args.eval_accumulation_steps = 2

        metrics = trainer.evaluate()
        logger.info(metrics)
        if metrics["eval_success"] is not True:
            logger.error(metrics)
            exit(1)

        p = trainer.predict(dataset)
        logger.info(p.metrics)
        if p.metrics["test_success"] is not True:
            logger.error(p.metrics)
            exit(1)

        trainer.args.eval_accumulation_steps = None
| 231 |
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional, Union

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig, OnnxSeq2SeqConfigWithPast
from ...utils import logging


if TYPE_CHECKING:
    from ...feature_extraction_utils import FeatureExtractionMixin
    from ...tokenization_utils_base import PreTrainedTokenizerBase
    from ...utils import TensorType

logger = logging.get_logger(__name__)

WHISPER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "openai/whisper-base": "https://huggingface.co/openai/whisper-base/resolve/main/config.json",
}

# fmt: off
NON_SPEECH_TOKENS = [
    1, 2, 7, 8, 9, 10, 14, 25,
    26, 27, 28, 29, 31, 58, 59, 60, 61, 62,
    63, 90, 91, 92, 93, 357, 366, 438, 532, 685,
    705, 796, 930, 1058, 1220, 1267, 1279, 1303, 1343, 1377,
    1391, 1635, 1782, 1875, 2162, 2361, 2488, 3467, 4008, 4211,
    4600, 4808, 5299, 5855, 6329, 7203, 9609, 9959, 10563, 10786,
    11420, 11709, 11907, 13163, 13697, 13700, 14808, 15306, 16410, 16791,
    17992, 19203, 19510, 20724, 22305, 22935, 27007, 30109, 30420, 33409,
    34949, 40283, 40493, 40549, 47282, 49146, 50257, 50359, 50360, 50361
]
NON_SPEECH_TOKENS_MULTI = [
    1, 2, 7, 8, 9, 10, 14, 25,
    26, 27, 28, 29, 31, 58, 59, 60, 61, 62,
    63, 90, 91, 92, 93, 359, 503, 522, 542, 873,
    893, 902, 918, 922, 931, 1350, 1853, 1982, 2460, 2627,
    3246, 3253, 3268, 3536, 3846, 3961, 4183, 4667, 6585, 6647,
    7273, 9061, 9383, 10428, 10929, 11938, 12033, 12331, 12562, 13793,
    14157, 14635, 15265, 15618, 16553, 16604, 18362, 18956, 20075, 21675,
    22520, 26130, 26161, 26435, 28279, 29464, 31650, 32302, 32470, 36865,
    42863, 47425, 49870, 50254, 50258, 50360, 50361, 50362
]
# fmt: on


class WhisperConfig(PretrainedConfig):
    model_type = "whisper"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}

    def __init__(
        self,
        vocab_size=51865,
        num_mel_bins=80,
        encoder_layers=6,
        encoder_attention_heads=4,
        decoder_layers=6,
        decoder_attention_heads=4,
        decoder_ffn_dim=1536,
        encoder_ffn_dim=1536,
        encoder_layerdrop=0.0,
        decoder_layerdrop=0.0,
        decoder_start_token_id=50257,
        use_cache=True,
        is_encoder_decoder=True,
        activation_function="gelu",
        d_model=256,
        dropout=0.0,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        scale_embedding=False,
        max_source_positions=1500,
        max_target_positions=448,
        pad_token_id=50256,
        bos_token_id=50256,
        eos_token_id=50256,
        suppress_tokens=None,
        begin_suppress_tokens=[220, 50256],
        use_weighted_layer_sum=False,
        classifier_proj_size=256,
        apply_spec_augment=False,
        mask_time_prob=0.05,
        mask_time_length=10,
        mask_time_min_masks=2,
        mask_feature_prob=0.0,
        mask_feature_length=10,
        mask_feature_min_masks=0,
        median_filter_width=7,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.num_mel_bins = num_mel_bins
        self.d_model = d_model
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.encoder_ffn_dim = encoder_ffn_dim
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        self.max_source_positions = max_source_positions
        self.max_target_positions = max_target_positions

        # Audio Classification-specific parameters. Feel free to ignore for other classes.
        self.classifier_proj_size = classifier_proj_size
        self.use_weighted_layer_sum = use_weighted_layer_sum

        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.apply_spec_augment = apply_spec_augment
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length
        self.mask_feature_min_masks = mask_feature_min_masks

        self.median_filter_width = median_filter_width

        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            decoder_start_token_id=decoder_start_token_id,
            suppress_tokens=suppress_tokens,
            begin_suppress_tokens=begin_suppress_tokens,
            **kwargs,
        )


class WhisperOnnxConfig(OnnxSeq2SeqConfigWithPast):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        common_inputs = OrderedDict(
            [
                ("input_features", {0: "batch", 1: "feature_size", 2: "encoder_sequence"}),
            ]
        )
        if self.use_past:
            common_inputs["decoder_input_ids"] = {0: "batch"}
        else:
            common_inputs["decoder_input_ids"] = {0: "batch", 1: "decoder_sequence"}

        if self.use_past:
            self.fill_with_past_key_values_(common_inputs, direction="inputs")

        return common_inputs

    def generate_dummy_inputs(
        self,
        preprocessor: Union["PreTrainedTokenizerBase", "FeatureExtractionMixin"],
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional["TensorType"] = None,
        sampling_rate: int = 22050,
        time_duration: float = 5.0,
        frequency: int = 220,
    ) -> Mapping[str, Any]:
        dummy_inputs = OrderedDict()
        encoder_inputs = OnnxConfig.generate_dummy_inputs(
            self,
            preprocessor=preprocessor.feature_extractor,
            batch_size=batch_size,
            framework=framework,
            sampling_rate=sampling_rate,
            time_duration=time_duration,
            frequency=frequency,
        )
        encoder_sequence_length = encoder_inputs["input_features"].shape[2]
        seq_length = encoder_sequence_length // 2 if self.use_past else seq_length

        decoder_inputs = super().generate_dummy_inputs(
            preprocessor.tokenizer, batch_size, seq_length, is_pair, framework
        )

        dummy_inputs["input_features"] = encoder_inputs.pop("input_features")
        dummy_inputs["decoder_input_ids"] = decoder_inputs.pop("decoder_input_ids")

        if "past_key_values" in decoder_inputs:
            dummy_inputs["past_key_values"] = decoder_inputs.pop("past_key_values")

        return dummy_inputs

    @property
    def atol_for_validation(self) -> float:
        return 1e-3
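# Usage sketch (an illustration): every field can be overridden by keyword, and
# attribute_map lets the generic `hidden_size` name alias `d_model`.
#
#     config = WhisperConfig(d_model=384, encoder_layers=4, decoder_layers=4)
#     print(config.hidden_size)  # 384, resolved through attribute_map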
| 231 | 1 |
import os
from shutil import copyfile
from typing import List, Optional, Tuple

from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging


if is_sentencepiece_available():
    from .tokenization_xlnet import XLNetTokenizer
else:
    XLNetTokenizer = None

logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "spiece.model", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "xlnet-base-cased": "https://huggingface.co/xlnet-base-cased/resolve/main/spiece.model",
        "xlnet-large-cased": "https://huggingface.co/xlnet-large-cased/resolve/main/spiece.model",
    },
    "tokenizer_file": {
        "xlnet-base-cased": "https://huggingface.co/xlnet-base-cased/resolve/main/tokenizer.json",
        "xlnet-large-cased": "https://huggingface.co/xlnet-large-cased/resolve/main/tokenizer.json",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "xlnet-base-cased": None,
    "xlnet-large-cased": None,
}

SPIECE_UNDERLINE = "▁"

# Segments (not really needed)
SEG_ID_A = 0
SEG_ID_B = 1
SEG_ID_CLS = 2
SEG_ID_SEP = 3
SEG_ID_PAD = 4


class XLNetTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    padding_side = "left"
    slow_tokenizer_class = XLNetTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=False,
        remove_space=True,
        keep_accents=False,
        bos_token="<s>",
        eos_token="</s>",
        unk_token="<unk>",
        sep_token="<sep>",
        pad_token="<pad>",
        cls_token="<cls>",
        mask_token="<mask>",
        additional_special_tokens=["<eop>", "<eod>"],
        **kwargs,
    ):
        # Mask token behaves like a normal word, i.e. includes the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        super().__init__(
            vocab_file=vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            remove_space=remove_space,
            keep_accents=keep_accents,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            additional_special_tokens=additional_special_tokens,
            **kwargs,
        )

        self._pad_token_type_id = 3
        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return token_ids_0 + sep + cls
        return token_ids_0 + sep + token_ids_1 + sep + cls

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls_segment_id = [2]

        if token_ids_1 is None:
            return len(token_ids_0 + sep) * [0] + cls_segment_id
        return len(token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1] + cls_segment_id

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
                "tokenizer."
            )

        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)

        return (out_vocab_file,)
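# Worked example (illustrative token ids): XLNet appends its special tokens at
# the end, so for a single sequence the layout is `A ... A <sep> <cls>`:
#
#     build_inputs_with_special_tokens([10, 11])       -> [10, 11, sep_id, cls_id]
#     create_token_type_ids_from_sequences([10, 11])   -> [0, 0, 0, 2]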
| 59 |
from __future__ import annotations

END = "#"


class Trie:
    def __init__(self) -> None:
        self._trie: dict = {}

    def insert_word(self, text: str) -> None:
        trie = self._trie
        for char in text:
            if char not in trie:
                trie[char] = {}
            trie = trie[char]
        trie[END] = True

    def find_word(self, prefix: str) -> tuple | list:
        trie = self._trie
        for char in prefix:
            if char in trie:
                trie = trie[char]
            else:
                return []
        return self._elements(trie)

    def _elements(self, d: dict) -> tuple:
        result = []
        for c, v in d.items():
            sub_result = [" "] if c == END else [(c + s) for s in self._elements(v)]
            result.extend(sub_result)
        return tuple(result)


trie = Trie()
words = ("depart", "detergent", "daring", "dog", "deer", "deal")
for word in words:
    trie.insert_word(word)


def autocomplete_using_trie(string: str) -> tuple:
    suffixes = trie.find_word(string)
    return tuple(string + word for word in suffixes)
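

def _demo_autocomplete() -> None:
    # Quick sanity check (added for illustration): only words sharing the
    # "de" prefix are completed, and each completion carries the trailing
    # space that _elements uses to mark a word boundary.
    assert sorted(autocomplete_using_trie("de")) == ["deal ", "deer ", "depart ", "detergent "]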
def main() -> None:
    print(autocomplete_using_trie("de"))
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 662 | 0 |
import collections
import inspect
import unittest
from transformers import Swinv2Config
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
    from transformers import Swinv2ForImageClassification, Swinv2ForMaskedImageModeling, Swinv2Model
    from transformers.models.swinv2.modeling_swinv2 import SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class Swinv2ModelTester:
    def __init__(self, parent, batch_size=13, image_size=32, patch_size=2, num_channels=3, embed_dim=16, depths=[1, 2, 1], num_heads=[2, 2, 4], window_size=2, mlp_ratio=2.0, qkv_bias=True, hidden_dropout_prob=0.0, attention_probs_dropout_prob=0.0, drop_path_rate=0.1, hidden_act="gelu", use_absolute_embeddings=False, patch_norm=True, initializer_range=0.02, layer_norm_eps=1e-5, is_training=True, scope=None, use_labels=True, type_sequence_label_size=10, encoder_stride=8):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.patch_norm = patch_norm
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.is_training = is_training
        self.scope = scope
        self.use_labels = use_labels
        self.type_sequence_label_size = type_sequence_label_size
        self.encoder_stride = encoder_stride
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)

        config = self.get_config()
        return config, pixel_values, labels
    def get_config(self):
        return Swinv2Config(
            image_size=self.image_size, patch_size=self.patch_size, num_channels=self.num_channels, embed_dim=self.embed_dim, depths=self.depths, num_heads=self.num_heads, window_size=self.window_size, mlp_ratio=self.mlp_ratio, qkv_bias=self.qkv_bias, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, drop_path_rate=self.drop_path_rate, hidden_act=self.hidden_act, use_absolute_embeddings=self.use_absolute_embeddings, patch_norm=self.patch_norm, layer_norm_eps=self.layer_norm_eps, initializer_range=self.initializer_range, encoder_stride=self.encoder_stride,
        )
    def create_and_check_model(self, config, pixel_values, labels):
        model = Swinv2Model(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)

        expected_seq_len = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths) - 1))
        expected_dim = int(config.embed_dim * 2 ** (len(config.depths) - 1))
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, expected_seq_len, expected_dim))
    def create_and_check_for_masked_image_modeling(self, config, pixel_values, labels):
        model = Swinv2ForMaskedImageModeling(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.logits.shape, (self.batch_size, self.num_channels, self.image_size, self.image_size)
        )

        # test greyscale images
        config.num_channels = 1
        model = Swinv2ForMaskedImageModeling(config)
        model.to(torch_device)
        model.eval()

        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, 1, self.image_size, self.image_size))
    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.type_sequence_label_size
        model = Swinv2ForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class Swinv2ModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (Swinv2Model, Swinv2ForImageClassification, Swinv2ForMaskedImageModeling) if is_torch_available() else ()
    )
    pipeline_model_mapping = (
        {"feature-extraction": Swinv2Model, "image-classification": Swinv2ForImageClassification}
        if is_torch_available()
        else {}
    )

    fx_compatible = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    def setUp(self):
        self.model_tester = Swinv2ModelTester(self)
        self.config_tester = ConfigTester(self, config_class=Swinv2Config, embed_dim=37)

    def test_config(self):
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    @unittest.skip(reason="Got `CUDA error: misaligned address` with PyTorch 2.0.0.")
    def test_multi_gpu_data_parallel_forward(self):
        pass

    @unittest.skip(reason="Swinv2 does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass
    def test_model_common_attributes(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))

    def test_forward_signature(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)
    def test_attention_outputs(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True

        for model_class in self.all_model_classes:
            inputs_dict["output_attentions"] = True
            inputs_dict["output_hidden_states"] = False
            config.return_dict = True
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            attentions = outputs.attentions
            expected_num_attentions = len(self.model_tester.depths)
            self.assertEqual(len(attentions), expected_num_attentions)

            # check that output_attentions also work using config
            del inputs_dict["output_attentions"]
            config.output_attentions = True
            window_size_squared = config.window_size**2
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            attentions = outputs.attentions
            self.assertEqual(len(attentions), expected_num_attentions)

            self.assertListEqual(
                list(attentions[0].shape[-3:]),
                [self.model_tester.num_heads[0], window_size_squared, window_size_squared],
            )
            out_len = len(outputs)

            # Check attention is always last and order is fine
            inputs_dict["output_attentions"] = True
            inputs_dict["output_hidden_states"] = True
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            if hasattr(self.model_tester, "num_hidden_states_types"):
                added_hidden_states = self.model_tester.num_hidden_states_types
            else:
                # also another +1 for reshaped_hidden_states
                added_hidden_states = 2
            self.assertEqual(out_len + added_hidden_states, len(outputs))

            self_attentions = outputs.attentions
            self.assertEqual(len(self_attentions), expected_num_attentions)

            self.assertListEqual(
                list(self_attentions[0].shape[-3:]),
                [self.model_tester.num_heads[0], window_size_squared, window_size_squared],
            )
    def check_hidden_states_output(self, inputs_dict, config, model_class, image_size):
        model = model_class(config)
        model.to(torch_device)
        model.eval()

        with torch.no_grad():
            outputs = model(**self._prepare_for_class(inputs_dict, model_class))

        hidden_states = outputs.hidden_states

        expected_num_layers = getattr(
            self.model_tester, "expected_num_hidden_layers", len(self.model_tester.depths) + 1
        )
        self.assertEqual(len(hidden_states), expected_num_layers)

        # Swinv2 has a different seq_length
        patch_size = (
            config.patch_size
            if isinstance(config.patch_size, collections.abc.Iterable)
            else (config.patch_size, config.patch_size)
        )

        num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])

        self.assertListEqual(
            list(hidden_states[0].shape[-2:]),
            [num_patches, self.model_tester.embed_dim],
        )

        reshaped_hidden_states = outputs.reshaped_hidden_states
        self.assertEqual(len(reshaped_hidden_states), expected_num_layers)

        batch_size, num_channels, height, width = reshaped_hidden_states[0].shape
        reshaped_hidden_states = (
            reshaped_hidden_states[0].view(batch_size, num_channels, height * width).permute(0, 2, 1)
        )

        self.assertListEqual(
            list(reshaped_hidden_states.shape[-2:]),
            [num_patches, self.model_tester.embed_dim],
        )
    def test_hidden_states_output(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        image_size = (
            self.model_tester.image_size
            if isinstance(self.model_tester.image_size, collections.abc.Iterable)
            else (self.model_tester.image_size, self.model_tester.image_size)
        )

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            self.check_hidden_states_output(inputs_dict, config, model_class, image_size)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True

            self.check_hidden_states_output(inputs_dict, config, model_class, image_size)
    def test_hidden_states_output_with_padding(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.patch_size = 3

        image_size = (
            self.model_tester.image_size
            if isinstance(self.model_tester.image_size, collections.abc.Iterable)
            else (self.model_tester.image_size, self.model_tester.image_size)
        )
        patch_size = (
            config.patch_size
            if isinstance(config.patch_size, collections.abc.Iterable)
            else (config.patch_size, config.patch_size)
        )

        padded_height = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0])
        padded_width = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1])

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            self.check_hidden_states_output(inputs_dict, config, model_class, (padded_height, padded_width))

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            self.check_hidden_states_output(inputs_dict, config, model_class, (padded_height, padded_width))
    def test_for_masked_image_modeling(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_image_modeling(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = Swinv2Model.from_pretrained(model_name)
            self.assertIsNotNone(model)

    def test_initialization(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        configs_no_init = _config_zero_init(config)
        for model_class in self.all_model_classes:
            model = model_class(config=configs_no_init)
            for name, param in model.named_parameters():
                if "embeddings" not in name and "logit_scale" not in name and param.requires_grad:
                    self.assertIn(
                        ((param.data.mean() * 1e9).round() / 1e9).item(),
                        [0.0, 1.0],
                        msg=f"Parameter {name} of model {model_class} seems not properly initialized",
                    )
@require_vision
@require_torch
class Swinv2ModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return (
            AutoImageProcessor.from_pretrained("microsoft/swinv2-tiny-patch4-window8-256")
            if is_vision_available()
            else None
        )

    @slow
    def test_inference_image_classification_head(self):
        model = Swinv2ForImageClassification.from_pretrained("microsoft/swinv2-tiny-patch4-window8-256").to(
            torch_device
        )
        image_processor = self.default_image_processor

        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)
        expected_slice = torch.tensor([-0.3947, -0.4306, 0.0026]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
| 636 |
import math
def is_prime(number: int) -> bool:
    # precondition
    assert isinstance(number, int) and (number >= 0), "'number' must been an int and positive"

    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or not number % 2:
        # Negatives, 0, 1 and all even numbers are not primes
        return False

    odd_numbers = range(3, int(math.sqrt(number) + 1), 2)
    return not any(not number % i for i in odd_numbers)
def next_prime(value, factor=1, **kwargs):
    value = factor * value
    first_value_val = value

    while not is_prime(value):
        value += 1 if not ("desc" in kwargs and kwargs["desc"] is True) else -1

    if value == first_value_val:
        return next_prime(value + 1, **kwargs)
    return value
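

def _demo_primes() -> None:
    # Quick sanity checks (added for illustration): 2 and 3 short-circuit to
    # True, even numbers fail fast, and next_prime walks upward from 14 to 17.
    assert is_prime(29) is True
    assert is_prime(28) is False
    assert next_prime(14) == 17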
| 636 | 1 |
"""simple docstring"""
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
PIX2STRUCT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''google/pix2struct-textcaps-base''': (
'''https://huggingface.co/google/pix2struct-textcaps-base/resolve/main/config.json'''
),
}
class Pix2StructTextConfig(PretrainedConfig):
    model_type = "pix2struct_text_model"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "hidden_size": "hidden_size",
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }
    def __init__(self, vocab_size=50_244, hidden_size=768, d_kv=64, d_ff=2_048, num_layers=12, num_heads=12, relative_attention_num_buckets=32, relative_attention_max_distance=128, dropout_rate=0.1, layer_norm_epsilon=1e-6, initializer_factor=1.0, dense_act_fn="gelu_new", decoder_start_token_id=0, use_cache=False, pad_token_id=0, eos_token_id=1, tie_word_embeddings=False, is_decoder=True, **kwargs):
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.d_kv = d_kv
        self.d_ff = d_ff
        self.num_layers = num_layers
        self.num_heads = num_heads
        self.relative_attention_num_buckets = relative_attention_num_buckets
        self.relative_attention_max_distance = relative_attention_max_distance
        self.dropout_rate = dropout_rate
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_factor = initializer_factor
        self.use_cache = use_cache

        self.eos_token_id = eos_token_id
        self.decoder_start_token_id = decoder_start_token_id

        # for backwards compatibility
        self.dense_act_fn = dense_act_fn

        super().__init__(
            pad_token_id=pad_token_id, eos_token_id=eos_token_id, decoder_start_token_id=decoder_start_token_id, tie_word_embeddings=tie_word_embeddings, is_decoder=is_decoder, **kwargs,
        )
    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        # get the text config dict if we are loading from Pix2StructConfig
        if config_dict.get("model_type") == "pix2struct":
            config_dict = config_dict["text_config"]

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )

        return cls.from_dict(config_dict, **kwargs)
class Pix2StructVisionConfig(PretrainedConfig):
    model_type = "pix2struct_vision_model"

    def __init__(self, hidden_size=768, patch_embed_hidden_size=768, d_ff=2_048, d_kv=64, num_hidden_layers=12, num_attention_heads=12, dense_act_fn="gelu_new", layer_norm_eps=1e-6, dropout_rate=0.0, attention_dropout=0.0, initializer_range=1e-10, initializer_factor=1.0, seq_len=4_096, relative_attention_num_buckets=32, relative_attention_max_distance=128, **kwargs):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.patch_embed_hidden_size = patch_embed_hidden_size
        self.d_ff = d_ff
        self.dropout_rate = dropout_rate
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.initializer_range = initializer_range
        self.initializer_factor = initializer_factor
        self.attention_dropout = attention_dropout
        self.layer_norm_eps = layer_norm_eps
        self.dense_act_fn = dense_act_fn
        self.seq_len = seq_len
        self.relative_attention_num_buckets = relative_attention_num_buckets
        self.relative_attention_max_distance = relative_attention_max_distance
        self.d_kv = d_kv
    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        # get the vision config dict if we are loading from Pix2StructConfig
        if config_dict.get("model_type") == "pix2struct":
            config_dict = config_dict["vision_config"]

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )

        return cls.from_dict(config_dict, **kwargs)
class Pix2StructConfig(PretrainedConfig):
    model_type = "pix2struct"
    is_composition = True

    def __init__(self, text_config=None, vision_config=None, initializer_factor=1.0, initializer_range=0.02, is_vqa=False, tie_word_embeddings=False, is_encoder_decoder=True, **kwargs):
        super().__init__(tie_word_embeddings=tie_word_embeddings, is_encoder_decoder=is_encoder_decoder, **kwargs)

        if text_config is None:
            text_config = {}
            logger.info("text_config is None. Initializing the Pix2StructTextConfig with default values.")

        if vision_config is None:
            vision_config = {}
            logger.info("vision_config is None. Initializing the Pix2StructVisionConfig with default values.")

        self.text_config = Pix2StructTextConfig(**text_config)
        self.vision_config = Pix2StructVisionConfig(**vision_config)

        self.decoder_start_token_id = self.text_config.decoder_start_token_id
        self.pad_token_id = self.text_config.pad_token_id
        self.eos_token_id = self.text_config.eos_token_id

        self.initializer_factor = initializer_factor
        self.initializer_range = initializer_range

        self.text_config.initializer_range = self.initializer_range
        self.vision_config.initializer_range = self.initializer_range

        self.is_vqa = is_vqa
    @classmethod
    def from_text_vision_configs(cls, text_config: Pix2StructTextConfig, vision_config: Pix2StructVisionConfig, **kwargs):
        return cls(text_config=text_config.to_dict(), vision_config=vision_config.to_dict(), **kwargs)

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["text_config"] = self.text_config.to_dict()
        output["vision_config"] = self.vision_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
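

# ---------------------------------------------------------------------------
# Minimal usage sketch (added, not part of the original file). It only uses
# the three classes defined above: a composite config built from default
# sub-configs, and the same thing built via the classmethod.
def _demo_pix2struct_config() -> None:
    config = Pix2StructConfig()
    same = Pix2StructConfig.from_text_vision_configs(Pix2StructTextConfig(), Pix2StructVisionConfig())
    assert config.text_config.hidden_size == same.text_config.hidden_size == 768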
| 95 |
from __future__ import annotations
A_ : str = "Muhammad Umer Farooq"
A_ : Optional[Any] = "MIT"
A_ : int = "1.0.0"
A_ : int = "Muhammad Umer Farooq"
A_ : int = "[email protected]"
A_ : Dict = "Alpha"
import re
from html.parser import HTMLParser
from urllib import parse
import requests
class Parser(HTMLParser):
    def __init__(self, domain: str) -> None:
        super().__init__()
        self.urls: list[str] = []
        self.domain = domain

    def handle_starttag(self, tag: str, attrs: list[tuple[str, str | None]]) -> None:
        # Only parse the 'anchor' tag.
        if tag == "a":
            # Check the list of defined attributes.
            for name, value in attrs:
                # If href is defined, and not empty nor # print it.
                if name == "href" and value != "#" and value != "":
                    # If not already in urls.
                    if value not in self.urls:
                        url = parse.urljoin(self.domain, value)
                        self.urls.append(url)
# Get main domain name (example.com)
def get_domain_name(url: str) -> str:
    return ".".join(get_sub_domain_name(url).split(".")[-2:])


# Get sub domain name (sub.example.com)
def get_sub_domain_name(url: str) -> str:
    return parse.urlparse(url).netloc
def UpperCamelCase__ ( __magic_name__ : str = "https://github.com" ) -> list[str]:
'''simple docstring'''
snake_case__ : List[str] = get_domain_name(__magic_name__ )
# Initialize the parser
snake_case__ : Optional[Any] = Parser(__magic_name__ )
try:
# Open URL
snake_case__ : Any = requests.get(__magic_name__ )
# pass the raw HTML to the parser to get links
parser.feed(r.text )
# Get links and loop through
snake_case__ : List[str] = set()
for link in parser.urls:
# open URL.
# read = requests.get(link)
try:
snake_case__ : Tuple = requests.get(__magic_name__ )
# Get the valid email.
snake_case__ : List[str] = re.findall("""[a-zA-Z0-9]+@""" + domain , read.text )
# If not in list then append it.
for email in emails:
valid_emails.add(__magic_name__ )
except ValueError:
pass
except ValueError:
raise SystemExit(1 )
# Finally return a sorted list of email addresses with no duplicates.
return sorted(__magic_name__ )
if __name__ == "__main__":
    emails = emails_from_url("https://github.com")
print(F'{len(emails)} emails found:')
print("\n".join(sorted(emails)))
| 38 | 0 |
import logging
import os
import sys
from pathlib import Path
from unittest.mock import patch
from parameterized import parameterized
from run_eval import run_generate
from run_eval_search import run_search
from transformers.testing_utils import CaptureStdout, TestCasePlus, slow
from utils import ROUGE_KEYS
logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger()
def _dump_articles(path: Path, articles: list):
    content = "\n".join(articles)
    Path(path).open("w").writelines(content)
T5_TINY = "patrickvonplaten/t5-tiny-random"
BART_TINY = "sshleifer/bart-tiny-random"
MBART_TINY = "sshleifer/tiny-mbart"

stream_handler = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
logging.disable(logging.CRITICAL) # remove noisy download output from tracebacks
class TestTheRest(TestCasePlus):
    def run_eval_tester(self, model):
        input_file_name = Path(self.get_auto_remove_tmp_dir()) / "utest_input.source"
        output_file_name = input_file_name.parent / "utest_output.txt"
        assert not output_file_name.exists()
        articles = [" New York (CNN)When Liana Barrientos was 23 years old, she got married in Westchester County."]
        _dump_articles(input_file_name, articles)

        score_path = str(Path(self.get_auto_remove_tmp_dir()) / "scores.json")
        task = "translation_en_to_de" if model == T5_TINY else "summarization"
        testargs = f"\n            run_eval_search.py\n            {model}\n            {input_file_name}\n            {output_file_name}\n            --score_path {score_path}\n            --task {task}\n            --num_beams 2\n            --length_penalty 2.0\n        ".split()

        with patch.object(sys, "argv", testargs):
            run_generate()
            assert Path(output_file_name).exists()
            # os.remove(Path(output_file_name))
    def test_run_eval(self):
        self.run_eval_tester(T5_TINY)

    @parameterized.expand([BART_TINY, MBART_TINY])
    @slow
    def test_run_eval_slow(self, model):
        self.run_eval_tester(model)

    @parameterized.expand([T5_TINY, MBART_TINY])
    @slow
    def test_run_eval_search(self, model):
        input_file_name = Path(self.get_auto_remove_tmp_dir()) / "utest_input.source"
        output_file_name = input_file_name.parent / "utest_output.txt"
        assert not output_file_name.exists()

        text = {
            "en": ["Machine learning is great, isn't it?", "I like to eat bananas", "Tomorrow is another great day!"],
            "de": [
                "Maschinelles Lernen ist großartig, oder?",
                "Ich esse gerne Bananen",
                "Morgen ist wieder ein toller Tag!",
            ],
        }

        tmp_dir = Path(self.get_auto_remove_tmp_dir())
        score_path = str(tmp_dir / "scores.json")
        reference_path = str(tmp_dir / "val.target")
        _dump_articles(input_file_name, text["en"])
        _dump_articles(reference_path, text["de"])
        task = "translation_en_to_de" if model == T5_TINY else "summarization"
        testargs = f"\n            run_eval_search.py\n            {model}\n            {str(input_file_name)}\n            {str(output_file_name)}\n            --score_path {score_path}\n            --reference_path {reference_path}\n            --task {task}\n        ".split()
        testargs.extend(["--search", "num_beams=1:2 length_penalty=0.9:1.0"])

        with patch.object(sys, "argv", testargs):
            with CaptureStdout() as cs:
                run_search()
            expected_strings = [" num_beams | length_penalty", model, "Best score args"]
            un_expected_strings = ["Info"]
            if "translation" in task:
                expected_strings.append("bleu")
            else:
                expected_strings.extend(ROUGE_KEYS)
            for w in expected_strings:
                assert w in cs.out
            for w in un_expected_strings:
                assert w not in cs.out
            assert Path(output_file_name).exists()
            os.remove(Path(output_file_name))
| 702 |
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {"configuration_focalnet": ["FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP", "FocalNetConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_focalnet"] = [
'''FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''FocalNetForImageClassification''',
'''FocalNetForMaskedImageModeling''',
'''FocalNetBackbone''',
'''FocalNetModel''',
'''FocalNetPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_focalnet import FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP, FocalNetConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_focalnet import (
FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST,
FocalNetBackbone,
FocalNetForImageClassification,
FocalNetForMaskedImageModeling,
FocalNetModel,
FocalNetPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 657 | 0 |
from __future__ import annotations
import math
from collections import Counter
from string import ascii_lowercase
def calculate_prob(text: str) -> None:
    single_char_strings, two_char_strings = analyze_text(text)
    my_alphas = list(" " + ascii_lowercase)

    # what is our total sum of probabilities.
    all_sum = sum(single_char_strings.values())

    # one length string
    my_fir_sum = 0
    # for each alpha we go in our dict and if it is in it we calculate entropy
    for ch in my_alphas:
        if ch in single_char_strings:
            my_str = single_char_strings[ch]
            prob = my_str / all_sum
            my_fir_sum += prob * math.log2(prob)  # entropy formula.

    # print entropy
    print(f"{round(-1 * my_fir_sum):.1f}")

    # two len string
    all_sum = sum(two_char_strings.values())
    my_sec_sum = 0
    # for each alpha (two in size) calculate entropy.
    for ch0 in my_alphas:
        for ch1 in my_alphas:
            sequence = ch0 + ch1
            if sequence in two_char_strings:
                my_str = two_char_strings[sequence]
                prob = int(my_str) / all_sum
                my_sec_sum += prob * math.log2(prob)

    # print second entropy
    print(f"{round(-1 * my_sec_sum):.1f}")

    # print the difference between them
    print(f"{round((-1 * my_sec_sum) - (-1 * my_fir_sum)):.1f}")
def analyze_text(text: str) -> tuple[dict, dict]:
    """Count single-character and two-character substrings of the text."""
    single_char_strings = Counter()  # type: ignore
    two_char_strings = Counter()  # type: ignore
    single_char_strings[text[-1]] += 1

    # first case when we have space at start.
    two_char_strings[" " + text[0]] += 1
    for i in range(0, len(text) - 1):
        single_char_strings[text[i]] += 1
        two_char_strings[text[i : i + 2]] += 1
    return single_char_strings, two_char_strings
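

def _demo_analyze_text() -> None:
    # Worked example (added for illustration): for the two-character string
    # "ab", the single-character counter sees 'a' (from the loop) and 'b'
    # (the final character), while the pair counter sees the leading-space
    # pair " a" plus "ab".
    singles, pairs = analyze_text("ab")
    assert singles == Counter({"a": 1, "b": 1})
    assert pairs == Counter({" a": 1, "ab": 1})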
def main() -> None:
    import doctest

    doctest.testmod()
# text = (
# "Had repulsive dashwoods suspicion sincerity but advantage now him. Remark "
# "easily garret nor nay. Civil those mrs enjoy shy fat merry. You greatest "
# "jointure saw horrible. He private he on be imagine suppose. Fertile "
# "beloved evident through no service elderly is. Blind there if every no so "
# "at. Own neglected you preferred way sincerity delivered his attempted. To "
# "of message cottage windows do besides against uncivil. Delightful "
# "unreserved impossible few estimating men favourable see entreaties. She "
# "propriety immediate was improving. He or entrance humoured likewise "
# "moderate. Much nor game son say feel. Fat make met can must form into "
# "gate. Me we offending prevailed discovery. "
# )
# calculate_prob(text)
if __name__ == "__main__":
main()
| 66 |
# This script creates a super tiny model that is useful inside tests, when we just want to test that
# the machinery works, without needing to the check the quality of the outcomes.
#
# This version creates a tiny vocab first, and then a tiny model - so the outcome is truly tiny -
# all files ~60KB. As compared to taking a full-size model, reducing to the minimum its layers and
# emb dimensions, but keeping the full vocab + merges files, leading to ~3MB in total for all files.
# The latter is done by `fsmt-make-super-tiny-model.py`.
#
# It will be used then as "stas/tiny-wmt19-en-ru"
from pathlib import Path
import json
import tempfile
from transformers import FSMTTokenizer, FSMTConfig, FSMTForConditionalGeneration
from transformers.models.fsmt.tokenization_fsmt import VOCAB_FILES_NAMES
mname_tiny = 'tiny-wmt19-en-ru'
# Build
# borrowed from a test
vocab = [
'l',
'o',
'w',
'e',
'r',
's',
't',
'i',
'd',
'n',
'w</w>',
'r</w>',
't</w>',
'lo',
'low',
'er</w>',
'low</w>',
'lowest</w>',
'newer</w>',
'wider</w>',
'<unk>',
]
vocab_tokens = dict(zip(vocab, range(len(vocab))))
merges = ['l o 123', 'lo w 1456', 'e r</w> 1789', '']
with tempfile.TemporaryDirectory() as tmpdirname:
    build_dir = Path(tmpdirname)
    src_vocab_file = build_dir / VOCAB_FILES_NAMES['src_vocab_file']
    tgt_vocab_file = build_dir / VOCAB_FILES_NAMES['tgt_vocab_file']
    merges_file = build_dir / VOCAB_FILES_NAMES['merges_file']
with open(src_vocab_file, 'w') as fp:
fp.write(json.dumps(vocab_tokens))
with open(tgt_vocab_file, 'w') as fp:
fp.write(json.dumps(vocab_tokens))
with open(merges_file, 'w') as fp:
fp.write('\n'.join(merges))
tokenizer = FSMTTokenizer(
langs=['en', 'ru'],
src_vocab_size=len(vocab),
tgt_vocab_size=len(vocab),
src_vocab_file=src_vocab_file,
tgt_vocab_file=tgt_vocab_file,
merges_file=merges_file,
)
config = FSMTConfig(
langs=['ru', 'en'],
src_vocab_size=1_0_0_0,
tgt_vocab_size=1_0_0_0,
d_model=4,
encoder_layers=1,
decoder_layers=1,
encoder_ffn_dim=4,
decoder_ffn_dim=4,
encoder_attention_heads=1,
decoder_attention_heads=1,
)
tiny_model = FSMTForConditionalGeneration(config)
print(F'''num of params {tiny_model.num_parameters()}''')
# Test
batch = tokenizer(['Making tiny model'], return_tensors='pt')
outputs = tiny_model(**batch)
print('test output:', len(outputs.logits[0]))
# Save
tiny_model.half() # makes it smaller
tiny_model.save_pretrained(mname_tiny)
tokenizer.save_pretrained(mname_tiny)
print(F'''Generated {mname_tiny}''')
# Upload
# transformers-cli upload tiny-wmt19-en-ru
| 296 | 0 |
"""simple docstring"""
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_big_bird import BigBirdTokenizer
else:
    BigBirdTokenizer = None
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"vocab_file": "spiece.model", "tokenizer_file": "tokenizer.json"}
PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''google/bigbird-roberta-base''': '''https://huggingface.co/google/bigbird-roberta-base/resolve/main/spiece.model''',
'''google/bigbird-roberta-large''': (
'''https://huggingface.co/google/bigbird-roberta-large/resolve/main/spiece.model'''
),
'''google/bigbird-base-trivia-itc''': (
'''https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/spiece.model'''
),
},
'''tokenizer_file''': {
'''google/bigbird-roberta-base''': (
'''https://huggingface.co/google/bigbird-roberta-base/resolve/main/tokenizer.json'''
),
'''google/bigbird-roberta-large''': (
'''https://huggingface.co/google/bigbird-roberta-large/resolve/main/tokenizer.json'''
),
'''google/bigbird-base-trivia-itc''': (
'''https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/tokenizer.json'''
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'''google/bigbird-roberta-base''': 4_096,
'''google/bigbird-roberta-large''': 4_096,
'''google/bigbird-base-trivia-itc''': 4_096,
}
SPIECE_UNDERLINE = "▁"
class BigBirdTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = BigBirdTokenizer
    model_input_names = ["input_ids", "attention_mask"]
    prefix_tokens: List[int] = []
    def __init__(self, vocab_file=None, tokenizer_file=None, unk_token="<unk>", bos_token="<s>", eos_token="</s>", pad_token="<pad>", sep_token="[SEP]", mask_token="[MASK]", cls_token="[CLS]", **kwargs):
        bos_token = AddedToken(bos_token, lstrip=False, rstrip=False) if isinstance(bos_token, str) else bos_token
        eos_token = AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token
        unk_token = AddedToken(unk_token, lstrip=False, rstrip=False) if isinstance(unk_token, str) else unk_token
        pad_token = AddedToken(pad_token, lstrip=False, rstrip=False) if isinstance(pad_token, str) else pad_token
        cls_token = AddedToken(cls_token, lstrip=False, rstrip=False) if isinstance(cls_token, str) else cls_token
        sep_token = AddedToken(sep_token, lstrip=False, rstrip=False) if isinstance(sep_token, str) else sep_token

        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        super().__init__(
            vocab_file, tokenizer_file=tokenizer_file, bos_token=bos_token, eos_token=eos_token, unk_token=unk_token, sep_token=sep_token, pad_token=pad_token, cls_token=cls_token, mask_token=mask_token, **kwargs,
        )

        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True
    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return cls + token_ids_0 + sep
        return cls + token_ids_0 + sep + token_ids_1 + sep
    def get_special_tokens_mask(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False):
        if already_has_special_tokens:
            if token_ids_1 is not None:
                raise ValueError(
                    "You should not supply a second sequence if the provided sequence of "
                    "ids is already formatted with special tokens for the model."
                )
            return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_0]

        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]
    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
                "tokenizer."
            )

        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
return (out_vocab_file,)
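

# ---------------------------------------------------------------------------
# Illustrative sketch (added, not part of the original file): the BERT-style
# layout built by the methods above, with [CLS] first. Ids are hypothetical
# (0 = [SEP], 1 = [CLS]); real ids come from the vocabulary.
def _demo_bigbird_special_token_layout() -> None:
    sep, cls = [0], [1]
    token_ids_0, token_ids_1 = [10, 11], [20]
    pair = cls + token_ids_0 + sep + token_ids_1 + sep  # [CLS] A [SEP] B [SEP]
    type_ids = len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]
    assert pair == [1, 10, 11, 0, 20, 0]
    assert type_ids == [0, 0, 0, 0, 1, 1]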
| 709 |
"""simple docstring"""
import os
import tempfile
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch
if is_torch_available():
import torch
from torch import nn
from transformers import (
Adafactor,
AdamW,
get_constant_schedule,
get_constant_schedule_with_warmup,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_inverse_sqrt_schedule,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
)
def unwrap_schedule(scheduler, num_steps=10):
    lrs = []
    for _ in range(num_steps):
        lrs.append(scheduler.get_lr()[0])
        scheduler.step()
    return lrs
def unwrap_and_save_reload_schedule(scheduler, num_steps=10):
    lrs = []
    for step in range(num_steps):
        lrs.append(scheduler.get_lr()[0])
        scheduler.step()
        if step == num_steps // 2:
            with tempfile.TemporaryDirectory() as tmpdirname:
                file_name = os.path.join(tmpdirname, "schedule.bin")
                torch.save(scheduler.state_dict(), file_name)

                state_dict = torch.load(file_name)
                scheduler.load_state_dict(state_dict)
    return lrs
@require_torch
class OptimizationTest(unittest.TestCase):
    def assertListAlmostEqual(self, list1, list2, tol):
        self.assertEqual(len(list1), len(list2))
        for a, b in zip(list1, list2):
            self.assertAlmostEqual(a, b, delta=tol)
    def test_adam_w(self):
        w = torch.tensor([0.1, -0.2, -0.1], requires_grad=True)
        target = torch.tensor([0.4, 0.2, -0.5])
        criterion = nn.MSELoss()
        # No warmup, constant schedule, no gradient clipping
        optimizer = AdamW(params=[w], lr=2e-1, weight_decay=0.0)
        for _ in range(100):
            loss = criterion(w, target)
            loss.backward()
            optimizer.step()
            w.grad.detach_()  # No zero_grad() function on simple tensors. we do it ourselves.
            w.grad.zero_()
        self.assertListAlmostEqual(w.tolist(), [0.4, 0.2, -0.5], tol=1e-2)
    def test_adafactor(self):
        w = torch.tensor([0.1, -0.2, -0.1], requires_grad=True)
        target = torch.tensor([0.4, 0.2, -0.5])
        criterion = nn.MSELoss()
        # No warmup, constant schedule, no gradient clipping
        optimizer = Adafactor(
            params=[w], lr=1e-2, eps=(1e-30, 1e-3), clip_threshold=1.0, decay_rate=-0.8, beta1=None, weight_decay=0.0, relative_step=False, scale_parameter=False, warmup_init=False,
        )
        for _ in range(1000):
            loss = criterion(w, target)
            loss.backward()
            optimizer.step()
            w.grad.detach_()  # No zero_grad() function on simple tensors. we do it ourselves.
            w.grad.zero_()
        self.assertListAlmostEqual(w.tolist(), [0.4, 0.2, -0.5], tol=1e-2)
@require_torch
class ScheduleInitTest(unittest.TestCase):
    m = nn.Linear(50, 50) if is_torch_available() else None
    optimizer = AdamW(m.parameters(), lr=10.0) if is_torch_available() else None
    num_steps = 10

    def assertListAlmostEqual(self, list1, list2, tol, msg=None):
        self.assertEqual(len(list1), len(list2))
        for a, b in zip(list1, list2):
            self.assertAlmostEqual(a, b, delta=tol, msg=msg)
    def test_schedulers(self):
        common_kwargs = {"num_warmup_steps": 2, "num_training_steps": 10}
        # schedulers doct format
        # function: (sched_args_dict, expected_learning_rates)
        scheds = {
get_constant_schedule: ({}, [10.0] * self.num_steps),
get_constant_schedule_with_warmup: (
{'''num_warmup_steps''': 4},
[0.0, 2.5, 5.0, 7.5, 10.0, 10.0, 10.0, 10.0, 10.0, 10.0],
),
get_linear_schedule_with_warmup: (
{**common_kwargs},
[0.0, 5.0, 10.0, 8.75, 7.5, 6.25, 5.0, 3.75, 2.5, 1.25],
),
get_cosine_schedule_with_warmup: (
{**common_kwargs},
[0.0, 5.0, 10.0, 9.61, 8.53, 6.91, 5.0, 3.08, 1.46, 0.38],
),
get_cosine_with_hard_restarts_schedule_with_warmup: (
{**common_kwargs, '''num_cycles''': 2},
[0.0, 5.0, 10.0, 8.53, 5.0, 1.46, 10.0, 8.53, 5.0, 1.46],
),
get_polynomial_decay_schedule_with_warmup: (
{**common_kwargs, '''power''': 2.0, '''lr_end''': 1E-7},
[0.0, 5.0, 10.0, 7.656, 5.625, 3.906, 2.5, 1.406, 0.625, 0.156],
),
get_inverse_sqrt_schedule: (
{'''num_warmup_steps''': 2},
[0.0, 5.0, 10.0, 8.165, 7.071, 6.325, 5.774, 5.345, 5.0, 4.714],
),
}
        for scheduler_func, data in scheds.items():
            kwargs, expected_learning_rates = data

            scheduler = scheduler_func(self.optimizer, **kwargs)
            self.assertEqual(len([scheduler.get_lr()[0]]), 1)
            lrs_1 = unwrap_schedule(scheduler, self.num_steps)
            self.assertListAlmostEqual(
                lrs_1, expected_learning_rates, tol=1e-2, msg=f"failed for {scheduler_func} in normal scheduler",
            )

            scheduler = scheduler_func(self.optimizer, **kwargs)
            if scheduler_func.__name__ != "get_constant_schedule":
                LambdaScheduleWrapper.wrap_scheduler(scheduler)  # wrap to test picklability of the schedule
            lrs_2 = unwrap_and_save_reload_schedule(scheduler, self.num_steps)
            self.assertListEqual(lrs_1, lrs_2, msg=f"failed for {scheduler_func} in save and reload")
class LambdaScheduleWrapper:
    def __init__(self, fn):
        self.fn = fn

    def __call__(self, *args, **kwargs):
        return self.fn(*args, **kwargs)

    @classmethod
    def wrap_scheduler(cls, scheduler):
        scheduler.lr_lambdas = list(map(cls, scheduler.lr_lambdas))
| 637 | 0 |
import argparse
import logging
import sys
from unittest.mock import patch
import run_glue_deebert
from transformers.testing_utils import TestCasePlus, get_gpu_count, require_torch_non_multi_gpu, slow
logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger()
def get_setup_file():
    parser = argparse.ArgumentParser()
    parser.add_argument("-f")
    args = parser.parse_args()
    return args.f
class DeeBertTests(TestCasePlus):
    def setup(self) -> None:
        stream_handler = logging.StreamHandler(sys.stdout)
        logger.addHandler(stream_handler)
    def run_and_check(self, args):
        n_gpu = get_gpu_count()

        if n_gpu > 1:
            pass
            # XXX: doesn't quite work with n_gpu > 1 https://github.com/huggingface/transformers/issues/10560
            # script = f"{self.examples_dir_str}/research_projects/deebert/run_glue_deebert.py"
            # distributed_args = f"-m torch.distributed.launch --nproc_per_node={n_gpu} {script}".split()
            # cmd = [sys.executable] + distributed_args + args
            # execute_subprocess_async(cmd, env=self.get_env())
            # XXX: test the results - need to save them first into .json file
        else:
            args.insert(0, "run_glue_deebert.py")
            with patch.object(sys, "argv", args):
                result = run_glue_deebert.main()
                for value in result.values():
                    self.assertGreaterEqual(value, 0.666)
    @slow
    @require_torch_non_multi_gpu
    def test_glue_deebert_train(self):
        train_args = """
--model_type roberta
--model_name_or_path roberta-base
--task_name MRPC
--do_train
--do_eval
--do_lower_case
--data_dir ./tests/fixtures/tests_samples/MRPC/
--max_seq_length 128
--per_gpu_eval_batch_size=1
--per_gpu_train_batch_size=8
--learning_rate 2e-4
--num_train_epochs 3
--overwrite_output_dir
--seed 42
--output_dir ./examples/deebert/saved_models/roberta-base/MRPC/two_stage
--plot_data_dir ./examples/deebert/results/
--save_steps 0
--overwrite_cache
--eval_after_first_stage
""".split()
        self.run_and_check(train_args)

        eval_args = """
--model_type roberta
--model_name_or_path ./examples/deebert/saved_models/roberta-base/MRPC/two_stage
--task_name MRPC
--do_eval
--do_lower_case
--data_dir ./tests/fixtures/tests_samples/MRPC/
--output_dir ./examples/deebert/saved_models/roberta-base/MRPC/two_stage
--plot_data_dir ./examples/deebert/results/
--max_seq_length 128
--eval_each_highway
--eval_highway
--overwrite_cache
--per_gpu_eval_batch_size=1
""".split()
        self.run_and_check(eval_args)

        entropy_eval_args = """
--model_type roberta
--model_name_or_path ./examples/deebert/saved_models/roberta-base/MRPC/two_stage
--task_name MRPC
--do_eval
--do_lower_case
--data_dir ./tests/fixtures/tests_samples/MRPC/
--output_dir ./examples/deebert/saved_models/roberta-base/MRPC/two_stage
--plot_data_dir ./examples/deebert/results/
--max_seq_length 128
--early_exit_entropy 0.1
--eval_highway
--overwrite_cache
--per_gpu_eval_batch_size=1
""".split()
        self.run_and_check(entropy_eval_args)
| 12 |
cache: dict[tuple[int, int, int], int] = {}
def _calculate(days: int, absent: int, late: int) -> int:
if late == 3 or absent == 2:
return 0
# if we have no days left, and have not failed any other rules,
# we have a prize string
if days == 0:
return 1
# No easy solution, so now we need to do the recursive calculation
# First, check if the combination is already in the cache, and
# if yes, return the stored value from there since we already
# know the number of possible prize strings from this point on
    key = (days, absent, late)
if key in cache:
return cache[key]
# now we calculate the three possible ways that can unfold from
# this point on, depending on our attendance today
# 1) if we are late (but not absent), the "absent" counter stays as
# it is, but the "late" counter increases by one
    state_late = _calculate(days - 1, absent, late + 1)
# 2) if we are absent, the "absent" counter increases by 1, and the
# "late" counter resets to 0
    state_absent = _calculate(days - 1, absent + 1, 0)
# 3) if we are on time, this resets the "late" counter and keeps the
# absent counter
    state_ontime = _calculate(days - 1, absent, 0)
    prizestrings = state_late + state_absent + state_ontime
    cache[key] = prizestrings
return prizestrings
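

def _demo_prize_strings() -> None:
    # Worked example (added for illustration): for a single day all three
    # attendance states (on time, late, absent) qualify, and over a 4-day
    # period there are exactly 43 prize strings, the figure quoted in
    # Project Euler problem 191.
    assert _calculate(1, absent=0, late=0) == 3
    assert _calculate(4, absent=0, late=0) == 43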
def solution(days: int = 30) -> int:
    return _calculate(days, absent=0, late=0)
if __name__ == "__main__":
print(solution())
| 12 | 1 |
def hamming(n_element: int) -> list:
    n_element = int(n_element)
    if n_element < 1:
        my_error = ValueError("a should be a positive number")
        raise my_error

    hamming_list = [1]
    i, j, k = (0, 0, 0)
    index = 1
while index < n_element:
while hamming_list[i] * 2 <= hamming_list[-1]:
i += 1
while hamming_list[j] * 3 <= hamming_list[-1]:
j += 1
while hamming_list[k] * 5 <= hamming_list[-1]:
k += 1
hamming_list.append(
min(hamming_list[i] * 2 , hamming_list[j] * 3 , hamming_list[k] * 5 ) )
index += 1
return hamming_list
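

def _demo_hamming() -> None:
    # Quick sanity check (added for illustration): the first ten Hamming
    # numbers, i.e. numbers of the form 2^i * 3^j * 5^k.
    assert hamming(10) == [1, 2, 3, 4, 5, 6, 8, 9, 10, 12]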
if __name__ == "__main__":
    n = input("Enter the last number (nth term) of the Hamming Number Series: ")
print("""Formula of Hamming Number Series => 2^i * 3^j * 5^k""")
    hamming_numbers = hamming(int(n))
print("""-----------------------------------------------------""")
print(F'''The list with nth numbers is: {hamming_numbers}''')
print("""-----------------------------------------------------""") | 712 |
import warnings
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "xlnet-base-cased": "https://huggingface.co/xlnet-base-cased/resolve/main/config.json",
    "xlnet-large-cased": "https://huggingface.co/xlnet-large-cased/resolve/main/config.json",
}
class XLNetConfig(PretrainedConfig):
    model_type = "xlnet"
    keys_to_ignore_at_inference = ["mems"]
    attribute_map = {
        "n_token": "vocab_size",  # Backward compatibility
        "hidden_size": "d_model",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }
    def __init__(self, vocab_size=32_000, d_model=1_024, n_layer=24, n_head=16, d_inner=4_096, ff_activation="gelu", untie_r=True, attn_type="bi", initializer_range=0.02, layer_norm_eps=1e-12, dropout=0.1, mem_len=512, reuse_len=None, use_mems_eval=True, use_mems_train=False, bi_data=False, clamp_len=-1, same_length=False, summary_type="last", summary_use_proj=True, summary_activation="tanh", summary_last_dropout=0.1, start_n_top=5, end_n_top=5, pad_token_id=5, bos_token_id=1, eos_token_id=2, **kwargs):
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.n_layer = n_layer
        self.n_head = n_head
        if d_model % n_head != 0:
            raise ValueError(f"'d_model % n_head' ({d_model % n_head}) should be equal to 0")
        if "d_head" in kwargs:
            if kwargs["d_head"] != d_model // n_head:
                raise ValueError(
                    f"`d_head` ({kwargs['d_head']}) should be equal to `d_model // n_head` ({d_model // n_head})"
                )
        self.d_head = d_model // n_head
        self.ff_activation = ff_activation
        self.d_inner = d_inner
        self.untie_r = untie_r
        self.attn_type = attn_type

        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps

        self.dropout = dropout
        self.mem_len = mem_len
        self.reuse_len = reuse_len
        self.bi_data = bi_data
        self.clamp_len = clamp_len
        self.same_length = same_length

        self.summary_type = summary_type
        self.summary_use_proj = summary_use_proj
        self.summary_activation = summary_activation
        self.summary_last_dropout = summary_last_dropout
        self.start_n_top = start_n_top
        self.end_n_top = end_n_top

        self.bos_token_id = bos_token_id
        self.pad_token_id = pad_token_id
        self.eos_token_id = eos_token_id

        if "use_cache" in kwargs:
            warnings.warn(
                "The `use_cache` argument is deprecated and will be removed in a future version, use `use_mems_eval`"
                " instead.",
                FutureWarning,
            )
            use_mems_eval = kwargs["use_cache"]

        self.use_mems_eval = use_mems_eval
        self.use_mems_train = use_mems_train
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
    @property
    def max_position_embeddings(self) -> int:
        logger.info(f"The model {self.model_type} is one of the few models that has no sequence length limit.")
        return -1

    @max_position_embeddings.setter
    def max_position_embeddings(self, value) -> None:
        # Message copied from Transformer-XL documentation
        raise NotImplementedError(
            f"The model {self.model_type} is one of the few models that has no sequence length limit."
        ) | 436 | 0 |
"""Pancake sort: sort a list by repeatedly flipping prefixes."""


def pancake_sort(arr):
    cur = len(arr)
    while cur > 1:
        # Find the index of the maximum number in arr[0:cur]
        mi = arr.index(max(arr[0:cur]))
        # Reverse from 0 to mi
        arr = arr[mi::-1] + arr[mi + 1 : len(arr)]
        # Reverse the whole unsorted prefix so the maximum lands at position cur - 1
        arr = arr[cur - 1 :: -1] + arr[cur : len(arr)]
        cur -= 1
    return arr
if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
print(pancake_sort(unsorted))
| 356 |
"""Check whether a string is an isogram (contains no repeating letters)."""


def is_isogram(string: str) -> bool:
    if not all(x.isalpha() for x in string):
        raise ValueError("String must only contain alphabetic characters.")
    letters = sorted(string.lower())
    return len(letters) == len(set(letters))
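# Illustrative examples: "Uncopyrightable" has no repeated letter, "allowance" does.
assert is_isogram("Uncopyrightable") is True
assert is_isogram("allowance") is False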
if __name__ == "__main__":
    input_str = input("Enter a string ").strip()
    isogram = is_isogram(input_str)
print(f'''{input_str} is {'an' if isogram else 'not an'} isogram.''')
| 356 | 1 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
_import_structure = {
"""configuration_swiftformer""": [
"""SWIFTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""SwiftFormerConfig""",
"""SwiftFormerOnnxConfig""",
]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_swiftformer"] = [
"""SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""SwiftFormerForImageClassification""",
"""SwiftFormerModel""",
"""SwiftFormerPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_swiftformer import (
SWIFTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
SwiftFormerConfig,
SwiftFormerOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_swiftformer import (
SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
SwiftFormerForImageClassification,
SwiftFormerModel,
SwiftFormerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__) | 66 |
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from diffusers import (
DDIMScheduler,
    KandinskyV22Img2ImgPipeline,
    KandinskyV22PriorPipeline,
    UNet2DConditionModel,
VQModel,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class KandinskyV22Img2ImgPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = KandinskyV22Img2ImgPipeline
    params = ["image_embeds", "negative_image_embeds", "image"]
    batch_params = [
"image_embeds",
"negative_image_embeds",
"image",
]
    required_optional_params = [
"generator",
"height",
"width",
"strength",
"guidance_scale",
"num_inference_steps",
"return_dict",
"guidance_scale",
"num_images_per_prompt",
"output_type",
"return_dict",
]
    test_xformers_attention = False
    @property
    def text_embedder_hidden_size(self):
        return 32

    @property
    def time_input_dim(self):
        return 32

    @property
    def block_out_channels_0(self):
        return self.time_input_dim

    @property
    def time_embed_dim(self):
        return self.time_input_dim * 4

    @property
    def cross_attention_dim(self):
        return 100
    @property
    def dummy_unet(self):
        torch.manual_seed(0)

        model_kwargs = {
            "in_channels": 4,
            # Out channels is double in channels because predicts mean and variance
            "out_channels": 8,
            "addition_embed_type": "image",
            "down_block_types": ("ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D"),
            "up_block_types": ("SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"),
            "mid_block_type": "UNetMidBlock2DSimpleCrossAttn",
            "block_out_channels": (self.block_out_channels_0, self.block_out_channels_0 * 2),
            "layers_per_block": 1,
            "encoder_hid_dim": self.text_embedder_hidden_size,
            "encoder_hid_dim_type": "image_proj",
            "cross_attention_dim": self.cross_attention_dim,
            "attention_head_dim": 4,
            "resnet_time_scale_shift": "scale_shift",
            "class_embed_type": None,
        }

        model = UNet2DConditionModel(**model_kwargs)
        return model
    @property
    def dummy_movq_kwargs(self):
        return {
            "block_out_channels": [32, 64],
            "down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
            "in_channels": 3,
            "latent_channels": 4,
            "layers_per_block": 1,
            "norm_num_groups": 8,
            "norm_type": "spatial",
            "num_vq_embeddings": 12,
            "out_channels": 3,
            "up_block_types": [
                "AttnUpDecoderBlock2D",
                "UpDecoderBlock2D",
            ],
            "vq_embed_dim": 4,
        }

    @property
    def dummy_movq(self):
        torch.manual_seed(0)
        model = VQModel(**self.dummy_movq_kwargs)
        return model
    def get_dummy_components(self):
        unet = self.dummy_unet
        movq = self.dummy_movq

        scheduler_kwargs = {
            "num_train_timesteps": 1000,
            "beta_schedule": "linear",
            "beta_start": 0.00085,
            "beta_end": 0.012,
            "clip_sample": False,
            "set_alpha_to_one": False,
            "steps_offset": 0,
            "prediction_type": "epsilon",
            "thresholding": False,
        }

        scheduler = DDIMScheduler(**scheduler_kwargs)

        components = {
            "unet": unet,
            "scheduler": scheduler,
            "movq": movq,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        image_embeds = floats_tensor((1, self.text_embedder_hidden_size), rng=random.Random(seed)).to(device)
        negative_image_embeds = floats_tensor((1, self.text_embedder_hidden_size), rng=random.Random(seed + 1)).to(
            device
        )
        # create init_image
        image = floats_tensor((1, 3, 64, 64), rng=random.Random(seed)).to(device)
        image = image.cpu().permute(0, 2, 3, 1)[0]
        init_image = Image.fromarray(np.uint8(image)).convert("RGB").resize((256, 256))

        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "image": init_image,
            "image_embeds": image_embeds,
            "negative_image_embeds": negative_image_embeds,
            "generator": generator,
            "height": 64,
            "width": 64,
            "num_inference_steps": 10,
            "guidance_scale": 7.0,
            "strength": 0.2,
            "output_type": "np",
        }
        return inputs
    def test_kandinsky_img2img(self):
        device = "cpu"

        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        output = pipe(**self.get_dummy_inputs(device))
        image = output.images

        image_from_tuple = pipe(
            **self.get_dummy_inputs(device),
            return_dict=False,
        )[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 64, 64, 3)

        expected_slice = np.array(
            [0.6199778, 0.63984406, 0.46145785, 0.62944984, 0.5622215, 0.47306132, 0.47441456, 0.4607606, 0.48719263]
        )
        assert (
            np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        ), f" expected_slice {expected_slice}, but got {image_slice.flatten()}"
        assert (
            np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
        ), f" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"
@slow
@require_torch_gpu
class KandinskyV22Img2ImgPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def test_kandinsky_img2img(self):
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/kandinskyv22/kandinskyv22_img2img_frog.npy"
        )

        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/kandinsky/cat.png"
        )
        prompt = "A red cartoon frog, 4k"

        pipe_prior = KandinskyV22PriorPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-2-prior", torch_dtype=torch.float16
        )
        pipe_prior.to(torch_device)

        pipeline = KandinskyV22Img2ImgPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-2-decoder", torch_dtype=torch.float16
        )
        pipeline = pipeline.to(torch_device)
        pipeline.set_progress_bar_config(disable=None)

        generator = torch.Generator(device="cpu").manual_seed(0)
        image_emb, zero_image_emb = pipe_prior(
            prompt,
            generator=generator,
            num_inference_steps=5,
            negative_prompt="",
        ).to_tuple()

        output = pipeline(
            image=init_image,
            image_embeds=image_emb,
            negative_image_embeds=zero_image_emb,
            generator=generator,
            num_inference_steps=100,
            height=768,
            width=768,
            strength=0.2,
            output_type="np",
        )

        image = output.images[0]

        assert image.shape == (768, 768, 3)

        assert_mean_pixel_difference(image, expected_image) | 66 | 1 |
"""simple docstring"""
import inspect
import re
from hashlib import sha256
from typing import Dict, List
from .arrow import arrow
from .audiofolder import audiofolder
from .csv import csv
from .imagefolder import imagefolder
from .json import json
from .pandas import pandas
from .parquet import parquet
from .sql import sql # noqa F401
from .text import text
def _hash_python_lines(lines: List[str]) -> str:
    filtered_lines = []
    for line in lines:
        line = re.sub(r"#.*", "", line)  # remove comments
        if line:
            filtered_lines.append(line)
    full_str = "\n".join(filtered_lines)

    # Make a hash from all this code
    full_bytes = full_str.encode("utf-8")
    return sha256(full_bytes).hexdigest()
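# Illustrative behaviour of the helper above: comment-only lines are stripped
# before hashing, so these two calls return the same digest.
# _hash_python_lines(["# a comment", "x = 1"]) == _hash_python_lines(["x = 1"])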
# get importable module names and hash for caching
_PACKAGED_DATASETS_MODULES = {
'csv': (csv.__name__, _hash_python_lines(inspect.getsource(csv).splitlines())),
'json': (json.__name__, _hash_python_lines(inspect.getsource(json).splitlines())),
'pandas': (pandas.__name__, _hash_python_lines(inspect.getsource(pandas).splitlines())),
'parquet': (parquet.__name__, _hash_python_lines(inspect.getsource(parquet).splitlines())),
'arrow': (arrow.__name__, _hash_python_lines(inspect.getsource(arrow).splitlines())),
'text': (text.__name__, _hash_python_lines(inspect.getsource(text).splitlines())),
'imagefolder': (imagefolder.__name__, _hash_python_lines(inspect.getsource(imagefolder).splitlines())),
'audiofolder': (audiofolder.__name__, _hash_python_lines(inspect.getsource(audiofolder).splitlines())),
}
# Used to infer the module to use based on the data files extensions
_EXTENSION_TO_MODULE = {
'.csv': ('csv', {}),
'.tsv': ('csv', {'sep': '\t'}),
'.json': ('json', {}),
'.jsonl': ('json', {}),
'.parquet': ('parquet', {}),
'.arrow': ('arrow', {}),
'.txt': ('text', {}),
}
_EXTENSION_TO_MODULE.update({ext: ('imagefolder', {}) for ext in imagefolder.ImageFolder.EXTENSIONS})
_EXTENSION_TO_MODULE.update({ext.upper(): ('imagefolder', {}) for ext in imagefolder.ImageFolder.EXTENSIONS})
_EXTENSION_TO_MODULE.update({ext: ('audiofolder', {}) for ext in audiofolder.AudioFolder.EXTENSIONS})
_EXTENSION_TO_MODULE.update({ext.upper(): ('audiofolder', {}) for ext in audiofolder.AudioFolder.EXTENSIONS})
_MODULE_SUPPORTS_METADATA = {"imagefolder", "audiofolder"}
# Used to filter data files based on extensions given a module name
_MODULE_TO_EXTENSIONS: Dict[str, List[str]] = {}
for _ext, (_module, _) in _EXTENSION_TO_MODULE.items():
_MODULE_TO_EXTENSIONS.setdefault(_module, []).append(_ext)
_MODULE_TO_EXTENSIONS["imagefolder"].append('.zip')
_MODULE_TO_EXTENSIONS["audiofolder"].append('.zip')
| 506 |
"""simple docstring"""
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Audio, Features, Value
from .base import TaskTemplate
@dataclass(frozen=True)
class AutomaticSpeechRecognition(TaskTemplate):
    task: str = field(default="automatic-speech-recognition", metadata={"include_in_asdict_even_if_is_default": True})
    input_schema: ClassVar[Features] = Features({"audio": Audio()})
    label_schema: ClassVar[Features] = Features({"transcription": Value("string")})
    audio_column: str = "audio"
    transcription_column: str = "transcription"

    def align_with_features(self, features):
        if self.audio_column not in features:
            raise ValueError(f"Column {self.audio_column} is not present in features.")
        if not isinstance(features[self.audio_column], Audio):
            raise ValueError(f"Column {self.audio_column} is not an Audio type.")
        task_template = copy.deepcopy(self)
        input_schema = self.input_schema.copy()
        input_schema["audio"] = features[self.audio_column]
        # the dataclass is frozen, so write through __dict__
        task_template.__dict__["input_schema"] = input_schema
        return task_template

    @property
    def column_mapping(self) -> Dict[str, str]:
        return {self.audio_column: "audio", self.transcription_column: "transcription"}
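# Minimal usage sketch (illustrative): aligning the template with a dataset's
# features lets the "audio" column pick up e.g. the dataset's sampling rate.
# features = Features({"audio": Audio(sampling_rate=16_000), "transcription": Value("string")})
# template = AutomaticSpeechRecognition().align_with_features(features)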
| 506 | 1 |
"""Generate hexagonal numbers: h(n) = n * (2n - 1)."""


def hexagonal_numbers(length: int) -> list[int]:
    if length <= 0 or not isinstance(length, int):
        raise ValueError("Length must be a positive integer.")
    return [n * (2 * n - 1) for n in range(length)]
if __name__ == "__main__":
print(hexagonal_numbers(length=5))
    print(hexagonal_numbers(length=10))
| 720 |
'''Convert a Dance Diffusion checkpoint to a diffusers DanceDiffusionPipeline.'''
import argparse
import math
import os
from copy import deepcopy
import torch
from audio_diffusion.models import DiffusionAttnUnetaD
from diffusion import sampling
from torch import nn
from diffusers import DanceDiffusionPipeline, IPNDMScheduler, UNet1DModel
MODELS_MAP = {
'gwf-440k': {
'url': 'https://model-server.zqevans2.workers.dev/gwf-440k.ckpt',
'sample_rate': 4_8_0_0_0,
'sample_size': 6_5_5_3_6,
},
'jmann-small-190k': {
'url': 'https://model-server.zqevans2.workers.dev/jmann-small-190k.ckpt',
'sample_rate': 4_8_0_0_0,
'sample_size': 6_5_5_3_6,
},
'jmann-large-580k': {
'url': 'https://model-server.zqevans2.workers.dev/jmann-large-580k.ckpt',
'sample_rate': 4_8_0_0_0,
'sample_size': 1_3_1_0_7_2,
},
'maestro-uncond-150k': {
'url': 'https://model-server.zqevans2.workers.dev/maestro-uncond-150k.ckpt',
'sample_rate': 1_6_0_0_0,
'sample_size': 6_5_5_3_6,
},
'unlocked-uncond-250k': {
'url': 'https://model-server.zqevans2.workers.dev/unlocked-uncond-250k.ckpt',
'sample_rate': 1_6_0_0_0,
'sample_size': 6_5_5_3_6,
},
'honk-140k': {
'url': 'https://model-server.zqevans2.workers.dev/honk-140k.ckpt',
'sample_rate': 1_6_0_0_0,
'sample_size': 6_5_5_3_6,
},
}
def alpha_sigma_to_t(alpha, sigma):
    """Return a timestep given the scaling factors for the clean signal and the noise."""
    return torch.atan2(sigma, alpha) / math.pi * 2


def get_crash_schedule(t):
    sigma = torch.sin(t * math.pi / 2) ** 2
    alpha = (1 - sigma**2) ** 0.5
    return alpha_sigma_to_t(alpha, sigma)
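# Illustrative sketch of the crash schedule above: feeding it a linear ramp of
# 100 timesteps yields a same-shaped tensor of warped timesteps.
# t = torch.linspace(1, 0, 101)[:-1]
# step_list = get_crash_schedule(t)  # shape: (100,)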
class Object(object):
    pass


class DiffusionUncond(nn.Module):
    def __init__(self, global_args):
        super().__init__()

        self.diffusion = DiffusionAttnUnet1D(global_args, n_attn_layers=4)
        self.diffusion_ema = deepcopy(self.diffusion)
        self.rng = torch.quasirandom.SobolEngine(1, scramble=True)
def download(model_name):
    url = MODELS_MAP[model_name]["url"]
    os.system(f"wget {url} ./")

    return f"./{model_name}.ckpt"
DOWN_NUM_TO_LAYER = {
'1': 'resnets.0',
'2': 'attentions.0',
'3': 'resnets.1',
'4': 'attentions.1',
'5': 'resnets.2',
'6': 'attentions.2',
}
MID_NUM_TO_LAYER = {
'8': 'resnets.0',
'9': 'attentions.0',
'10': 'resnets.1',
'11': 'attentions.1',
'12': 'resnets.2',
'13': 'attentions.2',
}
UP_NUM_TO_LAYER = {
'1': 'resnets.0',
'2': 'attentions.0',
'3': 'resnets.1',
'4': 'attentions.1',
'5': 'resnets.2',
'6': 'attentions.2',
'8': 'resnets.3',
'9': 'attentions.3',
'10': 'resnets.4',
'11': 'attentions.4',
'12': 'resnets.5',
'13': 'attentions.5',
}
DEPTH_0_TO_LAYER = {
'0': 'resnets.0',
'1': 'resnets.1',
'2': 'resnets.2',
'4': 'resnets.0',
'5': 'resnets.1',
'6': 'resnets.2',
}
RES_CONV_MAP = {
'skip': 'conv_skip',
'main.0': 'conv_1',
'main.1': 'group_norm_1',
'main.3': 'conv_2',
'main.4': 'group_norm_2',
}
ATTN_MAP = {
'norm': 'group_norm',
'qkv_proj': ['query', 'key', 'value'],
'out_proj': ['proj_attn'],
}
def convert_resconv_naming(name):
    if name.startswith("skip"):
        return name.replace("skip", RES_CONV_MAP["skip"])

    # name has to be of format main.{digit}
    if not name.startswith("main."):
        raise ValueError(f"ResConvBlock error with {name}")

    return name.replace(name[:6], RES_CONV_MAP[name[:6]])


def convert_attn_naming(name):
    for key, value in ATTN_MAP.items():
        if name.startswith(key) and not isinstance(value, list):
            return name.replace(key, value)
        elif name.startswith(key):
            return [name.replace(key, v) for v in value]
    raise ValueError(f"Attn error with {name}")
def rename(input_string, max_depth=13):
    string = input_string

    if string.split(".")[0] == "timestep_embed":
        return string.replace("timestep_embed", "time_proj")

    depth = 0
    if string.startswith("net.3."):
        depth += 1
        string = string[6:]
    elif string.startswith("net."):
        string = string[4:]

    while string.startswith("main.7."):
        depth += 1
        string = string[7:]

    if string.startswith("main."):
        string = string[5:]

    # mid block
    if string[:2].isdigit():
        layer_num = string[:2]
        string_left = string[2:]
    else:
        layer_num = string[0]
        string_left = string[1:]

    if depth == max_depth:
        new_layer = MID_NUM_TO_LAYER[layer_num]
        prefix = "mid_block"
    elif depth > 0 and int(layer_num) < 7:
        new_layer = DOWN_NUM_TO_LAYER[layer_num]
        prefix = f"down_blocks.{depth}"
    elif depth > 0 and int(layer_num) > 7:
        new_layer = UP_NUM_TO_LAYER[layer_num]
        prefix = f"up_blocks.{max_depth - depth - 1}"
    elif depth == 0:
        new_layer = DEPTH_0_TO_LAYER[layer_num]
        prefix = f"up_blocks.{max_depth - 1}" if int(layer_num) > 3 else "down_blocks.0"

    if not string_left.startswith("."):
        raise ValueError(f"Naming error with {input_string} and string_left: {string_left}.")

    string_left = string_left[1:]

    if "resnets" in new_layer:
        string_left = convert_resconv_naming(string_left)
    elif "attentions" in new_layer:
        string_left = convert_attn_naming(string_left)

    if not isinstance(string_left, list):
        new_string = prefix + "." + new_layer + "." + string_left
    else:
        new_string = [prefix + "." + new_layer + "." + s for s in string_left]
    return new_string
def rename_orig_weights(state_dict):
    new_state_dict = {}
    for k, v in state_dict.items():
        if k.endswith("kernel"):
            # up- and downsample layers, don't have trainable weights
            continue

        new_k = rename(k)

        # check if we need to transform from Conv => Linear for attention
        if isinstance(new_k, list):
            new_state_dict = transform_conv_attns(new_state_dict, new_k, v)
        else:
            new_state_dict[new_k] = v

    return new_state_dict


def transform_conv_attns(new_state_dict, new_k, v):
    if len(new_k) == 1:
        if len(v.shape) == 3:
            # weight
            new_state_dict[new_k[0]] = v[:, :, 0]
        else:
            # bias
            new_state_dict[new_k[0]] = v
    else:
        # qkv matrices
        trippled_shape = v.shape[0]
        single_shape = trippled_shape // 3
        for i in range(3):
            if len(v.shape) == 3:
                new_state_dict[new_k[i]] = v[i * single_shape : (i + 1) * single_shape, :, 0]
            else:
                new_state_dict[new_k[i]] = v[i * single_shape : (i + 1) * single_shape]
    return new_state_dict
def main(args):
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

    model_name = args.model_path.split("/")[-1].split(".")[0]
    if not os.path.isfile(args.model_path):
        assert (
            model_name == args.model_path
        ), f"Make sure to provide one of the official model names {MODELS_MAP.keys()}"
        args.model_path = download(model_name)

    sample_rate = MODELS_MAP[model_name]["sample_rate"]
    sample_size = MODELS_MAP[model_name]["sample_size"]

    config = Object()
    config.sample_size = sample_size
    config.sample_rate = sample_rate
    config.latent_dim = 0

    diffusers_model = UNet1DModel(sample_size=sample_size, sample_rate=sample_rate)
    diffusers_state_dict = diffusers_model.state_dict()

    orig_model = DiffusionUncond(config)
    orig_model.load_state_dict(torch.load(args.model_path, map_location=device)["state_dict"])
    orig_model = orig_model.diffusion_ema.eval()
    orig_model_state_dict = orig_model.state_dict()
    renamed_state_dict = rename_orig_weights(orig_model_state_dict)

    renamed_minus_diffusers = set(renamed_state_dict.keys()) - set(diffusers_state_dict.keys())
    diffusers_minus_renamed = set(diffusers_state_dict.keys()) - set(renamed_state_dict.keys())

    assert len(renamed_minus_diffusers) == 0, f"Problem with {renamed_minus_diffusers}"
    assert all(k.endswith("kernel") for k in list(diffusers_minus_renamed)), f"Problem with {diffusers_minus_renamed}"

    for key, value in renamed_state_dict.items():
        assert (
            diffusers_state_dict[key].squeeze().shape == value.squeeze().shape
        ), f"Shape for {key} doesn't match. Diffusers: {diffusers_state_dict[key].shape} vs. {value.shape}"
        if key == "time_proj.weight":
            value = value.squeeze()

        diffusers_state_dict[key] = value

    diffusers_model.load_state_dict(diffusers_state_dict)

    steps = 100
    seed = 33

    scheduler = IPNDMScheduler(num_train_timesteps=steps)

    generator = torch.manual_seed(seed)
    noise = torch.randn([1, 2, config.sample_size], generator=generator).to(device)

    t = torch.linspace(1, 0, steps + 1, device=device)[:-1]
    step_list = get_crash_schedule(t)

    pipe = DanceDiffusionPipeline(unet=diffusers_model, scheduler=scheduler)

    generator = torch.manual_seed(33)
    audio = pipe(num_inference_steps=steps, generator=generator).audios

    generated = sampling.iplms_sample(orig_model, noise, step_list, {})
    generated = generated.clamp(-1, 1)
    diff_sum = (generated - audio).abs().sum()
    diff_max = (generated - audio).abs().max()
    if args.save:
        pipe.save_pretrained(args.checkpoint_path)

    print("Diff sum", diff_sum)
    print("Diff max", diff_max)
    assert diff_max < 1e-3, f"Diff max: {diff_max} is too much :-/"
    print(f"Conversion for {model_name} successful!")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument('--model_path', default=None, type=str, required=True, help='Path to the model to convert.')
parser.add_argument(
'--save', default=True, type=bool, required=False, help='Whether to save the converted model or not.'
)
parser.add_argument('--checkpoint_path', default=None, type=str, required=True, help='Path to the output model.')
    args = parser.parse_args()
main(args)
| 159 | 0 |
from __future__ import annotations
import bisect
def bisect_left(sorted_collection: list[int], item: int, lo: int = 0, hi: int = -1) -> int:
    """Locate the leftmost insertion point for item in sorted_collection."""
    if hi < 0:
        hi = len(sorted_collection)

    while lo < hi:
        mid = lo + (hi - lo) // 2
        if sorted_collection[mid] < item:
            lo = mid + 1
        else:
            hi = mid

    return lo
def bisect_right(sorted_collection: list[int], item: int, lo: int = 0, hi: int = -1) -> int:
    """Locate the rightmost insertion point for item in sorted_collection."""
    if hi < 0:
        hi = len(sorted_collection)

    while lo < hi:
        mid = lo + (hi - lo) // 2
        if sorted_collection[mid] <= item:
            lo = mid + 1
        else:
            hi = mid

    return lo
def insort_left(sorted_collection: list[int], item: int, lo: int = 0, hi: int = -1) -> None:
    """Insert item into sorted_collection, before any existing equal entries."""
    sorted_collection.insert(bisect_left(sorted_collection, item, lo, hi), item)
def insort_right(sorted_collection: list[int], item: int, lo: int = 0, hi: int = -1) -> None:
    """Insert item into sorted_collection, after any existing equal entries."""
    sorted_collection.insert(bisect_right(sorted_collection, item, lo, hi), item)
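# Illustrative check of the insertion helpers above: insort_left keeps the
# list sorted after inserting a new item.
_demo = [0, 5, 7, 10, 15]
insort_left(_demo, 6)
assert _demo == [0, 5, 6, 7, 10, 15]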
def binary_search(sorted_collection: list[int], item: int) -> int | None:
    """Iterative binary search; returns the index of item or None."""
    left = 0
    right = len(sorted_collection) - 1

    while left <= right:
        midpoint = left + (right - left) // 2
        current_item = sorted_collection[midpoint]
        if current_item == item:
            return midpoint
        elif item < current_item:
            right = midpoint - 1
        else:
            left = midpoint + 1
    return None
def binary_search_std_lib(sorted_collection: list[int], item: int) -> int | None:
    """Binary search using the standard library's bisect module."""
    index = bisect.bisect_left(sorted_collection, item)
    if index != len(sorted_collection) and sorted_collection[index] == item:
        return index
    return None
def binary_search_by_recursion(sorted_collection: list[int], item: int, left: int, right: int) -> int | None:
    """Recursive binary search; returns the index of item or None."""
    if right < left:
        return None

    midpoint = left + (right - left) // 2

    if sorted_collection[midpoint] == item:
        return midpoint
    elif sorted_collection[midpoint] > item:
        return binary_search_by_recursion(sorted_collection, item, left, midpoint - 1)
    else:
        return binary_search_by_recursion(sorted_collection, item, midpoint + 1, right)
if __name__ == "__main__":
    user_input = input("Enter numbers separated by comma:\n").strip()
    collection = sorted(int(item) for item in user_input.split(","))
    target = int(input("Enter a single number to be found in the list:\n"))
    result = binary_search(collection, target)
if result is None:
print(F"""{target} was not found in {collection}.""")
else:
print(F"""{target} was found at position {result} in {collection}.""")
| 68 |
import argparse
import torch
from transformers import BlenderbotConfig, BlenderbotForConditionalGeneration
from transformers.utils import logging
logging.set_verbosity_info()
__A = logging.get_logger(__name__)
PATTERNS = [
["attention", "attn"],
["encoder_attention", "encoder_attn"],
["q_lin", "q_proj"],
["k_lin", "k_proj"],
["v_lin", "v_proj"],
["out_lin", "out_proj"],
["norm_embeddings", "layernorm_embedding"],
["position_embeddings", "embed_positions"],
["embeddings", "embed_tokens"],
["ffn.lin", "fc"],
]
def rename_state_dict_key(k):
    if k == "embeddings.weight":
        return "shared.weight"

    for parlai_name, hf_name in PATTERNS:
        k = k.replace(parlai_name, hf_name)

    if k.startswith("encoder"):
        k = k.replace(".attn", ".self_attn")
        k = k.replace("norm1", "self_attn_layer_norm")
        k = k.replace("norm2", "final_layer_norm")
    elif k.startswith("decoder"):
        k = k.replace("norm1", "self_attn_layer_norm")
        k = k.replace("norm2", "encoder_attn_layer_norm")
        k = k.replace("norm3", "final_layer_norm")
    return k
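# Illustrative example of the renaming above on a ParlAI-style key:
# rename_state_dict_key("encoder.layers.0.attention.q_lin.weight")
#   -> "encoder.layers.0.self_attn.q_proj.weight"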
def rename_layernorm_keys(sd):
    keys = [
        "model.encoder.layernorm_embedding.weight",
        "model.encoder.layernorm_embedding.bias",
        "model.decoder.layernorm_embedding.weight",
        "model.decoder.layernorm_embedding.bias",
    ]
    for k in keys:
        v = sd.pop(k)
        new_k = k.replace("layernorm_embedding", "layer_norm")
        assert new_k not in sd
        sd[new_k] = v
__A = ["START"]
@torch.no_grad()
def convert_parlai_checkpoint(checkpoint_path, pytorch_dump_folder_path, config_json_path):
    """Copy/paste/tweak a ParlAI checkpoint's weights into our Blenderbot structure."""
    model = torch.load(checkpoint_path, map_location="cpu")
    sd = model["model"]
    cfg = BlenderbotConfig.from_json_file(config_json_path)
    m = BlenderbotForConditionalGeneration(cfg)
    valid_keys = m.model.state_dict().keys()
    failures = []
    mapping = {}
    for k, v in sd.items():
        if k in IGNORE_KEYS:
            continue

        new_k = rename_state_dict_key(k)
        if new_k not in valid_keys:
            failures.append([k, new_k])
        else:
            mapping[new_k] = v
    if cfg.normalize_before:  # Blenderbot-3B checkpoints. Rename layernorm_embedding -> layer_norm
        rename_layernorm_keys(sd)
    m.model.load_state_dict(mapping, strict=True)
    m.half()
    m.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
__A = argparse.ArgumentParser()
# Required parameters
parser.add_argument("--src_path", type=str, help="like blenderbot-model.bin")
parser.add_argument("--save_dir", default="hf_blenderbot", type=str, help="Where to save converted model.")
parser.add_argument(
"--hf_config_json", default="blenderbot-3b-config.json", type=str, help="Path to config to use"
)
    args = parser.parse_args()
convert_parlai_checkpoint(args.src_path, args.save_dir, args.hf_config_json)
| 68 | 1 |
"""simple docstring"""
from typing import Optional
import torch
import torch.utils.checkpoint
from torch import Tensor, nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from ...activations import ACT2FN
from ...file_utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward
from ...modeling_outputs import (
BaseModelOutputWithNoAttention,
BaseModelOutputWithPoolingAndNoAttention,
ImageClassifierOutputWithNoAttention,
)
from ...modeling_utils import PreTrainedModel
from ...utils import logging
from .configuration_regnet import RegNetConfig
logger = logging.get_logger(__name__)

# General docstring
_CONFIG_FOR_DOC = "RegNetConfig"

# Base docstring
_CHECKPOINT_FOR_DOC = "facebook/regnet-y-040"
_EXPECTED_OUTPUT_SHAPE = [1, 1088, 7, 7]

# Image classification docstring
_IMAGE_CLASS_CHECKPOINT = "facebook/regnet-y-040"
_IMAGE_CLASS_EXPECTED_OUTPUT = "tabby, tabby cat"

REGNET_PRETRAINED_MODEL_ARCHIVE_LIST = [
    "facebook/regnet-y-040",
    # See all regnet models at https://huggingface.co/models?filter=regnet
]
class RegNetConvLayer(nn.Module):
    def __init__(
        self,
        in_channels: int,
        out_channels: int,
        kernel_size: int = 3,
        stride: int = 1,
        groups: int = 1,
        activation: Optional[str] = "relu",
    ):
        super().__init__()
        self.convolution = nn.Conv2d(
            in_channels,
            out_channels,
            kernel_size=kernel_size,
            stride=stride,
            padding=kernel_size // 2,
            groups=groups,
            bias=False,
        )
        self.normalization = nn.BatchNorm2d(out_channels)
        self.activation = ACT2FN[activation] if activation is not None else nn.Identity()

    def forward(self, hidden_state):
        hidden_state = self.convolution(hidden_state)
        hidden_state = self.normalization(hidden_state)
        hidden_state = self.activation(hidden_state)
        return hidden_state
class RegNetEmbeddings(nn.Module):
    """
    RegNet embeddings (stem): a single aggressive convolution.
    """

    def __init__(self, config: RegNetConfig):
        super().__init__()
        self.embedder = RegNetConvLayer(
            config.num_channels, config.embedding_size, kernel_size=3, stride=2, activation=config.hidden_act
        )
        self.num_channels = config.num_channels

    def forward(self, pixel_values):
        num_channels = pixel_values.shape[1]
        if num_channels != self.num_channels:
            raise ValueError(
                "Make sure that the channel dimension of the pixel values match with the one set in the configuration."
            )
        hidden_state = self.embedder(pixel_values)
        return hidden_state
class RegNetShortCut(nn.Module):
    """
    RegNet shortcut, used to project the residual features to the correct size. If needed, it is also used to
    downsample the input using `stride=2`.
    """

    def __init__(self, in_channels: int, out_channels: int, stride: int = 2):
        super().__init__()
        self.convolution = nn.Conv2d(in_channels, out_channels, kernel_size=1, stride=stride, bias=False)
        self.normalization = nn.BatchNorm2d(out_channels)

    def forward(self, input):
        hidden_state = self.convolution(input)
        hidden_state = self.normalization(hidden_state)
        return hidden_state
class RegNetSELayer(nn.Module):
    """
    Squeeze and Excitation layer (SE) proposed in [Squeeze-and-Excitation Networks](https://arxiv.org/abs/1709.01507).
    """

    def __init__(self, in_channels: int, reduced_channels: int):
        super().__init__()
        self.pooler = nn.AdaptiveAvgPool2d((1, 1))
        self.attention = nn.Sequential(
            nn.Conv2d(in_channels, reduced_channels, kernel_size=1),
            nn.ReLU(),
            nn.Conv2d(reduced_channels, in_channels, kernel_size=1),
            nn.Sigmoid(),
        )

    def forward(self, hidden_state):
        # b c h w -> b c 1 1
        pooled = self.pooler(hidden_state)
        attention = self.attention(pooled)
        hidden_state = hidden_state * attention
        return hidden_state
class RegNetXLayer(nn.Module):
    """
    RegNet's layer composed by three 3x3 convolutions, same as a ResNet bottleneck layer with reduction = 1.
    """

    def __init__(self, config: RegNetConfig, in_channels: int, out_channels: int, stride: int = 1):
        super().__init__()
        should_apply_shortcut = in_channels != out_channels or stride != 1
        groups = max(1, out_channels // config.groups_width)
        self.shortcut = (
            RegNetShortCut(in_channels, out_channels, stride=stride) if should_apply_shortcut else nn.Identity()
        )
        self.layer = nn.Sequential(
            RegNetConvLayer(in_channels, out_channels, kernel_size=1, activation=config.hidden_act),
            RegNetConvLayer(out_channels, out_channels, stride=stride, groups=groups, activation=config.hidden_act),
            RegNetConvLayer(out_channels, out_channels, kernel_size=1, activation=None),
        )
        self.activation = ACT2FN[config.hidden_act]

    def forward(self, hidden_state):
        residual = hidden_state
        hidden_state = self.layer(hidden_state)
        residual = self.shortcut(residual)
        hidden_state += residual
        hidden_state = self.activation(hidden_state)
        return hidden_state
class RegNetYLayer(nn.Module):
    """
    RegNet's Y layer: an X layer with Squeeze and Excitation.
    """

    def __init__(self, config: RegNetConfig, in_channels: int, out_channels: int, stride: int = 1):
        super().__init__()
        should_apply_shortcut = in_channels != out_channels or stride != 1
        groups = max(1, out_channels // config.groups_width)
        self.shortcut = (
            RegNetShortCut(in_channels, out_channels, stride=stride) if should_apply_shortcut else nn.Identity()
        )
        self.layer = nn.Sequential(
            RegNetConvLayer(in_channels, out_channels, kernel_size=1, activation=config.hidden_act),
            RegNetConvLayer(out_channels, out_channels, stride=stride, groups=groups, activation=config.hidden_act),
            RegNetSELayer(out_channels, reduced_channels=int(round(in_channels / 4))),
            RegNetConvLayer(out_channels, out_channels, kernel_size=1, activation=None),
        )
        self.activation = ACT2FN[config.hidden_act]

    def forward(self, hidden_state):
        residual = hidden_state
        hidden_state = self.layer(hidden_state)
        residual = self.shortcut(residual)
        hidden_state += residual
        hidden_state = self.activation(hidden_state)
        return hidden_state
class RegNetStage(nn.Module):
    """
    A RegNet stage composed by stacked layers.
    """

    def __init__(self, config: RegNetConfig, in_channels: int, out_channels: int, stride: int = 2, depth: int = 2):
        super().__init__()

        layer = RegNetXLayer if config.layer_type == "x" else RegNetYLayer

        self.layers = nn.Sequential(
            # downsampling is done in the first layer with stride of 2
            layer(config, in_channels, out_channels, stride=stride),
            *[layer(config, out_channels, out_channels) for _ in range(depth - 1)],
        )

    def forward(self, hidden_state):
        hidden_state = self.layers(hidden_state)
        return hidden_state
class RegNetEncoder(nn.Module):
    def __init__(self, config: RegNetConfig):
        super().__init__()
        self.stages = nn.ModuleList([])
        # based on `downsample_in_first_stage`, the first layer of the first stage may or may not downsample the input
        self.stages.append(
            RegNetStage(
                config,
                config.embedding_size,
                config.hidden_sizes[0],
                stride=2 if config.downsample_in_first_stage else 1,
                depth=config.depths[0],
            )
        )
        in_out_channels = zip(config.hidden_sizes, config.hidden_sizes[1:])
        for (in_channels, out_channels), depth in zip(in_out_channels, config.depths[1:]):
            self.stages.append(RegNetStage(config, in_channels, out_channels, depth=depth))

    def forward(self, hidden_state: Tensor, output_hidden_states: bool = False, return_dict: bool = True):
        hidden_states = () if output_hidden_states else None

        for stage_module in self.stages:
            if output_hidden_states:
                hidden_states = hidden_states + (hidden_state,)

            hidden_state = stage_module(hidden_state)

        if output_hidden_states:
            hidden_states = hidden_states + (hidden_state,)

        if not return_dict:
            return tuple(v for v in [hidden_state, hidden_states] if v is not None)

        return BaseModelOutputWithNoAttention(last_hidden_state=hidden_state, hidden_states=hidden_states)
class RegNetPreTrainedModel(PreTrainedModel):
    """
    An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
    models.
    """

    config_class = RegNetConfig
    base_model_prefix = "regnet"
    main_input_name = "pixel_values"
    supports_gradient_checkpointing = True

    def _init_weights(self, module):
        if isinstance(module, nn.Conv2d):
            nn.init.kaiming_normal_(module.weight, mode="fan_out", nonlinearity="relu")
        elif isinstance(module, (nn.BatchNorm2d, nn.GroupNorm)):
            nn.init.constant_(module.weight, 1)
            nn.init.constant_(module.bias, 0)

    def _set_gradient_checkpointing(self, module, value=False):
        if isinstance(module, RegNetModel):
            module.gradient_checkpointing = value
SCREAMING_SNAKE_CASE__ = R"\n This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it\n as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and\n behavior.\n\n Parameters:\n config ([`RegNetConfig`]): Model configuration class with all the parameters of the model.\n Initializing with a config file does not load the weights associated with the model, only the\n configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.\n"
SCREAMING_SNAKE_CASE__ = R"\n Args:\n pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):\n Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See\n [`ConvNextImageProcessor.__call__`] for details.\n\n output_hidden_states (`bool`, *optional*):\n Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for\n more detail.\n return_dict (`bool`, *optional*):\n Whether or not to return a [`~file_utils.ModelOutput`] instead of a plain tuple.\n"
@add_start_docstrings(
    "The bare RegNet model outputting raw features without any specific head on top.",
    REGNET_START_DOCSTRING,
)
# Copied from transformers.models.resnet.modeling_resnet.ResNetModel with RESNET->REGNET,ResNet->RegNet
class RegNetModel(RegNetPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.config = config
        self.embedder = RegNetEmbeddings(config)
        self.encoder = RegNetEncoder(config)
        self.pooler = nn.AdaptiveAvgPool2d((1, 1))
        # Initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(REGNET_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=BaseModelOutputWithPoolingAndNoAttention,
        config_class=_CONFIG_FOR_DOC,
        modality="vision",
        expected_output=_EXPECTED_OUTPUT_SHAPE,
    )
    def forward(
        self, pixel_values: Tensor, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None
    ) -> BaseModelOutputWithPoolingAndNoAttention:
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        embedding_output = self.embedder(pixel_values)

        encoder_outputs = self.encoder(
            embedding_output, output_hidden_states=output_hidden_states, return_dict=return_dict
        )

        last_hidden_state = encoder_outputs[0]

        pooled_output = self.pooler(last_hidden_state)

        if not return_dict:
            return (last_hidden_state, pooled_output) + encoder_outputs[1:]

        return BaseModelOutputWithPoolingAndNoAttention(
            last_hidden_state=last_hidden_state,
            pooler_output=pooled_output,
            hidden_states=encoder_outputs.hidden_states,
        )
@add_start_docstrings(
    """
    RegNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for
    ImageNet.
    """,
    REGNET_START_DOCSTRING,
)
# Copied from transformers.models.resnet.modeling_resnet.ResNetForImageClassification with RESNET->REGNET,ResNet->RegNet,resnet->regnet
class RegNetForImageClassification(RegNetPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels
        self.regnet = RegNetModel(config)
        # classification head
        self.classifier = nn.Sequential(
            nn.Flatten(),
            nn.Linear(config.hidden_sizes[-1], config.num_labels) if config.num_labels > 0 else nn.Identity(),
        )
        # initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(REGNET_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_IMAGE_CLASS_CHECKPOINT,
        output_type=ImageClassifierOutputWithNoAttention,
        config_class=_CONFIG_FOR_DOC,
        expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT,
    )
    def forward(
        self,
        pixel_values: Optional[torch.FloatTensor] = None,
        labels: Optional[torch.LongTensor] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> ImageClassifierOutputWithNoAttention:
        r"""
        labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
            Labels for computing the image classification/regression loss.
        """
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        outputs = self.regnet(pixel_values, output_hidden_states=output_hidden_states, return_dict=return_dict)

        pooled_output = outputs.pooler_output if return_dict else outputs[1]

        logits = self.classifier(pooled_output)

        loss = None
        if labels is not None:
            if self.config.problem_type is None:
                if self.num_labels == 1:
                    self.config.problem_type = "regression"
                elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
                    self.config.problem_type = "single_label_classification"
                else:
                    self.config.problem_type = "multi_label_classification"

            if self.config.problem_type == "regression":
                loss_fct = MSELoss()
                if self.num_labels == 1:
                    loss = loss_fct(logits.squeeze(), labels.squeeze())
                else:
                    loss = loss_fct(logits, labels)
            elif self.config.problem_type == "single_label_classification":
                loss_fct = CrossEntropyLoss()
                loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
            elif self.config.problem_type == "multi_label_classification":
                loss_fct = BCEWithLogitsLoss()
                loss = loss_fct(logits, labels)

        if not return_dict:
            output = (logits,) + outputs[2:]
            return (loss,) + output if loss is not None else output

        return ImageClassifierOutputWithNoAttention(loss=loss, logits=logits, hidden_states=outputs.hidden_states)
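# Minimal usage sketch (illustrative; downloading the pretrained weights requires
# network access, and `image` stands in for any PIL image):
# from transformers import AutoImageProcessor
# processor = AutoImageProcessor.from_pretrained("facebook/regnet-y-040")
# model = RegNetForImageClassification.from_pretrained("facebook/regnet-y-040")
# inputs = processor(images=image, return_tensors="pt")
# logits = model(**inputs).logits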
 | 104 | """Max-pooling and average-pooling over square images, implemented with NumPy."""
import numpy as np
from PIL import Image
def maxpooling(arr: np.ndarray, size: int, stride: int) -> np.ndarray:
    """Apply max pooling with the given window size and stride to a square matrix."""
    arr = np.array(arr)
    if arr.shape[0] != arr.shape[1]:
        raise ValueError("The input array is not a square matrix")
    i = 0
    j = 0
    mat_i = 0
    mat_j = 0

    # compute the shape of the output matrix
    maxpool_shape = (arr.shape[0] - size) // stride + 1
    # initialize the output matrix with zeros of shape maxpool_shape
    updated_arr = np.zeros((maxpool_shape, maxpool_shape))

    while i < arr.shape[0]:
        if i + size > arr.shape[0]:
            # if the end of the matrix is reached, break
            break
        while j < arr.shape[1]:
            # if the end of the matrix is reached, break
            if j + size > arr.shape[1]:
                break
            # compute the maximum of the pooling matrix
            updated_arr[mat_i][mat_j] = np.max(arr[i : i + size, j : j + size])
            # shift the pooling matrix by stride of column pixels
            j += stride
            mat_j += 1

        # shift the pooling matrix by stride of row pixels
        i += stride
        mat_i += 1
        # reset the column index to 0
        j = 0
        mat_j = 0

    return updated_arr
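# Illustrative example of maxpooling on a 4x4 matrix with size=2, stride=2:
# [[ 1,  2,  3,  4],
#  [ 5,  6,  7,  8],     ->   [[ 6.,  8.],
#  [ 9, 10, 11, 12],           [14., 16.]]
#  [13, 14, 15, 16]]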
def avgpooling(arr: np.ndarray, size: int, stride: int) -> np.ndarray:
    """Apply average pooling with the given window size and stride to a square matrix."""
    arr = np.array(arr)
    if arr.shape[0] != arr.shape[1]:
        raise ValueError("The input array is not a square matrix")
    i = 0
    j = 0
    mat_i = 0
    mat_j = 0

    # compute the shape of the output matrix
    avgpool_shape = (arr.shape[0] - size) // stride + 1
    # initialize the output matrix with zeros of shape avgpool_shape
    updated_arr = np.zeros((avgpool_shape, avgpool_shape))

    while i < arr.shape[0]:
        # if the end of the matrix is reached, break
        if i + size > arr.shape[0]:
            break
        while j < arr.shape[1]:
            # if the end of the matrix is reached, break
            if j + size > arr.shape[1]:
                break
            # compute the average of the pooling matrix
            updated_arr[mat_i][mat_j] = int(np.average(arr[i : i + size, j : j + size]))
            # shift the pooling matrix by stride of column pixels
            j += stride
            mat_j += 1

        # shift the pooling matrix by stride of row pixels
        i += stride
        mat_i += 1
        # reset the column index to 0
        j = 0
        mat_j = 0

    return updated_arr
# Main Function
if __name__ == "__main__":
from doctest import testmod
testmod(name="avgpooling", verbose=True)
# Loading the image
    image = Image.open("path_to_image")
# Converting the image to numpy array and maxpooling, displaying the result
# Ensure that the image is a square matrix
Image.fromarray(maxpooling(np.array(image), size=3, stride=2)).show()
# Converting the image to numpy array and averagepooling, displaying the result
# Ensure that the image is a square matrix
Image.fromarray(avgpooling(np.array(image), size=3, stride=2)).show()
| 104 | 1 |
'''Project Euler problem 22: score a sorted list of names by alphabetical value.'''
import os


def solution() -> int:
    with open(os.path.dirname(__file__) + "/p022_names.txt") as file:
        names = str(file.readlines()[0])
        names = names.replace('"', "").split(",")

    names.sort()

    name_score = 0
    total_score = 0

    for i, name in enumerate(names):
        for letter in name:
            name_score += ord(letter) - 64

        total_score += (i + 1) * name_score
        name_score = 0
    return total_score
if __name__ == "__main__":
print(solution())
| 51 |
from typing import List, Optional, Tuple, Union
import torch
from ...models import UNet2DModel
from ...schedulers import ScoreSdeVeScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class ScoreSdeVePipeline(DiffusionPipeline):
    r"""
    Parameters:
        unet ([`UNet2DModel`]): U-Net architecture to denoise the encoded image.
        scheduler ([`ScoreSdeVeScheduler`]):
            The scheduler to be used in combination with `unet` to denoise the encoded image.
    """

    unet: UNet2DModel
    scheduler: ScoreSdeVeScheduler

    def __init__(self, unet: UNet2DModel, scheduler: ScoreSdeVeScheduler):
        super().__init__()
        self.register_modules(unet=unet, scheduler=scheduler)

    @torch.no_grad()
    def __call__(
        self,
        batch_size: int = 1,
        num_inference_steps: int = 2000,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
        **kwargs,
    ) -> Union[ImagePipelineOutput, Tuple]:
        img_size = self.unet.config.sample_size
        shape = (batch_size, 3, img_size, img_size)

        model = self.unet

        sample = randn_tensor(shape, generator=generator) * self.scheduler.init_noise_sigma
        sample = sample.to(self.device)

        self.scheduler.set_timesteps(num_inference_steps)
        self.scheduler.set_sigmas(num_inference_steps)

        for i, t in enumerate(self.progress_bar(self.scheduler.timesteps)):
            sigma_t = self.scheduler.sigmas[i] * torch.ones(shape[0], device=self.device)

            # correction step
            for _ in range(self.scheduler.config.correct_steps):
                model_output = self.unet(sample, sigma_t).sample
                sample = self.scheduler.step_correct(model_output, sample, generator=generator).prev_sample

            # prediction step
            model_output = model(sample, sigma_t).sample
            output = self.scheduler.step_pred(model_output, t, sample, generator=generator)

            sample, sample_mean = output.prev_sample, output.prev_sample_mean

        sample = sample_mean.clamp(0, 1)
        sample = sample.cpu().permute(0, 2, 3, 1).numpy()
        if output_type == "pil":
            sample = self.numpy_to_pil(sample)

        if not return_dict:
            return (sample,)

        return ImagePipelineOutput(images=sample)
| 475 | 0 |
'''XLM-RoBERTa-XL configuration.'''
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

XLM_ROBERTA_XL_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''facebook/xlm-roberta-xl''': '''https://huggingface.co/facebook/xlm-roberta-xl/resolve/main/config.json''',
'''facebook/xlm-roberta-xxl''': '''https://huggingface.co/facebook/xlm-roberta-xxl/resolve/main/config.json''',
# See all XLM-RoBERTa-XL models at https://huggingface.co/models?filter=xlm-roberta-xl
}
class XLMRobertaXLConfig(PretrainedConfig):
    r"""
    This is the configuration class to store the configuration of an XLM-RoBERTa-XL model.
    """

    model_type = "xlm-roberta-xl"

    def __init__(
        self,
        vocab_size=250880,
        hidden_size=2560,
        num_hidden_layers=36,
        num_attention_heads=32,
        intermediate_size=10240,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=514,
        type_vocab_size=1,
        initializer_range=0.02,
        layer_norm_eps=1e-05,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
class XLMRobertaXLOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
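# Minimal usage sketch (illustrative):
# config = XLMRobertaXLConfig()   # xlm-roberta-xl style defaults
# config.hidden_size              # -> 2560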
| 717 |
'''Compute ROUGE between two line-aligned text files.'''
import fire
from utils import calculate_rouge, save_json
def calculate_rouge_path(pred_path, tgt_path, save_path=None, **kwargs):
    """Kwargs will be passed to calculate_rouge"""
    pred_lns = [x.strip() for x in open(pred_path).readlines()]
    tgt_lns = [x.strip() for x in open(tgt_path).readlines()][: len(pred_lns)]
    metrics = calculate_rouge(pred_lns, tgt_lns, **kwargs)
    if save_path is not None:
        save_json(metrics, save_path, indent=None)
    return metrics  # these print nicely
if __name__ == "__main__":
fire.Fire(calculate_rouge_path)
| 656 | 0 |
from torch import nn
def get_activation(act_fn: str) -> nn.Module:
    """Return a torch activation module for a supported activation-function name."""
if act_fn in ["swish", "silu"]:
return nn.SiLU()
elif act_fn == "mish":
return nn.Mish()
elif act_fn == "gelu":
return nn.GELU()
else:
raise ValueError(f"""Unsupported activation function: {act_fn}""" )
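# Illustrative usage of the helper above:
# get_activation("silu")  # -> nn.SiLU()
# get_activation("mish")  # -> nn.Mish()
# get_activation("gelu")  # -> nn.GELU()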
| 87 |
'''Shard a large Switch Transformers T5X checkpoint into PyTorch weight files on the fly.'''
import argparse
import json
import os
import tensorstore as ts
import torch
from flax import serialization
from flax.traverse_util import flatten_dict, unflatten_dict
from tensorflow.io import gfile
from transformers.modeling_utils import dtype_byte_size
from transformers.models.switch_transformers.convert_switch_transformers_original_flax_checkpoint_to_pytorch import (
rename_keys,
)
from transformers.utils import WEIGHTS_INDEX_NAME, WEIGHTS_NAME
from transformers.utils.hub import convert_file_size_to_int
def rename_base_flax_keys(flax_key_tuple, flax_tensor):
    """
    Post renaming of basic JAX keys to pytorch.
    """
    if flax_key_tuple[-1] == "kernel" and flax_tensor.ndim == 3:
        # expert layer
        flax_key_tuple = flax_key_tuple[:-1] + ("weight",)
        flax_tensor = torch.permute(flax_tensor, (0, 2, 1))
    elif flax_key_tuple[-1] == "kernel" and ".".join(flax_key_tuple):
        # linear layer
        flax_key_tuple = flax_key_tuple[:-1] + ("weight",)
        flax_tensor = flax_tensor.T
    elif flax_key_tuple[-1] in ["scale", "embedding"]:
        flax_key_tuple = flax_key_tuple[:-1] + ("weight",)

    return flax_key_tuple, flax_tensor
def get_key_and_tensorstore_dict(layer, checkpoint_info, switch_checkpoint_path):
    if "metadata" in layer:
        split_layer = layer.split("metadata")
        curr_real_layer_name = "".join(split_layer[0])[:-1]
        split_layer = [tuple(("metadata" + split_layer[1]).split("/"))]
    elif "kvstore" in layer:
        split_layer = layer.split("kvstore")
        curr_real_layer_name = "".join(split_layer[0])[:-1]
        split_layer = [tuple(("kvstore" + split_layer[1]).split("/"))]
    else:
        split_layer = layer.split("/")
        curr_real_layer_name = "/".join(split_layer[:-1])
        split_layer = (split_layer[-1],)

    if "kvstore/path" in layer:
        content = f"{switch_checkpoint_path}/{checkpoint_info[layer]}"
    elif "kvstore/driver" in layer:
        content = "file"
    else:
        content = checkpoint_info[layer]

    return curr_real_layer_name, split_layer, content
def rename_and_save_block(current_block, save_path):
    current_block = rename_keys(current_block)
    new_current_block = {}
    for k, v in current_block.items():
        new_current_block[k.replace("/", ".")] = v
    current_block = new_current_block

    torch.save(current_block, save_path)
def shard_on_the_fly(switch_checkpoint_path, dump_path, max_shard_size, dtype, weights_name: str = WEIGHTS_NAME):
    max_shard_size = convert_file_size_to_int(max_shard_size)

    sharded_state_dicts = []
    current_block = {}
    current_block_size = 0
    total_size = 0

    os.makedirs(dump_path, exist_ok=True)
    with gfile.GFile(switch_checkpoint_path + "/checkpoint", "rb") as fp:
        checkpoint_info = serialization.msgpack_restore(fp.read())["optimizer"]["target"]
        checkpoint_info = flatten_dict(checkpoint_info, sep="/")

    all_layers = {}
    for layer in checkpoint_info.keys():
        curr_real_layer_name, split_layer, content = get_key_and_tensorstore_dict(
            layer, checkpoint_info, switch_checkpoint_path
        )
        if curr_real_layer_name in all_layers:
            all_layers[curr_real_layer_name][split_layer[-1]] = content
        else:
            all_layers[curr_real_layer_name] = {split_layer[-1]: content}

    for key in all_layers.keys():
        # open tensorstore file
        raw_weights = ts.open(unflatten_dict(all_layers[key])).result().read().result()
        raw_weights = torch.tensor(raw_weights)
        weight_size = raw_weights.numel() * dtype_byte_size(raw_weights.dtype)

        # use the renaming pattern from the small conversion scripts
        key, raw_weights = rename_base_flax_keys(tuple(key.split("/")), raw_weights)
        key = "/".join(key)

        # If this weight is going to tip up over the maximal size, we split.
        if current_block_size + weight_size > max_shard_size:
            save_path = os.path.join(
                dump_path, weights_name.replace(".bin", f"-{len(sharded_state_dicts)+1:05d}-of-???.bin")
            )
            rename_and_save_block(current_block, save_path)
            sharded_state_dicts.append(current_block.keys())
            del current_block
            current_block = {}
            current_block_size = 0

        current_block[key] = raw_weights.to(getattr(torch, dtype))
        current_block_size += weight_size
        total_size += weight_size

    # Add the last block
    save_path = os.path.join(dump_path, weights_name.replace(".bin", f"-{len(sharded_state_dicts)+1:05d}-of-???.bin"))
    rename_and_save_block(current_block, save_path)
    sharded_state_dicts.append(current_block.keys())

    # If we only have one shard, we return it
    if len(sharded_state_dicts) == 1:
        return {weights_name: sharded_state_dicts[0]}, None

    # Otherwise, let's build the index
    weight_map = {}
    shards = {}
    for idx, shard in enumerate(sharded_state_dicts):
        shard_file = weights_name.replace(
            ".bin", f"-{idx+1:05d}-of-{len(sharded_state_dicts):05d}.bin"
        )  # len(sharded_state_dicts):05d}
        temp_filename = os.path.join(dump_path, weights_name.replace(".bin", f"-{idx+1:05d}-of-???.bin"))
        os.rename(temp_filename, os.path.join(dump_path, shard_file))
        shards[shard_file] = shard
        for key in shard:
            weight_map[key] = shard_file

    # Add the metadata
    metadata = {"total_size": total_size}
    index = {"metadata": metadata, "weight_map": weight_map}

    with open(os.path.join(dump_path, WEIGHTS_INDEX_NAME), "w", encoding="utf-8") as f:
        content = json.dumps(index, indent=2, sort_keys=True) + "\n"
        f.write(content)

    return metadata, index
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--switch_t5x_checkpoint_path',
default='/mnt/disks/disk_switch/original_checkpoints/switch-xxl-128/checkpoint_634600',
type=str,
required=False,
help='Path to a directory containing a folder per layer. Follows the original Google format.',
)
parser.add_argument('--max_shard_size', default='10GB', required=False, help='Max shard size')
parser.add_argument('--dtype', default='bfloat16', type=str, required=False, help='dtype of the saved model')
parser.add_argument(
'--pytorch_dump_folder_path',
default='/mnt/disks/disk_switch/original_checkpoints/switch-xxl-128-converted',
type=str,
required=False,
help='Path to the output pytorch model.',
)
    args = parser.parse_args()
shard_on_the_fly(
        args.switch_t5x_checkpoint_path,
args.pytorch_dump_folder_path,
args.max_shard_size,
args.dtype,
)
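# Optional debugging helper (not wired into the CLI above): it converts a small public
# checkpoint and generates from a fill-in-the-blank prompt to sanity-check the weights.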
def sanity_check():
    """simple docstring"""
    from transformers import SwitchTransformersConfig, SwitchTransformersForConditionalGeneration, T5Tokenizer

    config = SwitchTransformersConfig.from_pretrained("google/switch-base-8")
    config.save_pretrained("/home/arthur_huggingface_co/transformers/switch_converted")
    model = SwitchTransformersForConditionalGeneration.from_pretrained(
        "/home/arthur_huggingface_co/transformers/switch_converted", device_map="auto")
    tokenizer = T5Tokenizer.from_pretrained("t5-small")
    text = "A <extra_id_0> walks into a bar a orders a <extra_id_1> with <extra_id_2> pinch of <extra_id_3>."
    input_ids = tokenizer(text, return_tensors="pt").input_ids
    out = model.generate(input_ids, decoder_start_token_id=0)
    print(tokenizer.decode(out[0]))
| 442 | 0 |
import warnings
from ...utils import logging
from .image_processing_glpn import GLPNImageProcessor
_UpperCamelCase = logging.get_logger(__name__)
class __lowercase (GLPNImageProcessor ):
def __init__( self , *A_ , **A_ ) ->None:
'''simple docstring'''
warnings.warn(
'''The class GLPNFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please'''
            ''' use GLPNImageProcessor instead.''' , FutureWarning , )
super().__init__(*A_ , **A_ )
| 583 |
import os
import tempfile
import unittest
from transformers import NezhaConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_PRETRAINING_MAPPING,
NezhaForMaskedLM,
NezhaForMultipleChoice,
NezhaForNextSentencePrediction,
NezhaForPreTraining,
NezhaForQuestionAnswering,
NezhaForSequenceClassification,
NezhaForTokenClassification,
NezhaModel,
)
from transformers.models.nezha.modeling_nezha import NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST
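# The tester below builds a tiny Nezha configuration plus random inputs and labels that
# every model test in this file reuses.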
class __lowercase :
def __init__( self , A_ , A_=13 , A_=7 , A_=True , A_=True , A_=True , A_=True , A_=99 , A_=32 , A_=5 , A_=4 , A_=37 , A_="gelu" , A_=0.1 , A_=0.1 , A_=128 , A_=32 , A_=16 , A_=2 , A_=0.02 , A_=3 , A_=4 , A_=None , ) ->List[Any]:
'''simple docstring'''
__lowerCAmelCase : Union[str, Any] = parent
__lowerCAmelCase : Tuple = batch_size
__lowerCAmelCase : List[Any] = seq_length
__lowerCAmelCase : Union[str, Any] = is_training
__lowerCAmelCase : List[str] = use_input_mask
__lowerCAmelCase : List[str] = use_token_type_ids
__lowerCAmelCase : List[Any] = use_labels
__lowerCAmelCase : Optional[int] = vocab_size
__lowerCAmelCase : Any = hidden_size
__lowerCAmelCase : Optional[Any] = num_hidden_layers
__lowerCAmelCase : str = num_attention_heads
__lowerCAmelCase : Union[str, Any] = intermediate_size
__lowerCAmelCase : int = hidden_act
__lowerCAmelCase : Union[str, Any] = hidden_dropout_prob
__lowerCAmelCase : Union[str, Any] = attention_probs_dropout_prob
__lowerCAmelCase : str = max_position_embeddings
__lowerCAmelCase : Optional[int] = type_vocab_size
__lowerCAmelCase : Dict = type_sequence_label_size
__lowerCAmelCase : List[Any] = initializer_range
__lowerCAmelCase : Optional[int] = num_labels
__lowerCAmelCase : int = num_choices
__lowerCAmelCase : Tuple = scope
def UpperCamelCase__ ( self ) ->Tuple:
'''simple docstring'''
__lowerCAmelCase : Tuple = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__lowerCAmelCase : Dict = None
if self.use_input_mask:
__lowerCAmelCase : Tuple = random_attention_mask([self.batch_size, self.seq_length] )
__lowerCAmelCase : Any = None
if self.use_token_type_ids:
__lowerCAmelCase : Tuple = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
__lowerCAmelCase : List[Any] = None
__lowerCAmelCase : Optional[Any] = None
__lowerCAmelCase : Union[str, Any] = None
if self.use_labels:
__lowerCAmelCase : List[Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__lowerCAmelCase : int = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
__lowerCAmelCase : Union[str, Any] = ids_tensor([self.batch_size] , self.num_choices )
__lowerCAmelCase : Union[str, Any] = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def UpperCamelCase__ ( self ) ->Optional[int]:
'''simple docstring'''
return NezhaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=A_ , initializer_range=self.initializer_range , )
def UpperCamelCase__ ( self ) ->List[Any]:
'''simple docstring'''
(
(
__lowerCAmelCase
), (
__lowerCAmelCase
), (
__lowerCAmelCase
), (
__lowerCAmelCase
), (
__lowerCAmelCase
), (
__lowerCAmelCase
), (
__lowerCAmelCase
),
) : str = self.prepare_config_and_inputs()
__lowerCAmelCase : Tuple = True
__lowerCAmelCase : Dict = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] )
__lowerCAmelCase : Optional[int] = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
return (
config,
input_ids,
token_type_ids,
input_mask,
sequence_labels,
token_labels,
choice_labels,
encoder_hidden_states,
encoder_attention_mask,
)
def UpperCamelCase__ ( self , A_ , A_ , A_ , A_ , A_ , A_ , A_ ) ->int:
'''simple docstring'''
__lowerCAmelCase : Tuple = NezhaModel(config=A_ )
model.to(A_ )
model.eval()
__lowerCAmelCase : Tuple = model(A_ , attention_mask=A_ , token_type_ids=A_ )
__lowerCAmelCase : Optional[int] = model(A_ , token_type_ids=A_ )
__lowerCAmelCase : Tuple = model(A_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def UpperCamelCase__ ( self , A_ , A_ , A_ , A_ , A_ , A_ , A_ , A_ , A_ , ) ->List[str]:
'''simple docstring'''
__lowerCAmelCase : Union[str, Any] = True
__lowerCAmelCase : Dict = NezhaModel(A_ )
model.to(A_ )
model.eval()
__lowerCAmelCase : Union[str, Any] = model(
A_ , attention_mask=A_ , token_type_ids=A_ , encoder_hidden_states=A_ , encoder_attention_mask=A_ , )
__lowerCAmelCase : List[Any] = model(
A_ , attention_mask=A_ , token_type_ids=A_ , encoder_hidden_states=A_ , )
__lowerCAmelCase : int = model(A_ , attention_mask=A_ , token_type_ids=A_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def UpperCamelCase__ ( self , A_ , A_ , A_ , A_ , A_ , A_ , A_ ) ->Any:
'''simple docstring'''
__lowerCAmelCase : Optional[Any] = NezhaForMaskedLM(config=A_ )
model.to(A_ )
model.eval()
__lowerCAmelCase : Optional[Any] = model(A_ , attention_mask=A_ , token_type_ids=A_ , labels=A_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def UpperCamelCase__ ( self , A_ , A_ , A_ , A_ , A_ , A_ , A_ ) ->Optional[Any]:
'''simple docstring'''
__lowerCAmelCase : Any = NezhaForNextSentencePrediction(config=A_ )
model.to(A_ )
model.eval()
__lowerCAmelCase : str = model(
A_ , attention_mask=A_ , token_type_ids=A_ , labels=A_ , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, 2) )
def UpperCamelCase__ ( self , A_ , A_ , A_ , A_ , A_ , A_ , A_ ) ->Optional[Any]:
'''simple docstring'''
__lowerCAmelCase : Tuple = NezhaForPreTraining(config=A_ )
model.to(A_ )
model.eval()
__lowerCAmelCase : List[str] = model(
A_ , attention_mask=A_ , token_type_ids=A_ , labels=A_ , next_sentence_label=A_ , )
self.parent.assertEqual(result.prediction_logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
self.parent.assertEqual(result.seq_relationship_logits.shape , (self.batch_size, 2) )
def UpperCamelCase__ ( self , A_ , A_ , A_ , A_ , A_ , A_ , A_ ) ->Any:
'''simple docstring'''
__lowerCAmelCase : int = NezhaForQuestionAnswering(config=A_ )
model.to(A_ )
model.eval()
__lowerCAmelCase : Union[str, Any] = model(
A_ , attention_mask=A_ , token_type_ids=A_ , start_positions=A_ , end_positions=A_ , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def UpperCamelCase__ ( self , A_ , A_ , A_ , A_ , A_ , A_ , A_ ) ->List[Any]:
'''simple docstring'''
__lowerCAmelCase : Any = self.num_labels
__lowerCAmelCase : Optional[int] = NezhaForSequenceClassification(A_ )
model.to(A_ )
model.eval()
__lowerCAmelCase : int = model(A_ , attention_mask=A_ , token_type_ids=A_ , labels=A_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def UpperCamelCase__ ( self , A_ , A_ , A_ , A_ , A_ , A_ , A_ ) ->int:
'''simple docstring'''
__lowerCAmelCase : Dict = self.num_labels
__lowerCAmelCase : int = NezhaForTokenClassification(config=A_ )
model.to(A_ )
model.eval()
__lowerCAmelCase : Dict = model(A_ , attention_mask=A_ , token_type_ids=A_ , labels=A_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def UpperCamelCase__ ( self , A_ , A_ , A_ , A_ , A_ , A_ , A_ ) ->Tuple:
'''simple docstring'''
__lowerCAmelCase : Optional[Any] = self.num_choices
__lowerCAmelCase : int = NezhaForMultipleChoice(config=A_ )
model.to(A_ )
model.eval()
__lowerCAmelCase : Union[str, Any] = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
__lowerCAmelCase : Optional[int] = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
__lowerCAmelCase : List[str] = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
__lowerCAmelCase : Optional[int] = model(
A_ , attention_mask=A_ , token_type_ids=A_ , labels=A_ , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def UpperCamelCase__ ( self ) ->Optional[int]:
'''simple docstring'''
__lowerCAmelCase : Any = self.prepare_config_and_inputs()
(
(
__lowerCAmelCase
), (
__lowerCAmelCase
), (
__lowerCAmelCase
), (
__lowerCAmelCase
), (
__lowerCAmelCase
), (
__lowerCAmelCase
), (
__lowerCAmelCase
),
) : Optional[int] = config_and_inputs
__lowerCAmelCase : Any = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''attention_mask''': input_mask}
return config, inputs_dict
@require_torch
class __lowercase (_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , unittest.TestCase ):
_UpperCamelCase = (
(
NezhaModel,
NezhaForMaskedLM,
NezhaForMultipleChoice,
NezhaForNextSentencePrediction,
NezhaForPreTraining,
NezhaForQuestionAnswering,
NezhaForSequenceClassification,
NezhaForTokenClassification,
)
if is_torch_available()
else ()
)
_UpperCamelCase = (
{
"""feature-extraction""": NezhaModel,
"""fill-mask""": NezhaForMaskedLM,
"""question-answering""": NezhaForQuestionAnswering,
"""text-classification""": NezhaForSequenceClassification,
"""token-classification""": NezhaForTokenClassification,
"""zero-shot""": NezhaForSequenceClassification,
}
if is_torch_available()
else {}
)
_UpperCamelCase = True
def UpperCamelCase__ ( self , A_ , A_ , A_=False ) ->List[str]:
'''simple docstring'''
__lowerCAmelCase : Union[str, Any] = super()._prepare_for_class(A_ , A_ , return_labels=A_ )
if return_labels:
if model_class in get_values(A_ ):
__lowerCAmelCase : Dict = torch.zeros(
(self.model_tester.batch_size, self.model_tester.seq_length) , dtype=torch.long , device=A_ )
__lowerCAmelCase : str = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=A_ )
return inputs_dict
def UpperCamelCase__ ( self ) ->Union[str, Any]:
'''simple docstring'''
__lowerCAmelCase : Dict = NezhaModelTester(self )
__lowerCAmelCase : Optional[int] = ConfigTester(self , config_class=A_ , hidden_size=37 )
def UpperCamelCase__ ( self ) ->Optional[int]:
'''simple docstring'''
self.config_tester.run_common_tests()
def UpperCamelCase__ ( self ) ->Tuple:
'''simple docstring'''
__lowerCAmelCase : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*A_ )
def UpperCamelCase__ ( self ) ->List[str]:
'''simple docstring'''
__lowerCAmelCase : List[Any] = self.model_tester.prepare_config_and_inputs_for_decoder()
self.model_tester.create_and_check_model_as_decoder(*A_ )
def UpperCamelCase__ ( self ) ->Optional[Any]:
'''simple docstring'''
(
(
__lowerCAmelCase
), (
__lowerCAmelCase
), (
__lowerCAmelCase
), (
__lowerCAmelCase
), (
__lowerCAmelCase
), (
__lowerCAmelCase
), (
__lowerCAmelCase
), (
__lowerCAmelCase
), (
__lowerCAmelCase
),
) : Optional[int] = self.model_tester.prepare_config_and_inputs_for_decoder()
__lowerCAmelCase : Tuple = None
self.model_tester.create_and_check_model_as_decoder(
A_ , A_ , A_ , A_ , A_ , A_ , A_ , A_ , A_ , )
def UpperCamelCase__ ( self ) ->Tuple:
'''simple docstring'''
__lowerCAmelCase : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*A_ )
def UpperCamelCase__ ( self ) ->Optional[Any]:
'''simple docstring'''
__lowerCAmelCase : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_multiple_choice(*A_ )
def UpperCamelCase__ ( self ) ->Any:
'''simple docstring'''
__lowerCAmelCase : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_next_sequence_prediction(*A_ )
def UpperCamelCase__ ( self ) ->Tuple:
'''simple docstring'''
__lowerCAmelCase : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_pretraining(*A_ )
def UpperCamelCase__ ( self ) ->Optional[int]:
'''simple docstring'''
__lowerCAmelCase : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*A_ )
def UpperCamelCase__ ( self ) ->Optional[Any]:
'''simple docstring'''
__lowerCAmelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*A_ )
def UpperCamelCase__ ( self ) ->Tuple:
'''simple docstring'''
__lowerCAmelCase : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*A_ )
@slow
def UpperCamelCase__ ( self ) ->List[Any]:
'''simple docstring'''
for model_name in NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__lowerCAmelCase : List[Any] = NezhaModel.from_pretrained(A_ )
self.assertIsNotNone(A_ )
@slow
@require_torch_gpu
def UpperCamelCase__ ( self ) ->Tuple:
'''simple docstring'''
__lowerCAmelCase, __lowerCAmelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
# NezhaForMultipleChoice behaves incorrectly in JIT environments.
if model_class == NezhaForMultipleChoice:
return
__lowerCAmelCase : Optional[int] = True
__lowerCAmelCase : Union[str, Any] = model_class(config=A_ )
__lowerCAmelCase : List[Any] = self._prepare_for_class(A_ , A_ )
__lowerCAmelCase : Tuple = torch.jit.trace(
A_ , (inputs_dict['''input_ids'''].to('''cpu''' ), inputs_dict['''attention_mask'''].to('''cpu''' )) )
with tempfile.TemporaryDirectory() as tmp:
torch.jit.save(A_ , os.path.join(A_ , '''bert.pt''' ) )
__lowerCAmelCase : Optional[int] = torch.jit.load(os.path.join(A_ , '''bert.pt''' ) , map_location=A_ )
loaded(inputs_dict['''input_ids'''].to(A_ ) , inputs_dict['''attention_mask'''].to(A_ ) )
@require_torch
class __lowercase (unittest.TestCase ):
@slow
def UpperCamelCase__ ( self ) ->List[str]:
'''simple docstring'''
__lowerCAmelCase : Dict = NezhaModel.from_pretrained('''sijunhe/nezha-cn-base''' )
__lowerCAmelCase : List[Any] = torch.tensor([[0, 1, 2, 3, 4, 5]] )
__lowerCAmelCase : Tuple = torch.tensor([[0, 1, 1, 1, 1, 1]] )
with torch.no_grad():
__lowerCAmelCase : List[Any] = model(A_ , attention_mask=A_ )[0]
__lowerCAmelCase : Union[str, Any] = torch.Size((1, 6, 768) )
self.assertEqual(output.shape , A_ )
__lowerCAmelCase : Union[str, Any] = torch.tensor([[[0.0_685, 0.2_441, 0.1_102], [0.0_600, 0.1_906, 0.1_349], [0.0_221, 0.0_819, 0.0_586]]] )
self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , A_ , atol=1e-4 ) )
@slow
def UpperCamelCase__ ( self ) ->List[Any]:
'''simple docstring'''
__lowerCAmelCase : int = NezhaForMaskedLM.from_pretrained('''sijunhe/nezha-cn-base''' )
__lowerCAmelCase : Optional[int] = torch.tensor([[0, 1, 2, 3, 4, 5]] )
__lowerCAmelCase : List[Any] = torch.tensor([[1, 1, 1, 1, 1, 1]] )
with torch.no_grad():
__lowerCAmelCase : List[str] = model(A_ , attention_mask=A_ )[0]
__lowerCAmelCase : Union[str, Any] = torch.Size((1, 6, 2_1128) )
self.assertEqual(output.shape , A_ )
__lowerCAmelCase : str = torch.tensor(
[[-2.7_939, -1.7_902, -2.2_189], [-2.8_585, -1.8_908, -2.3_723], [-2.6_499, -1.7_750, -2.2_558]] )
self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , A_ , atol=1e-4 ) )
| 583 | 1 |
import numpy as np
import torch
import torch.nn as nn
from transformers import CLIPConfig, CLIPVisionModelWithProjection, PreTrainedModel
from ...utils import logging
logger = logging.get_logger(__name__)
class A__ (PreTrainedModel):
    config_class = CLIPConfig
    _no_split_modules = ["CLIPEncoderLayer"]

    def __init__(self, config: CLIPConfig):
        super().__init__(config)
        self.vision_model = CLIPVisionModelWithProjection(config.vision_config)
        # Two linear probes on top of the projected CLIP image embedding: one scores
        # NSFW likelihood, the other watermark likelihood.
        self.p_head = nn.Linear(config.vision_config.projection_dim, 1)
        self.w_head = nn.Linear(config.vision_config.projection_dim, 1)

    @torch.no_grad()
    def forward(self, clip_input, images, p_threshold=0.5, w_threshold=0.5):
        image_embeds = self.vision_model(clip_input)[0]

        nsfw_detected = self.p_head(image_embeds)
        nsfw_detected = nsfw_detected.flatten()
        nsfw_detected = nsfw_detected > p_threshold
        nsfw_detected = nsfw_detected.tolist()

        if any(nsfw_detected):
            logger.warning(
                "Potential NSFW content was detected in one or more images. A black image will be returned instead."
                " Try again with a different prompt and/or seed.")

        for idx, nsfw_detected_ in enumerate(nsfw_detected):
            if nsfw_detected_:
                images[idx] = np.zeros(images[idx].shape)

        watermark_detected = self.w_head(image_embeds)
        watermark_detected = watermark_detected.flatten()
        watermark_detected = watermark_detected > w_threshold
        watermark_detected = watermark_detected.tolist()

        if any(watermark_detected):
            logger.warning(
                "Potential watermarked content was detected in one or more images. A black image will be returned instead."
                " Try again with a different prompt and/or seed.")

        for idx, watermark_detected_ in enumerate(watermark_detected):
            if watermark_detected_:
                images[idx] = np.zeros(images[idx].shape)

        return images, nsfw_detected, watermark_detected
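# Hedged usage sketch (variable names illustrative, not from the source): `clip_input`
# is the pixel batch produced by the matching CLIP feature extractor and `images` the
# numpy images to be blanked in place when flagged:
#     images, nsfw, watermarked = checker(clip_input, images)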
| 681 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
"configuration_blenderbot_small": [
"BLENDERBOT_SMALL_PRETRAINED_CONFIG_ARCHIVE_MAP",
"BlenderbotSmallConfig",
"BlenderbotSmallOnnxConfig",
],
"tokenization_blenderbot_small": ["BlenderbotSmallTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_blenderbot_small_fast"] = ["BlenderbotSmallTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_blenderbot_small"] = [
"BLENDERBOT_SMALL_PRETRAINED_MODEL_ARCHIVE_LIST",
"BlenderbotSmallForCausalLM",
"BlenderbotSmallForConditionalGeneration",
"BlenderbotSmallModel",
"BlenderbotSmallPreTrainedModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_blenderbot_small"] = [
"TFBlenderbotSmallForConditionalGeneration",
"TFBlenderbotSmallModel",
"TFBlenderbotSmallPreTrainedModel",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_blenderbot_small"] = [
"FlaxBlenderbotSmallForConditionalGeneration",
"FlaxBlenderbotSmallModel",
"FlaxBlenderbotSmallPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_blenderbot_small import (
BLENDERBOT_SMALL_PRETRAINED_CONFIG_ARCHIVE_MAP,
BlenderbotSmallConfig,
BlenderbotSmallOnnxConfig,
)
from .tokenization_blenderbot_small import BlenderbotSmallTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_blenderbot_small_fast import BlenderbotSmallTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_blenderbot_small import (
BLENDERBOT_SMALL_PRETRAINED_MODEL_ARCHIVE_LIST,
BlenderbotSmallForCausalLM,
BlenderbotSmallForConditionalGeneration,
BlenderbotSmallModel,
BlenderbotSmallPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_blenderbot_small import (
TFBlenderbotSmallForConditionalGeneration,
TFBlenderbotSmallModel,
TFBlenderbotSmallPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_blenderbot_small import (
FlaxBlenderbotSmallForConditionalGeneration,
FlaxBlenderbotSmallModel,
FlaxBlenderbotSmallPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 457 | 0 |
'''simple docstring'''
from collections.abc import Callable
class a :
"""simple docstring"""
    def __init__(self, key: Callable | None = None):
        # List of [item, score] pairs laid out as a binary heap.
        self.arr: list = []
        # Stores indexes of each item for supporting updates and deletion.
        self.pos_map: dict = {}
        # Stores current size of heap.
        self.size = 0
        # Stores function used to evaluate the score of an item on which basis ordering
        # will be done.
        self.key: Callable = key or (lambda x: x)

    def _parent(self, i: int) -> int | None:
        return int((i - 1) / 2) if i > 0 else None

    def _left(self, i: int) -> int | None:
        left = int(2 * i + 1)
        return left if 0 < left < self.size else None

    def _right(self, i: int) -> int | None:
        right = int(2 * i + 2)
        return right if 0 < right < self.size else None

    def _swap(self, i: int, j: int) -> None:
        # First update the indexes of the items in the index map.
        self.pos_map[self.arr[i][0]], self.pos_map[self.arr[j][0]] = (
            self.pos_map[self.arr[j][0]],
            self.pos_map[self.arr[i][0]],
        )
        # Then swap the items in the list.
        self.arr[i], self.arr[j] = self.arr[j], self.arr[i]

    def _cmp(self, i: int, j: int) -> bool:
        return self.arr[i][1] < self.arr[j][1]

    def _get_valid_parent(self, i: int) -> int:
        left = self._left(i)
        right = self._right(i)
        valid_parent = i
        if left is not None and not self._cmp(left, valid_parent):
            valid_parent = left
        if right is not None and not self._cmp(right, valid_parent):
            valid_parent = right
        return valid_parent

    def _heapify_up(self, index: int) -> None:
        parent = self._parent(index)
        while parent is not None and not self._cmp(index, parent):
            self._swap(index, parent)
            index, parent = parent, self._parent(parent)

    def _heapify_down(self, index: int) -> None:
        valid_parent = self._get_valid_parent(index)
        while valid_parent != index:
            self._swap(index, valid_parent)
            index, valid_parent = valid_parent, self._get_valid_parent(valid_parent)

    def update_item(self, item: int, item_value: int) -> None:
        if item not in self.pos_map:
            return
        index = self.pos_map[item]
        self.arr[index] = [item, self.key(item_value)]
        # Make sure heap is right in both up and down direction.
        # Ideally only one of them will make any change.
        self._heapify_up(index)
        self._heapify_down(index)

    def delete_item(self, item: int) -> None:
        if item not in self.pos_map:
            return
        index = self.pos_map[item]
        del self.pos_map[item]
        # Move the last item into the freed slot and fix its position-map entry.
        self.arr[index] = self.arr[self.size - 1]
        self.pos_map[self.arr[self.size - 1][0]] = index
        self.size -= 1
        # Make sure heap is right in both up and down direction. Ideally only one
        # of them will make any change- so no performance loss in calling both.
        if self.size > index:
            self._heapify_up(index)
            self._heapify_down(index)

    def insert_item(self, item: int, item_value: int) -> None:
        arr_len = len(self.arr)
        if arr_len == self.size:
            self.arr.append([item, self.key(item_value)])
        else:
            self.arr[self.size] = [item, self.key(item_value)]
        self.pos_map[item] = self.size
        self.size += 1
        self._heapify_up(self.size - 1)

    def get_top(self) -> list | None:
        return self.arr[0] if self.size else None

    def extract_top(self) -> list | None:
        top_item_tuple = self.get_top()
        if top_item_tuple:
            self.delete_item(top_item_tuple[0])
        return top_item_tuple
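# Hedged usage sketch (illustrative values, not from the source). With the default
# identity key this behaves as a max-heap keyed on the inserted value:
#     heap = a()
#     for v in (3, 1, 2):
#         heap.insert_item(v, v)
#     assert heap.get_top() == [3, 3]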
def _a ( ):
"""simple docstring"""
if __name__ == "__main__":
import doctest
doctest.testmod()
| 502 |
'''simple docstring'''
# Copyright 2022 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
import subprocess
from packaging.version import Version, parse
from accelerate.commands.config.config_args import default_config_file, load_config_from_file
_description = """Run commands across TPU VMs for initial setup before running `accelerate launch`."""
def tpu_command_parser(subparsers=None):
    """simple docstring"""
    if subparsers is not None:
        parser = subparsers.add_parser('''tpu-config''' , description=_description )
    else:
        parser = argparse.ArgumentParser('''Accelerate tpu-config command''' , description=_description )
    # Core arguments
    config_args = parser.add_argument_group(
        '''Config Arguments''' , '''Arguments that can be configured through `accelerate config`.''' )
    config_args.add_argument(
        '''--config_file''' , type=str , default=None , help='''Path to the config file to use for accelerate.''' , )
    config_args.add_argument(
        '''--tpu_name''' , default=None , help='''The name of the TPU to use. If not specified, will use the TPU specified in the config file.''' , )
    config_args.add_argument(
        '''--tpu_zone''' , default=None , help='''The zone of the TPU to use. If not specified, will use the zone specified in the config file.''' , )
    pod_args = parser.add_argument_group('''TPU Arguments''' , '''Arguments for options ran inside the TPU.''' )
    pod_args.add_argument(
        '''--use_alpha''' , action='''store_true''' , help='''Whether to use `gcloud alpha` when running the TPU training script instead of `gcloud`.''' , )
    pod_args.add_argument(
        '''--command_file''' , default=None , help='''The path to the file containing the commands to run on the pod on startup.''' , )
    pod_args.add_argument(
        '''--command''' , action='''append''' , nargs='''+''' , help='''A command to run on the pod. Can be passed multiple times.''' , )
    pod_args.add_argument(
        '''--install_accelerate''' , action='''store_true''' , help='''Whether to install accelerate on the pod. Defaults to False.''' , )
    pod_args.add_argument(
        '''--accelerate_version''' , default='''latest''' , help='''The version of accelerate to install on the pod. If not specified, will use the latest pypi version. Specify \'dev\' to install from GitHub.''' , )
    pod_args.add_argument(
        '''--debug''' , action='''store_true''' , help='''If set, will print the command that would be run instead of running it.''' )
    if subparsers is not None:
        parser.set_defaults(func=tpu_command_launcher )
    return parser
def tpu_command_launcher(args):
    """simple docstring"""
    defaults = None
    # Get the default from the config file if it exists.
    if args.config_file is not None or os.path.isfile(default_config_file):
        defaults = load_config_from_file(args.config_file)
        if not args.command_file and defaults.command_file is not None and not args.command:
            args.command_file = defaults.command_file
        if not args.command and defaults.commands is not None:
            args.command = defaults.commands
        if not args.tpu_name:
            args.tpu_name = defaults.tpu_name
        if not args.tpu_zone:
            args.tpu_zone = defaults.tpu_zone
    if args.accelerate_version == "dev":
        args.accelerate_version = '''git+https://github.com/huggingface/accelerate.git'''
    elif args.accelerate_version == "latest":
        args.accelerate_version = '''accelerate -U'''
    elif isinstance(parse(args.accelerate_version), Version):
        args.accelerate_version = F"""accelerate=={args.accelerate_version}"""

    if not args.command_file and not args.command:
        raise ValueError('''You must specify either a command file or a command to run on the pod.''' )

    if args.command_file:
        with open(args.command_file , '''r''' ) as f:
            args.command = [f.read().splitlines()]

    # To turn list of lists into list of strings
    if isinstance(args.command[0], list):
        args.command = [line for cmd in args.command for line in cmd]
    # Default to the shared folder and install accelerate
    new_cmd = ['''cd /usr/share''']
    if args.install_accelerate:
        new_cmd += [F"""pip install {args.accelerate_version}"""]
    new_cmd += args.command
    args.command = '''; '''.join(new_cmd)

    # Then send it to gcloud
    # Eventually try to use google-api-core to do this instead of subprocess
    cmd = ['''gcloud''']
    if args.use_alpha:
        cmd += ["alpha"]
    cmd += [
        "compute",
        "tpus",
        "tpu-vm",
        "ssh",
        args.tpu_name,
        "--zone",
        args.tpu_zone,
        "--command",
        args.command,
        "--worker",
        "all",
    ]
    if args.debug:
        print(F"""Running {' '.join(cmd)}""" )
        return
    subprocess.run(cmd)
print('''Successfully setup pod.''' )
def main():
    """simple docstring"""
    parser = tpu_command_parser()
    args = parser.parse_args()
    tpu_command_launcher(args)
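# Example invocation (flag names as defined above, values illustrative):
#     accelerate tpu-config --tpu_name my-tpu --tpu_zone us-central1-a \
#         --command "echo hello" --install_accelerate --debug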
| 502 | 1 |
from typing import TYPE_CHECKING
from ...file_utils import _LazyModule, is_torch_available
from ...utils import OptionalDependencyNotAvailable
_import_structure = {
"""configuration_gpt_neox_japanese""": ["""GPT_NEOX_JAPANESE_PRETRAINED_CONFIG_ARCHIVE_MAP""", """GPTNeoXJapaneseConfig"""],
"""tokenization_gpt_neox_japanese""": ["""GPTNeoXJapaneseTokenizer"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_gpt_neox_japanese"] = [
"""GPT_NEOX_JAPANESE_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""GPTNeoXJapaneseForCausalLM""",
"""GPTNeoXJapaneseLayer""",
"""GPTNeoXJapaneseModel""",
"""GPTNeoXJapanesePreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_gpt_neox_japanese import GPT_NEOX_JAPANESE_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTNeoXJapaneseConfig
from .tokenization_gpt_neox_japanese import GPTNeoXJapaneseTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_gpt_neox_japanese import (
GPT_NEOX_JAPANESE_PRETRAINED_MODEL_ARCHIVE_LIST,
GPTNeoXJapaneseForCausalLM,
GPTNeoXJapaneseLayer,
GPTNeoXJapaneseModel,
GPTNeoXJapanesePreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__) | 483 |
import pickle
import unittest
import torch
from accelerate import Accelerator
from accelerate.state import AcceleratorState
from accelerate.test_utils import require_cpu
@require_cpu
class lowercase_ ( unittest.TestCase):
"""simple docstring"""
def lowercase__ ( self ):
"""simple docstring"""
        model = torch.nn.Linear(10 , 10 )
        optimizer = torch.optim.SGD(model.parameters() , 0.1 )
        accelerator = Accelerator()
        optimizer = accelerator.prepare(optimizer )
        try:
            pickle.loads(pickle.dumps(optimizer ) )
except Exception as e:
self.fail(f"Accelerated optimizer pickling failed with {e}" )
AcceleratorState._reset_state() | 483 | 1 |
from collections import defaultdict
from math import ceil, sqrt
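# Counts how many tile totals up to `t_limit` can form between 1 and `n_limit`
# distinct hollow square laminae; this appears to mirror Project Euler problem 174.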
def __UpperCAmelCase( t_limit: int = 1_00_00_00 , n_limit: int = 10 ):
    count: defaultdict = defaultdict(int)
    for outer_width in range(3 , (t_limit // 4) + 2 ):
        if outer_width * outer_width > t_limit:
            hole_width_lower_bound = max(
                ceil(sqrt(outer_width * outer_width - t_limit ) ) , 1 )
        else:
            hole_width_lower_bound = 1
        # Outer and inner squares must share parity for the lamina to close.
        hole_width_lower_bound += (outer_width - hole_width_lower_bound) % 2
        for hole_width in range(hole_width_lower_bound , outer_width - 1 , 2 ):
            count[outer_width * outer_width - hole_width * hole_width] += 1
    return sum(1 for n in count.values() if 1 <= n <= n_limit )
if __name__ == "__main__":
    print(F'''{__UpperCAmelCase() = }''')
| 613 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
'configuration_layoutlmv3': [
'LAYOUTLMV3_PRETRAINED_CONFIG_ARCHIVE_MAP',
'LayoutLMv3Config',
'LayoutLMv3OnnxConfig',
],
'processing_layoutlmv3': ['LayoutLMv3Processor'],
'tokenization_layoutlmv3': ['LayoutLMv3Tokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_layoutlmv3_fast"] = ['LayoutLMv3TokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_layoutlmv3"] = [
'LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST',
'LayoutLMv3ForQuestionAnswering',
'LayoutLMv3ForSequenceClassification',
'LayoutLMv3ForTokenClassification',
'LayoutLMv3Model',
'LayoutLMv3PreTrainedModel',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_layoutlmv3"] = [
'TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFLayoutLMv3ForQuestionAnswering',
'TFLayoutLMv3ForSequenceClassification',
'TFLayoutLMv3ForTokenClassification',
'TFLayoutLMv3Model',
'TFLayoutLMv3PreTrainedModel',
]
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["feature_extraction_layoutlmv3"] = ['LayoutLMv3FeatureExtractor']
    _import_structure["image_processing_layoutlmv3"] = ['LayoutLMv3ImageProcessor']
if TYPE_CHECKING:
from .configuration_layoutlmva import (
LAYOUTLMV3_PRETRAINED_CONFIG_ARCHIVE_MAP,
LayoutLMvaConfig,
LayoutLMvaOnnxConfig,
)
from .processing_layoutlmva import LayoutLMvaProcessor
from .tokenization_layoutlmva import LayoutLMvaTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_layoutlmva_fast import LayoutLMvaTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_layoutlmva import (
LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST,
LayoutLMvaForQuestionAnswering,
LayoutLMvaForSequenceClassification,
LayoutLMvaForTokenClassification,
LayoutLMvaModel,
LayoutLMvaPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_layoutlmva import (
TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST,
TFLayoutLMvaForQuestionAnswering,
TFLayoutLMvaForSequenceClassification,
TFLayoutLMvaForTokenClassification,
TFLayoutLMvaModel,
TFLayoutLMvaPreTrainedModel,
)
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_layoutlmva import LayoutLMvaFeatureExtractor
from .image_processing_layoutlmva import LayoutLMvaImageProcessor
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 613 | 1 |
from __future__ import annotations
def _a ( value, weight, capacity ):
    # Sort item indexes by value/weight ratio, best first (greedy fractional knapsack).
    index = list(range(len(value ) ) )
    ratio = [v / w for v, w in zip(value, weight )]
    index.sort(key=lambda i: ratio[i], reverse=True )
    max_value: float = 0
    fractions: list[float] = [0] * len(value )
    for i in index:
        if weight[i] <= capacity:
            fractions[i] = 1
            max_value += value[i]
            capacity -= weight[i]
        else:
            fractions[i] = capacity / weight[i]
            max_value += value[i] * capacity / weight[i]
            break
    return max_value, fractions
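# Hedged usage sketch (classic textbook numbers, not from the source):
#     _a([60, 100, 120], [10, 20, 30], 50) -> (240.0, [1, 1, 0.666...])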
if __name__ == "__main__":
import doctest
doctest.testmod()
| 681 |
from functools import reduce
_A = (
"73167176531330624919225119674426574742355349194934"
"96983520312774506326239578318016984801869478851843"
"85861560789112949495459501737958331952853208805511"
"12540698747158523863050715693290963295227443043557"
"66896648950445244523161731856403098711121722383113"
"62229893423380308135336276614282806444486645238749"
"30358907296290491560440772390713810515859307960866"
"70172427121883998797908792274921901699720888093776"
"65727333001053367881220235421809751254540594752243"
"52584907711670556013604839586446706324415722155397"
"53697817977846174064955149290862569321978468622482"
"83972241375657056057490261407972968652414535100474"
"82166370484403199890008895243450658541227588666881"
"16427171479924442928230863465674813919123162824586"
"17866458359124566529476545682848912883142607690042"
"24219022671055626321111109370544217506941658960408"
"07198403850962455444362981230987879927244284909188"
"84580156166097919133875499200524063689912560717606"
"05886116467109405077541002256983155200055935729725"
"71636269561882670428252483600823257530420752963450"
)
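# The helper below scans every window of 13 adjacent digits in the 1000-digit number
# above and returns the largest product of such a window (Project Euler problem 8).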
def lowerCamelCase__ ( n : str = _A ):
    """simple docstring"""
    return max(
        # mypy cannot properly interpret reduce
        int(reduce(lambda x , y : str(int(x ) * int(y ) ) , n[i : i + 13] ) )
        for i in range(len(n ) - 12 ) )
if __name__ == "__main__":
print(f"""{solution() = }""")
| 290 | 0 |
import contextlib
import csv
import json
import os
import sqlitea
import tarfile
import textwrap
import zipfile
import pyarrow as pa
import pyarrow.parquet as pq
import pytest
import datasets
import datasets.config
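# The pytest fixtures below materialize small on-disk datasets (text, csv, json/jsonl,
# parquet, sqlite, plus compressed and archived variants) shared across the test suite.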
@pytest.fixture(scope="""session""" )
def snake_case_ ( ):
'''simple docstring'''
_lowerCAmelCase =10
_lowerCAmelCase =datasets.Features(
{
"""tokens""": datasets.Sequence(datasets.Value("""string""" ) ),
"""labels""": datasets.Sequence(datasets.ClassLabel(names=["""negative""", """positive"""] ) ),
"""answers""": datasets.Sequence(
{
"""text""": datasets.Value("""string""" ),
"""answer_start""": datasets.Value("""int32""" ),
} ),
"""id""": datasets.Value("""int64""" ),
} )
_lowerCAmelCase =datasets.Dataset.from_dict(
{
"""tokens""": [["""foo"""] * 5] * n,
"""labels""": [[1] * 5] * n,
"""answers""": [{"""answer_start""": [97], """text""": ["""1976"""]}] * 10,
"""id""": list(range(lowercase__ ) ),
} , features=lowercase__ , )
return dataset
@pytest.fixture(scope="""session""" )
def snake_case_ ( lowercase__ : Dict , lowercase__ : Optional[int] ):
'''simple docstring'''
_lowerCAmelCase =str(tmp_path_factory.mktemp("""data""" ) / """file.arrow""" )
dataset.map(cache_file_name=lowercase__ )
return filename
# FILE_CONTENT + files
__SCREAMING_SNAKE_CASE : Optional[Any] = '''\
Text data.
Second line of data.'''
@pytest.fixture(scope="""session""" )
def snake_case_ ( lowercase__ : int ):
'''simple docstring'''
_lowerCAmelCase =tmp_path_factory.mktemp("""data""" ) / """file.txt"""
_lowerCAmelCase =FILE_CONTENT
with open(lowercase__ , """w""" ) as f:
f.write(lowercase__ )
return filename
@pytest.fixture(scope="""session""" )
def snake_case_ ( lowercase__ : Any ):
'''simple docstring'''
import bza
_lowerCAmelCase =tmp_path_factory.mktemp("""data""" ) / """file.txt.bz2"""
_lowerCAmelCase =bytes(lowercase__ , """utf-8""" )
with bza.open(lowercase__ , """wb""" ) as f:
f.write(lowercase__ )
return path
@pytest.fixture(scope="""session""" )
def snake_case_ ( lowercase__ : int ):
'''simple docstring'''
import gzip
_lowerCAmelCase =str(tmp_path_factory.mktemp("""data""" ) / """file.txt.gz""" )
_lowerCAmelCase =bytes(lowercase__ , """utf-8""" )
with gzip.open(lowercase__ , """wb""" ) as f:
f.write(lowercase__ )
return path
@pytest.fixture(scope="""session""" )
def snake_case_ ( lowercase__ : Optional[Any] ):
'''simple docstring'''
if datasets.config.LZ4_AVAILABLE:
import lza.frame
_lowerCAmelCase =tmp_path_factory.mktemp("""data""" ) / """file.txt.lz4"""
_lowerCAmelCase =bytes(lowercase__ , """utf-8""" )
with lza.frame.open(lowercase__ , """wb""" ) as f:
f.write(lowercase__ )
return path
@pytest.fixture(scope="""session""" )
def snake_case_ ( lowercase__ : Any , lowercase__ : Union[str, Any] ):
'''simple docstring'''
if datasets.config.PY7ZR_AVAILABLE:
import pyazr
_lowerCAmelCase =tmp_path_factory.mktemp("""data""" ) / """file.txt.7z"""
with pyazr.SevenZipFile(lowercase__ , """w""" ) as archive:
archive.write(lowercase__ , arcname=os.path.basename(lowercase__ ) )
return path
@pytest.fixture(scope="""session""" )
def snake_case_ ( lowercase__ : Any , lowercase__ : Optional[int] ):
'''simple docstring'''
import tarfile
_lowerCAmelCase =tmp_path_factory.mktemp("""data""" ) / """file.txt.tar"""
with tarfile.TarFile(lowercase__ , """w""" ) as f:
f.add(lowercase__ , arcname=os.path.basename(lowercase__ ) )
return path
@pytest.fixture(scope="""session""" )
def snake_case_ ( lowercase__ : Tuple ):
'''simple docstring'''
import lzma
_lowerCAmelCase =tmp_path_factory.mktemp("""data""" ) / """file.txt.xz"""
_lowerCAmelCase =bytes(lowercase__ , """utf-8""" )
with lzma.open(lowercase__ , """wb""" ) as f:
f.write(lowercase__ )
return path
@pytest.fixture(scope="""session""" )
def snake_case_ ( lowercase__ : Union[str, Any] , lowercase__ : Optional[Any] ):
'''simple docstring'''
import zipfile
_lowerCAmelCase =tmp_path_factory.mktemp("""data""" ) / """file.txt.zip"""
with zipfile.ZipFile(lowercase__ , """w""" ) as f:
f.write(lowercase__ , arcname=os.path.basename(lowercase__ ) )
return path
@pytest.fixture(scope="""session""" )
def snake_case_ ( lowercase__ : str ):
'''simple docstring'''
if datasets.config.ZSTANDARD_AVAILABLE:
import zstandard as zstd
_lowerCAmelCase =tmp_path_factory.mktemp("""data""" ) / """file.txt.zst"""
_lowerCAmelCase =bytes(lowercase__ , """utf-8""" )
with zstd.open(lowercase__ , """wb""" ) as f:
f.write(lowercase__ )
return path
@pytest.fixture(scope="""session""" )
def snake_case_ ( lowercase__ : Tuple ):
'''simple docstring'''
_lowerCAmelCase =tmp_path_factory.mktemp("""data""" ) / """file.xml"""
_lowerCAmelCase =textwrap.dedent(
"""\
<?xml version=\"1.0\" encoding=\"UTF-8\" ?>
<tmx version=\"1.4\">
<header segtype=\"sentence\" srclang=\"ca\" />
<body>
<tu>
<tuv xml:lang=\"ca\"><seg>Contingut 1</seg></tuv>
<tuv xml:lang=\"en\"><seg>Content 1</seg></tuv>
</tu>
<tu>
<tuv xml:lang=\"ca\"><seg>Contingut 2</seg></tuv>
<tuv xml:lang=\"en\"><seg>Content 2</seg></tuv>
</tu>
<tu>
<tuv xml:lang=\"ca\"><seg>Contingut 3</seg></tuv>
<tuv xml:lang=\"en\"><seg>Content 3</seg></tuv>
</tu>
<tu>
<tuv xml:lang=\"ca\"><seg>Contingut 4</seg></tuv>
<tuv xml:lang=\"en\"><seg>Content 4</seg></tuv>
</tu>
<tu>
<tuv xml:lang=\"ca\"><seg>Contingut 5</seg></tuv>
<tuv xml:lang=\"en\"><seg>Content 5</seg></tuv>
</tu>
</body>
</tmx>""" )
with open(lowercase__ , """w""" ) as f:
f.write(lowercase__ )
return filename
__SCREAMING_SNAKE_CASE : str = [
{'''col_1''': '''0''', '''col_2''': 0, '''col_3''': 0.0},
{'''col_1''': '''1''', '''col_2''': 1, '''col_3''': 1.0},
{'''col_1''': '''2''', '''col_2''': 2, '''col_3''': 2.0},
{'''col_1''': '''3''', '''col_2''': 3, '''col_3''': 3.0},
]
__SCREAMING_SNAKE_CASE : int = [
{'''col_1''': '''4''', '''col_2''': 4, '''col_3''': 4.0},
{'''col_1''': '''5''', '''col_2''': 5, '''col_3''': 5.0},
]
__SCREAMING_SNAKE_CASE : str = {
'''col_1''': ['''0''', '''1''', '''2''', '''3'''],
'''col_2''': [0, 1, 2, 3],
'''col_3''': [0.0, 1.0, 2.0, 3.0],
}
__SCREAMING_SNAKE_CASE : Any = [
{'''col_3''': 0.0, '''col_1''': '''0''', '''col_2''': 0},
{'''col_3''': 1.0, '''col_1''': '''1''', '''col_2''': 1},
]
__SCREAMING_SNAKE_CASE : Dict = [
{'''col_1''': '''s0''', '''col_2''': 0, '''col_3''': 0.0},
{'''col_1''': '''s1''', '''col_2''': 1, '''col_3''': 1.0},
{'''col_1''': '''s2''', '''col_2''': 2, '''col_3''': 2.0},
{'''col_1''': '''s3''', '''col_2''': 3, '''col_3''': 3.0},
]
@pytest.fixture(scope="""session""" )
def snake_case_ ( ):
'''simple docstring'''
return DATA_DICT_OF_LISTS
@pytest.fixture(scope="""session""" )
def snake_case_ ( lowercase__ : Dict ):
'''simple docstring'''
_lowerCAmelCase =datasets.Dataset.from_dict(lowercase__ )
_lowerCAmelCase =str(tmp_path_factory.mktemp("""data""" ) / """dataset.arrow""" )
dataset.map(cache_file_name=lowercase__ )
return path
@pytest.fixture(scope="""session""" )
def snake_case_ ( lowercase__ : List[Any] ):
'''simple docstring'''
_lowerCAmelCase =str(tmp_path_factory.mktemp("""data""" ) / """dataset.sqlite""" )
with contextlib.closing(sqlitea.connect(lowercase__ ) ) as con:
_lowerCAmelCase =con.cursor()
cur.execute("""CREATE TABLE dataset(col_1 text, col_2 int, col_3 real)""" )
for item in DATA:
cur.execute("""INSERT INTO dataset(col_1, col_2, col_3) VALUES (?, ?, ?)""" , tuple(item.values() ) )
con.commit()
return path
@pytest.fixture(scope="""session""" )
def snake_case_ ( lowercase__ : Any ):
'''simple docstring'''
_lowerCAmelCase =str(tmp_path_factory.mktemp("""data""" ) / """dataset.csv""" )
with open(lowercase__ , """w""" , newline="""""" ) as f:
_lowerCAmelCase =csv.DictWriter(lowercase__ , fieldnames=["""col_1""", """col_2""", """col_3"""] )
writer.writeheader()
for item in DATA:
writer.writerow(lowercase__ )
return path
@pytest.fixture(scope="""session""" )
def snake_case_ ( lowercase__ : List[str] ):
'''simple docstring'''
_lowerCAmelCase =str(tmp_path_factory.mktemp("""data""" ) / """dataset2.csv""" )
with open(lowercase__ , """w""" , newline="""""" ) as f:
_lowerCAmelCase =csv.DictWriter(lowercase__ , fieldnames=["""col_1""", """col_2""", """col_3"""] )
writer.writeheader()
for item in DATA:
writer.writerow(lowercase__ )
return path
@pytest.fixture(scope="""session""" )
def snake_case_ ( lowercase__ : List[str] , lowercase__ : List[str] ):
'''simple docstring'''
import bza
_lowerCAmelCase =tmp_path_factory.mktemp("""data""" ) / """dataset.csv.bz2"""
with open(lowercase__ , """rb""" ) as f:
_lowerCAmelCase =f.read()
# data = bytes(FILE_CONTENT, "utf-8")
with bza.open(lowercase__ , """wb""" ) as f:
f.write(lowercase__ )
return path
@pytest.fixture(scope="""session""" )
def snake_case_ ( lowercase__ : str , lowercase__ : Dict , lowercase__ : Optional[int] ):
'''simple docstring'''
_lowerCAmelCase =tmp_path_factory.mktemp("""data""" ) / """dataset.csv.zip"""
with zipfile.ZipFile(lowercase__ , """w""" ) as f:
f.write(lowercase__ , arcname=os.path.basename(lowercase__ ) )
f.write(lowercase__ , arcname=os.path.basename(lowercase__ ) )
return path
@pytest.fixture(scope="""session""" )
def snake_case_ ( lowercase__ : Union[str, Any] , lowercase__ : Any , lowercase__ : Any ):
'''simple docstring'''
_lowerCAmelCase =tmp_path_factory.mktemp("""data""" ) / """dataset.csv.zip"""
with zipfile.ZipFile(lowercase__ , """w""" ) as f:
f.write(lowercase__ , arcname=os.path.basename(csv_path.replace(""".csv""" , """.CSV""" ) ) )
f.write(lowercase__ , arcname=os.path.basename(csva_path.replace(""".csv""" , """.CSV""" ) ) )
return path
@pytest.fixture(scope="""session""" )
def snake_case_ ( lowercase__ : List[Any] , lowercase__ : List[str] , lowercase__ : int ):
'''simple docstring'''
_lowerCAmelCase =tmp_path_factory.mktemp("""data""" ) / """dataset_with_dir.csv.zip"""
with zipfile.ZipFile(lowercase__ , """w""" ) as f:
f.write(lowercase__ , arcname=os.path.join("""main_dir""" , os.path.basename(lowercase__ ) ) )
f.write(lowercase__ , arcname=os.path.join("""main_dir""" , os.path.basename(lowercase__ ) ) )
return path
@pytest.fixture(scope="""session""" )
def snake_case_ ( lowercase__ : Tuple ):
'''simple docstring'''
_lowerCAmelCase =str(tmp_path_factory.mktemp("""data""" ) / """dataset.parquet""" )
_lowerCAmelCase =pa.schema(
{
"""col_1""": pa.string(),
"""col_2""": pa.intaa(),
"""col_3""": pa.floataa(),
} )
with open(lowercase__ , """wb""" ) as f:
_lowerCAmelCase =pq.ParquetWriter(lowercase__ , schema=lowercase__ )
_lowerCAmelCase =pa.Table.from_pydict({k: [DATA[i][k] for i in range(len(lowercase__ ) )] for k in DATA[0]} , schema=lowercase__ )
writer.write_table(lowercase__ )
writer.close()
return path
@pytest.fixture(scope="""session""" )
def snake_case_ ( lowercase__ : Optional[int] ):
'''simple docstring'''
_lowerCAmelCase =str(tmp_path_factory.mktemp("""data""" ) / """dataset.json""" )
_lowerCAmelCase ={"""data""": DATA}
with open(lowercase__ , """w""" ) as f:
json.dump(lowercase__ , lowercase__ )
return path
@pytest.fixture(scope="""session""" )
def snake_case_ ( lowercase__ : List[str] ):
'''simple docstring'''
_lowerCAmelCase =str(tmp_path_factory.mktemp("""data""" ) / """dataset.json""" )
_lowerCAmelCase ={"""data""": DATA_DICT_OF_LISTS}
with open(lowercase__ , """w""" ) as f:
json.dump(lowercase__ , lowercase__ )
return path
@pytest.fixture(scope="""session""" )
def snake_case_ ( lowercase__ : int ):
'''simple docstring'''
_lowerCAmelCase =str(tmp_path_factory.mktemp("""data""" ) / """dataset.jsonl""" )
with open(lowercase__ , """w""" ) as f:
for item in DATA:
f.write(json.dumps(lowercase__ ) + """\n""" )
return path
@pytest.fixture(scope="""session""" )
def snake_case_ ( lowercase__ : Any ):
'''simple docstring'''
_lowerCAmelCase =str(tmp_path_factory.mktemp("""data""" ) / """dataset2.jsonl""" )
with open(lowercase__ , """w""" ) as f:
for item in DATA:
f.write(json.dumps(lowercase__ ) + """\n""" )
return path
@pytest.fixture(scope="""session""" )
def snake_case_ ( lowercase__ : Optional[int] ):
'''simple docstring'''
_lowerCAmelCase =str(tmp_path_factory.mktemp("""data""" ) / """dataset_312.jsonl""" )
with open(lowercase__ , """w""" ) as f:
for item in DATA_312:
f.write(json.dumps(lowercase__ ) + """\n""" )
return path
@pytest.fixture(scope="""session""" )
def snake_case_ ( lowercase__ : int ):
'''simple docstring'''
_lowerCAmelCase =str(tmp_path_factory.mktemp("""data""" ) / """dataset-str.jsonl""" )
with open(lowercase__ , """w""" ) as f:
for item in DATA_STR:
f.write(json.dumps(lowercase__ ) + """\n""" )
return path
@pytest.fixture(scope="""session""" )
def snake_case_ ( lowercase__ : int , lowercase__ : Any ):
'''simple docstring'''
import gzip
_lowerCAmelCase =str(tmp_path_factory.mktemp("""data""" ) / """dataset.txt.gz""" )
with open(lowercase__ , """rb""" ) as orig_file:
with gzip.open(lowercase__ , """wb""" ) as zipped_file:
zipped_file.writelines(lowercase__ )
return path
@pytest.fixture(scope="""session""" )
def snake_case_ ( lowercase__ : Optional[int] , lowercase__ : Optional[int] ):
'''simple docstring'''
import gzip
_lowerCAmelCase =str(tmp_path_factory.mktemp("""data""" ) / """dataset.jsonl.gz""" )
with open(lowercase__ , """rb""" ) as orig_file:
with gzip.open(lowercase__ , """wb""" ) as zipped_file:
zipped_file.writelines(lowercase__ )
return path
@pytest.fixture(scope="""session""" )
def snake_case_ ( lowercase__ : Tuple , lowercase__ : Union[str, Any] , lowercase__ : Optional[Any] ):
'''simple docstring'''
_lowerCAmelCase =tmp_path_factory.mktemp("""data""" ) / """dataset.jsonl.zip"""
with zipfile.ZipFile(lowercase__ , """w""" ) as f:
f.write(lowercase__ , arcname=os.path.basename(lowercase__ ) )
f.write(lowercase__ , arcname=os.path.basename(lowercase__ ) )
return path
@pytest.fixture(scope="""session""" )
def snake_case_ ( lowercase__ : str , lowercase__ : Union[str, Any] , lowercase__ : Union[str, Any] , lowercase__ : Union[str, Any] ):
'''simple docstring'''
_lowerCAmelCase =tmp_path_factory.mktemp("""data""" ) / """dataset_nested.jsonl.zip"""
with zipfile.ZipFile(lowercase__ , """w""" ) as f:
f.write(lowercase__ , arcname=os.path.join("""nested""" , os.path.basename(lowercase__ ) ) )
return path
@pytest.fixture(scope="""session""" )
def snake_case_ ( lowercase__ : List[Any] , lowercase__ : List[Any] , lowercase__ : List[Any] ):
'''simple docstring'''
_lowerCAmelCase =tmp_path_factory.mktemp("""data""" ) / """dataset_with_dir.jsonl.zip"""
with zipfile.ZipFile(lowercase__ , """w""" ) as f:
f.write(lowercase__ , arcname=os.path.join("""main_dir""" , os.path.basename(lowercase__ ) ) )
f.write(lowercase__ , arcname=os.path.join("""main_dir""" , os.path.basename(lowercase__ ) ) )
return path
@pytest.fixture(scope="""session""" )
def tar_jsonl_path ( jsonl_path , jsonl2_path , tmp_path_factory ):
    '''simple docstring'''
    path = tmp_path_factory.mktemp("""data""" ) / """dataset.jsonl.tar"""
    with tarfile.TarFile(path , """w""" ) as f:
        f.add(jsonl_path , arcname=os.path.basename(jsonl_path ) )
        f.add(jsonl2_path , arcname=os.path.basename(jsonl2_path ) )
    return path
@pytest.fixture(scope="""session""" )
def tar_nested_jsonl_path ( tar_jsonl_path , jsonl_path , jsonl2_path , tmp_path_factory ):
    '''simple docstring'''
    path = tmp_path_factory.mktemp("""data""" ) / """dataset_nested.jsonl.tar"""
    with tarfile.TarFile(path , """w""" ) as f:
        f.add(tar_jsonl_path , arcname=os.path.join("""nested""" , os.path.basename(tar_jsonl_path ) ) )
    return path
@pytest.fixture(scope="""session""" )
def text_path ( tmp_path_factory ):
    '''simple docstring'''
    data = ["""0""", """1""", """2""", """3"""]
    path = str(tmp_path_factory.mktemp("""data""" ) / """dataset.txt""" )
    with open(path , """w""" ) as f:
        for item in data:
            f.write(item + """\n""" )
    return path
@pytest.fixture(scope="""session""" )
def text2_path ( tmp_path_factory ):
    '''simple docstring'''
    data = ["""0""", """1""", """2""", """3"""]
    path = str(tmp_path_factory.mktemp("""data""" ) / """dataset2.txt""" )
    with open(path , """w""" ) as f:
        for item in data:
            f.write(item + """\n""" )
    return path
@pytest.fixture(scope="""session""" )
def snake_case_ ( tmp_path_factory ):
    '''simple docstring'''
    data = ["""0""", """1""", """2""", """3"""]
    path = tmp_path_factory.mktemp("""data""" ) / """dataset.abc"""
    with open(path , """w""" ) as f:
        for item in data:
            f.write(item + """\n""" )
    return path
@pytest.fixture(scope="""session""" )
def zip_text_path ( text_path , text2_path , tmp_path_factory ):
    '''simple docstring'''
    path = tmp_path_factory.mktemp("""data""" ) / """dataset.text.zip"""
    with zipfile.ZipFile(path , """w""" ) as f:
        f.write(text_path , arcname=os.path.basename(text_path ) )
        f.write(text2_path , arcname=os.path.basename(text2_path ) )
    return path
@pytest.fixture(scope="""session""" )
def zip_text_with_dir_path ( text_path , text2_path , tmp_path_factory ):
    '''simple docstring'''
    path = tmp_path_factory.mktemp("""data""" ) / """dataset_with_dir.text.zip"""
    with zipfile.ZipFile(path , """w""" ) as f:
        f.write(text_path , arcname=os.path.join("""main_dir""" , os.path.basename(text_path ) ) )
        f.write(text2_path , arcname=os.path.join("""main_dir""" , os.path.basename(text2_path ) ) )
    return path
@pytest.fixture(scope="""session""" )
def zip_unsupported_ext_path ( text_path , text2_path , tmp_path_factory ):
    '''simple docstring'''
    path = tmp_path_factory.mktemp("""data""" ) / """dataset.ext.zip"""
    with zipfile.ZipFile(path , """w""" ) as f:
        f.write(text_path , arcname=os.path.basename("""unsupported.ext""" ) )
        f.write(text2_path , arcname=os.path.basename("""unsupported_2.ext""" ) )
    return path
@pytest.fixture(scope="""session""" )
def text_path_with_unicode_new_lines ( tmp_path_factory ):
    '''simple docstring'''
    text = """\n""".join(["""First""", """Second\u2029with Unicode new line""", """Third"""] )
    path = str(tmp_path_factory.mktemp("""data""" ) / """dataset_with_unicode_new_lines.txt""" )
    with open(path , """w""" , encoding="""utf-8""" ) as f:
        f.write(text )
    return path
@pytest.fixture(scope="""session""" )
def image_file ( ):
    '''simple docstring'''
    return os.path.join("""tests""" , """features""" , """data""" , """test_image_rgb.jpg""" )
@pytest.fixture(scope="""session""" )
def audio_file ( ):
    '''simple docstring'''
    return os.path.join("""tests""" , """features""" , """data""" , """test_audio_44100.wav""" )
@pytest.fixture(scope="""session""" )
def zip_image_path ( image_file , tmp_path_factory ):
    '''simple docstring'''
    path = tmp_path_factory.mktemp("""data""" ) / """dataset.img.zip"""
    with zipfile.ZipFile(path , """w""" ) as f:
        f.write(image_file , arcname=os.path.basename(image_file ) )
        f.write(image_file , arcname=os.path.basename(image_file ).replace(""".jpg""" , """2.jpg""" ) )
    return path
@pytest.fixture(scope="""session""" )
def data_dir_with_hidden_files ( tmp_path_factory ):
    '''simple docstring'''
    data_dir = tmp_path_factory.mktemp("""data_dir""" )
(data_dir / "subdir").mkdir()
with open(data_dir / """subdir""" / """train.txt""" , """w""" ) as f:
f.write("""foo\n""" * 10 )
with open(data_dir / """subdir""" / """test.txt""" , """w""" ) as f:
f.write("""bar\n""" * 10 )
# hidden file
with open(data_dir / """subdir""" / """.test.txt""" , """w""" ) as f:
f.write("""bar\n""" * 10 )
# hidden directory
(data_dir / ".subdir").mkdir()
with open(data_dir / """.subdir""" / """train.txt""" , """w""" ) as f:
f.write("""foo\n""" * 10 )
with open(data_dir / """.subdir""" / """test.txt""" , """w""" ) as f:
f.write("""bar\n""" * 10 )
return data_dir
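# Usage sketch (an illustrative addition, not part of the original conftest): pytest
# injects a session-scoped fixture by matching a test's parameter name to the fixture
# function's name, e.g.
#
#     def test_text_file_exists(text_path):
#         assert os.path.isfile(text_path)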
| 702 |
def permute ( nums : list[int] ):
    '''simple docstring'''
    result = []
    if len(nums ) == 1:
        return [nums.copy()]
    for _ in range(len(nums ) ):
        n = nums.pop(0 )
        permutations = permute(nums )
        for perm in permutations:
            perm.append(n )
        result.extend(permutations )
        nums.append(n )
    return result
def permutea ( nums : list[int] ):
    '''simple docstring'''
    def backtrack(start : int ):
        if start == len(nums ) - 1:
            output.append(nums[:] )
        else:
            for i in range(start , len(nums ) ):
                nums[i], nums[start] = nums[start], nums[i]
                backtrack(start + 1 )
                nums[i], nums[start] = nums[start], nums[i]  # backtrack
    output = []
    backtrack(0 )
    return output
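# Quick equivalence check (added for illustration; an assumption about intended
# behavior, not part of the original file): both functions enumerate the same n!
# permutations, only in different orders, so comparing sorted outputs is a cheap
# sanity test, e.g. `assert _permutations_agree([1, 2, 3])`.
def _permutations_agree(items : list[int] ):
    return sorted(permute(list(items ) ) ) == sorted(permutea(list(items ) ) )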
if __name__ == "__main__":
    import doctest
    # use res to print the data from the backtracking version (permutea)
    res = permutea([1, 2, 3])
    print(res)
    doctest.testmod()
| 149 | 0 |
'''simple docstring'''
import json
import os
from typing import Optional
import numpy as np
from ...feature_extraction_utils import BatchFeature
from ...processing_utils import ProcessorMixin
from ...utils import logging
from ...utils.hub import get_file_from_repo
from ..auto import AutoTokenizer
logger = logging.get_logger(__name__)
class lowerCamelCase_ (ProcessorMixin ):
    '''simple docstring'''
    tokenizer_class = "AutoTokenizer"
    attributes = ["tokenizer"]
    preset_shape = {
        "semantic_prompt": 1,
        "coarse_prompt": 2,
        "fine_prompt": 2,
    }
    def __init__( self , tokenizer , speaker_embeddings=None ):
        super().__init__(tokenizer )
        self.speaker_embeddings = speaker_embeddings
@classmethod
def _A ( cls : Optional[Any] , A : Tuple , A : int="speaker_embeddings_path.json" , **A : Tuple ):
if speaker_embeddings_dict_path is not None:
_UpperCAmelCase : int = get_file_from_repo(
A , A , subfolder=kwargs.pop("subfolder" , A ) , cache_dir=kwargs.pop("cache_dir" , A ) , force_download=kwargs.pop("force_download" , A ) , proxies=kwargs.pop("proxies" , A ) , resume_download=kwargs.pop("resume_download" , A ) , local_files_only=kwargs.pop("local_files_only" , A ) , use_auth_token=kwargs.pop("use_auth_token" , A ) , revision=kwargs.pop("revision" , A ) , )
if speaker_embeddings_path is None:
logger.warning(
F"""`{os.path.join(A , A )}` does not exists
, no preloaded speaker embeddings will be used - Make sure to provide a correct path to the json
dictionnary if wanted, otherwise set `speaker_embeddings_dict_path=None`.""" )
_UpperCAmelCase : List[str] = None
else:
with open(A ) as speaker_embeddings_json:
_UpperCAmelCase : List[str] = json.load(A )
else:
_UpperCAmelCase : List[str] = None
_UpperCAmelCase : List[str] = AutoTokenizer.from_pretrained(A , **A )
return cls(tokenizer=A , speaker_embeddings=A )
def _A ( self : Dict , A : int , A : str="speaker_embeddings_path.json" , A : Any="speaker_embeddings" , A : bool = False , **A : Tuple , ):
if self.speaker_embeddings is not None:
os.makedirs(os.path.join(A , A , "v2" ) , exist_ok=A )
_UpperCAmelCase : List[str] = {}
_UpperCAmelCase : Optional[int] = save_directory
for prompt_key in self.speaker_embeddings:
if prompt_key != "repo_or_path":
_UpperCAmelCase : Optional[Any] = self._load_voice_preset(A )
_UpperCAmelCase : List[str] = {}
for key in self.speaker_embeddings[prompt_key]:
np.save(
os.path.join(
embeddings_dict["repo_or_path"] , A , F"""{prompt_key}_{key}""" ) , voice_preset[key] , allow_pickle=A , )
_UpperCAmelCase : Optional[int] = os.path.join(A , F"""{prompt_key}_{key}.npy""" )
_UpperCAmelCase : str = tmp_dict
with open(os.path.join(A , A ) , "w" ) as fp:
json.dump(A , A )
super().save_pretrained(A , A , **A )
def _A ( self : Union[str, Any] , A : str = None , **A : Tuple ):
_UpperCAmelCase : int = self.speaker_embeddings[voice_preset]
_UpperCAmelCase : List[str] = {}
for key in ["semantic_prompt", "coarse_prompt", "fine_prompt"]:
if key not in voice_preset_paths:
raise ValueError(
F"""Voice preset unrecognized, missing {key} as a key in self.speaker_embeddings[{voice_preset}].""" )
_UpperCAmelCase : int = get_file_from_repo(
self.speaker_embeddings.get("repo_or_path" , "/" ) , voice_preset_paths[key] , subfolder=kwargs.pop("subfolder" , A ) , cache_dir=kwargs.pop("cache_dir" , A ) , force_download=kwargs.pop("force_download" , A ) , proxies=kwargs.pop("proxies" , A ) , resume_download=kwargs.pop("resume_download" , A ) , local_files_only=kwargs.pop("local_files_only" , A ) , use_auth_token=kwargs.pop("use_auth_token" , A ) , revision=kwargs.pop("revision" , A ) , )
if path is None:
raise ValueError(
F"""`{os.path.join(self.speaker_embeddings.get('repo_or_path' , '/' ) , voice_preset_paths[key] )}` does not exists
, no preloaded voice preset will be used - Make sure to provide correct paths to the {voice_preset}
embeddings.""" )
_UpperCAmelCase : List[str] = np.load(A )
return voice_preset_dict
def _A ( self : Optional[int] , A : Optional[dict] = None ):
for key in ["semantic_prompt", "coarse_prompt", "fine_prompt"]:
if key not in voice_preset:
raise ValueError(F"""Voice preset unrecognized, missing {key} as a key.""" )
if not isinstance(voice_preset[key] , np.ndarray ):
raise ValueError(F"""{key} voice preset must be a {str(self.preset_shape[key] )}D ndarray.""" )
if len(voice_preset[key].shape ) != self.preset_shape[key]:
raise ValueError(F"""{key} voice preset must be a {str(self.preset_shape[key] )}D ndarray.""" )
def __call__( self : Optional[Any] , A : Optional[Any]=None , A : Optional[Any]=None , A : List[Any]="pt" , A : Any=256 , A : Union[str, Any]=False , A : Tuple=True , A : int=False , **A : List[str] , ):
if voice_preset is not None and not isinstance(A , A ):
if (
isinstance(A , A )
and self.speaker_embeddings is not None
and voice_preset in self.speaker_embeddings
):
_UpperCAmelCase : str = self._load_voice_preset(A )
else:
if isinstance(A , A ) and not voice_preset.endswith(".npz" ):
_UpperCAmelCase : str = voice_preset + ".npz"
_UpperCAmelCase : Optional[Any] = np.load(A )
if voice_preset is not None:
self._validate_voice_preset_dict(A , **A )
_UpperCAmelCase : Any = BatchFeature(data=A , tensor_type=A )
_UpperCAmelCase : List[str] = self.tokenizer(
A , return_tensors=A , padding="max_length" , max_length=A , return_attention_mask=A , return_token_type_ids=A , add_special_tokens=A , **A , )
if voice_preset is not None:
_UpperCAmelCase : Optional[int] = voice_preset
return encoded_text
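# Usage sketch for the processor above (BarkProcessor in the upstream library; the
# checkpoint and voice-preset names are illustrative assumptions, not taken from
# this file):
#
#     processor = BarkProcessor.from_pretrained("suno/bark-small")
#     inputs = processor("Hello, my dog is cute", voice_preset="v2/en_speaker_6")
#
# `inputs` is a BatchFeature holding the tokenized text and, when a preset is given,
# the validated semantic/coarse/fine prompt arrays.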
| 244 | '''simple docstring'''
from ..utils import DummyObject, requires_backends
class lowerCamelCase_ (metaclass=DummyObject ):
    '''simple docstring'''
    _backends = ["torch"]
def __init__( self : Tuple , *A : Any , **A : Any ):
requires_backends(self , ["torch"] )
@classmethod
def _A ( cls : Tuple , *A : List[Any] , **A : str ):
requires_backends(cls , ["torch"] )
@classmethod
def _A ( cls : Union[str, Any] , *A : Optional[Any] , **A : Optional[Any] ):
requires_backends(cls , ["torch"] )
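# Note (explanatory comment, not in the original file): every class below repeats this
# exact pattern, with `snake_case__` standing in for the same `DummyObject` metaclass
# used above. The metaclass plus the `_backends = ["torch"]` marker make each
# placeholder raise a helpful ImportError through `requires_backends` as soon as it is
# instantiated or one of its classmethods is called, so top-level imports still
# succeed when torch is not installed.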
class lowerCamelCase_ (metaclass=snake_case__ ):
'''simple docstring'''
__UpperCamelCase: List[Any] = ["torch"]
def __init__( self : List[str] , *A : List[str] , **A : Any ):
requires_backends(self , ["torch"] )
@classmethod
def _A ( cls : Tuple , *A : List[str] , **A : Dict ):
requires_backends(cls , ["torch"] )
@classmethod
def _A ( cls : int , *A : Optional[int] , **A : Dict ):
requires_backends(cls , ["torch"] )
class lowerCamelCase_ (metaclass=snake_case__ ):
'''simple docstring'''
__UpperCamelCase: Any = ["torch"]
def __init__( self : List[str] , *A : int , **A : Dict ):
requires_backends(self , ["torch"] )
@classmethod
def _A ( cls : int , *A : Optional[Any] , **A : Union[str, Any] ):
requires_backends(cls , ["torch"] )
@classmethod
def _A ( cls : Optional[int] , *A : List[str] , **A : str ):
requires_backends(cls , ["torch"] )
class lowerCamelCase_ (metaclass=snake_case__ ):
'''simple docstring'''
__UpperCamelCase: Union[str, Any] = ["torch"]
def __init__( self : Optional[Any] , *A : Optional[Any] , **A : Union[str, Any] ):
requires_backends(self , ["torch"] )
@classmethod
def _A ( cls : Optional[Any] , *A : Optional[int] , **A : List[Any] ):
requires_backends(cls , ["torch"] )
@classmethod
def _A ( cls : List[str] , *A : Optional[int] , **A : Dict ):
requires_backends(cls , ["torch"] )
class lowerCamelCase_ (metaclass=snake_case__ ):
'''simple docstring'''
__UpperCamelCase: List[str] = ["torch"]
def __init__( self : Optional[Any] , *A : int , **A : Union[str, Any] ):
requires_backends(self , ["torch"] )
@classmethod
def _A ( cls : List[str] , *A : Optional[Any] , **A : Any ):
requires_backends(cls , ["torch"] )
@classmethod
def _A ( cls : List[Any] , *A : Optional[Any] , **A : Optional[int] ):
requires_backends(cls , ["torch"] )
class lowerCamelCase_ (metaclass=snake_case__ ):
'''simple docstring'''
__UpperCamelCase: List[str] = ["torch"]
def __init__( self : int , *A : Optional[Any] , **A : Dict ):
requires_backends(self , ["torch"] )
@classmethod
def _A ( cls : List[Any] , *A : Tuple , **A : List[str] ):
requires_backends(cls , ["torch"] )
@classmethod
def _A ( cls : int , *A : Tuple , **A : int ):
requires_backends(cls , ["torch"] )
class lowerCamelCase_ (metaclass=snake_case__ ):
'''simple docstring'''
__UpperCamelCase: Dict = ["torch"]
def __init__( self : Tuple , *A : str , **A : Union[str, Any] ):
requires_backends(self , ["torch"] )
@classmethod
def _A ( cls : List[str] , *A : List[str] , **A : List[Any] ):
requires_backends(cls , ["torch"] )
@classmethod
def _A ( cls : List[str] , *A : Any , **A : List[str] ):
requires_backends(cls , ["torch"] )
class lowerCamelCase_ (metaclass=snake_case__ ):
'''simple docstring'''
__UpperCamelCase: str = ["torch"]
def __init__( self : Optional[int] , *A : Any , **A : List[Any] ):
requires_backends(self , ["torch"] )
@classmethod
def _A ( cls : str , *A : List[str] , **A : Any ):
requires_backends(cls , ["torch"] )
@classmethod
def _A ( cls : List[str] , *A : List[str] , **A : List[Any] ):
requires_backends(cls , ["torch"] )
class lowerCamelCase_ (metaclass=snake_case__ ):
'''simple docstring'''
__UpperCamelCase: Optional[int] = ["torch"]
def __init__( self : Dict , *A : Any , **A : Dict ):
requires_backends(self , ["torch"] )
@classmethod
def _A ( cls : str , *A : List[str] , **A : Tuple ):
requires_backends(cls , ["torch"] )
@classmethod
def _A ( cls : int , *A : Optional[int] , **A : Any ):
requires_backends(cls , ["torch"] )
class lowerCamelCase_ (metaclass=snake_case__ ):
'''simple docstring'''
__UpperCamelCase: Optional[Any] = ["torch"]
def __init__( self : int , *A : int , **A : Any ):
requires_backends(self , ["torch"] )
@classmethod
def _A ( cls : List[str] , *A : Optional[int] , **A : Union[str, Any] ):
requires_backends(cls , ["torch"] )
@classmethod
def _A ( cls : Tuple , *A : Optional[Any] , **A : List[Any] ):
requires_backends(cls , ["torch"] )
class lowerCamelCase_ (metaclass=snake_case__ ):
'''simple docstring'''
__UpperCamelCase: Dict = ["torch"]
def __init__( self : Any , *A : Union[str, Any] , **A : Tuple ):
requires_backends(self , ["torch"] )
@classmethod
def _A ( cls : str , *A : List[Any] , **A : str ):
requires_backends(cls , ["torch"] )
@classmethod
def _A ( cls : Dict , *A : int , **A : Any ):
requires_backends(cls , ["torch"] )
def UpperCamelCase_ ( *_UpperCAmelCase : Tuple , **_UpperCAmelCase : str ) -> Optional[Any]:
"""simple docstring"""
requires_backends(_UpperCAmelCase , ["torch"] )
def UpperCamelCase_ ( *_UpperCAmelCase : List[str] , **_UpperCAmelCase : Optional[int] ) -> int:
"""simple docstring"""
requires_backends(_UpperCAmelCase , ["torch"] )
def UpperCamelCase_ ( *_UpperCAmelCase : Tuple , **_UpperCAmelCase : List[str] ) -> List[Any]:
"""simple docstring"""
requires_backends(_UpperCAmelCase , ["torch"] )
def UpperCamelCase_ ( *_UpperCAmelCase : List[str] , **_UpperCAmelCase : Any ) -> Optional[int]:
"""simple docstring"""
requires_backends(_UpperCAmelCase , ["torch"] )
def UpperCamelCase_ ( *_UpperCAmelCase : Tuple , **_UpperCAmelCase : Tuple ) -> int:
"""simple docstring"""
requires_backends(_UpperCAmelCase , ["torch"] )
def UpperCamelCase_ ( *_UpperCAmelCase : int , **_UpperCAmelCase : Any ) -> Optional[int]:
"""simple docstring"""
requires_backends(_UpperCAmelCase , ["torch"] )
def UpperCamelCase_ ( *_UpperCAmelCase : Union[str, Any] , **_UpperCAmelCase : List[Any] ) -> Union[str, Any]:
"""simple docstring"""
requires_backends(_UpperCAmelCase , ["torch"] )
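# The bare functions above guard module-level helpers the same way: calling one
# without torch installed raises the backend ImportError from `requires_backends`
# immediately. (Explanatory comment, not in the original file.)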
class lowerCamelCase_ (metaclass=snake_case__ ):
'''simple docstring'''
__UpperCamelCase: Union[str, Any] = ["torch"]
def __init__( self : Optional[int] , *A : Tuple , **A : int ):
requires_backends(self , ["torch"] )
@classmethod
def _A ( cls : Optional[Any] , *A : List[str] , **A : Union[str, Any] ):
requires_backends(cls , ["torch"] )
@classmethod
def _A ( cls : int , *A : Optional[int] , **A : Optional[Any] ):
requires_backends(cls , ["torch"] )
class lowerCamelCase_ (metaclass=snake_case__ ):
'''simple docstring'''
__UpperCamelCase: Optional[int] = ["torch"]
def __init__( self : Dict , *A : List[str] , **A : str ):
requires_backends(self , ["torch"] )
@classmethod
def _A ( cls : Optional[int] , *A : Optional[Any] , **A : Optional[Any] ):
requires_backends(cls , ["torch"] )
@classmethod
def _A ( cls : Optional[Any] , *A : Any , **A : Dict ):
requires_backends(cls , ["torch"] )
class lowerCamelCase_ (metaclass=snake_case__ ):
'''simple docstring'''
__UpperCamelCase: List[str] = ["torch"]
def __init__( self : List[Any] , *A : int , **A : List[str] ):
requires_backends(self , ["torch"] )
@classmethod
def _A ( cls : str , *A : List[str] , **A : List[Any] ):
requires_backends(cls , ["torch"] )
@classmethod
def _A ( cls : Union[str, Any] , *A : Optional[int] , **A : Optional[Any] ):
requires_backends(cls , ["torch"] )
class lowerCamelCase_ (metaclass=snake_case__ ):
'''simple docstring'''
__UpperCamelCase: Union[str, Any] = ["torch"]
def __init__( self : Optional[int] , *A : List[Any] , **A : str ):
requires_backends(self , ["torch"] )
@classmethod
def _A ( cls : Dict , *A : str , **A : Any ):
requires_backends(cls , ["torch"] )
@classmethod
def _A ( cls : Dict , *A : Optional[int] , **A : Optional[int] ):
requires_backends(cls , ["torch"] )
class lowerCamelCase_ (metaclass=snake_case__ ):
'''simple docstring'''
__UpperCamelCase: Any = ["torch"]
def __init__( self : List[str] , *A : List[Any] , **A : List[Any] ):
requires_backends(self , ["torch"] )
@classmethod
def _A ( cls : List[str] , *A : str , **A : Tuple ):
requires_backends(cls , ["torch"] )
@classmethod
def _A ( cls : Optional[Any] , *A : Tuple , **A : List[Any] ):
requires_backends(cls , ["torch"] )
class lowerCamelCase_ (metaclass=snake_case__ ):
'''simple docstring'''
__UpperCamelCase: Optional[Any] = ["torch"]
def __init__( self : Dict , *A : Optional[int] , **A : List[Any] ):
requires_backends(self , ["torch"] )
@classmethod
def _A ( cls : Optional[int] , *A : Any , **A : List[str] ):
requires_backends(cls , ["torch"] )
@classmethod
def _A ( cls : Optional[Any] , *A : Optional[int] , **A : List[str] ):
requires_backends(cls , ["torch"] )
class lowerCamelCase_ (metaclass=snake_case__ ):
'''simple docstring'''
__UpperCamelCase: int = ["torch"]
def __init__( self : Union[str, Any] , *A : int , **A : Optional[Any] ):
requires_backends(self , ["torch"] )
@classmethod
def _A ( cls : Dict , *A : List[str] , **A : Tuple ):
requires_backends(cls , ["torch"] )
@classmethod
def _A ( cls : str , *A : List[Any] , **A : List[str] ):
requires_backends(cls , ["torch"] )
class lowerCamelCase_ (metaclass=snake_case__ ):
'''simple docstring'''
__UpperCamelCase: Union[str, Any] = ["torch"]
def __init__( self : Union[str, Any] , *A : List[str] , **A : Any ):
requires_backends(self , ["torch"] )
@classmethod
def _A ( cls : Tuple , *A : Optional[Any] , **A : Dict ):
requires_backends(cls , ["torch"] )
@classmethod
def _A ( cls : Tuple , *A : List[Any] , **A : Dict ):
requires_backends(cls , ["torch"] )
class lowerCamelCase_ (metaclass=snake_case__ ):
'''simple docstring'''
__UpperCamelCase: List[str] = ["torch"]
def __init__( self : List[str] , *A : Union[str, Any] , **A : Optional[int] ):
requires_backends(self , ["torch"] )
@classmethod
def _A ( cls : Optional[Any] , *A : Tuple , **A : str ):
requires_backends(cls , ["torch"] )
@classmethod
def _A ( cls : int , *A : Tuple , **A : List[str] ):
requires_backends(cls , ["torch"] )
class lowerCamelCase_ (metaclass=snake_case__ ):
'''simple docstring'''
__UpperCamelCase: Dict = ["torch"]
def __init__( self : Any , *A : Union[str, Any] , **A : Dict ):
requires_backends(self , ["torch"] )
@classmethod
def _A ( cls : List[Any] , *A : Dict , **A : int ):
requires_backends(cls , ["torch"] )
@classmethod
def _A ( cls : Union[str, Any] , *A : Any , **A : Optional[int] ):
requires_backends(cls , ["torch"] )
class lowerCamelCase_ (metaclass=snake_case__ ):
'''simple docstring'''
__UpperCamelCase: str = ["torch"]
def __init__( self : Optional[Any] , *A : Union[str, Any] , **A : Optional[int] ):
requires_backends(self , ["torch"] )
@classmethod
def _A ( cls : List[str] , *A : int , **A : Optional[Any] ):
requires_backends(cls , ["torch"] )
@classmethod
def _A ( cls : Optional[int] , *A : str , **A : Union[str, Any] ):
requires_backends(cls , ["torch"] )
class lowerCamelCase_ (metaclass=snake_case__ ):
'''simple docstring'''
__UpperCamelCase: List[str] = ["torch"]
def __init__( self : Optional[Any] , *A : Tuple , **A : Dict ):
requires_backends(self , ["torch"] )
@classmethod
def _A ( cls : Dict , *A : Optional[Any] , **A : int ):
requires_backends(cls , ["torch"] )
@classmethod
def _A ( cls : str , *A : List[Any] , **A : str ):
requires_backends(cls , ["torch"] )
class lowerCamelCase_ (metaclass=snake_case__ ):
'''simple docstring'''
__UpperCamelCase: str = ["torch"]
def __init__( self : str , *A : Dict , **A : Any ):
requires_backends(self , ["torch"] )
@classmethod
def _A ( cls : Tuple , *A : List[str] , **A : Dict ):
requires_backends(cls , ["torch"] )
@classmethod
def _A ( cls : Union[str, Any] , *A : Any , **A : Union[str, Any] ):
requires_backends(cls , ["torch"] )
class lowerCamelCase_ (metaclass=snake_case__ ):
'''simple docstring'''
__UpperCamelCase: Optional[Any] = ["torch"]
def __init__( self : Any , *A : List[str] , **A : Optional[int] ):
requires_backends(self , ["torch"] )
@classmethod
def _A ( cls : Union[str, Any] , *A : Dict , **A : Union[str, Any] ):
requires_backends(cls , ["torch"] )
@classmethod
def _A ( cls : List[Any] , *A : Optional[Any] , **A : Any ):
requires_backends(cls , ["torch"] )
class lowerCamelCase_ (metaclass=snake_case__ ):
'''simple docstring'''
__UpperCamelCase: str = ["torch"]
def __init__( self : Any , *A : List[str] , **A : Dict ):
requires_backends(self , ["torch"] )
@classmethod
def _A ( cls : Dict , *A : Dict , **A : int ):
requires_backends(cls , ["torch"] )
@classmethod
def _A ( cls : Any , *A : Any , **A : Dict ):
requires_backends(cls , ["torch"] )
class lowerCamelCase_ (metaclass=snake_case__ ):
'''simple docstring'''
__UpperCamelCase: str = ["torch"]
def __init__( self : Union[str, Any] , *A : List[Any] , **A : str ):
requires_backends(self , ["torch"] )
@classmethod
def _A ( cls : List[str] , *A : str , **A : str ):
requires_backends(cls , ["torch"] )
@classmethod
def _A ( cls : Dict , *A : Dict , **A : Optional[Any] ):
requires_backends(cls , ["torch"] )
class lowerCamelCase_ (metaclass=snake_case__ ):
'''simple docstring'''
__UpperCamelCase: Dict = ["torch"]
def __init__( self : Optional[int] , *A : Dict , **A : int ):
requires_backends(self , ["torch"] )
@classmethod
def _A ( cls : Optional[Any] , *A : Dict , **A : Optional[Any] ):
requires_backends(cls , ["torch"] )
@classmethod
def _A ( cls : Optional[Any] , *A : List[str] , **A : List[Any] ):
requires_backends(cls , ["torch"] )
class lowerCamelCase_ (metaclass=snake_case__ ):
'''simple docstring'''
__UpperCamelCase: Any = ["torch"]
def __init__( self : int , *A : int , **A : str ):
requires_backends(self , ["torch"] )
@classmethod
def _A ( cls : Dict , *A : Tuple , **A : int ):
requires_backends(cls , ["torch"] )
@classmethod
def _A ( cls : List[str] , *A : List[Any] , **A : Optional[Any] ):
requires_backends(cls , ["torch"] )
class lowerCamelCase_ (metaclass=snake_case__ ):
'''simple docstring'''
__UpperCamelCase: Union[str, Any] = ["torch"]
def __init__( self : Dict , *A : List[Any] , **A : str ):
requires_backends(self , ["torch"] )
@classmethod
def _A ( cls : str , *A : Tuple , **A : Union[str, Any] ):
requires_backends(cls , ["torch"] )
@classmethod
def _A ( cls : Tuple , *A : str , **A : List[str] ):
requires_backends(cls , ["torch"] )
class lowerCamelCase_ (metaclass=snake_case__ ):
'''simple docstring'''
__UpperCamelCase: int = ["torch"]
def __init__( self : Tuple , *A : List[Any] , **A : int ):
requires_backends(self , ["torch"] )
@classmethod
def _A ( cls : Optional[int] , *A : str , **A : Tuple ):
requires_backends(cls , ["torch"] )
@classmethod
def _A ( cls : Optional[int] , *A : List[Any] , **A : Union[str, Any] ):
requires_backends(cls , ["torch"] )
class lowerCamelCase_ (metaclass=snake_case__ ):
'''simple docstring'''
__UpperCamelCase: List[Any] = ["torch"]
def __init__( self : Union[str, Any] , *A : Any , **A : int ):
requires_backends(self , ["torch"] )
@classmethod
def _A ( cls : Optional[Any] , *A : Optional[Any] , **A : Optional[Any] ):
requires_backends(cls , ["torch"] )
@classmethod
def _A ( cls : List[str] , *A : int , **A : Optional[int] ):
requires_backends(cls , ["torch"] )
class lowerCamelCase_ (metaclass=snake_case__ ):
'''simple docstring'''
__UpperCamelCase: Optional[int] = ["torch"]
def __init__( self : Tuple , *A : List[Any] , **A : Tuple ):
requires_backends(self , ["torch"] )
@classmethod
def _A ( cls : Any , *A : Any , **A : List[Any] ):
requires_backends(cls , ["torch"] )
@classmethod
def _A ( cls : List[str] , *A : int , **A : str ):
requires_backends(cls , ["torch"] )
class lowerCamelCase_ (metaclass=snake_case__ ):
'''simple docstring'''
__UpperCamelCase: List[Any] = ["torch"]
def __init__( self : int , *A : List[str] , **A : Optional[Any] ):
requires_backends(self , ["torch"] )
@classmethod
def _A ( cls : Optional[int] , *A : Optional[int] , **A : Tuple ):
requires_backends(cls , ["torch"] )
@classmethod
def _A ( cls : Optional[Any] , *A : Optional[Any] , **A : Dict ):
requires_backends(cls , ["torch"] )
class lowerCamelCase_ (metaclass=snake_case__ ):
'''simple docstring'''
__UpperCamelCase: Dict = ["torch"]
def __init__( self : Any , *A : Optional[Any] , **A : Union[str, Any] ):
requires_backends(self , ["torch"] )
@classmethod
def _A ( cls : Optional[Any] , *A : int , **A : Optional[Any] ):
requires_backends(cls , ["torch"] )
@classmethod
def _A ( cls : str , *A : Union[str, Any] , **A : int ):
requires_backends(cls , ["torch"] )
class lowerCamelCase_ (metaclass=snake_case__ ):
'''simple docstring'''
__UpperCamelCase: List[str] = ["torch"]
def __init__( self : Dict , *A : Any , **A : Tuple ):
requires_backends(self , ["torch"] )
@classmethod
def _A ( cls : int , *A : Optional[int] , **A : Dict ):
requires_backends(cls , ["torch"] )
@classmethod
def _A ( cls : Dict , *A : Optional[int] , **A : Optional[int] ):
requires_backends(cls , ["torch"] )
class lowerCamelCase_ (metaclass=snake_case__ ):
'''simple docstring'''
__UpperCamelCase: Optional[int] = ["torch"]
def __init__( self : Union[str, Any] , *A : Dict , **A : List[str] ):
requires_backends(self , ["torch"] )
@classmethod
def _A ( cls : Union[str, Any] , *A : List[Any] , **A : List[Any] ):
requires_backends(cls , ["torch"] )
@classmethod
def _A ( cls : Tuple , *A : int , **A : str ):
requires_backends(cls , ["torch"] )
class lowerCamelCase_ (metaclass=snake_case__ ):
'''simple docstring'''
__UpperCamelCase: List[Any] = ["torch"]
def __init__( self : Optional[Any] , *A : Optional[int] , **A : int ):
requires_backends(self , ["torch"] )
@classmethod
def _A ( cls : str , *A : Tuple , **A : Optional[Any] ):
requires_backends(cls , ["torch"] )
@classmethod
def _A ( cls : Tuple , *A : Tuple , **A : List[str] ):
requires_backends(cls , ["torch"] )
class lowerCamelCase_ (metaclass=snake_case__ ):
'''simple docstring'''
__UpperCamelCase: str = ["torch"]
def __init__( self : Tuple , *A : Any , **A : Dict ):
requires_backends(self , ["torch"] )
@classmethod
def _A ( cls : Union[str, Any] , *A : Dict , **A : List[Any] ):
requires_backends(cls , ["torch"] )
@classmethod
def _A ( cls : Any , *A : Tuple , **A : Union[str, Any] ):
requires_backends(cls , ["torch"] )
class lowerCamelCase_ (metaclass=snake_case__ ):
'''simple docstring'''
__UpperCamelCase: List[str] = ["torch"]
def __init__( self : List[str] , *A : int , **A : str ):
requires_backends(self , ["torch"] )
@classmethod
def _A ( cls : int , *A : Dict , **A : Optional[Any] ):
requires_backends(cls , ["torch"] )
@classmethod
def _A ( cls : Optional[int] , *A : List[str] , **A : Tuple ):
requires_backends(cls , ["torch"] )
class lowerCamelCase_ (metaclass=snake_case__ ):
'''simple docstring'''
__UpperCamelCase: Optional[int] = ["torch"]
def __init__( self : Optional[Any] , *A : Dict , **A : int ):
requires_backends(self , ["torch"] )
@classmethod
def _A ( cls : List[Any] , *A : Optional[Any] , **A : Dict ):
requires_backends(cls , ["torch"] )
@classmethod
def _A ( cls : Optional[int] , *A : Any , **A : Union[str, Any] ):
requires_backends(cls , ["torch"] )
class lowerCamelCase_ (metaclass=snake_case__ ):
'''simple docstring'''
__UpperCamelCase: Tuple = ["torch"]
def __init__( self : Any , *A : Any , **A : int ):
requires_backends(self , ["torch"] )
@classmethod
def _A ( cls : str , *A : Optional[int] , **A : List[str] ):
requires_backends(cls , ["torch"] )
@classmethod
def _A ( cls : List[str] , *A : Tuple , **A : Optional[int] ):
requires_backends(cls , ["torch"] )
class lowerCamelCase_ (metaclass=snake_case__ ):
'''simple docstring'''
__UpperCamelCase: List[Any] = ["torch"]
def __init__( self : int , *A : str , **A : Union[str, Any] ):
requires_backends(self , ["torch"] )
@classmethod
def _A ( cls : List[str] , *A : Union[str, Any] , **A : Any ):
requires_backends(cls , ["torch"] )
@classmethod
def _A ( cls : int , *A : Optional[Any] , **A : List[str] ):
requires_backends(cls , ["torch"] )
class lowerCamelCase_ (metaclass=snake_case__ ):
'''simple docstring'''
__UpperCamelCase: int = ["torch"]
def __init__( self : List[str] , *A : Any , **A : Union[str, Any] ):
requires_backends(self , ["torch"] )
@classmethod
def _A ( cls : int , *A : List[Any] , **A : Tuple ):
requires_backends(cls , ["torch"] )
@classmethod
def _A ( cls : str , *A : Union[str, Any] , **A : Tuple ):
requires_backends(cls , ["torch"] )
class lowerCamelCase_ (metaclass=snake_case__ ):
'''simple docstring'''
__UpperCamelCase: List[Any] = ["torch"]
def __init__( self : str , *A : Any , **A : int ):
requires_backends(self , ["torch"] )
@classmethod
def _A ( cls : Optional[int] , *A : Tuple , **A : Tuple ):
requires_backends(cls , ["torch"] )
@classmethod
def _A ( cls : Union[str, Any] , *A : Optional[int] , **A : List[Any] ):
requires_backends(cls , ["torch"] )
class lowerCamelCase_ (metaclass=snake_case__ ):
'''simple docstring'''
__UpperCamelCase: Dict = ["torch"]
def __init__( self : Any , *A : Optional[int] , **A : Union[str, Any] ):
requires_backends(self , ["torch"] )
@classmethod
def _A ( cls : Dict , *A : int , **A : int ):
requires_backends(cls , ["torch"] )
@classmethod
def _A ( cls : Dict , *A : str , **A : Union[str, Any] ):
requires_backends(cls , ["torch"] )
class lowerCamelCase_ (metaclass=snake_case__ ):
'''simple docstring'''
__UpperCamelCase: Dict = ["torch"]
def __init__( self : Optional[Any] , *A : Optional[Any] , **A : str ):
requires_backends(self , ["torch"] )
@classmethod
def _A ( cls : Optional[Any] , *A : List[str] , **A : int ):
requires_backends(cls , ["torch"] )
@classmethod
def _A ( cls : Optional[int] , *A : List[str] , **A : Tuple ):
requires_backends(cls , ["torch"] )
class lowerCamelCase_ (metaclass=snake_case__ ):
'''simple docstring'''
__UpperCamelCase: Any = ["torch"]
def __init__( self : List[str] , *A : Optional[Any] , **A : List[Any] ):
requires_backends(self , ["torch"] )
@classmethod
def _A ( cls : Optional[Any] , *A : str , **A : Optional[int] ):
requires_backends(cls , ["torch"] )
@classmethod
def _A ( cls : int , *A : List[Any] , **A : Tuple ):
requires_backends(cls , ["torch"] )
class lowerCamelCase_ (metaclass=snake_case__ ):
'''simple docstring'''
__UpperCamelCase: Tuple = ["torch"]
def __init__( self : Optional[int] , *A : Union[str, Any] , **A : Tuple ):
requires_backends(self , ["torch"] )
@classmethod
def _A ( cls : Tuple , *A : Any , **A : Dict ):
requires_backends(cls , ["torch"] )
@classmethod
def _A ( cls : Dict , *A : List[Any] , **A : Optional[int] ):
requires_backends(cls , ["torch"] )
class lowerCamelCase_ (metaclass=snake_case__ ):
'''simple docstring'''
__UpperCamelCase: str = ["torch"]
def __init__( self : Union[str, Any] , *A : List[str] , **A : Optional[int] ):
requires_backends(self , ["torch"] )
@classmethod
def _A ( cls : Dict , *A : List[Any] , **A : Any ):
requires_backends(cls , ["torch"] )
@classmethod
def _A ( cls : int , *A : Optional[int] , **A : Optional[Any] ):
requires_backends(cls , ["torch"] )
| 244 | 1 |
import os
import shutil
import tempfile
from unittest import TestCase
from unittest.mock import patch
import numpy as np
from datasets import Dataset
from transformers.models.realm.configuration_realm import RealmConfig
from transformers.models.realm.retrieval_realm import _REALM_BLOCK_RECORDS_FILENAME, RealmRetriever
from transformers.models.realm.tokenization_realm import VOCAB_FILES_NAMES, RealmTokenizer
class lowerCAmelCase__ ( TestCase ):
'''simple docstring'''
def __UpperCamelCase ( self ):
'''simple docstring'''
        self.tmpdirname = tempfile.mkdtemp()
        self.num_block_records = 5
# Realm tok
        vocab_tokens = [
'''[UNK]''',
'''[CLS]''',
'''[SEP]''',
'''[PAD]''',
'''[MASK]''',
'''test''',
'''question''',
'''this''',
'''is''',
'''the''',
'''first''',
'''second''',
'''third''',
'''fourth''',
'''fifth''',
'''record''',
'''want''',
'''##want''',
'''##ed''',
'''wa''',
'''un''',
'''runn''',
'''##ing''',
''',''',
'''low''',
'''lowest''',
]
        realm_tokenizer_path = os.path.join(self.tmpdirname , '''realm_tokenizer''' )
        os.makedirs(realm_tokenizer_path , exist_ok=True )
        self.vocab_file = os.path.join(realm_tokenizer_path , VOCAB_FILES_NAMES['''vocab_file'''] )
        with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer:
            vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) )
        realm_block_records_path = os.path.join(self.tmpdirname , '''realm_block_records''' )
        os.makedirs(realm_block_records_path , exist_ok=True )
def __UpperCamelCase ( self ):
'''simple docstring'''
return RealmTokenizer.from_pretrained(os.path.join(self.tmpdirname , '''realm_tokenizer''' ) )
def __UpperCamelCase ( self ):
'''simple docstring'''
shutil.rmtree(self.tmpdirname )
def __UpperCamelCase ( self ):
'''simple docstring'''
        config = RealmConfig(num_block_records=self.num_block_records )
return config
def __UpperCamelCase ( self ):
'''simple docstring'''
        dataset = Dataset.from_dict(
{
'''id''': ['''0''', '''1'''],
'''question''': ['''foo''', '''bar'''],
'''answers''': [['''Foo''', '''Bar'''], ['''Bar''']],
} )
return dataset
def __UpperCamelCase ( self ):
'''simple docstring'''
        block_records = np.array(
[
b'''This is the first record''',
b'''This is the second record''',
b'''This is the third record''',
b'''This is the fourth record''',
b'''This is the fifth record''',
b'''This is a longer longer longer record''',
            ] , dtype=object , )
return block_records
def __UpperCamelCase ( self ):
'''simple docstring'''
        retriever = RealmRetriever(
block_records=self.get_dummy_block_records() , tokenizer=self.get_tokenizer() , )
return retriever
def __UpperCamelCase ( self ):
'''simple docstring'''
        config = self.get_config()
        retriever = self.get_dummy_retriever()
        tokenizer = retriever.tokenizer
        retrieved_block_ids = np.array([0, 3] , dtype='''long''' )
        question_input_ids = tokenizer(['''Test question'''] ).input_ids
        answer_ids = tokenizer(
            ['''the fourth'''] , add_special_tokens=False , return_token_type_ids=False , return_attention_mask=False , ).input_ids
        max_length = config.reader_seq_len
        has_answers , start_pos , end_pos , concat_inputs = retriever(
            retrieved_block_ids , question_input_ids , answer_ids=answer_ids , max_length=max_length , return_tensors='''np''' )
        self.assertEqual(len(has_answers ) , 2 )
        self.assertEqual(len(start_pos ) , 2 )
        self.assertEqual(len(end_pos ) , 2 )
self.assertEqual(concat_inputs.input_ids.shape , (2, 1_0) )
self.assertEqual(concat_inputs.attention_mask.shape , (2, 1_0) )
self.assertEqual(concat_inputs.token_type_ids.shape , (2, 1_0) )
self.assertEqual(concat_inputs.special_tokens_mask.shape , (2, 1_0) )
self.assertEqual(
tokenizer.convert_ids_to_tokens(concat_inputs.input_ids[0] ) , ['''[CLS]''', '''test''', '''question''', '''[SEP]''', '''this''', '''is''', '''the''', '''first''', '''record''', '''[SEP]'''] , )
self.assertEqual(
tokenizer.convert_ids_to_tokens(concat_inputs.input_ids[1] ) , ['''[CLS]''', '''test''', '''question''', '''[SEP]''', '''this''', '''is''', '''the''', '''fourth''', '''record''', '''[SEP]'''] , )
def __UpperCamelCase ( self ):
'''simple docstring'''
        config = self.get_config()
        retriever = self.get_dummy_retriever()
        tokenizer = retriever.tokenizer
        retrieved_block_ids = np.array([0, 3, 5] , dtype='''long''' )
        question_input_ids = tokenizer(['''Test question'''] ).input_ids
        answer_ids = tokenizer(
            ['''the fourth''', '''longer longer'''] , add_special_tokens=False , return_token_type_ids=False , return_attention_mask=False , ).input_ids
        max_length = config.reader_seq_len
        has_answers , start_pos , end_pos , concat_inputs = retriever(
            retrieved_block_ids , question_input_ids , answer_ids=answer_ids , max_length=max_length , return_tensors='''np''' )
        self.assertEqual([False, True, True] , has_answers )
        self.assertEqual([[-1, -1, -1], [6, -1, -1], [6, 7, 8]] , start_pos )
        self.assertEqual([[-1, -1, -1], [7, -1, -1], [7, 8, 9]] , end_pos )
def __UpperCamelCase ( self ):
'''simple docstring'''
        retriever = self.get_dummy_retriever()
retriever.save_pretrained(os.path.join(self.tmpdirname , '''realm_block_records''' ) )
# Test local path
        retriever = retriever.from_pretrained(os.path.join(self.tmpdirname , '''realm_block_records''' ) )
self.assertEqual(retriever.block_records[0] , b'''This is the first record''' )
# Test mocked remote path
with patch('''transformers.models.realm.retrieval_realm.hf_hub_download''' ) as mock_hf_hub_download:
            mock_hf_hub_download.return_value = os.path.join(
                os.path.join(self.tmpdirname , '''realm_block_records''' ) , _REALM_BLOCK_RECORDS_FILENAME )
            retriever = RealmRetriever.from_pretrained('''google/realm-cc-news-pretrained-openqa''' )
self.assertEqual(retriever.block_records[0] , b'''This is the first record''' )
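# To run just this suite (an illustrative command; the test-file path is an assumption
# about a typical transformers checkout, not stated in this file):
#   python -m pytest tests/models/realm/test_retrieval_realm.py -q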
| 516 |
import warnings
from ...utils import logging
from .image_processing_dpt import DPTImageProcessor
logger = logging.get_logger(__name__)
class DPTFeatureExtractor ( DPTImageProcessor ):
    '''simple docstring'''
    def __init__( self , *args , **kwargs ):
        '''simple docstring'''
        warnings.warn(
            '''The class DPTFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please'''
            ''' use DPTImageProcessor instead.''' , FutureWarning , )
        super().__init__(*args , **kwargs )
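# Migration sketch (illustrative, mirroring the deprecation warning above; the
# checkpoint name "Intel/dpt-large" is an assumption used only for the example):
#
#     # before
#     extractor = DPTFeatureExtractor.from_pretrained("Intel/dpt-large")
#     # after
#     processor = DPTImageProcessor.from_pretrained("Intel/dpt-large")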
| 516 | 1 |
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
logger = logging.get_logger(__name__)
RESNET_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    """microsoft/resnet-50""": """https://huggingface.co/microsoft/resnet-50/blob/main/config.json""",
}
class lowercase__ ( BackboneConfigMixin , PretrainedConfig ):
    """simple docstring"""
    model_type = """resnet"""
    layer_types = ["""basic""", """bottleneck"""]
    def __init__( self , num_channels=3 , embedding_size=6_4 , hidden_sizes=[2_5_6, 5_1_2, 1_0_2_4, 2_0_4_8] , depths=[3, 4, 6, 3] , layer_type="bottleneck" , hidden_act="relu" , downsample_in_first_stage=False , out_features=None , out_indices=None , **kwargs , ):
        '''simple docstring'''
        super().__init__(**kwargs )
        if layer_type not in self.layer_types:
            raise ValueError(f"""layer_type={layer_type} is not one of {','.join(self.layer_types )}""" )
        self.num_channels = num_channels
        self.embedding_size = embedding_size
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.layer_type = layer_type
        self.hidden_act = hidden_act
        self.downsample_in_first_stage = downsample_in_first_stage
        self.stage_names = ["""stem"""] + [f"""stage{idx}""" for idx in range(1 , len(depths ) + 1 )]
        self._out_features , self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features , out_indices=out_indices , stage_names=self.stage_names )
class lowercase__ ( OnnxConfig ):
    """simple docstring"""
    torch_onnx_minimum_version = version.parse("""1.11""" )
@property
    def inputs( self ):
'''simple docstring'''
return OrderedDict(
[
("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}),
] )
@property
    def atol_for_validation( self ):
'''simple docstring'''
return 1e-3
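# Construction sketch (illustrative; `ResNetConfig` / `ResNetOnnxConfig` are the
# upstream names that the obfuscated class names above stand in for):
#
#     config = ResNetConfig(layer_type="bottleneck", depths=[3, 4, 6, 3])
#     onnx_config = ResNetOnnxConfig(config)
#     onnx_config.inputs               # OrderedDict with a 4-D "pixel_values" spec
#     onnx_config.atol_for_validation  # 1e-3, as returned above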
| 102 |
'''simple docstring'''
import argparse
import os
import numpy as np
import tensorflow as tf
import torch
from transformers import BertModel
def convert_pytorch_checkpoint_to_tf(model ,ckpt_dir ,model_name ):
    """simple docstring"""
    tensors_to_transpose : Tuple = ("dense.weight", "attention.self.query", "attention.self.key", "attention.self.value")
    var_map : Union[str, Any] = (
        ("layer.", "layer_"),
        ("word_embeddings.weight", "word_embeddings"),
        ("position_embeddings.weight", "position_embeddings"),
        ("token_type_embeddings.weight", "token_type_embeddings"),
        (".", "/"),
        ("LayerNorm/weight", "LayerNorm/gamma"),
        ("LayerNorm/bias", "LayerNorm/beta"),
        ("weight", "kernel"),
    )
    if not os.path.isdir(ckpt_dir ):
        os.makedirs(ckpt_dir )
    state_dict : str = model.state_dict()
    def to_tf_var_name(name ):
        for patt, repl in iter(var_map ):
            name : List[str] = name.replace(patt ,repl )
        return F'''bert/{name}'''
    def create_tf_var(tensor ,name ,session ):
        tf_dtype : List[Any] = tf.dtypes.as_dtype(tensor.dtype )
        tf_var : Union[str, Any] = tf.get_variable(dtype=tf_dtype ,shape=tensor.shape ,name=name ,initializer=tf.zeros_initializer() )
        session.run(tf.variables_initializer([tf_var] ) )
        session.run(tf_var )
        return tf_var
    tf.reset_default_graph()
    with tf.Session() as session:
        for var_name in state_dict:
            tf_name : Optional[int] = to_tf_var_name(var_name )
            torch_tensor : Dict = state_dict[var_name].numpy()
            if any(x in var_name for x in tensors_to_transpose ):
                torch_tensor : List[Any] = torch_tensor.T
            tf_var : Union[str, Any] = create_tf_var(tensor=torch_tensor ,name=tf_name ,session=session )
            tf.keras.backend.set_value(tf_var ,torch_tensor )
            tf_weight : List[str] = session.run(tf_var )
            print(F'''Successfully created {tf_name}: {np.allclose(tf_weight ,torch_tensor )}''' )
        saver : Any = tf.train.Saver(tf.trainable_variables() )
        saver.save(session ,os.path.join(ckpt_dir ,model_name.replace("-" ,"_" ) + ".ckpt" ) )
def main(raw_args=None ):
    """simple docstring"""
    parser : Any = argparse.ArgumentParser()
    parser.add_argument("--model_name" ,type=str ,required=True ,help="model name e.g. bert-base-uncased" )
    parser.add_argument(
        "--cache_dir" ,type=str ,default=None ,required=False ,help="Directory containing pytorch model" )
    parser.add_argument("--pytorch_model_path" ,type=str ,required=True ,help="/path/to/<pytorch-model-name>.bin" )
    parser.add_argument("--tf_cache_dir" ,type=str ,required=True ,help="Directory in which to save tensorflow model" )
    args : Optional[int] = parser.parse_args(raw_args )
    model : Optional[int] = BertModel.from_pretrained(
        pretrained_model_name_or_path=args.model_name ,state_dict=torch.load(args.pytorch_model_path ) ,cache_dir=args.cache_dir ,)
    convert_pytorch_checkpoint_to_tf(model=model ,ckpt_dir=args.tf_cache_dir ,model_name=args.model_name )
if __name__ == "__main__":
main()
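# Example invocation (illustrative paths; the script filename is an assumption):
#   python convert_bert_pytorch_checkpoint_to_original_tf.py \
#       --model_name bert-base-uncased \
#       --pytorch_model_path ./bert/pytorch_model.bin \
#       --tf_cache_dir ./bert_tf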
| 653 | 0 |
'''simple docstring'''
import unittest
from datasets import load_dataset
from transformers.pipelines import pipeline
from transformers.testing_utils import is_pipeline_test, nested_simplify, require_torch, slow
@is_pipeline_test
@require_torch
class _lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
@require_torch
    def test_small_model_pt( self : str):
        '''simple docstring'''
        audio_classifier = pipeline(
            task="""zero-shot-audio-classification""" , model="""hf-internal-testing/tiny-clap-htsat-unfused""")
        dataset = load_dataset("""ashraq/esc50""")
        audio = dataset["""train"""]["""audio"""][-1]["""array"""]
        output = audio_classifier(audio , candidate_labels=["""Sound of a dog""", """Sound of vaccum cleaner"""])
        self.assertEqual(
            nested_simplify(output) , [{"""score""": 0.5_01, """label""": """Sound of a dog"""}, {"""score""": 0.4_99, """label""": """Sound of vaccum cleaner"""}] , )
@unittest.skip("""No models are available in TF""")
    def test_small_model_tf( self : Optional[Any]):
'''simple docstring'''
pass
@slow
@require_torch
    def test_large_model_pt( self : List[str]):
        '''simple docstring'''
        audio_classifier = pipeline(
            task="""zero-shot-audio-classification""" , model="""laion/clap-htsat-unfused""" , )
        # This is an audio of a dog
        dataset = load_dataset("""ashraq/esc50""")
        audio = dataset["""train"""]["""audio"""][-1]["""array"""]
        output = audio_classifier(audio , candidate_labels=["""Sound of a dog""", """Sound of vaccum cleaner"""])
        self.assertEqual(
            nested_simplify(output) , [
                {"""score""": 0.9_99, """label""": """Sound of a dog"""},
                {"""score""": 0.0_01, """label""": """Sound of vaccum cleaner"""},
            ] , )
        output = audio_classifier([audio] * 5 , candidate_labels=["""Sound of a dog""", """Sound of vaccum cleaner"""])
        self.assertEqual(
            nested_simplify(output) , [
                [
                    {"""score""": 0.9_99, """label""": """Sound of a dog"""},
                    {"""score""": 0.0_01, """label""": """Sound of vaccum cleaner"""},
                ],
            ]
            * 5 , )
        output = audio_classifier(
            [audio] * 5 , candidate_labels=["""Sound of a dog""", """Sound of vaccum cleaner"""] , batch_size=5)
        self.assertEqual(
            nested_simplify(output) , [
                [
                    {"""score""": 0.9_99, """label""": """Sound of a dog"""},
                    {"""score""": 0.0_01, """label""": """Sound of vaccum cleaner"""},
                ],
            ]
            * 5 , )
@unittest.skip("""No models are available in TF""")
    def test_large_model_tf( self : List[str]):
'''simple docstring'''
pass
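# Standalone usage sketch of the pipeline exercised above (the checkpoint name comes
# from the slow test; the waveform variable and the second label are illustrative):
#
#     classifier = pipeline(task="zero-shot-audio-classification",
#                           model="laion/clap-htsat-unfused")
#     classifier(waveform, candidate_labels=["Sound of a dog", "Sound of rain"])
#
# `waveform` is a 1-D float array such as the ESC-50 sample loaded in the tests.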
| 720 |
import random
from typing import Any
def fisher_yates_shuffle ( data : list ):
    for _ in range(len(data ) ):
        a = random.randint(0 , len(data ) - 1 )
        b = random.randint(0 , len(data ) - 1 )
        data[a], data[b] = data[b], data[a]
    return data
if __name__ == "__main__":
    integers = [0, 1, 2, 3, 4, 5, 6, 7]
    strings = ["""python""", """says""", """hello""", """!"""]
print("""Fisher-Yates Shuffle:""")
print("""List""", integers, strings)
print("""FY Shuffle""", fisher_yates_shuffle(integers), fisher_yates_shuffle(strings))
| 99 | 0 |