| column | dtype | min | max |
|---|---|---|---|
| code | string (length) | 82 | 53.2k |
| code_codestyle | int64 | 0 | 721 |
| style_context | string (length) | 91 | 41.9k |
| style_context_codestyle | int64 | 0 | 699 |
| label | int64 | 0 | 1 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

SWINV2_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/swinv2-tiny-patch4-window8-256": (
        "https://huggingface.co/microsoft/swinv2-tiny-patch4-window8-256/resolve/main/config.json"
    ),
}


class Swinv2Config(PretrainedConfig):
    model_type = "swinv2"

    attribute_map = {
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }

    def __init__(
        self,
        image_size=224,
        patch_size=4,
        num_channels=3,
        embed_dim=96,
        depths=[2, 2, 6, 2],
        num_heads=[3, 6, 12, 24],
        window_size=7,
        mlp_ratio=4.0,
        qkv_bias=True,
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        drop_path_rate=0.1,
        hidden_act="gelu",
        use_absolute_embeddings=False,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        encoder_stride=32,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_layers = len(depths)
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.encoder_stride = encoder_stride
        # we set the hidden_size attribute in order to make Swinv2 work with VisionEncoderDecoderModel
        # this indicates the channel dimension after the last stage of the model
        self.hidden_size = int(embed_dim * 2 ** (len(depths) - 1))
        self.pretrained_window_sizes = (0, 0, 0, 0)
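A quick sanity check of the derived channel dimension (a minimal sketch, assuming the `transformers` package with `Swinv2Config` is installed):

```python
from transformers import Swinv2Config

config = Swinv2Config()  # defaults: embed_dim=96, depths=[2, 2, 6, 2]
# the channel dimension doubles at each of the three stage transitions
assert config.hidden_size == int(96 * 2 ** (len(config.depths) - 1)) == 768
# attribute_map resolves the generic name to the Swin-specific attribute
assert config.num_hidden_layers == len(config.depths)
```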
--- code_codestyle: 429 ---
from collections import deque

from .hash_table import HashTable


class HashTableWithLinkedList(HashTable):
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)

    def _set_value(self, key, data):
        # each slot holds a deque; new values are chained at the front
        self.values[key] = deque([]) if self.values[key] is None else self.values[key]
        self.values[key].appendleft(data)
        self._keys[key] = self.values[key]

    def balanced_factor(self):
        return (
            sum(self.charge_factor - len(slot) for slot in self.values)
            / self.size_table
            * self.charge_factor
        )

    def _collision_resolution(self, key, data=None):
        # keep chaining into this slot until the chain is full and every slot is in use
        if not (
            len(self.values[key]) == self.charge_factor and self.values.count(None) == 0
        ):
            return key
        return super()._collision_resolution(key, data)
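The class depends on a sibling `hash_table.HashTable`, so it is not runnable on its own; below is a self-contained sketch of the separate-chaining behaviour `_set_value` implements (slot count and chain limit chosen for illustration):

```python
from collections import deque

charge_factor = 3           # maximum chain length per slot
slots = [None, None, None]  # a tiny three-slot table


def set_value(key, data):
    # lazily create the chain, then push the newest value to the front
    slots[key] = deque([]) if slots[key] is None else slots[key]
    slots[key].appendleft(data)


set_value(1, "a")
set_value(1, "b")
assert list(slots[1]) == ["b", "a"]  # newest value first, as appendleft implies
```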
--- style_context_codestyle: 429 | label: 1 ---
import unittest

from .lib import (
    Matrix,
    Vector,
    axpy,
    square_zero_matrix,
    unit_basis_vector,
    zero_vector,
)


class Test(unittest.TestCase):
    def test_component(self):
        """test for method component()"""
        x = Vector([1, 2, 3])
        self.assertEqual(x.component(0), 1)
        self.assertEqual(x.component(2), 3)
        _ = Vector()

    def test_str(self):
        """test for method __str__()"""
        x = Vector([0, 0, 0, 0, 0, 1])
        self.assertEqual(str(x), "(0,0,0,0,0,1)")

    def test_size(self):
        """test for method __len__()"""
        x = Vector([1, 2, 3, 4])
        self.assertEqual(len(x), 4)

    def test_euclidean_length(self):
        """test for method euclidean_length()"""
        x = Vector([1, 2])
        y = Vector([1, 2, 3, 4, 5])
        z = Vector([0, 0, 0, 0, 0, 0, 0, 0, 0, 0])
        w = Vector([1, -1, 1, -1, 2, -3, 4, -5])
        self.assertAlmostEqual(x.euclidean_length(), 2.236, 3)
        self.assertAlmostEqual(y.euclidean_length(), 7.416, 3)
        self.assertEqual(z.euclidean_length(), 0)
        self.assertAlmostEqual(w.euclidean_length(), 7.616, 3)

    def test_add(self):
        """test for method __add__()"""
        x = Vector([1, 2, 3])
        y = Vector([1, 1, 1])
        self.assertEqual((x + y).component(0), 2)
        self.assertEqual((x + y).component(1), 3)
        self.assertEqual((x + y).component(2), 4)

    def test_sub(self):
        """test for method __sub__()"""
        x = Vector([1, 2, 3])
        y = Vector([1, 1, 1])
        self.assertEqual((x - y).component(0), 0)
        self.assertEqual((x - y).component(1), 1)
        self.assertEqual((x - y).component(2), 2)

    def test_mul(self):
        """test for method __mul__() (scalar product and dot product)"""
        x = Vector([1, 2, 3])
        a = Vector([2, -1, 4])  # for test of dot product
        b = Vector([1, -2, -1])
        self.assertEqual(str(x * 3.0), "(3.0,6.0,9.0)")
        self.assertEqual((a * b), 0)

    def test_zero_vector(self):
        """test for global function zero_vector()"""
        self.assertEqual(str(zero_vector(10)).count("0"), 10)

    def test_unit_basis_vector(self):
        """test for global function unit_basis_vector()"""
        self.assertEqual(str(unit_basis_vector(3, 1)), "(0,1,0)")

    def test_axpy(self):
        """test for global function axpy() (operation a*x + y)"""
        x = Vector([1, 2, 3])
        y = Vector([1, 0, 1])
        self.assertEqual(str(axpy(2, x, y)), "(3,4,7)")

    def test_copy(self):
        """test for method copy()"""
        x = Vector([1, 0, 0, 0, 0, 0])
        y = x.copy()
        self.assertEqual(str(x), str(y))

    def test_change_component(self):
        """test for method change_component()"""
        x = Vector([1, 0, 0])
        x.change_component(0, 0)
        x.change_component(1, 1)
        self.assertEqual(str(x), "(0,1,0)")

    def test_str_matrix(self):
        """test for Matrix method __str__()"""
        a = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3)
        self.assertEqual("|1,2,3|\n|2,4,5|\n|6,7,8|\n", str(a))

    def test_minor(self):
        """test for Matrix method minor()"""
        a = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3)
        minors = [[-3, -14, -10], [-5, -10, -5], [-2, -1, 0]]
        for x in range(a.height()):
            for y in range(a.width()):
                self.assertEqual(minors[x][y], a.minor(x, y))

    def test_cofactor(self):
        """test for Matrix method cofactor()"""
        a = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3)
        cofactors = [[-3, 14, -10], [5, -10, 5], [-2, 1, 0]]
        for x in range(a.height()):
            for y in range(a.width()):
                self.assertEqual(cofactors[x][y], a.cofactor(x, y))

    def test_determinant(self):
        """test for Matrix method determinant()"""
        a = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3)
        self.assertEqual(-5, a.determinant())

    def test__mul__matrix(self):
        """test for Matrix operation __mul__ with a vector and a scalar"""
        a = Matrix([[1, 2, 3], [4, 5, 6], [7, 8, 9]], 3, 3)
        x = Vector([1, 2, 3])
        self.assertEqual("(14,32,50)", str(a * x))
        self.assertEqual("|2,4,6|\n|8,10,12|\n|14,16,18|\n", str(a * 2))

    def test_change_component_matrix(self):
        """test for Matrix method change_component()"""
        a = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3)
        a.change_component(0, 2, 5)
        self.assertEqual("|1,2,5|\n|2,4,5|\n|6,7,8|\n", str(a))

    def test_component_matrix(self):
        """test for Matrix method component()"""
        a = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3)
        self.assertEqual(7, a.component(2, 1), 0.01)

    def test__add__matrix(self):
        """test for Matrix operation __add__()"""
        a = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3)
        b = Matrix([[1, 2, 7], [2, 4, 5], [6, 7, 10]], 3, 3)
        self.assertEqual("|2,4,10|\n|4,8,10|\n|12,14,18|\n", str(a + b))

    def test__sub__matrix(self):
        """test for Matrix operation __sub__()"""
        a = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3)
        b = Matrix([[1, 2, 7], [2, 4, 5], [6, 7, 10]], 3, 3)
        self.assertEqual("|0,0,-4|\n|0,0,0|\n|0,0,-2|\n", str(a - b))

    def test_square_zero_matrix(self):
        """test for global function square_zero_matrix()"""
        self.assertEqual(
            "|0,0,0,0,0|\n|0,0,0,0,0|\n|0,0,0,0,0|\n|0,0,0,0,0|\n|0,0,0,0,0|\n",
            str(square_zero_matrix(5)),
        )


if __name__ == "__main__":
    unittest.main()
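The fixtures are small enough to verify by hand; a cofactor expansion along the first row reproduces both the first row of the expected minors and the determinant of -5 (plain Python, no dependency on the local `lib` module):

```python
m = [[1, 2, 3], [2, 4, 5], [6, 7, 8]]


def det2(a, b, c, d):
    # determinant of the 2x2 matrix [[a, b], [c, d]]
    return a * d - b * c


minors_row0 = [
    det2(m[1][1], m[1][2], m[2][1], m[2][2]),  # -3
    det2(m[1][0], m[1][2], m[2][0], m[2][2]),  # -14
    det2(m[1][0], m[1][1], m[2][0], m[2][1]),  # -10
]
assert minors_row0 == [-3, -14, -10]  # first row of the expected minors

# cofactor expansion along the first row
det = m[0][0] * minors_row0[0] - m[0][1] * minors_row0[1] + m[0][2] * minors_row0[2]
assert det == -5  # matches test_determinant
```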
--- code_codestyle: 703 ---
"""simple docstring"""
from pickle import UnpicklingError
import jax
import jax.numpy as jnp
import numpy as np
from flax.serialization import from_bytes
from flax.traverse_util import flatten_dict
from ..utils import logging
_UpperCamelCase = logging.get_logger(__name__)
def lowerCAmelCase_ ( SCREAMING_SNAKE_CASE : str , SCREAMING_SNAKE_CASE : str ):
'''simple docstring'''
try:
with open(SCREAMING_SNAKE_CASE , '''rb''' ) as flax_state_f:
__lowerCamelCase : int =from_bytes(SCREAMING_SNAKE_CASE , flax_state_f.read() )
except UnpicklingError as e:
try:
with open(SCREAMING_SNAKE_CASE ) as f:
if f.read().startswith('''version''' ):
raise OSError(
'''You seem to have cloned a repository without having git-lfs installed. Please'''
''' install git-lfs and run `git lfs install` followed by `git lfs pull` in the'''
''' folder you cloned.''' )
else:
raise ValueError from e
except (UnicodeDecodeError, ValueError):
raise EnvironmentError(F'Unable to convert {model_file} to Flax deserializable object. ' )
return load_flax_weights_in_pytorch_model(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
def lowerCAmelCase_ ( SCREAMING_SNAKE_CASE : int , SCREAMING_SNAKE_CASE : Union[str, Any] ):
'''simple docstring'''
try:
import torch # noqa: F401
except ImportError:
logger.error(
'''Loading Flax weights in PyTorch requires both PyTorch and Flax to be installed. Please see'''
''' https://pytorch.org/ and https://flax.readthedocs.io/en/latest/installation.html for installation'''
''' instructions.''' )
raise
# check if we have bf16 weights
__lowerCamelCase : str =flatten_dict(jax.tree_util.tree_map(lambda SCREAMING_SNAKE_CASE : x.dtype == jnp.bfloataa , SCREAMING_SNAKE_CASE ) ).values()
if any(SCREAMING_SNAKE_CASE ):
# convert all weights to fp32 if they are bf16 since torch.from_numpy can-not handle bf16
# and bf16 is not fully supported in PT yet.
logger.warning(
'''Found ``bfloat16`` weights in Flax model. Casting all ``bfloat16`` weights to ``float32`` '''
'''before loading those in PyTorch model.''' )
__lowerCamelCase : List[str] =jax.tree_util.tree_map(
lambda SCREAMING_SNAKE_CASE : params.astype(np.floataa ) if params.dtype == jnp.bfloataa else params , SCREAMING_SNAKE_CASE )
__lowerCamelCase : int =''''''
__lowerCamelCase : List[str] =flatten_dict(SCREAMING_SNAKE_CASE , sep='''.''' )
__lowerCamelCase : List[str] =pt_model.state_dict()
# keep track of unexpected & missing keys
__lowerCamelCase : Union[str, Any] =[]
__lowerCamelCase : int =set(pt_model_dict.keys() )
for flax_key_tuple, flax_tensor in flax_state_dict.items():
__lowerCamelCase : Tuple =flax_key_tuple.split('''.''' )
if flax_key_tuple_array[-1] == "kernel" and flax_tensor.ndim == 4:
__lowerCamelCase : Optional[int] =flax_key_tuple_array[:-1] + ['''weight''']
__lowerCamelCase : Union[str, Any] =jnp.transpose(SCREAMING_SNAKE_CASE , (3, 2, 0, 1) )
elif flax_key_tuple_array[-1] == "kernel":
__lowerCamelCase : Optional[Any] =flax_key_tuple_array[:-1] + ['''weight''']
__lowerCamelCase : Dict =flax_tensor.T
elif flax_key_tuple_array[-1] == "scale":
__lowerCamelCase : str =flax_key_tuple_array[:-1] + ['''weight''']
if "time_embedding" not in flax_key_tuple_array:
for i, flax_key_tuple_string in enumerate(SCREAMING_SNAKE_CASE ):
__lowerCamelCase : Union[str, Any] =(
flax_key_tuple_string.replace('''_0''' , '''.0''' )
.replace('''_1''' , '''.1''' )
.replace('''_2''' , '''.2''' )
.replace('''_3''' , '''.3''' )
.replace('''_4''' , '''.4''' )
.replace('''_5''' , '''.5''' )
.replace('''_6''' , '''.6''' )
.replace('''_7''' , '''.7''' )
.replace('''_8''' , '''.8''' )
.replace('''_9''' , '''.9''' )
)
__lowerCamelCase : Optional[int] ='''.'''.join(SCREAMING_SNAKE_CASE )
if flax_key in pt_model_dict:
if flax_tensor.shape != pt_model_dict[flax_key].shape:
raise ValueError(
F'Flax checkpoint seems to be incorrect. Weight {flax_key_tuple} was expected '
F'to be of shape {pt_model_dict[flax_key].shape}, but is {flax_tensor.shape}.' )
else:
# add weight to pytorch dict
__lowerCamelCase : Tuple =np.asarray(SCREAMING_SNAKE_CASE ) if not isinstance(SCREAMING_SNAKE_CASE , np.ndarray ) else flax_tensor
__lowerCamelCase : Any =torch.from_numpy(SCREAMING_SNAKE_CASE )
# remove from missing keys
missing_keys.remove(SCREAMING_SNAKE_CASE )
else:
# weight is not expected by PyTorch model
unexpected_keys.append(SCREAMING_SNAKE_CASE )
pt_model.load_state_dict(SCREAMING_SNAKE_CASE )
# re-transform missing_keys to list
__lowerCamelCase : Optional[Any] =list(SCREAMING_SNAKE_CASE )
if len(SCREAMING_SNAKE_CASE ) > 0:
logger.warning(
'''Some weights of the Flax model were not used when initializing the PyTorch model'''
F' {pt_model.__class__.__name__}: {unexpected_keys}\n- This IS expected if you are initializing'
F' {pt_model.__class__.__name__} from a Flax model trained on another task or with another architecture'
''' (e.g. initializing a BertForSequenceClassification model from a FlaxBertForPreTraining model).\n- This'''
F' IS NOT expected if you are initializing {pt_model.__class__.__name__} from a Flax model that you expect'
''' to be exactly identical (e.g. initializing a BertForSequenceClassification model from a'''
''' FlaxBertForSequenceClassification model).''' )
if len(SCREAMING_SNAKE_CASE ) > 0:
logger.warning(
F'Some weights of {pt_model.__class__.__name__} were not initialized from the Flax model and are newly'
F' initialized: {missing_keys}\nYou should probably TRAIN this model on a down-stream task to be able to'
''' use it for predictions and inference.''' )
return pt_model
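The `(3, 2, 0, 1)` permutation above reflects the differing kernel layouts: Flax stores conv kernels as (height, width, in_channels, out_channels) while PyTorch expects (out_channels, in_channels, height, width), and Flax dense kernels are (in, out) versus PyTorch's (out, in), hence the `.T`. A minimal numpy check of the axis mapping:

```python
import numpy as np

# a Flax-style conv kernel: (height, width, in_channels, out_channels)
flax_kernel = np.zeros((3, 3, 16, 32))

# reorder to PyTorch's (out_channels, in_channels, height, width)
pt_kernel = np.transpose(flax_kernel, (3, 2, 0, 1))
assert pt_kernel.shape == (32, 16, 3, 3)

# dense kernels: Flax (in_features, out_features) -> PyTorch (out_features, in_features)
dense = np.zeros((16, 32))
assert dense.T.shape == (32, 16)
```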
--- style_context_codestyle: 363 | label: 0 ---
import argparse
import os.path as osp
import re

import torch
from safetensors.torch import load_file, save_file


# =================#
# UNet Conversion #
# =================#

unet_conversion_map = [
    # (stable-diffusion, HF Diffusers)
    ("time_embed.0.weight", "time_embedding.linear_1.weight"),
    ("time_embed.0.bias", "time_embedding.linear_1.bias"),
    ("time_embed.2.weight", "time_embedding.linear_2.weight"),
    ("time_embed.2.bias", "time_embedding.linear_2.bias"),
    ("input_blocks.0.0.weight", "conv_in.weight"),
    ("input_blocks.0.0.bias", "conv_in.bias"),
    ("out.0.weight", "conv_norm_out.weight"),
    ("out.0.bias", "conv_norm_out.bias"),
    ("out.2.weight", "conv_out.weight"),
    ("out.2.bias", "conv_out.bias"),
]

unet_conversion_map_resnet = [
    # (stable-diffusion, HF Diffusers)
    ("in_layers.0", "norm1"),
    ("in_layers.2", "conv1"),
    ("out_layers.0", "norm2"),
    ("out_layers.3", "conv2"),
    ("emb_layers.1", "time_emb_proj"),
    ("skip_connection", "conv_shortcut"),
]

unet_conversion_map_layer = []
# hardcoded number of downblocks and resnets/attentions...
# would need smarter logic for other networks.
for i in range(4):
    # loop over downblocks/upblocks

    for j in range(2):
        # loop over resnets/attentions for downblocks
        hf_down_res_prefix = f"down_blocks.{i}.resnets.{j}."
        sd_down_res_prefix = f"input_blocks.{3*i + j + 1}.0."
        unet_conversion_map_layer.append((sd_down_res_prefix, hf_down_res_prefix))

        if i < 3:
            # no attention layers in down_blocks.3
            hf_down_atn_prefix = f"down_blocks.{i}.attentions.{j}."
            sd_down_atn_prefix = f"input_blocks.{3*i + j + 1}.1."
            unet_conversion_map_layer.append((sd_down_atn_prefix, hf_down_atn_prefix))

    for j in range(3):
        # loop over resnets/attentions for upblocks
        hf_up_res_prefix = f"up_blocks.{i}.resnets.{j}."
        sd_up_res_prefix = f"output_blocks.{3*i + j}.0."
        unet_conversion_map_layer.append((sd_up_res_prefix, hf_up_res_prefix))

        if i > 0:
            # no attention layers in up_blocks.0
            hf_up_atn_prefix = f"up_blocks.{i}.attentions.{j}."
            sd_up_atn_prefix = f"output_blocks.{3*i + j}.1."
            unet_conversion_map_layer.append((sd_up_atn_prefix, hf_up_atn_prefix))

    if i < 3:
        # no downsample in down_blocks.3
        hf_downsample_prefix = f"down_blocks.{i}.downsamplers.0.conv."
        sd_downsample_prefix = f"input_blocks.{3*(i+1)}.0.op."
        unet_conversion_map_layer.append((sd_downsample_prefix, hf_downsample_prefix))

        # no upsample in up_blocks.3
        hf_upsample_prefix = f"up_blocks.{i}.upsamplers.0."
        sd_upsample_prefix = f"output_blocks.{3*i + 2}.{1 if i == 0 else 2}."
        unet_conversion_map_layer.append((sd_upsample_prefix, hf_upsample_prefix))

hf_mid_atn_prefix = "mid_block.attentions.0."
sd_mid_atn_prefix = "middle_block.1."
unet_conversion_map_layer.append((sd_mid_atn_prefix, hf_mid_atn_prefix))

for j in range(2):
    hf_mid_res_prefix = f"mid_block.resnets.{j}."
    sd_mid_res_prefix = f"middle_block.{2*j}."
    unet_conversion_map_layer.append((sd_mid_res_prefix, hf_mid_res_prefix))


def convert_unet_state_dict(unet_state_dict):
    # buyer beware: this is a *brittle* function,
    # and correct output requires that all of these pieces interact in
    # the exact order in which I have arranged them.
    mapping = {k: k for k in unet_state_dict.keys()}
    for sd_name, hf_name in unet_conversion_map:
        mapping[hf_name] = sd_name
    for k, v in mapping.items():
        if "resnets" in k:
            for sd_part, hf_part in unet_conversion_map_resnet:
                v = v.replace(hf_part, sd_part)
            mapping[k] = v
    for k, v in mapping.items():
        for sd_part, hf_part in unet_conversion_map_layer:
            v = v.replace(hf_part, sd_part)
        mapping[k] = v
    new_state_dict = {v: unet_state_dict[k] for k, v in mapping.items()}
    return new_state_dict


# ================#
# VAE Conversion #
# ================#

vae_conversion_map = [
    # (stable-diffusion, HF Diffusers)
    ("nin_shortcut", "conv_shortcut"),
    ("norm_out", "conv_norm_out"),
    ("mid.attn_1.", "mid_block.attentions.0."),
]

for i in range(4):
    # down_blocks have two resnets
    for j in range(2):
        hf_down_prefix = f"encoder.down_blocks.{i}.resnets.{j}."
        sd_down_prefix = f"encoder.down.{i}.block.{j}."
        vae_conversion_map.append((sd_down_prefix, hf_down_prefix))

    if i < 3:
        hf_downsample_prefix = f"down_blocks.{i}.downsamplers.0."
        sd_downsample_prefix = f"down.{i}.downsample."
        vae_conversion_map.append((sd_downsample_prefix, hf_downsample_prefix))

        hf_upsample_prefix = f"up_blocks.{i}.upsamplers.0."
        sd_upsample_prefix = f"up.{3-i}.upsample."
        vae_conversion_map.append((sd_upsample_prefix, hf_upsample_prefix))

    # up_blocks have three resnets
    # also, up blocks in hf are numbered in reverse from sd
    for j in range(3):
        hf_up_prefix = f"decoder.up_blocks.{i}.resnets.{j}."
        sd_up_prefix = f"decoder.up.{3-i}.block.{j}."
        vae_conversion_map.append((sd_up_prefix, hf_up_prefix))

# this part accounts for mid blocks in both the encoder and the decoder
for i in range(2):
    hf_mid_res_prefix = f"mid_block.resnets.{i}."
    sd_mid_res_prefix = f"mid.block_{i+1}."
    vae_conversion_map.append((sd_mid_res_prefix, hf_mid_res_prefix))

vae_conversion_map_attn = [
    # (stable-diffusion, HF Diffusers)
    ("norm.", "group_norm."),
    ("q.", "query."),
    ("k.", "key."),
    ("v.", "value."),
    ("proj_out.", "proj_attn."),
]


def reshape_weight_for_sd(w):
    # convert HF linear weights to SD conv2d weights
    return w.reshape(*w.shape, 1, 1)


def convert_vae_state_dict(vae_state_dict):
    mapping = {k: k for k in vae_state_dict.keys()}
    for k, v in mapping.items():
        for sd_part, hf_part in vae_conversion_map:
            v = v.replace(hf_part, sd_part)
        mapping[k] = v
    for k, v in mapping.items():
        if "attentions" in k:
            for sd_part, hf_part in vae_conversion_map_attn:
                v = v.replace(hf_part, sd_part)
            mapping[k] = v
    new_state_dict = {v: vae_state_dict[k] for k, v in mapping.items()}
    weights_to_convert = ["q", "k", "v", "proj_out"]
    for k, v in new_state_dict.items():
        for weight_name in weights_to_convert:
            if f"mid.attn_1.{weight_name}.weight" in k:
                print(f"Reshaping {k} for SD format")
                new_state_dict[k] = reshape_weight_for_sd(v)
    return new_state_dict


# =========================#
# Text Encoder Conversion #
# =========================#

textenc_conversion_lst = [
    # (stable-diffusion, HF Diffusers)
    ("resblocks.", "text_model.encoder.layers."),
    ("ln_1", "layer_norm1"),
    ("ln_2", "layer_norm2"),
    (".c_fc.", ".fc1."),
    (".c_proj.", ".fc2."),
    (".attn", ".self_attn"),
    ("ln_final.", "transformer.text_model.final_layer_norm."),
    ("token_embedding.weight", "transformer.text_model.embeddings.token_embedding.weight"),
    ("positional_embedding", "transformer.text_model.embeddings.position_embedding.weight"),
]
protected = {re.escape(x[1]): x[0] for x in textenc_conversion_lst}
textenc_pattern = re.compile("|".join(protected.keys()))

# Ordering is from https://github.com/pytorch/pytorch/blob/master/test/cpp/api/modules.cpp
code2idx = {"q": 0, "k": 1, "v": 2}


def convert_text_enc_state_dict_v20(text_enc_dict):
    new_state_dict = {}
    capture_qkv_weight = {}
    capture_qkv_bias = {}
    for k, v in text_enc_dict.items():
        if (
            k.endswith(".self_attn.q_proj.weight")
            or k.endswith(".self_attn.k_proj.weight")
            or k.endswith(".self_attn.v_proj.weight")
        ):
            k_pre = k[: -len(".q_proj.weight")]
            k_code = k[-len("q_proj.weight")]
            if k_pre not in capture_qkv_weight:
                capture_qkv_weight[k_pre] = [None, None, None]
            capture_qkv_weight[k_pre][code2idx[k_code]] = v
            continue

        if (
            k.endswith(".self_attn.q_proj.bias")
            or k.endswith(".self_attn.k_proj.bias")
            or k.endswith(".self_attn.v_proj.bias")
        ):
            k_pre = k[: -len(".q_proj.bias")]
            k_code = k[-len("q_proj.bias")]
            if k_pre not in capture_qkv_bias:
                capture_qkv_bias[k_pre] = [None, None, None]
            capture_qkv_bias[k_pre][code2idx[k_code]] = v
            continue

        relabelled_key = textenc_pattern.sub(lambda m: protected[re.escape(m.group(0))], k)
        new_state_dict[relabelled_key] = v

    for k_pre, tensors in capture_qkv_weight.items():
        if None in tensors:
            raise Exception("CORRUPTED MODEL: one of the q-k-v values for the text encoder was missing")
        relabelled_key = textenc_pattern.sub(lambda m: protected[re.escape(m.group(0))], k_pre)
        new_state_dict[relabelled_key + ".in_proj_weight"] = torch.cat(tensors)

    for k_pre, tensors in capture_qkv_bias.items():
        if None in tensors:
            raise Exception("CORRUPTED MODEL: one of the q-k-v values for the text encoder was missing")
        relabelled_key = textenc_pattern.sub(lambda m: protected[re.escape(m.group(0))], k_pre)
        new_state_dict[relabelled_key + ".in_proj_bias"] = torch.cat(tensors)

    return new_state_dict


def convert_text_enc_state_dict(text_enc_dict):
    return text_enc_dict


if __name__ == "__main__":
    parser = argparse.ArgumentParser()

    parser.add_argument("--model_path", default=None, type=str, required=True, help="Path to the model to convert.")
    parser.add_argument("--checkpoint_path", default=None, type=str, required=True, help="Path to the output model.")
    parser.add_argument("--half", action="store_true", help="Save weights in half precision.")
    parser.add_argument(
        "--use_safetensors", action="store_true", help="Save weights use safetensors, default is ckpt."
    )

    args = parser.parse_args()

    assert args.model_path is not None, "Must provide a model path!"
    assert args.checkpoint_path is not None, "Must provide a checkpoint path!"

    # Path for safetensors
    unet_path = osp.join(args.model_path, "unet", "diffusion_pytorch_model.safetensors")
    vae_path = osp.join(args.model_path, "vae", "diffusion_pytorch_model.safetensors")
    text_enc_path = osp.join(args.model_path, "text_encoder", "model.safetensors")

    # Load models from safetensors if it exists, if it doesn't pytorch
    if osp.exists(unet_path):
        unet_state_dict = load_file(unet_path, device="cpu")
    else:
        unet_path = osp.join(args.model_path, "unet", "diffusion_pytorch_model.bin")
        unet_state_dict = torch.load(unet_path, map_location="cpu")

    if osp.exists(vae_path):
        vae_state_dict = load_file(vae_path, device="cpu")
    else:
        vae_path = osp.join(args.model_path, "vae", "diffusion_pytorch_model.bin")
        vae_state_dict = torch.load(vae_path, map_location="cpu")

    if osp.exists(text_enc_path):
        text_enc_dict = load_file(text_enc_path, device="cpu")
    else:
        text_enc_path = osp.join(args.model_path, "text_encoder", "pytorch_model.bin")
        text_enc_dict = torch.load(text_enc_path, map_location="cpu")

    # Convert the UNet model
    unet_state_dict = convert_unet_state_dict(unet_state_dict)
    unet_state_dict = {"model.diffusion_model." + k: v for k, v in unet_state_dict.items()}

    # Convert the VAE model
    vae_state_dict = convert_vae_state_dict(vae_state_dict)
    vae_state_dict = {"first_stage_model." + k: v for k, v in vae_state_dict.items()}

    # Easiest way to identify v2.0 model seems to be that the text encoder (OpenCLIP) is deeper
    is_v20_model = "text_model.encoder.layers.22.layer_norm2.bias" in text_enc_dict

    if is_v20_model:
        # Need to add the tag 'transformer' in advance so we can knock it out from the final layer-norm
        text_enc_dict = {"transformer." + k: v for k, v in text_enc_dict.items()}
        text_enc_dict = convert_text_enc_state_dict_v20(text_enc_dict)
        text_enc_dict = {"cond_stage_model.model." + k: v for k, v in text_enc_dict.items()}
    else:
        text_enc_dict = convert_text_enc_state_dict(text_enc_dict)
        text_enc_dict = {"cond_stage_model.transformer." + k: v for k, v in text_enc_dict.items()}

    # Put together new checkpoint
    state_dict = {**unet_state_dict, **vae_state_dict, **text_enc_dict}
    if args.half:
        state_dict = {k: v.half() for k, v in state_dict.items()}

    if args.use_safetensors:
        save_file(state_dict, args.checkpoint_path)
    else:
        state_dict = {"state_dict": state_dict}
        torch.save(state_dict, args.checkpoint_path)
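To make the effect of the prefix tables in the script above concrete, here is the same two-stage `str.replace` substitution applied to one sample UNet key (the key and the single-entry maps are illustrative excerpts, not the full tables):

```python
# illustrative one-entry excerpts of the tables built above
layer_map = [("input_blocks.1.0.", "down_blocks.0.resnets.0.")]  # (sd, hf) for i=0, j=0
resnet_map = [("in_layers.2", "conv1")]                          # (sd, hf)

key = "down_blocks.0.resnets.0.conv1.weight"  # HF Diffusers name
for sd_part, hf_part in resnet_map:
    key = key.replace(hf_part, sd_part)
for sd_part, hf_part in layer_map:
    key = key.replace(hf_part, sd_part)
assert key == "input_blocks.1.0.in_layers.2.weight"  # stable-diffusion name
```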
"""simple docstring"""
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import ShapEPipeline
else:
from .camera import create_pan_cameras
from .pipeline_shap_e import ShapEPipeline
from .pipeline_shap_e_img2img import ShapEImgaImgPipeline
from .renderer import (
BoundingBoxVolume,
ImportanceRaySampler,
MLPNeRFModelOutput,
MLPNeRSTFModel,
ShapEParamsProjModel,
ShapERenderer,
StratifiedRaySampler,
VoidNeRFModel,
)
--- style_context_codestyle: 391 | label: 0 ---
import operator


def strand_sort(arr: list, reverse: bool = False, solution: list | None = None) -> list:
    """
    Strand sort: repeatedly pull an ascending "strand" out of the input
    and merge it into the solution list.
    source: https://en.wikipedia.org/wiki/Strand_sort
    """
    _operator = operator.lt if reverse else operator.gt
    solution = solution or []

    if not arr:
        return solution

    sublist = [arr.pop(0)]
    for i, item in enumerate(arr):
        if _operator(item, sublist[-1]):
            sublist.append(item)
            arr.pop(i)

    # merging sublist into solution list
    if not solution:
        solution.extend(sublist)
    else:
        while sublist:
            item = sublist.pop(0)
            for i, xx in enumerate(solution):
                if not _operator(item, xx):
                    solution.insert(i, item)
                    break
            else:
                solution.append(item)

    strand_sort(arr, reverse, solution)
    return solution


if __name__ == "__main__":
    assert strand_sort([4, 3, 5, 1, 2]) == [1, 2, 3, 4, 5]
    assert strand_sort([4, 3, 5, 1, 2], reverse=True) == [5, 4, 3, 2, 1]
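A short trace on the list used in the asserts, showing how each recursive pass strips one ascending strand and merges it into the running solution:

```python
# pass 1: arr = [4, 3, 5, 1, 2] -> strand [4, 5], remainder [3, 1, 2]
# pass 2: strand [3] merged in front -> solution [3, 4, 5]
# pass 3: strand [1, 2] merged       -> solution [1, 2, 3, 4, 5]
assert strand_sort([4, 3, 5, 1, 2]) == [1, 2, 3, 4, 5]
```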
--- code_codestyle: 714 ---
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


_import_structure = {
    "configuration_timesformer": ["TIMESFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP", "TimesformerConfig"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_timesformer"] = [
        "TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TimesformerModel",
        "TimesformerForVideoClassification",
        "TimesformerPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_timesformer import TIMESFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, TimesformerConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_timesformer import (
            TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            TimesformerForVideoClassification,
            TimesformerModel,
            TimesformerPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
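`_LazyModule` above defers the heavy submodule imports until an attribute is first accessed. A self-contained sketch of the same idea using a PEP 562 module-level `__getattr__` (the package and submodule names here are hypothetical, for illustration only):

```python
# lazy_pkg/__init__.py -- hypothetical package, for illustration only
import importlib

_import_structure = {"heavy_submodule": ["HeavyClass"]}
_attr_to_module = {attr: mod for mod, attrs in _import_structure.items() for attr in attrs}


def __getattr__(name):
    # import the owning submodule on first access, then cache the attribute
    if name in _attr_to_module:
        module = importlib.import_module(f".{_attr_to_module[name]}", __name__)
        value = getattr(module, name)
        globals()[name] = value
        return value
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")
```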
--- style_context_codestyle: 257 | label: 0 ---

import argparse
import json

import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image

from transformers import ViTImageProcessor, ViTMSNConfig, ViTMSNModel
from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD


torch.set_grad_enabled(False)


def create_rename_keys(config, base_model=False):
    rename_keys = []
    for i in range(config.num_hidden_layers):
        # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
        rename_keys.append((f"module.blocks.{i}.norm1.weight", f"vit.encoder.layer.{i}.layernorm_before.weight"))
        rename_keys.append((f"module.blocks.{i}.norm1.bias", f"vit.encoder.layer.{i}.layernorm_before.bias"))
        rename_keys.append(
            (f"module.blocks.{i}.attn.proj.weight", f"vit.encoder.layer.{i}.attention.output.dense.weight")
        )
        rename_keys.append((f"module.blocks.{i}.attn.proj.bias", f"vit.encoder.layer.{i}.attention.output.dense.bias"))
        rename_keys.append((f"module.blocks.{i}.norm2.weight", f"vit.encoder.layer.{i}.layernorm_after.weight"))
        rename_keys.append((f"module.blocks.{i}.norm2.bias", f"vit.encoder.layer.{i}.layernorm_after.bias"))
        rename_keys.append((f"module.blocks.{i}.mlp.fc1.weight", f"vit.encoder.layer.{i}.intermediate.dense.weight"))
        rename_keys.append((f"module.blocks.{i}.mlp.fc1.bias", f"vit.encoder.layer.{i}.intermediate.dense.bias"))
        rename_keys.append((f"module.blocks.{i}.mlp.fc2.weight", f"vit.encoder.layer.{i}.output.dense.weight"))
        rename_keys.append((f"module.blocks.{i}.mlp.fc2.bias", f"vit.encoder.layer.{i}.output.dense.bias"))

    # projection layer + position embeddings
    rename_keys.extend(
        [
            ("module.cls_token", "vit.embeddings.cls_token"),
            ("module.patch_embed.proj.weight", "vit.embeddings.patch_embeddings.projection.weight"),
            ("module.patch_embed.proj.bias", "vit.embeddings.patch_embeddings.projection.bias"),
            ("module.pos_embed", "vit.embeddings.position_embeddings"),
        ]
    )

    if base_model:
        # layernorm + pooler
        rename_keys.extend(
            [
                ("module.norm.weight", "layernorm.weight"),
                ("module.norm.bias", "layernorm.bias"),
            ]
        )

        # if just the base model, we should remove "vit" from all keys that start with "vit"
        rename_keys = [(pair[0], pair[1][4:]) if pair[1].startswith("vit") else pair for pair in rename_keys]
    else:
        # layernorm + classification head
        rename_keys.extend(
            [
                ("norm.weight", "vit.layernorm.weight"),
                ("norm.bias", "vit.layernorm.bias"),
                ("head.weight", "classifier.weight"),
                ("head.bias", "classifier.bias"),
            ]
        )

    return rename_keys


def read_in_q_k_v(state_dict, config, base_model=False):
    for i in range(config.num_hidden_layers):
        if base_model:
            prefix = ""
        else:
            prefix = "vit."
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"module.blocks.{i}.attn.qkv.weight")
        in_proj_bias = state_dict.pop(f"module.blocks.{i}.attn.qkv.bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[: config.hidden_size]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-config.hidden_size :]


def remove_classification_head_(state_dict):
    ignore_keys = ["head.weight", "head.bias"]
    for k in ignore_keys:
        state_dict.pop(k, None)


def remove_projection_head(state_dict):
    # the projection head is used in the self-supervised pre-training in MSN,
    # for downstream tasks it's not needed.
    ignore_keys = [
        "module.fc.fc1.weight",
        "module.fc.fc1.bias",
        "module.fc.bn1.weight",
        "module.fc.bn1.bias",
        "module.fc.bn1.running_mean",
        "module.fc.bn1.running_var",
        "module.fc.bn1.num_batches_tracked",
        "module.fc.fc2.weight",
        "module.fc.fc2.bias",
        "module.fc.bn2.weight",
        "module.fc.bn2.bias",
        "module.fc.bn2.running_mean",
        "module.fc.bn2.running_var",
        "module.fc.bn2.num_batches_tracked",
        "module.fc.fc3.weight",
        "module.fc.fc3.bias",
    ]
    for k in ignore_keys:
        state_dict.pop(k, None)


def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val


def convert_vit_msn_checkpoint(checkpoint_url, pytorch_dump_folder_path):
    config = ViTMSNConfig()
    config.num_labels = 1000

    repo_id = "datasets/huggingface/label-files"
    filename = "imagenet-1k-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id, filename), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}

    if "s16" in checkpoint_url:
        config.hidden_size = 384
        config.intermediate_size = 1536
        config.num_attention_heads = 6
    elif "l16" in checkpoint_url:
        config.hidden_size = 1024
        config.intermediate_size = 4096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16
        config.hidden_dropout_prob = 0.1
    elif "b4" in checkpoint_url:
        config.patch_size = 4
    elif "l7" in checkpoint_url:
        config.patch_size = 7
        config.hidden_size = 1024
        config.intermediate_size = 4096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16
        config.hidden_dropout_prob = 0.1

    model = ViTMSNModel(config)

    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")["target_encoder"]

    image_processor = ViTImageProcessor(size=config.image_size)

    remove_projection_head(state_dict)
    rename_keys = create_rename_keys(config, base_model=True)

    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, config, base_model=True)

    model.load_state_dict(state_dict)
    model.eval()

    url = "http://images.cocodataset.org/val2017/000000039769.jpg"

    image = Image.open(requests.get(url, stream=True).raw)
    image_processor = ViTImageProcessor(
        size=config.image_size, image_mean=IMAGENET_DEFAULT_MEAN, image_std=IMAGENET_DEFAULT_STD
    )
    inputs = image_processor(images=image, return_tensors="pt")

    # forward pass
    torch.manual_seed(2)
    outputs = model(**inputs)
    last_hidden_state = outputs.last_hidden_state

    # The following Colab Notebook was used to generate these outputs:
    # https://colab.research.google.com/gist/sayakpaul/3672419a04f5997827503fd84079bdd1/scratchpad.ipynb
    if "s16" in checkpoint_url:
        expected_slice = torch.tensor([[-1.0915, -1.4876, -1.1809]])
    elif "b16" in checkpoint_url:
        expected_slice = torch.tensor([[14.2889, -18.9045, 11.7281]])
    elif "l16" in checkpoint_url:
        expected_slice = torch.tensor([[41.5028, -22.8681, 45.6475]])
    elif "b4" in checkpoint_url:
        expected_slice = torch.tensor([[-4.3868, 5.2932, -0.4137]])
    else:
        expected_slice = torch.tensor([[-0.1792, -0.6465, 2.4263]])

    # verify logits
    assert torch.allclose(last_hidden_state[:, 0, :3], expected_slice, atol=1e-4)

    print(f"Saving model to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)

    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--checkpoint_url",
        default="https://dl.fbaipublicfiles.com/msn/vits16_800ep.pth.tar",
        type=str,
        help="URL of the checkpoint you'd like to convert.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
    )

    args = parser.parse_args()
    convert_vit_msn_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
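`read_in_q_k_v` above splits timm's fused `qkv` projection, whose rows are stacked as [query; key; value], into the three separate matrices HF expects. The same slicing on a toy tensor (torch only):

```python
import torch

hidden_size = 4
qkv_weight = torch.arange(3 * hidden_size * hidden_size, dtype=torch.float32).reshape(
    3 * hidden_size, hidden_size
)

# rows are stacked as [q; k; v], so three equal slices recover them
q = qkv_weight[:hidden_size, :]
k = qkv_weight[hidden_size : hidden_size * 2, :]
v = qkv_weight[-hidden_size:, :]
assert torch.equal(torch.cat([q, k, v]), qkv_weight)
```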
--- code_codestyle: 257 ---

import copy
import unittest

from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available

from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin


if is_torch_available():
    import torch

    from transformers import (
        MODEL_FOR_MULTIPLE_CHOICE_MAPPING,
        MODEL_FOR_QUESTION_ANSWERING_MAPPING,
        MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
        MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
        LayoutLMv3Config,
        LayoutLMv3ForQuestionAnswering,
        LayoutLMv3ForSequenceClassification,
        LayoutLMv3ForTokenClassification,
        LayoutLMv3Model,
    )
    from transformers.models.layoutlmv3.modeling_layoutlmv3 import LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST

if is_vision_available():
    from PIL import Image

    from transformers import LayoutLMv3ImageProcessor


class LayoutLMv3ModelTester:
    def __init__(
        self,
        parent,
        batch_size=2,
        num_channels=3,
        image_size=4,
        patch_size=2,
        text_seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=36,
        num_hidden_layers=3,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        coordinate_size=6,
        shape_size=6,
        num_labels=3,
        num_choices=4,
        scope=None,
        range_bbox=1000,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.patch_size = patch_size
        self.text_seq_length = text_seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.coordinate_size = coordinate_size
        self.shape_size = shape_size
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
        self.range_bbox = range_bbox

        # LayoutLMv3's sequence length equals the number of text tokens + number of patches + 1 (we add 1 for the CLS token)
        self.text_seq_length = text_seq_length
        self.image_seq_length = (image_size // patch_size) ** 2 + 1
        self.seq_length = self.text_seq_length + self.image_seq_length

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.text_seq_length], self.vocab_size)

        bbox = ids_tensor([self.batch_size, self.text_seq_length, 4], self.range_bbox)
        # Ensure that bbox is legal
        for i in range(bbox.shape[0]):
            for j in range(bbox.shape[1]):
                if bbox[i, j, 3] < bbox[i, j, 1]:
                    t = bbox[i, j, 3]
                    bbox[i, j, 3] = bbox[i, j, 1]
                    bbox[i, j, 1] = t
                if bbox[i, j, 2] < bbox[i, j, 0]:
                    t = bbox[i, j, 2]
                    bbox[i, j, 2] = bbox[i, j, 0]
                    bbox[i, j, 0] = t

        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.text_seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.text_seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.text_seq_length], self.num_labels)

        config = LayoutLMv3Config(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            initializer_range=self.initializer_range,
            coordinate_size=self.coordinate_size,
            shape_size=self.shape_size,
            input_size=self.image_size,
            patch_size=self.patch_size,
        )

        return config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels

    def create_and_check_model(
        self, config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels
    ):
        model = LayoutLMv3Model(config=config)
        model.to(torch_device)
        model.eval()

        # text + image
        result = model(input_ids, pixel_values=pixel_values)
        result = model(
            input_ids, bbox=bbox, pixel_values=pixel_values, attention_mask=input_mask, token_type_ids=token_type_ids
        )
        result = model(input_ids, bbox=bbox, pixel_values=pixel_values, token_type_ids=token_type_ids)
        result = model(input_ids, bbox=bbox, pixel_values=pixel_values)

        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

        # text only
        result = model(input_ids)
        self.parent.assertEqual(
            result.last_hidden_state.shape, (self.batch_size, self.text_seq_length, self.hidden_size)
        )

        # image only
        result = model(pixel_values=pixel_values)
        self.parent.assertEqual(
            result.last_hidden_state.shape, (self.batch_size, self.image_seq_length, self.hidden_size)
        )

    def create_and_check_for_sequence_classification(
        self, config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels
    ):
        config.num_labels = self.num_labels
        model = LayoutLMv3ForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            bbox=bbox,
            pixel_values=pixel_values,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            labels=sequence_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_token_classification(
        self, config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels
    ):
        config.num_labels = self.num_labels
        model = LayoutLMv3ForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            bbox=bbox,
            pixel_values=pixel_values,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            labels=token_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.text_seq_length, self.num_labels))

    def create_and_check_for_question_answering(
        self, config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels
    ):
        model = LayoutLMv3ForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            bbox=bbox,
            pixel_values=pixel_values,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            bbox,
            pixel_values,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
        ) = config_and_inputs
        inputs_dict = {
            "input_ids": input_ids,
            "bbox": bbox,
            "pixel_values": pixel_values,
            "token_type_ids": token_type_ids,
            "attention_mask": input_mask,
        }
        return config, inputs_dict


@require_torch
class LayoutLMv3ModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    test_pruning = False
    test_torchscript = False
    test_mismatched_shapes = False

    all_model_classes = (
        (
            LayoutLMv3Model,
            LayoutLMv3ForSequenceClassification,
            LayoutLMv3ForTokenClassification,
            LayoutLMv3ForQuestionAnswering,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {"document-question-answering": LayoutLMv3ForQuestionAnswering, "feature-extraction": LayoutLMv3Model}
        if is_torch_available()
        else {}
    )

    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        # `DocumentQuestionAnsweringPipeline` is expected to work with this model, but it combines the text and visual
        # embedding along the sequence dimension (dim 1), which causes an error during post-processing as `p_mask` has
        # the sequence dimension of the text embedding only.
        # (see the line `embedding_output = torch.cat([embedding_output, visual_embeddings], dim=1)`)
        return True

    def setUp(self):
        self.model_tester = LayoutLMv3ModelTester(self)
        self.config_tester = ConfigTester(self, config_class=LayoutLMv3Config, hidden_size=37)

    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = copy.deepcopy(inputs_dict)
        if model_class in get_values(MODEL_FOR_MULTIPLE_CHOICE_MAPPING):
            inputs_dict = {
                k: v.unsqueeze(1).expand(-1, self.model_tester.num_choices, -1).contiguous()
                if isinstance(v, torch.Tensor) and v.ndim > 1
                else v
                for k, v in inputs_dict.items()
            }

        if return_labels:
            if model_class in get_values(MODEL_FOR_MULTIPLE_CHOICE_MAPPING):
                inputs_dict["labels"] = torch.ones(self.model_tester.batch_size, dtype=torch.long, device=torch_device)
            elif model_class in get_values(MODEL_FOR_QUESTION_ANSWERING_MAPPING):
                inputs_dict["start_positions"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )
                inputs_dict["end_positions"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )
            elif model_class in [
                *get_values(MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING),
            ]:
                inputs_dict["labels"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )
            elif model_class in [
                *get_values(MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING),
            ]:
                inputs_dict["labels"] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.text_seq_length),
                    dtype=torch.long,
                    device=torch_device,
                )

        return inputs_dict

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_model_various_embeddings(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = LayoutLMv3Model.from_pretrained(model_name)
            self.assertIsNotNone(model)


def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image


@require_torch
class LayoutLMv3ModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return LayoutLMv3ImageProcessor(apply_ocr=False) if is_vision_available() else None

    @slow
    def test_inference_no_head(self):
        model = LayoutLMv3Model.from_pretrained("microsoft/layoutlmv3-base").to(torch_device)

        image_processor = self.default_image_processor
        image = prepare_img()
        pixel_values = image_processor(images=image, return_tensors="pt").pixel_values.to(torch_device)

        input_ids = torch.tensor([[1, 2]])
        bbox = torch.tensor([[1, 2, 3, 4], [5, 6, 7, 8]]).unsqueeze(0)

        # forward pass
        outputs = model(
            input_ids=input_ids.to(torch_device),
            bbox=bbox.to(torch_device),
            pixel_values=pixel_values.to(torch_device),
        )

        # verify the logits
        expected_shape = torch.Size((1, 199, 768))
        self.assertEqual(outputs.last_hidden_state.shape, expected_shape)

        expected_slice = torch.tensor(
            [[-0.0529, 0.3618, 0.1632], [-0.1587, -0.1667, -0.0400], [-0.1557, -0.1671, -0.0505]]
        ).to(torch_device)

        self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :3, :3], expected_slice, atol=1e-4))
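The expected shape `(1, 199, 768)` in the integration test follows from the sequence layout noted in the tester above: text tokens plus image patches plus one patch CLS token. For `layoutlmv3-base` (224-pixel input, 16-pixel patches) and the two text ids used in the test:

```python
image_size, patch_size = 224, 16
text_seq_length = 2  # input_ids = [[1, 2]]

image_seq_length = (image_size // patch_size) ** 2 + 1  # 196 patches + 1 CLS token = 197
seq_length = text_seq_length + image_seq_length
assert seq_length == 199  # matches torch.Size((1, 199, 768))
```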
--- style_context_codestyle: 192 | label: 0 ---
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
a_ = {}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ = ['NllbTokenizer']
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ = ['NllbTokenizerFast']
if TYPE_CHECKING:
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_nllb import NllbTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_nllb_fast import NllbTokenizerFast
else:
import sys
a_ = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
--- code_codestyle: 705 ---
"""simple docstring"""
import copy
import random
from transformers import CLIPTokenizer
class UpperCAmelCase_ ( snake_case ):
def __init__( self , *UpperCamelCase_ , **UpperCamelCase_ ) -> Optional[Any]:
super().__init__(*UpperCamelCase_ , **UpperCamelCase_ )
__lowercase : int = {}
def _lowerCamelCase ( self , UpperCamelCase_ , *UpperCamelCase_ , **UpperCamelCase_ ) -> Optional[int]:
__lowercase : Optional[int] = super().add_tokens(UpperCamelCase_ , *UpperCamelCase_ , **UpperCamelCase_ )
if num_added_tokens == 0:
raise ValueError(
F"""The tokenizer already contains the token {placeholder_token}. Please pass a different"""
''' `placeholder_token` that is not already in the tokenizer.''' )
def _lowerCamelCase ( self , UpperCamelCase_ , *UpperCamelCase_ , UpperCamelCase_=1 , **UpperCamelCase_ ) -> Optional[Any]:
__lowercase : Union[str, Any] = []
if num_vec_per_token == 1:
self.try_adding_tokens(UpperCamelCase_ , *UpperCamelCase_ , **UpperCamelCase_ )
output.append(UpperCamelCase_ )
else:
__lowercase : List[Any] = []
for i in range(UpperCamelCase_ ):
__lowercase : List[str] = placeholder_token + F"""_{i}"""
self.try_adding_tokens(UpperCamelCase_ , *UpperCamelCase_ , **UpperCamelCase_ )
output.append(UpperCamelCase_ )
# handle cases where there is a new placeholder token that contains the current placeholder token but is larger
for token in self.token_map:
if token in placeholder_token:
raise ValueError(
F"""The tokenizer already has placeholder token {token} that can get confused with"""
F""" {placeholder_token}keep placeholder tokens independent""" )
__lowercase : int = output
def _lowerCamelCase ( self , UpperCamelCase_ , UpperCamelCase_=False , UpperCamelCase_=1.0 ) -> Tuple:
if isinstance(UpperCamelCase_ , UpperCamelCase_ ):
__lowercase : Optional[Any] = []
for i in range(len(UpperCamelCase_ ) ):
output.append(self.replace_placeholder_tokens_in_text(text[i] , vector_shuffle=UpperCamelCase_ ) )
return output
for placeholder_token in self.token_map:
if placeholder_token in text:
__lowercase : List[Any] = self.token_map[placeholder_token]
__lowercase : Optional[Any] = tokens[: 1 + int(len(UpperCamelCase_ ) * prop_tokens_to_load )]
if vector_shuffle:
__lowercase : int = copy.copy(UpperCamelCase_ )
random.shuffle(UpperCamelCase_ )
__lowercase : Tuple = text.replace(UpperCamelCase_ , ''' '''.join(UpperCamelCase_ ) )
return text
def __call__( self , UpperCamelCase_ , *UpperCamelCase_ , UpperCamelCase_=False , UpperCamelCase_=1.0 , **UpperCamelCase_ ) -> Optional[Any]:
return super().__call__(
self.replace_placeholder_tokens_in_text(
UpperCamelCase_ , vector_shuffle=UpperCamelCase_ , prop_tokens_to_load=UpperCamelCase_ ) , *UpperCamelCase_ , **UpperCamelCase_ , )
def _lowerCamelCase ( self , UpperCamelCase_ , *UpperCamelCase_ , UpperCamelCase_=False , UpperCamelCase_=1.0 , **UpperCamelCase_ ) -> int:
return super().encode(
self.replace_placeholder_tokens_in_text(
UpperCamelCase_ , vector_shuffle=UpperCamelCase_ , prop_tokens_to_load=UpperCamelCase_ ) , *UpperCamelCase_ , **UpperCamelCase_ , )
--- style_context_codestyle: 523 | label: 0 ---
def twos_complement(number: int) -> str:
    """
    Take in a negative integer 'number' and return its two's complement
    representation as a binary string prefixed with "0b".
    """
    if number > 0:
        raise ValueError("input must be a negative integer")
    binary_number_length = len(bin(number)[3:])
    twos_complement_number = bin(abs(number) - (1 << binary_number_length))[3:]
    twos_complement_number = (
        (
            "1"
            + "0" * (binary_number_length - len(twos_complement_number))
            + twos_complement_number
        )
        if number < 0
        else "0"
    )
    return "0b" + twos_complement_number


if __name__ == "__main__":
    import doctest

    doctest.testmod()
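A worked example of the bit arithmetic: for -5, `bin(-5)[3:]` is `'101'` (3 bits), `abs(-5) - (1 << 3)` is `-3` whose magnitude bits are `'11'`, and the sign bit plus zero padding yields `1011`:

```python
assert twos_complement(-5) == "0b1011"  # -5 in 4-bit two's complement
assert twos_complement(-1) == "0b11"    # sign bit plus a single value bit
```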
--- code_codestyle: 92 ---
import torch
from diffusers import UnCLIPScheduler
from .test_schedulers import SchedulerCommonTest
class A_ ( SCREAMING_SNAKE_CASE ):
_UpperCAmelCase : int = (UnCLIPScheduler,)
def lowerCAmelCase ( self : Union[str, Any] ,**SCREAMING_SNAKE_CASE__ : List[Any]):
__lowerCamelCase : Any = {
'num_train_timesteps': 1_0_0_0,
'variance_type': 'fixed_small_log',
'clip_sample': True,
'clip_sample_range': 1.0,
'prediction_type': 'epsilon',
}
config.update(**SCREAMING_SNAKE_CASE__)
return config
def lowerCAmelCase ( self : Optional[Any]):
for timesteps in [1, 5, 1_0_0, 1_0_0_0]:
self.check_over_configs(num_train_timesteps=SCREAMING_SNAKE_CASE__)
def lowerCAmelCase ( self : Optional[Any]):
for variance in ["fixed_small_log", "learned_range"]:
self.check_over_configs(variance_type=SCREAMING_SNAKE_CASE__)
def lowerCAmelCase ( self : Union[str, Any]):
for clip_sample in [True, False]:
self.check_over_configs(clip_sample=SCREAMING_SNAKE_CASE__)
def lowerCAmelCase ( self : Tuple):
for clip_sample_range in [1, 5, 1_0, 2_0]:
self.check_over_configs(clip_sample_range=SCREAMING_SNAKE_CASE__)
    def test_prediction_type(self):
        for prediction_type in ["epsilon", "sample"]:
            self.check_over_configs(prediction_type=prediction_type)
    def test_time_indices(self):
        for time_step in [0, 5_0_0, 9_9_9]:
            for prev_timestep in [None, 5, 1_0_0, 2_5_0, 5_0_0, 7_5_0]:
                if prev_timestep is not None and prev_timestep >= time_step:
                    continue
                self.check_over_forward(time_step=time_step, prev_timestep=prev_timestep)
    def test_variance_fixed_small_log(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(variance_type='fixed_small_log')
        scheduler = scheduler_class(**scheduler_config)

        assert torch.sum(torch.abs(scheduler._get_variance(0) - 1.00_00E-10)) < 1E-5
        assert torch.sum(torch.abs(scheduler._get_variance(4_8_7) - 0.0549625)) < 1E-5
        assert torch.sum(torch.abs(scheduler._get_variance(9_9_9) - 0.9994987)) < 1E-5
    def test_variance_learned_range(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(variance_type='learned_range')
        scheduler = scheduler_class(**scheduler_config)

        predicted_variance = 0.5

        assert scheduler._get_variance(1, predicted_variance=predicted_variance) - -10.1712790 < 1E-5
        assert scheduler._get_variance(4_8_7, predicted_variance=predicted_variance) - -5.7998052 < 1E-5
        assert scheduler._get_variance(9_9_9, predicted_variance=predicted_variance) - -0.0010011 < 1E-5
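    # The two full-loop tests below run the scheduler end to end on a deterministic
    # sample and compare summary statistics of the final result against reference values.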
    def test_full_loop(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = scheduler.timesteps

        model = self.dummy_model()
        sample = self.dummy_sample_deter
        generator = torch.manual_seed(0)

        for i, t in enumerate(timesteps):
            # 1. predict noise residual
            residual = model(sample, t)

            # 2. predict previous mean of sample x_t-1
            pred_prev_sample = scheduler.step(residual, t, sample, generator=generator).prev_sample

            sample = pred_prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 252.2682495) < 1E-2
        assert abs(result_mean.item() - 0.3284743) < 1E-3
    def test_full_loop_skip_timesteps(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        scheduler.set_timesteps(2_5)

        timesteps = scheduler.timesteps

        model = self.dummy_model()
        sample = self.dummy_sample_deter
        generator = torch.manual_seed(0)

        for i, t in enumerate(timesteps):
            # 1. predict noise residual
            residual = model(sample, t)

            if i + 1 == timesteps.shape[0]:
                prev_timestep = None
            else:
                prev_timestep = timesteps[i + 1]

            # 2. predict previous mean of sample x_t-1
            pred_prev_sample = scheduler.step(
                residual, t, sample, prev_timestep=prev_timestep, generator=generator).prev_sample

            sample = pred_prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 258.2044983) < 1E-2
        assert abs(result_mean.item() - 0.3362038) < 1E-3
    def test_trained_betas(self):
        pass

    def test_add_noise_device(self):
        pass
| 652 | 0 |
"""simple docstring"""
import os
from datetime import datetime as dt
from github import Github
LABELS_TO_EXEMPT = [
'good first issue',
'feature request',
'wip',
]
def main() -> None:
    g = Github(os.environ['''GITHUB_TOKEN'''] )
    repo = g.get_repo('''huggingface/accelerate''' )
    open_issues = repo.get_issues(state='''open''' )

    for issue in open_issues:
        comments = sorted([comment for comment in issue.get_comments()] , key=lambda i: i.created_at , reverse=True )
        last_comment = comments[0] if len(comments ) > 0 else None
        current_time = dt.utcnow()
        days_since_updated = (current_time - issue.updated_at).days
        days_since_creation = (current_time - issue.created_at).days
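        # First branch: close issues the bot already flagged that then stayed quiet
        # for a week; second branch: leave a stale warning on month-old, long-idle issues.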
if (
last_comment is not None
and last_comment.user.login == "github-actions[bot]"
and days_since_updated > 7
and days_since_creation >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# Close issue since it has been 7 days of inactivity since bot mention.
issue.edit(state='''closed''' )
elif (
days_since_updated > 23
and days_since_creation >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# Add stale comment
issue.create_comment(
'''This issue has been automatically marked as stale because it has not had '''
'''recent activity. If you think this still needs to be addressed '''
'''please comment on this thread.\n\nPlease note that issues that do not follow the '''
'''[contributing guidelines](https://github.com/huggingface/accelerate/blob/main/CONTRIBUTING.md) '''
'''are likely to be ignored.''' )
if __name__ == "__main__":
main() | 704 |
"""simple docstring"""
from dataclasses import dataclass, field
from typing import Tuple
from ..utils import cached_property, is_tf_available, logging, requires_backends
from .benchmark_args_utils import BenchmarkArguments
if is_tf_available():
import tensorflow as tf
logger = logging.get_logger(__name__)
@dataclass
class TensorFlowBenchmarkArguments(BenchmarkArguments):
    deprecated_args = [
        'no_inference',
        'no_cuda',
        'no_tpu',
        'no_speed',
        'no_memory',
        'no_env_print',
        'no_multi_process',
    ]
    def __init__(self, **kwargs):
        for deprecated_arg in self.deprecated_args:
            if deprecated_arg in kwargs:
                positive_arg = deprecated_arg[3:]
                kwargs[positive_arg] = not kwargs.pop(deprecated_arg)
                logger.warning(
                    f'''{deprecated_arg} is depreciated. Please use --no-{positive_arg} or'''
                    f''' {positive_arg}={kwargs[positive_arg]}''' )
        self.tpu_name = kwargs.pop('''tpu_name''' , self.tpu_name )
        self.device_idx = kwargs.pop('''device_idx''' , self.device_idx )
        self.eager_mode = kwargs.pop('''eager_mode''' , self.eager_mode )
        self.use_xla = kwargs.pop('''use_xla''' , self.use_xla )
        super().__init__(**kwargs )
    tpu_name: str = field(
        default=None , metadata={'help': 'Name of TPU'} , )
    device_idx: int = field(
        default=0 , metadata={'help': 'CPU / GPU device index. Defaults to 0.'} , )
    eager_mode: bool = field(default=False , metadata={'help': 'Benchmark models in eager model.'} )
    use_xla: bool = field(
        default=False , metadata={
            'help': 'Benchmark models using XLA JIT compilation. Note that `eager_model` has to be set to `False`.'
        } , )
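    # The TPU resolver and distribution strategy below are built lazily and memoized
    # with @cached_property, so no TensorFlow device is touched until a benchmark
    # actually asks for one.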
    @cached_property
    def _setup_tpu(self):
        requires_backends(self , ['''tf'''] )
        tpu = None
        if self.tpu:
            try:
                if self.tpu_name:
                    tpu = tf.distribute.cluster_resolver.TPUClusterResolver(self.tpu_name )
                else:
                    tpu = tf.distribute.cluster_resolver.TPUClusterResolver()
            except ValueError:
                tpu = None
        return tpu
    @cached_property
    def _setup_strategy(self):
        requires_backends(self , ['''tf'''] )
        if self.is_tpu:
            tf.config.experimental_connect_to_cluster(self._setup_tpu )
            tf.tpu.experimental.initialize_tpu_system(self._setup_tpu )

            strategy = tf.distribute.TPUStrategy(self._setup_tpu )
        else:
            # currently no multi gpu is allowed
            if self.is_gpu:
                # TODO: Currently only single GPU is supported
                tf.config.set_visible_devices(self.gpu_list[self.device_idx] , '''GPU''' )
                strategy = tf.distribute.OneDeviceStrategy(device=f'''/gpu:{self.device_idx}''' )
            else:
                tf.config.set_visible_devices([] , '''GPU''' )  # disable GPU
                strategy = tf.distribute.OneDeviceStrategy(device=f'''/cpu:{self.device_idx}''' )

        return strategy
    @property
    def is_tpu(self):
        requires_backends(self , ['''tf'''] )
        return self._setup_tpu is not None

    @property
    def strategy(self):
        requires_backends(self , ['''tf'''] )
        return self._setup_strategy

    @property
    def gpu_list(self):
        requires_backends(self , ['''tf'''] )
        return tf.config.list_physical_devices('''GPU''' )

    @property
    def n_gpu(self):
        requires_backends(self , ['''tf'''] )
        if self.cuda:
            return len(self.gpu_list )
        return 0

    @property
    def is_gpu(self):
        return self.n_gpu > 0
from .integrations import (
is_optuna_available,
is_ray_available,
is_sigopt_available,
is_wandb_available,
run_hp_search_optuna,
run_hp_search_ray,
run_hp_search_sigopt,
run_hp_search_wandb,
)
from .trainer_utils import (
HPSearchBackend,
default_hp_space_optuna,
default_hp_space_ray,
default_hp_space_sigopt,
default_hp_space_wandb,
)
from .utils import logging
logger = logging.get_logger(__name__)
class HyperParamSearchBackendBase:
    name: str
    pip_package: str = None

    @staticmethod
    def is_available():
        raise NotImplementedError

    def run(self, trainer, n_trials, direction, **kwargs):
        raise NotImplementedError

    def default_hp_space(self, trial):
        raise NotImplementedError

    def ensure_available(self):
        if not self.is_available():
            raise RuntimeError(
                f'''You picked the {self.name} backend, but it is not installed. Run {self.pip_install()}.''' )

    @classmethod
    def pip_install(cls):
        return f'''`pip install {cls.pip_package or cls.name}`'''
class OptunaBackend(HyperParamSearchBackendBase):
    name = "optuna"

    @staticmethod
    def is_available():
        return is_optuna_available()

    def run(self, trainer, n_trials, direction, **kwargs):
        return run_hp_search_optuna(trainer, n_trials, direction, **kwargs)

    def default_hp_space(self, trial):
        return default_hp_space_optuna(trial)


class RayTuneBackend(HyperParamSearchBackendBase):
    name = "ray"
    pip_package = "'ray[tune]'"

    @staticmethod
    def is_available():
        return is_ray_available()

    def run(self, trainer, n_trials, direction, **kwargs):
        return run_hp_search_ray(trainer, n_trials, direction, **kwargs)

    def default_hp_space(self, trial):
        return default_hp_space_ray(trial)


class SigOptBackend(HyperParamSearchBackendBase):
    name = "sigopt"

    @staticmethod
    def is_available():
        return is_sigopt_available()

    def run(self, trainer, n_trials, direction, **kwargs):
        return run_hp_search_sigopt(trainer, n_trials, direction, **kwargs)

    def default_hp_space(self, trial):
        return default_hp_space_sigopt(trial)


class WandbBackend(HyperParamSearchBackendBase):
    name = "wandb"

    @staticmethod
    def is_available():
        return is_wandb_available()

    def run(self, trainer, n_trials, direction, **kwargs):
        return run_hp_search_wandb(trainer, n_trials, direction, **kwargs)

    def default_hp_space(self, trial):
        return default_hp_space_wandb(trial)
ALL_HYPERPARAMETER_SEARCH_BACKENDS = {
HPSearchBackend(backend.name): backend for backend in [OptunaBackend, RayTuneBackend, SigOptBackend, WandbBackend]
}
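# default_hp_search_backend() below returns the first installed backend, so the
# registry order above (Optuna, Ray Tune, SigOpt, W&B) doubles as a preference order.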
def default_hp_search_backend() -> str:
    available_backends = [backend for backend in ALL_HYPERPARAMETER_SEARCH_BACKENDS.values() if backend.is_available()]
    if len(available_backends ) > 0:
        name = available_backends[0].name
        if len(available_backends ) > 1:
            logger.info(
                f'''{len(available_backends )} hyperparameter search backends available. Using {name} as the default.''' )
        return name
    raise RuntimeError(
        '''No hyperparameter search backend available.\n'''
        + '''\n'''.join(
            f''' - To install {backend.name} run {backend.pip_install()}'''
            for backend in ALL_HYPERPARAMETER_SEARCH_BACKENDS.values() ) )
| 2 |
"""simple docstring"""
import argparse
import json
from collections import OrderedDict
import torch
from huggingface_hub import cached_download, hf_hub_url
from transformers import AutoImageProcessor, CvtConfig, CvtForImageClassification
def embeddings(idx):
    embed = []
embed.append(
(
f"""cvt.encoder.stages.{idx}.embedding.convolution_embeddings.projection.weight""",
f"""stage{idx}.patch_embed.proj.weight""",
) )
embed.append(
(
f"""cvt.encoder.stages.{idx}.embedding.convolution_embeddings.projection.bias""",
f"""stage{idx}.patch_embed.proj.bias""",
) )
embed.append(
(
f"""cvt.encoder.stages.{idx}.embedding.convolution_embeddings.normalization.weight""",
f"""stage{idx}.patch_embed.norm.weight""",
) )
embed.append(
(
f"""cvt.encoder.stages.{idx}.embedding.convolution_embeddings.normalization.bias""",
f"""stage{idx}.patch_embed.norm.bias""",
) )
return embed
def attention(idx, cnt):
    attention_weights = []
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.convolution.weight""",
f"""stage{idx}.blocks.{cnt}.attn.conv_proj_q.conv.weight""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.weight""",
f"""stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.weight""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.bias""",
f"""stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.bias""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.running_mean""",
f"""stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.running_mean""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.running_var""",
f"""stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.running_var""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.num_batches_tracked""",
f"""stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.num_batches_tracked""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.convolution.weight""",
f"""stage{idx}.blocks.{cnt}.attn.conv_proj_k.conv.weight""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.weight""",
f"""stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.weight""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.bias""",
f"""stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.bias""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.running_mean""",
f"""stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.running_mean""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.running_var""",
f"""stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.running_var""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.num_batches_tracked""",
f"""stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.num_batches_tracked""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.convolution.weight""",
f"""stage{idx}.blocks.{cnt}.attn.conv_proj_v.conv.weight""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.weight""",
f"""stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.weight""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.bias""",
f"""stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.bias""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.running_mean""",
f"""stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.running_mean""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.running_var""",
f"""stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.running_var""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.num_batches_tracked""",
f"""stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.num_batches_tracked""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_query.weight""",
f"""stage{idx}.blocks.{cnt}.attn.proj_q.weight""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_query.bias""",
f"""stage{idx}.blocks.{cnt}.attn.proj_q.bias""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_key.weight""",
f"""stage{idx}.blocks.{cnt}.attn.proj_k.weight""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_key.bias""",
f"""stage{idx}.blocks.{cnt}.attn.proj_k.bias""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_value.weight""",
f"""stage{idx}.blocks.{cnt}.attn.proj_v.weight""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_value.bias""",
f"""stage{idx}.blocks.{cnt}.attn.proj_v.bias""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.output.dense.weight""",
f"""stage{idx}.blocks.{cnt}.attn.proj.weight""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.output.dense.bias""",
f"""stage{idx}.blocks.{cnt}.attn.proj.bias""",
) )
attention_weights.append(
(f"""cvt.encoder.stages.{idx}.layers.{cnt}.intermediate.dense.weight""", f"""stage{idx}.blocks.{cnt}.mlp.fc1.weight""") )
attention_weights.append(
(f"""cvt.encoder.stages.{idx}.layers.{cnt}.intermediate.dense.bias""", f"""stage{idx}.blocks.{cnt}.mlp.fc1.bias""") )
attention_weights.append(
(f"""cvt.encoder.stages.{idx}.layers.{cnt}.output.dense.weight""", f"""stage{idx}.blocks.{cnt}.mlp.fc2.weight""") )
attention_weights.append(
(f"""cvt.encoder.stages.{idx}.layers.{cnt}.output.dense.bias""", f"""stage{idx}.blocks.{cnt}.mlp.fc2.bias""") )
attention_weights.append(
(f"""cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_before.weight""", f"""stage{idx}.blocks.{cnt}.norm1.weight""") )
attention_weights.append(
(f"""cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_before.bias""", f"""stage{idx}.blocks.{cnt}.norm1.bias""") )
attention_weights.append(
(f"""cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_after.weight""", f"""stage{idx}.blocks.{cnt}.norm2.weight""") )
attention_weights.append(
(f"""cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_after.bias""", f"""stage{idx}.blocks.{cnt}.norm2.bias""") )
return attention_weights
def cls_token(idx):
    token = []
token.append((f"""cvt.encoder.stages.{idx}.cls_token""", "stage2.cls_token") )
return token
def final():
    head = []
head.append(("layernorm.weight", "norm.weight") )
head.append(("layernorm.bias", "norm.bias") )
head.append(("classifier.weight", "head.weight") )
head.append(("classifier.bias", "head.bias") )
return head
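# Each helper above returns (new_name, old_name) pairs that map Hugging Face CvT
# parameter names to the original checkpoint's names; the converter below walks
# config.depth to assemble the full rename table before copying weights across.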
def convert_cvt_checkpoint(cvt_model, image_size, cvt_file_name, pytorch_dump_folder_path):
    img_labels_file = "imagenet-1k-id2label.json"
    num_labels = 1_000
    repo_id = "huggingface/label-files"
    id2label = json.load(open(cached_download(hf_hub_url(repo_id, img_labels_file, repo_type="dataset" ) ) , "r" ) )
    id2label = {int(k): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}

    config = CvtConfig(num_labels=num_labels, id2label=id2label, label2id=label2id )

    # For depth size 13 (13 = 1+2+10)
    if cvt_model.rsplit("/" , 1 )[-1][4:6] == "13":
        config.depth = [1, 2, 10]

    # For depth size 21 (21 = 1+4+16)
    elif cvt_model.rsplit("/" , 1 )[-1][4:6] == "21":
        config.depth = [1, 4, 16]

    # For wide cvt (similar to wide-resnet) depth size 24 (w24 = 2 + 2 20)
    else:
        config.depth = [2, 2, 20]
        config.num_heads = [3, 12, 16]
        config.embed_dim = [192, 768, 1_024]

    model = CvtForImageClassification(config )
    image_processor = AutoImageProcessor.from_pretrained("facebook/convnext-base-224-22k-1k" )
    image_processor.size["shortest_edge"] = image_size
    original_weights = torch.load(cvt_file_name , map_location=torch.device("cpu" ) )

    huggingface_weights = OrderedDict()
    list_of_state_dict = []

    for idx in range(len(config.depth ) ):
        if config.cls_token[idx]:
            list_of_state_dict = list_of_state_dict + cls_token(idx )
        list_of_state_dict = list_of_state_dict + embeddings(idx )
        for cnt in range(config.depth[idx] ):
            list_of_state_dict = list_of_state_dict + attention(idx , cnt )

    list_of_state_dict = list_of_state_dict + final()
    for gg in list_of_state_dict:
        print(gg )
    for i in range(len(list_of_state_dict ) ):
        huggingface_weights[list_of_state_dict[i][0]] = original_weights[list_of_state_dict[i][1]]

    model.load_state_dict(huggingface_weights )
    model.save_pretrained(pytorch_dump_folder_path )
    image_processor.save_pretrained(pytorch_dump_folder_path )
# Download the weights from zoo: https://1drv.ms/u/s!AhIXJn_J-blW9RzF3rMW7SsLHa8h?e=blQ0Al
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
"""--cvt_model""",
default="""cvt-w24""",
type=str,
help="""Name of the cvt model you'd like to convert.""",
)
parser.add_argument(
"""--image_size""",
default=384,
type=int,
help="""Input Image Size""",
)
parser.add_argument(
"""--cvt_file_name""",
default=r"""cvtmodels\CvT-w24-384x384-IN-22k.pth""",
type=str,
help="""Input Image Size""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
)
    args = parser.parse_args()
convert_cvt_checkpoint(args.cvt_model, args.image_size, args.cvt_file_name, args.pytorch_dump_folder_path) | 337 | 0 |
'''simple docstring'''
from .configuration_bert_masked import MaskedBertConfig
from .modeling_bert_masked import (
MaskedBertForMultipleChoice,
MaskedBertForQuestionAnswering,
MaskedBertForSequenceClassification,
MaskedBertForTokenClassification,
MaskedBertModel,
)
from .modules import *
| 570 |
'''simple docstring'''
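# _modexpt computes base**exponent % modulo_value by recursive squaring, so the
# intermediate values never exceed modulo_value**2.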
def _modexpt(base: int, exponent: int, modulo_value: int) -> int:
    if exponent == 1:
        return base
    if exponent % 2 == 0:
        x = _modexpt(base, exponent // 2, modulo_value) % modulo_value
        return (x * x) % modulo_value
    else:
        return (base * _modexpt(base, exponent - 1, modulo_value)) % modulo_value
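# Project Euler 188: compute the last `digits` decimal digits of the
# hyperexponentiation of `base` by `height` (base tetrated height times),
# reducing modulo 10**digits at every step.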
def solution(base: int = 1_777, height: int = 1_855, digits: int = 8) -> int:
    result = base
    for _ in range(1, height):
        result = _modexpt(base, result, 10**digits)
    return result
if __name__ == "__main__":
print(F"""{solution() = }""")
| 570 | 1 |
import argparse
import torch
from transformers import OpenAIGPTConfig, OpenAIGPTModel, load_tf_weights_in_openai_gpt
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
logging.set_verbosity_info()
def convert_openai_checkpoint_to_pytorch(openai_checkpoint_folder_path, openai_config_file, pytorch_dump_folder_path):
    # Construct model
    if openai_config_file == "":
        config = OpenAIGPTConfig()
    else:
        config = OpenAIGPTConfig.from_json_file(openai_config_file)
    model = OpenAIGPTModel(config)

    # Load weights from numpy
    load_tf_weights_in_openai_gpt(model, config, openai_checkpoint_folder_path)

    # Save pytorch-model
    pytorch_weights_dump_path = pytorch_dump_folder_path + '''/''' + WEIGHTS_NAME
    pytorch_config_dump_path = pytorch_dump_folder_path + '''/''' + CONFIG_NAME
    print(f"""Save PyTorch model to {pytorch_weights_dump_path}""" )
    torch.save(model.state_dict() , pytorch_weights_dump_path )
    print(f"""Save configuration file to {pytorch_config_dump_path}""" )
    with open(pytorch_config_dump_path , '''w''' , encoding='''utf-8''' ) as f:
        f.write(config.to_json_string() )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--openai_checkpoint_folder_path""",
default=None,
type=str,
required=True,
help="""Path to the TensorFlow checkpoint path.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
parser.add_argument(
"""--openai_config_file""",
default="""""",
type=str,
help=(
"""An optional config json file corresponding to the pre-trained OpenAI model. \n"""
"""This specifies the model architecture."""
),
)
    args = parser.parse_args()
convert_openai_checkpoint_to_pytorch(
args.openai_checkpoint_folder_path, args.openai_config_file, args.pytorch_dump_folder_path
)
| 87 |
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {'''configuration_mra''': ['''MRA_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''MraConfig''']}
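# The torch-only symbols are registered below only when torch is importable; the
# module itself is swapped for a _LazyModule at the bottom so nothing heavy is
# imported until first attribute access.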
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_mra'''] = [
'''MRA_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''MraForMaskedLM''',
'''MraForMultipleChoice''',
'''MraForQuestionAnswering''',
'''MraForSequenceClassification''',
'''MraForTokenClassification''',
'''MraLayer''',
'''MraModel''',
'''MraPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_mra import MRA_PRETRAINED_CONFIG_ARCHIVE_MAP, MraConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mra import (
MRA_PRETRAINED_MODEL_ARCHIVE_LIST,
MraForMaskedLM,
MraForMultipleChoice,
MraForQuestionAnswering,
MraForSequenceClassification,
MraForTokenClassification,
MraLayer,
MraModel,
MraPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure)
| 40 | 0 |
"""simple docstring"""
import unittest
import numpy as np
import torch
from diffusers import VersatileDiffusionImageVariationPipeline
from diffusers.utils.testing_utils import load_image, require_torch_gpu, slow, torch_device
snake_case = False
class VersatileDiffusionImageVariationPipelineFastTests(unittest.TestCase):
    pass
@slow
@require_torch_gpu
class VersatileDiffusionImageVariationPipelineIntegrationTests(unittest.TestCase):
    def test_inference_image_variations(self):
        pipe = VersatileDiffusionImageVariationPipeline.from_pretrained('''shi-labs/versatile-diffusion''' )
        pipe.to(torch_device )
        pipe.set_progress_bar_config(disable=None )

        image_prompt = load_image(
            '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg''' )
        generator = torch.manual_seed(0 )
        image = pipe(
            image=image_prompt , generator=generator , guidance_scale=7.5 , num_inference_steps=5_0 , output_type='''numpy''' , ).images

        image_slice = image[0, 2_5_3:2_5_6, 2_5_3:2_5_6, -1]

        assert image.shape == (1, 5_1_2, 5_1_2, 3)
        expected_slice = np.array([0.0_4_4_1, 0.0_4_6_9, 0.0_5_0_7, 0.0_5_7_5, 0.0_6_3_2, 0.0_6_5_0, 0.0_8_6_5, 0.0_9_0_9, 0.0_9_4_5] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
| 710 |
"""simple docstring"""
def solution(n: int = 1000) -> int:
    return sum(e for e in range(3, n) if e % 3 == 0 or e % 5 == 0)
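# Project Euler 1: for the default bound this evaluates to 233168, the sum of all
# multiples of 3 or 5 below 1000.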
if __name__ == "__main__":
print(F"{solution() = }")
| 404 | 0 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

GPT_NEOX_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    '''EleutherAI/gpt-neox-20b''': '''https://huggingface.co/EleutherAI/gpt-neox-20b/resolve/main/config.json''',
    # See all GPTNeoX models at https://huggingface.co/models?filter=gpt_neox
}
class GPTNeoXConfig(PretrainedConfig):
    model_type = "gpt_neox"
    def __init__(
        self,
        vocab_size=50_432,
        hidden_size=6_144,
        num_hidden_layers=44,
        num_attention_heads=64,
        intermediate_size=24_576,
        hidden_act="gelu",
        rotary_pct=0.25,
        rotary_emb_base=10_000,
        attention_dropout=0.0,
        hidden_dropout=0.0,
        classifier_dropout=0.1,
        max_position_embeddings=2_048,
        initializer_range=0.02,
        layer_norm_eps=1E-5,
        use_cache=True,
        bos_token_id=0,
        eos_token_id=2,
        tie_word_embeddings=False,
        use_parallel_residual=True,
        rope_scaling=None,
        **kwargs,
    ):
        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.rotary_pct = rotary_pct
        self.rotary_emb_base = rotary_emb_base
        self.attention_dropout = attention_dropout
        self.hidden_dropout = hidden_dropout
        self.classifier_dropout = classifier_dropout
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.use_cache = use_cache
        self.tie_word_embeddings = tie_word_embeddings
        self.use_parallel_residual = use_parallel_residual
        self.rope_scaling = rope_scaling
        self._rope_scaling_validation()
        if self.hidden_size % self.num_attention_heads != 0:
            raise ValueError(
                'The hidden size is not divisible by the number of attention heads! Make sure to update them!' )
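    # `rope_scaling` enables rotary-embedding context extension: it must be a dict
    # naming a strategy ("linear" or "dynamic") and a float scaling factor > 1.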
    def _rope_scaling_validation(self):
        if self.rope_scaling is None:
            return

        if not isinstance(self.rope_scaling, dict) or len(self.rope_scaling) != 2:
            raise ValueError(
                '`rope_scaling` must be a dictionary with two fields, `type` and `factor`, '
                f"got {self.rope_scaling}" )
        rope_scaling_type = self.rope_scaling.get('type', None)
        rope_scaling_factor = self.rope_scaling.get('factor', None)
        if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]:
            raise ValueError(
                f"`rope_scaling`\'s type field must be one of [\'linear\', \'dynamic\'], got {rope_scaling_type}" )
        if rope_scaling_factor is None or not isinstance(rope_scaling_factor, float) or rope_scaling_factor <= 1.0:
            raise ValueError(f"`rope_scaling`\'s factor field must be a float > 1, got {rope_scaling_factor}" )
| 375 |
def text_justification(word: str, max_width: int) -> list:
    words = word.split()

    def justify(line: list, width: int, max_width: int) -> str:
        overall_spaces_count = max_width - width
        words_count = len(line)
        if len(line) == 1:
            # if there is only word in line
            # just insert overall_spaces_count for the remainder of line
            return line[0] + " " * overall_spaces_count
        else:
            spaces_to_insert_between_words = words_count - 1
            # num_spaces_between_words_list[i] : tells you to insert
            # num_spaces_between_words_list[i] spaces
            # after word on line[i]
            num_spaces_between_words_list = spaces_to_insert_between_words * [
                overall_spaces_count // spaces_to_insert_between_words
            ]
            spaces_count_in_locations = (
                overall_spaces_count % spaces_to_insert_between_words
            )
            # distribute spaces via round robin to the left words
            for i in range(spaces_count_in_locations):
                num_spaces_between_words_list[i] += 1
            aligned_words_list = []
            for i in range(spaces_to_insert_between_words):
                # add the word
                aligned_words_list.append(line[i])
                # add the spaces to insert
                aligned_words_list.append(num_spaces_between_words_list[i] * ''' ''')
            # just add the last word to the sentence
            aligned_words_list.append(line[-1])
            # join the aligned words list to form a justified line
            return "".join(aligned_words_list)

    answer = []
    line: list[str] = []
    width = 0
    for word in words:
        if width + len(word) + len(line) <= max_width:
            # keep adding words until we can fill out max_width
            # width = sum of length of all words (without overall_spaces_count)
            # len(word) = length of current word
            # len(line) = number of overall_spaces_count to insert between words
            line.append(word)
            width += len(word)
        else:
            # justify the line and add it to result
            answer.append(justify(line, width, max_width))
            # reset new line and new width
            line, width = [word], len(word)
    remaining_spaces = max_width - width - len(line)
    answer.append(''' '''.join(line) + (remaining_spaces + 1) * ''' ''')
    return answer
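# Example: text_justification("This is an example of text justification.", 16)
# returns ['This    is    an', 'example  of text', 'justification.  '], with every
# line padded to exactly 16 characters.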
if __name__ == "__main__":
from doctest import testmod
testmod()
| 154 | 0 |
import os
import sys
import unittest
git_repo_path = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, 'utils'))
import check_dummies # noqa: E402
from check_dummies import create_dummy_files, create_dummy_object, find_backend, read_init # noqa: E402
# Align TRANSFORMERS_PATH in check_dummies with the current path
check_dummies.PATH_TO_TRANSFORMERS = os.path.join(git_repo_path, 'src', 'transformers')
DUMMY_CONSTANT = """
{0} = None
"""
DUMMY_CLASS = """
class {0}(metaclass=DummyObject):
    _backends = {1}

    def __init__(self, *args, **kwargs):
        requires_backends(self, {1})
"""
DUMMY_FUNCTION = """
def {0}(*args, **kwargs):
    requires_backends({0}, {1})
"""
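# These templates mirror the placeholder objects that `create_dummy_files`
# generates so that importing a symbol from a missing backend raises a helpful
# error instead of an ImportError.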
class CheckDummiesTester(unittest.TestCase):
    def test_find_backend(self):
        no_backend = find_backend(' _import_structure["models.albert"].append("AlbertTokenizerFast")')
        self.assertIsNone(no_backend)

        simple_backend = find_backend(' if not is_tokenizers_available():')
        self.assertEqual(simple_backend, 'tokenizers')

        backend_with_underscore = find_backend(' if not is_tensorflow_text_available():')
        self.assertEqual(backend_with_underscore, 'tensorflow_text')

        double_backend = find_backend(' if not (is_sentencepiece_available() and is_tokenizers_available()):')
        self.assertEqual(double_backend, 'sentencepiece_and_tokenizers')

        double_backend_with_underscore = find_backend(
            ' if not (is_sentencepiece_available() and is_tensorflow_text_available()):')
        self.assertEqual(double_backend_with_underscore, 'sentencepiece_and_tensorflow_text')

        triple_backend = find_backend(
            ' if not (is_sentencepiece_available() and is_tokenizers_available() and is_vision_available()):')
        self.assertEqual(triple_backend, 'sentencepiece_and_tokenizers_and_vision')
    def test_read_init(self):
        objects = read_init()
        # We don't assert on the exact list of keys to allow for smooth grow of backend-specific objects
        self.assertIn('torch', objects)
        self.assertIn('tensorflow_text', objects)
        self.assertIn('sentencepiece_and_tokenizers', objects)

        # Likewise, we can't assert on the exact content of a key
        self.assertIn('BertModel', objects['torch'])
        self.assertIn('TFBertModel', objects['tf'])
        self.assertIn('FlaxBertModel', objects['flax'])
        self.assertIn('BertModel', objects['torch'])
        self.assertIn('TFBertTokenizer', objects['tensorflow_text'])
        self.assertIn('convert_slow_tokenizer', objects['sentencepiece_and_tokenizers'])
    def test_create_dummy_object(self):
        dummy_constant = create_dummy_object('CONSTANT', '\'torch\'')
        self.assertEqual(dummy_constant, '\nCONSTANT = None\n')

        dummy_function = create_dummy_object('function', '\'torch\'')
        self.assertEqual(
            dummy_function, '\ndef function(*args, **kwargs):\n    requires_backends(function, \'torch\')\n')

        expected_dummy_class = '\nclass FakeClass(metaclass=DummyObject):\n    _backends = \'torch\'\n\n    def __init__(self, *args, **kwargs):\n        requires_backends(self, \'torch\')\n'
        dummy_class = create_dummy_object('FakeClass', '\'torch\'')
        self.assertEqual(dummy_class, expected_dummy_class)
    def test_create_dummy_files(self):
        expected_dummy_pytorch_file = '# This file is autogenerated by the command `make fix-copies`, do not edit.\nfrom ..utils import DummyObject, requires_backends\n\n\nCONSTANT = None\n\n\ndef function(*args, **kwargs):\n    requires_backends(function, ["torch"])\n\n\nclass FakeClass(metaclass=DummyObject):\n    _backends = ["torch"]\n\n    def __init__(self, *args, **kwargs):\n        requires_backends(self, ["torch"])\n'
        dummy_files = create_dummy_files({'torch': ['CONSTANT', 'function', 'FakeClass']})
        self.assertEqual(dummy_files['torch'], expected_dummy_pytorch_file)
| 720 |
import gc
import unittest
import numpy as np
import torch
from diffusers import StableDiffusionKDiffusionPipeline
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()
@slow
@require_torch_gpu
class StableDiffusionKDiffusionPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def test_stable_diffusion_1(self):
        sd_pipe = StableDiffusionKDiffusionPipeline.from_pretrained('CompVis/stable-diffusion-v1-4')
        sd_pipe = sd_pipe.to(torch_device)
        sd_pipe.set_progress_bar_config(disable=None)

        sd_pipe.set_scheduler('sample_euler')

        prompt = 'A painting of a squirrel eating a burger'
        generator = torch.manual_seed(0)
        output = sd_pipe([prompt], generator=generator, guidance_scale=9.0, num_inference_steps=2_0, output_type='np')

        image = output.images

        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 5_1_2, 5_1_2, 3)
        expected_slice = np.array([0.04_47, 0.04_92, 0.04_68, 0.04_08, 0.03_83, 0.04_08, 0.03_54, 0.03_80, 0.03_39])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
    def test_stable_diffusion_2(self):
        sd_pipe = StableDiffusionKDiffusionPipeline.from_pretrained('stabilityai/stable-diffusion-2-1-base')
        sd_pipe = sd_pipe.to(torch_device)
        sd_pipe.set_progress_bar_config(disable=None)

        sd_pipe.set_scheduler('sample_euler')

        prompt = 'A painting of a squirrel eating a burger'
        generator = torch.manual_seed(0)
        output = sd_pipe([prompt], generator=generator, guidance_scale=9.0, num_inference_steps=2_0, output_type='np')

        image = output.images

        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 5_1_2, 5_1_2, 3)
        expected_slice = np.array([0.12_37, 0.13_20, 0.14_38, 0.13_59, 0.13_90, 0.11_32, 0.12_77, 0.11_75, 0.11_12])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 5e-1
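    # Same pipeline, but driven by the DPM++ 2M sampler with the Karras sigma
    # schedule instead of plain Euler sampling.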
    def test_stable_diffusion_karras_sigmas(self):
        sd_pipe = StableDiffusionKDiffusionPipeline.from_pretrained('stabilityai/stable-diffusion-2-1-base')
        sd_pipe = sd_pipe.to(torch_device)
        sd_pipe.set_progress_bar_config(disable=None)

        sd_pipe.set_scheduler('sample_dpmpp_2m')

        prompt = 'A painting of a squirrel eating a burger'
        generator = torch.manual_seed(0)
        output = sd_pipe(
            [prompt], generator=generator, guidance_scale=7.5, num_inference_steps=1_5, output_type='np', use_karras_sigmas=True, )

        image = output.images

        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 5_1_2, 5_1_2, 3)
        expected_slice = np.array(
            [0.11_38_16_89, 0.12_11_29_21, 0.1_38_94_57, 0.12_54_96_06, 0.1_24_49_64, 0.10_83_15_17, 0.11_56_28_66, 0.10_86_78_16, 0.10_49_90_48])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
| 421 | 0 |
"""simple docstring"""
import os
import sys
import unittest
git_repo_path = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, '''utils'''))
import get_test_info # noqa: E402
from get_test_info import ( # noqa: E402
get_model_to_test_mapping,
get_model_to_tester_mapping,
get_test_to_tester_mapping,
)
BERT_TEST_FILE = os.path.join('''tests''', '''models''', '''bert''', '''test_modeling_bert.py''')
BLIP_TEST_FILE = os.path.join('''tests''', '''models''', '''blip''', '''test_modeling_blip.py''')
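# Each test below builds its mapping for a single-model file (BERT) and a
# multi-model file (BLIP) and compares the result against hand-written expectations.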
class GetTestInfoTester(unittest.TestCase):
    def test_get_test_to_tester_mapping(self):
        bert_test_tester_mapping = get_test_to_tester_mapping(BERT_TEST_FILE)
        blip_test_tester_mapping = get_test_to_tester_mapping(BLIP_TEST_FILE)

        EXPECTED_BERT_MAPPING = {"BertModelTest": "BertModelTester"}

        EXPECTED_BLIP_MAPPING = {
            "BlipModelTest": "BlipModelTester",
            "BlipTextImageModelTest": "BlipTextImageModelsModelTester",
            "BlipTextModelTest": "BlipTextModelTester",
            "BlipTextRetrievalModelTest": "BlipTextRetrievalModelTester",
            "BlipVQAModelTest": "BlipVQAModelTester",
            "BlipVisionModelTest": "BlipVisionModelTester",
        }

        self.assertEqual(get_test_info.to_json(bert_test_tester_mapping), EXPECTED_BERT_MAPPING)
        self.assertEqual(get_test_info.to_json(blip_test_tester_mapping), EXPECTED_BLIP_MAPPING)
    def test_get_model_to_test_mapping(self):
        bert_model_test_mapping = get_model_to_test_mapping(BERT_TEST_FILE)
        blip_model_test_mapping = get_model_to_test_mapping(BLIP_TEST_FILE)

        EXPECTED_BERT_MAPPING = {
            "BertForMaskedLM": ["BertModelTest"],
            "BertForMultipleChoice": ["BertModelTest"],
            "BertForNextSentencePrediction": ["BertModelTest"],
            "BertForPreTraining": ["BertModelTest"],
            "BertForQuestionAnswering": ["BertModelTest"],
            "BertForSequenceClassification": ["BertModelTest"],
            "BertForTokenClassification": ["BertModelTest"],
            "BertLMHeadModel": ["BertModelTest"],
            "BertModel": ["BertModelTest"],
        }

        EXPECTED_BLIP_MAPPING = {
            "BlipForConditionalGeneration": ["BlipTextImageModelTest"],
            "BlipForImageTextRetrieval": ["BlipTextRetrievalModelTest"],
            "BlipForQuestionAnswering": ["BlipVQAModelTest"],
            "BlipModel": ["BlipModelTest"],
            "BlipTextModel": ["BlipTextModelTest"],
            "BlipVisionModel": ["BlipVisionModelTest"],
        }

        self.assertEqual(get_test_info.to_json(bert_model_test_mapping), EXPECTED_BERT_MAPPING)
        self.assertEqual(get_test_info.to_json(blip_model_test_mapping), EXPECTED_BLIP_MAPPING)
    def test_get_model_to_tester_mapping(self):
        bert_model_tester_mapping = get_model_to_tester_mapping(BERT_TEST_FILE)
        blip_model_tester_mapping = get_model_to_tester_mapping(BLIP_TEST_FILE)

        EXPECTED_BERT_MAPPING = {
            "BertForMaskedLM": ["BertModelTester"],
            "BertForMultipleChoice": ["BertModelTester"],
            "BertForNextSentencePrediction": ["BertModelTester"],
            "BertForPreTraining": ["BertModelTester"],
            "BertForQuestionAnswering": ["BertModelTester"],
            "BertForSequenceClassification": ["BertModelTester"],
            "BertForTokenClassification": ["BertModelTester"],
            "BertLMHeadModel": ["BertModelTester"],
            "BertModel": ["BertModelTester"],
        }

        EXPECTED_BLIP_MAPPING = {
            "BlipForConditionalGeneration": ["BlipTextImageModelsModelTester"],
            "BlipForImageTextRetrieval": ["BlipTextRetrievalModelTester"],
            "BlipForQuestionAnswering": ["BlipVQAModelTester"],
            "BlipModel": ["BlipModelTester"],
            "BlipTextModel": ["BlipTextModelTester"],
            "BlipVisionModel": ["BlipVisionModelTester"],
        }

        self.assertEqual(get_test_info.to_json(bert_model_tester_mapping), EXPECTED_BERT_MAPPING)
        self.assertEqual(get_test_info.to_json(blip_model_tester_mapping), EXPECTED_BLIP_MAPPING) | 46 | """simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

ROC_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    'weiweishi/roc-bert-base-zh': 'https://huggingface.co/weiweishi/roc-bert-base-zh/resolve/main/config.json',
}
class RoCBertConfig(PretrainedConfig):
    model_type = """roc_bert"""

    def __init__(
        self,
        vocab_size=3_0_5_2_2,
        hidden_size=7_6_8,
        num_hidden_layers=1_2,
        num_attention_heads=1_2,
        intermediate_size=3_0_7_2,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=5_1_2,
        type_vocab_size=2,
        initializer_range=0.0_2,
        layer_norm_eps=1E-12,
        use_cache=True,
        pad_token_id=0,
        position_embedding_type="absolute",
        classifier_dropout=None,
        enable_pronunciation=True,
        enable_shape=True,
        pronunciation_embed_dim=7_6_8,
        pronunciation_vocab_size=9_1_0,
        shape_embed_dim=5_1_2,
        shape_vocab_size=2_4_8_5_8,
        concat_input=True,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps
        self.use_cache = use_cache
        self.enable_pronunciation = enable_pronunciation
        self.enable_shape = enable_shape
        self.pronunciation_embed_dim = pronunciation_embed_dim
        self.pronunciation_vocab_size = pronunciation_vocab_size
        self.shape_embed_dim = shape_embed_dim
        self.shape_vocab_size = shape_vocab_size
        self.concat_input = concat_input
        self.position_embedding_type = position_embedding_type
        self.classifier_dropout = classifier_dropout
        super().__init__(pad_token_id=pad_token_id, **kwargs)
| 473 | 0 |
'''simple docstring'''
from __future__ import annotations
import string
from itertools import cycle, product
from pathlib import Path
VALID_CHARS = (
    string.ascii_letters + string.digits + string.punctuation + string.whitespace
)
LOWERCASE_INTS = [ord(letter) for letter in string.ascii_lowercase]
VALID_INTS = {ord(char) for char in VALID_CHARS}
COMMON_WORDS = ['the', 'be', 'to', 'of', 'and', 'in', 'that', 'have']
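# Project Euler 59: brute-force a repeating three-letter lowercase XOR key, keep
# the candidate plaintexts made entirely of printable ASCII, then narrow them
# down by requiring common English words to appear.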
def try_key(ciphertext: list[int], key: tuple[int, ...]) -> str | None:
    decoded = ""
    keychar: int
    cipherchar: int
    decodedchar: int

    for keychar, cipherchar in zip(cycle(key), ciphertext):
        decodedchar = cipherchar ^ keychar
        if decodedchar not in VALID_INTS:
            return None
        decoded += chr(decodedchar)

    return decoded
def filter_valid_chars(ciphertext: list[int]) -> list[str]:
    possibles = []
    for key in product(LOWERCASE_INTS, repeat=3):
        encoded = try_key(ciphertext, key)
        if encoded is not None:
            possibles.append(encoded)
    return possibles
def filter_common_word(possibles: list[str], common_word: str) -> list[str]:
    return [possible for possible in possibles if common_word in possible.lower()]
def solution(filename: str = "p059_cipher.txt") -> int:
    ciphertext: list[int]
    possibles: list[str]
    common_word: str
    decoded_text: str

    data = Path(__file__).parent.joinpath(filename).read_text(encoding='''utf-8''')

    ciphertext = [int(number) for number in data.strip().split(''',''')]

    possibles = filter_valid_chars(ciphertext)
    for common_word in COMMON_WORDS:
        possibles = filter_common_word(possibles, common_word)
        if len(possibles) == 1:
            break

    decoded_text = possibles[0]
    return sum(ord(char) for char in decoded_text)
if __name__ == "__main__":
print(F'''{solution() = }''')
| 708 |
'''simple docstring'''
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import LevitImageProcessor
class LevitImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=1_8,
        min_resolution=3_0,
        max_resolution=4_0_0,
        do_resize=True,
        size=None,
        do_center_crop=True,
        crop_size=None,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
    ):
        size = size if size is not None else {'''shortest_edge''': 1_8}
        crop_size = crop_size if crop_size is not None else {'''height''': 1_8, '''width''': 1_8}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
    def prepare_image_processor_dict(self):
        return {
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_normalize": self.do_normalize,
            "do_resize": self.do_resize,
            "do_center_crop": self.do_center_crop,
            "size": self.size,
            "crop_size": self.crop_size,
        }
@require_torch
@require_vision
class LevitImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = LevitImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = LevitImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()
    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, '''image_mean'''))
        self.assertTrue(hasattr(image_processing, '''image_std'''))
        self.assertTrue(hasattr(image_processing, '''do_normalize'''))
        self.assertTrue(hasattr(image_processing, '''do_resize'''))
        self.assertTrue(hasattr(image_processing, '''do_center_crop'''))
        self.assertTrue(hasattr(image_processing, '''size'''))
    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {'''shortest_edge''': 1_8})
        self.assertEqual(image_processor.crop_size, {'''height''': 1_8, '''width''': 1_8})

        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=4_2, crop_size=8_4)
        self.assertEqual(image_processor.size, {'''shortest_edge''': 4_2})
        self.assertEqual(image_processor.crop_size, {'''height''': 8_4, '''width''': 8_4})
    def test_batch_feature(self):
        pass
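    # The three tests below push PIL, NumPy, and PyTorch inputs through the same
    # preprocessing path and check the unbatched and batched output shapes.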
    def test_call_pil(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors='''pt''').pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size['''height'''],
                self.image_processor_tester.crop_size['''width'''],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors='''pt''').pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size['''height'''],
                self.image_processor_tester.crop_size['''width'''],
            ),
        )
    def test_call_numpy(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors='''pt''').pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size['''height'''],
                self.image_processor_tester.crop_size['''width'''],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors='''pt''').pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size['''height'''],
                self.image_processor_tester.crop_size['''width'''],
            ),
        )
    def test_call_pytorch(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors='''pt''').pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size['''height'''],
                self.image_processor_tester.crop_size['''width'''],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors='''pt''').pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size['''height'''],
                self.image_processor_tester.crop_size['''width'''],
            ),
        )
| 350 | 0 |
'''simple docstring'''
import math
from datetime import datetime, timedelta
def __A ( a_ : int ):
lowerCAmelCase : Any = year % 1_9
lowerCAmelCase : str = year % 4
lowerCAmelCase : Union[str, Any] = year % 7
lowerCAmelCase : Optional[int] = math.floor(year / 1_0_0 )
lowerCAmelCase : Optional[Any] = math.floor((1_3 + 8 * leap_day_inhibits) / 2_5 )
lowerCAmelCase : Any = leap_day_inhibits / 4
lowerCAmelCase : str = (
1_5 - lunar_orbit_correction + leap_day_inhibits - leap_day_reinstall_number
) % 3_0
lowerCAmelCase : Dict = (4 + leap_day_inhibits - leap_day_reinstall_number) % 7
# days to be added to March 21
lowerCAmelCase : Optional[int] = (1_9 * metonic_cycle + secular_moon_shift) % 3_0
# PHM -> Paschal Full Moon
lowerCAmelCase : List[Any] = (
2 * julian_leap_year
+ 4 * non_leap_year
+ 6 * days_to_add
+ century_starting_point
) % 7
if days_to_add == 2_9 and days_from_phm_to_sunday == 6:
return datetime(a_ ,4 ,1_9 )
elif days_to_add == 2_8 and days_from_phm_to_sunday == 6:
return datetime(a_ ,4 ,1_8 )
else:
return datetime(a_ ,3 ,2_2 ) + timedelta(
days=int(days_to_add + days_from_phm_to_sunday ) )
if __name__ == "__main__":
for year in (19_94, 20_00, 20_10, 20_21, 20_23):
lowerCAmelCase = 'will be' if year > datetime.now().year else 'was'
print(F'''Easter in {year} {tense} {gauss_easter(year)}''')
| 525 |
from typing import Dict, List, Optional, Union
import numpy as np
from transformers.utils import is_vision_available
from transformers.utils.generic import TensorType
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
is_valid_image,
to_numpy_array,
valid_images,
)
from ...utils import logging
if is_vision_available():
import PIL
__SCREAMING_SNAKE_CASE : Optional[int] = logging.get_logger(__name__)
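# Normalizes raw video input into a batch of frame lists: a batch of videos
# passes through unchanged, a single video becomes a one-element batch, and a
# single frame becomes a one-frame video inside a one-element batch.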
def make_batched( videos ):
    '''simple docstring'''
    if isinstance(videos , (list, tuple) ) and isinstance(videos[0] , (list, tuple) ) and is_valid_image(videos[0][0] ):
        return videos
    elif isinstance(videos , (list, tuple) ) and is_valid_image(videos[0] ):
        return [videos]
    elif is_valid_image(videos ):
        return [[videos]]
    raise ValueError(f'''Could not make batched video from {videos}''' )
class VivitImageProcessor( BaseImageProcessor ):
    model_input_names = ["pixel_values"]
    def __init__( self , do_resize : bool = True , size : Dict[str, int] = None , resample : PILImageResampling = PILImageResampling.BILINEAR , do_center_crop : bool = True , crop_size : Dict[str, int] = None , do_rescale : bool = True , rescale_factor : Union[int, float] = 1 / 2_5_5 , offset : bool = True , do_normalize : bool = True , image_mean : Optional[Union[float, List[float]]] = None , image_std : Optional[Union[float, List[float]]] = None , **kwargs , ):
        super().__init__(**kwargs )
        size = size if size is not None else {'''shortest_edge''': 2_5_6}
        size = get_size_dict(size , default_to_square=False )
        crop_size = crop_size if crop_size is not None else {'''height''': 2_2_4, '''width''': 2_2_4}
        crop_size = get_size_dict(crop_size , param_name='''crop_size''' )
        self.do_resize = do_resize
        self.size = size
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.resample = resample
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.offset = offset
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD
    def resize( self , image : np.ndarray , size : Dict[str, int] , resample : PILImageResampling = PILImageResampling.BILINEAR , data_format : Optional[Union[str, ChannelDimension]] = None , **kwargs , ):
        size = get_size_dict(size , default_to_square=False )
        if "shortest_edge" in size:
            output_size = get_resize_output_image_size(image , size['''shortest_edge'''] , default_to_square=False )
        elif "height" in size and "width" in size:
            output_size = (size['''height'''], size['''width'''])
        else:
            raise ValueError(f'''Size must have \'height\' and \'width\' or \'shortest_edge\' as keys. Got {size.keys()}''' )
        return resize(image , size=output_size , resample=resample , data_format=data_format , **kwargs )
    def center_crop( self , image : np.ndarray , size : Dict[str, int] , data_format : Optional[Union[str, ChannelDimension]] = None , **kwargs , ):
        size = get_size_dict(size )
        if "height" not in size or "width" not in size:
            raise ValueError(f'''Size must have \'height\' and \'width\' as keys. Got {size.keys()}''' )
        return center_crop(image , size=(size['''height'''], size['''width''']) , data_format=data_format , **kwargs )
    def rescale( self , image : np.ndarray , scale : Union[int, float] , offset : bool = True , data_format : Optional[Union[str, ChannelDimension]] = None , **kwargs , ):
        image = image.astype(np.float32 )
        if offset:
            image = image - (scale / 2)
        return rescale(image , scale=scale , data_format=data_format , **kwargs )
    def normalize( self , image : np.ndarray , mean : Union[float, List[float]] , std : Union[float, List[float]] , data_format : Optional[Union[str, ChannelDimension]] = None , **kwargs , ):
        return normalize(image , mean=mean , std=std , data_format=data_format , **kwargs )
    def _preprocess_image( self , image : ImageInput , do_resize : bool = None , size : Dict[str, int] = None , resample : PILImageResampling = None , do_center_crop : bool = None , crop_size : Dict[str, int] = None , do_rescale : bool = None , rescale_factor : float = None , offset : bool = None , do_normalize : bool = None , image_mean : Optional[Union[float, List[float]]] = None , image_std : Optional[Union[float, List[float]]] = None , data_format : Optional[ChannelDimension] = ChannelDimension.FIRST , ):
        if do_resize and size is None or resample is None:
            raise ValueError('''Size and resample must be specified if do_resize is True.''' )
        if do_center_crop and crop_size is None:
            raise ValueError('''Crop size must be specified if do_center_crop is True.''' )
        if do_rescale and rescale_factor is None:
            raise ValueError('''Rescale factor must be specified if do_rescale is True.''' )
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError('''Image mean and std must be specified if do_normalize is True.''' )
        if offset and not do_rescale:
            raise ValueError('''For offset, do_rescale must also be set to True.''' )
        # All transformations expect numpy arrays.
        image = to_numpy_array(image )
        if do_resize:
            image = self.resize(image=image , size=size , resample=resample )
        if do_center_crop:
            image = self.center_crop(image , size=crop_size )
        if do_rescale:
            image = self.rescale(image=image , scale=rescale_factor , offset=offset )
        if do_normalize:
            image = self.normalize(image=image , mean=image_mean , std=image_std )
        image = to_channel_dimension_format(image , data_format )
        return image
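    # Each video frame passes through the per-image pipeline above:
    # resize -> center crop -> rescale (optionally zero-offset) -> normalize -> channel-first layout.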
    def preprocess( self , videos : ImageInput , do_resize : bool = None , size : Dict[str, int] = None , resample : PILImageResampling = None , do_center_crop : bool = None , crop_size : Dict[str, int] = None , do_rescale : bool = None , rescale_factor : float = None , offset : bool = None , do_normalize : bool = None , image_mean : Optional[Union[float, List[float]]] = None , image_std : Optional[Union[float, List[float]]] = None , return_tensors : Optional[Union[str, TensorType]] = None , data_format : ChannelDimension = ChannelDimension.FIRST , **kwargs , ):
        do_resize = do_resize if do_resize is not None else self.do_resize
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        offset = offset if offset is not None else self.offset
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        size = size if size is not None else self.size
        size = get_size_dict(size , default_to_square=False )
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size , param_name='''crop_size''' )
        if not valid_images(videos ):
            raise ValueError(
                '''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '''
                '''torch.Tensor, tf.Tensor or jax.ndarray.''' )
        videos = make_batched(videos )
        videos = [
            [
                self._preprocess_image(
                    image=img , do_resize=do_resize , size=size , resample=resample , do_center_crop=do_center_crop , crop_size=crop_size , do_rescale=do_rescale , rescale_factor=rescale_factor , offset=offset , do_normalize=do_normalize , image_mean=image_mean , image_std=image_std , data_format=data_format , )
                for img in video
            ]
            for video in videos
        ]
        data = {'''pixel_values''': videos}
        return BatchFeature(data=data , tensor_type=return_tensors )
| 348 | 0 |
"""simple docstring"""
import requests
from bs4 import BeautifulSoup
def world_covidaa_stats(url: str = "https://www.worldometers.info/coronavirus"):
    soup = BeautifulSoup(requests.get(url).text, '''html.parser''')
    keys = soup.findAll('''h1''')
    values = soup.findAll('''div''', {'''class''': '''maincounter-number'''})
    keys += soup.findAll('''span''', {'''class''': '''panel-title'''})
    values += soup.findAll('''div''', {'''class''': '''number-table-main'''})
    return {key.text.strip(): value.text.strip() for key, value in zip(keys, values)}
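# Note: the CSS classes above reflect a snapshot of the worldometers page
# markup, which can change over time.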
if __name__ == "__main__":
print("\033[1m" + "COVID-19 Status of the World" + "\033[0m\n")
for key, value in world_covidaa_stats().items():
print(F"""{key}\n{value}\n""") | 700 | """simple docstring"""
def selection_sort( collection ):
    length = len(collection)
    for i in range(length - 1):
        least = i
        for k in range(i + 1, length):
            if collection[k] < collection[least]:
                least = k
        if least != i:
            collection[least] , collection[i] = (collection[i], collection[least])
    return collection
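# Example: selection_sort([64, 25, 12, 22, 11]) returns [11, 12, 22, 25, 64].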
if __name__ == "__main__":
__lowercase : Optional[Any] = input("Enter numbers separated by a comma:\n").strip()
__lowercase : Tuple = [int(item) for item in user_input.split(",")]
    print(selection_sort(unsorted))
| 93 | 0 |
from typing import Callable, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
XLM_PROPHETNET_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""microsoft/xprophetnet-large-wiki100-cased""": (
"""https://huggingface.co/microsoft/xprophetnet-large-wiki100-cased/resolve/main/config.json"""
),
}
class A ( PretrainedConfig ):
    model_type = '''xlm-prophetnet'''
    keys_to_ignore_at_inference = ['''past_key_values''']
    attribute_map = {
        '''num_attention_heads''': '''num_encoder_attention_heads''',
    }
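    # attribute_map lets generic code read config.num_attention_heads while the
    # value is stored under num_encoder_attention_heads.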
    def __init__( self , activation_dropout: Optional[float] = 0.1 , activation_function: Optional[Union[str, Callable]] = "gelu" , vocab_size: Optional[int] = 3_0522 , hidden_size: Optional[int] = 1024 , encoder_ffn_dim: Optional[int] = 4096 , num_encoder_layers: Optional[int] = 12 , num_encoder_attention_heads: Optional[int] = 16 , decoder_ffn_dim: Optional[int] = 4096 , num_decoder_layers: Optional[int] = 12 , num_decoder_attention_heads: Optional[int] = 16 , attention_dropout: Optional[float] = 0.1 , dropout: Optional[float] = 0.1 , max_position_embeddings: Optional[int] = 512 , init_std: Optional[float] = 0.02 , is_encoder_decoder: Optional[bool] = True , add_cross_attention: Optional[bool] = True , decoder_start_token_id: Optional[int] = 0 , ngram: Optional[int] = 2 , num_buckets: Optional[int] = 32 , relative_max_distance: Optional[int] = 128 , disable_ngram_loss: Optional[bool] = False , eps: Optional[float] = 0.0 , use_cache: Optional[bool] = True , pad_token_id: Optional[int] = 0 , bos_token_id: Optional[int] = 1 , eos_token_id: Optional[int] = 2 , **kwargs , ) -> Optional[int]:
        '''simple docstring'''
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.encoder_ffn_dim = encoder_ffn_dim
        self.num_encoder_layers = num_encoder_layers
        self.num_encoder_attention_heads = num_encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.num_decoder_layers = num_decoder_layers
        self.num_decoder_attention_heads = num_decoder_attention_heads
        self.max_position_embeddings = max_position_embeddings
        self.init_std = init_std  # Normal(0, this parameter)
        self.activation_function = activation_function
        # parameters for xlmprophetnet
        self.ngram = ngram
        self.num_buckets = num_buckets
        self.relative_max_distance = relative_max_distance
        self.disable_ngram_loss = disable_ngram_loss
        self.eps = eps
        # 3 Types of Dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.dropout = dropout
        self.use_cache = use_cache
        super().__init__(
            pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , is_encoder_decoder=is_encoder_decoder , add_cross_attention=add_cross_attention , decoder_start_token_id=decoder_start_token_id , **kwargs , )
    @property
    def num_hidden_layers( self ) -> int:
        '''simple docstring'''
        return self.num_encoder_layers + self.num_decoder_layers
    @num_hidden_layers.setter
    def num_hidden_layers( self , value ):
        '''simple docstring'''
        raise NotImplementedError(
            "This model does not support the setting of `num_hidden_layers`. Please set `num_encoder_layers` and"
            " `num_decoder_layers`." )
| 54 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
logger = logging.get_logger(__name__)
FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""microsoft/focalnet-tiny""": """https://huggingface.co/microsoft/focalnet-tiny/resolve/main/config.json""",
}
class FocalNetConfig( BackboneConfigMixin , PretrainedConfig ):
    model_type = 'focalnet'
    def __init__( self , image_size=224 , patch_size=4 , num_channels=3 , embed_dim=96 , use_conv_embed=False , hidden_sizes=[192, 384, 768, 768] , depths=[2, 2, 6, 2] , focal_levels=[2, 2, 2, 2] , focal_windows=[3, 3, 3, 3] , hidden_act="gelu" , mlp_ratio=4.0 , hidden_dropout_prob=0.0 , drop_path_rate=0.1 , use_layerscale=False , layerscale_value=1e-4 , use_post_layernorm=False , use_post_layernorm_in_modulation=False , normalize_modulator=False , initializer_range=0.02 , layer_norm_eps=1e-5 , encoder_stride=32 , out_features=None , out_indices=None , **kwargs , ):
        super().__init__(**kwargs )
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.use_conv_embed = use_conv_embed
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.focal_levels = focal_levels
        self.focal_windows = focal_windows
        self.hidden_act = hidden_act
        self.mlp_ratio = mlp_ratio
        self.hidden_dropout_prob = hidden_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.use_layerscale = use_layerscale
        self.layerscale_value = layerscale_value
        self.use_post_layernorm = use_post_layernorm
        self.use_post_layernorm_in_modulation = use_post_layernorm_in_modulation
        self.normalize_modulator = normalize_modulator
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.encoder_stride = encoder_stride
        self.stage_names = ['''stem'''] + [f"""stage{idx}""" for idx in range(1 , len(self.depths ) + 1 )]
        self._out_features , self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features , out_indices=out_indices , stage_names=self.stage_names )
| 678 | 0 |
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_bert import BertTokenizer
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}
PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"bert-base-uncased": "https://huggingface.co/bert-base-uncased/resolve/main/vocab.txt",
"bert-large-uncased": "https://huggingface.co/bert-large-uncased/resolve/main/vocab.txt",
"bert-base-cased": "https://huggingface.co/bert-base-cased/resolve/main/vocab.txt",
"bert-large-cased": "https://huggingface.co/bert-large-cased/resolve/main/vocab.txt",
"bert-base-multilingual-uncased": (
"https://huggingface.co/bert-base-multilingual-uncased/resolve/main/vocab.txt"
),
"bert-base-multilingual-cased": "https://huggingface.co/bert-base-multilingual-cased/resolve/main/vocab.txt",
"bert-base-chinese": "https://huggingface.co/bert-base-chinese/resolve/main/vocab.txt",
"bert-base-german-cased": "https://huggingface.co/bert-base-german-cased/resolve/main/vocab.txt",
"bert-large-uncased-whole-word-masking": (
"https://huggingface.co/bert-large-uncased-whole-word-masking/resolve/main/vocab.txt"
),
"bert-large-cased-whole-word-masking": (
"https://huggingface.co/bert-large-cased-whole-word-masking/resolve/main/vocab.txt"
),
"bert-large-uncased-whole-word-masking-finetuned-squad": (
"https://huggingface.co/bert-large-uncased-whole-word-masking-finetuned-squad/resolve/main/vocab.txt"
),
"bert-large-cased-whole-word-masking-finetuned-squad": (
"https://huggingface.co/bert-large-cased-whole-word-masking-finetuned-squad/resolve/main/vocab.txt"
),
"bert-base-cased-finetuned-mrpc": (
"https://huggingface.co/bert-base-cased-finetuned-mrpc/resolve/main/vocab.txt"
),
"bert-base-german-dbmdz-cased": "https://huggingface.co/bert-base-german-dbmdz-cased/resolve/main/vocab.txt",
"bert-base-german-dbmdz-uncased": (
"https://huggingface.co/bert-base-german-dbmdz-uncased/resolve/main/vocab.txt"
),
"TurkuNLP/bert-base-finnish-cased-v1": (
"https://huggingface.co/TurkuNLP/bert-base-finnish-cased-v1/resolve/main/vocab.txt"
),
"TurkuNLP/bert-base-finnish-uncased-v1": (
"https://huggingface.co/TurkuNLP/bert-base-finnish-uncased-v1/resolve/main/vocab.txt"
),
"wietsedv/bert-base-dutch-cased": (
"https://huggingface.co/wietsedv/bert-base-dutch-cased/resolve/main/vocab.txt"
),
},
"tokenizer_file": {
"bert-base-uncased": "https://huggingface.co/bert-base-uncased/resolve/main/tokenizer.json",
"bert-large-uncased": "https://huggingface.co/bert-large-uncased/resolve/main/tokenizer.json",
"bert-base-cased": "https://huggingface.co/bert-base-cased/resolve/main/tokenizer.json",
"bert-large-cased": "https://huggingface.co/bert-large-cased/resolve/main/tokenizer.json",
"bert-base-multilingual-uncased": (
"https://huggingface.co/bert-base-multilingual-uncased/resolve/main/tokenizer.json"
),
"bert-base-multilingual-cased": (
"https://huggingface.co/bert-base-multilingual-cased/resolve/main/tokenizer.json"
),
"bert-base-chinese": "https://huggingface.co/bert-base-chinese/resolve/main/tokenizer.json",
"bert-base-german-cased": "https://huggingface.co/bert-base-german-cased/resolve/main/tokenizer.json",
"bert-large-uncased-whole-word-masking": (
"https://huggingface.co/bert-large-uncased-whole-word-masking/resolve/main/tokenizer.json"
),
"bert-large-cased-whole-word-masking": (
"https://huggingface.co/bert-large-cased-whole-word-masking/resolve/main/tokenizer.json"
),
"bert-large-uncased-whole-word-masking-finetuned-squad": (
"https://huggingface.co/bert-large-uncased-whole-word-masking-finetuned-squad/resolve/main/tokenizer.json"
),
"bert-large-cased-whole-word-masking-finetuned-squad": (
"https://huggingface.co/bert-large-cased-whole-word-masking-finetuned-squad/resolve/main/tokenizer.json"
),
"bert-base-cased-finetuned-mrpc": (
"https://huggingface.co/bert-base-cased-finetuned-mrpc/resolve/main/tokenizer.json"
),
"bert-base-german-dbmdz-cased": (
"https://huggingface.co/bert-base-german-dbmdz-cased/resolve/main/tokenizer.json"
),
"bert-base-german-dbmdz-uncased": (
"https://huggingface.co/bert-base-german-dbmdz-uncased/resolve/main/tokenizer.json"
),
"TurkuNLP/bert-base-finnish-cased-v1": (
"https://huggingface.co/TurkuNLP/bert-base-finnish-cased-v1/resolve/main/tokenizer.json"
),
"TurkuNLP/bert-base-finnish-uncased-v1": (
"https://huggingface.co/TurkuNLP/bert-base-finnish-uncased-v1/resolve/main/tokenizer.json"
),
"wietsedv/bert-base-dutch-cased": (
"https://huggingface.co/wietsedv/bert-base-dutch-cased/resolve/main/tokenizer.json"
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"bert-base-uncased": 5_1_2,
"bert-large-uncased": 5_1_2,
"bert-base-cased": 5_1_2,
"bert-large-cased": 5_1_2,
"bert-base-multilingual-uncased": 5_1_2,
"bert-base-multilingual-cased": 5_1_2,
"bert-base-chinese": 5_1_2,
"bert-base-german-cased": 5_1_2,
"bert-large-uncased-whole-word-masking": 5_1_2,
"bert-large-cased-whole-word-masking": 5_1_2,
"bert-large-uncased-whole-word-masking-finetuned-squad": 5_1_2,
"bert-large-cased-whole-word-masking-finetuned-squad": 5_1_2,
"bert-base-cased-finetuned-mrpc": 5_1_2,
"bert-base-german-dbmdz-cased": 5_1_2,
"bert-base-german-dbmdz-uncased": 5_1_2,
"TurkuNLP/bert-base-finnish-cased-v1": 5_1_2,
"TurkuNLP/bert-base-finnish-uncased-v1": 5_1_2,
"wietsedv/bert-base-dutch-cased": 5_1_2,
}
PRETRAINED_INIT_CONFIGURATION = {
"bert-base-uncased": {"do_lower_case": True},
"bert-large-uncased": {"do_lower_case": True},
"bert-base-cased": {"do_lower_case": False},
"bert-large-cased": {"do_lower_case": False},
"bert-base-multilingual-uncased": {"do_lower_case": True},
"bert-base-multilingual-cased": {"do_lower_case": False},
"bert-base-chinese": {"do_lower_case": False},
"bert-base-german-cased": {"do_lower_case": False},
"bert-large-uncased-whole-word-masking": {"do_lower_case": True},
"bert-large-cased-whole-word-masking": {"do_lower_case": False},
"bert-large-uncased-whole-word-masking-finetuned-squad": {"do_lower_case": True},
"bert-large-cased-whole-word-masking-finetuned-squad": {"do_lower_case": False},
"bert-base-cased-finetuned-mrpc": {"do_lower_case": False},
"bert-base-german-dbmdz-cased": {"do_lower_case": False},
"bert-base-german-dbmdz-uncased": {"do_lower_case": True},
"TurkuNLP/bert-base-finnish-cased-v1": {"do_lower_case": False},
"TurkuNLP/bert-base-finnish-uncased-v1": {"do_lower_case": True},
"wietsedv/bert-base-dutch-cased": {"do_lower_case": False},
}
class BertTokenizerFast( PreTrainedTokenizerFast ):
    """simple docstring"""
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = BertTokenizer
    def __init__( self , vocab_file=None , tokenizer_file=None , do_lower_case=True , unk_token="[UNK]" , sep_token="[SEP]" , pad_token="[PAD]" , cls_token="[CLS]" , mask_token="[MASK]" , tokenize_chinese_chars=True , strip_accents=None , **kwargs , ):
        """simple docstring"""
        super().__init__(
            vocab_file , tokenizer_file=tokenizer_file , do_lower_case=do_lower_case , unk_token=unk_token , sep_token=sep_token , pad_token=pad_token , cls_token=cls_token , mask_token=mask_token , tokenize_chinese_chars=tokenize_chinese_chars , strip_accents=strip_accents , **kwargs , )
        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
        if (
            normalizer_state.get('''lowercase''' , do_lower_case ) != do_lower_case
            or normalizer_state.get('''strip_accents''' , strip_accents ) != strip_accents
            or normalizer_state.get('''handle_chinese_chars''' , tokenize_chinese_chars ) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers , normalizer_state.pop('''type''' ) )
            normalizer_state['''lowercase'''] = do_lower_case
            normalizer_state['''strip_accents'''] = strip_accents
            normalizer_state['''handle_chinese_chars'''] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state )
        self.do_lower_case = do_lower_case
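    # The two helpers below produce the standard BERT input layout:
    # [CLS] A [SEP] for a single sequence, [CLS] A [SEP] B [SEP] for a pair.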
    def build_inputs_with_special_tokens( self , token_ids_0 , token_ids_1=None ):
        """simple docstring"""
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]
        return output
    def create_token_type_ids_from_sequences( self , token_ids_0 , token_ids_1 = None ) -> List[int]:
        """simple docstring"""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep ) * [0]
        return len(cls + token_ids_0 + sep ) * [0] + len(token_ids_1 + sep ) * [1]
    def save_vocabulary( self , save_directory , filename_prefix = None ) -> Tuple[str]:
        """simple docstring"""
        files = self._tokenizer.model.save(save_directory , name=filename_prefix )
        return tuple(files )
| 705 |
from argparse import ArgumentParser
from ..pipelines import Pipeline, PipelineDataFormat, get_supported_tasks, pipeline
from ..utils import logging
from . import BaseTransformersCLICommand
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name
def try_infer_format_from_ext( path ):
    if not path:
        return "pipe"
    for ext in PipelineDataFormat.SUPPORTED_FORMATS:
        if path.endswith(ext ):
            return ext
    raise Exception(
        f'''Unable to determine file format from file extension {path}. '''
        f'''Please provide the format through --format {PipelineDataFormat.SUPPORTED_FORMATS}''' )
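# e.g. try_infer_format_from_ext('data.csv') -> 'csv'; an empty path selects the stdin/stdout "pipe" format.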
def run_command_factory( args ):
    nlp = pipeline(
        task=args.task , model=args.model if args.model else None , config=args.config , tokenizer=args.tokenizer , device=args.device , )
    format = try_infer_format_from_ext(args.input ) if args.format == '''infer''' else args.format
    reader = PipelineDataFormat.from_str(
        format=format , output_path=args.output , input_path=args.input , column=args.column if args.column else nlp.default_input_names , overwrite=args.overwrite , )
    return RunCommand(nlp , reader )
class RunCommand( BaseTransformersCLICommand ):
    """simple docstring"""
    def __init__( self , nlp , reader ):
        """simple docstring"""
        self._nlp = nlp
        self._reader = reader
    @staticmethod
    def register_subcommand( parser ):
        """simple docstring"""
        run_parser = parser.add_parser('''run''' , help='''Run a pipeline through the CLI''' )
        run_parser.add_argument('''--task''' , choices=get_supported_tasks() , help='''Task to run''' )
        run_parser.add_argument('''--input''' , type=str , help='''Path to the file to use for inference''' )
        run_parser.add_argument('''--output''' , type=str , help='''Path to the file that will be used post to write results.''' )
        run_parser.add_argument('''--model''' , type=str , help='''Name or path to the model to instantiate.''' )
        run_parser.add_argument('''--config''' , type=str , help='''Name or path to the model\'s config to instantiate.''' )
        run_parser.add_argument(
            '''--tokenizer''' , type=str , help='''Name of the tokenizer to use. (default: same as the model name)''' )
        run_parser.add_argument(
            '''--column''' , type=str , help='''Name of the column to use as input. (For multi columns input as QA use column1,columns2)''' , )
        run_parser.add_argument(
            '''--format''' , type=str , default='''infer''' , choices=PipelineDataFormat.SUPPORTED_FORMATS , help='''Input format to read from''' , )
        run_parser.add_argument(
            '''--device''' , type=int , default=-1 , help='''Indicate the device to run onto, -1 indicates CPU, >= 0 indicates GPU (default: -1)''' , )
        run_parser.add_argument('''--overwrite''' , action='''store_true''' , help='''Allow overwriting the output file.''' )
        run_parser.set_defaults(func=run_command_factory )
    def run( self ):
        """simple docstring"""
        nlp , outputs = self._nlp, []
        for entry in self._reader:
            output = nlp(**entry ) if self._reader.is_multi_columns else nlp(entry )
            if isinstance(output , dict ):
                outputs.append(output )
            else:
                outputs += output
        # Saving data
        if self._nlp.binary_output:
            binary_path = self._reader.save_binary(outputs )
            logger.warning(F'''Current pipeline requires output to be in binary format, saving at {binary_path}''' )
        else:
            self._reader.save(outputs )
| 462 | 0 |
from __future__ import annotations
from collections import Counter
from random import random
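# A directed graph with edge probabilities; transition() samples the next node
# by inverse-CDF sampling over the current node's outgoing probabilities.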
class MarkovChainGraphUndirectedUnweighted:
    """simple docstring"""
    def __init__( self ):
        self.connections = {}
    def add_node( self , node ):
        self.connections[node] = {}
    def add_transition_probability( self , node1 , node2 , probability ):
        if node1 not in self.connections:
            self.add_node(node1 )
        if node2 not in self.connections:
            self.add_node(node2 )
        self.connections[node1][node2] = probability
    def get_nodes( self ):
        return list(self.connections )
    def transition( self , node ):
        current_probability = 0
        random_value = random()
        for dest in self.connections[node]:
            current_probability += self.connections[node][dest]
            if current_probability > random_value:
                return dest
        return ""
def get_transitions(start , transitions , steps ):
    graph = MarkovChainGraphUndirectedUnweighted()
    for node1, node2, probability in transitions:
        graph.add_transition_probability(node1 , node2 , probability )
    visited = Counter(graph.get_nodes() )
    node = start
    for _ in range(steps ):
        node = graph.transition(node )
        visited[node] += 1
    return visited
if __name__ == "__main__":
import doctest
doctest.testmod()
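# Example (hypothetical chain):
# get_transitions('a', [('a', 'a', 0.9), ('a', 'b', 0.1), ('b', 'a', 0.5), ('b', 'b', 0.5)], 100)
# returns a Counter of how often each node was visited over the 100 sampled steps.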
| 99 |
import inspect
import unittest
from transformers import DPTConfig
from transformers.file_utils import is_torch_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import MODEL_MAPPING, DPTForDepthEstimation, DPTForSemanticSegmentation, DPTModel
from transformers.models.dpt.modeling_dpt import DPT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import DPTImageProcessor
class DPTModelTester:
    def __init__( self , parent , batch_size=2 , image_size=32 , patch_size=16 , num_channels=3 , is_training=True , use_labels=True , hidden_size=32 , num_hidden_layers=4 , backbone_out_indices=[0, 1, 2, 3] , num_attention_heads=4 , intermediate_size=37 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , initializer_range=0.02 , num_labels=3 , backbone_featmap_shape=[1, 384, 24, 24] , is_hybrid=True , scope=None , ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.backbone_out_indices = backbone_out_indices
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.backbone_featmap_shape = backbone_featmap_shape
        self.scope = scope
        self.is_hybrid = is_hybrid
        # sequence length of DPT = num_patches + 1 (we add 1 for the [CLS] token)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = num_patches + 1
    def prepare_config_and_inputs( self ):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels )
        config = self.get_config()
        return config, pixel_values, labels
    def get_config( self ):
        backbone_config = {
            '''global_padding''': '''same''',
            '''layer_type''': '''bottleneck''',
            '''depths''': [3, 4, 9],
            '''out_features''': ['''stage1''', '''stage2''', '''stage3'''],
            '''embedding_dynamic_padding''': True,
            '''hidden_sizes''': [96, 192, 384, 768],
            '''num_groups''': 2,
        }
        return DPTConfig(
            image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , backbone_out_indices=self.backbone_out_indices , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=False , initializer_range=self.initializer_range , is_hybrid=self.is_hybrid , backbone_config=backbone_config , backbone_featmap_shape=self.backbone_featmap_shape , )
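    # Note: the hybrid DPT variant above wires in a small BiT-style convolutional
    # backbone; backbone_featmap_shape must match that backbone's final feature map.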
    def create_and_check_model( self , config , pixel_values , labels ):
        model = DPTModel(config=config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
    def create_and_check_for_depth_estimation( self , config , pixel_values , labels ):
        config.num_labels = self.num_labels
        model = DPTForDepthEstimation(config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values )
        self.parent.assertEqual(result.predicted_depth.shape , (self.batch_size, self.image_size, self.image_size) )
    def create_and_check_for_semantic_segmentation( self , config , pixel_values , labels ):
        config.num_labels = self.num_labels
        model = DPTForSemanticSegmentation(config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values , labels=labels )
        self.parent.assertEqual(
            result.logits.shape , (self.batch_size, self.num_labels, self.image_size, self.image_size) )
    def prepare_config_and_inputs_for_common( self ):
        config_and_inputs = self.prepare_config_and_inputs()
        config , pixel_values , labels = config_and_inputs
        inputs_dict = {'''pixel_values''': pixel_values}
        return config, inputs_dict
@require_torch
class DPTModelTest( ModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    all_model_classes = (DPTModel, DPTForDepthEstimation, DPTForSemanticSegmentation) if is_torch_available() else ()
    pipeline_model_mapping = (
        {
            'depth-estimation': DPTForDepthEstimation,
            'feature-extraction': DPTModel,
            'image-segmentation': DPTForSemanticSegmentation,
        }
        if is_torch_available()
        else {}
    )
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    def setUp( self ):
        self.model_tester = DPTModelTester(self )
        self.config_tester = ConfigTester(self , config_class=DPTConfig , has_text_modality=False , hidden_size=37 )
    def test_config( self ):
        self.config_tester.run_common_tests()
    @unittest.skip(reason='''DPT does not use inputs_embeds''' )
    def test_inputs_embeds( self ):
        pass
    def test_model_common_attributes( self ):
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config )
            self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x , nn.Linear ) )
    def test_forward_signature( self ):
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config )
            signature = inspect.signature(model.forward )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ['''pixel_values''']
            self.assertListEqual(arg_names[:1] , expected_arg_names )
    def test_model( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs )
    def test_for_depth_estimation( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_depth_estimation(*config_and_inputs )
    def test_for_semantic_segmentation( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_semantic_segmentation(*config_and_inputs )
    def test_training( self ):
        for model_class in self.all_model_classes:
            if model_class.__name__ == "DPTForDepthEstimation":
                continue
            config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
            config.return_dict = True
            if model_class in get_values(MODEL_MAPPING ):
                continue
            model = model_class(config )
            model.to(torch_device )
            model.train()
            inputs = self._prepare_for_class(inputs_dict , model_class , return_labels=True )
            loss = model(**inputs ).loss
            loss.backward()
    def test_training_gradient_checkpointing( self ):
        for model_class in self.all_model_classes:
            if model_class.__name__ == "DPTForDepthEstimation":
                continue
            config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
            config.use_cache = False
            config.return_dict = True
            if model_class in get_values(MODEL_MAPPING ) or not model_class.supports_gradient_checkpointing:
                continue
            model = model_class(config )
            model.to(torch_device )
            model.gradient_checkpointing_enable()
            model.train()
            inputs = self._prepare_for_class(inputs_dict , model_class , return_labels=True )
            loss = model(**inputs ).loss
            loss.backward()
    def test_initialization( self ):
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        configs_no_init = _config_zero_init(config )
        for model_class in self.all_model_classes:
            model = model_class(config=configs_no_init )
            # Skip the check for the backbone
            backbone_params = []
            for name, module in model.named_modules():
                if module.__class__.__name__ == "DPTViTHybridEmbeddings":
                    backbone_params = [f"""{name}.{key}""" for key in module.state_dict().keys()]
                    break
            for name, param in model.named_parameters():
                if param.requires_grad:
                    if name in backbone_params:
                        continue
                    self.assertIn(
                        ((param.data.mean() * 1E9).round() / 1E9).item() , [0.0, 1.0] , msg=f"""Parameter {name} of model {model_class} seems not properly initialized""" , )
    @unittest.skip('''Will be fixed soon by reducing the size of the model used for common tests.''' )
    def test_model_is_small( self ):
        pass
    @slow
    def test_model_from_pretrained( self ):
        for model_name in DPT_PRETRAINED_MODEL_ARCHIVE_LIST[1:]:
            model = DPTModel.from_pretrained(model_name )
            self.assertIsNotNone(model )
    def test_raise_readout_type( self ):
        # We do this test only for DPTForDepthEstimation since it is the only model that uses readout_type
        config , _ = self.model_tester.prepare_config_and_inputs_for_common()
        config.readout_type = '''add'''
        with self.assertRaises(ValueError ):
            _ = DPTForDepthEstimation(config )
def prepare_img():
    """simple docstring"""
    image = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
    return image
@require_torch
@require_vision
@slow
class DPTModelIntegrationTest( unittest.TestCase ):
    def test_inference_depth_estimation( self ):
        image_processor = DPTImageProcessor.from_pretrained('''Intel/dpt-hybrid-midas''' )
        model = DPTForDepthEstimation.from_pretrained('''Intel/dpt-hybrid-midas''' ).to(torch_device )
        image = prepare_img()
        inputs = image_processor(images=image , return_tensors='''pt''' ).to(torch_device )
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs )
        predicted_depth = outputs.predicted_depth
        # verify the predicted depth
        expected_shape = torch.Size((1, 384, 384) )
        self.assertEqual(predicted_depth.shape , expected_shape )
        expected_slice = torch.tensor(
            [[[5.6437, 5.6146, 5.6511], [5.4371, 5.5649, 5.5958], [5.5215, 5.5184, 5.5293]]] ).to(torch_device )
        self.assertTrue(torch.allclose(outputs.predicted_depth[:3, :3, :3] / 100 , expected_slice , atol=1E-4 ) )
| 319 | 0 |
def bin_to_octal( bin_string ):
    if not all(char in '01' for char in bin_string ):
        raise ValueError('Non-binary value was passed to the function' )
    if not bin_string:
        raise ValueError('Empty string was passed to the function' )
    oct_string = ''
    while len(bin_string ) % 3 != 0:
        bin_string = '0' + bin_string
    bin_string_in_3_list = [
        bin_string[index : index + 3]
        for index in range(len(bin_string ) )
        if index % 3 == 0
    ]
    for bin_group in bin_string_in_3_list:
        oct_val = 0
        for index, val in enumerate(bin_group ):
            oct_val += int(2 ** (2 - index) * int(val ) )
        oct_string += str(oct_val )
    return oct_string
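# Example: bin_to_octal('1111') -> '17'; bin_to_octal('101010101') -> '525'.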
if __name__ == "__main__":
from doctest import testmod
    testmod()
| 707 |
'''simple docstring'''
class Things:
    def __init__( self , name , value , weight ):
        self.name = name
        self.value = value
        self.weight = weight
    def __repr__( self ):
        return F"""{self.__class__.__name__}({self.name}, {self.value}, {self.weight})"""
    def get_value( self ):
        return self.value
    def get_name( self ):
        return self.name
    def get_weight( self ):
        return self.weight
    def value_weight( self ):
        return self.value / self.weight
def build_menu( name , value , weight ):
    menu = []
    for i in range(len(value ) ):
        menu.append(Things(name[i] , value[i] , weight[i] ) )
    return menu
def greedy( item , max_cost , key_func ):
    items_copy = sorted(item , key=key_func , reverse=True )
    result = []
    total_value , total_cost = 0.0, 0.0
    for i in range(len(items_copy ) ):
        if (total_cost + items_copy[i].get_weight()) <= max_cost:
            result.append(items_copy[i] )
            total_cost += items_copy[i].get_weight()
            total_value += items_copy[i].get_value()
    return (result, total_value)
def test_greedy():
    pass
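# Example: foods = build_menu(['Burger', 'Pizza'], [80, 100], [40, 10]);
# greedy(foods, 60, Things.get_value) keeps the highest-value items whose
# total weight stays within the 60-unit budget.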
if __name__ == "__main__":
import doctest
    doctest.testmod()
| 195 | 0 |
def set_bit( number , position ):
    return number | (1 << position)
def clear_bit( number , position ):
    return number & ~(1 << position)
def flip_bit( number , position ):
    return number ^ (1 << position)
def is_bit_set( number , position ):
    return ((number >> position) & 1) == 1
def get_bit( number , position ):
    return int((number & (1 << position)) != 0 )
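# Examples: set_bit(0b1101, 1) -> 15 (0b1111); clear_bit(0b10010, 1) -> 16 (0b10000);
# flip_bit(0b101, 1) -> 7 (0b111); is_bit_set(0b1010, 3) -> True; get_bit(0b1010, 0) -> 0.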
if __name__ == "__main__":
import doctest
doctest.testmod()
| 84 |
# This model implementation is heavily inspired by https://github.com/haofanwang/ControlNet-for-Diffusers/
import gc
import random
import tempfile
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
ControlNetModel,
DDIMScheduler,
    StableDiffusionControlNetImg2ImgPipeline,
    UNet2DConditionModel,
)
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_controlnet import MultiControlNetModel
from diffusers.utils import floats_tensor, load_image, load_numpy, randn_tensor, slow, torch_device
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
IMAGE_TO_IMAGE_IMAGE_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import (
PipelineKarrasSchedulerTesterMixin,
PipelineLatentTesterMixin,
PipelineTesterMixin,
)
enable_full_determinism()
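# enable_full_determinism() switches torch to deterministic kernels so the
# pipeline outputs compared below are reproducible across runs.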
class ControlNetImg2ImgPipelineFastTests( PipelineLatentTesterMixin , PipelineKarrasSchedulerTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    pipeline_class = StableDiffusionControlNetImg2ImgPipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"height", "width"}
    batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
    image_params = IMAGE_TO_IMAGE_IMAGE_PARAMS.union({"control_image"} )
    image_latents_params = IMAGE_TO_IMAGE_IMAGE_PARAMS
    def get_dummy_components( self ):
        torch.manual_seed(0 )
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''') , up_block_types=('''CrossAttnUpBlock2D''', '''UpBlock2D''') , cross_attention_dim=32 , )
        torch.manual_seed(0 )
        controlnet = ControlNetModel(
            block_out_channels=(32, 64) , layers_per_block=2 , in_channels=4 , down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''') , cross_attention_dim=32 , conditioning_embedding_out_channels=(16, 32) , )
        torch.manual_seed(0 )
        scheduler = DDIMScheduler(
            beta_start=0.00085 , beta_end=0.012 , beta_schedule='''scaled_linear''' , clip_sample=False , set_alpha_to_one=False , )
        torch.manual_seed(0 )
        vae = AutoencoderKL(
            block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''] , up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] , latent_channels=4 , )
        torch.manual_seed(0 )
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , )
        text_encoder = CLIPTextModel(text_encoder_config )
        tokenizer = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
        components = {
            '''unet''': unet,
            '''controlnet''': controlnet,
            '''scheduler''': scheduler,
            '''vae''': vae,
            '''text_encoder''': text_encoder,
            '''tokenizer''': tokenizer,
            '''safety_checker''': None,
            '''feature_extractor''': None,
        }
        return components
    def get_dummy_inputs( self , device , seed=0 ):
        if str(device ).startswith('''mps''' ):
            generator = torch.manual_seed(seed )
        else:
            generator = torch.Generator(device=device ).manual_seed(seed )
        controlnet_embedder_scale_factor = 2
        control_image = randn_tensor(
            (1, 3, 32 * controlnet_embedder_scale_factor, 32 * controlnet_embedder_scale_factor) , generator=generator , device=torch.device(device ) , )
        image = floats_tensor(control_image.shape , rng=random.Random(seed ) ).to(device )
        image = image.cpu().permute(0 , 2 , 3 , 1 )[0]
        image = Image.fromarray(np.uint8(image ) ).convert('''RGB''' ).resize((64, 64) )
        inputs = {
            '''prompt''': '''A painting of a squirrel eating a burger''',
            '''generator''': generator,
            '''num_inference_steps''': 2,
            '''guidance_scale''': 6.0,
            '''output_type''': '''numpy''',
            '''image''': image,
            '''control_image''': control_image,
        }
        return inputs
    def test_attention_slicing_forward_pass( self ):
        return self._test_attention_slicing_forward_pass(expected_max_diff=2e-3 )
    @unittest.skipIf(
        torch_device != '''cuda''' or not is_xformers_available() , reason='''XFormers attention is only available with CUDA and `xformers` installed''' , )
    def test_xformers_attention_forwardGenerator_pass( self ):
        self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=2e-3 )
    def test_inference_batch_single_identical( self ):
        self._test_inference_batch_single_identical(expected_max_diff=2e-3 )
class StableDiffusionMultiControlNetPipelineFastTests( PipelineTesterMixin , PipelineKarrasSchedulerTesterMixin , unittest.TestCase ):
    pipeline_class = StableDiffusionControlNetImg2ImgPipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"height", "width"}
    batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
    image_params = frozenset([] )  # TO_DO: add image_params once refactored VaeImageProcessor.preprocess
    def get_dummy_components( self ):
        torch.manual_seed(0 )
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''') , up_block_types=('''CrossAttnUpBlock2D''', '''UpBlock2D''') , cross_attention_dim=32 , )
        torch.manual_seed(0 )
        def init_weights(m ):
            if isinstance(m , torch.nn.Conv2d ):
                torch.nn.init.normal(m.weight )
                m.bias.data.fill_(1.0 )
        controlnet1 = ControlNetModel(
            block_out_channels=(32, 64) , layers_per_block=2 , in_channels=4 , down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''') , cross_attention_dim=32 , conditioning_embedding_out_channels=(16, 32) , )
        controlnet1.controlnet_down_blocks.apply(init_weights )
        torch.manual_seed(0 )
        controlnet2 = ControlNetModel(
            block_out_channels=(32, 64) , layers_per_block=2 , in_channels=4 , down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''') , cross_attention_dim=32 , conditioning_embedding_out_channels=(16, 32) , )
        controlnet2.controlnet_down_blocks.apply(init_weights )
        torch.manual_seed(0 )
        scheduler = DDIMScheduler(
            beta_start=0.00085 , beta_end=0.012 , beta_schedule='''scaled_linear''' , clip_sample=False , set_alpha_to_one=False , )
        torch.manual_seed(0 )
        vae = AutoencoderKL(
            block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''] , up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] , latent_channels=4 , )
        torch.manual_seed(0 )
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , )
        text_encoder = CLIPTextModel(text_encoder_config )
        tokenizer = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
        controlnet = MultiControlNetModel([controlnet1, controlnet2] )
        components = {
            '''unet''': unet,
            '''controlnet''': controlnet,
            '''scheduler''': scheduler,
            '''vae''': vae,
            '''text_encoder''': text_encoder,
            '''tokenizer''': tokenizer,
            '''safety_checker''': None,
            '''feature_extractor''': None,
        }
        return components
    def get_dummy_inputs( self , device , seed=0 ):
        if str(device ).startswith('''mps''' ):
            generator = torch.manual_seed(seed )
        else:
            generator = torch.Generator(device=device ).manual_seed(seed )
        controlnet_embedder_scale_factor = 2
        control_image = [
            randn_tensor(
                (1, 3, 32 * controlnet_embedder_scale_factor, 32 * controlnet_embedder_scale_factor) , generator=generator , device=torch.device(device ) , ),
            randn_tensor(
                (1, 3, 32 * controlnet_embedder_scale_factor, 32 * controlnet_embedder_scale_factor) , generator=generator , device=torch.device(device ) , ),
        ]
        image = floats_tensor(control_image[0].shape , rng=random.Random(seed ) ).to(device )
        image = image.cpu().permute(0 , 2 , 3 , 1 )[0]
        image = Image.fromarray(np.uint8(image ) ).convert('''RGB''' ).resize((64, 64) )
        inputs = {
            '''prompt''': '''A painting of a squirrel eating a burger''',
            '''generator''': generator,
            '''num_inference_steps''': 2,
            '''guidance_scale''': 6.0,
            '''output_type''': '''numpy''',
            '''image''': image,
            '''control_image''': control_image,
        }
        return inputs
    def test_control_guidance_switch( self ):
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components )
        pipe.to(torch_device )
        scale = 10.0
        steps = 4
        inputs = self.get_dummy_inputs(torch_device )
        inputs['''num_inference_steps'''] = steps
        inputs['''guidance_scale'''] = scale
        output_1 = pipe(**inputs )[0]
        inputs = self.get_dummy_inputs(torch_device )
        inputs['''num_inference_steps'''] = steps
        inputs['''guidance_scale'''] = scale
        output_2 = pipe(**inputs , control_guidance_start=0.1 , control_guidance_end=0.2 )[0]
        inputs = self.get_dummy_inputs(torch_device )
        inputs['''num_inference_steps'''] = steps
        inputs['''guidance_scale'''] = scale
        output_3 = pipe(**inputs , control_guidance_start=[0.1, 0.3] , control_guidance_end=[0.2, 0.7] )[0]
        inputs = self.get_dummy_inputs(torch_device )
        inputs['''num_inference_steps'''] = steps
        inputs['''guidance_scale'''] = scale
        output_4 = pipe(**inputs , control_guidance_start=0.4 , control_guidance_end=[0.5, 0.8] )[0]
        # make sure that all outputs are different
        assert np.sum(np.abs(output_1 - output_2 ) ) > 1e-3
        assert np.sum(np.abs(output_1 - output_3 ) ) > 1e-3
        assert np.sum(np.abs(output_1 - output_4 ) ) > 1e-3
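        # Differing outputs confirm that control_guidance_start/end actually gate
        # the denoising steps during which each ControlNet is applied.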
    def test_attention_slicing_forward_pass( self ):
        return self._test_attention_slicing_forward_pass(expected_max_diff=2e-3 )
    @unittest.skipIf(
        torch_device != '''cuda''' or not is_xformers_available() , reason='''XFormers attention is only available with CUDA and `xformers` installed''' , )
    def test_xformers_attention_forwardGenerator_pass( self ):
        self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=2e-3 )
    def test_inference_batch_single_identical( self ):
        self._test_inference_batch_single_identical(expected_max_diff=2e-3 )
    def test_save_pretrained_raise_not_implemented_exception( self ):
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components )
        pipe.to(torch_device )
        pipe.set_progress_bar_config(disable=None )
        with tempfile.TemporaryDirectory() as tmpdir:
            try:
                # save_pretrained is not implemented for Multi-ControlNet
                pipe.save_pretrained(tmpdir )
            except NotImplementedError:
                pass
@slow
@require_torch_gpu
class ControlNetImg2ImgPipelineSlowTests( unittest.TestCase ):
    def tearDown( self ):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def test_canny( self ):
        controlnet = ControlNetModel.from_pretrained('''lllyasviel/sd-controlnet-canny''' )
        pipe = StableDiffusionControlNetImg2ImgPipeline.from_pretrained(
            '''runwayml/stable-diffusion-v1-5''' , safety_checker=None , controlnet=controlnet )
        pipe.enable_model_cpu_offload()
        pipe.set_progress_bar_config(disable=None )
        generator = torch.Generator(device='''cpu''' ).manual_seed(0 )
        prompt = '''evil space-punk bird'''
        control_image = load_image(
            '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/bird_canny.png''' ).resize((512, 512) )
        image = load_image(
            '''https://huggingface.co/lllyasviel/sd-controlnet-canny/resolve/main/images/bird.png''' ).resize((512, 512) )
        output = pipe(
            prompt , image , control_image=control_image , generator=generator , output_type='''np''' , num_inference_steps=50 , strength=0.6 , )
        image = output.images[0]
        assert image.shape == (512, 512, 3)
        expected_image = load_numpy(
            '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/img2img.npy''' )
        assert np.abs(expected_image - image ).max() < 9e-2
| 64 | 0 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available
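
# `_import_structure` maps submodule names to their public symbols; the
# `_LazyModule` at the bottom of this file defers the heavy torch/flax imports
# until one of these names is actually accessed.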
_import_structure = {
    'configuration_longt5': ['LONGT5_PRETRAINED_CONFIG_ARCHIVE_MAP', 'LongT5Config', 'LongT5OnnxConfig'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_longt5'] = [
        'LONGT5_PRETRAINED_MODEL_ARCHIVE_LIST',
        'LongT5EncoderModel',
        'LongT5ForConditionalGeneration',
        'LongT5Model',
        'LongT5PreTrainedModel',
    ]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_flax_longt5'] = [
        'FlaxLongT5ForConditionalGeneration',
        'FlaxLongT5Model',
        'FlaxLongT5PreTrainedModel',
    ]
if TYPE_CHECKING:
    from .configuration_longt5 import LONGT5_PRETRAINED_CONFIG_ARCHIVE_MAP, LongT5Config, LongT5OnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_longt5 import (
            LONGT5_PRETRAINED_MODEL_ARCHIVE_LIST,
            LongT5EncoderModel,
            LongT5ForConditionalGeneration,
            LongT5Model,
            LongT5PreTrainedModel,
        )
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_flax_longt5 import (
            FlaxLongT5ForConditionalGeneration,
            FlaxLongT5Model,
            FlaxLongT5PreTrainedModel,
        )
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
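
# Example: `from transformers.models.longt5 import LongT5Model` only triggers
# the real import of `modeling_longt5` (and hence torch) at first attribute access.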
| 347 |
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
a = logging.get_logger(__name__)
a = {
'SenseTime/deformable-detr': 'https://huggingface.co/sensetime/deformable-detr/resolve/main/config.json',
# See all Deformable DETR models at https://huggingface.co/models?filter=deformable-detr
}
class DeformableDetrConfig(PretrainedConfig):
    model_type = "deformable_detr"
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
    }
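
    # `attribute_map` lets generic code read `config.hidden_size` and
    # `config.num_attention_heads` even though this model stores them as
    # `d_model` and `encoder_attention_heads`.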
    def __init__(
        self,
        use_timm_backbone=True,
        backbone_config=None,
        num_channels=3,
        num_queries=300,
        max_position_embeddings=1024,
        encoder_layers=6,
        encoder_ffn_dim=1024,
        encoder_attention_heads=8,
        decoder_layers=6,
        decoder_ffn_dim=1024,
        decoder_attention_heads=8,
        encoder_layerdrop=0.0,
        is_encoder_decoder=True,
        activation_function="relu",
        d_model=256,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        init_xavier_std=1.0,
        return_intermediate=True,
        auxiliary_loss=False,
        position_embedding_type="sine",
        backbone="resnet50",
        use_pretrained_backbone=True,
        dilation=False,
        num_feature_levels=4,
        encoder_n_points=4,
        decoder_n_points=4,
        two_stage=False,
        two_stage_num_proposals=300,
        with_box_refine=False,
        class_cost=1,
        bbox_cost=5,
        giou_cost=2,
        mask_loss_coefficient=1,
        dice_loss_coefficient=1,
        bbox_loss_coefficient=5,
        giou_loss_coefficient=2,
        eos_coefficient=0.1,
        focal_alpha=0.25,
        disable_custom_kernels=False,
        **kwargs,
    ):
        if backbone_config is not None and use_timm_backbone:
            raise ValueError("You can't specify both `backbone_config` and `use_timm_backbone`.")

        if not use_timm_backbone:
            if backbone_config is None:
                logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.")
                backbone_config = CONFIG_MAPPING["resnet"](out_features=["stage4"])
            elif isinstance(backbone_config, dict):
                backbone_model_type = backbone_config.get("model_type")
                config_class = CONFIG_MAPPING[backbone_model_type]
                backbone_config = config_class.from_dict(backbone_config)

        self.use_timm_backbone = use_timm_backbone
        self.backbone_config = backbone_config
        self.num_channels = num_channels
        self.num_queries = num_queries
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        self.encoder_layerdrop = encoder_layerdrop
        self.auxiliary_loss = auxiliary_loss
        self.position_embedding_type = position_embedding_type
        self.backbone = backbone
        self.use_pretrained_backbone = use_pretrained_backbone
        self.dilation = dilation
        # deformable attributes
        self.num_feature_levels = num_feature_levels
        self.encoder_n_points = encoder_n_points
        self.decoder_n_points = decoder_n_points
        self.two_stage = two_stage
        self.two_stage_num_proposals = two_stage_num_proposals
        self.with_box_refine = with_box_refine
        if two_stage is True and with_box_refine is False:
            raise ValueError("If two_stage is True, with_box_refine must be True.")
        # Hungarian matcher
        self.class_cost = class_cost
        self.bbox_cost = bbox_cost
        self.giou_cost = giou_cost
        # Loss coefficients
        self.mask_loss_coefficient = mask_loss_coefficient
        self.dice_loss_coefficient = dice_loss_coefficient
        self.bbox_loss_coefficient = bbox_loss_coefficient
        self.giou_loss_coefficient = giou_loss_coefficient
        self.eos_coefficient = eos_coefficient
        self.focal_alpha = focal_alpha
        self.disable_custom_kernels = disable_custom_kernels
        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)
    @property
    def num_attention_heads(self) -> int:
        return self.encoder_attention_heads

    @property
    def hidden_size(self) -> int:
        return self.d_model

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        if self.backbone_config is not None:
            output["backbone_config"] = self.backbone_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
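
# Minimal usage sketch (the argument values are illustrative, not tuned):
#     config = DeformableDetrConfig(two_stage=True, with_box_refine=True)
#     assert config.hidden_size == config.d_model  # resolved via attribute_map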
| 347 | 1 |
"""simple docstring"""
import argparse
import os
import gluonnlp as nlp
import mxnet as mx
import numpy as np
import torch
from gluonnlp.base import get_home_dir
from gluonnlp.model.bert import BERTEncoder
from gluonnlp.model.utils import _load_vocab
from gluonnlp.vocab import Vocab
from packaging import version
from torch import nn
from transformers import BertConfig, BertForMaskedLM, BertModel, RobertaTokenizer
from transformers.models.bert.modeling_bert import (
BertIntermediate,
BertLayer,
BertOutput,
BertSelfAttention,
BertSelfOutput,
)
from transformers.utils import logging
if version.parse(nlp.__version__) != version.parse("0.8.3"):
raise Exception("requires gluonnlp == 0.8.3")
if version.parse(mx.__version__) != version.parse("1.5.0"):
raise Exception("requires mxnet == 1.5.0")
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

SAMPLE_TEXT = "The Nymphenburg Palace is a beautiful palace in Munich!"
def convert_bort_checkpoint_to_pytorch(bort_checkpoint_path, pytorch_dump_folder_path):
    # Original Bort configuration
    bort_4_8_768_1024_hparams = {
        "attention_cell": "multi_head",
        "num_layers": 4,
        "units": 1024,
        "hidden_size": 768,
        "max_length": 512,
        "num_heads": 8,
        "scaled": True,
        "dropout": 0.1,
        "use_residual": True,
        "embed_size": 1024,
        "embed_dropout": 0.1,
        "word_embed": None,
        "layer_norm_eps": 1e-5,
        "token_type_vocab_size": 2,
    }

    predefined_args = bort_4_8_768_1024_hparams

    # Let's construct the original Bort model here
    # Taken from official BERT implementation, see:
    # https://github.com/alexa/bort/blob/master/bort/bort.py
    encoder = BERTEncoder(
        attention_cell=predefined_args["attention_cell"],
        num_layers=predefined_args["num_layers"],
        units=predefined_args["units"],
        hidden_size=predefined_args["hidden_size"],
        max_length=predefined_args["max_length"],
        num_heads=predefined_args["num_heads"],
        scaled=predefined_args["scaled"],
        dropout=predefined_args["dropout"],
        output_attention=False,
        output_all_encodings=False,
        use_residual=predefined_args["use_residual"],
        activation=predefined_args.get("activation", "gelu"),
        layer_norm_eps=predefined_args.get("layer_norm_eps", None),
    )

    # Vocab information needs to be fetched first
    # It's the same as RoBERTa, so RobertaTokenizer can be used later
    vocab_name = "openwebtext_ccnews_stories_books_cased"

    # Specify download folder to Gluonnlp's vocab
    gluon_cache_dir = os.path.join(get_home_dir(), "models")
    bort_vocab = _load_vocab(vocab_name, None, gluon_cache_dir, cls=Vocab)

    original_bort = nlp.model.BERTModel(
        encoder,
        len(bort_vocab),
        units=predefined_args["units"],
        embed_size=predefined_args["embed_size"],
        embed_dropout=predefined_args["embed_dropout"],
        word_embed=predefined_args["word_embed"],
        use_pooler=False,
        use_token_type_embed=False,
        token_type_vocab_size=predefined_args["token_type_vocab_size"],
        use_classifier=False,
        use_decoder=False,
    )

    original_bort.load_parameters(bort_checkpoint_path, cast_dtype=True, ignore_extra=True)
    params = original_bort._collect_params_with_prefix()
# Build our config 🤗
    hf_bort_config_json = {
        "architectures": ["BertForMaskedLM"],
        "attention_probs_dropout_prob": predefined_args["dropout"],
        "hidden_act": "gelu",
        "hidden_dropout_prob": predefined_args["dropout"],
        "hidden_size": predefined_args["embed_size"],
        "initializer_range": 0.02,
        "intermediate_size": predefined_args["hidden_size"],
        "layer_norm_eps": predefined_args["layer_norm_eps"],
        "max_position_embeddings": predefined_args["max_length"],
        "model_type": "bort",
        "num_attention_heads": predefined_args["num_heads"],
        "num_hidden_layers": predefined_args["num_layers"],
        "pad_token_id": 1,  # 2 = BERT, 1 = RoBERTa
        "type_vocab_size": 1,  # 2 = BERT, 1 = RoBERTa
        "vocab_size": len(bort_vocab),
    }

    hf_bort_config = BertConfig.from_dict(hf_bort_config_json)
    hf_bort_model = BertForMaskedLM(hf_bort_config)
    hf_bort_model.eval()
# Parameter mapping table (Gluonnlp to Transformers)
# * denotes layer index
#
# | Gluon Parameter | Transformers Parameter
# | -------------------------------------------------------------- | ----------------------
# | `encoder.layer_norm.beta` | `bert.embeddings.LayerNorm.bias`
# | `encoder.layer_norm.gamma` | `bert.embeddings.LayerNorm.weight`
# | `encoder.position_weight` | `bert.embeddings.position_embeddings.weight`
# | `word_embed.0.weight` | `bert.embeddings.word_embeddings.weight`
# | `encoder.transformer_cells.*.attention_cell.proj_key.bias` | `bert.encoder.layer.*.attention.self.key.bias`
# | `encoder.transformer_cells.*.attention_cell.proj_key.weight` | `bert.encoder.layer.*.attention.self.key.weight`
# | `encoder.transformer_cells.*.attention_cell.proj_query.bias` | `bert.encoder.layer.*.attention.self.query.bias`
# | `encoder.transformer_cells.*.attention_cell.proj_query.weight` | `bert.encoder.layer.*.attention.self.query.weight`
# | `encoder.transformer_cells.*.attention_cell.proj_value.bias` | `bert.encoder.layer.*.attention.self.value.bias`
# | `encoder.transformer_cells.*.attention_cell.proj_value.weight` | `bert.encoder.layer.*.attention.self.value.weight`
# | `encoder.transformer_cells.*.ffn.ffn_2.bias` | `bert.encoder.layer.*.attention.output.dense.bias`
# | `encoder.transformer_cells.*.ffn.ffn_2.weight` | `bert.encoder.layer.*.attention.output.dense.weight`
# | `encoder.transformer_cells.*.layer_norm.beta` | `bert.encoder.layer.*.attention.output.LayerNorm.bias`
# | `encoder.transformer_cells.*.layer_norm.gamma` | `bert.encoder.layer.*.attention.output.LayerNorm.weight`
# | `encoder.transformer_cells.*.ffn.ffn_1.bias` | `bert.encoder.layer.*.intermediate.dense.bias`
# | `encoder.transformer_cells.*.ffn.ffn_1.weight` | `bert.encoder.layer.*.intermediate.dense.weight`
# | `encoder.transformer_cells.*.ffn.layer_norm.beta` | `bert.encoder.layer.*.output.LayerNorm.bias`
# | `encoder.transformer_cells.*.ffn.layer_norm.gamma` | `bert.encoder.layer.*.output.LayerNorm.weight`
# | `encoder.transformer_cells.*.proj.bias` | `bert.encoder.layer.*.output.dense.bias`
# | `encoder.transformer_cells.*.proj.weight` | `bert.encoder.layer.*.output.dense.weight`
    # Helper function to convert MXNET Arrays to PyTorch
    def to_torch(mx_array) -> nn.Parameter:
        return nn.Parameter(torch.FloatTensor(mx_array.data().asnumpy()))

    # Check param shapes and map new HF param back
    def check_and_map_params(hf_param, gluon_param):
        shape_hf = hf_param.shape

        gluon_param = to_torch(params[gluon_param])
        shape_gluon = gluon_param.shape

        assert (
            shape_hf == shape_gluon
        ), f"The gluon parameter {gluon_param} has shape {shape_gluon}, but expects shape {shape_hf} for Transformers"

        return gluon_param
    hf_bort_model.bert.embeddings.word_embeddings.weight = check_and_map_params(
        hf_bort_model.bert.embeddings.word_embeddings.weight, "word_embed.0.weight"
    )
    hf_bort_model.bert.embeddings.position_embeddings.weight = check_and_map_params(
        hf_bort_model.bert.embeddings.position_embeddings.weight, "encoder.position_weight"
    )
    hf_bort_model.bert.embeddings.LayerNorm.bias = check_and_map_params(
        hf_bort_model.bert.embeddings.LayerNorm.bias, "encoder.layer_norm.beta"
    )
    hf_bort_model.bert.embeddings.LayerNorm.weight = check_and_map_params(
        hf_bort_model.bert.embeddings.LayerNorm.weight, "encoder.layer_norm.gamma"
    )

    # Inspired by RoBERTa conversion script, we just zero them out (Bort does not use them)
    hf_bort_model.bert.embeddings.token_type_embeddings.weight.data = torch.zeros_like(
        hf_bort_model.bert.embeddings.token_type_embeddings.weight.data
    )
    for i in range(hf_bort_config.num_hidden_layers):
        layer: BertLayer = hf_bort_model.bert.encoder.layer[i]

        # self attention
        self_attn: BertSelfAttention = layer.attention.self

        self_attn.key.bias.data = check_and_map_params(
            self_attn.key.bias.data, f"encoder.transformer_cells.{i}.attention_cell.proj_key.bias"
        )
        self_attn.key.weight.data = check_and_map_params(
            self_attn.key.weight.data, f"encoder.transformer_cells.{i}.attention_cell.proj_key.weight"
        )
        self_attn.query.bias.data = check_and_map_params(
            self_attn.query.bias.data, f"encoder.transformer_cells.{i}.attention_cell.proj_query.bias"
        )
        self_attn.query.weight.data = check_and_map_params(
            self_attn.query.weight.data, f"encoder.transformer_cells.{i}.attention_cell.proj_query.weight"
        )
        self_attn.value.bias.data = check_and_map_params(
            self_attn.value.bias.data, f"encoder.transformer_cells.{i}.attention_cell.proj_value.bias"
        )
        self_attn.value.weight.data = check_and_map_params(
            self_attn.value.weight.data, f"encoder.transformer_cells.{i}.attention_cell.proj_value.weight"
        )

        # self attention output
        self_output: BertSelfOutput = layer.attention.output

        self_output.dense.bias = check_and_map_params(
            self_output.dense.bias, f"encoder.transformer_cells.{i}.proj.bias"
        )
        self_output.dense.weight = check_and_map_params(
            self_output.dense.weight, f"encoder.transformer_cells.{i}.proj.weight"
        )
        self_output.LayerNorm.bias = check_and_map_params(
            self_output.LayerNorm.bias, f"encoder.transformer_cells.{i}.layer_norm.beta"
        )
        self_output.LayerNorm.weight = check_and_map_params(
            self_output.LayerNorm.weight, f"encoder.transformer_cells.{i}.layer_norm.gamma"
        )

        # intermediate
        intermediate: BertIntermediate = layer.intermediate

        intermediate.dense.bias = check_and_map_params(
            intermediate.dense.bias, f"encoder.transformer_cells.{i}.ffn.ffn_1.bias"
        )
        intermediate.dense.weight = check_and_map_params(
            intermediate.dense.weight, f"encoder.transformer_cells.{i}.ffn.ffn_1.weight"
        )

        # output
        bert_output: BertOutput = layer.output

        bert_output.dense.bias = check_and_map_params(
            bert_output.dense.bias, f"encoder.transformer_cells.{i}.ffn.ffn_2.bias"
        )
        bert_output.dense.weight = check_and_map_params(
            bert_output.dense.weight, f"encoder.transformer_cells.{i}.ffn.ffn_2.weight"
        )
        bert_output.LayerNorm.bias = check_and_map_params(
            bert_output.LayerNorm.bias, f"encoder.transformer_cells.{i}.ffn.layer_norm.beta"
        )
        bert_output.LayerNorm.weight = check_and_map_params(
            bert_output.LayerNorm.weight, f"encoder.transformer_cells.{i}.ffn.layer_norm.gamma"
        )
# Save space and energy 🎄
hf_bort_model.half()
# Compare output of both models
    tokenizer = RobertaTokenizer.from_pretrained("roberta-base")

    input_ids = tokenizer.encode_plus(SAMPLE_TEXT)["input_ids"]

    # Get gluon output
    gluon_input_ids = mx.nd.array([input_ids])
    output_gluon = original_bort(inputs=gluon_input_ids, token_types=[])

    # Get Transformer output (save and reload model again)
    hf_bort_model.save_pretrained(pytorch_dump_folder_path)
    hf_bort_model = BertModel.from_pretrained(pytorch_dump_folder_path)
    hf_bort_model.eval()

    input_ids = tokenizer.encode_plus(SAMPLE_TEXT, return_tensors="pt")
    output_hf = hf_bort_model(**input_ids)[0]

    gluon_layer = output_gluon[0].asnumpy()
    hf_layer = output_hf[0].detach().numpy()

    max_absolute_diff = np.max(np.abs(hf_layer - gluon_layer)).item()
    success = np.allclose(gluon_layer, hf_layer, atol=1e-3)

    if success:
        print("✔️ Both model do output the same tensors")
    else:
        print("❌ Both model do **NOT** output the same tensors")
        print("Absolute difference is:", max_absolute_diff)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--bort_checkpoint_path", default=None, type=str, required=True, help="Path the official Bort params file."
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
    args = parser.parse_args()
convert_bort_checkpoint_to_pytorch(args.bort_checkpoint_path, args.pytorch_dump_folder_path)
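
# Example invocation (script name and paths are illustrative):
#     python convert_bort_original_gluonnlp_checkpoint_to_pytorch.py \
#         --bort_checkpoint_path ./bort.params \
#         --pytorch_dump_folder_path ./bort-pytorch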
| 482 |
from __future__ import annotations
from random import random
from typing import Generic, TypeVar
KT = TypeVar("KT")
VT = TypeVar("VT")
class Node(Generic[KT, VT]):
    def __init__(self, key: KT | str = "root", value: VT | None = None):
        self.key = key
        self.value = value
        self.forward: list[Node[KT, VT]] = []

    def __repr__(self) -> str:
        return f"Node({self.key}: {self.value})"

    @property
    def level(self) -> int:
        """Number of forward references (height of this node)."""
        return len(self.forward)
class SkipList(Generic[KT, VT]):
    def __init__(self, p: float = 0.5, max_level: int = 16):
        self.head: Node[KT, VT] = Node[KT, VT]()
        self.level = 0
        self.p = p
        self.max_level = max_level
    def __str__(self) -> str:
        items = list(self)

        if len(items) == 0:
            return f"SkipList(level={self.level})"

        label_size = max((len(str(item)) for item in items), default=4)
        label_size = max(label_size, 4) + 4

        node = self.head
        lines = []

        forwards = node.forward.copy()
        lines.append(f"[{node.key}]".ljust(label_size, "-") + "* " * len(forwards))
        lines.append(" " * label_size + "| " * len(forwards))

        while len(node.forward) != 0:
            node = node.forward[0]

            lines.append(
                f"[{node.key}]".ljust(label_size, "-")
                + " ".join(str(n.key) if n.key == node.key else "|" for n in forwards)
            )
            lines.append(" " * label_size + "| " * len(forwards))
            forwards = node.forward

        lines.append("None".ljust(label_size) + "* " * len(forwards))
        return f"SkipList(level={self.level})\n" + "\n".join(lines)
    def __iter__(self):
        node = self.head

        while len(node.forward) != 0:
            yield node.forward[0].key
            node = node.forward[0]

    def random_level(self) -> int:
        """Return a random level (height) for a newly inserted node."""
        level = 1
        while random() < self.p and level < self.max_level:
            level += 1
        return level
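
    # `random_level` draws from a geometric distribution: P(level >= k) = p**(k - 1),
    # capped at max_level. With the default p=0.5 the expected node height is about
    # 1 / (1 - p) = 2, which is what gives the skip list its O(log n) expected search.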
    def _locate_node(self, key) -> tuple[Node[KT, VT] | None, list[Node[KT, VT]]]:
        update_vector = []
        node = self.head

        for i in reversed(range(self.level)):
            # i < node.level - When node level is lesser than `i` decrement `i`.
            # node.forward[i].key < key - Jumping to node with key value higher
            #                             or equal to searched key would result
            #                             in skipping searched key.
            while i < node.level and node.forward[i].key < key:
                node = node.forward[i]
            # Each leftmost node (relative to searched node) will potentially have to
            # be updated.
            update_vector.append(node)

        update_vector.reverse()  # Note that we were inserting values in reverse order.

        # len(node.forward) != 0 - If current node doesn't contain any further
        #                          references then searched key is not present.
        # node.forward[0].key == key - Next node key should be equal to search key
        #                              if key is present.
        if len(node.forward) != 0 and node.forward[0].key == key:
            return node.forward[0], update_vector
        else:
            return None, update_vector
    def delete(self, key: KT):
        node, update_vector = self._locate_node(key)

        if node is not None:
            for i, update_node in enumerate(update_vector):
                # Remove or replace all references to removed node.
                if update_node.level > i and update_node.forward[i].key == key:
                    if node.level > i:
                        update_node.forward[i] = node.forward[i]
                    else:
                        update_node.forward = update_node.forward[:i]

    def insert(self, key: KT, value: VT):
        node, update_vector = self._locate_node(key)
        if node is not None:
            node.value = value
        else:
            level = self.random_level()

            if level > self.level:
                # After level increase we have to add additional nodes to head.
                for _ in range(self.level - 1, level):
                    update_vector.append(self.head)
                self.level = level

            new_node = Node(key, value)

            for i, update_node in enumerate(update_vector[:level]):
                # Change references to pass through new node.
                if update_node.level > i:
                    new_node.forward.append(update_node.forward[i])

                if update_node.level < i + 1:
                    update_node.forward.append(new_node)
                else:
                    update_node.forward[i] = new_node

    def find(self, key: KT) -> VT | None:
        node, _ = self._locate_node(key)
        if node is not None:
            return node.value
        return None
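
# Minimal usage sketch (keys only need to be mutually comparable):
#     lst = SkipList()
#     lst.insert("alpha", 1)
#     assert lst.find("alpha") == 1
#     lst.delete("alpha")
#     assert lst.find("alpha") is None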
def test_insert():
    skip_list = SkipList()
    skip_list.insert("Key1", 3)
    skip_list.insert("Key2", 12)
    skip_list.insert("Key3", 41)
    skip_list.insert("Key4", -19)

    node = skip_list.head
    all_values = {}
    while node.level != 0:
        node = node.forward[0]
        all_values[node.key] = node.value

    assert len(all_values) == 4
    assert all_values["Key1"] == 3
    assert all_values["Key2"] == 12
    assert all_values["Key3"] == 41
    assert all_values["Key4"] == -19
def test_insert_overrides_existing_value():
    skip_list = SkipList()
    skip_list.insert("Key1", 10)
    skip_list.insert("Key1", 12)

    skip_list.insert("Key5", 7)
    skip_list.insert("Key7", 10)
    skip_list.insert("Key10", 5)

    skip_list.insert("Key7", 7)
    skip_list.insert("Key5", 5)
    skip_list.insert("Key10", 10)

    node = skip_list.head
    all_values = {}
    while node.level != 0:
        node = node.forward[0]
        all_values[node.key] = node.value

    if len(all_values) != 4:
        print()
    assert len(all_values) == 4
    assert all_values["Key1"] == 12
    assert all_values["Key7"] == 7
    assert all_values["Key5"] == 5
    assert all_values["Key10"] == 10
def test_searching_empty_list_returns_none():
    skip_list = SkipList()
    assert skip_list.find("Some key") is None


def test_search():
    skip_list = SkipList()

    skip_list.insert("Key2", 20)
    assert skip_list.find("Key2") == 20

    skip_list.insert("Some Key", 10)
    skip_list.insert("Key2", 8)
    skip_list.insert("V", 13)

    assert skip_list.find("Y") is None
    assert skip_list.find("Key2") == 8
    assert skip_list.find("Some Key") == 10
    assert skip_list.find("V") == 13
def test_deleting_item_from_empty_list_do_nothing():
    skip_list = SkipList()
    skip_list.delete("Some key")

    assert len(skip_list.head.forward) == 0


def test_deleted_items_are_not_founded_by_find_method():
    skip_list = SkipList()

    skip_list.insert("Key1", 12)
    skip_list.insert("V", 13)
    skip_list.insert("X", 14)
    skip_list.insert("Key2", 15)

    skip_list.delete("V")
    skip_list.delete("Key2")

    assert skip_list.find("V") is None
    assert skip_list.find("Key2") is None
def test_delete_removes_only_given_key():
    skip_list = SkipList()
skip_list.insert("""Key1""" , 12 )
skip_list.insert("""V""" , 13 )
skip_list.insert("""X""" , 14 )
skip_list.insert("""Key2""" , 15 )
skip_list.delete("""V""" )
assert skip_list.find("""V""" ) is None
assert skip_list.find("""X""" ) == 14
assert skip_list.find("""Key1""" ) == 12
assert skip_list.find("""Key2""" ) == 15
skip_list.delete("""X""" )
assert skip_list.find("""V""" ) is None
assert skip_list.find("""X""" ) is None
assert skip_list.find("""Key1""" ) == 12
assert skip_list.find("""Key2""" ) == 15
skip_list.delete("""Key1""" )
assert skip_list.find("""V""" ) is None
assert skip_list.find("""X""" ) is None
assert skip_list.find("""Key1""" ) is None
assert skip_list.find("""Key2""" ) == 15
skip_list.delete("""Key2""" )
assert skip_list.find("""V""" ) is None
assert skip_list.find("""X""" ) is None
assert skip_list.find("""Key1""" ) is None
assert skip_list.find("""Key2""" ) is None
def test_delete_doesnt_leave_dead_nodes():
    skip_list = SkipList()

    skip_list.insert("Key1", 12)
    skip_list.insert("V", 13)
    skip_list.insert("X", 142)
    skip_list.insert("Key2", 15)

    skip_list.delete("X")

    def traverse_keys(node):
        yield node.key
        for forward_node in node.forward:
            yield from traverse_keys(forward_node)

    assert len(set(traverse_keys(skip_list.head))) == 4
def test_iter_always_yields_sorted_values():
    def is_sorted(lst):
        return all(next_item >= item for item, next_item in zip(lst, lst[1:]))

    skip_list = SkipList()
    for i in range(10):
        skip_list.insert(i, i)
    assert is_sorted(list(skip_list))
    skip_list.delete(5)
    skip_list.delete(8)
    skip_list.delete(2)
    assert is_sorted(list(skip_list))
    skip_list.insert(-12, -12)
    skip_list.insert(77, 77)
    assert is_sorted(list(skip_list))
def pytests():
for _ in range(100 ):
# Repeat test 100 times due to the probabilistic nature of skip list
# random values == random bugs
test_insert()
test_insert_overrides_existing_value()
test_searching_empty_list_returns_none()
test_search()
test_deleting_item_from_empty_list_do_nothing()
test_deleted_items_are_not_founded_by_find_method()
test_delete_removes_only_given_key()
test_delete_doesnt_leave_dead_nodes()
test_iter_always_yields_sorted_values()
def main():
    skip_list = SkipList()
skip_list.insert(2 , """2""" )
skip_list.insert(4 , """4""" )
skip_list.insert(6 , """4""" )
skip_list.insert(4 , """5""" )
skip_list.insert(8 , """4""" )
skip_list.insert(9 , """4""" )
skip_list.delete(4 )
print(__lowerCamelCase )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 468 | 0 |
import unittest
from transformers import EsmConfig, is_torch_available
from transformers.testing_utils import TestCasePlus, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers.models.esm.modeling_esmfold import EsmForProteinFolding
class EsmFoldModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=False,
        use_input_mask=True,
        use_token_type_ids=False,
        use_labels=False,
        vocab_size=19,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    def get_config(self):
        return EsmConfig(
            vocab_size=33, hidden_size=self.hidden_size, pad_token_id=1,
            num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size, hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size,
            initializer_range=self.initializer_range, is_folding_model=True,
            esmfold_config={"trunk": {"num_blocks": 2}, "fp16_esm": False},
        )
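
    # Note: the `esmfold_config` above shrinks the folding trunk to two blocks and
    # keeps the ESM stem in fp32 ("fp16_esm": False) so the test model stays tiny.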
    def create_and_check_model(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = EsmForProteinFolding(config=config).float()
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask)
        result = model(input_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.positions.shape, (8, self.batch_size, self.seq_length, 14, 3))
        self.parent.assertEqual(result.angles.shape, (8, self.batch_size, self.seq_length, 7, 2))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class EsmFoldModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    test_mismatched_shapes = False

    all_model_classes = (EsmForProteinFolding,) if is_torch_available() else ()
    all_tokenizer_classes = ()
    pipeline_model_mapping = {} if is_torch_available() else {}
    test_sequence_classification_problem_types = False

    def setUp(self):
        self.model_tester = EsmFoldModelTester(self)
        self.config_tester = ConfigTester(self, config_class=EsmConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)
@unittest.skip("Does not support attention outputs" )
def UpperCamelCase ( self ) -> Tuple:
'''simple docstring'''
pass
@unittest.skip
def UpperCamelCase ( self ) -> str:
'''simple docstring'''
pass
@unittest.skip("Esm does not support embedding resizing" )
def UpperCamelCase ( self ) -> Optional[Any]:
'''simple docstring'''
pass
@unittest.skip("Esm does not support embedding resizing" )
def UpperCamelCase ( self ) -> List[Any]:
'''simple docstring'''
pass
@unittest.skip("ESMFold does not support passing input embeds!" )
def UpperCamelCase ( self ) -> int:
'''simple docstring'''
pass
@unittest.skip("ESMFold does not support head pruning." )
def UpperCamelCase ( self ) -> Dict:
'''simple docstring'''
pass
@unittest.skip("ESMFold does not support head pruning." )
def UpperCamelCase ( self ) -> Union[str, Any]:
'''simple docstring'''
pass
@unittest.skip("ESMFold does not support head pruning." )
def UpperCamelCase ( self ) -> Any:
'''simple docstring'''
pass
@unittest.skip("ESMFold does not support head pruning." )
def UpperCamelCase ( self ) -> Dict:
'''simple docstring'''
pass
@unittest.skip("ESMFold does not support head pruning." )
def UpperCamelCase ( self ) -> List[Any]:
'''simple docstring'''
pass
@unittest.skip("ESMFold does not output hidden states in the normal way." )
def UpperCamelCase ( self ) -> Optional[int]:
'''simple docstring'''
pass
@unittest.skip("ESMfold does not output hidden states in the normal way." )
def UpperCamelCase ( self ) -> Optional[int]:
'''simple docstring'''
pass
@unittest.skip("ESMFold only has one output format." )
def UpperCamelCase ( self ) -> List[str]:
'''simple docstring'''
pass
@unittest.skip("This test doesn\'t work for ESMFold and doesn\'t test core functionality" )
def UpperCamelCase ( self ) -> Optional[Any]:
'''simple docstring'''
pass
@unittest.skip("ESMFold does not support input chunking." )
def UpperCamelCase ( self ) -> Union[str, Any]:
'''simple docstring'''
pass
@unittest.skip("ESMFold doesn\'t respect you and it certainly doesn\'t respect your initialization arguments." )
def UpperCamelCase ( self ) -> str:
'''simple docstring'''
pass
@unittest.skip("ESMFold doesn\'t support torchscript compilation." )
def UpperCamelCase ( self ) -> Union[str, Any]:
'''simple docstring'''
pass
@unittest.skip("ESMFold doesn\'t support torchscript compilation." )
def UpperCamelCase ( self ) -> str:
'''simple docstring'''
pass
@unittest.skip("ESMFold doesn\'t support torchscript compilation." )
def UpperCamelCase ( self ) -> Any:
'''simple docstring'''
pass
@unittest.skip("ESMFold doesn\'t support data parallel." )
def UpperCamelCase ( self ) -> List[str]:
'''simple docstring'''
pass
@unittest.skip("Will be fixed soon by reducing the size of the model used for common tests." )
def UpperCamelCase ( self ) -> int:
'''simple docstring'''
pass
@require_torch
class EsmModelIntegrationTest(TestCasePlus):
    @slow
    def test_inference_protein_folding(self):
        model = EsmForProteinFolding.from_pretrained("facebook/esmfold_v1").float()
        model.eval()
        input_ids = torch.tensor([[0, 6, 4, 13, 5, 4, 16, 12, 11, 7, 2]])
        position_outputs = model(input_ids)["positions"]
        expected_slice = torch.tensor([2.5828, 0.7993, -10.9334], dtype=torch.float32)
        self.assertTrue(torch.allclose(position_outputs[0, 0, 0, 0], expected_slice, atol=1e-4))
| 714 |
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DetaImageProcessor
class DetaImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
        do_rescale=True,
        rescale_factor=1 / 255,
        do_pad=True,
    ):
        size = size if size is not None else {"shortest_edge": 18, "longest_edge": 1333}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_pad = do_pad

    def prepare_image_processor_dict(self):
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_normalize": self.do_normalize,
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_rescale": self.do_rescale,
            "rescale_factor": self.rescale_factor,
            "do_pad": self.do_pad,
        }
    def get_expected_values(self, image_inputs, batched=False):
        if not batched:
            image = image_inputs[0]
            if isinstance(image, Image.Image):
                w, h = image.size
            else:
                h, w = image.shape[1], image.shape[2]
            if w < h:
                expected_height = int(self.size["shortest_edge"] * h / w)
                expected_width = self.size["shortest_edge"]
            elif w > h:
                expected_height = self.size["shortest_edge"]
                expected_width = int(self.size["shortest_edge"] * w / h)
            else:
                expected_height = self.size["shortest_edge"]
                expected_width = self.size["shortest_edge"]

        else:
            expected_values = []
            for image in image_inputs:
                expected_height, expected_width = self.get_expected_values([image])
                expected_values.append((expected_height, expected_width))
            expected_height = max(expected_values, key=lambda item: item[0])[0]
            expected_width = max(expected_values, key=lambda item: item[1])[1]

        return expected_height, expected_width
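
    # For batched inputs the expected height/width are the per-image maxima,
    # matching DetaImageProcessor's padding of every image to the largest size
    # in the batch when `do_pad` is enabled.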
@require_torch
@require_vision
class DetaImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = DetaImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = DetaImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_proc_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "do_rescale"))
        self.assertTrue(hasattr(image_processing, "do_pad"))
        self.assertTrue(hasattr(image_processing, "size"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"shortest_edge": 18, "longest_edge": 1333})
        self.assertEqual(image_processor.do_pad, True)

    def test_batch_feature(self):
        pass
    def test_call_pil(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )
    def test_call_numpy(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )
    def test_call_pytorch(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )
    @slow
    def test_call_pytorch_with_coco_detection_annotations(self):
        # prepare image and target
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        with open("./tests/fixtures/tests_samples/COCO/coco_annotations.txt", "r") as f:
            target = json.loads(f.read())

        target = {"image_id": 39769, "annotations": target}

        # encode them
        image_processing = DetaImageProcessor()
        encoding = image_processing(images=image, annotations=target, return_tensors="pt")

        # verify pixel values
        expected_shape = torch.Size([1, 3, 800, 1066])
        self.assertEqual(encoding["pixel_values"].shape, expected_shape)

        expected_slice = torch.tensor([0.2796, 0.3138, 0.3481])
        self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3], expected_slice, atol=1e-4))

        # verify area
        expected_area = torch.tensor([5887.9600, 11250.2061, 489353.8438, 837122.7500, 147967.5156, 165732.3438])
        self.assertTrue(torch.allclose(encoding["labels"][0]["area"], expected_area))
        # verify boxes
        expected_boxes_shape = torch.Size([6, 4])
        self.assertEqual(encoding["labels"][0]["boxes"].shape, expected_boxes_shape)
        expected_boxes_slice = torch.tensor([0.5503, 0.2765, 0.0604, 0.2215])
        self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0], expected_boxes_slice, atol=1e-3))
        # verify image_id
        expected_image_id = torch.tensor([39769])
        self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"], expected_image_id))
        # verify is_crowd
        expected_is_crowd = torch.tensor([0, 0, 0, 0, 0, 0])
        self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"], expected_is_crowd))
        # verify class_labels
        expected_class_labels = torch.tensor([75, 75, 63, 65, 17, 17])
        self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"], expected_class_labels))
        # verify orig_size
        expected_orig_size = torch.tensor([480, 640])
        self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"], expected_orig_size))
        # verify size
        expected_size = torch.tensor([800, 1066])
        self.assertTrue(torch.allclose(encoding["labels"][0]["size"], expected_size))
    @slow
    def test_call_pytorch_with_coco_panoptic_annotations(self):
        # prepare image, target and masks_path
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        with open("./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt", "r") as f:
            target = json.loads(f.read())

        target = {"file_name": "000000039769.png", "image_id": 39769, "segments_info": target}

        masks_path = pathlib.Path("./tests/fixtures/tests_samples/COCO/coco_panoptic")

        # encode them
        image_processing = DetaImageProcessor(format="coco_panoptic")
        encoding = image_processing(images=image, annotations=target, masks_path=masks_path, return_tensors="pt")

        # verify pixel values
        expected_shape = torch.Size([1, 3, 800, 1066])
        self.assertEqual(encoding["pixel_values"].shape, expected_shape)

        expected_slice = torch.tensor([0.2796, 0.3138, 0.3481])
        self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3], expected_slice, atol=1e-4))

        # verify area
        expected_area = torch.tensor([147979.6875, 165527.0469, 484638.5938, 11292.9375, 5879.6562, 7634.1147])
        self.assertTrue(torch.allclose(encoding["labels"][0]["area"], expected_area))
        # verify boxes
        expected_boxes_shape = torch.Size([6, 4])
        self.assertEqual(encoding["labels"][0]["boxes"].shape, expected_boxes_shape)
        expected_boxes_slice = torch.tensor([0.2625, 0.5437, 0.4688, 0.8625])
        self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0], expected_boxes_slice, atol=1e-3))
        # verify image_id
        expected_image_id = torch.tensor([39769])
        self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"], expected_image_id))
        # verify is_crowd
        expected_is_crowd = torch.tensor([0, 0, 0, 0, 0, 0])
        self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"], expected_is_crowd))
        # verify class_labels
        expected_class_labels = torch.tensor([17, 17, 63, 75, 75, 93])
        self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"], expected_class_labels))
        # verify masks
        expected_masks_sum = 822873
        self.assertEqual(encoding["labels"][0]["masks"].sum().item(), expected_masks_sum)
        # verify orig_size
        expected_orig_size = torch.tensor([480, 640])
        self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"], expected_orig_size))
        # verify size
        expected_size = torch.tensor([800, 1066])
        self.assertTrue(torch.allclose(encoding["labels"][0]["size"], expected_size))
| 626 | 0 |
import datasets
from .nmt_bleu import compute_bleu # From: https://github.com/tensorflow/nmt/blob/master/nmt/scripts/bleu.py
_CITATION = "\\n@INPROCEEDINGS{Papineni02bleu:a,\n author = {Kishore Papineni and Salim Roukos and Todd Ward and Wei-jing Zhu},\n title = {BLEU: a Method for Automatic Evaluation of Machine Translation},\n booktitle = {},\n year = {2002},\n pages = {311--318}\n}\n@inproceedings{lin-och-2004-orange,\n title = \"{ORANGE}: a Method for Evaluating Automatic Evaluation Metrics for Machine Translation\",\n author = \"Lin, Chin-Yew and\n Och, Franz Josef\",\n booktitle = \"{COLING} 2004: Proceedings of the 20th International Conference on Computational Linguistics\",\n month = \"aug 23{--}aug 27\",\n year = \"2004\",\n address = \"Geneva, Switzerland\",\n publisher = \"COLING\",\n url = \"https://www.aclweb.org/anthology/C04-1072\",\n pages = \"501--507\",\n}\n"
_DESCRIPTION = "\\nBLEU (bilingual evaluation understudy) is an algorithm for evaluating the quality of text which has been machine-translated from one natural language to another.\nQuality is considered to be the correspondence between a machine's output and that of a human: \"the closer a machine translation is to a professional human translation,\nthe better it is\" – this is the central idea behind BLEU. BLEU was one of the first metrics to claim a high correlation with human judgements of quality, and\nremains one of the most popular automated and inexpensive metrics.\n\nScores are calculated for individual translated segments—generally sentences—by comparing them with a set of good quality reference translations.\nThose scores are then averaged over the whole corpus to reach an estimate of the translation's overall quality. Intelligibility or grammatical correctness\nare not taken into account[citation needed].\n\nBLEU's output is always a number between 0 and 1. This value indicates how similar the candidate text is to the reference texts, with values closer to 1\nrepresenting more similar texts. Few human translations will attain a score of 1, since this would indicate that the candidate is identical to one of the\nreference translations. For this reason, it is not necessary to attain a score of 1. Because there are more opportunities to match, adding additional\nreference translations will increase the BLEU score.\n"
_KWARGS_DESCRIPTION = "\nComputes BLEU score of translated segments against one or more references.\nArgs:\n predictions: list of translations to score.\n Each translation should be tokenized into a list of tokens.\n references: list of lists of references for each translation.\n Each reference should be tokenized into a list of tokens.\n max_order: Maximum n-gram order to use when computing BLEU score.\n smooth: Whether or not to apply Lin et al. 2004 smoothing.\nReturns:\n 'bleu': bleu score,\n 'precisions': geometric mean of n-gram precisions,\n 'brevity_penalty': brevity penalty,\n 'length_ratio': ratio of lengths,\n 'translation_length': translation_length,\n 'reference_length': reference_length\nExamples:\n\n >>> predictions = [\n ... [\"hello\", \"there\", \"general\", \"kenobi\"], # tokenized prediction of the first sample\n ... [\"foo\", \"bar\", \"foobar\"] # tokenized prediction of the second sample\n ... ]\n >>> references = [\n ... [[\"hello\", \"there\", \"general\", \"kenobi\"], [\"hello\", \"there\", \"!\"]], # tokenized references for the first sample (2 references)\n ... [[\"foo\", \"bar\", \"foobar\"]] # tokenized references for the second sample (1 reference)\n ... ]\n >>> bleu = datasets.load_metric(\"bleu\")\n >>> results = bleu.compute(predictions=predictions, references=references)\n >>> print(results[\"bleu\"])\n 1.0\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION )
class Bleu(datasets.Metric):
    def _info(self):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Sequence(datasets.Value('''string''' , id='''token''' ) , id='''sequence''' ),
'''references''': datasets.Sequence(
datasets.Sequence(datasets.Value('''string''' , id='''token''' ) , id='''sequence''' ) , id='''references''' ),
} ) , codebase_urls=['''https://github.com/tensorflow/nmt/blob/master/nmt/scripts/bleu.py'''] , reference_urls=[
'''https://en.wikipedia.org/wiki/BLEU''',
'''https://towardsdatascience.com/evaluating-text-output-in-nlp-bleu-at-your-own-risk-e8609665a213''',
] , )
    def _compute(self, predictions, references, max_order=4, smooth=False):
        score = compute_bleu(
            reference_corpus=references, translation_corpus=predictions, max_order=max_order, smooth=smooth
        )
        (bleu, precisions, bp, ratio, translation_length, reference_length) = score
return {
"bleu": bleu,
"precisions": precisions,
"brevity_penalty": bp,
"length_ratio": ratio,
"translation_length": translation_length,
"reference_length": reference_length,
}
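
# Note on `smooth`: Lin & Och (2004) smoothing adds one to each n-gram match
# count and total, so a candidate with no higher-order matches does not
# collapse the geometric mean of precisions to zero.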
| 10 |
"""simple docstring"""
from collections import Counter
from pathlib import Path
from typing import Optional, Tuple
import yaml
class _NoDuplicateSafeLoader(yaml.SafeLoader):
    def _check_no_duplicates_on_constructed_node(self, node):
        keys = [self.constructed_objects[key_node] for key_node, _ in node.value]
        keys = [tuple(key) if isinstance(key, list) else key for key in keys]
        counter = Counter(keys)
        duplicate_keys = [key for key in counter if counter[key] > 1]
        if duplicate_keys:
            raise TypeError(f"Got duplicate yaml keys: {duplicate_keys}")

    def construct_mapping(self, node, deep=False):
        mapping = super().construct_mapping(node, deep=deep)
        self._check_no_duplicates_on_constructed_node(node)
        return mapping
def _split_yaml_from_readme(readme_content: str) -> Tuple[Optional[str], str]:
    full_content = list(readme_content.splitlines())
    if full_content and full_content[0] == "---" and "---" in full_content[1:]:
        sep_idx = full_content[1:].index("---") + 1
        yamlblock = "\n".join(full_content[1:sep_idx])
        return yamlblock, "\n".join(full_content[sep_idx + 1 :])
    return None, "\n".join(full_content)
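
# Example: for a README whose lines are ["---", "language: en", "---", "# My dataset"],
# this returns ("language: en", "# My dataset"); without a leading YAML block it
# returns (None, <the unchanged content>).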
class DatasetMetadata(dict):
    # class attributes
    _FIELDS_WITH_DASHES = {"train_eval_index"}  # train-eval-index in the YAML metadata

    @classmethod
    def from_readme(cls, path: Path) -> "DatasetMetadata":
        with open(path, encoding="utf-8") as readme_file:
            yaml_string, _ = _split_yaml_from_readme(readme_file.read())
        if yaml_string is not None:
            return cls.from_yaml_string(yaml_string)
        else:
            return cls()

    def to_readme(self, path: Path):
        if path.exists():
            with open(path, encoding="utf-8") as readme_file:
                readme_content = readme_file.read()
        else:
            readme_content = None
        updated_readme_content = self._to_readme(readme_content)
        with open(path, "w", encoding="utf-8") as readme_file:
            readme_file.write(updated_readme_content)

    def _to_readme(self, readme_content: Optional[str] = None) -> str:
        if readme_content is not None:
            _, content = _split_yaml_from_readme(readme_content)
            full_content = "---\n" + self.to_yaml_string() + "---\n" + content
        else:
            full_content = "---\n" + self.to_yaml_string() + "---\n"
        return full_content

    @classmethod
    def from_yaml_string(cls, string: str) -> "DatasetMetadata":
        metadata_dict = yaml.load(string, Loader=_NoDuplicateSafeLoader) or {}
        # Convert the YAML keys to DatasetMetadata fields
        metadata_dict = {
            (key.replace("-", "_") if key.replace("-", "_") in cls._FIELDS_WITH_DASHES else key): value
            for key, value in metadata_dict.items()
        }
        return cls(**metadata_dict)

    def to_yaml_string(self) -> str:
        return yaml.safe_dump(
            {
                (key.replace("_", "-") if key in self._FIELDS_WITH_DASHES else key): value
                for key, value in self.items()
            },
            sort_keys=False,
            allow_unicode=True,
            encoding="utf-8",
        ).decode("utf-8")
known_task_ids = {
'''image-classification''': [],
'''translation''': [],
'''image-segmentation''': [],
'''fill-mask''': [],
'''automatic-speech-recognition''': [],
'''token-classification''': [],
'''sentence-similarity''': [],
'''audio-classification''': [],
'''question-answering''': [],
'''summarization''': [],
'''zero-shot-classification''': [],
'''table-to-text''': [],
'''feature-extraction''': [],
'''other''': [],
'''multiple-choice''': [],
'''text-classification''': [],
'''text-to-image''': [],
'''text2text-generation''': [],
'''zero-shot-image-classification''': [],
'''tabular-classification''': [],
'''tabular-regression''': [],
'''image-to-image''': [],
'''tabular-to-text''': [],
'''unconditional-image-generation''': [],
'''text-retrieval''': [],
'''text-to-speech''': [],
'''object-detection''': [],
'''audio-to-audio''': [],
'''text-generation''': [],
'''conversational''': [],
'''table-question-answering''': [],
'''visual-question-answering''': [],
'''image-to-text''': [],
'''reinforcement-learning''': [],
'''voice-activity-detection''': [],
'''time-series-forecasting''': [],
'''document-question-answering''': [],
}
if __name__ == "__main__":
from argparse import ArgumentParser
    ap = ArgumentParser(usage="Validate the yaml metadata block of a README.md file.")
    ap.add_argument("readme_filepath")
    args = ap.parse_args()

    readme_filepath = Path(args.readme_filepath)
    dataset_metadata = DatasetMetadata.from_readme(readme_filepath)
print(dataset_metadata)
dataset_metadata.to_readme(readme_filepath)
| 103 | 0 |
INSTALL_CONTENT = """
# Transformers installation
! pip install transformers datasets
# To install from source instead of the last release, comment the command above and uncomment the following one.
# ! pip install git+https://github.com/huggingface/transformers.git
"""
notebook_first_cells = [{"type": "code", "content": INSTALL_CONTENT}]
black_avoid_patterns = {
    "{processor_class}": "FakeProcessorClass",
    "{model_class}": "FakeModelClass",
    "{object_class}": "FakeObjectClass",
}
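
# Consumed by the documentation builder when converting doc pages to notebooks:
# INSTALL_CONTENT is injected as the first code cell, and black_avoid_patterns
# lists placeholders that the code formatter should leave untouched.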
| 715 |
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...models.auto.modeling_auto import MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
from ...utils import logging
from ..auto import CONFIG_MAPPING
logger = logging.get_logger(__name__)

BLIP_2_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "salesforce/blip2-opt-2.7b": "https://huggingface.co/salesforce/blip2-opt-2.7b/resolve/main/config.json",
}
class Blip2VisionConfig(PretrainedConfig):
    model_type = "blip_2_vision_model"

    def __init__(
        self,
        hidden_size=1408,
        intermediate_size=6144,
        num_hidden_layers=39,
        num_attention_heads=16,
        image_size=224,
        patch_size=14,
        hidden_act="gelu",
        layer_norm_eps=0.00001,
        attention_dropout=0.0,
        initializer_range=1e-10,
        qkv_bias=True,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.patch_size = patch_size
        self.image_size = image_size
        self.initializer_range = initializer_range
        self.attention_dropout = attention_dropout
        self.layer_norm_eps = layer_norm_eps
        self.hidden_act = hidden_act
        self.qkv_bias = qkv_bias

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        # get the vision config dict if we are loading from Blip2Config
        if config_dict.get("model_type") == "blip-2":
            config_dict = config_dict["vision_config"]

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )

        return cls.from_dict(config_dict, **kwargs)
class Blip2QFormerConfig(PretrainedConfig):
    model_type = "blip_2_qformer"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        position_embedding_type="absolute",
        cross_attention_frequency=2,
        encoder_hidden_size=1408,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.cross_attention_frequency = cross_attention_frequency
        self.encoder_hidden_size = encoder_hidden_size

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        # get the qformer config dict if we are loading from Blip2Config
        if config_dict.get("model_type") == "blip-2":
            config_dict = config_dict["qformer_config"]

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )

        return cls.from_dict(config_dict, **kwargs)
class Blip2Config(PretrainedConfig):
    model_type = "blip-2"
    is_composition = True

    def __init__(self, vision_config=None, qformer_config=None, text_config=None, num_query_tokens=32, **kwargs):
        super().__init__(**kwargs)

        if vision_config is None:
            vision_config = {}
            logger.info("vision_config is None. initializing the Blip2VisionConfig with default values.")

        if qformer_config is None:
            qformer_config = {}
            logger.info("qformer_config is None. Initializing the Blip2QFormerConfig with default values.")

        if text_config is None:
            text_config = {}
            logger.info("text_config is None. Initializing the text config with default values (`OPTConfig`).")

        self.vision_config = Blip2VisionConfig(**vision_config)
        self.qformer_config = Blip2QFormerConfig(**qformer_config)
        text_model_type = text_config["model_type"] if "model_type" in text_config else "opt"
        self.text_config = CONFIG_MAPPING[text_model_type](**text_config)

        self.tie_word_embeddings = self.text_config.tie_word_embeddings
        self.is_encoder_decoder = self.text_config.is_encoder_decoder

        self.num_query_tokens = num_query_tokens
        self.qformer_config.encoder_hidden_size = self.vision_config.hidden_size
        self.use_decoder_only_language_model = self.text_config.model_type in MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
        self.initializer_factor = 1.0
        self.initializer_range = 0.02

    @classmethod
    def from_vision_qformer_text_configs(
        cls,
        vision_config: Blip2VisionConfig,
        qformer_config: Blip2QFormerConfig,
        text_config: PretrainedConfig,
        **kwargs,
    ):
        return cls(
            vision_config=vision_config.to_dict(),
            qformer_config=qformer_config.to_dict(),
            text_config=text_config.to_dict(),
            **kwargs,
        )

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["vision_config"] = self.vision_config.to_dict()
        output["qformer_config"] = self.qformer_config.to_dict()
        output["text_config"] = self.text_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
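# Offline composition sketch (illustrative): build the composite config from
# default sub-configs; "opt" resolves to OPTConfig through CONFIG_MAPPING, so
# nothing is downloaded.
def _example_blip2_config() -> Blip2Config:
    return Blip2Config.from_vision_qformer_text_configs(
        vision_config=Blip2VisionConfig(),
        qformer_config=Blip2QFormerConfig(),
        text_config=CONFIG_MAPPING["opt"](),
    )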
| 108 | 0 |
import math


def insertion_sort(array: list, start: int = 0, end: int = 0) -> list:
    end = end or len(array)
    for i in range(start, end):
        temp_index = i
        temp_index_value = array[i]
        while temp_index != start and temp_index_value < array[temp_index - 1]:
            array[temp_index] = array[temp_index - 1]
            temp_index -= 1
        array[temp_index] = temp_index_value
    return array


def heapify(array: list, index: int, heap_size: int) -> None:  # Max Heap
    largest = index
    left_index = 2 * index + 1  # Left Node
    right_index = 2 * index + 2  # Right Node
    if left_index < heap_size and array[largest] < array[left_index]:
        largest = left_index
    if right_index < heap_size and array[largest] < array[right_index]:
        largest = right_index
    if largest != index:
        array[index], array[largest] = array[largest], array[index]
        heapify(array, largest, heap_size)


def heap_sort(array: list) -> list:
    n = len(array)
    for i in range(n // 2, -1, -1):
        heapify(array, i, n)
    for i in range(n - 1, 0, -1):
        array[i], array[0] = array[0], array[i]
        heapify(array, 0, i)
    return array


def median_of_3(array: list, first_index: int, middle_index: int, last_index: int) -> int:
    if (array[first_index] > array[middle_index]) != (
        array[first_index] > array[last_index]
    ):
        return array[first_index]
    elif (array[middle_index] > array[first_index]) != (
        array[middle_index] > array[last_index]
    ):
        return array[middle_index]
    else:
        return array[last_index]


def partition(array: list, low: int, high: int, pivot: int) -> int:
    i = low
    j = high
    while True:
        while array[i] < pivot:
            i += 1
        j -= 1
        while pivot < array[j]:
            j -= 1
        if i >= j:
            return i
        array[i], array[j] = array[j], array[i]
        i += 1


def sort(array: list) -> list:
    if len(array) == 0:
        return array
    max_depth = 2 * math.ceil(math.log2(len(array)))
    size_threshold = 16
    return intro_sort(array, 0, len(array), size_threshold, max_depth)


def intro_sort(array: list, start: int, end: int, size_threshold: int, max_depth: int) -> list:
    while end - start > size_threshold:
        if max_depth == 0:
            return heap_sort(array)
        max_depth -= 1
        pivot = median_of_3(array, start, start + ((end - start) // 2) + 1, end - 1)
        p = partition(array, start, end, pivot)
        intro_sort(array, p, end, size_threshold, max_depth)
        end = p
    return insertion_sort(array, start, end)


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    user_input = input("Enter numbers separated by a comma : ").strip()
    unsorted = [float(item) for item in user_input.split(",")]
    print(sort(unsorted))
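# Property-check sketch against the built-in sorted() (illustrative helper,
# not part of the original algorithm module):
def _self_check(trials: int = 100) -> None:
    import random

    for _ in range(trials):
        data = [random.randint(-1000, 1000) for _ in range(random.randint(0, 200))]
        assert sort(list(data)) == sorted(data)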
| 306 |
import math
import time
from typing import Dict, List, Optional
from torch.utils.data import Dataset
from transformers import Seq2SeqTrainer, is_torch_tpu_available
from transformers.trainer_utils import PredictionOutput, speed_metrics
if is_torch_tpu_available(check_device=False):
    import torch_xla.core.xla_model as xm
    import torch_xla.debug.metrics as met
class QuestionAnsweringSeq2SeqTrainer(Seq2SeqTrainer):
    def __init__(self, *args, eval_examples=None, post_process_function=None, **kwargs):
        super().__init__(*args, **kwargs)
        self.eval_examples = eval_examples
        self.post_process_function = post_process_function

    def evaluate(
        self,
        eval_dataset: Optional[Dataset] = None,
        eval_examples=None,
        ignore_keys: Optional[List[str]] = None,
        metric_key_prefix: str = "eval",
        **gen_kwargs,
    ) -> Dict[str, float]:
        gen_kwargs = gen_kwargs.copy()
        gen_kwargs["max_length"] = (
            gen_kwargs["max_length"] if gen_kwargs.get("max_length") is not None else self.args.generation_max_length
        )
        gen_kwargs["num_beams"] = (
            gen_kwargs["num_beams"] if gen_kwargs.get("num_beams") is not None else self.args.generation_num_beams
        )
        self._gen_kwargs = gen_kwargs

        eval_dataset = self.eval_dataset if eval_dataset is None else eval_dataset
        eval_dataloader = self.get_eval_dataloader(eval_dataset)
        eval_examples = self.eval_examples if eval_examples is None else eval_examples

        # Temporarily disable metric computation, we will do it in the loop here.
        compute_metrics = self.compute_metrics
        self.compute_metrics = None
        start_time = time.time()
        eval_loop = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
        try:
            output = eval_loop(
                eval_dataloader,
                description="Evaluation",
                prediction_loss_only=True if compute_metrics is None else None,
                ignore_keys=ignore_keys,
                metric_key_prefix=metric_key_prefix,
            )
        finally:
            self.compute_metrics = compute_metrics
        total_batch_size = self.args.eval_batch_size * self.args.world_size
        if f"{metric_key_prefix}_jit_compilation_time" in output.metrics:
            start_time += output.metrics[f"{metric_key_prefix}_jit_compilation_time"]
        output.metrics.update(
            speed_metrics(
                metric_key_prefix,
                start_time,
                num_samples=output.num_samples,
                num_steps=math.ceil(output.num_samples / total_batch_size),
            )
        )

        if self.post_process_function is not None and self.compute_metrics is not None and self.args.should_save:
            # Only the main node write the results by default
            eval_preds = self.post_process_function(eval_examples, eval_dataset, output)
            metrics = self.compute_metrics(eval_preds)

            # Prefix all keys with metric_key_prefix + '_'
            for key in list(metrics.keys()):
                if not key.startswith(f"{metric_key_prefix}_"):
                    metrics[f"{metric_key_prefix}_{key}"] = metrics.pop(key)
            metrics.update(output.metrics)
        else:
            metrics = output.metrics

        if self.args.should_log:
            # Only the main node log the results by default
            self.log(metrics)

        if self.args.tpu_metrics_debug or self.args.debug:
            # tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.)
            xm.master_print(met.metrics_report())

        self.control = self.callback_handler.on_evaluate(self.args, self.state, self.control, metrics)
        return metrics

    def predict(
        self, predict_dataset, predict_examples, ignore_keys=None, metric_key_prefix: str = "test", **gen_kwargs
    ):
        gen_kwargs = gen_kwargs.copy()

        predict_dataloader = self.get_test_dataloader(predict_dataset)

        # Temporarily disable metric computation, we will do it in the loop here.
        compute_metrics = self.compute_metrics
        self.compute_metrics = None
        start_time = time.time()
        eval_loop = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
        try:
            output = eval_loop(
                predict_dataloader,
                description="Prediction",
                prediction_loss_only=True if compute_metrics is None else None,
                ignore_keys=ignore_keys,
                metric_key_prefix=metric_key_prefix,
            )
        finally:
            self.compute_metrics = compute_metrics
        total_batch_size = self.args.eval_batch_size * self.args.world_size
        if f"{metric_key_prefix}_jit_compilation_time" in output.metrics:
            start_time += output.metrics[f"{metric_key_prefix}_jit_compilation_time"]
        output.metrics.update(
            speed_metrics(
                metric_key_prefix,
                start_time,
                num_samples=output.num_samples,
                num_steps=math.ceil(output.num_samples / total_batch_size),
            )
        )

        if self.post_process_function is None or self.compute_metrics is None:
            return output

        predictions = self.post_process_function(predict_examples, predict_dataset, output, "predict")
        metrics = self.compute_metrics(predictions)

        # Prefix all keys with metric_key_prefix + '_'
        for key in list(metrics.keys()):
            if not key.startswith(f"{metric_key_prefix}_"):
                metrics[f"{metric_key_prefix}_{key}"] = metrics.pop(key)
        metrics.update(output.metrics)
        return PredictionOutput(predictions=predictions.predictions, label_ids=predictions.label_ids, metrics=metrics)
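# Hypothetical wiring sketch (model, args, datasets and the post-processing
# callable come from the surrounding QA example scripts, not from this module):
#     trainer = QuestionAnsweringSeq2SeqTrainer(
#         model=model,
#         args=training_args,
#         train_dataset=train_dataset,
#         eval_dataset=eval_dataset,
#         eval_examples=eval_examples,
#         post_process_function=post_processing_function,
#         compute_metrics=compute_metrics,
#     )
#     metrics = trainer.evaluate(max_length=32, num_beams=4)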
| 306 | 1 |
from __future__ import annotations
from collections.abc import Callable
from typing import Generic, TypeVar
__snake_case : int = TypeVar("T")
__snake_case : List[Any] = TypeVar("U")
class A ( Generic[T, U] ):
def __init__( self , snake_case_ , snake_case_ ) -> Any:
_a = key
_a = val
_a = None
_a = None
def __repr__( self ) -> str:
return (
F'''Node: key: {self.key}, val: {self.val}, '''
F'''has next: {bool(self.next )}, has prev: {bool(self.prev )}'''
)
class A ( Generic[T, U] ):
def __init__( self ) -> None:
_a = DoubleLinkedListNode(snake_case_ , snake_case_ )
_a = DoubleLinkedListNode(snake_case_ , snake_case_ )
_a , _a = self.rear, self.head
def __repr__( self ) -> str:
_a = ["DoubleLinkedList"]
_a = self.head
while node.next is not None:
rep.append(str(snake_case_ ) )
_a = node.next
rep.append(str(self.rear ) )
return ",\n ".join(snake_case_ )
def __lowerCAmelCase ( self , snake_case_ ) -> None:
_a = self.rear.prev
# All nodes other than self.head are guaranteed to have non-None previous
assert previous is not None
_a = node
_a = previous
_a = node
_a = self.rear
def __lowerCAmelCase ( self , snake_case_ ) -> DoubleLinkedListNode[T, U] | None:
if node.prev is None or node.next is None:
return None
_a = node.next
_a = node.prev
_a = None
_a = None
return node
class A ( Generic[T, U] ):
__UpperCAmelCase : dict[Callable[[T], U], LRUCache[T, U]] = {}
def __init__( self , snake_case_ ) -> Any:
_a = DoubleLinkedList()
_a = capacity
_a = 0
_a = 0
_a = 0
_a = {}
def __repr__( self ) -> str:
return (
F'''CacheInfo(hits={self.hits}, misses={self.miss}, '''
F'''capacity={self.capacity}, current size={self.num_keys})'''
)
def __contains__( self , snake_case_ ) -> bool:
return key in self.cache
def __lowerCAmelCase ( self , snake_case_ ) -> U | None:
# Note: pythonic interface would throw KeyError rather than return None
if key in self.cache:
self.hits += 1
_a = self.cache[key]
_a = self.list.remove(self.cache[key] )
assert node == value_node
# node is guaranteed not None because it is in self.cache
assert node is not None
self.list.add(snake_case_ )
return node.val
self.miss += 1
return None
def __lowerCAmelCase ( self , snake_case_ , snake_case_ ) -> None:
if key not in self.cache:
if self.num_keys >= self.capacity:
# delete first node (oldest) when over capacity
_a = self.list.head.next
# guaranteed to have a non-None first node when num_keys > 0
# explain to type checker via assertions
assert first_node is not None
assert first_node.key is not None
assert (
self.list.remove(snake_case_ ) is not None
) # node guaranteed to be in list assert node.key is not None
del self.cache[first_node.key]
self.num_keys -= 1
_a = DoubleLinkedListNode(snake_case_ , snake_case_ )
self.list.add(self.cache[key] )
self.num_keys += 1
else:
# bump node to the end of the list, update value
_a = self.list.remove(self.cache[key] )
assert node is not None # node guaranteed to be in list
_a = value
self.list.add(snake_case_ )
@classmethod
def __lowerCAmelCase ( cls , snake_case_ = 1_2_8 ) -> Callable[[Callable[[T], U]], Callable[..., U]]:
def cache_decorator_inner(snake_case_ ) -> Callable[..., U]:
def cache_decorator_wrapper(*snake_case_ ) -> U:
if func not in cls.decorator_function_to_instance_map:
_a = LRUCache(snake_case_ )
_a = cls.decorator_function_to_instance_map[func].get(args[0] )
if result is None:
_a = func(*snake_case_ )
cls.decorator_function_to_instance_map[func].put(args[0] , snake_case_ )
return result
def cache_info() -> LRUCache[T, U]:
return cls.decorator_function_to_instance_map[func]
setattr(snake_case_ , "cache_info" , snake_case_ ) # noqa: B010
return cache_decorator_wrapper
return cache_decorator_inner
if __name__ == "__main__":
import doctest
doctest.testmod()
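# Decorator usage sketch (`fib` is illustrative; the shared cache is keyed by
# the first positional argument, so single-argument functions fit best):
@LRUCache.decorator(100)
def fib(num: int) -> int:
    if num in (1, 2):
        return 1
    return fib(num - 1) + fib(num - 2)


# fib(20) -> 6765; fib.cache_info() reports the hits/misses of the shared cache.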
| 705 |
import time
from dataclasses import dataclass
from multiprocessing import Pool
from unittest import TestCase
from unittest.mock import patch
import multiprocess
import numpy as np
import pytest
from datasets.utils.py_utils import (
NestedDataStructure,
asdict,
iflatmap_unordered,
map_nested,
temp_seed,
temporary_assignment,
zip_dict,
)
from .utils import require_tf, require_torch
def np_sum(x):  # picklable for multiprocessing
    return x.sum()


def add_one(i):  # picklable for multiprocessing
    return i + 1
@dataclass
class A:
    x: int
    y: str
class PyUtilsTest(TestCase):
    def test_map_nested(self):
_a = {}
_a = []
_a = 1
_a = [1, 2]
_a = {"a": 1, "b": 2}
_a = {"a": [1, 2], "b": [3, 4]}
_a = {"a": {"1": 1}, "b": 2}
_a = {"a": 1, "b": 2, "c": 3, "d": 4}
_a = {}
_a = []
_a = 2
_a = [2, 3]
_a = {"a": 2, "b": 3}
_a = {"a": [2, 3], "b": [4, 5]}
_a = {"a": {"1": 2}, "b": 3}
_a = {"a": 2, "b": 3, "c": 4, "d": 5}
self.assertEqual(map_nested(snake_case_ , snake_case_ ) , snake_case_ )
self.assertEqual(map_nested(snake_case_ , snake_case_ ) , snake_case_ )
self.assertEqual(map_nested(snake_case_ , snake_case_ ) , snake_case_ )
self.assertEqual(map_nested(snake_case_ , snake_case_ ) , snake_case_ )
self.assertEqual(map_nested(snake_case_ , snake_case_ ) , snake_case_ )
self.assertEqual(map_nested(snake_case_ , snake_case_ ) , snake_case_ )
self.assertEqual(map_nested(snake_case_ , snake_case_ ) , snake_case_ )
self.assertEqual(map_nested(snake_case_ , snake_case_ ) , snake_case_ )
_a = 2
self.assertEqual(map_nested(snake_case_ , snake_case_ , num_proc=snake_case_ ) , snake_case_ )
self.assertEqual(map_nested(snake_case_ , snake_case_ , num_proc=snake_case_ ) , snake_case_ )
self.assertEqual(map_nested(snake_case_ , snake_case_ , num_proc=snake_case_ ) , snake_case_ )
self.assertEqual(map_nested(snake_case_ , snake_case_ , num_proc=snake_case_ ) , snake_case_ )
self.assertEqual(map_nested(snake_case_ , snake_case_ , num_proc=snake_case_ ) , snake_case_ )
self.assertEqual(map_nested(snake_case_ , snake_case_ , num_proc=snake_case_ ) , snake_case_ )
self.assertEqual(map_nested(snake_case_ , snake_case_ , num_proc=snake_case_ ) , snake_case_ )
self.assertEqual(map_nested(snake_case_ , snake_case_ , num_proc=snake_case_ ) , snake_case_ )
_a = {"a": np.eye(2 ), "b": np.zeros(3 ), "c": np.ones(2 )}
_a = {"a": 2, "b": 0, "c": 2}
_a = {
"a": np.eye(2 ).astype(snake_case_ ),
"b": np.zeros(3 ).astype(snake_case_ ),
"c": np.ones(2 ).astype(snake_case_ ),
}
self.assertEqual(map_nested(snake_case_ , snake_case_ , map_numpy=snake_case_ ) , snake_case_ )
self.assertEqual(
{k: v.tolist() for k, v in map_nested(snake_case_ , snake_case_ , map_numpy=snake_case_ ).items()} , {k: v.tolist() for k, v in expected_map_nested_sna_int.items()} , )
self.assertEqual(map_nested(snake_case_ , snake_case_ , map_numpy=snake_case_ , num_proc=snake_case_ ) , snake_case_ )
self.assertEqual(
{k: v.tolist() for k, v in map_nested(snake_case_ , snake_case_ , map_numpy=snake_case_ , num_proc=snake_case_ ).items()} , {k: v.tolist() for k, v in expected_map_nested_sna_int.items()} , )
with self.assertRaises(snake_case_ ): # can't pickle a local lambda
map_nested(lambda snake_case_ : x + 1 , snake_case_ , num_proc=snake_case_ )
    def test_zip_dict(self):
        d1 = {"a": 1, "b": 2}
        d2 = {"a": 3, "b": 4}
        d3 = {"a": 5, "b": 6}
        expected_zip_dict_result = sorted([("a", (1, 3, 5)), ("b", (2, 4, 6))])
        self.assertEqual(sorted(zip_dict(d1, d2, d3)), expected_zip_dict_result)
    def test_temporary_assignment(self):
        class Foo:
            my_attr = "bar"

        foo = Foo()
        self.assertEqual(foo.my_attr, "bar")
        with temporary_assignment(foo, "my_attr", "BAR"):
            self.assertEqual(foo.my_attr, "BAR")
        self.assertEqual(foo.my_attr, "bar")
@pytest.mark.parametrize(
"iterable_length, num_proc, expected_num_proc", [
(1, None, 1),
(1, 1, 1),
(2, None, 1),
(2, 1, 1),
(2, 2, 1),
(2, 3, 1),
(3, 2, 1),
(16, 16, 16),
(16, 17, 16),
(17, 16, 16),
], )
def test_map_nested_num_proc(iterable_length, num_proc, expected_num_proc):
    with patch("datasets.utils.py_utils._single_map_nested") as mock_single_map_nested, patch(
        "datasets.parallel.parallel.Pool"
    ) as mock_multiprocessing_pool:
        data_struct = {f"{i}": i for i in range(iterable_length)}
        _ = map_nested(lambda x: x + 10, data_struct, num_proc=num_proc, parallel_min_length=16)
        if expected_num_proc == 1:
            assert mock_single_map_nested.called
            assert not mock_multiprocessing_pool.called
        else:
            assert not mock_single_map_nested.called
            assert mock_multiprocessing_pool.called
            assert mock_multiprocessing_pool.call_args[0][0] == expected_num_proc
class TempSeedTest(TestCase):
    @require_tf
    def test_temp_seed_tensorflow(self):
        import tensorflow as tf
        from tensorflow.keras import layers

        model = layers.Dense(2)

        def gen_random_output():
            x = tf.random.uniform((1, 3))
            return model(x).numpy()

        with temp_seed(42, set_tensorflow=True):
            out1 = gen_random_output()
        with temp_seed(42, set_tensorflow=True):
            out2 = gen_random_output()
        out3 = gen_random_output()

        np.testing.assert_equal(out1, out2)
        self.assertGreater(np.abs(out1 - out3).sum(), 0)

    @require_torch
    def test_temp_seed_torch(self):
        import torch

        def gen_random_output():
            model = torch.nn.Linear(3, 2)
            x = torch.rand(1, 3)
            return model(x).detach().numpy()

        with temp_seed(42, set_pytorch=True):
            out1 = gen_random_output()
        with temp_seed(42, set_pytorch=True):
            out2 = gen_random_output()
        out3 = gen_random_output()

        np.testing.assert_equal(out1, out2)
        self.assertGreater(np.abs(out1 - out3).sum(), 0)

    def test_temp_seed_numpy(self):
        def gen_random_output():
            return np.random.rand(1, 3)

        with temp_seed(42):
            out1 = gen_random_output()
        with temp_seed(42):
            out2 = gen_random_output()
        out3 = gen_random_output()

        np.testing.assert_equal(out1, out2)
        self.assertGreater(np.abs(out1 - out3).sum(), 0)
@pytest.mark.parametrize("input_data", [{}] )
def test_nested_data_structure_data(input_data):
    output_data = NestedDataStructure(input_data).data
    assert output_data == input_data
@pytest.mark.parametrize(
"data, expected_output", [
({}, []),
([], []),
("foo", ["foo"]),
(["foo", "bar"], ["foo", "bar"]),
([["foo", "bar"]], ["foo", "bar"]),
([[["foo"], ["bar"]]], ["foo", "bar"]),
([[["foo"], "bar"]], ["foo", "bar"]),
({"a": 1, "b": 2}, [1, 2]),
({"a": [1, 2], "b": [3, 4]}, [1, 2, 3, 4]),
({"a": [[1, 2]], "b": [[3, 4]]}, [1, 2, 3, 4]),
({"a": [[1, 2]], "b": [3, 4]}, [1, 2, 3, 4]),
({"a": [[[1], [2]]], "b": [[[3], [4]]]}, [1, 2, 3, 4]),
({"a": [[[1], [2]]], "b": [[3, 4]]}, [1, 2, 3, 4]),
({"a": [[[1], [2]]], "b": [3, 4]}, [1, 2, 3, 4]),
({"a": [[[1], [2]]], "b": [3, [4]]}, [1, 2, 3, 4]),
({"a": {"1": 1}, "b": 2}, [1, 2]),
({"a": {"1": [1]}, "b": 2}, [1, 2]),
({"a": {"1": [1]}, "b": [2]}, [1, 2]),
], )
def test_flatten(data, expected_output):
    output = NestedDataStructure(data).flatten()
    assert output == expected_output
def test_asdict():
    input = A(x=1, y="foobar")
    expected_output = {"x": 1, "y": "foobar"}
    assert asdict(input) == expected_output

    input = {"a": {"b": A(x=10, y="foo")}, "c": [A(x=20, y="bar")]}
    expected_output = {"a": {"b": {"x": 10, "y": "foo"}}, "c": [{"x": 20, "y": "bar"}]}
    assert asdict(input) == expected_output

    with pytest.raises(TypeError):
        asdict([1, A(x=10, y="foo")])
def _split_text(text: str):
    return text.split()


def _2seconds_generator_of_2items_with_timing(content):
    yield (time.time(), content)
    time.sleep(2)
    yield (time.time(), content)


def test_iflatmap_unordered():
    with Pool(2) as pool:
        out = list(iflatmap_unordered(pool, _split_text, kwargs_iterable=[{"text": "hello there"}] * 10))
        assert out.count("hello") == 10
        assert out.count("there") == 10
        assert len(out) == 20

    # check multiprocess from pathos (uses dill for pickling)
    with multiprocess.Pool(2) as pool:
        out = list(iflatmap_unordered(pool, _split_text, kwargs_iterable=[{"text": "hello there"}] * 10))
        assert out.count("hello") == 10
        assert out.count("there") == 10
        assert len(out) == 20

    # check that we get items as fast as possible
    with Pool(2) as pool:
        out = []
        for yield_time, content in iflatmap_unordered(
            pool, _2seconds_generator_of_2items_with_timing, kwargs_iterable=[{"content": "a"}, {"content": "b"}]
        ):
            assert yield_time < time.time() + 0.1, "we should each item directly after it was yielded"
            out.append(content)
        assert out.count("a") == 2
        assert out.count("b") == 2
        assert len(out) == 4
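# Behavior sketch of map_nested as exercised above (illustrative):
#     map_nested(add_one, {"a": [1, 2], "b": 3})  ->  {"a": [2, 3], "b": 4}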
| 691 | 0 |
import gc
import random
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DiffusionPipeline,
EulerDiscreteScheduler,
    StableDiffusionXLImg2ImgPipeline,
UNetaDConditionModel,
)
from diffusers.utils import floats_tensor, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
IMAGE_TO_IMAGE_IMAGE_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class StableDiffusionXLImg2ImgPipelineFastTests(PipelineLatentTesterMixin, PipelineTesterMixin, unittest.TestCase):
    pipeline_class = StableDiffusionXLImg2ImgPipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"height", "width"}
    required_optional_params = PipelineTesterMixin.required_optional_params - {"latents"}
    batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
    image_params = IMAGE_TO_IMAGE_IMAGE_PARAMS
    image_latents_params = IMAGE_TO_IMAGE_IMAGE_PARAMS
    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=4,
            out_channels=4,
            down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"),
            up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"),
            attention_head_dim=(2, 4),
            use_linear_projection=True,
            addition_embed_type="text_time",
            addition_time_embed_dim=8,
            transformer_layers_per_block=(1, 2),
            projection_class_embeddings_input_dim=80,
            cross_attention_dim=64,
        )
        scheduler = EulerDiscreteScheduler(
            beta_start=0.00085,
            beta_end=0.012,
            steps_offset=1,
            beta_schedule="scaled_linear",
            timestep_spacing="leading",
        )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64],
            in_channels=3,
            out_channels=3,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=4,
            sample_size=128,
        )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
            hidden_act="gelu",
            projection_dim=32,
        )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip", local_files_only=True)
        text_encoder_2 = CLIPTextModelWithProjection(text_encoder_config)
        tokenizer_2 = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip", local_files_only=True)
        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "text_encoder_2": text_encoder_2,
            "tokenizer_2": tokenizer_2,
            # "safety_checker": None,
            # "feature_extractor": None,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        image = image / 2 + 0.5
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "image": image,
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 5.0,
            "output_type": "numpy",
            "strength": 0.75,
        }
        return inputs
    def test_stable_diffusion_xl_img2img_euler(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = StableDiffusionXLImg2ImgPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        image = sd_pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.4656, 0.4840, 0.4439, 0.6698, 0.5574, 0.4524, 0.5799, 0.5943, 0.5165])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
    def test_attention_slicing_forward_pass(self):
        super().test_attention_slicing_forward_pass(expected_max_diff=3e-3)

    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(expected_max_diff=3e-3)

    def test_save_load_optional_components(self):
        pass
    def test_stable_diffusion_xl_img2img_negative_prompt_embeds(self):
        components = self.get_dummy_components()
        sd_pipe = StableDiffusionXLImg2ImgPipeline(**components)
        sd_pipe = sd_pipe.to(torch_device)
        sd_pipe = sd_pipe.to(torch_device)
        sd_pipe.set_progress_bar_config(disable=None)

        # forward without prompt embeds
        inputs = self.get_dummy_inputs(torch_device)
        negative_prompt = 3 * ["this is a negative prompt"]
        inputs["negative_prompt"] = negative_prompt
        inputs["prompt"] = 3 * [inputs["prompt"]]
        output = sd_pipe(**inputs)
        image_slice_1 = output.images[0, -3:, -3:, -1]

        # forward with prompt embeds
        inputs = self.get_dummy_inputs(torch_device)
        negative_prompt = 3 * ["this is a negative prompt"]
        prompt = 3 * [inputs.pop("prompt")]
        (
            prompt_embeds,
            negative_prompt_embeds,
            pooled_prompt_embeds,
            negative_pooled_prompt_embeds,
        ) = sd_pipe.encode_prompt(prompt, negative_prompt=negative_prompt)
        output = sd_pipe(
            **inputs,
            prompt_embeds=prompt_embeds,
            negative_prompt_embeds=negative_prompt_embeds,
            pooled_prompt_embeds=pooled_prompt_embeds,
            negative_pooled_prompt_embeds=negative_pooled_prompt_embeds,
        )
        image_slice_2 = output.images[0, -3:, -3:, -1]

        # make sure that it's equal
        assert np.abs(image_slice_1.flatten() - image_slice_2.flatten()).max() < 1e-4
@slow
@require_torch_gpu
class StableDiffusionXLImg2ImgPipelineSlowTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
def a ( self , snake_case__ , snake_case__="cpu" , snake_case__=torch.floataa , snake_case__=0 ):
'''simple docstring'''
_lowerCAmelCase : Union[str, Any] = torch.Generator(device=snake_case__ ).manual_seed(snake_case__ )
_lowerCAmelCase : List[str] = np.random.RandomState(snake_case__ ).standard_normal((1, 4, 64, 64) )
_lowerCAmelCase : List[Any] = torch.from_numpy(snake_case__ ).to(device=snake_case__ , dtype=snake_case__ )
_lowerCAmelCase : int = {
'prompt': 'a photograph of an astronaut riding a horse',
'latents': latents,
'generator': generator,
'num_inference_steps': 3,
'guidance_scale': 7.5,
'output_type': 'numpy',
}
return inputs
    def test_stable_diffusion_default(self):
        pipe = DiffusionPipeline.from_pretrained("stabilityai/stable-diffusion-2-base")
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_inputs(torch_device)
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1].flatten()

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.49493, 0.47896, 0.40798, 0.54214, 0.53212, 0.48202, 0.47656, 0.46329, 0.48506])
        assert np.abs(image_slice - expected_slice).max() < 7e-3
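# Rough full-scale inference sketch mirroring the fast test above (the
# checkpoint name is illustrative; any SDXL img2img-capable checkpoint works
# the same way, and `init_image` is a caller-supplied PIL image):
#     pipe = StableDiffusionXLImg2ImgPipeline.from_pretrained("stabilityai/stable-diffusion-xl-refiner-1.0")
#     out = pipe(prompt="A painting of a squirrel eating a burger", image=init_image, strength=0.75).images[0]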
| 444 |
import os
from bleurt import score # From: git+https://github.com/google-research/bleurt.git
import datasets
logger = datasets.logging.get_logger(__name__)
_CITATION = """\
@inproceedings{bleurt,
title={BLEURT: Learning Robust Metrics for Text Generation},
author={Thibault Sellam and Dipanjan Das and Ankur P. Parikh},
booktitle={ACL},
year={2020},
url={https://arxiv.org/abs/2004.04696}
}
"""
_DESCRIPTION = """\
BLEURT a learnt evaluation metric for Natural Language Generation. It is built using multiple phases of transfer learning starting from a pretrained BERT model (Devlin et al. 2018)
and then employing another pre-training phrase using synthetic data. Finally it is trained on WMT human annotations. You may run BLEURT out-of-the-box or fine-tune
it for your specific application (the latter is expected to perform better).
See the project's README at https://github.com/google-research/bleurt#readme for more information.
"""
_KWARGS_DESCRIPTION = """
BLEURT score.
Args:
`predictions` (list of str): prediction/candidate sentences
`references` (list of str): reference sentences
`checkpoint` BLEURT checkpoint. Will default to BLEURT-tiny if None.
Returns:
'scores': List of scores.
Examples:
>>> predictions = [\"hello there\", \"general kenobi\"]
>>> references = [\"hello there\", \"general kenobi\"]
>>> bleurt = datasets.load_metric(\"bleurt\")
>>> results = bleurt.compute(predictions=predictions, references=references)
>>> print([round(v, 2) for v in results[\"scores\"]])
[1.03, 1.04]
"""
CHECKPOINT_URLS = {
"""bleurt-tiny-128""": """https://storage.googleapis.com/bleurt-oss/bleurt-tiny-128.zip""",
"""bleurt-tiny-512""": """https://storage.googleapis.com/bleurt-oss/bleurt-tiny-512.zip""",
"""bleurt-base-128""": """https://storage.googleapis.com/bleurt-oss/bleurt-base-128.zip""",
"""bleurt-base-512""": """https://storage.googleapis.com/bleurt-oss/bleurt-base-512.zip""",
"""bleurt-large-128""": """https://storage.googleapis.com/bleurt-oss/bleurt-large-128.zip""",
"""bleurt-large-512""": """https://storage.googleapis.com/bleurt-oss/bleurt-large-512.zip""",
"""BLEURT-20-D3""": """https://storage.googleapis.com/bleurt-oss-21/BLEURT-20-D3.zip""",
"""BLEURT-20-D6""": """https://storage.googleapis.com/bleurt-oss-21/BLEURT-20-D6.zip""",
"""BLEURT-20-D12""": """https://storage.googleapis.com/bleurt-oss-21/BLEURT-20-D12.zip""",
"""BLEURT-20""": """https://storage.googleapis.com/bleurt-oss-21/BLEURT-20.zip""",
}
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class BLEURT(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            homepage="https://github.com/google-research/bleurt",
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("string", id="sequence"),
                    "references": datasets.Value("string", id="sequence"),
                }
            ),
            codebase_urls=["https://github.com/google-research/bleurt"],
            reference_urls=["https://github.com/google-research/bleurt", "https://arxiv.org/abs/2004.04696"],
        )

    def _download_and_prepare(self, dl_manager):
        # check that config name specifies a valid BLEURT checkpoint
        if self.config_name == "default":
            logger.warning(
                "Using default BLEURT-Base checkpoint for sequence maximum length 128. "
                "You can use a bigger model for better results with e.g.: datasets.load_metric('bleurt', 'bleurt-large-512')."
            )
            self.config_name = "bleurt-base-128"

        if self.config_name.lower() in CHECKPOINT_URLS:
            checkpoint_name = self.config_name.lower()
        elif self.config_name.upper() in CHECKPOINT_URLS:
            checkpoint_name = self.config_name.upper()
        else:
            raise KeyError(
                f"{self.config_name} model not found. You should supply the name of a model checkpoint for bleurt in {CHECKPOINT_URLS.keys()}"
            )

        # download the model checkpoint specified by self.config_name and set up the scorer
        model_path = dl_manager.download_and_extract(CHECKPOINT_URLS[checkpoint_name])
        self.scorer = score.BleurtScorer(os.path.join(model_path, checkpoint_name))

    def _compute(self, predictions, references):
        scores = self.scorer.score(references=references, candidates=predictions)
        return {"scores": scores}
| 444 | 1 |
import re
import tempfile
from pathlib import Path
import pytest
import yaml
from datasets.utils.readme import ReadMe
# @pytest.fixture
# def example_yaml_structure():
example_yaml_structure = yaml.safe_load(
'''\
name: ""
allow_empty: false
allow_empty_text: true
subsections:
- name: "Dataset Card for X" # First-level markdown heading
allow_empty: false
allow_empty_text: true
subsections:
- name: "Table of Contents"
allow_empty: false
allow_empty_text: false
subsections: null
- name: "Dataset Description"
allow_empty: false
allow_empty_text: false
subsections:
- name: "Dataset Summary"
allow_empty: false
allow_empty_text: false
subsections: null
- name: "Supported Tasks and Leaderboards"
allow_empty: true
allow_empty_text: true
subsections: null
- name: Languages
allow_empty: false
allow_empty_text: true
subsections: null
'''
)
CORRECT_DICT = {
'''name''': '''root''',
'''text''': '''''',
'''is_empty_text''': True,
'''subsections''': [
{
'''name''': '''Dataset Card for My Dataset''',
'''text''': '''''',
'''is_empty_text''': True,
'''subsections''': [
{'''name''': '''Table of Contents''', '''text''': '''Some text here.''', '''is_empty_text''': False, '''subsections''': []},
{
'''name''': '''Dataset Description''',
'''text''': '''Some text here.''',
'''is_empty_text''': False,
'''subsections''': [
{
'''name''': '''Dataset Summary''',
'''text''': '''Some text here.''',
'''is_empty_text''': False,
'''subsections''': [],
},
{
'''name''': '''Supported Tasks and Leaderboards''',
'''text''': '''''',
'''is_empty_text''': True,
'''subsections''': [],
},
{'''name''': '''Languages''', '''text''': '''Language Text''', '''is_empty_text''': False, '''subsections''': []},
],
},
],
}
],
}
README_CORRECT = '''\
---
language:
- zh
- en
---
# Dataset Card for My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
### Supported Tasks and Leaderboards
### Languages
Language Text
'''
README_CORRECT_FOUR_LEVEL = '''\
---
language:
- zh
- en
---
# Dataset Card for My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
#### Extra Ignored Subsection
### Supported Tasks and Leaderboards
### Languages
Language Text
'''
CORRECT_DICT_FOUR_LEVEL = {
'''name''': '''root''',
'''text''': '''''',
'''is_empty_text''': True,
'''subsections''': [
{
'''name''': '''Dataset Card for My Dataset''',
'''text''': '''''',
'''is_empty_text''': True,
'''subsections''': [
{'''name''': '''Table of Contents''', '''text''': '''Some text here.''', '''is_empty_text''': False, '''subsections''': []},
{
'''name''': '''Dataset Description''',
'''text''': '''Some text here.''',
'''is_empty_text''': False,
'''subsections''': [
{
'''name''': '''Dataset Summary''',
'''text''': '''Some text here.''',
'''is_empty_text''': False,
'''subsections''': [
{
'''name''': '''Extra Ignored Subsection''',
'''text''': '''''',
'''is_empty_text''': True,
'''subsections''': [],
}
],
},
{
'''name''': '''Supported Tasks and Leaderboards''',
'''text''': '''''',
'''is_empty_text''': True,
'''subsections''': [],
},
{'''name''': '''Languages''', '''text''': '''Language Text''', '''is_empty_text''': False, '''subsections''': []},
],
},
],
}
],
}
README_EMPTY_YAML = '''\
---
---
# Dataset Card for My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
### Supported Tasks and Leaderboards
### Languages
Language Text
'''
EXPECTED_ERROR_README_EMPTY_YAML = (
'''The following issues were found for the README at `{path}`:\n-\tEmpty YAML markers are present in the README.'''
)
README_NO_YAML = '''\
# Dataset Card for My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
### Supported Tasks and Leaderboards
### Languages
Language Text
'''
EXPECTED_ERROR_README_NO_YAML = (
'''The following issues were found for the README at `{path}`:\n-\tNo YAML markers are present in the README.'''
)
README_INCORRECT_YAML = '''\
---
# Dataset Card for My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
### Supported Tasks and Leaderboards
### Languages
Language Text
'''
EXPECTED_ERROR_README_INCORRECT_YAML = '''The following issues were found for the README at `{path}`:\n-\tOnly the start of YAML tags present in the README.'''
README_MISSING_TEXT = '''\
---
language:
- zh
- en
---
# Dataset Card for My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
### Supported Tasks and Leaderboards
### Languages
Language Text
'''
EXPECTED_ERROR_README_MISSING_TEXT = '''The following issues were found for the README at `{path}`:\n-\tExpected some content in section `Dataset Summary` but it is empty.\n-\tExpected some text in section `Dataset Summary` but it is empty (text in subsections are ignored).'''
README_NONE_SUBSECTION = '''\
---
language:
- zh
- en
---
# Dataset Card for My Dataset
'''
EXPECTED_ERROR_README_NONE_SUBSECTION = '''The following issues were found for the README at `{path}`:\n-\tExpected some content in section `Dataset Card for My Dataset` but it is empty.\n-\tSection `Dataset Card for My Dataset` expected the following subsections: `Table of Contents`, `Dataset Description`. Found \'None\'.'''
README_MISSING_SUBSECTION = '''\
---
language:
- zh
- en
---
# Dataset Card for My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
### Languages
Language Text
'''
EXPECTED_ERROR_README_MISSING_SUBSECTION = '''The following issues were found for the README at `{path}`:\n-\tSection `Dataset Description` is missing subsection: `Supported Tasks and Leaderboards`.'''
README_MISSING_CONTENT = '''\
---
language:
- zh
- en
---
# Dataset Card for My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
### Supported Tasks and Leaderboards
### Languages
'''
EXPECTED_ERROR_README_MISSING_CONTENT = '''The following issues were found for the README at `{path}`:\n-\tExpected some content in section `Languages` but it is empty.'''
README_MISSING_FIRST_LEVEL = '''\
---
language:
- zh
- en
---
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
### Supported Tasks and Leaderboards
### Languages
Language Text
'''
EXPECTED_ERROR_README_MISSING_FIRST_LEVEL = '''The following issues were found for the README at `{path}`:\n-\tThe README has no first-level headings. One heading is expected. Skipping further validation for this README.'''
README_MULTIPLE_WRONG_FIRST_LEVEL = '''\
---
language:
- zh
- en
---
# Dataset Card for My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
### Supported Tasks and Leaderboards
### Languages
Language Text
# Dataset Card My Dataset
'''
EXPECTED_ERROR_README_MULTIPLE_WRONG_FIRST_LEVEL = '''The following issues were found for the README at `{path}`:\n-\tThe README has several first-level headings: `Dataset Card for My Dataset`, `Dataset Card My Dataset`. Only one heading is expected. Skipping further validation for this README.'''
README_WRONG_FIRST_LEVEL = '''\
---
language:
- zh
- en
---
# Dataset Card My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
### Supported Tasks and Leaderboards
### Languages
Language Text
'''
EXPECTED_ERROR_README_WRONG_FIRST_LEVEL = '''The following issues were found for the README at `{path}`:\n-\tNo first-level heading starting with `Dataset Card for` found in README. Skipping further validation for this README.'''
README_EMPTY = ''''''
EXPECTED_ERROR_README_EMPTY = '''The following issues were found for the README at `{path}`:\n-\tThe README has no first-level headings. One heading is expected. Skipping further validation for this README.\n-\tNo YAML markers are present in the README.'''
README_MULTIPLE_SAME_HEADING_1 = '''\
---
language:
- zh
- en
---
# Dataset Card for My Dataset
# Dataset Card for My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
### Supported Tasks and Leaderboards
### Languages
Language Text
'''
EXPECTED_ERROR_README_MULTIPLE_SAME_HEADING_1 = '''The following issues were found while parsing the README at `{path}`:\n-\tMultiple sections with the same heading `Dataset Card for My Dataset` have been found. Please keep only one of these sections.'''
@pytest.mark.parametrize(
"readme_md, expected_dict" , [
(README_CORRECT, CORRECT_DICT),
(README_CORRECT_FOUR_LEVEL, CORRECT_DICT_FOUR_LEVEL),
] , )
def test_readme_from_string_correct(readme_md, expected_dict):
    assert ReadMe.from_string(readme_md, example_yaml_structure).to_dict() == expected_dict
@pytest.mark.parametrize(
"readme_md, expected_error" , [
(README_NO_YAML, EXPECTED_ERROR_README_NO_YAML),
(README_EMPTY_YAML, EXPECTED_ERROR_README_EMPTY_YAML),
(README_INCORRECT_YAML, EXPECTED_ERROR_README_INCORRECT_YAML),
(README_EMPTY, EXPECTED_ERROR_README_EMPTY),
(README_NONE_SUBSECTION, EXPECTED_ERROR_README_NONE_SUBSECTION),
(README_MISSING_FIRST_LEVEL, EXPECTED_ERROR_README_MISSING_FIRST_LEVEL),
(README_MISSING_SUBSECTION, EXPECTED_ERROR_README_MISSING_SUBSECTION),
(README_MISSING_TEXT, EXPECTED_ERROR_README_MISSING_TEXT),
(README_WRONG_FIRST_LEVEL, EXPECTED_ERROR_README_WRONG_FIRST_LEVEL),
(README_MULTIPLE_WRONG_FIRST_LEVEL, EXPECTED_ERROR_README_MULTIPLE_WRONG_FIRST_LEVEL),
(README_MISSING_CONTENT, EXPECTED_ERROR_README_MISSING_CONTENT),
] , )
def test_readme_from_string_validation_errors(readme_md, expected_error):
    with pytest.raises(ValueError, match=re.escape(expected_error.format(path="root"))):
        readme = ReadMe.from_string(readme_md, example_yaml_structure)
        readme.validate()
@pytest.mark.parametrize(
"readme_md, expected_error" , [
(README_MULTIPLE_SAME_HEADING_1, EXPECTED_ERROR_README_MULTIPLE_SAME_HEADING_1),
] , )
def test_readme_from_string_parsing_errors(readme_md, expected_error):
    with pytest.raises(ValueError, match=re.escape(expected_error.format(path="root"))):
        ReadMe.from_string(readme_md, example_yaml_structure)
@pytest.mark.parametrize(
"readme_md," , [
(README_MULTIPLE_SAME_HEADING_1),
] , )
def test_readme_from_string_suppress_parsing_errors(readme_md):
    ReadMe.from_string(readme_md, example_yaml_structure, suppress_parsing_errors=True)
@pytest.mark.parametrize(
"readme_md, expected_dict" , [
(README_CORRECT, CORRECT_DICT),
(README_CORRECT_FOUR_LEVEL, CORRECT_DICT_FOUR_LEVEL),
] , )
def test_readme_from_readme_correct(readme_md, expected_dict):
    with tempfile.TemporaryDirectory() as tmp_dir:
        path = Path(tmp_dir) / "README.md"
        with open(path, "w+") as readme_file:
            readme_file.write(readme_md)
        out = ReadMe.from_readme(path, example_yaml_structure).to_dict()
        assert out["name"] == path
        assert out["text"] == ""
        assert out["is_empty_text"]
        assert out["subsections"] == expected_dict["subsections"]
@pytest.mark.parametrize(
"readme_md, expected_error" , [
(README_NO_YAML, EXPECTED_ERROR_README_NO_YAML),
(README_EMPTY_YAML, EXPECTED_ERROR_README_EMPTY_YAML),
(README_INCORRECT_YAML, EXPECTED_ERROR_README_INCORRECT_YAML),
(README_EMPTY, EXPECTED_ERROR_README_EMPTY),
(README_NONE_SUBSECTION, EXPECTED_ERROR_README_NONE_SUBSECTION),
(README_MISSING_FIRST_LEVEL, EXPECTED_ERROR_README_MISSING_FIRST_LEVEL),
(README_MISSING_SUBSECTION, EXPECTED_ERROR_README_MISSING_SUBSECTION),
(README_MISSING_TEXT, EXPECTED_ERROR_README_MISSING_TEXT),
(README_WRONG_FIRST_LEVEL, EXPECTED_ERROR_README_WRONG_FIRST_LEVEL),
(README_MULTIPLE_WRONG_FIRST_LEVEL, EXPECTED_ERROR_README_MULTIPLE_WRONG_FIRST_LEVEL),
(README_MISSING_CONTENT, EXPECTED_ERROR_README_MISSING_CONTENT),
] , )
def test_readme_from_readme_validation_errors(readme_md, expected_error):
    with tempfile.TemporaryDirectory() as tmp_dir:
        path = Path(tmp_dir) / "README.md"
        with open(path, "w+") as readme_file:
            readme_file.write(readme_md)
        expected_error = expected_error.format(path=path)
        with pytest.raises(ValueError, match=re.escape(expected_error)):
            readme = ReadMe.from_readme(path, example_yaml_structure)
            readme.validate()
@pytest.mark.parametrize(
"readme_md, expected_error" , [
(README_MULTIPLE_SAME_HEADING_1, EXPECTED_ERROR_README_MULTIPLE_SAME_HEADING_1),
] , )
def test_readme_from_readme_parsing_errors(readme_md, expected_error):
    with tempfile.TemporaryDirectory() as tmp_dir:
        path = Path(tmp_dir) / "README.md"
        with open(path, "w+") as readme_file:
            readme_file.write(readme_md)
        expected_error = expected_error.format(path=path)
        with pytest.raises(ValueError, match=re.escape(expected_error)):
            ReadMe.from_readme(path, example_yaml_structure)
@pytest.mark.parametrize(
"readme_md," , [
(README_MULTIPLE_SAME_HEADING_1),
] , )
def test_readme_from_readme_suppress_parsing_errors(readme_md):
    with tempfile.TemporaryDirectory() as tmp_dir:
        path = Path(tmp_dir) / "README.md"
        with open(path, "w+") as readme_file:
            readme_file.write(readme_md)
        ReadMe.from_readme(path, example_yaml_structure, suppress_parsing_errors=True)
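# Direct-usage sketch of the API under test (the expected structure comes from
# `example_yaml_structure`; `validate()` raising ValueError is what the
# error-path tests above rely on):
#     readme = ReadMe.from_string(README_CORRECT, example_yaml_structure)
#     readme.validate()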
| 290 |
import argparse
import os
import re
import numpy as np
import PIL
import torch
from timm import create_model
from torch.optim.lr_scheduler import OneCycleLR
from torch.utils.data import DataLoader, Dataset
from torchvision.transforms import Compose, RandomResizedCrop, Resize, ToTensor
from accelerate import Accelerator
def extract_label(fname):
    stem = fname.split(os.path.sep)[-1]
    return re.search(r"^(.*)_\d+\.jpg$", stem).groups()[0]
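# Filename-convention sketch ("<label>_<index>.jpg", e.g. the Oxford-IIIT Pets
# layout this example assumes):
#     extract_label(os.path.join("images", "beagle_32.jpg"))  ->  "beagle"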
class PetsDataset(Dataset):
    def __init__(self, file_names, image_transform=None, label_to_id=None):
        self.file_names = file_names
        self.image_transform = image_transform
        self.label_to_id = label_to_id

    def __len__(self):
        return len(self.file_names)

    def __getitem__(self, idx):
        fname = self.file_names[idx]
        raw_image = PIL.Image.open(fname)
        image = raw_image.convert("RGB")
        if self.image_transform is not None:
            image = self.image_transform(image)
        label = extract_label(fname)
        if self.label_to_id is not None:
            label = self.label_to_id[label]
        return {"image": image, "label": label}
def training_function(config, args):
    # Initialize accelerator
    if args.with_tracking:
        accelerator = Accelerator(
            cpu=args.cpu, mixed_precision=args.mixed_precision, log_with="all", project_dir=args.project_dir
        )
    else:
        accelerator = Accelerator(cpu=args.cpu, mixed_precision=args.mixed_precision)

    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])
    image_size = config["image_size"]
    if not isinstance(image_size, (list, tuple)):
        image_size = (image_size, image_size)

    # Parse out whether we are saving every epoch or after a certain number of batches
    if hasattr(args.checkpointing_steps, "isdigit"):
        if args.checkpointing_steps == "epoch":
            checkpointing_steps = args.checkpointing_steps
        elif args.checkpointing_steps.isdigit():
            checkpointing_steps = int(args.checkpointing_steps)
        else:
            raise ValueError(
                f"Argument `checkpointing_steps` must be either a number or `epoch`. `{args.checkpointing_steps}` passed."
            )
    else:
        checkpointing_steps = None

    # We need to initialize the trackers we use, and also store our configuration
    if args.with_tracking:
        run = os.path.split(__file__)[-1].split(".")[0]
        accelerator.init_trackers(run, config)
# Grab all the image filenames
    file_names = [os.path.join(args.data_dir, fname) for fname in os.listdir(args.data_dir) if fname.endswith(".jpg")]

    # Build the label correspondences
    all_labels = [extract_label(fname) for fname in file_names]
    id_to_label = list(set(all_labels))
    id_to_label.sort()
    label_to_id = {lbl: i for i, lbl in enumerate(id_to_label)}

    # Set the seed before splitting the data.
    np.random.seed(seed)
    torch.manual_seed(seed)
    torch.cuda.manual_seed_all(seed)

    # Split our filenames between train and validation
    random_perm = np.random.permutation(len(file_names))
    cut = int(0.8 * len(file_names))
    train_split = random_perm[:cut]
    eval_split = random_perm[cut:]

    # For training we use a simple RandomResizedCrop
    train_tfm = Compose([RandomResizedCrop(image_size, scale=(0.5, 1.0)), ToTensor()])
    train_dataset = PetsDataset([file_names[i] for i in train_split], image_transform=train_tfm, label_to_id=label_to_id)

    # For evaluation, we use a deterministic Resize
    eval_tfm = Compose([Resize(image_size), ToTensor()])
    eval_dataset = PetsDataset([file_names[i] for i in eval_split], image_transform=eval_tfm, label_to_id=label_to_id)

    # Instantiate dataloaders.
    train_dataloader = DataLoader(train_dataset, shuffle=True, batch_size=batch_size, num_workers=4)
    eval_dataloader = DataLoader(eval_dataset, shuffle=False, batch_size=batch_size, num_workers=4)

    # Instantiate the model (we build the model here so that the seed also control new weights initialization)
    model = create_model("resnet50d", pretrained=True, num_classes=len(label_to_id))
# We could avoid this line since the accelerator is set with `device_placement=True` (default value).
# Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
# creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
__lowerCAmelCase = model.to(accelerator.device )
# Freezing the base model
for param in model.parameters():
__lowerCAmelCase = False
for param in model.get_classifier().parameters():
__lowerCAmelCase = True
# We normalize the batches of images to be a bit faster.
__lowerCAmelCase = torch.tensor(model.default_cfg["mean"] )[None, :, None, None].to(accelerator.device )
__lowerCAmelCase = torch.tensor(model.default_cfg["std"] )[None, :, None, None].to(accelerator.device )
# Instantiate optimizer
__lowerCAmelCase = torch.optim.Adam(params=model.parameters() , lr=lr / 25 )
# Instantiate learning rate scheduler
__lowerCAmelCase = OneCycleLR(optimizer=__snake_case , max_lr=__snake_case , epochs=__snake_case , steps_per_epoch=len(__snake_case ) )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = accelerator.prepare(
__snake_case , __snake_case , __snake_case , __snake_case , __snake_case )
# We need to keep track of how many total steps we have iterated over
__lowerCAmelCase = 0
# We also need to keep track of the starting epoch so files are named properly
__lowerCAmelCase = 0
# Potentially load in the weights and states from a previous save
if args.resume_from_checkpoint:
if args.resume_from_checkpoint is not None or args.resume_from_checkpoint != "":
accelerator.print(F"""Resumed from checkpoint: {args.resume_from_checkpoint}""" )
accelerator.load_state(args.resume_from_checkpoint )
__lowerCAmelCase = os.path.basename(args.resume_from_checkpoint )
else:
# Get the most recent checkpoint
__lowerCAmelCase = [f.name for f in os.scandir(os.getcwd() ) if f.is_dir()]
dirs.sort(key=os.path.getctime )
__lowerCAmelCase = dirs[-1] # Sorts folders by date modified, most recent checkpoint is the last
# Extract `epoch_{i}` or `step_{i}`
__lowerCAmelCase = os.path.splitext(__snake_case )[0]
if "epoch" in training_difference:
__lowerCAmelCase = int(training_difference.replace("epoch_" , "" ) ) + 1
__lowerCAmelCase = None
else:
__lowerCAmelCase = int(training_difference.replace("step_" , "" ) )
__lowerCAmelCase = resume_step // len(__snake_case )
resume_step -= starting_epoch * len(__snake_case )
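    # Worked example of the arithmetic above: with len(train_dataloader) == 100
    # and a checkpoint folder named "step_250", starting_epoch == 250 // 100 == 2
    # and resume_step == 250 - 2 * 100 == 50, so training resumes 50 batches
    # into epoch 2.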
# Now we train the model
for epoch in range(__snake_case , __snake_case ):
model.train()
if args.with_tracking:
__lowerCAmelCase = 0
if args.resume_from_checkpoint and epoch == starting_epoch and resume_step is not None:
# We need to skip steps until we reach the resumed step
__lowerCAmelCase = accelerator.skip_first_batches(__snake_case , __snake_case )
overall_step += resume_step
else:
# After the first iteration though, we need to go back to the original dataloader
__lowerCAmelCase = train_dataloader
for batch in active_dataloader:
# We could avoid this line since we set the accelerator with `device_placement=True`.
__lowerCAmelCase = {k: v.to(accelerator.device ) for k, v in batch.items()}
__lowerCAmelCase = (batch["image"] - mean) / std
__lowerCAmelCase = model(__snake_case )
__lowerCAmelCase = torch.nn.functional.cross_entropy(__snake_case , batch["label"] )
# We keep track of the loss at each epoch
if args.with_tracking:
total_loss += loss.detach().float()
accelerator.backward(__snake_case )
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
overall_step += 1
if isinstance(__snake_case , __snake_case ):
__lowerCAmelCase = F"""step_{overall_step}"""
if overall_step % checkpointing_steps == 0:
if args.output_dir is not None:
__lowerCAmelCase = os.path.join(args.output_dir , __snake_case )
accelerator.save_state(__snake_case )
model.eval()
__lowerCAmelCase = 0
__lowerCAmelCase = 0
for step, batch in enumerate(__snake_case ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
__lowerCAmelCase = {k: v.to(accelerator.device ) for k, v in batch.items()}
__lowerCAmelCase = (batch["image"] - mean) / std
with torch.no_grad():
__lowerCAmelCase = model(__snake_case )
__lowerCAmelCase = outputs.argmax(dim=-1 )
__lowerCAmelCase , __lowerCAmelCase = accelerator.gather_for_metrics((predictions, batch["label"]) )
__lowerCAmelCase = predictions == references
num_elems += accurate_preds.shape[0]
accurate += accurate_preds.long().sum()
__lowerCAmelCase = accurate.item() / num_elems
# Use accelerator.print to print only on the main process.
accelerator.print(F"""epoch {epoch}: {100 * eval_metric:.2f}""" )
if args.with_tracking:
accelerator.log(
{
"accuracy": 100 * eval_metric,
"train_loss": total_loss.item() / len(__snake_case ),
"epoch": epoch,
} , step=__snake_case , )
if checkpointing_steps == "epoch":
__lowerCAmelCase = F"""epoch_{epoch}"""
if args.output_dir is not None:
__lowerCAmelCase = os.path.join(args.output_dir , __snake_case )
accelerator.save_state(__snake_case )
if args.with_tracking:
accelerator.end_training()
def __lowerCAmelCase ( ):
__lowerCAmelCase = argparse.ArgumentParser(description="Simple example of training script." )
parser.add_argument("--data_dir" , required=__snake_case , help="The data folder on disk." )
parser.add_argument("--fp16" , action="store_true" , help="If passed, will use FP16 training." )
parser.add_argument(
"--mixed_precision" , type=__snake_case , default=__snake_case , choices=["no", "fp16", "bf16", "fp8"] , help="Whether to use mixed precision. Choose"
"between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."
"and an Nvidia Ampere GPU." , )
parser.add_argument("--cpu" , action="store_true" , help="If passed, will train on the CPU." )
parser.add_argument(
"--checkpointing_steps" , type=__snake_case , default=__snake_case , help="Whether the various states should be saved at the end of every n steps, or 'epoch' for each epoch." , )
parser.add_argument(
"--output_dir" , type=__snake_case , default="." , help="Optional save directory where all checkpoint folders will be stored. Default is the current working directory." , )
parser.add_argument(
"--resume_from_checkpoint" , type=__snake_case , default=__snake_case , help="If the training should continue from a checkpoint folder." , )
parser.add_argument(
"--with_tracking" , action="store_true" , help="Whether to load in all available experiment trackers from the environment and use them for logging." , )
parser.add_argument(
"--project_dir" , type=__snake_case , default="logs" , help="Location on where to store experiment tracking logs` and relevent project information" , )
__lowerCAmelCase = parser.parse_args()
__lowerCAmelCase = {"lr": 3E-2, "num_epochs": 3, "seed": 42, "batch_size": 64, "image_size": 224}
training_function(__snake_case , __snake_case )
if __name__ == "__main__":
main()
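# Example invocation (the script filename is an assumption; the flags match the
# parser above, and --data_dir must point at a folder of .jpg images whose
# names encode the label, as consumed by extract_label above):
#   accelerate launch complete_cv_example.py --data_dir ./images \
#       --mixed_precision fp16 --checkpointing_steps epoch \
#       --output_dir ./checkpoints --with_tracking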
| 290 | 1 |
import colorsys
from PIL import Image # type: ignore
def SCREAMING_SNAKE_CASE__ ( lowercase ,lowercase ,lowercase ) -> List[Any]:
snake_case : Optional[int] = x
snake_case : Tuple = y
for step in range(lowerCAmelCase__ ): # noqa: B007
snake_case : Union[str, Any] = a * a - b * b + x
snake_case : int = 2 * a * b + y
snake_case : int = a_new
        # divergence is guaranteed once the absolute value exceeds 2,
        # which is checked below as a * a + b * b > 4
if a * a + b * b > 4:
break
return step / (max_step - 1)
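# Quick sanity checks for the iteration above: get_distance(0, 0, 50) == 1.0
# (the origin never diverges, i.e. it is treated as inside the set), while
# get_distance(3, 3, 50) == 0.0 (divergence on the very first iteration).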
def SCREAMING_SNAKE_CASE__ ( lowercase ) -> List[Any]:
if distance == 1:
return (0, 0, 0)
else:
return (255, 255, 255)
def SCREAMING_SNAKE_CASE__ ( lowercase ) -> List[str]:
if distance == 1:
return (0, 0, 0)
else:
return tuple(round(i * 255 ) for i in colorsys.hsv_to_rgb(lowerCAmelCase__ ,1 ,1 ) )
def SCREAMING_SNAKE_CASE__ ( lowercase = 800 ,lowercase = 600 ,lowercase = -0.6 ,lowercase = 0 ,lowercase = 3.2 ,lowercase = 50 ,lowercase = True ,) -> List[str]:
snake_case : Tuple = Image.new("""RGB""" ,(image_width, image_height) )
snake_case : Optional[int] = img.load()
# loop through the image-coordinates
for image_x in range(lowerCAmelCase__ ):
for image_y in range(lowerCAmelCase__ ):
# determine the figure-coordinates based on the image-coordinates
snake_case : List[Any] = figure_width / image_width * image_height
snake_case : List[Any] = figure_center_x + (image_x / image_width - 0.5) * figure_width
snake_case : Dict = figure_center_y + (image_y / image_height - 0.5) * figure_height
snake_case : Any = get_distance(lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ )
# color the corresponding pixel based on the selected coloring-function
if use_distance_color_coding:
snake_case : Any = get_color_coded_rgb(lowerCAmelCase__ )
else:
snake_case : Optional[int] = get_black_and_white_rgb(lowerCAmelCase__ )
return img
if __name__ == "__main__":
import doctest
doctest.testmod()
# colored version, full figure
lowerCamelCase : Optional[Any] = get_image()
# uncomment for colored version, different section, zoomed in
# img = get_image(figure_center_x = -0.6, figure_center_y = -0.4,
# figure_width = 0.8)
# uncomment for black and white version, full figure
# img = get_image(use_distance_color_coding = False)
# uncomment to save the image
# img.save("mandelbrot.png")
img.show()
| 587 |
import argparse
import os
import re
_lowercase : List[str] ="""src/diffusers"""
# Pattern that looks at the indentation in a line.
_lowercase : str =re.compile(r"""^(\s*)\S""")
# Pattern that matches `"key":" and puts `key` in group 0.
_lowercase : Dict =re.compile(r"""^\s*\"([^\"]+)\":""")
# Pattern that matches `_import_structure["key"]` and puts `key` in group 0.
_lowercase : Union[str, Any] =re.compile(r"""^\s*_import_structure\[\"([^\"]+)\"\]""")
# Pattern that matches `"key",` and puts `key` in group 0.
_lowercase : Dict =re.compile(r"""^\s*\"([^\"]+)\",\s*$""")
# Pattern that matches any `[stuff]` and puts `stuff` in group 0.
_lowercase : Tuple =re.compile(r"""\[([^\]]+)\]""")
def _SCREAMING_SNAKE_CASE ( lowerCAmelCase__ ):
lowerCamelCase_ : int = _re_indent.search(lowerCAmelCase__ )
return "" if search is None else search.groups()[0]
def _SCREAMING_SNAKE_CASE ( lowerCAmelCase__ ,lowerCAmelCase__="" ,lowerCAmelCase__=None ,lowerCAmelCase__=None ):
lowerCamelCase_ : Optional[Any] = 0
lowerCamelCase_ : Union[str, Any] = code.split('\n' )
if start_prompt is not None:
while not lines[index].startswith(lowerCAmelCase__ ):
index += 1
lowerCamelCase_ : Any = ['\n'.join(lines[:index] )]
else:
lowerCamelCase_ : str = []
# We split into blocks until we get to the `end_prompt` (or the end of the block).
lowerCamelCase_ : List[str] = [lines[index]]
index += 1
while index < len(lowerCAmelCase__ ) and (end_prompt is None or not lines[index].startswith(lowerCAmelCase__ )):
if len(lines[index] ) > 0 and get_indent(lines[index] ) == indent_level:
if len(lowerCAmelCase__ ) > 0 and get_indent(current_block[-1] ).startswith(indent_level + ' ' ):
current_block.append(lines[index] )
blocks.append('\n'.join(lowerCAmelCase__ ) )
if index < len(lowerCAmelCase__ ) - 1:
lowerCamelCase_ : int = [lines[index + 1]]
index += 1
else:
lowerCamelCase_ : List[str] = []
else:
blocks.append('\n'.join(lowerCAmelCase__ ) )
lowerCamelCase_ : str = [lines[index]]
else:
current_block.append(lines[index] )
index += 1
# Adds current block if it's nonempty.
if len(lowerCAmelCase__ ) > 0:
blocks.append('\n'.join(lowerCAmelCase__ ) )
# Add final block after end_prompt if provided.
if end_prompt is not None and index < len(lowerCAmelCase__ ):
blocks.append('\n'.join(lines[index:] ) )
return blocks
def _SCREAMING_SNAKE_CASE ( lowerCAmelCase__ ):
def _inner(lowerCAmelCase__ ):
return key(lowerCAmelCase__ ).lower().replace('_' ,'' )
return _inner
def _SCREAMING_SNAKE_CASE ( lowerCAmelCase__ ,lowerCAmelCase__=None ):
# If no key is provided, we use a noop.
def noop(lowerCAmelCase__ ):
return x
if key is None:
lowerCamelCase_ : int = noop
# Constants are all uppercase, they go first.
lowerCamelCase_ : Any = [obj for obj in objects if key(lowerCAmelCase__ ).isupper()]
# Classes are not all uppercase but start with a capital, they go second.
lowerCamelCase_ : Dict = [obj for obj in objects if key(lowerCAmelCase__ )[0].isupper() and not key(lowerCAmelCase__ ).isupper()]
# Functions begin with a lowercase, they go last.
lowerCamelCase_ : Any = [obj for obj in objects if not key(lowerCAmelCase__ )[0].isupper()]
lowerCamelCase_ : Optional[Any] = ignore_underscore(lowerCAmelCase__ )
return sorted(lowerCAmelCase__ ,key=lowerCAmelCase__ ) + sorted(lowerCAmelCase__ ,key=lowerCAmelCase__ ) + sorted(lowerCAmelCase__ ,key=lowerCAmelCase__ )
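# Ordering example (this function's upstream name is sort_objects):
#   ["my_func", "MyClass", "CONSTANT", "_private"]
#   -> ["CONSTANT", "MyClass", "my_func", "_private"]
# Constants (all-caps) come first, classes second, functions last; leading
# underscores are ignored when comparing names within each group.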
def _SCREAMING_SNAKE_CASE ( lowerCAmelCase__ ):
# This inner function sort imports between [ ].
def _replace(lowerCAmelCase__ ):
lowerCamelCase_ : Dict = match.groups()[0]
if "," not in imports:
return F"[{imports}]"
lowerCamelCase_ : Optional[int] = [part.strip().replace('"' ,'' ) for part in imports.split(',' )]
# We will have a final empty element if the line finished with a comma.
if len(keys[-1] ) == 0:
lowerCamelCase_ : str = keys[:-1]
return "[" + ", ".join([F"\"{k}\"" for k in sort_objects(lowerCAmelCase__ )] ) + "]"
lowerCamelCase_ : Tuple = import_statement.split('\n' )
if len(lowerCAmelCase__ ) > 3:
# Here we have to sort internal imports that are on several lines (one per name):
# key: [
# "object1",
# "object2",
# ...
# ]
# We may have to ignore one or two lines on each side.
lowerCamelCase_ : int = 2 if lines[1].strip() == '[' else 1
lowerCamelCase_ : Any = [(i, _re_strip_line.search(lowerCAmelCase__ ).groups()[0]) for i, line in enumerate(lines[idx:-idx] )]
lowerCamelCase_ : str = sort_objects(lowerCAmelCase__ ,key=lambda lowerCAmelCase__ : x[1] )
lowerCamelCase_ : Any = [lines[x[0] + idx] for x in sorted_indices]
return "\n".join(lines[:idx] + sorted_lines + lines[-idx:] )
elif len(lowerCAmelCase__ ) == 3:
# Here we have to sort internal imports that are on one separate line:
# key: [
# "object1", "object2", ...
# ]
if _re_bracket_content.search(lines[1] ) is not None:
lowerCamelCase_ : Optional[int] = _re_bracket_content.sub(_replace ,lines[1] )
else:
lowerCamelCase_ : Any = [part.strip().replace('"' ,'' ) for part in lines[1].split(',' )]
# We will have a final empty element if the line finished with a comma.
if len(keys[-1] ) == 0:
lowerCamelCase_ : List[Any] = keys[:-1]
lowerCamelCase_ : Optional[Any] = get_indent(lines[1] ) + ', '.join([F"\"{k}\"" for k in sort_objects(lowerCAmelCase__ )] )
return "\n".join(lowerCAmelCase__ )
else:
# Finally we have to deal with imports fitting on one line
lowerCamelCase_ : Any = _re_bracket_content.sub(_replace ,lowerCAmelCase__ )
return import_statement
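# One-line example (this function's upstream name is sort_objects_in_import):
#   input:  _import_structure["models"] = ["zeta", "Alpha", "BETA"]
#   output: _import_structure["models"] = ["BETA", "Alpha", "zeta"]
# ("BETA" sorts as a constant, "Alpha" as a class, "zeta" as a function.)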
def _SCREAMING_SNAKE_CASE ( lowerCAmelCase__ ,lowerCAmelCase__=True ):
with open(lowerCAmelCase__ ,'r' ) as f:
lowerCamelCase_ : Any = f.read()
if "_import_structure" not in code:
return
# Blocks of indent level 0
lowerCamelCase_ : int = split_code_in_indented_blocks(
lowerCAmelCase__ ,start_prompt='_import_structure = {' ,end_prompt='if TYPE_CHECKING:' )
# We ignore block 0 (everything until start_prompt) and the last block (everything after end_prompt).
for block_idx in range(1 ,len(lowerCAmelCase__ ) - 1 ):
# Check if the block contains some `_import_structure`s thingy to sort.
lowerCamelCase_ : Any = main_blocks[block_idx]
lowerCamelCase_ : Tuple = block.split('\n' )
# Get to the start of the imports.
lowerCamelCase_ : Optional[int] = 0
while line_idx < len(lowerCAmelCase__ ) and "_import_structure" not in block_lines[line_idx]:
# Skip dummy import blocks
if "import dummy" in block_lines[line_idx]:
lowerCamelCase_ : List[Any] = len(lowerCAmelCase__ )
else:
line_idx += 1
if line_idx >= len(lowerCAmelCase__ ):
continue
# Ignore beginning and last line: they don't contain anything.
lowerCamelCase_ : Tuple = '\n'.join(block_lines[line_idx:-1] )
lowerCamelCase_ : Dict = get_indent(block_lines[1] )
        # Split the internal block into blocks of indent level 1.
lowerCamelCase_ : Dict = split_code_in_indented_blocks(lowerCAmelCase__ ,indent_level=lowerCAmelCase__ )
# We have two categories of import key: list or _import_structure[key].append/extend
lowerCamelCase_ : List[str] = _re_direct_key if '_import_structure' in block_lines[0] else _re_indirect_key
# Grab the keys, but there is a trap: some lines are empty or just comments.
lowerCamelCase_ : Tuple = [(pattern.search(lowerCAmelCase__ ).groups()[0] if pattern.search(lowerCAmelCase__ ) is not None else None) for b in internal_blocks]
# We only sort the lines with a key.
lowerCamelCase_ : Any = [(i, key) for i, key in enumerate(lowerCAmelCase__ ) if key is not None]
lowerCamelCase_ : Optional[Any] = [x[0] for x in sorted(lowerCAmelCase__ ,key=lambda lowerCAmelCase__ : x[1] )]
# We reorder the blocks by leaving empty lines/comments as they were and reorder the rest.
lowerCamelCase_ : int = 0
lowerCamelCase_ : Dict = []
for i in range(len(lowerCAmelCase__ ) ):
if keys[i] is None:
reordered_blocks.append(internal_blocks[i] )
else:
lowerCamelCase_ : Tuple = sort_objects_in_import(internal_blocks[sorted_indices[count]] )
reordered_blocks.append(lowerCAmelCase__ )
count += 1
# And we put our main block back together with its first and last line.
lowerCamelCase_ : Tuple = '\n'.join(block_lines[:line_idx] + reordered_blocks + [block_lines[-1]] )
if code != "\n".join(lowerCAmelCase__ ):
if check_only:
return True
else:
print(F"Overwriting {file}." )
with open(lowerCAmelCase__ ,'w' ) as f:
f.write('\n'.join(lowerCAmelCase__ ) )
def _SCREAMING_SNAKE_CASE ( lowerCAmelCase__=True ):
lowerCamelCase_ : Dict = []
for root, _, files in os.walk(lowerCAmelCase__ ):
if "__init__.py" in files:
lowerCamelCase_ : Optional[int] = sort_imports(os.path.join(lowerCAmelCase__ ,'__init__.py' ) ,check_only=lowerCAmelCase__ )
if result:
lowerCamelCase_ : Dict = [os.path.join(lowerCAmelCase__ ,'__init__.py' )]
if len(lowerCAmelCase__ ) > 0:
raise ValueError(F"Would overwrite {len(lowerCAmelCase__ )} files, run `make style`." )
if __name__ == "__main__":
_lowercase : int =argparse.ArgumentParser()
parser.add_argument("""--check_only""", action="""store_true""", help="""Whether to only check or fix style.""")
_lowercase : Union[str, Any] =parser.parse_args()
sort_imports_in_all_inits(check_only=args.check_only)
| 364 | 0 |
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import KandinskyPipeline, KandinskyPriorPipeline
else:
from .pipeline_kandinsky import KandinskyPipeline
from .pipeline_kandinsky_imgaimg import KandinskyImgaImgPipeline
from .pipeline_kandinsky_inpaint import KandinskyInpaintPipeline
from .pipeline_kandinsky_prior import KandinskyPriorPipeline, KandinskyPriorPipelineOutput
from .text_encoder import MultilingualCLIP
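# With this lazy-import pattern a consumer simply writes e.g.
#   from diffusers import KandinskyPipeline
# and receives the real pipeline when torch and transformers are installed,
# or a dummy placeholder that raises an informative error on use otherwise.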
| 720 |
def cocktail_shaker_sort(unsorted: list) -> list:
    """
    Sort a list in ascending order with bidirectional bubble sort
    (cocktail shaker sort).

    >>> cocktail_shaker_sort([4, 5, 2, 1, 2])
    [1, 2, 2, 4, 5]
    >>> cocktail_shaker_sort([-4, 5, 0, 1, 2, 11])
    [-4, 0, 1, 2, 5, 11]
    """
    for i in range(len(unsorted) - 1, 0, -1):
        swapped = False
        # Backward pass: bubble the smallest remaining element to the front.
        for j in range(i, 0, -1):
            if unsorted[j] < unsorted[j - 1]:
                unsorted[j], unsorted[j - 1] = unsorted[j - 1], unsorted[j]
                swapped = True
        # Forward pass: bubble the largest remaining element toward the end.
        for j in range(i):
            if unsorted[j] > unsorted[j + 1]:
                unsorted[j + 1], unsorted[j] = unsorted[j], unsorted[j + 1]
                swapped = True
        if not swapped:
            break
    return unsorted
if __name__ == "__main__":
import doctest
doctest.testmod()
lowerCamelCase : Any = input('Enter numbers separated by a comma:\n').strip()
lowerCamelCase : Optional[int] = [int(item) for item in user_input.split(',')]
print(f"""{cocktail_shaker_sort(unsorted) = }""")
| 684 | 0 |
import numpy as np
import torch
from torch.utils.data import DataLoader
from accelerate.utils.dataclasses import DistributedType
class __a:
"""simple docstring"""
def __init__( self ,_SCREAMING_SNAKE_CASE=2 ,_SCREAMING_SNAKE_CASE=3 ,_SCREAMING_SNAKE_CASE=64 ,_SCREAMING_SNAKE_CASE=None ) -> int:
UpperCAmelCase_ : List[Any] = np.random.default_rng(_lowerCAmelCase )
UpperCAmelCase_ : str = length
UpperCAmelCase_ : Tuple = rng.normal(size=(length,) ).astype(np.floataa )
UpperCAmelCase_ : Optional[int] = a * self.x + b + rng.normal(scale=0.1 ,size=(length,) ).astype(np.floataa )
def __len__( self ) -> List[str]:
return self.length
def __getitem__( self ,_SCREAMING_SNAKE_CASE ) -> Any:
return {"x": self.x[i], "y": self.y[i]}
class __a( torch.nn.Module ):
"""simple docstring"""
def __init__( self ,_SCREAMING_SNAKE_CASE=0 ,_SCREAMING_SNAKE_CASE=0 ,_SCREAMING_SNAKE_CASE=False ) -> Optional[int]:
super().__init__()
UpperCAmelCase_ : List[Any] = torch.nn.Parameter(torch.tensor([2, 3] ).float() )
UpperCAmelCase_ : Optional[Any] = torch.nn.Parameter(torch.tensor([2, 3] ).float() )
UpperCAmelCase_ : List[Any] = True
def a__ ( self ,_SCREAMING_SNAKE_CASE=None ) -> int:
if self.first_batch:
print(f'''Model dtype: {self.a.dtype}, {self.b.dtype}. Input dtype: {x.dtype}''' )
UpperCAmelCase_ : int = False
return x * self.a[0] + self.b[0]
class __a( torch.nn.Module ):
"""simple docstring"""
def __init__( self ,_SCREAMING_SNAKE_CASE=0 ,_SCREAMING_SNAKE_CASE=0 ,_SCREAMING_SNAKE_CASE=False ) -> List[Any]:
super().__init__()
UpperCAmelCase_ : Tuple = torch.nn.Parameter(torch.tensor(_lowerCAmelCase ).float() )
UpperCAmelCase_ : str = torch.nn.Parameter(torch.tensor(_lowerCAmelCase ).float() )
UpperCAmelCase_ : Optional[int] = True
def a__ ( self ,_SCREAMING_SNAKE_CASE=None ) -> str:
if self.first_batch:
print(f'''Model dtype: {self.a.dtype}, {self.b.dtype}. Input dtype: {x.dtype}''' )
UpperCAmelCase_ : Any = False
return x * self.a + self.b
def lowerCamelCase__ ( _lowercase , _lowercase = 16 ):
'''simple docstring'''
from datasets import load_dataset
from transformers import AutoTokenizer
UpperCAmelCase_ : Any = AutoTokenizer.from_pretrained('''bert-base-cased''' )
UpperCAmelCase_ : Tuple = {'''train''': '''tests/test_samples/MRPC/train.csv''', '''validation''': '''tests/test_samples/MRPC/dev.csv'''}
UpperCAmelCase_ : Tuple = load_dataset('''csv''' , data_files=__UpperCAmelCase )
UpperCAmelCase_ : int = datasets['''train'''].unique('''label''' )
UpperCAmelCase_ : str = {v: i for i, v in enumerate(__UpperCAmelCase )}
def tokenize_function(_lowercase ):
# max_length=None => use the model max length (it's actually the default)
UpperCAmelCase_ : Tuple = tokenizer(
examples['''sentence1'''] , examples['''sentence2'''] , truncation=__UpperCAmelCase , max_length=__UpperCAmelCase , padding='''max_length''' )
if "label" in examples:
UpperCAmelCase_ : Dict = [label_to_id[l] for l in examples['''label''']]
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
UpperCAmelCase_ : List[Any] = datasets.map(
__UpperCAmelCase , batched=__UpperCAmelCase , remove_columns=['''sentence1''', '''sentence2''', '''label'''] , )
def collate_fn(_lowercase ):
# On TPU it's best to pad everything to the same length or training will be very slow.
if accelerator.distributed_type == DistributedType.TPU:
return tokenizer.pad(__UpperCAmelCase , padding='''max_length''' , max_length=128 , return_tensors='''pt''' )
return tokenizer.pad(__UpperCAmelCase , padding='''longest''' , return_tensors='''pt''' )
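        # Design note: TPUs compile kernels per tensor shape, so padding every
        # batch to one fixed length avoids recompilation; on GPU/CPU, dynamic
        # "longest" padding wastes less compute per batch.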
# Instantiate dataloaders.
UpperCAmelCase_ : List[Any] = DataLoader(tokenized_datasets['''train'''] , shuffle=__UpperCAmelCase , collate_fn=__UpperCAmelCase , batch_size=2 )
UpperCAmelCase_ : Any = DataLoader(tokenized_datasets['''validation'''] , shuffle=__UpperCAmelCase , collate_fn=__UpperCAmelCase , batch_size=1 )
    return train_dataloader, eval_dataloader
| 30 |
import numpy as np
import torch
from torch.utils.data import DataLoader
from accelerate.utils.dataclasses import DistributedType
class lowerCamelCase_ :
'''simple docstring'''
def __init__( self : Any , _lowerCAmelCase : Optional[int]=2 , _lowerCAmelCase : Any=3 , _lowerCAmelCase : Tuple=64 , _lowerCAmelCase : List[str]=None ):
SCREAMING_SNAKE_CASE_ = np.random.default_rng(_lowerCAmelCase )
SCREAMING_SNAKE_CASE_ = length
SCREAMING_SNAKE_CASE_ = rng.normal(size=(length,) ).astype(np.floataa )
SCREAMING_SNAKE_CASE_ = a * self.x + b + rng.normal(scale=0.1 , size=(length,) ).astype(np.floataa )
def __len__( self : Optional[int] ):
return self.length
def __getitem__( self : str , _lowerCAmelCase : Union[str, Any] ):
return {"x": self.x[i], "y": self.y[i]}
class lowerCamelCase_ ( torch.nn.Module ):
'''simple docstring'''
def __init__( self : Tuple , _lowerCAmelCase : Dict=0 , _lowerCAmelCase : List[str]=0 , _lowerCAmelCase : str=False ):
super().__init__()
SCREAMING_SNAKE_CASE_ = torch.nn.Parameter(torch.tensor([2, 3] ).float() )
SCREAMING_SNAKE_CASE_ = torch.nn.Parameter(torch.tensor([2, 3] ).float() )
SCREAMING_SNAKE_CASE_ = True
def lowerCAmelCase_ ( self : Dict , _lowerCAmelCase : Union[str, Any]=None ):
if self.first_batch:
print(F"Model dtype: {self.a.dtype}, {self.b.dtype}. Input dtype: {x.dtype}" )
SCREAMING_SNAKE_CASE_ = False
return x * self.a[0] + self.b[0]
class lowerCamelCase_ ( torch.nn.Module ):
'''simple docstring'''
def __init__( self : Optional[int] , _lowerCAmelCase : Any=0 , _lowerCAmelCase : Any=0 , _lowerCAmelCase : Optional[Any]=False ):
super().__init__()
SCREAMING_SNAKE_CASE_ = torch.nn.Parameter(torch.tensor(_lowerCAmelCase ).float() )
SCREAMING_SNAKE_CASE_ = torch.nn.Parameter(torch.tensor(_lowerCAmelCase ).float() )
SCREAMING_SNAKE_CASE_ = True
def lowerCAmelCase_ ( self : Optional[Any] , _lowerCAmelCase : Optional[int]=None ):
if self.first_batch:
print(F"Model dtype: {self.a.dtype}, {self.b.dtype}. Input dtype: {x.dtype}" )
SCREAMING_SNAKE_CASE_ = False
return x * self.a + self.b
def UpperCAmelCase_ ( __UpperCAmelCase : Dict , __UpperCAmelCase : int = 16 ) -> Union[str, Any]:
from datasets import load_dataset
from transformers import AutoTokenizer
SCREAMING_SNAKE_CASE_ = AutoTokenizer.from_pretrained('bert-base-cased' )
SCREAMING_SNAKE_CASE_ = {'train': 'tests/test_samples/MRPC/train.csv', 'validation': 'tests/test_samples/MRPC/dev.csv'}
SCREAMING_SNAKE_CASE_ = load_dataset('csv' , data_files=__UpperCAmelCase )
SCREAMING_SNAKE_CASE_ = datasets['train'].unique('label' )
SCREAMING_SNAKE_CASE_ = {v: i for i, v in enumerate(__UpperCAmelCase )}
def tokenize_function(__UpperCAmelCase : Optional[int] ):
# max_length=None => use the model max length (it's actually the default)
SCREAMING_SNAKE_CASE_ = tokenizer(
examples['sentence1'] , examples['sentence2'] , truncation=__UpperCAmelCase , max_length=__UpperCAmelCase , padding='max_length' )
if "label" in examples:
SCREAMING_SNAKE_CASE_ = [label_to_id[l] for l in examples['label']]
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
SCREAMING_SNAKE_CASE_ = datasets.map(
__UpperCAmelCase , batched=__UpperCAmelCase , remove_columns=['sentence1', 'sentence2', 'label'] , )
def collate_fn(__UpperCAmelCase : Dict ):
# On TPU it's best to pad everything to the same length or training will be very slow.
if accelerator.distributed_type == DistributedType.TPU:
return tokenizer.pad(__UpperCAmelCase , padding='max_length' , max_length=1_28 , return_tensors='pt' )
return tokenizer.pad(__UpperCAmelCase , padding='longest' , return_tensors='pt' )
# Instantiate dataloaders.
SCREAMING_SNAKE_CASE_ = DataLoader(tokenized_datasets['train'] , shuffle=__UpperCAmelCase , collate_fn=__UpperCAmelCase , batch_size=2 )
SCREAMING_SNAKE_CASE_ = DataLoader(tokenized_datasets['validation'] , shuffle=__UpperCAmelCase , collate_fn=__UpperCAmelCase , batch_size=1 )
    return train_dataloader, eval_dataloader
| 31 | 0 |
import argparse
import json
from pathlib import Path
import torch
import torchaudio
from datasets import load_dataset
from huggingface_hub import hf_hub_download
from transformers import ASTConfig, ASTFeatureExtractor, ASTForAudioClassification
from transformers.utils import logging
logging.set_verbosity_info()
lowercase : Dict = logging.get_logger(__name__)
def A_ ( A__ ) -> int:
a__ : Union[str, Any] = ASTConfig()
if "10-10" in model_name:
pass
elif "speech-commands" in model_name:
a__ : Dict = 128
elif "12-12" in model_name:
a__ : Optional[int] = 12
a__ : Optional[int] = 12
elif "14-14" in model_name:
a__ : Any = 14
a__ : Dict = 14
elif "16-16" in model_name:
a__ : List[str] = 16
a__ : List[Any] = 16
else:
raise ValueError('Model not supported' )
a__ : Dict = """huggingface/label-files"""
if "speech-commands" in model_name:
a__ : str = 35
a__ : Union[str, Any] = """speech-commands-v2-id2label.json"""
else:
a__ : List[Any] = 527
a__ : Union[str, Any] = """audioset-id2label.json"""
a__ : Optional[Any] = json.load(open(hf_hub_download(__A , __A , repo_type='dataset' ) , 'r' ) )
a__ : int = {int(__A ): v for k, v in idalabel.items()}
a__ : Any = idalabel
a__ : List[str] = {v: k for k, v in idalabel.items()}
return config
def A_ ( A__ ) -> List[str]:
if "module.v" in name:
a__ : Dict = name.replace('module.v' , 'audio_spectrogram_transformer' )
if "cls_token" in name:
a__ : List[str] = name.replace('cls_token' , 'embeddings.cls_token' )
if "dist_token" in name:
a__ : int = name.replace('dist_token' , 'embeddings.distillation_token' )
if "pos_embed" in name:
a__ : str = name.replace('pos_embed' , 'embeddings.position_embeddings' )
if "patch_embed.proj" in name:
a__ : int = name.replace('patch_embed.proj' , 'embeddings.patch_embeddings.projection' )
# transformer blocks
if "blocks" in name:
a__ : Any = name.replace('blocks' , 'encoder.layer' )
if "attn.proj" in name:
a__ : Union[str, Any] = name.replace('attn.proj' , 'attention.output.dense' )
if "attn" in name:
a__ : List[str] = name.replace('attn' , 'attention.self' )
if "norm1" in name:
a__ : Optional[Any] = name.replace('norm1' , 'layernorm_before' )
if "norm2" in name:
a__ : Any = name.replace('norm2' , 'layernorm_after' )
if "mlp.fc1" in name:
a__ : int = name.replace('mlp.fc1' , 'intermediate.dense' )
if "mlp.fc2" in name:
a__ : List[Any] = name.replace('mlp.fc2' , 'output.dense' )
# final layernorm
if "audio_spectrogram_transformer.norm" in name:
a__ : str = name.replace('audio_spectrogram_transformer.norm' , 'audio_spectrogram_transformer.layernorm' )
# classifier head
if "module.mlp_head.0" in name:
a__ : List[str] = name.replace('module.mlp_head.0' , 'classifier.layernorm' )
if "module.mlp_head.1" in name:
a__ : Optional[Any] = name.replace('module.mlp_head.1' , 'classifier.dense' )
return name
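# Worked example (upstream name of the function above: rename_key):
#   "module.v.blocks.0.attn.proj.weight"
#   -> "audio_spectrogram_transformer.encoder.layer.0.attention.output.dense.weight"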
def A_ ( A__ , A__ ) -> Optional[int]:
for key in orig_state_dict.copy().keys():
a__ : Dict = orig_state_dict.pop(__A )
if "qkv" in key:
a__ : Tuple = key.split('.' )
a__ : int = int(key_split[3] )
a__ : Any = config.hidden_size
if "weight" in key:
a__ : Union[str, Any] = val[:dim, :]
a__ : Union[str, Any] = val[dim : dim * 2, :]
a__ : List[Any] = val[-dim:, :]
else:
a__ : Any = val[:dim]
a__ : List[Any] = val[dim : dim * 2]
a__ : Optional[Any] = val[-dim:]
else:
a__ : Dict = val
return orig_state_dict
def A_ ( A__ ) -> str:
a__ : Union[str, Any] = [
"""module.v.head.weight""",
"""module.v.head.bias""",
"""module.v.head_dist.weight""",
"""module.v.head_dist.bias""",
]
for k in ignore_keys:
state_dict.pop(__A , __A )
@torch.no_grad()
def A_ ( A__ , A__ , A__=False ) -> List[Any]:
a__ : Union[str, Any] = get_audio_spectrogram_transformer_config(__A )
a__ : List[str] = {
"""ast-finetuned-audioset-10-10-0.4593""": (
"""https://www.dropbox.com/s/ca0b1v2nlxzyeb4/audioset_10_10_0.4593.pth?dl=1"""
),
"""ast-finetuned-audioset-10-10-0.450""": (
"""https://www.dropbox.com/s/1tv0hovue1bxupk/audioset_10_10_0.4495.pth?dl=1"""
),
"""ast-finetuned-audioset-10-10-0.448""": (
"""https://www.dropbox.com/s/6u5sikl4b9wo4u5/audioset_10_10_0.4483.pth?dl=1"""
),
"""ast-finetuned-audioset-10-10-0.448-v2""": (
"""https://www.dropbox.com/s/kt6i0v9fvfm1mbq/audioset_10_10_0.4475.pth?dl=1"""
),
"""ast-finetuned-audioset-12-12-0.447""": (
"""https://www.dropbox.com/s/snfhx3tizr4nuc8/audioset_12_12_0.4467.pth?dl=1"""
),
"""ast-finetuned-audioset-14-14-0.443""": (
"""https://www.dropbox.com/s/z18s6pemtnxm4k7/audioset_14_14_0.4431.pth?dl=1"""
),
"""ast-finetuned-audioset-16-16-0.442""": (
"""https://www.dropbox.com/s/mdsa4t1xmcimia6/audioset_16_16_0.4422.pth?dl=1"""
),
"""ast-finetuned-speech-commands-v2""": (
"""https://www.dropbox.com/s/q0tbqpwv44pquwy/speechcommands_10_10_0.9812.pth?dl=1"""
),
}
# load original state_dict
a__ : List[Any] = model_name_to_url[model_name]
a__ : Union[str, Any] = torch.hub.load_state_dict_from_url(__A , map_location='cpu' )
# remove some keys
remove_keys(__A )
# rename some keys
a__ : str = convert_state_dict(__A , __A )
# load 🤗 model
a__ : Tuple = ASTForAudioClassification(__A )
model.eval()
model.load_state_dict(__A )
# verify outputs on dummy input
# source: https://github.com/YuanGongND/ast/blob/79e873b8a54d0a3b330dd522584ff2b9926cd581/src/run.py#L62
a__ : Optional[int] = -4.2_67_73_93 if """speech-commands""" not in model_name else -6.84_59_78
a__ : Dict = 4.5_68_99_74 if """speech-commands""" not in model_name else 5.5_65_45_26
a__ : int = 1024 if """speech-commands""" not in model_name else 128
a__ : List[Any] = ASTFeatureExtractor(mean=__A , std=__A , max_length=__A )
if "speech-commands" in model_name:
a__ : str = load_dataset('speech_commands' , 'v0.02' , split='validation' )
a__ : List[str] = dataset[0]["""audio"""]["""array"""]
else:
a__ : Tuple = hf_hub_download(
repo_id='nielsr/audio-spectogram-transformer-checkpoint' , filename='sample_audio.flac' , repo_type='dataset' , )
a__ : Optional[Any] = torchaudio.load(__A )
a__ : int = waveform.squeeze().numpy()
a__ : Optional[Any] = feature_extractor(__A , sampling_rate=1_6000 , return_tensors='pt' )
# forward pass
a__ : Optional[int] = model(**__A )
a__ : Union[str, Any] = outputs.logits
if model_name == "ast-finetuned-audioset-10-10-0.4593":
a__ : Optional[Any] = torch.tensor([-0.87_60, -7.00_42, -8.66_02] )
elif model_name == "ast-finetuned-audioset-10-10-0.450":
a__ : Any = torch.tensor([-1.19_86, -7.09_03, -8.27_18] )
elif model_name == "ast-finetuned-audioset-10-10-0.448":
a__ : Tuple = torch.tensor([-2.61_28, -8.00_80, -9.43_44] )
elif model_name == "ast-finetuned-audioset-10-10-0.448-v2":
a__ : Any = torch.tensor([-1.50_80, -7.45_34, -8.89_17] )
elif model_name == "ast-finetuned-audioset-12-12-0.447":
a__ : Optional[Any] = torch.tensor([-0.50_50, -6.58_33, -8.08_43] )
elif model_name == "ast-finetuned-audioset-14-14-0.443":
a__ : Any = torch.tensor([-0.38_26, -7.03_36, -8.24_13] )
elif model_name == "ast-finetuned-audioset-16-16-0.442":
a__ : int = torch.tensor([-1.21_13, -6.91_01, -8.34_70] )
elif model_name == "ast-finetuned-speech-commands-v2":
a__ : Optional[Any] = torch.tensor([6.15_89, -8.05_66, -8.79_84] )
else:
raise ValueError('Unknown model name' )
if not torch.allclose(logits[0, :3] , __A , atol=1E-4 ):
raise ValueError('Logits don\'t match' )
print('Looks ok!' )
if pytorch_dump_folder_path is not None:
Path(__A ).mkdir(exist_ok=__A )
print(F'Saving model {model_name} to {pytorch_dump_folder_path}' )
model.save_pretrained(__A )
print(F'Saving feature extractor to {pytorch_dump_folder_path}' )
feature_extractor.save_pretrained(__A )
if push_to_hub:
print('Pushing model and feature extractor to the hub...' )
model.push_to_hub(F'MIT/{model_name}' )
feature_extractor.push_to_hub(F'MIT/{model_name}' )
if __name__ == "__main__":
lowercase : int = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--model_name""",
default="""ast-finetuned-audioset-10-10-0.4593""",
type=str,
help="""Name of the Audio Spectrogram Transformer model you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
)
parser.add_argument(
"""--push_to_hub""", action="""store_true""", help="""Whether or not to push the converted model to the 🤗 hub."""
)
lowercase : Tuple = parser.parse_args()
convert_audio_spectrogram_transformer_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
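    # Example invocation (script filename and output folder are assumptions):
    #   python convert_audio_spectrogram_transformer.py \
    #       --model_name ast-finetuned-audioset-10-10-0.4593 \
    #       --pytorch_dump_folder_path ./ast-converted --push_to_hub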
| 715 |
import collections
import os
import re
from pathlib import Path
lowercase : int = """src/transformers"""
# Matches is_xxx_available()
lowercase : List[str] = re.compile(r"""is\_([a-z_]*)_available()""")
# Catches a one-line _import_struct = {xxx}
lowercase : str = re.compile(r"""^_import_structure\s+=\s+\{([^\}]+)\}""")
# Catches a line with a key-values pattern: "bla": ["foo", "bar"]
lowercase : List[str] = re.compile(r"""\s+\"\S*\":\s+\[([^\]]*)\]""")
# Catches a line if not is_foo_available
lowercase : List[Any] = re.compile(r"""^\s*if\s+not\s+is\_[a-z_]*\_available\(\)""")
# Catches a line _import_struct["bla"].append("foo")
lowercase : Optional[int] = re.compile(r"""^\s*_import_structure\[\"\S*\"\]\.append\(\"(\S*)\"\)""")
# Catches a line _import_struct["bla"].extend(["foo", "bar"]) or _import_struct["bla"] = ["foo", "bar"]
lowercase : Optional[Any] = re.compile(r"""^\s*_import_structure\[\S*\](?:\.extend\(|\s*=\s+)\[([^\]]*)\]""")
# Catches a line with an object between quotes and a comma: "MyModel",
lowercase : List[Any] = re.compile(r"""^\s+\"([^\"]+)\",""")
# Catches a line with objects between brackets only: ["foo", "bar"],
lowercase : Tuple = re.compile(r"""^\s+\[([^\]]+)\]""")
# Catches a line with from foo import bar, bla, boo
lowercase : str = re.compile(r"""\s+from\s+\S*\s+import\s+([^\(\s].*)\n""")
# Catches a line with try:
lowercase : int = re.compile(r"""^\s*try:""")
# Catches a line with else:
lowercase : List[str] = re.compile(r"""^\s*else:""")
def A_ ( A__ ) -> Optional[int]:
if _re_test_backend.search(A__ ) is None:
return None
a__ : Optional[Any] = [b[0] for b in _re_backend.findall(A__ )]
backends.sort()
return "_and_".join(A__ )
def A_ ( A__ ) -> str:
with open(A__ , 'r' , encoding='utf-8' , newline='\n' ) as f:
a__ : Optional[Any] = f.readlines()
a__ : Optional[Any] = 0
while line_index < len(A__ ) and not lines[line_index].startswith('_import_structure = {' ):
line_index += 1
# If this is a traditional init, just return.
if line_index >= len(A__ ):
return None
# First grab the objects without a specific backend in _import_structure
a__ : int = []
while not lines[line_index].startswith('if TYPE_CHECKING' ) and find_backend(lines[line_index] ) is None:
a__ : Optional[int] = lines[line_index]
# If we have everything on a single line, let's deal with it.
if _re_one_line_import_struct.search(A__ ):
a__ : Optional[int] = _re_one_line_import_struct.search(A__ ).groups()[0]
a__ : List[str] = re.findall(R'\[([^\]]+)\]' , A__ )
for imp in imports:
objects.extend([obj[1:-1] for obj in imp.split(', ' )] )
line_index += 1
continue
a__ : int = _re_import_struct_key_value.search(A__ )
if single_line_import_search is not None:
a__ : List[Any] = [obj[1:-1] for obj in single_line_import_search.groups()[0].split(', ' ) if len(A__ ) > 0]
objects.extend(A__ )
elif line.startswith(' ' * 8 + '"' ):
objects.append(line[9:-3] )
line_index += 1
a__ : Any = {'none': objects}
# Let's continue with backend-specific objects in _import_structure
while not lines[line_index].startswith('if TYPE_CHECKING' ):
# If the line is an if not is_backend_available, we grab all objects associated.
a__ : Dict = find_backend(lines[line_index] )
# Check if the backend declaration is inside a try block:
if _re_try.search(lines[line_index - 1] ) is None:
a__ : List[Any] = None
if backend is not None:
line_index += 1
# Scroll until we hit the else block of try-except-else
while _re_else.search(lines[line_index] ) is None:
line_index += 1
line_index += 1
a__ : Any = []
# Until we unindent, add backend objects to the list
while len(lines[line_index] ) <= 1 or lines[line_index].startswith(' ' * 4 ):
a__ : Any = lines[line_index]
if _re_import_struct_add_one.search(A__ ) is not None:
objects.append(_re_import_struct_add_one.search(A__ ).groups()[0] )
elif _re_import_struct_add_many.search(A__ ) is not None:
a__ : int = _re_import_struct_add_many.search(A__ ).groups()[0].split(', ' )
a__ : List[Any] = [obj[1:-1] for obj in imports if len(A__ ) > 0]
objects.extend(A__ )
elif _re_between_brackets.search(A__ ) is not None:
a__ : List[str] = _re_between_brackets.search(A__ ).groups()[0].split(', ' )
a__ : int = [obj[1:-1] for obj in imports if len(A__ ) > 0]
objects.extend(A__ )
elif _re_quote_object.search(A__ ) is not None:
objects.append(_re_quote_object.search(A__ ).groups()[0] )
elif line.startswith(' ' * 8 + '"' ):
objects.append(line[9:-3] )
elif line.startswith(' ' * 12 + '"' ):
objects.append(line[13:-3] )
line_index += 1
a__ : Optional[Any] = objects
else:
line_index += 1
# At this stage we are in the TYPE_CHECKING part, first grab the objects without a specific backend
a__ : Union[str, Any] = []
while (
line_index < len(A__ )
and find_backend(lines[line_index] ) is None
and not lines[line_index].startswith('else' )
):
a__ : List[Any] = lines[line_index]
a__ : List[str] = _re_import.search(A__ )
if single_line_import_search is not None:
objects.extend(single_line_import_search.groups()[0].split(', ' ) )
elif line.startswith(' ' * 8 ):
objects.append(line[8:-2] )
line_index += 1
a__ : Dict = {'none': objects}
# Let's continue with backend-specific objects
while line_index < len(A__ ):
# If the line is an if is_backend_available, we grab all objects associated.
a__ : Any = find_backend(lines[line_index] )
# Check if the backend declaration is inside a try block:
if _re_try.search(lines[line_index - 1] ) is None:
a__ : str = None
if backend is not None:
line_index += 1
# Scroll until we hit the else block of try-except-else
while _re_else.search(lines[line_index] ) is None:
line_index += 1
line_index += 1
a__ : List[str] = []
# Until we unindent, add backend objects to the list
while len(lines[line_index] ) <= 1 or lines[line_index].startswith(' ' * 8 ):
a__ : List[str] = lines[line_index]
a__ : Union[str, Any] = _re_import.search(A__ )
if single_line_import_search is not None:
objects.extend(single_line_import_search.groups()[0].split(', ' ) )
elif line.startswith(' ' * 12 ):
objects.append(line[12:-2] )
line_index += 1
a__ : int = objects
else:
line_index += 1
return import_dict_objects, type_hint_objects
def A_ ( A__ , A__ ) -> Dict:
def find_duplicates(A__ ):
return [k for k, v in collections.Counter(A__ ).items() if v > 1]
if list(import_dict_objects.keys() ) != list(type_hint_objects.keys() ):
return ["Both sides of the init do not have the same backends!"]
a__ : Union[str, Any] = []
for key in import_dict_objects.keys():
a__ : Union[str, Any] = find_duplicates(import_dict_objects[key] )
if duplicate_imports:
errors.append(F'Duplicate _import_structure definitions for: {duplicate_imports}' )
a__ : Optional[int] = find_duplicates(type_hint_objects[key] )
if duplicate_type_hints:
errors.append(F'Duplicate TYPE_CHECKING objects for: {duplicate_type_hints}' )
if sorted(set(import_dict_objects[key] ) ) != sorted(set(type_hint_objects[key] ) ):
a__ : str = 'base imports' if key == 'none' else F'{key} backend'
errors.append(F'Differences for {name}:' )
for a in type_hint_objects[key]:
if a not in import_dict_objects[key]:
errors.append(F' {a} in TYPE_HINT but not in _import_structure.' )
for a in import_dict_objects[key]:
if a not in type_hint_objects[key]:
errors.append(F' {a} in _import_structure but not in TYPE_HINT.' )
return errors
def A_ ( ) -> List[Any]:
a__ : Tuple = []
for root, _, files in os.walk(A__ ):
if "__init__.py" in files:
a__ : Tuple = os.path.join(A__ , '__init__.py' )
a__ : Optional[int] = parse_init(A__ )
if objects is not None:
a__ : List[Any] = analyze_results(*A__ )
if len(A__ ) > 0:
a__ : List[Any] = F'Problem in {fname}, both halves do not define the same objects.\n{errors[0]}'
failures.append('\n'.join(A__ ) )
if len(A__ ) > 0:
raise ValueError('\n\n'.join(A__ ) )
def A_ ( ) -> List[Any]:
a__ : List[str] = []
for path, directories, files in os.walk(A__ ):
for folder in directories:
# Ignore private modules
if folder.startswith('_' ):
directories.remove(A__ )
continue
# Ignore leftovers from branches (empty folders apart from pycache)
if len(list((Path(A__ ) / folder).glob('*.py' ) ) ) == 0:
continue
a__ : List[str] = str((Path(A__ ) / folder).relative_to(A__ ) )
a__ : List[Any] = short_path.replace(os.path.sep , '.' )
submodules.append(A__ )
for fname in files:
if fname == "__init__.py":
continue
a__ : str = str((Path(A__ ) / fname).relative_to(A__ ) )
a__ : Any = short_path.replace('.py' , '' ).replace(os.path.sep , '.' )
if len(submodule.split('.' ) ) == 1:
submodules.append(A__ )
return submodules
lowercase : Union[str, Any] = [
"""convert_pytorch_checkpoint_to_tf2""",
"""modeling_flax_pytorch_utils""",
"""models.esm.openfold_utils""",
]
def A_ ( ) -> Dict:
# This is to make sure the transformers module imported is the one in the repo.
from transformers.utils import direct_transformers_import
a__ : List[Any] = direct_transformers_import(A__ )
a__ : Optional[int] = set(transformers._import_structure.keys() )
# This contains all the base keys of the _import_structure object defined in the init, but if the user is missing
# some optional dependencies, they may not have all of them. Thus we read the init to read all additions and
    # (potentially re-) add them.
with open(os.path.join(A__ , '__init__.py' ) , 'r' ) as f:
a__ : Optional[Any] = f.read()
import_structure_keys.update(set(re.findall(R'import_structure\[\"([^\"]*)\"\]' , A__ ) ) )
a__ : List[str] = [
module
for module in get_transformers_submodules()
if module not in IGNORE_SUBMODULES and module not in import_structure_keys
]
if len(A__ ) > 0:
a__ : Optional[int] = '\n'.join(F'- {module}' for module in module_not_registered )
raise ValueError(
            'The following submodules are not properly registered in the main init of Transformers:\n'
F'{list_of_modules}\n'
'Make sure they appear somewhere in the keys of `_import_structure` with an empty list as value.' )
if __name__ == "__main__":
check_all_inits()
check_submodules()
| 392 | 0 |
'''simple docstring'''
from __future__ import annotations
import math
A_ = "2020.9.26"
A_ = "xcodz-dot, cclaus, dhruvmanila"
def _UpperCamelCase ( __UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ) -> tuple[float, float]:
if not all(isinstance(__UpperCamelCase ,(float, int) ) for val in locals().values() ):
lowerCamelCase_ = f'''Input values must either be float or int: {list(locals().values() )}'''
raise TypeError(__UpperCamelCase )
lowerCamelCase_ = ((x * distance) / (z + distance)) * scale
lowerCamelCase_ = ((y * distance) / (z + distance)) * scale
return projected_x, projected_y
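# The perspective projection above computes
#   x' = x * distance / (z + distance) * scale
#   y' = y * distance / (z + distance) * scale
# e.g. convert_to_ad(1.0, 2.0, 3.0, 10.0, 10.0) ~= (7.692, 15.385).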
def _UpperCamelCase ( __UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ) -> tuple[float, float, float]:
if not isinstance(__UpperCamelCase ,__UpperCamelCase ):
raise TypeError('Axis must be a str' )
lowerCamelCase_ = locals()
del input_variables["axis"]
if not all(isinstance(__UpperCamelCase ,(float, int) ) for val in input_variables.values() ):
lowerCamelCase_ = (
'Input values except axis must either be float or int: '
f'''{list(input_variables.values() )}'''
)
raise TypeError(__UpperCamelCase )
lowerCamelCase_ = (angle % 3_60) / 4_50 * 1_80 / math.pi
if axis == "z":
lowerCamelCase_ = x * math.cos(__UpperCamelCase ) - y * math.sin(__UpperCamelCase )
lowerCamelCase_ = y * math.cos(__UpperCamelCase ) + x * math.sin(__UpperCamelCase )
lowerCamelCase_ = z
elif axis == "x":
lowerCamelCase_ = y * math.cos(__UpperCamelCase ) - z * math.sin(__UpperCamelCase )
lowerCamelCase_ = z * math.cos(__UpperCamelCase ) + y * math.sin(__UpperCamelCase )
lowerCamelCase_ = x
elif axis == "y":
lowerCamelCase_ = x * math.cos(__UpperCamelCase ) - z * math.sin(__UpperCamelCase )
lowerCamelCase_ = z * math.cos(__UpperCamelCase ) + x * math.sin(__UpperCamelCase )
lowerCamelCase_ = y
else:
raise ValueError('not a valid axis, choose one of \'x\', \'y\', \'z\'' )
return new_x, new_y, new_z
if __name__ == "__main__":
import doctest
doctest.testmod()
print(f'''{convert_to_ad(1.0, 2.0, 3.0, 10.0, 10.0) = }''')
print(f'''{rotate(1.0, 2.0, 3.0, "y", 90.0) = }''')
| 42 |
from random import shuffle
import tensorflow as tf
from numpy import array
def A_ ( _lowerCAmelCase , _lowerCAmelCase ) -> Optional[Any]:
UpperCamelCase : List[Any] = int(_lowerCAmelCase )
assert noofclusters < len(_lowerCAmelCase )
# Find out the dimensionality
UpperCamelCase : str = len(vectors[0] )
# Will help select random centroids from among the available vectors
UpperCamelCase : List[str] = list(range(len(_lowerCAmelCase ) ) )
shuffle(_lowerCAmelCase )
# GRAPH OF COMPUTATION
# We initialize a new graph and set it as the default during each run
# of this algorithm. This ensures that as this function is called
# multiple times, the default graph doesn't keep getting crowded with
# unused ops and Variables from previous function calls.
UpperCamelCase : Any = tf.Graph()
with graph.as_default():
# SESSION OF COMPUTATION
UpperCamelCase : Union[str, Any] = tf.Session()
##CONSTRUCTING THE ELEMENTS OF COMPUTATION
##First lets ensure we have a Variable vector for each centroid,
##initialized to one of the vectors from the available data points
UpperCamelCase : Optional[int] = [
tf.Variable(vectors[vector_indices[i]] ) for i in range(_lowerCAmelCase )
]
##These nodes will assign the centroid Variables the appropriate
##values
UpperCamelCase : Tuple = tf.placeholder("float64" , [dim] )
UpperCamelCase : str = []
for centroid in centroids:
cent_assigns.append(tf.assign(_lowerCAmelCase , _lowerCAmelCase ) )
##Variables for cluster assignments of individual vectors(initialized
##to 0 at first)
UpperCamelCase : List[str] = [tf.Variable(0 ) for i in range(len(_lowerCAmelCase ) )]
##These nodes will assign an assignment Variable the appropriate
##value
UpperCamelCase : Any = tf.placeholder("int32" )
UpperCamelCase : Optional[int] = []
for assignment in assignments:
cluster_assigns.append(tf.assign(_lowerCAmelCase , _lowerCAmelCase ) )
##Now lets construct the node that will compute the mean
# The placeholder for the input
UpperCamelCase : Tuple = tf.placeholder("float" , [None, dim] )
# The Node/op takes the input and computes a mean along the 0th
# dimension, i.e. the list of input vectors
UpperCamelCase : List[str] = tf.reduce_mean(_lowerCAmelCase , 0 )
##Node for computing Euclidean distances
# Placeholders for input
UpperCamelCase : Optional[int] = tf.placeholder("float" , [dim] )
UpperCamelCase : Any = tf.placeholder("float" , [dim] )
UpperCamelCase : List[str] = tf.sqrt(tf.reduce_sum(tf.pow(tf.sub(_lowerCAmelCase , _lowerCAmelCase ) , 2 ) ) )
##This node will figure out which cluster to assign a vector to,
##based on Euclidean distances of the vector from the centroids.
# Placeholder for input
UpperCamelCase : Tuple = tf.placeholder("float" , [noofclusters] )
UpperCamelCase : str = tf.argmin(_lowerCAmelCase , 0 )
##INITIALIZING STATE VARIABLES
##This will help initialization of all Variables defined with respect
##to the graph. The Variable-initializer should be defined after
##all the Variables have been constructed, so that each of them
##will be included in the initialization.
UpperCamelCase : Optional[Any] = tf.initialize_all_variables()
# Initialize all variables
sess.run(_lowerCAmelCase )
##CLUSTERING ITERATIONS
# Now perform the Expectation-Maximization steps of K-Means clustering
# iterations. To keep things simple, we will only do a set number of
# iterations, instead of using a Stopping Criterion.
UpperCamelCase : Any = 100
for _ in range(_lowerCAmelCase ):
##EXPECTATION STEP
##Based on the centroid locations till last iteration, compute
##the _expected_ centroid assignments.
# Iterate over each vector
for vector_n in range(len(_lowerCAmelCase ) ):
UpperCamelCase : Dict = vectors[vector_n]
# Compute Euclidean distance between this vector and each
# centroid. Remember that this list cannot be named
#'centroid_distances', since that is the input to the
# cluster assignment node.
UpperCamelCase : Optional[Any] = [
sess.run(_lowerCAmelCase , feed_dict={va: vect, va: sess.run(_lowerCAmelCase )} )
for centroid in centroids
]
# Now use the cluster assignment node, with the distances
# as the input
UpperCamelCase : Tuple = sess.run(
_lowerCAmelCase , feed_dict={centroid_distances: distances} )
# Now assign the value to the appropriate state variable
sess.run(
cluster_assigns[vector_n] , feed_dict={assignment_value: assignment} )
##MAXIMIZATION STEP
# Based on the expected state computed from the Expectation Step,
# compute the locations of the centroids so as to maximize the
# overall objective of minimizing within-cluster Sum-of-Squares
for cluster_n in range(_lowerCAmelCase ):
# Collect all the vectors assigned to this cluster
UpperCamelCase : str = [
vectors[i]
for i in range(len(_lowerCAmelCase ) )
if sess.run(assignments[i] ) == cluster_n
]
# Compute new centroid location
UpperCamelCase : Optional[int] = sess.run(
_lowerCAmelCase , feed_dict={mean_input: array(_lowerCAmelCase )} )
# Assign value to appropriate variable
sess.run(
cent_assigns[cluster_n] , feed_dict={centroid_value: new_location} )
# Return centroids and assignments
UpperCamelCase : Union[str, Any] = sess.run(_lowerCAmelCase )
UpperCamelCase : Tuple = sess.run(_lowerCAmelCase )
return centroids, assignments
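# Usage sketch (the entry point above corresponds upstream to something like
# TFKMeansCluster(vectors, noofclusters) -- the name is assumed here):
#   vectors = [[1.0, 1.0], [1.2, 0.8], [8.0, 8.0], [7.9, 8.2]]
#   centroids, assignments = TFKMeansCluster(vectors, 2)
# Note: tf.Session, tf.placeholder and tf.sub are pre-2.x TensorFlow APIs, so
# running this today requires tf.compat.v1 with eager execution disabled.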
| 629 | 0 |
import json
from typing import List, Optional, Tuple
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_roberta import RobertaTokenizer
a__: Optional[int] = logging.get_logger(__name__)
a__: str = {'vocab_file': 'vocab.json', 'merges_file': 'merges.txt', 'tokenizer_file': 'tokenizer.json'}
a__: int = {
'vocab_file': {
'roberta-base': 'https://huggingface.co/roberta-base/resolve/main/vocab.json',
'roberta-large': 'https://huggingface.co/roberta-large/resolve/main/vocab.json',
'roberta-large-mnli': 'https://huggingface.co/roberta-large-mnli/resolve/main/vocab.json',
'distilroberta-base': 'https://huggingface.co/distilroberta-base/resolve/main/vocab.json',
'roberta-base-openai-detector': 'https://huggingface.co/roberta-base-openai-detector/resolve/main/vocab.json',
'roberta-large-openai-detector': (
'https://huggingface.co/roberta-large-openai-detector/resolve/main/vocab.json'
),
},
'merges_file': {
'roberta-base': 'https://huggingface.co/roberta-base/resolve/main/merges.txt',
'roberta-large': 'https://huggingface.co/roberta-large/resolve/main/merges.txt',
'roberta-large-mnli': 'https://huggingface.co/roberta-large-mnli/resolve/main/merges.txt',
'distilroberta-base': 'https://huggingface.co/distilroberta-base/resolve/main/merges.txt',
'roberta-base-openai-detector': 'https://huggingface.co/roberta-base-openai-detector/resolve/main/merges.txt',
'roberta-large-openai-detector': (
'https://huggingface.co/roberta-large-openai-detector/resolve/main/merges.txt'
),
},
'tokenizer_file': {
'roberta-base': 'https://huggingface.co/roberta-base/resolve/main/tokenizer.json',
'roberta-large': 'https://huggingface.co/roberta-large/resolve/main/tokenizer.json',
'roberta-large-mnli': 'https://huggingface.co/roberta-large-mnli/resolve/main/tokenizer.json',
'distilroberta-base': 'https://huggingface.co/distilroberta-base/resolve/main/tokenizer.json',
'roberta-base-openai-detector': (
'https://huggingface.co/roberta-base-openai-detector/resolve/main/tokenizer.json'
),
'roberta-large-openai-detector': (
'https://huggingface.co/roberta-large-openai-detector/resolve/main/tokenizer.json'
),
},
}
a__: List[str] = {
'roberta-base': 512,
'roberta-large': 512,
'roberta-large-mnli': 512,
'distilroberta-base': 512,
'roberta-base-openai-detector': 512,
'roberta-large-openai-detector': 512,
}
class SCREAMING_SNAKE_CASE__ ( UpperCamelCase__ ):
__SCREAMING_SNAKE_CASE = VOCAB_FILES_NAMES
__SCREAMING_SNAKE_CASE = PRETRAINED_VOCAB_FILES_MAP
__SCREAMING_SNAKE_CASE = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__SCREAMING_SNAKE_CASE = ['''input_ids''', '''attention_mask''']
__SCREAMING_SNAKE_CASE = RobertaTokenizer
def __init__( self,__lowerCamelCase=None,__lowerCamelCase=None,__lowerCamelCase=None,__lowerCamelCase="replace",__lowerCamelCase="<s>",__lowerCamelCase="</s>",__lowerCamelCase="</s>",__lowerCamelCase="<s>",__lowerCamelCase="<unk>",__lowerCamelCase="<pad>",__lowerCamelCase="<mask>",__lowerCamelCase=False,__lowerCamelCase=True,**__lowerCamelCase,):
super().__init__(
__lowerCamelCase,__lowerCamelCase,tokenizer_file=__lowerCamelCase,errors=__lowerCamelCase,bos_token=__lowerCamelCase,eos_token=__lowerCamelCase,sep_token=__lowerCamelCase,cls_token=__lowerCamelCase,unk_token=__lowerCamelCase,pad_token=__lowerCamelCase,mask_token=__lowerCamelCase,add_prefix_space=__lowerCamelCase,trim_offsets=__lowerCamelCase,**__lowerCamelCase,)
A__ = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
if pre_tok_state.get('''add_prefix_space''',__lowerCamelCase ) != add_prefix_space:
A__ = getattr(__lowerCamelCase,pre_tok_state.pop('''type''' ) )
A__ = add_prefix_space
A__ = pre_tok_class(**__lowerCamelCase )
A__ = add_prefix_space
A__ = '''post_processor'''
A__ = getattr(self.backend_tokenizer,__lowerCamelCase,__lowerCamelCase )
if tokenizer_component_instance:
A__ = json.loads(tokenizer_component_instance.__getstate__() )
# The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class`
if "sep" in state:
A__ = tuple(state['''sep'''] )
if "cls" in state:
A__ = tuple(state['''cls'''] )
A__ = False
if state.get('''add_prefix_space''',__lowerCamelCase ) != add_prefix_space:
A__ = add_prefix_space
A__ = True
if state.get('''trim_offsets''',__lowerCamelCase ) != trim_offsets:
A__ = trim_offsets
A__ = True
if changes_to_apply:
A__ = getattr(__lowerCamelCase,state.pop('''type''' ) )
A__ = component_class(**__lowerCamelCase )
setattr(self.backend_tokenizer,__lowerCamelCase,__lowerCamelCase )
@property
def UpperCamelCase ( self ):
if self._mask_token is None:
if self.verbose:
logger.error('''Using mask_token, but it is not set yet.''' )
return None
return str(self._mask_token )
@mask_token.setter
def UpperCamelCase ( self,__lowerCamelCase ):
A__ = AddedToken(__lowerCamelCase,lstrip=__lowerCamelCase,rstrip=__lowerCamelCase ) if isinstance(__lowerCamelCase,__lowerCamelCase ) else value
A__ = value
def UpperCamelCase ( self,*__lowerCamelCase,**__lowerCamelCase ):
A__ = kwargs.get('''is_split_into_words''',__lowerCamelCase )
assert self.add_prefix_space or not is_split_into_words, (
f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
"to use it with pretokenized inputs."
)
return super()._batch_encode_plus(*__lowerCamelCase,**__lowerCamelCase )
def UpperCamelCase ( self,*__lowerCamelCase,**__lowerCamelCase ):
A__ = kwargs.get('''is_split_into_words''',__lowerCamelCase )
assert self.add_prefix_space or not is_split_into_words, (
f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
"to use it with pretokenized inputs."
)
return super()._encode_plus(*__lowerCamelCase,**__lowerCamelCase )
def UpperCamelCase ( self,__lowerCamelCase,__lowerCamelCase = None ):
A__ = self._tokenizer.model.save(__lowerCamelCase,name=__lowerCamelCase )
return tuple(__lowerCamelCase )
def UpperCamelCase ( self,__lowerCamelCase,__lowerCamelCase=None ):
A__ = [self.bos_token_id] + token_ids_a + [self.eos_token_id]
if token_ids_a is None:
return output
return output + [self.eos_token_id] + token_ids_a + [self.eos_token_id]
def UpperCamelCase ( self,__lowerCamelCase,__lowerCamelCase = None ):
A__ = [self.sep_token_id]
A__ = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
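# For reference, the two helpers above produce the standard RoBERTa layout for a
# sequence pair (X, Y) -- a descriptive note, not code from the original:
#   single: <s> X </s>
#   pair:   <s> X </s> </s> Y </s>
# with every token type id equal to 0, since RoBERTa does not use segment
# embeddings.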
| 212 |
import argparse
import logging
import pickle
import random
import time
import numpy as np
from transformers import BertTokenizer, GPTaTokenizer, RobertaTokenizer
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s', datefmt='%m/%d/%Y %H:%M:%S', level=logging.INFO
)
a__: Union[str, Any] = logging.getLogger(__name__)
def UpperCamelCase__( )->List[Any]:
A__ = argparse.ArgumentParser(
description='''Preprocess the data to avoid re-doing it several times by (tokenization + token_to_ids).''' )
parser.add_argument('''--file_path''' , type=UpperCamelCase__ , default='''data/dump.txt''' , help='''The path to the data.''' )
parser.add_argument('''--tokenizer_type''' , type=UpperCamelCase__ , default='''bert''' , choices=['''bert''', '''roberta''', '''gpt2'''] )
parser.add_argument('''--tokenizer_name''' , type=UpperCamelCase__ , default='''bert-base-uncased''' , help='''The tokenizer to use.''' )
parser.add_argument('''--dump_file''' , type=UpperCamelCase__ , default='''data/dump''' , help='''The dump file prefix.''' )
A__ = parser.parse_args()
logger.info(f"Loading Tokenizer ({args.tokenizer_name})" )
if args.tokenizer_type == "bert":
A__ = BertTokenizer.from_pretrained(args.tokenizer_name )
A__ = tokenizer.special_tokens_map['''cls_token'''] # `[CLS]`
A__ = tokenizer.special_tokens_map['''sep_token'''] # `[SEP]`
elif args.tokenizer_type == "roberta":
A__ = RobertaTokenizer.from_pretrained(args.tokenizer_name )
A__ = tokenizer.special_tokens_map['''cls_token'''] # `<s>`
A__ = tokenizer.special_tokens_map['''sep_token'''] # `</s>`
elif args.tokenizer_type == "gpt2":
A__ = GPTaTokenizer.from_pretrained(args.tokenizer_name )
A__ = tokenizer.special_tokens_map['''bos_token'''] # `<|endoftext|>`
A__ = tokenizer.special_tokens_map['''eos_token'''] # `<|endoftext|>`
logger.info(f"Loading text from {args.file_path}" )
with open(args.file_path , '''r''' , encoding='''utf8''' ) as fp:
A__ = fp.readlines()
logger.info('''Start encoding''' )
logger.info(f"{len(UpperCamelCase__ )} examples to process." )
A__ = []
A__ = 0
A__ = 1_00_00
A__ = time.time()
for text in data:
A__ = f"{bos} {text.strip()} {sep}"
A__ = tokenizer.encode(UpperCamelCase__ , add_special_tokens=UpperCamelCase__ )
rslt.append(UpperCamelCase__ )
iter += 1
if iter % interval == 0:
A__ = time.time()
logger.info(f"{iter} examples processed. - {(end-start):.2f}s/{interval}expl" )
A__ = time.time()
logger.info('''Finished binarization''' )
logger.info(f"{len(UpperCamelCase__ )} examples processed." )
A__ = f"{args.dump_file}.{args.tokenizer_name}.pickle"
A__ = tokenizer.vocab_size
if vocab_size < (1 << 16):
A__ = [np.uintaa(UpperCamelCase__ ) for d in rslt]
else:
A__ = [np.intaa(UpperCamelCase__ ) for d in rslt]
random.shuffle(rslt_ )
logger.info(f"Dump to {dp_file}" )
with open(UpperCamelCase__ , '''wb''' ) as handle:
pickle.dump(rslt_ , UpperCamelCase__ , protocol=pickle.HIGHEST_PROTOCOL )
if __name__ == "__main__":
main()
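# Hypothetical invocation sketch (file names and tokenizer choice are
# assumptions, not taken from the original script):
#
#   python binarized_data.py --file_path data/dump.txt --tokenizer_type bert \
#       --tokenizer_name bert-base-uncased --dump_file data/binarized
#
# This writes `data/binarized.bert-base-uncased.pickle`, a shuffled list of
# per-line token-id arrays stored as uint16 when the vocabulary fits in 16 bits
# and as int32 otherwise.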
| 212 | 1 |
"""simple docstring"""
import collections
import gzip
import os
import urllib
import numpy
from tensorflow.python.framework import dtypes, random_seed
from tensorflow.python.platform import gfile
from tensorflow.python.util.deprecation import deprecated
__A = collections.namedtuple('''_Datasets''', ['''train''', '''validation''', '''test'''])
# CVDF mirror of http://yann.lecun.com/exdb/mnist/
__A = '''https://storage.googleapis.com/cvdf-datasets/mnist/'''
def lowercase_ ( _lowerCamelCase: Optional[Any] ) -> Union[str, Any]:
'''simple docstring'''
__lowerCamelCase : List[Any] = numpy.dtype(numpy.uintaa ).newbyteorder(">" )
return numpy.frombuffer(bytestream.read(4 ) , dtype=__snake_case )[0]
@deprecated(__snake_case , "Please use tf.data to implement this functionality." )
def lowercase_ ( _lowerCamelCase: Union[str, Any] ) -> Optional[Any]:
'''simple docstring'''
print("Extracting" , f.name )
with gzip.GzipFile(fileobj=__snake_case ) as bytestream:
__lowerCamelCase : Union[str, Any] = _readaa(__snake_case )
if magic != 2051:
raise ValueError(
"Invalid magic number %d in MNIST image file: %s" % (magic, f.name) )
__lowerCamelCase : List[Any] = _readaa(__snake_case )
__lowerCamelCase : List[str] = _readaa(__snake_case )
__lowerCamelCase : List[Any] = _readaa(__snake_case )
__lowerCamelCase : int = bytestream.read(rows * cols * num_images )
__lowerCamelCase : List[Any] = numpy.frombuffer(__snake_case , dtype=numpy.uinta )
__lowerCamelCase : List[Any] = data.reshape(__snake_case , __snake_case , __snake_case , 1 )
return data
@deprecated(__snake_case , "Please use tf.one_hot on tensors." )
def lowercase_ ( _lowerCamelCase: List[Any] , _lowerCamelCase: Any ) -> Dict:
'''simple docstring'''
__lowerCamelCase : str = labels_dense.shape[0]
__lowerCamelCase : Optional[int] = numpy.arange(__snake_case ) * num_classes
__lowerCamelCase : List[Any] = numpy.zeros((num_labels, num_classes) )
__lowerCamelCase : Tuple = 1
return labels_one_hot
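# A minimal standalone sketch of the flat-index one-hot trick implemented above
# (variable names here are illustrative, not from the original):
#
#   dense = numpy.array([0, 2, 1])
#   offsets = numpy.arange(dense.shape[0]) * 3     # start of each row in .flat
#   one_hot = numpy.zeros((dense.shape[0], 3))
#   one_hot.flat[offsets + dense.ravel()] = 1      # one entry set per row
#   # one_hot -> [[1, 0, 0], [0, 0, 1], [0, 1, 0]]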
@deprecated(__snake_case , "Please use tf.data to implement this functionality." )
def lowercase_ ( _lowerCamelCase: List[str] , _lowerCamelCase: Optional[Any]=False , _lowerCamelCase: Optional[int]=10 ) -> Any:
'''simple docstring'''
print("Extracting" , f.name )
with gzip.GzipFile(fileobj=__snake_case ) as bytestream:
__lowerCamelCase : str = _readaa(__snake_case )
if magic != 2049:
raise ValueError(
"Invalid magic number %d in MNIST label file: %s" % (magic, f.name) )
__lowerCamelCase : Dict = _readaa(__snake_case )
__lowerCamelCase : str = bytestream.read(__snake_case )
__lowerCamelCase : Tuple = numpy.frombuffer(__snake_case , dtype=numpy.uinta )
if one_hot:
return _dense_to_one_hot(__snake_case , __snake_case )
return labels
class _snake_case :
@deprecated(
__lowerCAmelCase , "Please use alternatives such as official/mnist/_DataSet.py"
" from tensorflow/models." , )
def __init__( self : Tuple , UpperCAmelCase : str , UpperCAmelCase : Dict , UpperCAmelCase : int=False , UpperCAmelCase : List[Any]=False , UpperCAmelCase : int=dtypes.floataa , UpperCAmelCase : str=True , UpperCAmelCase : Tuple=None , ):
__lowerCamelCase , __lowerCamelCase : List[str] = random_seed.get_seed(__lowerCAmelCase )
# If op level seed is not set, use whatever graph level seed is returned
numpy.random.seed(seeda if seed is None else seeda )
__lowerCamelCase : Optional[Any] = dtypes.as_dtype(__lowerCAmelCase ).base_dtype
if dtype not in (dtypes.uinta, dtypes.floataa):
raise TypeError("Invalid image dtype %r, expected uint8 or float32" % dtype )
if fake_data:
__lowerCamelCase : Optional[Any] = 10000
__lowerCamelCase : Any = one_hot
else:
assert (
images.shape[0] == labels.shape[0]
), F"""images.shape: {images.shape} labels.shape: {labels.shape}"""
__lowerCamelCase : Optional[Any] = images.shape[0]
# Convert shape from [num examples, rows, columns, depth]
# to [num examples, rows*columns] (assuming depth == 1)
if reshape:
assert images.shape[3] == 1
__lowerCamelCase : List[str] = images.reshape(
images.shape[0] , images.shape[1] * images.shape[2] )
if dtype == dtypes.floataa:
# Convert from [0, 255] -> [0.0, 1.0].
__lowerCamelCase : List[Any] = images.astype(numpy.floataa )
__lowerCamelCase : List[Any] = numpy.multiply(__lowerCAmelCase , 1.0 / 2_5_5.0 )
__lowerCamelCase : Union[str, Any] = images
__lowerCamelCase : Optional[Any] = labels
__lowerCamelCase : Tuple = 0
__lowerCamelCase : Any = 0
@property
def lowerCamelCase__ ( self : str ):
return self._images
@property
def lowerCamelCase__ ( self : List[Any] ):
return self._labels
@property
def lowerCamelCase__ ( self : Dict ):
return self._num_examples
@property
def lowerCamelCase__ ( self : Any ):
return self._epochs_completed
def lowerCamelCase__ ( self : Optional[int] , UpperCAmelCase : Tuple , UpperCAmelCase : Any=False , UpperCAmelCase : List[str]=True ):
if fake_data:
__lowerCamelCase : Tuple = [1] * 784
__lowerCamelCase : Optional[int] = [1] + [0] * 9 if self.one_hot else 0
return (
[fake_image for _ in range(__lowerCAmelCase )],
[fake_label for _ in range(__lowerCAmelCase )],
)
__lowerCamelCase : Any = self._index_in_epoch
# Shuffle for the first epoch
if self._epochs_completed == 0 and start == 0 and shuffle:
__lowerCamelCase : Union[str, Any] = numpy.arange(self._num_examples )
numpy.random.shuffle(__lowerCAmelCase )
__lowerCamelCase : Optional[Any] = self.images[perma]
__lowerCamelCase : int = self.labels[perma]
# Go to the next epoch
if start + batch_size > self._num_examples:
# Finished epoch
self._epochs_completed += 1
# Get the rest examples in this epoch
__lowerCamelCase : int = self._num_examples - start
__lowerCamelCase : Any = self._images[start : self._num_examples]
__lowerCamelCase : Any = self._labels[start : self._num_examples]
# Shuffle the data
if shuffle:
__lowerCamelCase : Optional[int] = numpy.arange(self._num_examples )
numpy.random.shuffle(__lowerCAmelCase )
__lowerCamelCase : List[str] = self.images[perm]
__lowerCamelCase : Tuple = self.labels[perm]
# Start next epoch
__lowerCamelCase : Tuple = 0
__lowerCamelCase : Any = batch_size - rest_num_examples
__lowerCamelCase : Optional[Any] = self._index_in_epoch
__lowerCamelCase : str = self._images[start:end]
__lowerCamelCase : List[Any] = self._labels[start:end]
return (
numpy.concatenate((images_rest_part, images_new_part) , axis=0 ),
numpy.concatenate((labels_rest_part, labels_new_part) , axis=0 ),
)
else:
self._index_in_epoch += batch_size
__lowerCamelCase : Dict = self._index_in_epoch
return self._images[start:end], self._labels[start:end]
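    # Usage sketch for the epoch logic above (method and argument names follow
    # the un-obfuscated upstream code and are assumptions here): crossing an
    # epoch boundary reshuffles the data and concatenates the old epoch's tail
    # with the new epoch's head, so callers always receive `batch_size` examples.
    #
    #   ds = _DataSet(images, labels, reshape=True, seed=42)
    #   for _ in range(ds.num_examples // 100 + 1):
    #       xs, ys = ds.next_batch(100)   # wraps transparently at the boundary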
@deprecated(__snake_case , "Please write your own downloading logic." )
def lowercase_ ( _lowerCamelCase: Optional[Any] , _lowerCamelCase: int , _lowerCamelCase: str ) -> str:
'''simple docstring'''
if not gfile.Exists(__snake_case ):
gfile.MakeDirs(__snake_case )
__lowerCamelCase : Optional[int] = os.path.join(__snake_case , __snake_case )
if not gfile.Exists(__snake_case ):
urllib.request.urlretrieve(__snake_case , __snake_case ) # noqa: S310
with gfile.GFile(__snake_case ) as f:
__lowerCamelCase : Any = f.size()
print("Successfully downloaded" , __snake_case , __snake_case , "bytes." )
return filepath
@deprecated(
__snake_case , "Please use alternatives such as:" " tensorflow_datasets.load(\'mnist\')" )
def lowercase_ ( _lowerCamelCase: Any , _lowerCamelCase: str=False , _lowerCamelCase: Dict=False , _lowerCamelCase: Dict=dtypes.floataa , _lowerCamelCase: Dict=True , _lowerCamelCase: Any=5000 , _lowerCamelCase: Union[str, Any]=None , _lowerCamelCase: Optional[Any]=DEFAULT_SOURCE_URL , ) -> int:
'''simple docstring'''
if fake_data:
def fake():
return _DataSet(
[] , [] , fake_data=__snake_case , one_hot=__snake_case , dtype=__snake_case , seed=__snake_case )
__lowerCamelCase : Tuple = fake()
__lowerCamelCase : Union[str, Any] = fake()
__lowerCamelCase : Union[str, Any] = fake()
return _Datasets(train=__snake_case , validation=__snake_case , test=__snake_case )
if not source_url: # empty string check
__lowerCamelCase : List[str] = DEFAULT_SOURCE_URL
__lowerCamelCase : List[str] = "train-images-idx3-ubyte.gz"
__lowerCamelCase : Optional[int] = "train-labels-idx1-ubyte.gz"
__lowerCamelCase : str = "t10k-images-idx3-ubyte.gz"
__lowerCamelCase : Union[str, Any] = "t10k-labels-idx1-ubyte.gz"
__lowerCamelCase : Union[str, Any] = _maybe_download(
__snake_case , __snake_case , source_url + train_images_file )
with gfile.Open(__snake_case , "rb" ) as f:
__lowerCamelCase : Tuple = _extract_images(__snake_case )
__lowerCamelCase : int = _maybe_download(
__snake_case , __snake_case , source_url + train_labels_file )
with gfile.Open(__snake_case , "rb" ) as f:
__lowerCamelCase : str = _extract_labels(__snake_case , one_hot=__snake_case )
__lowerCamelCase : List[Any] = _maybe_download(
__snake_case , __snake_case , source_url + test_images_file )
with gfile.Open(__snake_case , "rb" ) as f:
__lowerCamelCase : List[Any] = _extract_images(__snake_case )
__lowerCamelCase : Union[str, Any] = _maybe_download(
__snake_case , __snake_case , source_url + test_labels_file )
with gfile.Open(__snake_case , "rb" ) as f:
__lowerCamelCase : Optional[Any] = _extract_labels(__snake_case , one_hot=__snake_case )
if not 0 <= validation_size <= len(__snake_case ):
__lowerCamelCase : Union[str, Any] = (
"Validation size should be between 0 and "
F"""{len(__snake_case )}. Received: {validation_size}."""
)
raise ValueError(__snake_case )
__lowerCamelCase : Optional[Any] = train_images[:validation_size]
__lowerCamelCase : Optional[Any] = train_labels[:validation_size]
__lowerCamelCase : Optional[Any] = train_images[validation_size:]
__lowerCamelCase : int = train_labels[validation_size:]
__lowerCamelCase : Dict = {"dtype": dtype, "reshape": reshape, "seed": seed}
__lowerCamelCase : Any = _DataSet(__snake_case , __snake_case , **__snake_case )
__lowerCamelCase : Optional[int] = _DataSet(__snake_case , __snake_case , **__snake_case )
__lowerCamelCase : List[str] = _DataSet(__snake_case , __snake_case , **__snake_case )
    return _Datasets(train=__snake_case , validation=__snake_case , test=__snake_case )
| 646 |
import argparse
import requests
import torch
from PIL import Image
from transformers import CLIPProcessor, GroupViTConfig, GroupViTModel
def lowerCAmelCase__(__snake_case ) -> Dict:
'''simple docstring'''
if "img_encoder.pos_embed" in name:
lowerCamelCase__ = name.replace('''img_encoder.pos_embed''' ,'''vision_model.embeddings.position_embeddings''' )
if "img_encoder.patch_embed.proj" in name:
lowerCamelCase__ = name.replace('''img_encoder.patch_embed.proj''' ,'''vision_model.embeddings.patch_embeddings.projection''' )
if "img_encoder.patch_embed.norm" in name:
lowerCamelCase__ = name.replace('''img_encoder.patch_embed.norm''' ,'''vision_model.embeddings.layernorm''' )
if "img_encoder.layers" in name:
lowerCamelCase__ = name.replace('''img_encoder.layers''' ,'''vision_model.encoder.stages''' )
if "blocks" in name and "res" not in name:
lowerCamelCase__ = name.replace('''blocks''' ,'''layers''' )
if "attn" in name and "pre_assign" not in name:
lowerCamelCase__ = name.replace('''attn''' ,'''self_attn''' )
if "proj" in name and "self_attn" in name and "text" not in name:
lowerCamelCase__ = name.replace('''proj''' ,'''out_proj''' )
if "pre_assign_attn.attn.proj" in name:
lowerCamelCase__ = name.replace('''pre_assign_attn.attn.proj''' ,'''pre_assign_attn.attn.out_proj''' )
if "norm1" in name:
lowerCamelCase__ = name.replace('''norm1''' ,'''layer_norm1''' )
if "norm2" in name and "pre_assign" not in name:
lowerCamelCase__ = name.replace('''norm2''' ,'''layer_norm2''' )
if "img_encoder.norm" in name:
lowerCamelCase__ = name.replace('''img_encoder.norm''' ,'''vision_model.layernorm''' )
# text encoder
if "text_encoder.token_embedding" in name:
lowerCamelCase__ = name.replace('''text_encoder.token_embedding''' ,'''text_model.embeddings.token_embedding''' )
if "text_encoder.positional_embedding" in name:
lowerCamelCase__ = name.replace('''text_encoder.positional_embedding''' ,'''text_model.embeddings.position_embedding.weight''' )
if "text_encoder.transformer.resblocks." in name:
lowerCamelCase__ = name.replace('''text_encoder.transformer.resblocks.''' ,'''text_model.encoder.layers.''' )
if "ln_1" in name:
lowerCamelCase__ = name.replace('''ln_1''' ,'''layer_norm1''' )
if "ln_2" in name:
lowerCamelCase__ = name.replace('''ln_2''' ,'''layer_norm2''' )
if "c_fc" in name:
lowerCamelCase__ = name.replace('''c_fc''' ,'''fc1''' )
if "c_proj" in name:
lowerCamelCase__ = name.replace('''c_proj''' ,'''fc2''' )
if "text_encoder" in name:
lowerCamelCase__ = name.replace('''text_encoder''' ,'''text_model''' )
if "ln_final" in name:
lowerCamelCase__ = name.replace('''ln_final''' ,'''final_layer_norm''' )
# projection layers
if "img_projector.linear_hidden." in name:
lowerCamelCase__ = name.replace('''img_projector.linear_hidden.''' ,'''visual_projection.''' )
if "img_projector.linear_out." in name:
lowerCamelCase__ = name.replace('''img_projector.linear_out.''' ,'''visual_projection.3.''' )
if "text_projector.linear_hidden" in name:
lowerCamelCase__ = name.replace('''text_projector.linear_hidden''' ,'''text_projection''' )
if "text_projector.linear_out" in name:
lowerCamelCase__ = name.replace('''text_projector.linear_out''' ,'''text_projection.3''' )
return name
def lowerCAmelCase__(__snake_case ,__snake_case ) -> Optional[Any]:
'''simple docstring'''
for key in orig_state_dict.copy().keys():
lowerCamelCase__ = orig_state_dict.pop(__snake_case )
if "qkv" in key:
# weights and biases of the key, value and query projections of vision encoder's attention layers require special treatment:
# we need to split them up into separate matrices/vectors
lowerCamelCase__ = key.split('''.''' )
lowerCamelCase__ , lowerCamelCase__ = int(key_split[2] ), int(key_split[4] )
lowerCamelCase__ = config.vision_config.hidden_size
if "weight" in key:
lowerCamelCase__ = val[:dim, :]
lowerCamelCase__ = val[dim : dim * 2, :]
lowerCamelCase__ = val[-dim:, :]
else:
lowerCamelCase__ = val[:dim]
lowerCamelCase__ = val[dim : dim * 2]
lowerCamelCase__ = val[-dim:]
elif "in_proj" in key:
# weights and biases of the key, value and query projections of text encoder's attention layers require special treatment:
# we need to split them up into separate matrices/vectors
lowerCamelCase__ = key.split('''.''' )
lowerCamelCase__ = int(key_split[3] )
lowerCamelCase__ = config.text_config.hidden_size
if "weight" in key:
lowerCamelCase__ = val[:dim, :]
lowerCamelCase__ = val[
dim : dim * 2, :
]
lowerCamelCase__ = val[-dim:, :]
else:
lowerCamelCase__ = val[:dim]
lowerCamelCase__ = val[dim : dim * 2]
lowerCamelCase__ = val[-dim:]
else:
lowerCamelCase__ = rename_key(__snake_case )
# squeeze if necessary
if (
"text_projection.0" in new_name
or "text_projection.3" in new_name
or "visual_projection.0" in new_name
or "visual_projection.3" in new_name
):
lowerCamelCase__ = val.squeeze_()
else:
lowerCamelCase__ = val
return orig_state_dict
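# Concretely, a fused attention projection of shape (3 * dim, dim) is sliced into
# three (dim, dim) matrices for the query/key/value weights, and a (3 * dim,)
# bias into three (dim,) vectors -- these are the val[:dim], val[dim : dim * 2],
# and val[-dim:] slices taken above.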
def lowerCAmelCase__() -> int:
'''simple docstring'''
lowerCamelCase__ = '''http://images.cocodataset.org/val2017/000000039769.jpg'''
lowerCamelCase__ = Image.open(requests.get(__snake_case ,stream=__snake_case ).raw )
return im
@torch.no_grad()
def lowerCAmelCase__(__snake_case ,__snake_case ,__snake_case="groupvit-gcc-yfcc" ,__snake_case=False ) -> Dict:
'''simple docstring'''
lowerCamelCase__ = GroupViTConfig()
lowerCamelCase__ = GroupViTModel(__snake_case ).eval()
lowerCamelCase__ = torch.load(__snake_case ,map_location='''cpu''' )['''model''']
lowerCamelCase__ = convert_state_dict(__snake_case ,__snake_case )
lowerCamelCase__ , lowerCamelCase__ = model.load_state_dict(__snake_case ,strict=__snake_case )
assert missing_keys == ["text_model.embeddings.position_ids"]
assert (unexpected_keys == ["multi_label_logit_scale"]) or (len(__snake_case ) == 0)
# verify result
lowerCamelCase__ = CLIPProcessor.from_pretrained('''openai/clip-vit-base-patch32''' )
lowerCamelCase__ = prepare_img()
lowerCamelCase__ = processor(text=['''a photo of a cat''', '''a photo of a dog'''] ,images=__snake_case ,padding=__snake_case ,return_tensors='''pt''' )
with torch.no_grad():
lowerCamelCase__ = model(**__snake_case )
if model_name == "groupvit-gcc-yfcc":
lowerCamelCase__ = torch.tensor([[1_3.3_5_2_3, 6.3_6_2_9]] )
elif model_name == "groupvit-gcc-redcaps":
lowerCamelCase__ = torch.tensor([[1_6.1_8_7_3, 8.6_2_3_0]] )
else:
raise ValueError(F'Model name {model_name} not supported.' )
assert torch.allclose(outputs.logits_per_image ,__snake_case ,atol=1E-3 )
processor.save_pretrained(__snake_case )
model.save_pretrained(__snake_case )
print('''Successfully saved processor and model to''' ,__snake_case )
if push_to_hub:
print('''Pushing to the hub...''' )
processor.push_to_hub(__snake_case ,organization='''nielsr''' )
model.push_to_hub(__snake_case ,organization='''nielsr''' )
if __name__ == "__main__":
_a = argparse.ArgumentParser()
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to dump the processor and PyTorch model."
)
parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to GroupViT checkpoint")
parser.add_argument(
"--model_name",
default="groupvit-gccy-fcc",
type=str,
help="Name of the model. Expecting either 'groupvit-gcc-yfcc' or 'groupvit-gcc-redcaps'",
)
parser.add_argument(
"--push_to_hub",
action="store_true",
help="Whether or not to push the converted model and processor to the 🤗 hub using the provided `model_name`.",
)
_a = parser.parse_args()
convert_groupvit_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.model_name, args.push_to_hub)
| 481 | 0 |
def A_ ( lowercase_ ) ->bool:
"""simple docstring"""
return sum(i for i in range(1 , number // 2 + 1 ) if number % i == 0 ) == number
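# Worked examples: 6 (1 + 2 + 3) and 28 (1 + 2 + 4 + 7 + 14) are perfect, so
# perfect(6) and perfect(28) return True, while perfect(12) returns False since
# 1 + 2 + 3 + 4 + 6 = 16 != 12.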
if __name__ == "__main__":
print("Program to check whether a number is a Perfect number or not...")
__UpperCAmelCase = int(input("Enter number: ").strip())
print(f'{number} is {"" if perfect(number) else "not "}a Perfect Number.')
| 259 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
__UpperCAmelCase = {
"configuration_layoutlmv3": [
"LAYOUTLMV3_PRETRAINED_CONFIG_ARCHIVE_MAP",
"LayoutLMv3Config",
"LayoutLMv3OnnxConfig",
],
"processing_layoutlmv3": ["LayoutLMv3Processor"],
"tokenization_layoutlmv3": ["LayoutLMv3Tokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCAmelCase = ["LayoutLMv3TokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCAmelCase = [
"LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST",
"LayoutLMv3ForQuestionAnswering",
"LayoutLMv3ForSequenceClassification",
"LayoutLMv3ForTokenClassification",
"LayoutLMv3Model",
"LayoutLMv3PreTrainedModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCAmelCase = [
"TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFLayoutLMv3ForQuestionAnswering",
"TFLayoutLMv3ForSequenceClassification",
"TFLayoutLMv3ForTokenClassification",
"TFLayoutLMv3Model",
"TFLayoutLMv3PreTrainedModel",
]
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCAmelCase = ["LayoutLMv3FeatureExtractor"]
__UpperCAmelCase = ["LayoutLMv3ImageProcessor"]
if TYPE_CHECKING:
from .configuration_layoutlmva import (
LAYOUTLMV3_PRETRAINED_CONFIG_ARCHIVE_MAP,
LayoutLMvaConfig,
LayoutLMvaOnnxConfig,
)
from .processing_layoutlmva import LayoutLMvaProcessor
from .tokenization_layoutlmva import LayoutLMvaTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_layoutlmva_fast import LayoutLMvaTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_layoutlmva import (
LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST,
LayoutLMvaForQuestionAnswering,
LayoutLMvaForSequenceClassification,
LayoutLMvaForTokenClassification,
LayoutLMvaModel,
LayoutLMvaPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_layoutlmva import (
TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST,
TFLayoutLMvaForQuestionAnswering,
TFLayoutLMvaForSequenceClassification,
TFLayoutLMvaForTokenClassification,
TFLayoutLMvaModel,
TFLayoutLMvaPreTrainedModel,
)
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_layoutlmva import LayoutLMvaFeatureExtractor
from .image_processing_layoutlmva import LayoutLMvaImageProcessor
else:
import sys
__UpperCAmelCase = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 259 | 1 |
from datetime import datetime
import requests
from bsa import BeautifulSoup
if __name__ == "__main__":
lowerCamelCase__ = input("""Enter image url: """).strip()
print(F"""Downloading image from {url} ...""")
lowerCamelCase__ = BeautifulSoup(requests.get(url).content, """html.parser""")
# The image URL is in the content field of the first meta tag with property og:image
lowerCamelCase__ = soup.find("""meta""", {"""property""": """og:image"""})["""content"""]
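    # For context, the meta tag being read typically looks like this in the page
    # source (illustrative markup, not taken from the original):
    #   <meta property="og:image" content="https://example.com/photo.jpg" />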
lowerCamelCase__ = requests.get(image_url).content
lowerCamelCase__ = F"""{datetime.now():%Y-%m-%d_%H:%M:%S}.jpg"""
with open(file_name, """wb""") as fp:
fp.write(image_data)
print(F"""Done. Image saved to disk as {file_name}.""")
| 225 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCamelCase__ = logging.get_logger(__name__)
lowerCamelCase__ = {
"""uw-madison/mra-base-512-4""": """https://huggingface.co/uw-madison/mra-base-512-4/resolve/main/config.json""",
}
class SCREAMING_SNAKE_CASE ( lowerCamelCase__ ):
__lowerCamelCase : Tuple ='mra'
def __init__( self : Dict , __lowercase : int=50265 , __lowercase : Dict=768 , __lowercase : str=12 , __lowercase : str=12 , __lowercase : Tuple=3072 , __lowercase : Optional[Any]="gelu" , __lowercase : str=0.1 , __lowercase : Optional[int]=0.1 , __lowercase : int=512 , __lowercase : Union[str, Any]=1 , __lowercase : List[str]=0.02 , __lowercase : Dict=1E-5 , __lowercase : Any="absolute" , __lowercase : Dict=4 , __lowercase : List[Any]="full" , __lowercase : List[str]=0 , __lowercase : Tuple=0 , __lowercase : str=1 , __lowercase : Union[str, Any]=0 , __lowercase : Optional[Any]=2 , **__lowercase : Any , ):
'''simple docstring'''
super().__init__(pad_token_id=__lowercase , bos_token_id=__lowercase , eos_token_id=__lowercase , **__lowercase )
__a = vocab_size
__a = max_position_embeddings
__a = hidden_size
__a = num_hidden_layers
__a = num_attention_heads
__a = intermediate_size
__a = hidden_act
__a = hidden_dropout_prob
__a = attention_probs_dropout_prob
__a = initializer_range
__a = type_vocab_size
__a = layer_norm_eps
__a = position_embedding_type
__a = block_per_row
__a = approx_mode
__a = initial_prior_first_n_blocks
__a = initial_prior_diagonal_n_blocks
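# Minimal usage sketch (the un-obfuscated upstream class is `MraConfig`;
# treating the class above as a drop-in for it is an assumption):
#
#   config = MraConfig(vocab_size=50265, hidden_size=768, approx_mode="full")
#   assert config.num_attention_heads == 12   # default from the signature above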
| 225 | 1 |
import unittest
from diffusers.models.unet_ad_blocks import * # noqa F403
from diffusers.utils import torch_device
from .test_unet_blocks_common import UNetBlockTesterMixin
class A_ ( __UpperCamelCase , unittest.TestCase ):
'''simple docstring'''
__snake_case = DownBlockaD # noqa F405
__snake_case = """down"""
def _snake_case ( self: List[str] ):
__lowerCamelCase : List[Any] = [-0.0_2_3_2, -0.9_8_6_9, 0.8_0_5_4, -0.0_6_3_7, -0.1_6_8_8, -1.4_2_6_4, 0.4_4_7_0, -1.3_3_9_4, 0.0_9_0_4]
super().test_output(a )
class A_ ( __UpperCamelCase , unittest.TestCase ):
'''simple docstring'''
__snake_case = ResnetDownsampleBlockaD # noqa F405
__snake_case = """down"""
def _snake_case ( self: Dict ):
__lowerCamelCase : str = [0.0_7_1_0, 0.2_4_1_0, -0.7_3_2_0, -1.0_7_5_7, -1.1_3_4_3, 0.3_5_4_0, -0.0_1_3_3, -0.2_5_7_6, 0.0_9_4_8]
super().test_output(a )
class A_ ( __UpperCamelCase , unittest.TestCase ):
'''simple docstring'''
__snake_case = AttnDownBlockaD # noqa F405
__snake_case = """down"""
def _snake_case ( self: Optional[int] ):
__lowerCamelCase : List[str] = [0.0_6_3_6, 0.8_9_6_4, -0.6_2_3_4, -1.0_1_3_1, 0.0_8_4_4, 0.4_9_3_5, 0.3_4_3_7, 0.0_9_1_1, -0.2_9_5_7]
super().test_output(a )
class A_ ( __UpperCamelCase , unittest.TestCase ):
'''simple docstring'''
__snake_case = CrossAttnDownBlockaD # noqa F405
__snake_case = """down"""
def _snake_case ( self: Any ):
__lowerCamelCase : int = super().prepare_init_args_and_inputs_for_common()
__lowerCamelCase : Dict = 32
return init_dict, inputs_dict
def _snake_case ( self: Optional[Any] ):
__lowerCamelCase : int = [0.2_2_3_8, -0.7_3_9_6, -0.2_2_5_5, -0.3_8_2_9, 0.1_9_2_5, 1.1_6_6_5, 0.0_6_0_3, -0.7_2_9_5, 0.1_9_8_3]
super().test_output(a )
class A_ ( __UpperCamelCase , unittest.TestCase ):
'''simple docstring'''
__snake_case = SimpleCrossAttnDownBlockaD # noqa F405
__snake_case = """down"""
@property
def _snake_case ( self: Any ):
return super().get_dummy_input(include_encoder_hidden_states=a )
def _snake_case ( self: Any ):
__lowerCamelCase : List[Any] = super().prepare_init_args_and_inputs_for_common()
__lowerCamelCase : Optional[Any] = 32
return init_dict, inputs_dict
@unittest.skipIf(torch_device == 'mps' , 'MPS result is not consistent' )
def _snake_case ( self: List[Any] ):
__lowerCamelCase : List[str] = [0.7_9_2_1, -0.0_9_9_2, -0.1_9_6_2, -0.7_6_9_5, -0.4_2_4_2, 0.7_8_0_4, 0.4_7_3_7, 0.2_7_6_5, 0.3_3_3_8]
super().test_output(a )
class A_ ( __UpperCamelCase , unittest.TestCase ):
'''simple docstring'''
__snake_case = SkipDownBlockaD # noqa F405
__snake_case = """down"""
@property
def _snake_case ( self: Tuple ):
return super().get_dummy_input(include_skip_sample=a )
def _snake_case ( self: Optional[int] ):
__lowerCamelCase : Optional[Any] = [-0.0_8_4_5, -0.2_0_8_7, -0.2_4_6_5, 0.0_9_7_1, 0.1_9_0_0, -0.0_4_8_4, 0.2_6_6_4, 0.4_1_7_9, 0.5_0_6_9]
super().test_output(a )
class A_ ( __UpperCamelCase , unittest.TestCase ):
'''simple docstring'''
__snake_case = AttnSkipDownBlockaD # noqa F405
__snake_case = """down"""
@property
def _snake_case ( self: Any ):
return super().get_dummy_input(include_skip_sample=a )
def _snake_case ( self: Tuple ):
__lowerCamelCase : str = [0.5_5_3_9, 0.1_6_0_9, 0.4_9_2_4, 0.0_5_3_7, -0.1_9_9_5, 0.4_0_5_0, 0.0_9_7_9, -0.2_7_2_1, -0.0_6_4_2]
super().test_output(a )
class A_ ( __UpperCamelCase , unittest.TestCase ):
'''simple docstring'''
__snake_case = DownEncoderBlockaD # noqa F405
__snake_case = """down"""
@property
def _snake_case ( self: Optional[Any] ):
return super().get_dummy_input(include_temb=a )
def _snake_case ( self: Tuple ):
__lowerCamelCase : str = {
'in_channels': 32,
'out_channels': 32,
}
__lowerCamelCase : Optional[Any] = self.dummy_input
return init_dict, inputs_dict
def _snake_case ( self: Optional[Any] ):
__lowerCamelCase : Tuple = [1.1_1_0_2, 0.5_3_0_2, 0.4_8_7_2, -0.0_0_2_3, -0.8_0_4_2, 0.0_4_8_3, -0.3_4_8_9, -0.5_6_3_2, 0.7_6_2_6]
super().test_output(a )
class A_ ( __UpperCamelCase , unittest.TestCase ):
'''simple docstring'''
__snake_case = AttnDownEncoderBlockaD # noqa F405
__snake_case = """down"""
@property
def _snake_case ( self: Union[str, Any] ):
return super().get_dummy_input(include_temb=a )
def _snake_case ( self: List[str] ):
__lowerCamelCase : Tuple = {
'in_channels': 32,
'out_channels': 32,
}
__lowerCamelCase : Union[str, Any] = self.dummy_input
return init_dict, inputs_dict
def _snake_case ( self: List[str] ):
__lowerCamelCase : List[Any] = [0.8_9_6_6, -0.1_4_8_6, 0.8_5_6_8, 0.8_1_4_1, -0.9_0_4_6, -0.1_3_4_2, -0.0_9_7_2, -0.7_4_1_7, 0.1_5_3_8]
super().test_output(a )
class A_ ( __UpperCamelCase , unittest.TestCase ):
'''simple docstring'''
__snake_case = UNetMidBlockaD # noqa F405
__snake_case = """mid"""
def _snake_case ( self: Any ):
__lowerCamelCase : List[str] = {
'in_channels': 32,
'temb_channels': 128,
}
__lowerCamelCase : Optional[int] = self.dummy_input
return init_dict, inputs_dict
def _snake_case ( self: Optional[Any] ):
__lowerCamelCase : Union[str, Any] = [-0.1_0_6_2, 1.7_2_4_8, 0.3_4_9_4, 1.4_5_6_9, -0.0_9_1_0, -1.2_4_2_1, -0.9_9_8_4, 0.6_7_3_6, 1.0_0_2_8]
super().test_output(a )
class A_ ( __UpperCamelCase , unittest.TestCase ):
'''simple docstring'''
__snake_case = UNetMidBlockaDCrossAttn # noqa F405
__snake_case = """mid"""
def _snake_case ( self: Optional[int] ):
__lowerCamelCase : Optional[Any] = super().prepare_init_args_and_inputs_for_common()
__lowerCamelCase : Tuple = 32
return init_dict, inputs_dict
def _snake_case ( self: Tuple ):
__lowerCamelCase : Union[str, Any] = [0.0_1_8_7, 2.4_2_2_0, 0.4_4_8_4, 1.1_2_0_3, -0.6_1_2_1, -1.5_1_2_2, -0.8_2_7_0, 0.7_8_5_1, 1.8_3_3_5]
super().test_output(a )
class A_ ( __UpperCamelCase , unittest.TestCase ):
'''simple docstring'''
__snake_case = UNetMidBlockaDSimpleCrossAttn # noqa F405
__snake_case = """mid"""
@property
def _snake_case ( self: List[Any] ):
return super().get_dummy_input(include_encoder_hidden_states=a )
def _snake_case ( self: Tuple ):
__lowerCamelCase : Union[str, Any] = super().prepare_init_args_and_inputs_for_common()
__lowerCamelCase : Tuple = 32
return init_dict, inputs_dict
def _snake_case ( self: int ):
__lowerCamelCase : Optional[int] = [0.7_1_4_3, 1.9_9_7_4, 0.5_4_4_8, 1.3_9_7_7, 0.1_2_8_2, -1.1_2_3_7, -1.4_2_3_8, 0.5_5_3_0, 0.8_8_8_0]
super().test_output(a )
class A_ ( __UpperCamelCase , unittest.TestCase ):
'''simple docstring'''
__snake_case = UpBlockaD # noqa F405
__snake_case = """up"""
@property
def _snake_case ( self: str ):
return super().get_dummy_input(include_res_hidden_states_tuple=a )
def _snake_case ( self: List[str] ):
__lowerCamelCase : Optional[int] = [-0.2_0_4_1, -0.4_1_6_5, -0.3_0_2_2, 0.0_0_4_1, -0.6_6_2_8, -0.7_0_5_3, 0.1_9_2_8, -0.0_3_2_5, 0.0_5_2_3]
super().test_output(a )
class A_ ( __UpperCamelCase , unittest.TestCase ):
'''simple docstring'''
__snake_case = ResnetUpsampleBlockaD # noqa F405
__snake_case = """up"""
@property
def _snake_case ( self: Optional[int] ):
return super().get_dummy_input(include_res_hidden_states_tuple=a )
def _snake_case ( self: Optional[int] ):
__lowerCamelCase : Tuple = [0.2_2_8_7, 0.3_5_4_9, -0.1_3_4_6, 0.4_7_9_7, -0.1_7_1_5, -0.9_6_4_9, 0.7_3_0_5, -0.5_8_6_4, -0.6_2_4_4]
super().test_output(a )
class A_ ( __UpperCamelCase , unittest.TestCase ):
'''simple docstring'''
__snake_case = CrossAttnUpBlockaD # noqa F405
__snake_case = """up"""
@property
def _snake_case ( self: Any ):
return super().get_dummy_input(include_res_hidden_states_tuple=a )
def _snake_case ( self: Any ):
__lowerCamelCase : str = super().prepare_init_args_and_inputs_for_common()
__lowerCamelCase : Tuple = 32
return init_dict, inputs_dict
def _snake_case ( self: Union[str, Any] ):
__lowerCamelCase : Tuple = [-0.1_4_0_3, -0.3_5_1_5, -0.0_4_2_0, -0.1_4_2_5, 0.3_1_6_7, 0.5_0_9_4, -0.2_1_8_1, 0.5_9_3_1, 0.5_5_8_2]
super().test_output(a )
class A_ ( __UpperCamelCase , unittest.TestCase ):
'''simple docstring'''
__snake_case = SimpleCrossAttnUpBlockaD # noqa F405
__snake_case = """up"""
@property
def _snake_case ( self: str ):
return super().get_dummy_input(include_res_hidden_states_tuple=a , include_encoder_hidden_states=a )
def _snake_case ( self: Dict ):
__lowerCamelCase : Optional[Any] = super().prepare_init_args_and_inputs_for_common()
__lowerCamelCase : Optional[int] = 32
return init_dict, inputs_dict
def _snake_case ( self: Dict ):
__lowerCamelCase : Optional[Any] = [0.2_6_4_5, 0.1_4_8_0, 0.0_9_0_9, 0.8_0_4_4, -0.9_7_5_8, -0.9_0_8_3, 0.0_9_9_4, -1.1_4_5_3, -0.7_4_0_2]
super().test_output(a )
class A_ ( __UpperCamelCase , unittest.TestCase ):
'''simple docstring'''
__snake_case = AttnUpBlockaD # noqa F405
__snake_case = """up"""
@property
def _snake_case ( self: Optional[Any] ):
return super().get_dummy_input(include_res_hidden_states_tuple=a )
@unittest.skipIf(torch_device == 'mps' , 'MPS result is not consistent' )
def _snake_case ( self: Any ):
__lowerCamelCase : Optional[int] = [0.0_9_7_9, 0.1_3_2_6, 0.0_0_2_1, 0.0_6_5_9, 0.2_2_4_9, 0.0_0_5_9, 0.1_1_3_2, 0.5_9_5_2, 0.1_0_3_3]
super().test_output(a )
class A_ ( __UpperCamelCase , unittest.TestCase ):
'''simple docstring'''
__snake_case = SkipUpBlockaD # noqa F405
__snake_case = """up"""
@property
def _snake_case ( self: Optional[int] ):
return super().get_dummy_input(include_res_hidden_states_tuple=a )
def _snake_case ( self: List[str] ):
__lowerCamelCase : Any = [-0.0_8_9_3, -0.1_2_3_4, -0.1_5_0_6, -0.0_3_3_2, 0.0_1_2_3, -0.0_2_1_1, 0.0_5_6_6, 0.0_1_4_3, 0.0_3_6_2]
super().test_output(a )
class A_ ( __UpperCamelCase , unittest.TestCase ):
'''simple docstring'''
__snake_case = AttnSkipUpBlockaD # noqa F405
__snake_case = """up"""
@property
def _snake_case ( self: Tuple ):
return super().get_dummy_input(include_res_hidden_states_tuple=a )
def _snake_case ( self: Any ):
__lowerCamelCase : Dict = [0.0_3_6_1, 0.0_6_1_7, 0.2_7_8_7, -0.0_3_5_0, 0.0_3_4_2, 0.3_4_2_1, -0.0_8_4_3, 0.0_9_1_3, 0.3_0_1_5]
super().test_output(a )
class A_ ( __UpperCamelCase , unittest.TestCase ):
'''simple docstring'''
__snake_case = UpDecoderBlockaD # noqa F405
__snake_case = """up"""
@property
def _snake_case ( self: Optional[int] ):
return super().get_dummy_input(include_temb=a )
def _snake_case ( self: List[str] ):
__lowerCamelCase : int = {'in_channels': 32, 'out_channels': 32}
__lowerCamelCase : Any = self.dummy_input
return init_dict, inputs_dict
def _snake_case ( self: Tuple ):
__lowerCamelCase : str = [0.4_4_0_4, 0.1_9_9_8, -0.9_8_8_6, -0.3_3_2_0, -0.3_1_2_8, -0.7_0_3_4, -0.6_9_5_5, -0.2_3_3_8, -0.3_1_3_7]
super().test_output(a )
class A_ ( __UpperCamelCase , unittest.TestCase ):
'''simple docstring'''
__snake_case = AttnUpDecoderBlockaD # noqa F405
__snake_case = """up"""
@property
def _snake_case ( self: Optional[Any] ):
return super().get_dummy_input(include_temb=a )
def _snake_case ( self: str ):
__lowerCamelCase : Optional[int] = {'in_channels': 32, 'out_channels': 32}
__lowerCamelCase : Optional[Any] = self.dummy_input
return init_dict, inputs_dict
def _snake_case ( self: List[str] ):
__lowerCamelCase : List[Any] = [0.6_7_3_8, 0.4_4_9_1, 0.1_0_5_5, 1.0_7_1_0, 0.7_3_1_6, 0.3_3_3_9, 0.3_3_5_2, 0.1_0_2_3, 0.3_5_6_8]
super().test_output(a )
| 713 |
from dataclasses import dataclass
from typing import Optional
import numpy as np
import torch
import torch.nn as nn
from ..utils import BaseOutput, is_torch_version, randn_tensor
from .attention_processor import SpatialNorm
from .unet_ad_blocks import UNetMidBlockaD, get_down_block, get_up_block
@dataclass
class A_ ( __UpperCamelCase ):
'''simple docstring'''
__snake_case = 42
class A_ ( nn.Module ):
'''simple docstring'''
def __init__( self: List[Any] , a: Optional[Any]=3 , a: Tuple=3 , a: str=("DownEncoderBlock2D",) , a: str=(64,) , a: Optional[int]=2 , a: int=32 , a: str="silu" , a: Optional[Any]=True , ):
super().__init__()
__lowerCamelCase : int = layers_per_block
__lowerCamelCase : List[Any] = torch.nn.Convad(
a , block_out_channels[0] , kernel_size=3 , stride=1 , padding=1 , )
__lowerCamelCase : Tuple = None
__lowerCamelCase : Dict = nn.ModuleList([] )
# down
__lowerCamelCase : Optional[int] = block_out_channels[0]
for i, down_block_type in enumerate(a ):
__lowerCamelCase : str = output_channel
__lowerCamelCase : Optional[int] = block_out_channels[i]
__lowerCamelCase : Dict = i == len(a ) - 1
__lowerCamelCase : List[Any] = get_down_block(
a , num_layers=self.layers_per_block , in_channels=a , out_channels=a , add_downsample=not is_final_block , resnet_eps=1e-6 , downsample_padding=0 , resnet_act_fn=a , resnet_groups=a , attention_head_dim=a , temb_channels=a , )
self.down_blocks.append(a )
# mid
__lowerCamelCase : Optional[Any] = UNetMidBlockaD(
in_channels=block_out_channels[-1] , resnet_eps=1e-6 , resnet_act_fn=a , output_scale_factor=1 , resnet_time_scale_shift='default' , attention_head_dim=block_out_channels[-1] , resnet_groups=a , temb_channels=a , )
# out
__lowerCamelCase : str = nn.GroupNorm(num_channels=block_out_channels[-1] , num_groups=a , eps=1e-6 )
__lowerCamelCase : Optional[Any] = nn.SiLU()
__lowerCamelCase : int = 2 * out_channels if double_z else out_channels
__lowerCamelCase : Tuple = nn.Convad(block_out_channels[-1] , a , 3 , padding=1 )
__lowerCamelCase : List[Any] = False
def _snake_case ( self: List[str] , a: List[Any] ):
__lowerCamelCase : List[str] = x
__lowerCamelCase : Dict = self.conv_in(a )
if self.training and self.gradient_checkpointing:
def create_custom_forward(a: int ):
def custom_forward(*a: Optional[Any] ):
return module(*a )
return custom_forward
# down
if is_torch_version('>=' , '1.11.0' ):
for down_block in self.down_blocks:
__lowerCamelCase : Union[str, Any] = torch.utils.checkpoint.checkpoint(
create_custom_forward(a ) , a , use_reentrant=a )
# middle
__lowerCamelCase : Any = torch.utils.checkpoint.checkpoint(
create_custom_forward(self.mid_block ) , a , use_reentrant=a )
else:
for down_block in self.down_blocks:
__lowerCamelCase : Any = torch.utils.checkpoint.checkpoint(create_custom_forward(a ) , a )
# middle
__lowerCamelCase : int = torch.utils.checkpoint.checkpoint(create_custom_forward(self.mid_block ) , a )
else:
# down
for down_block in self.down_blocks:
__lowerCamelCase : List[Any] = down_block(a )
# middle
__lowerCamelCase : Union[str, Any] = self.mid_block(a )
# post-process
__lowerCamelCase : Tuple = self.conv_norm_out(a )
__lowerCamelCase : List[str] = self.conv_act(a )
__lowerCamelCase : int = self.conv_out(a )
return sample
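# Rough shape walk-through for the encoder above (values illustrative): with
# block_out_channels=(64, 128) and double_z=True, a (1, 3, 64, 64) input is
# downsampled once (every block except the final one downsamples), passed through
# the mid block, and projected to (1, 2 * out_channels, 32, 32); the doubled
# channel dimension carries the mean/logvar halves consumed by the
# diagonal-Gaussian posterior defined further down in this file.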
class A_ ( nn.Module ):
'''simple docstring'''
def __init__( self: int , a: List[str]=3 , a: Tuple=3 , a: str=("UpDecoderBlock2D",) , a: Union[str, Any]=(64,) , a: Optional[Any]=2 , a: Optional[Any]=32 , a: str="silu" , a: Union[str, Any]="group" , ):
super().__init__()
__lowerCamelCase : List[Any] = layers_per_block
__lowerCamelCase : Any = nn.Convad(
a , block_out_channels[-1] , kernel_size=3 , stride=1 , padding=1 , )
__lowerCamelCase : Tuple = None
__lowerCamelCase : int = nn.ModuleList([] )
__lowerCamelCase : Optional[Any] = in_channels if norm_type == 'spatial' else None
# mid
__lowerCamelCase : List[Any] = UNetMidBlockaD(
in_channels=block_out_channels[-1] , resnet_eps=1e-6 , resnet_act_fn=a , output_scale_factor=1 , resnet_time_scale_shift='default' if norm_type == 'group' else norm_type , attention_head_dim=block_out_channels[-1] , resnet_groups=a , temb_channels=a , )
# up
__lowerCamelCase : Any = list(reversed(a ) )
__lowerCamelCase : Dict = reversed_block_out_channels[0]
for i, up_block_type in enumerate(a ):
__lowerCamelCase : List[Any] = output_channel
__lowerCamelCase : List[str] = reversed_block_out_channels[i]
__lowerCamelCase : Optional[Any] = i == len(a ) - 1
__lowerCamelCase : Optional[Any] = get_up_block(
a , num_layers=self.layers_per_block + 1 , in_channels=a , out_channels=a , prev_output_channel=a , add_upsample=not is_final_block , resnet_eps=1e-6 , resnet_act_fn=a , resnet_groups=a , attention_head_dim=a , temb_channels=a , resnet_time_scale_shift=a , )
self.up_blocks.append(a )
__lowerCamelCase : List[str] = output_channel
# out
if norm_type == "spatial":
__lowerCamelCase : int = SpatialNorm(block_out_channels[0] , a )
else:
__lowerCamelCase : Optional[Any] = nn.GroupNorm(num_channels=block_out_channels[0] , num_groups=a , eps=1e-6 )
__lowerCamelCase : Union[str, Any] = nn.SiLU()
__lowerCamelCase : List[Any] = nn.Convad(block_out_channels[0] , a , 3 , padding=1 )
__lowerCamelCase : List[str] = False
def _snake_case ( self: Optional[int] , a: Tuple , a: List[str]=None ):
__lowerCamelCase : List[str] = z
__lowerCamelCase : Union[str, Any] = self.conv_in(a )
__lowerCamelCase : List[str] = next(iter(self.up_blocks.parameters() ) ).dtype
if self.training and self.gradient_checkpointing:
def create_custom_forward(a: Any ):
def custom_forward(*a: str ):
return module(*a )
return custom_forward
if is_torch_version('>=' , '1.11.0' ):
# middle
__lowerCamelCase : Union[str, Any] = torch.utils.checkpoint.checkpoint(
create_custom_forward(self.mid_block ) , a , a , use_reentrant=a )
__lowerCamelCase : str = sample.to(a )
# up
for up_block in self.up_blocks:
__lowerCamelCase : Union[str, Any] = torch.utils.checkpoint.checkpoint(
create_custom_forward(a ) , a , a , use_reentrant=a )
else:
# middle
__lowerCamelCase : Dict = torch.utils.checkpoint.checkpoint(
create_custom_forward(self.mid_block ) , a , a )
__lowerCamelCase : int = sample.to(a )
# up
for up_block in self.up_blocks:
__lowerCamelCase : List[str] = torch.utils.checkpoint.checkpoint(create_custom_forward(a ) , a , a )
else:
# middle
__lowerCamelCase : int = self.mid_block(a , a )
__lowerCamelCase : List[str] = sample.to(a )
# up
for up_block in self.up_blocks:
__lowerCamelCase : List[str] = up_block(a , a )
# post-process
if latent_embeds is None:
__lowerCamelCase : Optional[int] = self.conv_norm_out(a )
else:
__lowerCamelCase : Dict = self.conv_norm_out(a , a )
__lowerCamelCase : Any = self.conv_act(a )
__lowerCamelCase : str = self.conv_out(a )
return sample
class A_ ( nn.Module ):
'''simple docstring'''
def __init__( self: Optional[int] , a: List[Any] , a: List[Any] , a: List[Any] , a: Tuple=None , a: Tuple="random" , a: List[Any]=False , a: List[str]=True ):
super().__init__()
__lowerCamelCase : Optional[Any] = n_e
__lowerCamelCase : Optional[int] = vq_embed_dim
__lowerCamelCase : Tuple = beta
__lowerCamelCase : List[str] = legacy
__lowerCamelCase : str = nn.Embedding(self.n_e , self.vq_embed_dim )
self.embedding.weight.data.uniform_(-1.0 / self.n_e , 1.0 / self.n_e )
__lowerCamelCase : str = remap
if self.remap is not None:
self.register_buffer('used' , torch.tensor(np.load(self.remap ) ) )
__lowerCamelCase : Dict = self.used.shape[0]
__lowerCamelCase : Optional[Any] = unknown_index # "random" or "extra" or integer
if self.unknown_index == "extra":
__lowerCamelCase : Any = self.re_embed
__lowerCamelCase : Optional[int] = self.re_embed + 1
print(
F'Remapping {self.n_e} indices to {self.re_embed} indices. '
F'Using {self.unknown_index} for unknown indices.' )
else:
__lowerCamelCase : int = n_e
__lowerCamelCase : Optional[Any] = sane_index_shape
def _snake_case ( self: Tuple , a: Union[str, Any] ):
__lowerCamelCase : Optional[Any] = inds.shape
assert len(a ) > 1
__lowerCamelCase : List[Any] = inds.reshape(ishape[0] , -1 )
__lowerCamelCase : Any = self.used.to(a )
__lowerCamelCase : Union[str, Any] = (inds[:, :, None] == used[None, None, ...]).long()
__lowerCamelCase : Dict = match.argmax(-1 )
__lowerCamelCase : List[Any] = match.sum(2 ) < 1
if self.unknown_index == "random":
__lowerCamelCase : Tuple = torch.randint(0 , self.re_embed , size=new[unknown].shape ).to(device=new.device )
else:
__lowerCamelCase : str = self.unknown_index
return new.reshape(a )
def _snake_case ( self: Tuple , a: Optional[int] ):
__lowerCamelCase : List[Any] = inds.shape
assert len(a ) > 1
__lowerCamelCase : Optional[int] = inds.reshape(ishape[0] , -1 )
__lowerCamelCase : Union[str, Any] = self.used.to(a )
if self.re_embed > self.used.shape[0]: # extra token
__lowerCamelCase : Optional[Any] = 0 # simply set to zero
__lowerCamelCase : Optional[Any] = torch.gather(used[None, :][inds.shape[0] * [0], :] , 1 , a )
return back.reshape(a )
def _snake_case ( self: int , a: List[str] ):
# reshape z -> (batch, height, width, channel) and flatten
__lowerCamelCase : Union[str, Any] = z.permute(0 , 2 , 3 , 1 ).contiguous()
__lowerCamelCase : List[Any] = z.view(-1 , self.vq_embed_dim )
# distances from z to embeddings e_j (z - e)^2 = z^2 + e^2 - 2 e * z
__lowerCamelCase : int = torch.argmin(torch.cdist(a , self.embedding.weight ) , dim=1 )
__lowerCamelCase : str = self.embedding(a ).view(z.shape )
__lowerCamelCase : str = None
__lowerCamelCase : Any = None
# compute loss for embedding
if not self.legacy:
__lowerCamelCase : int = self.beta * torch.mean((z_q.detach() - z) ** 2 ) + torch.mean((z_q - z.detach()) ** 2 )
else:
__lowerCamelCase : List[Any] = torch.mean((z_q.detach() - z) ** 2 ) + self.beta * torch.mean((z_q - z.detach()) ** 2 )
# preserve gradients
__lowerCamelCase : int = z + (z_q - z).detach()
# reshape back to match original input shape
__lowerCamelCase : Any = z_q.permute(0 , 3 , 1 , 2 ).contiguous()
if self.remap is not None:
__lowerCamelCase : Optional[Any] = min_encoding_indices.reshape(z.shape[0] , -1 ) # add batch axis
__lowerCamelCase : Optional[Any] = self.remap_to_used(a )
__lowerCamelCase : Dict = min_encoding_indices.reshape(-1 , 1 ) # flatten
if self.sane_index_shape:
__lowerCamelCase : str = min_encoding_indices.reshape(z_q.shape[0] , z_q.shape[2] , z_q.shape[3] )
return z_q, loss, (perplexity, min_encodings, min_encoding_indices)
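    # The `z + (z_q - z).detach()` line above is the straight-through estimator:
    # the forward pass emits the quantized codes z_q while gradients flow back
    # to z as if quantization were the identity. Tiny illustration (constructor
    # keyword names follow the un-obfuscated upstream VectorQuantizer and are
    # assumptions here):
    #
    #   z = torch.randn(1, 4, 8, 8, requires_grad=True)
    #   vq = VectorQuantizer(n_e=512, vq_embed_dim=4, beta=0.25, legacy=False)
    #   z_q, loss, _ = vq(z)
    #   z_q.sum().backward()   # z.grad is populated via the straight-through path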
def _snake_case ( self: Tuple , a: Optional[int] , a: Any ):
# shape specifying (batch, height, width, channel)
if self.remap is not None:
__lowerCamelCase : Any = indices.reshape(shape[0] , -1 ) # add batch axis
__lowerCamelCase : Any = self.unmap_to_all(a )
__lowerCamelCase : int = indices.reshape(-1 ) # flatten again
# get quantized latent vectors
__lowerCamelCase : str = self.embedding(a )
if shape is not None:
__lowerCamelCase : str = z_q.view(a )
# reshape back to match original input shape
__lowerCamelCase : Optional[Any] = z_q.permute(0 , 3 , 1 , 2 ).contiguous()
return z_q
class A_ ( __UpperCamelCase ):
'''simple docstring'''
def __init__( self: str , a: Dict , a: Any=False ):
__lowerCamelCase : Tuple = parameters
__lowerCamelCase , __lowerCamelCase : Any = torch.chunk(a , 2 , dim=1 )
__lowerCamelCase : List[str] = torch.clamp(self.logvar , -3_0.0 , 2_0.0 )
__lowerCamelCase : int = deterministic
__lowerCamelCase : Dict = torch.exp(0.5 * self.logvar )
__lowerCamelCase : str = torch.exp(self.logvar )
if self.deterministic:
__lowerCamelCase : Optional[Any] = torch.zeros_like(
self.mean , device=self.parameters.device , dtype=self.parameters.dtype )
def _snake_case ( self: Union[str, Any] , a: Optional[torch.Generator] = None ):
# make sure sample is on the same device as the parameters and has same dtype
__lowerCamelCase : Union[str, Any] = randn_tensor(
self.mean.shape , generator=a , device=self.parameters.device , dtype=self.parameters.dtype )
__lowerCamelCase : str = self.mean + self.std * sample
return x
def _snake_case ( self: List[str] , a: Union[str, Any]=None ):
if self.deterministic:
return torch.Tensor([0.0] )
else:
if other is None:
return 0.5 * torch.sum(torch.pow(self.mean , 2 ) + self.var - 1.0 - self.logvar , dim=[1, 2, 3] )
else:
return 0.5 * torch.sum(
torch.pow(self.mean - other.mean , 2 ) / other.var
+ self.var / other.var
- 1.0
- self.logvar
+ other.logvar , dim=[1, 2, 3] , )
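    # The branch above is the closed-form KL divergence for diagonal Gaussians:
    # against the standard normal, KL = 0.5 * sum(mean^2 + var - 1 - logvar);
    # against another diagonal Gaussian `other`, the general two-Gaussian form.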
def _snake_case ( self: Optional[Any] , a: str , a: Any=[1, 2, 3] ):
if self.deterministic:
return torch.Tensor([0.0] )
__lowerCamelCase : int = np.log(2.0 * np.pi )
return 0.5 * torch.sum(logtwopi + self.logvar + torch.pow(sample - self.mean , 2 ) / self.var , dim=a )
def _snake_case ( self: Optional[int] ):
return self.mean
| 230 | 0 |
'''simple docstring'''
from dataclasses import dataclass
from typing import List, Optional, Union
import numpy as np
import PIL
from PIL import Image
from ...utils import (
BaseOutput,
OptionalDependencyNotAvailable,
is_flax_available,
is_k_diffusion_available,
is_k_diffusion_version,
is_onnx_available,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
@dataclass
class __a ( _snake_case ):
__UpperCamelCase : Union[List[PIL.Image.Image], np.ndarray]
__UpperCamelCase : Optional[List[bool]]
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .pipeline_cycle_diffusion import CycleDiffusionPipeline
from .pipeline_stable_diffusion import StableDiffusionPipeline
from .pipeline_stable_diffusion_attend_and_excite import StableDiffusionAttendAndExcitePipeline
from .pipeline_stable_diffusion_imgaimg import StableDiffusionImgaImgPipeline
from .pipeline_stable_diffusion_inpaint import StableDiffusionInpaintPipeline
from .pipeline_stable_diffusion_inpaint_legacy import StableDiffusionInpaintPipelineLegacy
from .pipeline_stable_diffusion_instruct_pixapix import StableDiffusionInstructPixaPixPipeline
from .pipeline_stable_diffusion_latent_upscale import StableDiffusionLatentUpscalePipeline
from .pipeline_stable_diffusion_ldmad import StableDiffusionLDMaDPipeline
from .pipeline_stable_diffusion_model_editing import StableDiffusionModelEditingPipeline
from .pipeline_stable_diffusion_panorama import StableDiffusionPanoramaPipeline
from .pipeline_stable_diffusion_paradigms import StableDiffusionParadigmsPipeline
from .pipeline_stable_diffusion_sag import StableDiffusionSAGPipeline
from .pipeline_stable_diffusion_upscale import StableDiffusionUpscalePipeline
from .pipeline_stable_unclip import StableUnCLIPPipeline
from .pipeline_stable_unclip_imgaimg import StableUnCLIPImgaImgPipeline
from .safety_checker import StableDiffusionSafetyChecker
from .stable_unclip_image_normalizer import StableUnCLIPImageNormalizer
try:
if not (is_transformers_available() and is_torch_available() and is_transformers_version(">=", "4.25.0")):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import StableDiffusionImageVariationPipeline
else:
from .pipeline_stable_diffusion_image_variation import StableDiffusionImageVariationPipeline
try:
if not (is_transformers_available() and is_torch_available() and is_transformers_version(">=", "4.26.0")):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import (
StableDiffusionDepthaImgPipeline,
StableDiffusionDiffEditPipeline,
StableDiffusionPixaPixZeroPipeline,
)
else:
from .pipeline_stable_diffusion_depthaimg import StableDiffusionDepthaImgPipeline
from .pipeline_stable_diffusion_diffedit import StableDiffusionDiffEditPipeline
from .pipeline_stable_diffusion_pixapix_zero import StableDiffusionPixaPixZeroPipeline
try:
if not (
is_torch_available()
and is_transformers_available()
and is_k_diffusion_available()
and is_k_diffusion_version(">=", "0.0.12")
):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_and_k_diffusion_objects import * # noqa F403
else:
from .pipeline_stable_diffusion_k_diffusion import StableDiffusionKDiffusionPipeline
try:
if not (is_transformers_available() and is_onnx_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_onnx_objects import * # noqa F403
else:
from .pipeline_onnx_stable_diffusion import OnnxStableDiffusionPipeline, StableDiffusionOnnxPipeline
from .pipeline_onnx_stable_diffusion_imgaimg import OnnxStableDiffusionImgaImgPipeline
from .pipeline_onnx_stable_diffusion_inpaint import OnnxStableDiffusionInpaintPipeline
from .pipeline_onnx_stable_diffusion_inpaint_legacy import OnnxStableDiffusionInpaintPipelineLegacy
from .pipeline_onnx_stable_diffusion_upscale import OnnxStableDiffusionUpscalePipeline
if is_transformers_available() and is_flax_available():
import flax
@flax.struct.dataclass
class __a ( _snake_case ):
__UpperCamelCase : np.ndarray
__UpperCamelCase : List[bool]
from ...schedulers.scheduling_pndm_flax import PNDMSchedulerState
from .pipeline_flax_stable_diffusion import FlaxStableDiffusionPipeline
from .pipeline_flax_stable_diffusion_imgaimg import FlaxStableDiffusionImgaImgPipeline
from .pipeline_flax_stable_diffusion_inpaint import FlaxStableDiffusionInpaintPipeline
from .safety_checker_flax import FlaxStableDiffusionSafetyChecker
| 109 |
from .imports import is_rich_available
if is_rich_available():
from rich.traceback import install
install(show_locals=False)
else:
raise ModuleNotFoundError('''To use the rich extension, install rich with `pip install rich`''')
| 36 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_A : int = {
'''configuration_nezha''': ['''NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''NezhaConfig'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_A : int = [
'''NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''NezhaForNextSentencePrediction''',
'''NezhaForMaskedLM''',
'''NezhaForPreTraining''',
'''NezhaForMultipleChoice''',
'''NezhaForQuestionAnswering''',
'''NezhaForSequenceClassification''',
'''NezhaForTokenClassification''',
'''NezhaModel''',
'''NezhaPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_nezha import NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP, NezhaConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_nezha import (
NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST,
NezhaForMaskedLM,
NezhaForMultipleChoice,
NezhaForNextSentencePrediction,
NezhaForPreTraining,
NezhaForQuestionAnswering,
NezhaForSequenceClassification,
NezhaForTokenClassification,
NezhaModel,
NezhaPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _A, module_spec=__spec__)
| 330 | '''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_A : List[str] = logging.get_logger(__name__)
_A : Any = {
'''facebook/nllb-moe-54B''': '''https://huggingface.co/facebook/nllb-moe-54b/resolve/main/config.json''',
}
class NllbMoeConfig ( PretrainedConfig ):
    '''simple docstring'''

    model_type = """nllb-moe"""
    keys_to_ignore_at_inference = ["""past_key_values"""]
    attribute_map = {"""num_attention_heads""": """encoder_attention_heads""", """hidden_size""": """d_model"""}

    def __init__( self , vocab_size=12_81_12 , max_position_embeddings=10_24 , encoder_layers=12 , encoder_ffn_dim=40_96 , encoder_attention_heads=16 , decoder_layers=12 , decoder_ffn_dim=40_96 , decoder_attention_heads=16 , encoder_layerdrop=0.0_5 , decoder_layerdrop=0.0_5 , use_cache=True , is_encoder_decoder=True , activation_function="relu" , d_model=10_24 , dropout=0.1 , attention_dropout=0.1 , activation_dropout=0.0 , init_std=0.0_2 , decoder_start_token_id=2 , scale_embedding=True , router_bias=False , router_dtype="float32" , router_ignore_padding_tokens=False , num_experts=1_28 , expert_capacity=64 , encoder_sparse_step=4 , decoder_sparse_step=4 , router_z_loss_coef=0.0_0_1 , router_aux_loss_coef=0.0_0_1 , second_expert_policy="all" , normalize_router_prob_before_dropping=False , batch_prioritized_routing=False , moe_eval_capacity_token_fraction=1.0 , moe_token_dropout=0.2 , pad_token_id=1 , bos_token_id=0 , eos_token_id=2 , output_router_logits=False , **kwargs , ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        self.router_z_loss_coef = router_z_loss_coef
        self.router_aux_loss_coef = router_aux_loss_coef
        self.decoder_sparse_step = decoder_sparse_step
        self.encoder_sparse_step = encoder_sparse_step
        self.num_experts = num_experts
        self.expert_capacity = expert_capacity
        self.router_bias = router_bias
        if router_dtype not in ["float32", "float16", "bfloat16"]:
            raise ValueError(f"""`router_dtype` must be one of 'float32', 'float16' or 'bfloat16', got {router_dtype}""" )
        self.router_dtype = router_dtype
        self.router_ignore_padding_tokens = router_ignore_padding_tokens
        self.batch_prioritized_routing = batch_prioritized_routing
        self.second_expert_policy = second_expert_policy
        self.normalize_router_prob_before_dropping = normalize_router_prob_before_dropping
        self.moe_eval_capacity_token_fraction = moe_eval_capacity_token_fraction
        self.moe_token_dropout = moe_token_dropout
        self.output_router_logits = output_router_logits
        super().__init__(
            pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , is_encoder_decoder=is_encoder_decoder , decoder_start_token_id=decoder_start_token_id , **kwargs , )
| 330 | 1 |
'''simple docstring'''
# flake8: noqa
# Lint as: python3
__all__ = [
"""VerificationMode""",
"""Version""",
"""disable_progress_bar""",
"""enable_progress_bar""",
"""is_progress_bar_enabled""",
"""experimental""",
]
from .info_utils import VerificationMode
from .logging import disable_progress_bar, enable_progress_bar, is_progress_bar_enabled
from .version import Version
from .experimental import experimental
| 474 |
'''simple docstring'''
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch
if is_torch_available():
import torch
from transformers.generation import DisjunctiveConstraint
@require_torch
class ConstraintTest ( unittest.TestCase ):
    '''simple docstring'''

    def test_input_types( self ):
        # For consistency across different places the DisjunctiveConstraint is called,
        # dc.token_ids is a list of integers. It is also initialized only by integers.
        cset = [[1, 2, 4], [1, 2, 3, 4]]
        dc = DisjunctiveConstraint(cset)
        self.assertTrue(isinstance(dc.token_ids , list))

        with self.assertRaises(ValueError):
            DisjunctiveConstraint(torch.LongTensor([[1, 2, 4], [1, 2, 3]]))

        with self.assertRaises(ValueError):
            DisjunctiveConstraint([torch.LongTensor([1, 2, 4]), torch.LongTensor([1, 2, 3, 4, 5])])

    def test_check_illegal_input( self ):
        # We can't have constraints that are complete subsets of one another.
        cset = [[1, 2], [1, 2, 3, 4]]

        with self.assertRaises(ValueError):
            DisjunctiveConstraint(cset)  # fails here

    def test_example_progression( self ):
        cset = [[1, 2, 3], [1, 2, 4]]
        dc = DisjunctiveConstraint(cset)

        stepped, completed, reset = dc.update(1)
        desired = stepped is True and completed is False and reset is False
        self.assertTrue(desired)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.current_seq == [1])

        stepped, completed, reset = dc.update(2)
        desired = stepped is True and completed is False and reset is False
        self.assertTrue(desired)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.current_seq == [1, 2])

        stepped, completed, reset = dc.update(3)
        desired = stepped is True and completed is True and reset is False
        self.assertTrue(desired)
        self.assertTrue(dc.completed)  # Completed!
        self.assertTrue(dc.current_seq == [1, 2, 3])

    def test_example_progression_unequal_three_mid_and_reset( self ):
        cset = [[1, 2, 3], [1, 2, 4, 5], [1, 2, 5]]
        dc = DisjunctiveConstraint(cset)

        stepped, completed, reset = dc.update(1)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.current_seq == [1])

        stepped, completed, reset = dc.update(2)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.current_seq == [1, 2])

        stepped, completed, reset = dc.update(4)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.current_seq == [1, 2, 4])

        stepped, completed, reset = dc.update(5)
        self.assertTrue(dc.completed)  # Completed!
        self.assertTrue(dc.current_seq == [1, 2, 4, 5])

        dc.reset()

        stepped, completed, reset = dc.update(1)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.remaining() == 3)
        self.assertTrue(dc.current_seq == [1])

        stepped, completed, reset = dc.update(2)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.remaining() == 2)
        self.assertTrue(dc.current_seq == [1, 2])

        stepped, completed, reset = dc.update(5)
        self.assertTrue(dc.completed)  # Completed!
        self.assertTrue(dc.remaining() == 0)
        self.assertTrue(dc.current_seq == [1, 2, 5])
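

# Hedged extra sketch (not part of the original test file): the same
# update()/current_seq API driven outside unittest. Requires torch installed;
# the token stream below is an illustrative assumption.
if __name__ == "__main__":
    dc = DisjunctiveConstraint([[1, 2, 3], [1, 2, 4]])
    for token in [1, 2, 4]:
        stepped, completed, reset = dc.update(token)
    assert dc.completed and dc.current_seq == [1, 2, 4]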
| 474 | 1 |
"""simple docstring"""
from __future__ import annotations
from collections.abc import Callable
from typing import Any, Generic, TypeVar
T = TypeVar("""T""")


class SegmentTree ( Generic[T]):
    def __init__( self , arr: list[T] , fnc: Callable[[T, T], T] ) -> None:
        # Segment tree over `arr`, combining elements with the binary function `fnc`.
        any_type: Any | T = None

        self.N: int = len(arr )
        self.st: list[T] = [any_type for _ in range(self.N )] + arr
        self.fn = fnc
        self.build()

    def build( self ) -> None:
        for p in range(self.N - 1 , 0 , -1 ):
            self.st[p] = self.fn(self.st[p * 2] , self.st[p * 2 + 1] )

    def update( self , p: int , v: T ) -> None:
        p += self.N
        self.st[p] = v
        while p > 1:
            p = p // 2
            self.st[p] = self.fn(self.st[p * 2] , self.st[p * 2 + 1] )

    def query( self , l: int , r: int ) -> T | None:  # noqa: E741
        l, r = l + self.N, r + self.N
        res: T | None = None
        while l <= r:
            if l % 2 == 1:
                res = self.st[l] if res is None else self.fn(res , self.st[l] )
            if r % 2 == 0:
                res = self.st[r] if res is None else self.fn(res , self.st[r] )
            l, r = (l + 1) // 2, (r - 1) // 2
        return res
if __name__ == "__main__":
from functools import reduce
    test_array = [1, 10, -2, 9, -3, 8, 4, -7, 5, 6, 11, -12]

    test_updates = {
        0: 7,
        1: 2,
        2: 6,
        3: -14,
        4: 5,
        5: 4,
        6: 7,
        7: -10,
        8: 9,
        9: 10,
        10: 12,
        11: 1,
    }

    min_segment_tree = SegmentTree(test_array, min)
    max_segment_tree = SegmentTree(test_array, max)
    sum_segment_tree = SegmentTree(test_array, lambda a, b: a + b)
    def test_all_segments() -> None:
        # Brute-force every (i, j) range with functools.reduce and compare it
        # against the corresponding segment-tree query.
        for i in range(len(test_array ) ):
            for j in range(i , len(test_array ) ):
                min_range = reduce(min , test_array[i : j + 1] )
                max_range = reduce(max , test_array[i : j + 1] )
                sum_range = reduce(lambda a , b : a + b , test_array[i : j + 1] )
                assert min_range == min_segment_tree.query(i , j )
                assert max_range == max_segment_tree.query(i , j )
                assert sum_range == sum_segment_tree.query(i , j )
test_all_segments()
for index, value in test_updates.items():
        test_array[index] = value
min_segment_tree.update(index, value)
max_segment_tree.update(index, value)
sum_segment_tree.update(index, value)
test_all_segments()
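
    # Hedged extra usage (illustrative values, not from the original file): a
    # fresh tree answers a range-minimum query in O(log n) after the O(n)
    # build performed in __init__.
    demo_tree = SegmentTree([5, 2, 8, 1], min)
    assert demo_tree.query(0, 3) == 1
    assert demo_tree.query(0, 1) == 2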
| 342 | """simple docstring"""
import json
import sys
import tempfile
import unittest
from pathlib import Path
import transformers
from transformers import (
CONFIG_MAPPING,
IMAGE_PROCESSOR_MAPPING,
AutoConfig,
AutoImageProcessor,
CLIPConfig,
CLIPImageProcessor,
)
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER
sys.path.append(str(Path(__file__).parent.parent.parent.parent / """utils"""))
from test_module.custom_configuration import CustomConfig # noqa E402
from test_module.custom_image_processing import CustomImageProcessor # noqa E402
class AutoImageProcessorTest ( unittest.TestCase):
    def setUp( self ):
        transformers.dynamic_module_utils.TIME_OUT_REMOTE_CODE = 0

    def test_image_processor_from_model_shortcut( self ):
        config = AutoImageProcessor.from_pretrained('''openai/clip-vit-base-patch32''' )
        self.assertIsInstance(config , CLIPImageProcessor )

    def test_image_processor_from_local_directory_from_key( self ):
        with tempfile.TemporaryDirectory() as tmpdirname:
            processor_tmpfile = Path(tmpdirname ) / '''preprocessor_config.json'''
            config_tmpfile = Path(tmpdirname ) / '''config.json'''
            json.dump(
                {'''image_processor_type''': '''CLIPImageProcessor''', '''processor_class''': '''CLIPProcessor'''} , open(processor_tmpfile , '''w''' ) , )
            json.dump({'''model_type''': '''clip'''} , open(config_tmpfile , '''w''' ) )

            config = AutoImageProcessor.from_pretrained(tmpdirname )
            self.assertIsInstance(config , CLIPImageProcessor )

    def test_image_processor_from_local_directory_from_feature_extractor_key( self ):
        # Ensure we can load the image processor from the feature extractor config
        with tempfile.TemporaryDirectory() as tmpdirname:
            processor_tmpfile = Path(tmpdirname ) / '''preprocessor_config.json'''
            config_tmpfile = Path(tmpdirname ) / '''config.json'''
            json.dump(
                {'''feature_extractor_type''': '''CLIPFeatureExtractor''', '''processor_class''': '''CLIPProcessor'''} , open(processor_tmpfile , '''w''' ) , )
            json.dump({'''model_type''': '''clip'''} , open(config_tmpfile , '''w''' ) )

            config = AutoImageProcessor.from_pretrained(tmpdirname )
            self.assertIsInstance(config , CLIPImageProcessor )

    def test_image_processor_from_local_directory_from_config( self ):
        with tempfile.TemporaryDirectory() as tmpdirname:
            model_config = CLIPConfig()

            # Create a dummy config file with image_proceesor_type
            processor_tmpfile = Path(tmpdirname ) / '''preprocessor_config.json'''
            config_tmpfile = Path(tmpdirname ) / '''config.json'''
            json.dump(
                {'''image_processor_type''': '''CLIPImageProcessor''', '''processor_class''': '''CLIPProcessor'''} , open(processor_tmpfile , '''w''' ) , )
            json.dump({'''model_type''': '''clip'''} , open(config_tmpfile , '''w''' ) )

            # remove image_processor_type to make sure config.json alone is enough to load image processor locally
            config_dict = AutoImageProcessor.from_pretrained(tmpdirname ).to_dict()

            config_dict.pop('''image_processor_type''' )
            config = CLIPImageProcessor(**config_dict )

            # save in new folder
            model_config.save_pretrained(tmpdirname )
            config.save_pretrained(tmpdirname )

            config = AutoImageProcessor.from_pretrained(tmpdirname )

            # make sure private variable is not incorrectly saved
            dict_as_saved = json.loads(config.to_json_string() )
            self.assertTrue('''_processor_class''' not in dict_as_saved )

        self.assertIsInstance(config , CLIPImageProcessor )

    def test_image_processor_from_local_file( self ):
        with tempfile.TemporaryDirectory() as tmpdirname:
            processor_tmpfile = Path(tmpdirname ) / '''preprocessor_config.json'''
            json.dump(
                {'''image_processor_type''': '''CLIPImageProcessor''', '''processor_class''': '''CLIPProcessor'''} , open(processor_tmpfile , '''w''' ) , )

            config = AutoImageProcessor.from_pretrained(processor_tmpfile )
            self.assertIsInstance(config , CLIPImageProcessor )

    def test_repo_not_found( self ):
        with self.assertRaisesRegex(
            EnvironmentError , '''clip-base is not a local folder and is not a valid model identifier''' ):
            _ = AutoImageProcessor.from_pretrained('''clip-base''' )

    def test_revision_not_found( self ):
        with self.assertRaisesRegex(
            EnvironmentError , R'''aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)''' ):
            _ = AutoImageProcessor.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER , revision='''aaaaaa''' )

    def test_image_processor_not_found( self ):
        with self.assertRaisesRegex(
            EnvironmentError , '''hf-internal-testing/config-no-model does not appear to have a file named preprocessor_config.json.''' , ):
            _ = AutoImageProcessor.from_pretrained('''hf-internal-testing/config-no-model''' )

    def test_from_pretrained_dynamic_image_processor( self ):
        # If remote code is not set, we will time out when asking whether to load the model.
        with self.assertRaises(ValueError ):
            image_processor = AutoImageProcessor.from_pretrained('''hf-internal-testing/test_dynamic_image_processor''' )
        # If remote code is disabled, we can't load this config.
        with self.assertRaises(ValueError ):
            image_processor = AutoImageProcessor.from_pretrained(
                '''hf-internal-testing/test_dynamic_image_processor''' , trust_remote_code=False )

        image_processor = AutoImageProcessor.from_pretrained(
            '''hf-internal-testing/test_dynamic_image_processor''' , trust_remote_code=True )
        self.assertEqual(image_processor.__class__.__name__ , '''NewImageProcessor''' )

        # Test image processor can be reloaded.
        with tempfile.TemporaryDirectory() as tmp_dir:
            image_processor.save_pretrained(tmp_dir )
            reloaded_image_processor = AutoImageProcessor.from_pretrained(tmp_dir , trust_remote_code=True )
        self.assertEqual(reloaded_image_processor.__class__.__name__ , '''NewImageProcessor''' )

    def test_new_image_processor_registration( self ):
        try:
            AutoConfig.register('''custom''' , CustomConfig )
            AutoImageProcessor.register(CustomConfig , CustomImageProcessor )
            # Trying to register something existing in the Transformers library will raise an error
            with self.assertRaises(ValueError ):
                AutoImageProcessor.register(CLIPConfig , CLIPImageProcessor )

            with tempfile.TemporaryDirectory() as tmpdirname:
                processor_tmpfile = Path(tmpdirname ) / '''preprocessor_config.json'''
                config_tmpfile = Path(tmpdirname ) / '''config.json'''
                json.dump(
                    {'''feature_extractor_type''': '''CLIPFeatureExtractor''', '''processor_class''': '''CLIPProcessor'''} , open(processor_tmpfile , '''w''' ) , )
                json.dump({'''model_type''': '''clip'''} , open(config_tmpfile , '''w''' ) )

                image_processor = CustomImageProcessor.from_pretrained(tmpdirname )

            # Now that the config is registered, it can be used as any other config with the auto-API
            with tempfile.TemporaryDirectory() as tmp_dir:
                image_processor.save_pretrained(tmp_dir )
                new_image_processor = AutoImageProcessor.from_pretrained(tmp_dir )
                self.assertIsInstance(new_image_processor , CustomImageProcessor )

        finally:
            if "custom" in CONFIG_MAPPING._extra_content:
                del CONFIG_MAPPING._extra_content["custom"]
            if CustomConfig in IMAGE_PROCESSOR_MAPPING._extra_content:
                del IMAGE_PROCESSOR_MAPPING._extra_content[CustomConfig]

    def test_from_pretrained_dynamic_image_processor_conflict( self ):
        class NewImageProcessor ( CLIPImageProcessor):
            is_local = True

        try:
            AutoConfig.register('''custom''' , CustomConfig )
            AutoImageProcessor.register(CustomConfig , NewImageProcessor )
            # If remote code is not set, the default is to use local
            image_processor = AutoImageProcessor.from_pretrained('''hf-internal-testing/test_dynamic_image_processor''' )
            self.assertEqual(image_processor.__class__.__name__ , '''NewImageProcessor''' )
            self.assertTrue(image_processor.is_local )
            # If remote code is disabled, we load the local one.
            image_processor = AutoImageProcessor.from_pretrained(
                '''hf-internal-testing/test_dynamic_image_processor''' , trust_remote_code=False )
            self.assertEqual(image_processor.__class__.__name__ , '''NewImageProcessor''' )
            self.assertTrue(image_processor.is_local )
            # If remote is enabled, we load from the Hub
            image_processor = AutoImageProcessor.from_pretrained(
                '''hf-internal-testing/test_dynamic_image_processor''' , trust_remote_code=True )
            self.assertEqual(image_processor.__class__.__name__ , '''NewImageProcessor''' )
            self.assertTrue(not hasattr(image_processor , '''is_local''' ) )
        finally:
            if "custom" in CONFIG_MAPPING._extra_content:
                del CONFIG_MAPPING._extra_content["custom"]
            if CustomConfig in IMAGE_PROCESSOR_MAPPING._extra_content:
                del IMAGE_PROCESSOR_MAPPING._extra_content[CustomConfig]
| 342 | 1 |
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_beit import BeitImageProcessor
_a : Union[str, Any] = logging.get_logger(__name__)
class BeitFeatureExtractor ( BeitImageProcessor ):
    def __init__( self , *args , **kwargs ) -> None:
        warnings.warn(
            'The class BeitFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please'
            ' use BeitImageProcessor instead.' , FutureWarning , )
        super().__init__(*args , **kwargs )
| 56 | def greatest_common_divisor( x : int , y : int ) -> int:
    # Euclid's algorithm
    return x if y == 0 else greatest_common_divisor(y , x % y )


def lcm( x : int , y : int ) -> int:
    return (x * y) // greatest_common_divisor(x , y )


def solution( n : int = 20 ) -> int:
    g = 1
    for i in range(1 , n + 1 ):
        g = lcm(g , i )
    return g
print(F"{solution() = }")
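    # Hedged sanity check (a known value, not in the original file): the
    # smallest number evenly divisible by every integer from 1 to 10 is 2520.
    assert solution(10) == 2520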
| 221 | 0 |
"""simple docstring"""
import argparse
import torch
from torch import nn
from transformers import MBartConfig, MBartForConditionalGeneration
def remove_ignore_keys_( state_dict ):
    ignore_keys = [
        '''encoder.version''',
        '''decoder.version''',
        '''model.encoder.version''',
        '''model.decoder.version''',
        '''_float_tensor''',
        '''decoder.output_projection.weight''',
    ]
    for k in ignore_keys:
        state_dict.pop(k , None )


def make_linear_from_emb( emb ):
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size , emb_size , bias=False )
    lin_layer.weight.data = emb.weight.data
    return lin_layer


def convert_fairseq_mbart_checkpoint_from_disk( checkpoint_path , hf_config_path="facebook/mbart-large-en-ro" , finetuned=False , mbart_50=False ):
    state_dict = torch.load(checkpoint_path , map_location='''cpu''' )['''model''']
    remove_ignore_keys_(state_dict )
    vocab_size = state_dict['''encoder.embed_tokens.weight'''].shape[0]

    mbart_config = MBartConfig.from_pretrained(hf_config_path , vocab_size=vocab_size )
    if mbart_50 and finetuned:
        mbart_config.activation_function = '''relu'''

    state_dict['''shared.weight'''] = state_dict['''decoder.embed_tokens.weight''']
    model = MBartForConditionalGeneration(mbart_config )
    model.model.load_state_dict(state_dict )

    if finetuned:
        model.lm_head = make_linear_from_emb(model.model.shared )

    return model
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'fairseq_path', type=str, help='bart.large, bart.large.cnn or a path to a model.pt on local filesystem.'
)
parser.add_argument('pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument(
'--hf_config',
default='facebook/mbart-large-cc25',
type=str,
help='Which huggingface architecture to use: mbart-large',
)
    parser.add_argument('--mbart_50', action='store_true', help='whether the model is an mBART-50 checkpoint')
parser.add_argument('--finetuned', action='store_true', help='whether the model is a fine-tuned checkpoint')
    args = parser.parse_args()
    model = convert_fairseq_mbart_checkpoint_from_disk(
        args.fairseq_path, hf_config_path=args.hf_config, finetuned=args.finetuned, mbart_50=args.mbart_50
    )
model.save_pretrained(args.pytorch_dump_folder_path) | 709 |
"""simple docstring"""
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
UpperCAmelCase__ = {
'configuration_vivit': ['VIVIT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'VivitConfig'],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    UpperCAmelCase__['image_processing_vivit'] = ['VivitImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    UpperCAmelCase__['modeling_vivit'] = [
'VIVIT_PRETRAINED_MODEL_ARCHIVE_LIST',
'VivitModel',
'VivitPreTrainedModel',
'VivitForVideoClassification',
]
if TYPE_CHECKING:
from .configuration_vivit import VIVIT_PRETRAINED_CONFIG_ARCHIVE_MAP, VivitConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_vivit import VivitImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vivit import (
VIVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
VivitForVideoClassification,
VivitModel,
VivitPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], UpperCAmelCase__, module_spec=__spec__)
| 430 | 0 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCamelCase__ :Optional[Any] = logging.get_logger(__name__)
UpperCamelCase__ :Optional[int] = {
# See all MEGATRON_BERT models at https://huggingface.co/models?filter=bert
}
class MegatronBertConfig ( PretrainedConfig ):
    """simple docstring"""

    model_type = "megatron-bert"

    def __init__( self , vocab_size=2_90_56 , hidden_size=10_24 , num_hidden_layers=24 , num_attention_heads=16 , intermediate_size=40_96 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=5_12 , type_vocab_size=2 , initializer_range=0.0_2 , layer_norm_eps=1E-12 , pad_token_id=0 , position_embedding_type="absolute" , use_cache=True , **kwargs , ):
        """simple docstring"""
        super().__init__(pad_token_id=pad_token_id , **kwargs )
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
| 355 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCamelCase__ :str = logging.get_logger(__name__)
UpperCamelCase__ :Optional[int] = {
"""microsoft/markuplm-base""": """https://huggingface.co/microsoft/markuplm-base/resolve/main/config.json""",
"""microsoft/markuplm-large""": """https://huggingface.co/microsoft/markuplm-large/resolve/main/config.json""",
}
class MarkupLMConfig ( PretrainedConfig ):
    """simple docstring"""

    model_type = "markuplm"

    def __init__( self , vocab_size=3_05_22 , hidden_size=7_68 , num_hidden_layers=12 , num_attention_heads=12 , intermediate_size=30_72 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=5_12 , type_vocab_size=2 , initializer_range=0.0_2 , layer_norm_eps=1E-12 , pad_token_id=0 , bos_token_id=0 , eos_token_id=2 , max_xpath_tag_unit_embeddings=2_56 , max_xpath_subs_unit_embeddings=10_24 , tag_pad_id=2_16 , subs_pad_id=10_01 , xpath_unit_hidden_size=32 , max_depth=50 , position_embedding_type="absolute" , use_cache=True , classifier_dropout=None , **kwargs , ):
        """simple docstring"""
        super().__init__(
            pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs , )
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
        # additional properties
        self.max_depth = max_depth
        self.max_xpath_tag_unit_embeddings = max_xpath_tag_unit_embeddings
        self.max_xpath_subs_unit_embeddings = max_xpath_subs_unit_embeddings
        self.tag_pad_id = tag_pad_id
        self.subs_pad_id = subs_pad_id
        self.xpath_unit_hidden_size = xpath_unit_hidden_size
| 355 | 1 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available
__magic_name__ ={}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    __magic_name__['''tokenization_mluke'''] = ['''MLukeTokenizer''']
if TYPE_CHECKING:
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_mluke import MLukeTokenizer
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], __magic_name__, module_spec=__spec__)
| 469 | from typing import Optional, Union
import torch
from torch import nn
from ...configuration_utils import ConfigMixin, register_to_config
from ...models.modeling_utils import ModelMixin
class StableUnCLIPImageNormalizer ( ModelMixin , ConfigMixin ):
    """Holds the mean/std of CLIP image embeddings and (un)normalizes them."""

    @register_to_config
    def __init__(self , embedding_dim = 768 , ):
        super().__init__()
        self.mean = nn.Parameter(torch.zeros(1 , embedding_dim ) )
        self.std = nn.Parameter(torch.ones(1 , embedding_dim ) )

    def to(self , torch_device: Optional[Union[str, torch.device]] = None , torch_dtype: Optional[torch.dtype] = None , ):
        self.mean = nn.Parameter(self.mean.to(torch_device ).to(torch_dtype ) )
        self.std = nn.Parameter(self.std.to(torch_device ).to(torch_dtype ) )
        return self

    def scale(self , embeds ):
        embeds = (embeds - self.mean) * 1.0 / self.std
        return embeds

    def unscale(self , embeds ):
        embeds = (embeds * self.std) + self.mean
        return embeds
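

# Hedged usage sketch (not from the original file): with freshly initialized
# parameters (mean = 0, std = 1), scale() and unscale() are exact inverses, so
# a round trip recovers the input embeddings. Shapes are illustrative.
if __name__ == "__main__":
    normalizer = StableUnCLIPImageNormalizer(embedding_dim=768)
    embeds = torch.randn(2, 768)
    roundtrip = normalizer.unscale(normalizer.scale(embeds))
    assert torch.allclose(roundtrip, embeds, atol=1e-5)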
| 469 | 1 |
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
UpperCamelCase = {
"configuration_efficientnet": [
"EFFICIENTNET_PRETRAINED_CONFIG_ARCHIVE_MAP",
"EfficientNetConfig",
"EfficientNetOnnxConfig",
]
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    UpperCamelCase["image_processing_efficientnet"] = ["EfficientNetImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    UpperCamelCase["modeling_efficientnet"] = [
"EFFICIENTNET_PRETRAINED_MODEL_ARCHIVE_LIST",
"EfficientNetForImageClassification",
"EfficientNetModel",
"EfficientNetPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_efficientnet import (
EFFICIENTNET_PRETRAINED_CONFIG_ARCHIVE_MAP,
EfficientNetConfig,
EfficientNetOnnxConfig,
)
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_efficientnet import EfficientNetImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_efficientnet import (
EFFICIENTNET_PRETRAINED_MODEL_ARCHIVE_LIST,
EfficientNetForImageClassification,
EfficientNetModel,
EfficientNetPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], UpperCamelCase)
| 66 |
"""simple docstring"""
def gnome_sort( lst : list ):
    '''simple docstring'''
    if len(lst ) <= 1:
        return lst

    i = 1

    while i < len(lst ):
        if lst[i - 1] <= lst[i]:
            i += 1
        else:
            # swap the out-of-order pair and step back
            lst[i - 1], lst[i] = lst[i], lst[i - 1]
            i -= 1
            if i == 0:
                i = 1

    return lst
if __name__ == "__main__":
    user_input = input("""Enter numbers separated by a comma:\n""").strip()
    unsorted = [int(item) for item in user_input.split(""",""")]
    print(gnome_sort(unsorted))
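    # Hedged self-check (not in the original file): gnome sort agrees with the
    # built-in sorted(); it runs in O(n^2) time in the worst case.
    assert gnome_sort([3, 1, 2]) == sorted([3, 1, 2])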
| 437 | 0 |
'''simple docstring'''
import argparse
import os
from pathlib import Path
from typing import Dict
import tensorflow as tf
import torch
from tqdm import tqdm
from transformers import PegasusConfig, PegasusForConditionalGeneration, PegasusTokenizer
from transformers.models.pegasus.configuration_pegasus import DEFAULTS, task_specific_params
_A : Union[str, Any] = [
# replace left string with right string to get the relevant state_dict key (identical state dict to bart)
['''memory_attention''', '''encoder_attn'''],
['''attention''', '''attn'''],
['''/''', '''.'''],
['''.LayerNorm.gamma''', '''_layer_norm.weight'''],
['''.LayerNorm.beta''', '''_layer_norm.bias'''],
['''r.layer_''', '''r.layers.'''],
['''output_proj''', '''out_proj'''],
['''ffn.dense_1.''', '''fc2.'''],
['''ffn.dense.''', '''fc1.'''],
['''ffn_layer_norm''', '''final_layer_norm'''],
['''kernel''', '''weight'''],
['''encoder_layer_norm.''', '''encoder.layer_norm.'''],
['''decoder_layer_norm.''', '''decoder.layer_norm.'''],
['''embeddings.weights''', '''shared.weight'''],
]
def rename_state_dict_key( k ):
    '''simple docstring'''
    for pegasus_name, hf_name in PATTERNS:
        k = k.replace(pegasus_name , hf_name )
    return k


def convert_pegasus( tf_weights: dict , cfg_updates: dict ) -> PegasusForConditionalGeneration:
    '''simple docstring'''
    cfg_kwargs = DEFAULTS.copy()
    cfg_kwargs.update(cfg_updates )
    cfg = PegasusConfig(**cfg_kwargs )
    torch_model = PegasusForConditionalGeneration(cfg )
    sd = torch_model.model.state_dict()
    mapping = {}
    for k, v in tf_weights.items():
        new_k = rename_state_dict_key(k )
        if new_k not in sd:
            raise ValueError(f"""could not find new key {new_k} in state dict. (converted from {k})""" )

        if "dense" in k or "proj" in new_k:
            v = v.T
        mapping[new_k] = torch.tensor(v , dtype=sd[new_k].dtype )
        assert v.shape == sd[new_k].shape, f"""{new_k}, {k}, {v.shape}, {sd[new_k].shape}"""
    # make sure embedding.padding_idx is respected
    mapping["""shared.weight"""][cfg.pad_token_id] = torch.zeros_like(mapping["""shared.weight"""][cfg.pad_token_id + 1] )
    mapping["""encoder.embed_tokens.weight"""] = mapping["""shared.weight"""]
    mapping["""decoder.embed_tokens.weight"""] = mapping["""shared.weight"""]
    empty_biases = {k: torch.zeros_like(v ) for k, v in sd.items() if k.endswith("""bias""" ) and k not in mapping}
    mapping.update(**empty_biases )
    missing, extra = torch_model.model.load_state_dict(mapping , strict=False )
    unexpected_missing = [
        k for k in missing if k not in ["""encoder.embed_positions.weight""", """decoder.embed_positions.weight"""]
    ]
    assert unexpected_missing == [], f"""no matches found for the following torch keys {unexpected_missing}"""
    assert extra == [], f"""no matches found for the following tf keys {extra}"""
    return torch_model


def get_tf_weights_as_numpy( path="./ckpt/aeslc/model.ckpt-32000" ) -> dict:
    '''simple docstring'''
    init_vars = tf.train.list_variables(path )
    tf_weights = {}
    ignore_name = ["""Adafactor""", """global_step"""]
    for name, shape in tqdm(init_vars , desc="""converting tf checkpoint to dict""" ):
        skip_key = any(pat in name for pat in ignore_name )
        if skip_key:
            continue
        array = tf.train.load_variable(path , name )
        tf_weights[name] = array
    return tf_weights


def convert_pegasus_ckpt_to_pytorch( ckpt_path: str , save_dir: str ) -> None:
    '''simple docstring'''
    dataset = Path(ckpt_path ).parent.name
    desired_max_model_length = task_specific_params[f"""summarization_{dataset}"""]["""max_position_embeddings"""]
    tok = PegasusTokenizer.from_pretrained("""sshleifer/pegasus""" , model_max_length=desired_max_model_length )
    assert tok.model_max_length == desired_max_model_length
    tok.save_pretrained(save_dir )

    # convert model
    tf_weights = get_tf_weights_as_numpy(ckpt_path )
    cfg_updates = task_specific_params[f"""summarization_{dataset}"""]
    if dataset == "large":
        cfg_updates["""task_specific_params"""] = task_specific_params
    torch_model = convert_pegasus(tf_weights , cfg_updates )
    torch_model.save_pretrained(save_dir )
    sd = torch_model.state_dict()
    sd.pop("""model.decoder.embed_positions.weight""" )
    sd.pop("""model.encoder.embed_positions.weight""" )
    torch.save(sd , Path(save_dir ) / """pytorch_model.bin""" )
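

# Hedged illustration (not in the original script) of how the PATTERNS
# substitutions compose on a sample TF key; the key below is an assumption:
# "/" -> ".", then ".LayerNorm.gamma" -> "_layer_norm.weight", then
# "encoder_layer_norm." -> "encoder.layer_norm." yield the PyTorch name.
assert rename_state_dict_key("model/encoder/LayerNorm.gamma") == "model.encoder.layer_norm.weight"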
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument('''tf_ckpt_path''', type=str, help='''passed to tf.train.list_variables''')
parser.add_argument('''save_dir''', default=None, type=str, help='''Path to the output PyTorch model.''')
    args = parser.parse_args()
    if args.save_dir is None:
        dataset = Path(args.tf_ckpt_path).parent.name
        args.save_dir = os.path.join('''pegasus''', dataset)
convert_pegasus_ckpt_to_pytorch(args.tf_ckpt_path, args.save_dir)
| 330 | '''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_A : Union[str, Any] = logging.get_logger(__name__)
_A : Union[str, Any] = {
'''facebook/dpr-ctx_encoder-single-nq-base''': (
'''https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/config.json'''
),
'''facebook/dpr-question_encoder-single-nq-base''': (
'''https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/config.json'''
),
'''facebook/dpr-reader-single-nq-base''': (
'''https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/config.json'''
),
'''facebook/dpr-ctx_encoder-multiset-base''': (
'''https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/config.json'''
),
'''facebook/dpr-question_encoder-multiset-base''': (
'''https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/config.json'''
),
'''facebook/dpr-reader-multiset-base''': (
'''https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/config.json'''
),
}
class DPRConfig ( PretrainedConfig ):
    '''simple docstring'''

    model_type = """dpr"""

    def __init__( self , vocab_size=3_05_22 , hidden_size=7_68 , num_hidden_layers=12 , num_attention_heads=12 , intermediate_size=30_72 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=5_12 , type_vocab_size=2 , initializer_range=0.0_2 , layer_norm_eps=1e-1_2 , pad_token_id=0 , position_embedding_type="absolute" , projection_dim=0 , **kwargs , ):
        super().__init__(pad_token_id=pad_token_id , **kwargs )
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.projection_dim = projection_dim
        self.position_embedding_type = position_embedding_type
| 330 | 1 |
"""simple docstring"""
import os
import tempfile
import unittest
from transformers.models.marian.convert_marian_tatoeba_to_pytorch import DEFAULT_REPO, TatoebaConverter
from transformers.testing_utils import slow
from transformers.utils import cached_property
@unittest.skipUnless(os.path.exists(DEFAULT_REPO ) , 'Tatoeba directory does not exist.' )
class TatoebaConversionTester ( unittest.TestCase ):
    @cached_property
    def resolver( self ):
        '''simple docstring'''
        tmp_dir = tempfile.mkdtemp()
        return TatoebaConverter(save_dir=tmp_dir )

    @slow
    def test_resolver( self ):
        '''simple docstring'''
        self.resolver.convert_models(["heb-eng"] )

    @slow
    def test_model_card( self ):
        '''simple docstring'''
        content, mmeta = self.resolver.write_model_card("opus-mt-he-en" , dry_run=True )
        assert mmeta["long_pair"] == "heb-eng" | 46 |
from torch import nn
class ClassificationHead ( nn.Module ):
    """Maps transformer hidden states to per-class logits with one linear layer."""

    def __init__( self , class_size , embed_size ):
        super().__init__()
        self.class_size = class_size
        self.embed_size = embed_size
        # self.mlp1 = nn.Linear(embed_size, embed_size)
        # self.mlp2 = (nn.Linear(embed_size, class_size))
        self.mlp = nn.Linear(embed_size , class_size )

    def forward( self , hidden_state ):
        # hidden_state = nn.functional.relu(self.mlp1(hidden_state))
        # hidden_state = self.mlp2(hidden_state)
        logits = self.mlp(hidden_state )
        return logits
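

# Hedged usage sketch (dimensions are illustrative assumptions, not from the
# original file): a forward pass maps a batch of hidden states to logits.
if __name__ == "__main__":
    import torch  # only `nn` is imported above

    head = ClassificationHead(class_size=5, embed_size=768)
    logits = head(torch.randn(4, 768))
    assert logits.shape == (4, 5)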
| 509 | 0 |
import torch
from diffusers import DDPMScheduler
from .test_schedulers import SchedulerCommonTest
class DDPMSchedulerTest ( SchedulerCommonTest):
    scheduler_classes = (DDPMScheduler,)

    def get_scheduler_config( self , **kwargs ):
        config = {
            """num_train_timesteps""": 1000,
            """beta_start""": 0.0_0_0_1,
            """beta_end""": 0.0_2,
            """beta_schedule""": """linear""",
            """variance_type""": """fixed_small""",
            """clip_sample""": True,
        }

        config.update(**kwargs )
        return config

    def test_timesteps( self ):
        for timesteps in [1, 5, 100, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps )

    def test_betas( self ):
        for beta_start, beta_end in zip([0.0_0_0_1, 0.0_0_1, 0.0_1, 0.1] , [0.0_0_2, 0.0_2, 0.2, 2] ):
            self.check_over_configs(beta_start=beta_start , beta_end=beta_end )

    def test_schedules( self ):
        for schedule in ["linear", "squaredcos_cap_v2"]:
            self.check_over_configs(beta_schedule=schedule )

    def test_variance_type( self ):
        for variance in ["fixed_small", "fixed_large", "other"]:
            self.check_over_configs(variance_type=variance )

    def test_clip_sample( self ):
        for clip_sample in [True, False]:
            self.check_over_configs(clip_sample=clip_sample )

    def test_thresholding( self ):
        self.check_over_configs(thresholding=False )
        for threshold in [0.5, 1.0, 2.0]:
            for prediction_type in ["epsilon", "sample", "v_prediction"]:
                self.check_over_configs(
                    thresholding=True , prediction_type=prediction_type , sample_max_value=threshold , )

    def test_prediction_type( self ):
        for prediction_type in ["epsilon", "sample", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type )

    def test_time_indices( self ):
        for t in [0, 500, 999]:
            self.check_over_forward(time_step=t )

    def test_variance( self ):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config )

        assert torch.sum(torch.abs(scheduler._get_variance(0 ) - 0.0 ) ) < 1E-5
        assert torch.sum(torch.abs(scheduler._get_variance(487 ) - 0.0_0_9_7_9 ) ) < 1E-5
        assert torch.sum(torch.abs(scheduler._get_variance(999 ) - 0.0_2 ) ) < 1E-5

    def test_full_loop_no_noise( self ):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config )

        num_trained_timesteps = len(scheduler )

        model = self.dummy_model()
        sample = self.dummy_sample_deter
        generator = torch.manual_seed(0 )

        for t in reversed(range(num_trained_timesteps ) ):
            # 1. predict noise residual
            residual = model(sample , t )

            # 2. predict previous mean of sample x_t-1
            pred_prev_sample = scheduler.step(residual , t , sample , generator=generator ).prev_sample

            # if t > 0:
            #     noise = self.dummy_sample_deter
            #     variance = scheduler.get_variance(t) ** (0.5) * noise
            #
            # sample = pred_prev_sample + variance

            sample = pred_prev_sample

        result_sum = torch.sum(torch.abs(sample ) )
        result_mean = torch.mean(torch.abs(sample ) )

        assert abs(result_sum.item() - 2_5_8.9_6_0_6 ) < 1E-2
        assert abs(result_mean.item() - 0.3_3_7_2 ) < 1E-3

    def test_full_loop_with_v_prediction( self ):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(prediction_type="""v_prediction""" )
        scheduler = scheduler_class(**scheduler_config )

        num_trained_timesteps = len(scheduler )

        model = self.dummy_model()
        sample = self.dummy_sample_deter
        generator = torch.manual_seed(0 )

        for t in reversed(range(num_trained_timesteps ) ):
            # 1. predict noise residual
            residual = model(sample , t )

            # 2. predict previous mean of sample x_t-1
            pred_prev_sample = scheduler.step(residual , t , sample , generator=generator ).prev_sample

            sample = pred_prev_sample

        result_sum = torch.sum(torch.abs(sample ) )
        result_mean = torch.mean(torch.abs(sample ) )

        assert abs(result_sum.item() - 2_0_2.0_2_9_6 ) < 1E-2
        assert abs(result_mean.item() - 0.2_6_3_1 ) < 1E-3

    def test_custom_timesteps( self ):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config )

        timesteps = [100, 87, 50, 1, 0]

        scheduler.set_timesteps(timesteps=timesteps )

        scheduler_timesteps = scheduler.timesteps

        for i, timestep in enumerate(scheduler_timesteps ):
            if i == len(scheduler_timesteps ) - 1:
                expected_prev_t = -1
            else:
                expected_prev_t = scheduler_timesteps[i + 1]

            prev_t = scheduler.previous_timestep(timestep )
            prev_t = prev_t.item()

            self.assertEqual(prev_t , expected_prev_t )

    def test_custom_timesteps_increasing_order( self ):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config )

        timesteps = [100, 87, 50, 51, 0]

        with self.assertRaises(ValueError , msg="""`custom_timesteps` must be in descending order.""" ):
            scheduler.set_timesteps(timesteps=timesteps )

    def test_custom_timesteps_passing_both_num_inference_steps_and_timesteps( self ):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config )

        timesteps = [100, 87, 50, 1, 0]
        num_inference_steps = len(timesteps )

        with self.assertRaises(ValueError , msg="""Can only pass one of `num_inference_steps` or `custom_timesteps`.""" ):
            scheduler.set_timesteps(num_inference_steps=num_inference_steps , timesteps=timesteps )

    def test_custom_timesteps_too_large( self ):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config )

        timesteps = [scheduler.config.num_train_timesteps]

        with self.assertRaises(
            ValueError , msg="""`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}}""" , ):
            scheduler.set_timesteps(timesteps=timesteps )
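

# Hedged end-to-end sketch (not part of the test suite): the same DDPMScheduler
# API driving a toy reverse-diffusion loop. The stand-in "model" just predicts
# zeros, and the shapes/step count are illustrative assumptions.
if __name__ == "__main__":
    demo_scheduler = DDPMScheduler(num_train_timesteps=1000)
    demo_scheduler.set_timesteps(50)
    sample = torch.randn(1, 3, 8, 8)
    for t in demo_scheduler.timesteps:
        residual = torch.zeros_like(sample)  # a trained model would predict noise here
        sample = demo_scheduler.step(residual, t, sample).prev_sample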
| 75 |
import webbrowser
from sys import argv
from urllib.parse import parse_qs, quote
import requests
from bs4 import BeautifulSoup
from fake_useragent import UserAgent
if __name__ == "__main__":
    query = '%20'.join(argv[1:]) if len(argv) > 1 else quote(str(input('Search: ')))
    print('Googling.....')
    url = F"""https://www.google.com/search?q={query}&num=100"""
    res = requests.get(
        url,
        headers={'User-Agent': str(UserAgent().random)},
    )
    try:
        link = (
            BeautifulSoup(res.text, 'html.parser')
            .find('div', attrs={'class': 'yuRUbf'})
            .find('a')
            .get('href')
        )
    except AttributeError:
        link = parse_qs(
            BeautifulSoup(res.text, 'html.parser')
            .find('div', attrs={'class': 'kCrYT'})
            .find('a')
            .get('href')
        )['url'][0]
webbrowser.open(link)
| 75 | 1 |
'''simple docstring'''
from __future__ import annotations
def min_path_sum( matrix ):
    """simple docstring"""
    # preprocessing the first row
    for i in range(1 , len(matrix[0] ) ):
        matrix[0][i] += matrix[0][i - 1]

    # preprocessing the first column
    for i in range(1 , len(matrix ) ):
        matrix[i][0] += matrix[i - 1][0]

    # updating the path cost for current position
    for i in range(1 , len(matrix ) ):
        for j in range(1 , len(matrix[0] ) ):
            matrix[i][j] += min(matrix[i - 1][j] , matrix[i][j - 1] )

    return matrix[-1][-1]
if __name__ == "__main__":
import doctest
doctest.testmod()
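
    # Worked example (a standard illustration, not in the original file): the
    # cheapest top-left to bottom-right path below is 1 -> 3 -> 1 -> 1 -> 1 = 7.
    assert min_path_sum([[1, 3, 1], [1, 5, 1], [4, 2, 1]]) == 7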
| 41 |
import sys
from typing import Tuple
import numpy as np
import torch
from PIL import Image
from torch import nn
from transformers.image_utils import PILImageResampling
from utils import img_tensorize
class ResizeShortestEdge :
    """simple docstring"""

    def __init__( self , short_edge_length , max_size=sys.maxsize):
        '''simple docstring'''
        self.interp_method = """bilinear"""
        self.max_size = max_size
        self.short_edge_length = short_edge_length

    def __call__( self , imgs):
        '''simple docstring'''
        img_augs = []
        for img in imgs:
            h, w = img.shape[:2]
            # later: provide list and randomly choose index for resize
            size = np.random.randint(self.short_edge_length[0] , self.short_edge_length[1] + 1)
            if size == 0:
                return img
            scale = size * 1.0 / min(h , w)
            if h < w:
                newh, neww = size, scale * w
            else:
                newh, neww = scale * h, size
            if max(newh , neww) > self.max_size:
                scale = self.max_size * 1.0 / max(newh , neww)
                newh = newh * scale
                neww = neww * scale
            neww = int(neww + 0.5)
            newh = int(newh + 0.5)

            if img.dtype == np.uint8:
                pil_image = Image.fromarray(img)
                pil_image = pil_image.resize((neww, newh) , PILImageResampling.BILINEAR)
                img = np.asarray(pil_image)
            else:
                img = img.permute(2 , 0 , 1).unsqueeze(0)  # hw(c) -> nchw
                img = nn.functional.interpolate(
                    img , (newh, neww) , mode=self.interp_method , align_corners=False).squeeze(0)
            img_augs.append(img)

        return img_augs


class Preprocess :
    """simple docstring"""

    def __init__( self , cfg):
        '''simple docstring'''
        self.aug = ResizeShortestEdge([cfg.INPUT.MIN_SIZE_TEST, cfg.INPUT.MIN_SIZE_TEST] , cfg.INPUT.MAX_SIZE_TEST)
        self.input_format = cfg.INPUT.FORMAT
        self.size_divisibility = cfg.SIZE_DIVISIBILITY
        self.pad_value = cfg.PAD_VALUE
        self.max_image_size = cfg.INPUT.MAX_SIZE_TEST
        self.device = cfg.MODEL.DEVICE
        self.pixel_std = torch.tensor(cfg.MODEL.PIXEL_STD).to(self.device).view(len(cfg.MODEL.PIXEL_STD) , 1 , 1)
        self.pixel_mean = torch.tensor(cfg.MODEL.PIXEL_MEAN).to(self.device).view(len(cfg.MODEL.PIXEL_STD) , 1 , 1)
        self.normalizer = lambda x: (x - self.pixel_mean) / self.pixel_std

    def pad( self , images):
        '''simple docstring'''
        max_size = tuple(max(s) for s in zip(*[img.shape for img in images]))
        image_sizes = [im.shape[-2:] for im in images]
        images = [
            nn.functional.pad(
                im , [0, max_size[-1] - size[1], 0, max_size[-2] - size[0]] , value=self.pad_value , )
            for size, im in zip(image_sizes , images)
        ]

        return torch.stack(images), torch.tensor(image_sizes)

    def __call__( self , images , single_image=False):
        '''simple docstring'''
        with torch.no_grad():
            if not isinstance(images , list):
                images = [images]
            if single_image:
                assert len(images) == 1
            for i in range(len(images)):
                if isinstance(images[i] , torch.Tensor):
                    images.insert(i , images.pop(i).to(self.device).float())
                elif not isinstance(images[i] , torch.Tensor):
                    images.insert(
                        i , torch.as_tensor(img_tensorize(images.pop(i) , input_format=self.input_format))
                        .to(self.device)
                        .float() , )
            # resize smallest edge
            raw_sizes = torch.tensor([im.shape[:2] for im in images])
            images = self.aug(images)
            # transpose images and convert to torch tensors
            # images = [torch.as_tensor(i.astype("float32")).permute(2, 0, 1).to(self.device) for i in images]
            # now normalize before pad to avoid useless arithmetic
            images = [self.normalizer(x) for x in images]
            # now pad them to do the following operations
            images, sizes = self.pad(images)
            # Normalize

            if self.size_divisibility > 0:
                raise NotImplementedError()
            # pad
            scales_yx = torch.true_divide(raw_sizes , sizes)
            if single_image:
                return images[0], sizes[0], scales_yx[0]
            else:
                return images, sizes, scales_yx


def _scale_box( boxes , scale_yx):
    boxes[:, 0::2] *= scale_yx[:, 1]
    boxes[:, 1::2] *= scale_yx[:, 0]
    return boxes


def _clip_box( tensor , box_size: Tuple[int, int]):
    assert torch.isfinite(tensor).all(), "Box tensor contains infinite or NaN!"
    h, w = box_size
    tensor[:, 0].clamp_(min=0 , max=w)
    tensor[:, 1].clamp_(min=0 , max=h)
    tensor[:, 2].clamp_(min=0 , max=w)
    tensor[:, 3].clamp_(min=0 , max=h)
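

# Hedged usage sketch for the two box helpers above (values are illustrative
# assumptions): rows of `scale_yx` are (y_scale, x_scale); _clip_box clamps
# boxes into an (h, w) canvas in place.
if __name__ == "__main__":
    demo_boxes = torch.tensor([[10.0, 20.0, 50.0, 80.0]])
    scaled = _scale_box(demo_boxes.clone(), torch.tensor([[2.0, 0.5]]))
    assert torch.equal(scaled, torch.tensor([[5.0, 40.0, 25.0, 160.0]]))
    _clip_box(scaled, (100, 100))
    assert torch.equal(scaled, torch.tensor([[5.0, 40.0, 25.0, 100.0]]))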
| 654 | 0 |
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
a__ : Dict = logging.get_logger(__name__)
a__ : Union[str, Any] = {
"""google/bigbird-roberta-base""": """https://huggingface.co/google/bigbird-roberta-base/resolve/main/config.json""",
"""google/bigbird-roberta-large""": """https://huggingface.co/google/bigbird-roberta-large/resolve/main/config.json""",
"""google/bigbird-base-trivia-itc""": """https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/config.json""",
# See all BigBird models at https://huggingface.co/models?filter=big_bird
}
class BigBirdConfig ( PretrainedConfig ):
    model_type = "big_bird"

    def __init__( self , vocab_size=5_0_3_5_8 , hidden_size=7_6_8 , num_hidden_layers=1_2 , num_attention_heads=1_2 , intermediate_size=3_0_7_2 , hidden_act="gelu_new" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=4_0_9_6 , type_vocab_size=2 , initializer_range=0.02 , layer_norm_eps=1e-12 , use_cache=True , pad_token_id=0 , bos_token_id=1 , eos_token_id=2 , sep_token_id=6_6 , attention_type="block_sparse" , use_bias=True , rescale_embeddings=False , block_size=6_4 , num_random_blocks=3 , classifier_dropout=None , **kwargs , ):
        """simple docstring"""
        super().__init__(
            pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , sep_token_id=sep_token_id , **kwargs , )
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps
        self.use_cache = use_cache
        self.rescale_embeddings = rescale_embeddings
        self.attention_type = attention_type
        self.use_bias = use_bias
        self.block_size = block_size
        self.num_random_blocks = num_random_blocks
        self.classifier_dropout = classifier_dropout


class BigBirdOnnxConfig ( OnnxConfig ):
    @property
    def inputs( self ) -> Mapping[str, Mapping[int, str]]:
        """simple docstring"""
        if self.task == "multiple-choice":
            dynamic_axis = {0: 'batch', 1: 'choice', 2: 'sequence'}
        else:
            dynamic_axis = {0: 'batch', 1: 'sequence'}
        return OrderedDict(
            [
                ('input_ids', dynamic_axis),
                ('attention_mask', dynamic_axis),
            ] )
| 309 |
"""simple docstring"""
import argparse
from collections import defaultdict
import yaml
PATH_TO_TOC = "docs/source/en/_toctree.yml"


def clean_model_doc_toc(model_doc):
    counts = defaultdict(int)
    for doc in model_doc:
        counts[doc["local"]] += 1
    duplicates = [key for key, value in counts.items() if value > 1]

    new_doc = []
    for duplicate_key in duplicates:
        titles = list({doc["title"] for doc in model_doc if doc["local"] == duplicate_key})
        if len(titles) > 1:
            raise ValueError(
                f"{duplicate_key} is present several times in the documentation table of content at "
                "`docs/source/en/_toctree.yml` with different *Title* values. Choose one of those and remove the "
                "others."
            )
        # Only add this once
        new_doc.append({"local": duplicate_key, "title": titles[0]})

    # Add none duplicate-keys
    new_doc.extend([doc for doc in model_doc if counts[doc["local"]] == 1])
    # Sort
    return sorted(new_doc, key=lambda s: s["title"].lower())
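

# Illustrative behaviour note (added): given
#     [{"local": "albert", "title": "ALBERT"},
#      {"local": "albert", "title": "ALBERT"},
#      {"local": "bert", "title": "BERT"}]
# the function collapses the duplicated "albert" entry and returns the result
# sorted by lower-cased title; duplicate `local` keys with *different* titles
# raise a ValueError instead.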


def check_model_doc(overwrite=False):
    with open(PATH_TO_TOC, encoding="utf-8") as f:
        content = yaml.safe_load(f.read())

    # Get to the API doc
    api_idx = 0
    while content[api_idx]["title"] != "API":
        api_idx += 1
    api_doc = content[api_idx]["sections"]

    # Then to the model doc
    model_idx = 0
    while api_doc[model_idx]["title"] != "Models":
        model_idx += 1
    model_doc = api_doc[model_idx]["sections"]

    modalities_docs = [(idx, section) for idx, section in enumerate(model_doc) if "sections" in section]
    diff = False
    for idx, modality_doc in modalities_docs:
        old_modality_doc = modality_doc["sections"]
        new_modality_doc = clean_model_doc_toc(old_modality_doc)

        if old_modality_doc != new_modality_doc:
            diff = True
            if overwrite:
                model_doc[idx]["sections"] = new_modality_doc

    if diff:
        if overwrite:
            api_doc[model_idx]["sections"] = model_doc
            content[api_idx]["sections"] = api_doc
            with open(PATH_TO_TOC, "w", encoding="utf-8") as f:
                f.write(yaml.dump(content, allow_unicode=True))
        else:
            raise ValueError(
                "The model doc part of the table of content is not properly sorted, run `make style` to fix this."
            )

if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--fix_and_overwrite", action="store_true", help="Whether to fix inconsistencies.")
    args = parser.parse_args()

    check_model_doc(args.fix_and_overwrite)
| 309 | 1 |
def depth_first_search(grid: list[list[int]], row: int, col: int, visit: set) -> int:
    row_length, col_length = len(grid), len(grid[0])
    if (
        min(row, col) < 0
        or row == row_length
        or col == col_length
        or (row, col) in visit
        or grid[row][col] == 1
    ):
        return 0
    if row == row_length - 1 and col == col_length - 1:
        return 1

    visit.add((row, col))

    count = 0
    count += depth_first_search(grid, row + 1, col, visit)
    count += depth_first_search(grid, row - 1, col, visit)
    count += depth_first_search(grid, row, col + 1, visit)
    count += depth_first_search(grid, row, col - 1, visit)

    visit.remove((row, col))
    return count
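

# Illustrative example (added): a 3x3 grid where 1 marks a blocked cell.
# With the centre blocked, the open cells form a ring, so there are exactly
# two simple paths from (0, 0) to (2, 2):
#
#     grid = [[0, 0, 0], [0, 1, 0], [0, 0, 0]]
#     print(depth_first_search(grid, 0, 0, set()))  # 2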
if __name__ == "__main__":
import doctest
    doctest.testmod()
 | 534 |
import torch
from transformers import PreTrainedModel, XLMRobertaConfig, XLMRobertaModel
class MCLIPConfig(XLMRobertaConfig):
    model_type = "M-CLIP"

    def __init__(self, transformerDimSize=1024, imageDimSize=768, **kwargs):
        self.transformerDimensions = transformerDimSize
        self.numDims = imageDimSize
        super().__init__(**kwargs)


class MultilingualCLIP(PreTrainedModel):
    config_class = MCLIPConfig

    def __init__(self, config, *args, **kwargs):
        super().__init__(config, *args, **kwargs)
        self.transformer = XLMRobertaModel(config)
        self.LinearTransformation = torch.nn.Linear(
            in_features=config.transformerDimensions, out_features=config.numDims
        )

    def forward(self, input_ids, attention_mask):
        embs = self.transformer(input_ids=input_ids, attention_mask=attention_mask)[0]
        embs2 = (embs * attention_mask.unsqueeze(2)).sum(dim=1) / attention_mask.sum(dim=1)[:, None]
        return self.LinearTransformation(embs2), embs
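

# --- usage sketch (added; the checkpoint id is an assumption, not verified) ---
#     from transformers import AutoTokenizer
#
#     model = MultilingualCLIP.from_pretrained("M-CLIP/XLM-Roberta-Large-Vit-B-32")
#     tok = AutoTokenizer.from_pretrained("M-CLIP/XLM-Roberta-Large-Vit-B-32")
#     batch = tok(["una foto de un gato"], return_tensors="pt", padding=True)
#     projected, hidden = model(batch["input_ids"], batch["attention_mask"])
#     # `projected` is the mean-pooled text embedding after the linear map;
#     # `hidden` is the per-token XLM-R hidden state before pooling.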
| 332 | 0 |
import gc
import unittest
import numpy as np
import torch
from torch.backends.cuda import sdp_kernel
from diffusers import (
    CMStochasticIterativeScheduler,
    ConsistencyModelPipeline,
    UNet2DModel,
)
from diffusers.utils import randn_tensor, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_2, require_torch_gpu
from ..pipeline_params import UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS, UNCONDITIONAL_IMAGE_GENERATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class ConsistencyModelPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = ConsistencyModelPipeline
    params = UNCONDITIONAL_IMAGE_GENERATION_PARAMS
    batch_params = UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS

    # Override required_optional_params to remove num_images_per_prompt
    required_optional_params = frozenset(
        [
            "num_inference_steps",
            "generator",
            "latents",
            "output_type",
            "return_dict",
            "callback",
            "callback_steps",
        ]
    )

    @property
    def dummy_uncond_unet(self):
        unet = UNet2DModel.from_pretrained(
            "diffusers/consistency-models-test",
            subfolder="test_unet",
        )
        return unet

    @property
    def dummy_cond_unet(self):
        unet = UNet2DModel.from_pretrained(
            "diffusers/consistency-models-test",
            subfolder="test_unet_class_cond",
        )
        return unet

    def get_dummy_components(self, class_cond=False):
        if class_cond:
            unet = self.dummy_cond_unet
        else:
            unet = self.dummy_uncond_unet

        # Default to CM multistep sampler
        scheduler = CMStochasticIterativeScheduler(
            num_train_timesteps=40,
            sigma_min=0.002,
            sigma_max=80.0,
        )

        components = {
            "unet": unet,
            "scheduler": scheduler,
        }

        return components

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)

        inputs = {
            "batch_size": 1,
            "num_inference_steps": None,
            "timesteps": [22, 0],
            "generator": generator,
            "output_type": "np",
        }

        return inputs

    def test_consistency_model_pipeline_multistep(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        pipe = ConsistencyModelPipeline(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        image = pipe(**inputs).images
        assert image.shape == (1, 32, 32, 3)

        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.3572, 0.6273, 0.4031, 0.3961, 0.4321, 0.5730, 0.5266, 0.4780, 0.5004])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3

    def test_consistency_model_pipeline_multistep_class_cond(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components(class_cond=True)
        pipe = ConsistencyModelPipeline(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        inputs["class_labels"] = 0
        image = pipe(**inputs).images
        assert image.shape == (1, 32, 32, 3)

        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.3572, 0.6273, 0.4031, 0.3961, 0.4321, 0.5730, 0.5266, 0.4780, 0.5004])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3

    def test_consistency_model_pipeline_onestep(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        pipe = ConsistencyModelPipeline(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        inputs["num_inference_steps"] = 1
        inputs["timesteps"] = None
        image = pipe(**inputs).images
        assert image.shape == (1, 32, 32, 3)

        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.5004, 0.5004, 0.4994, 0.5008, 0.4976, 0.5018, 0.4990, 0.4982, 0.4987])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3

    def test_consistency_model_pipeline_onestep_class_cond(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components(class_cond=True)
        pipe = ConsistencyModelPipeline(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        inputs["num_inference_steps"] = 1
        inputs["timesteps"] = None
        inputs["class_labels"] = 0
        image = pipe(**inputs).images
        assert image.shape == (1, 32, 32, 3)

        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.5004, 0.5004, 0.4994, 0.5008, 0.4976, 0.5018, 0.4990, 0.4982, 0.4987])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3

@slow
@require_torch_gpu
class ConsistencyModelPipelineSlowTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def get_inputs(self, seed=0, get_fixed_latents=False, device="cpu", dtype=torch.float16, shape=(1, 3, 64, 64)):
        generator = torch.manual_seed(seed)

        inputs = {
            "num_inference_steps": None,
            "timesteps": [22, 0],
            "class_labels": 0,
            "generator": generator,
            "output_type": "np",
        }

        if get_fixed_latents:
            latents = self.get_fixed_latents(seed=seed, device=device, dtype=dtype, shape=shape)
            inputs["latents"] = latents

        return inputs

    def get_fixed_latents(self, seed=0, device="cpu", dtype=torch.float16, shape=(1, 3, 64, 64)):
        if type(device) == str:
            device = torch.device(device)
        generator = torch.Generator(device=device).manual_seed(seed)
        latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
        return latents

    def test_consistency_model_cd_multistep(self):
        unet = UNet2DModel.from_pretrained("diffusers/consistency_models", subfolder="diffusers_cd_imagenet64_l2")
        scheduler = CMStochasticIterativeScheduler(
            num_train_timesteps=40,
            sigma_min=0.002,
            sigma_max=80.0,
        )
        pipe = ConsistencyModelPipeline(unet=unet, scheduler=scheduler)
        pipe.to(torch_device=torch_device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_inputs()
        image = pipe(**inputs).images
        assert image.shape == (1, 64, 64, 3)

        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.0888, 0.0881, 0.0666, 0.0479, 0.0292, 0.0195, 0.0201, 0.0163, 0.0254])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 2e-2

    def test_consistency_model_cd_onestep(self):
        unet = UNet2DModel.from_pretrained("diffusers/consistency_models", subfolder="diffusers_cd_imagenet64_l2")
        scheduler = CMStochasticIterativeScheduler(
            num_train_timesteps=40,
            sigma_min=0.002,
            sigma_max=80.0,
        )
        pipe = ConsistencyModelPipeline(unet=unet, scheduler=scheduler)
        pipe.to(torch_device=torch_device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_inputs()
        inputs["num_inference_steps"] = 1
        inputs["timesteps"] = None
        image = pipe(**inputs).images
        assert image.shape == (1, 64, 64, 3)

        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.0340, 0.0152, 0.0063, 0.0267, 0.0221, 0.0107, 0.0416, 0.0186, 0.0217])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 2e-2

    @require_torch_2
    def test_consistency_model_cd_multistep_flash_attn(self):
        unet = UNet2DModel.from_pretrained("diffusers/consistency_models", subfolder="diffusers_cd_imagenet64_l2")
        scheduler = CMStochasticIterativeScheduler(
            num_train_timesteps=40,
            sigma_min=0.002,
            sigma_max=80.0,
        )
        pipe = ConsistencyModelPipeline(unet=unet, scheduler=scheduler)
        pipe.to(torch_device=torch_device, torch_dtype=torch.float16)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_inputs(get_fixed_latents=True, device=torch_device)
        # Ensure usage of flash attention in torch 2.0
        with sdp_kernel(enable_flash=True, enable_math=False, enable_mem_efficient=False):
            image = pipe(**inputs).images
        assert image.shape == (1, 64, 64, 3)

        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.1875, 0.1428, 0.1289, 0.2151, 0.2092, 0.1477, 0.1877, 0.1641, 0.1353])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3

    @require_torch_2
    def test_consistency_model_cd_onestep_flash_attn(self):
        unet = UNet2DModel.from_pretrained("diffusers/consistency_models", subfolder="diffusers_cd_imagenet64_l2")
        scheduler = CMStochasticIterativeScheduler(
            num_train_timesteps=40,
            sigma_min=0.002,
            sigma_max=80.0,
        )
        pipe = ConsistencyModelPipeline(unet=unet, scheduler=scheduler)
        pipe.to(torch_device=torch_device, torch_dtype=torch.float16)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_inputs(get_fixed_latents=True, device=torch_device)
        inputs["num_inference_steps"] = 1
        inputs["timesteps"] = None
        # Ensure usage of flash attention in torch 2.0
        with sdp_kernel(enable_flash=True, enable_math=False, enable_mem_efficient=False):
            image = pipe(**inputs).images
        assert image.shape == (1, 64, 64, 3)

        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.1663, 0.1948, 0.2275, 0.1680, 0.1204, 0.1245, 0.1858, 0.1338, 0.2095])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3
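

# --- usage sketch (added; mirrors the checkpoints exercised above) ---
#     pipe = ConsistencyModelPipeline(unet=unet, scheduler=scheduler)
#     image = pipe(num_inference_steps=None, timesteps=[22, 0]).images[0]
# Multistep sampling walks the explicit `timesteps` list, while passing
# num_inference_steps=1 with timesteps=None performs the single-step variant
# checked by the *_onestep tests.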
| 713 |
from pathlib import PurePosixPath
from typing import Optional
import fsspec
from fsspec import AbstractFileSystem
from huggingface_hub.hf_api import DatasetInfo
from ..utils.file_utils import get_authentication_headers_for_url
from ..utils.hub import hf_hub_url
class HfFileSystem(AbstractFileSystem):
    root_marker = ""
    protocol = "hf-legacy"  # "hf://"" is reserved for hffs

    def __init__(self, repo_info: Optional[DatasetInfo] = None, token: Optional[str] = None, **kwargs):
        super().__init__(self, **kwargs)
        self.repo_info = repo_info
        self.token = token
        self.dir_cache = None

    def _get_dirs(self):
        if self.dir_cache is None:
            self.dir_cache = {}
            for hf_file in self.repo_info.siblings:
                # TODO(QL): add sizes
                self.dir_cache[hf_file.rfilename] = {
                    "name": hf_file.rfilename,
                    "size": None,
                    "type": "file",
                }
                self.dir_cache.update(
                    {
                        str(d): {"name": str(d), "size": None, "type": "directory"}
                        for d in list(PurePosixPath(hf_file.rfilename).parents)[:-1]
                    }
                )

    def _open(self, path: str, mode: str = "rb", **kwargs):
        if not isinstance(self.repo_info, DatasetInfo):
            raise NotImplementedError(f"Open is only implemented for dataset repositories, but got {self.repo_info}")
        url = hf_hub_url(self.repo_info.id, path, revision=self.repo_info.sha)
        return fsspec.open(
            url,
            mode=mode,
            headers=get_authentication_headers_for_url(url, use_auth_token=self.token),
            client_kwargs={"trust_env": True},
        ).open()

    def info(self, path, **kwargs):
        self._get_dirs()
        path = self._strip_protocol(path)
        if path in self.dir_cache:
            return self.dir_cache[path]
        else:
            raise FileNotFoundError(path)

    def ls(self, path, detail=False, **kwargs):
        self._get_dirs()
        path = PurePosixPath(path.strip("/"))
        paths = {}
        for p, f in self.dir_cache.items():
            p = PurePosixPath(p.strip("/"))
            root = p.parent
            if root == path:
                paths[str(p)] = f
        out = list(paths.values())
        if detail:
            return out
        else:
            return sorted(f["name"] for f in out)
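

# --- usage sketch (added; `dataset_info` and "train.csv" are placeholders) ---
#     fs = HfFileSystem(repo_info=dataset_info, token=None)
#     fs.ls("")                          # top-level entries of the repo
#     with fs.open("train.csv") as f:    # streamed via hf_hub_url under the hood
#         head = f.read(1024)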
| 171 | 0 |
def print_pascal_triangle(num_rows: int) -> None:
    triangle = generate_pascal_triangle(num_rows)
    for row_idx in range(num_rows):
        # Print left spaces
        for _ in range(num_rows - row_idx - 1):
            print(end=" ")
        # Print row values
        for col_idx in range(row_idx + 1):
            if col_idx != row_idx:
                print(triangle[row_idx][col_idx], end=" ")
            else:
                print(triangle[row_idx][col_idx], end="")
        print()


def generate_pascal_triangle(num_rows: int) -> list[list[int]]:
    if not isinstance(num_rows, int):
        raise TypeError("The input value of 'num_rows' should be 'int'")
    if num_rows == 0:
        return []
    elif num_rows < 0:
        raise ValueError("The input value of 'num_rows' should be greater than or equal to 0")

    triangle: list[list[int]] = []
    for current_row_idx in range(num_rows):
        current_row = populate_current_row(triangle, current_row_idx)
        triangle.append(current_row)
    return triangle
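

# Illustrative example (added):
#     generate_pascal_triangle(4) == [[1], [1, 1], [1, 2, 1], [1, 3, 3, 1]]
# print_pascal_triangle(4) renders the same rows centred into a pyramid.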


def populate_current_row(triangle: list[list[int]], current_row_idx: int) -> list[int]:
    current_row = [-1] * (current_row_idx + 1)
    # first and last elements of current row are equal to 1
    current_row[0], current_row[-1] = 1, 1
    for current_col_idx in range(1, current_row_idx):
        calculate_current_element(triangle, current_row, current_row_idx, current_col_idx)
    return current_row


def calculate_current_element(
    triangle: list[list[int]],
    current_row: list[int],
    current_row_idx: int,
    current_col_idx: int,
) -> None:
    above_to_left_elt = triangle[current_row_idx - 1][current_col_idx - 1]
    above_to_right_elt = triangle[current_row_idx - 1][current_col_idx]
    current_row[current_col_idx] = above_to_left_elt + above_to_right_elt


def generate_pascal_triangle_optimized(num_rows: int) -> list[list[int]]:
    if not isinstance(num_rows, int):
        raise TypeError("The input value of 'num_rows' should be 'int'")
    if num_rows == 0:
        return []
    elif num_rows < 0:
        raise ValueError("The input value of 'num_rows' should be greater than or equal to 0")

    result: list[list[int]] = [[1]]

    for row_index in range(1, num_rows):
        temp_row = [0] + result[-1] + [0]
        row_length = row_index + 1
        # Calculate the number of distinct elements in a row
        distinct_elements = sum(divmod(row_length, 2))
        row_first_half = [temp_row[i - 1] + temp_row[i] for i in range(1, distinct_elements + 1)]
        row_second_half = row_first_half[: (row_index + 1) // 2]
        row_second_half.reverse()
        row = row_first_half + row_second_half
        result.append(row)

    return result


def benchmark() -> None:
    from collections.abc import Callable
    from timeit import timeit

    def benchmark_a_function(func: Callable, value: int) -> None:
        call = f"{func.__name__}({value})"
        timing = timeit(f"__main__.{call}", setup="import __main__")
        # print(f"{call:38} = {func(value)} -- {timing:.4f} seconds")
        print(f"{call:38} -- {timing:.4f} seconds")

    for value in range(15):  # (1, 7, 14):
        for func in (generate_pascal_triangle, generate_pascal_triangle_optimized):
            benchmark_a_function(func, value)
        print()
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
| 598 |
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Features, Sequence, Value
from .base import TaskTemplate
@dataclass(frozen=True)
class QuestionAnsweringExtractive(TaskTemplate):
    # `task` is not a ClassVar since we want it to be part of the `asdict` output for JSON serialization
    task: str = field(default="question-answering-extractive", metadata={"include_in_asdict_even_if_is_default": True})
    input_schema: ClassVar[Features] = Features({"question": Value("string"), "context": Value("string")})
    label_schema: ClassVar[Features] = Features(
        {
            "answers": Sequence(
                {
                    "text": Value("string"),
                    "answer_start": Value("int32"),
                }
            )
        }
    )
    question_column: str = "question"
    context_column: str = "context"
    answers_column: str = "answers"

    @property
    def column_mapping(self) -> Dict[str, str]:
        return {self.question_column: "question", self.context_column: "context", self.answers_column: "answers"}
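

# --- usage sketch (added) ---
#     task = QuestionAnsweringExtractive(
#         question_column="q", context_column="passage", answers_column="answers"
#     )
#     task.column_mapping  # {"q": "question", "passage": "context", "answers": "answers"}
# `datasets` uses this mapping to rename dataset columns to the schema above.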
| 598 | 1 |
'''simple docstring'''
from collections.abc import Sequence
def max_subsequence_sum(nums: Sequence[int] | None = None) -> int:
    if nums is None or not nums:
        raise ValueError("Input sequence should not be empty")

    ans = nums[0]
    for i in range(1, len(nums)):
        num = nums[i]
        ans = max(ans, ans + num, num)

    return ans
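

# Illustrative example (added): max_subsequence_sum([-2, 1, -3, 4, -1, 2, 1, -5, 4])
# returns 6, the sum of the contiguous slice [4, -1, 2, 1], i.e. the classic
# Kadane's-algorithm result.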


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    # Try on a sample input from the user
    n = int(input("Enter number of elements : ").strip())
    array = list(map(int, input("\nEnter the numbers : ").strip().split()))[:n]
    print(max_subsequence_sum(array))
 | 715 |
import unittest
from transformers import AutoTokenizer, is_flax_available
from transformers.testing_utils import require_flax, require_sentencepiece, require_tokenizers, slow
if is_flax_available():
import jax.numpy as jnp
from transformers import FlaxXLMRobertaModel
@require_sentencepiece
@require_tokenizers
@require_flax
class FlaxXLMRobertaModelIntegrationTest(unittest.TestCase):
    @slow
    def test_flax_xlm_roberta_base(self):
        model = FlaxXLMRobertaModel.from_pretrained("xlm-roberta-base")
        tokenizer = AutoTokenizer.from_pretrained("xlm-roberta-base")
        text = "The dog is cute and lives in the garden house"
        input_ids = jnp.array([tokenizer.encode(text)])
        expected_output_shape = (1, 12, 768)  # batch_size, sequence_length, embedding_vector_dim
        expected_output_values_last_dim = jnp.array(
            [[-0.0101, 0.1218, -0.0803, 0.0801, 0.1327, 0.0776, -0.1215, 0.2383, 0.3338, 0.3106, 0.0300, 0.0252]]
        )

        output = model(input_ids)["last_hidden_state"]
        self.assertEqual(output.shape, expected_output_shape)
        # compare the actual values for a slice of last dim
        self.assertTrue(jnp.allclose(output[:, :, -1], expected_output_values_last_dim, atol=1e-3))
| 389 | 0 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

MEGATRON_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    # See all MEGATRON_BERT models at https://huggingface.co/models?filter=bert
}


class MegatronBertConfig(PretrainedConfig):
    model_type = "megatron-bert"

    def __init__(self, vocab_size=29056, hidden_size=1024, num_hidden_layers=24, num_attention_heads=16, intermediate_size=4096, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=2, initializer_range=0.02, layer_norm_eps=1e-12, pad_token_id=0, position_embedding_type="absolute", use_cache=True, **kwargs):
        super().__init__(pad_token_id=pad_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
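

# --- usage sketch (added) ---
#     config = MegatronBertConfig()  # defaults above: 1024 hidden, 24 layers
#     small = MegatronBertConfig(hidden_size=768, num_hidden_layers=12)
#     print(small.to_json_string())  # serializable like any PretrainedConfig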
| 51 |
# DISCLAIMER: This code is strongly influenced by https://github.com/pesser/pytorch_diffusion
# and https://github.com/hojonathanho/diffusion
import math
from dataclasses import dataclass
from typing import List, Optional, Tuple, Union
import numpy as np
import torch
from diffusers.configuration_utils import ConfigMixin, register_to_config
from diffusers.schedulers.scheduling_utils import SchedulerMixin
from diffusers.utils import BaseOutput, deprecate
@dataclass
# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->DDIM
class DDIMSchedulerOutput(BaseOutput):
    prev_sample: torch.FloatTensor
    pred_original_sample: Optional[torch.FloatTensor] = None


def betas_for_alpha_bar(num_diffusion_timesteps, max_beta=0.999, alpha_transform_type="cosine"):
    if alpha_transform_type == "cosine":

        def alpha_bar_fn(t):
            return math.cos((t + 0.008) / 1.008 * math.pi / 2) ** 2

    elif alpha_transform_type == "exp":

        def alpha_bar_fn(t):
            return math.exp(t * -12.0)

    else:
        raise ValueError(f"Unsupported alpha_tranform_type: {alpha_transform_type}")

    betas = []
    for i in range(num_diffusion_timesteps):
        t1 = i / num_diffusion_timesteps
        t2 = (i + 1) / num_diffusion_timesteps
        betas.append(min(1 - alpha_bar_fn(t2) / alpha_bar_fn(t1), max_beta))
    return torch.tensor(betas, dtype=torch.float32)
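

# Illustrative note (added): with the default "cosine" transform, the betas
# start near zero and grow toward max_beta at the end of the schedule, since
# each beta is 1 - alpha_bar(t2) / alpha_bar(t1) for consecutive timesteps.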


class DDIMInverseScheduler(SchedulerMixin, ConfigMixin):
    order = 1

    @register_to_config
    def __init__(
        self,
        num_train_timesteps: int = 1000,
        beta_start: float = 0.0001,
        beta_end: float = 0.02,
        beta_schedule: str = "linear",
        trained_betas: Optional[Union[np.ndarray, List[float]]] = None,
        clip_sample: bool = True,
        set_alpha_to_zero: bool = True,
        steps_offset: int = 0,
        prediction_type: str = "epsilon",
        clip_sample_range: float = 1.0,
        **kwargs,
    ):
        if kwargs.get("set_alpha_to_one", None) is not None:
            deprecation_message = (
                "The `set_alpha_to_one` argument is deprecated. Please use `set_alpha_to_zero` instead."
            )
            deprecate("set_alpha_to_one", "1.0.0", deprecation_message, standard_warn=False)
            set_alpha_to_zero = kwargs["set_alpha_to_one"]
        if trained_betas is not None:
            self.betas = torch.tensor(trained_betas, dtype=torch.float32)
        elif beta_schedule == "linear":
            self.betas = torch.linspace(beta_start, beta_end, num_train_timesteps, dtype=torch.float32)
        elif beta_schedule == "scaled_linear":
            # this schedule is very specific to the latent diffusion model.
            self.betas = (
                torch.linspace(beta_start**0.5, beta_end**0.5, num_train_timesteps, dtype=torch.float32) ** 2
            )
        elif beta_schedule == "squaredcos_cap_v2":
            # Glide cosine schedule
            self.betas = betas_for_alpha_bar(num_train_timesteps)
        else:
            raise NotImplementedError(f"{beta_schedule} does is not implemented for {self.__class__}")

        self.alphas = 1.0 - self.betas
        self.alphas_cumprod = torch.cumprod(self.alphas, dim=0)

        # At every step in inverted ddim, we are looking into the next alphas_cumprod
        # For the final step, there is no next alphas_cumprod, and the index is out of bounds
        # `set_alpha_to_zero` decides whether we set this parameter simply to zero
        # in this case, self.step() just output the predicted noise
        # or whether we use the final alpha of the "non-previous" one.
        self.final_alpha_cumprod = torch.tensor(0.0) if set_alpha_to_zero else self.alphas_cumprod[-1]

        # standard deviation of the initial noise distribution
        self.init_noise_sigma = 1.0

        # setable values
        self.num_inference_steps = None
        self.timesteps = torch.from_numpy(np.arange(0, num_train_timesteps).copy().astype(np.int64))

    def scale_model_input(self, sample: torch.FloatTensor, timestep: Optional[int] = None) -> torch.FloatTensor:
        return sample

    def set_timesteps(self, num_inference_steps: int, device: Union[str, torch.device] = None):
        if num_inference_steps > self.config.num_train_timesteps:
            raise ValueError(
                f"`num_inference_steps`: {num_inference_steps} cannot be larger than `self.config.train_timesteps`:"
                f" {self.config.num_train_timesteps} as the unet model trained with this scheduler can only handle"
                f" maximal {self.config.num_train_timesteps} timesteps."
            )

        self.num_inference_steps = num_inference_steps
        step_ratio = self.config.num_train_timesteps // self.num_inference_steps
        # creates integer timesteps by multiplying by ratio
        # casting to int to avoid issues when num_inference_step is power of 3
        timesteps = (np.arange(0, num_inference_steps) * step_ratio).round().copy().astype(np.int64)
        self.timesteps = torch.from_numpy(timesteps).to(device)
        self.timesteps += self.config.steps_offset

    def step(
        self,
        model_output: torch.FloatTensor,
        timestep: int,
        sample: torch.FloatTensor,
        eta: float = 0.0,
        use_clipped_model_output: bool = False,
        variance_noise: Optional[torch.FloatTensor] = None,
        return_dict: bool = True,
    ) -> Union[DDIMSchedulerOutput, Tuple]:
        # 1. get previous step value (=t+1)
        prev_timestep = timestep + self.config.num_train_timesteps // self.num_inference_steps

        # 2. compute alphas, betas
        # change original implementation to exactly match noise levels for analogous forward process
        alpha_prod_t = self.alphas_cumprod[timestep]
        alpha_prod_t_prev = (
            self.alphas_cumprod[prev_timestep]
            if prev_timestep < self.config.num_train_timesteps
            else self.final_alpha_cumprod
        )

        beta_prod_t = 1 - alpha_prod_t

        # 3. compute predicted original sample from predicted noise also called
        # "predicted x_0" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
        if self.config.prediction_type == "epsilon":
            pred_original_sample = (sample - beta_prod_t**0.5 * model_output) / alpha_prod_t**0.5
            pred_epsilon = model_output
        elif self.config.prediction_type == "sample":
            pred_original_sample = model_output
            pred_epsilon = (sample - alpha_prod_t**0.5 * pred_original_sample) / beta_prod_t**0.5
        elif self.config.prediction_type == "v_prediction":
            pred_original_sample = (alpha_prod_t**0.5) * sample - (beta_prod_t**0.5) * model_output
            pred_epsilon = (alpha_prod_t**0.5) * model_output + (beta_prod_t**0.5) * sample
        else:
            raise ValueError(
                f"prediction_type given as {self.config.prediction_type} must be one of `epsilon`, `sample`, or"
                " `v_prediction`"
            )

        # 4. Clip or threshold "predicted x_0"
        if self.config.clip_sample:
            pred_original_sample = pred_original_sample.clamp(
                -self.config.clip_sample_range, self.config.clip_sample_range
            )

        # 5. compute "direction pointing to x_t" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
        pred_sample_direction = (1 - alpha_prod_t_prev) ** 0.5 * pred_epsilon

        # 6. compute x_t without "random noise" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
        prev_sample = alpha_prod_t_prev**0.5 * pred_original_sample + pred_sample_direction

        if not return_dict:
            return (prev_sample, pred_original_sample)
        return DDIMSchedulerOutput(prev_sample=prev_sample, pred_original_sample=pred_original_sample)

    def __len__(self):
        return self.config.num_train_timesteps
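

# --- usage sketch (added; `unet` and `latents` are placeholders) ---
# DDIM inversion runs diffusion forward in time, mapping a clean latent to its
# noised counterpart so a later DDIM pass can reconstruct or edit it:
#
#     scheduler = DDIMInverseScheduler(num_train_timesteps=1000)
#     scheduler.set_timesteps(50)
#     for t in scheduler.timesteps:
#         noise_pred = unet(latents, t).sample
#         latents = scheduler.step(noise_pred, t, latents).prev_sample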
| 696 | 0 |
'''simple docstring'''
# This script creates a super tiny model that is useful inside tests, when we just want to test that
# the machinery works, without needing to the check the quality of the outcomes.
#
# This version creates a tiny vocab first, and then a tiny model - so the outcome is truly tiny -
# all files ~60KB. As compared to taking a full-size model, reducing to the minimum its layers and
# emb dimensions, but keeping the full vocab + merges files, leading to ~3MB in total for all files.
# The latter is done by `fsmt-make-super-tiny-model.py`.
#
# It will be used then as "stas/tiny-wmt19-en-ru"
from pathlib import Path
import json
import tempfile
from transformers import FSMTTokenizer, FSMTConfig, FSMTForConditionalGeneration
from transformers.models.fsmt.tokenization_fsmt import VOCAB_FILES_NAMES
mname_tiny = "tiny-wmt19-en-ru"
# Build
# borrowed from a test
vocab = [
'''l''',
'''o''',
'''w''',
'''e''',
'''r''',
'''s''',
'''t''',
'''i''',
'''d''',
'''n''',
'''w</w>''',
'''r</w>''',
'''t</w>''',
'''lo''',
'''low''',
'''er</w>''',
'''low</w>''',
'''lowest</w>''',
'''newer</w>''',
'''wider</w>''',
'''<unk>''',
]
vocab_tokens = dict(zip(vocab, range(len(vocab))))
merges = ["l o 123", "lo w 1456", "e r</w> 1789", ""]
with tempfile.TemporaryDirectory() as tmpdirname:
    build_dir = Path(tmpdirname)
    src_vocab_file = build_dir / VOCAB_FILES_NAMES["src_vocab_file"]
    tgt_vocab_file = build_dir / VOCAB_FILES_NAMES["tgt_vocab_file"]
    merges_file = build_dir / VOCAB_FILES_NAMES["merges_file"]
with open(src_vocab_file, '''w''') as fp:
fp.write(json.dumps(vocab_tokens))
with open(tgt_vocab_file, '''w''') as fp:
fp.write(json.dumps(vocab_tokens))
with open(merges_file, '''w''') as fp:
fp.write('''\n'''.join(merges))
    tokenizer = FSMTTokenizer(
langs=['''en''', '''ru'''],
src_vocab_size=len(vocab),
tgt_vocab_size=len(vocab),
src_vocab_file=src_vocab_file,
tgt_vocab_file=tgt_vocab_file,
merges_file=merges_file,
)
config = FSMTConfig(
langs=['''ru''', '''en'''],
src_vocab_size=1_0_0_0,
tgt_vocab_size=1_0_0_0,
d_model=4,
encoder_layers=1,
decoder_layers=1,
encoder_ffn_dim=4,
decoder_ffn_dim=4,
encoder_attention_heads=1,
decoder_attention_heads=1,
)
tiny_model = FSMTForConditionalGeneration(config)
print(F'''num of params {tiny_model.num_parameters()}''')
# Test
batch = tokenizer(["Making tiny model"], return_tensors="pt")
outputs = tiny_model(**batch)
print('''test output:''', len(outputs.logits[0]))
# Save
tiny_model.half() # makes it smaller
tiny_model.save_pretrained(mname_tiny)
tokenizer.save_pretrained(mname_tiny)
print(F'''Generated {mname_tiny}''')
# Upload
# transformers-cli upload tiny-wmt19-en-ru
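
# Once uploaded, the tiny checkpoint loads like any other model (added note;
# the repo id comes from the comment near the top of this script):
#
#     from transformers import FSMTForConditionalGeneration
#     model = FSMTForConditionalGeneration.from_pretrained("stas/tiny-wmt19-en-ru")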
| 4 |
'''simple docstring'''
import json
import os
import re
import shutil
import tempfile
import unittest
from typing import Tuple
from transformers import AddedToken, BatchEncoding, ByT5Tokenizer
from transformers.utils import cached_property, is_tf_available, is_torch_available
from ...test_tokenization_common import TokenizerTesterMixin
if is_torch_available():
    FRAMEWORK = "pt"
elif is_tf_available():
    FRAMEWORK = "tf"
else:
    FRAMEWORK = "jax"


class ByT5TokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = ByT5Tokenizer
    test_rust_tokenizer = False

    def setUp(self):
        super().setUp()
        tokenizer = ByT5Tokenizer()
        tokenizer.save_pretrained(self.tmpdirname)

    @cached_property
    def t5_base_tokenizer(self):
        return ByT5Tokenizer.from_pretrained("google/byt5-small")

    def get_tokenizer(self, **kwargs) -> ByT5Tokenizer:
        return self.tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)

    def get_clean_sequence(self, tokenizer, with_prefix_space=False, max_length=20, min_length=5) -> Tuple[str, list]:
        # The default common tokenizer tests assume every id is decodable on its own;
        # that fails for ByT5 since single bytes are not always valid utf-8.
        toks = []
        for i in range(len(tokenizer)):
            try:
                tok = tokenizer.decode([i], clean_up_tokenization_spaces=False)
            except UnicodeDecodeError:
                pass
            toks.append((i, tok))

        toks = list(filter(lambda t: re.match(r"^[ a-zA-Z]+$", t[1]), toks))
        toks = list(filter(lambda t: [t[0]] == tokenizer.encode(t[1], add_special_tokens=False), toks))
        if max_length is not None and len(toks) > max_length:
            toks = toks[:max_length]
        if min_length is not None and len(toks) < min_length and len(toks) > 0:
            while len(toks) < min_length:
                toks = toks + toks
        # toks_str = [t[1] for t in toks]
        toks_ids = [t[0] for t in toks]

        # Ensure consistency
        output_txt = tokenizer.decode(toks_ids, clean_up_tokenization_spaces=False)
        if " " not in output_txt and len(toks_ids) > 1:
            output_txt = (
                tokenizer.decode([toks_ids[0]], clean_up_tokenization_spaces=False)
                + " "
                + tokenizer.decode(toks_ids[1:], clean_up_tokenization_spaces=False)
            )
        if with_prefix_space:
            output_txt = " " + output_txt
        output_ids = tokenizer.encode(output_txt, add_special_tokens=False)
        return output_txt, output_ids
    def test_eos_treatment(self):
        tokenizer = self.t5_base_tokenizer
        batch_with_eos_added = tokenizer(["hi</s>", "I went to the gym</s>", "</s>"])
        batch_without_eos_added = tokenizer(["hi", "I went to the gym", ""])
        self.assertListEqual(batch_with_eos_added["input_ids"], batch_without_eos_added["input_ids"])

    def test_multibytes_char(self):
        tokenizer = self.t5_base_tokenizer
        src_text = "Unicode €."
        encoded = tokenizer(src_text)
        encoded_ids = [88, 113, 108, 102, 114, 103, 104, 35, 229, 133, 175, 49, 1]
        self.assertEqual(encoded["input_ids"], encoded_ids)

        # decoding
        decoded = tokenizer.decode(encoded_ids)
        self.assertEqual(decoded, "Unicode €.</s>")

        encoded = tokenizer("e è é ê ë")
        encoded_ids = [104, 35, 198, 171, 35, 198, 172, 35, 198, 173, 35, 198, 174, 1]
        self.assertEqual(encoded["input_ids"], encoded_ids)
        # decoding
        decoded = tokenizer.decode(encoded_ids)
        self.assertEqual(decoded, "e è é ê ë</s>")

        # encode/decode, but with `encode` instead of `__call__`
        self.assertEqual(tokenizer.decode(tokenizer.encode("e è é ê ë")), "e è é ê ë</s>")

    def test_prepare_batch_integration(self):
        tokenizer = self.t5_base_tokenizer
        src_text = ["A long paragraph for summarization.", "Another paragraph for summarization."]
        # fmt: off
        expected_src_tokens = [68, 35, 111, 114, 113, 106, 35, 115, 100, 117, 100, 106, 117, 100, 115, 107, 35, 105, 114, 117, 35, 118, 120, 112, 112, 100, 117, 108, 125, 100, 119, 108, 114, 113, 49, 1, 0]
        # fmt: on
        batch = tokenizer(src_text, padding=True, return_tensors=FRAMEWORK)
        self.assertIsInstance(batch, BatchEncoding)

        if FRAMEWORK != "jax":
            result = list(batch.input_ids.numpy()[0])
        else:
            result = list(batch.input_ids.tolist()[0])

        self.assertListEqual(expected_src_tokens, result)

        self.assertEqual((2, 37), batch.input_ids.shape)
        self.assertEqual((2, 37), batch.attention_mask.shape)

    def test_empty_target_text(self):
        tokenizer = self.t5_base_tokenizer
        src_text = ["A long paragraph for summarization.", "Another paragraph for summarization."]
        batch = tokenizer(src_text, padding=True, return_tensors=FRAMEWORK)
        # check if input_ids are returned and no decoder_input_ids
        self.assertIn("input_ids", batch)
        self.assertIn("attention_mask", batch)
        self.assertNotIn("decoder_input_ids", batch)
        self.assertNotIn("decoder_attention_mask", batch)

    def test_max_length_integration(self):
        tokenizer = self.t5_base_tokenizer
        tgt_text = [
            "Summary of the text.",
            "Another summary.",
        ]
        targets = tokenizer(
            text_target=tgt_text, max_length=32, padding="max_length", truncation=True, return_tensors=FRAMEWORK
        )
        self.assertEqual(32, targets["input_ids"].shape[1])

    def test_eos_in_input(self):
        tokenizer = self.t5_base_tokenizer
        src_text = ["A long paragraph for summarization. </s>"]
        tgt_text = ["Summary of the text. </s>"]
        # fmt: off
        expected_src_tokens = [68, 35, 111, 114, 113, 106, 35, 115, 100, 117, 100, 106, 117, 100, 115, 107, 35, 105, 114, 117, 35, 118, 120, 112, 112, 100, 117, 108, 125, 100, 119, 108, 114, 113, 49, 35, 1]
        expected_tgt_tokens = [86, 120, 112, 112, 100, 117, 124, 35, 114, 105, 35, 119, 107, 104, 35, 119, 104, 123, 119, 49, 35, 1]
        # fmt: on
        batch = tokenizer(src_text, text_target=tgt_text)
        self.assertEqual(expected_src_tokens, batch["input_ids"][0])
        self.assertEqual(expected_tgt_tokens, batch["labels"][0])
    def test_save_and_load_tokenizer(self):
        # safety check on max_len default value so we are sure the test works
        tokenizers = self.get_tokenizers()
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                self.assertNotEqual(tokenizer.model_max_length, 42)

        # Now let's start the test
        tokenizers = self.get_tokenizers()
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                # Isolate this from the other tests because we save additional tokens/etc
                tmpdirname = tempfile.mkdtemp()

                sample_text = " He is very happy, UNwant\u00E9d,running"
                before_tokens = tokenizer.encode(sample_text, add_special_tokens=False)
                tokenizer.save_pretrained(tmpdirname)

                after_tokenizer = tokenizer.__class__.from_pretrained(tmpdirname)
                after_tokens = after_tokenizer.encode(sample_text, add_special_tokens=False)
                self.assertListEqual(before_tokens, after_tokens)

                shutil.rmtree(tmpdirname)

        tokenizers = self.get_tokenizers(model_max_length=42)
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                # Isolate this from the other tests because we save additional tokens/etc
                tmpdirname = tempfile.mkdtemp()

                sample_text = " He is very happy, UNwant\u00E9d,running"
                tokenizer.add_tokens(["bim", "bambam"])
                additional_special_tokens = tokenizer.additional_special_tokens
                additional_special_tokens.append("new_additional_special_token")
                tokenizer.add_special_tokens({"additional_special_tokens": additional_special_tokens})
                before_tokens = tokenizer.encode(sample_text, add_special_tokens=False)
                tokenizer.save_pretrained(tmpdirname)

                after_tokenizer = tokenizer.__class__.from_pretrained(tmpdirname)
                after_tokens = after_tokenizer.encode(sample_text, add_special_tokens=False)
                self.assertListEqual(before_tokens, after_tokens)
                self.assertIn("new_additional_special_token", after_tokenizer.additional_special_tokens)
                self.assertEqual(after_tokenizer.model_max_length, 42)

                tokenizer = tokenizer.__class__.from_pretrained(tmpdirname, model_max_length=43)
                self.assertEqual(tokenizer.model_max_length, 43)

                shutil.rmtree(tmpdirname)
    def test_special_tokens_initialization_with_non_empty_additional_special_tokens(self):
        tokenizer_list = []
        if self.test_slow_tokenizer:
            tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()))
        if self.test_rust_tokenizer:
            tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()))

        for tokenizer_class, tokenizer_utils in tokenizer_list:
            with tempfile.TemporaryDirectory() as tmp_dir:
                tokenizer_utils.save_pretrained(tmp_dir)

                with open(os.path.join(tmp_dir, "special_tokens_map.json"), encoding="utf-8") as json_file:
                    special_tokens_map = json.load(json_file)

                with open(os.path.join(tmp_dir, "tokenizer_config.json"), encoding="utf-8") as json_file:
                    tokenizer_config = json.load(json_file)

                added_tokens_extra_ids = [f"<extra_id_{i}>" for i in range(125)]

                special_tokens_map["additional_special_tokens"] = added_tokens_extra_ids + [
                    "an_additional_special_token"
                ]
                tokenizer_config["additional_special_tokens"] = added_tokens_extra_ids + [
                    "an_additional_special_token"
                ]

                with open(os.path.join(tmp_dir, "special_tokens_map.json"), "w", encoding="utf-8") as outfile:
                    json.dump(special_tokens_map, outfile)
                with open(os.path.join(tmp_dir, "tokenizer_config.json"), "w", encoding="utf-8") as outfile:
                    json.dump(tokenizer_config, outfile)

                # the following checks allow us to verify that our test works as expected, i.e. that the tokenizer takes
                # into account the new value of additional_special_tokens given in the "tokenizer_config.json" and
                # "special_tokens_map.json" files
                tokenizer_without_change_in_init = tokenizer_class.from_pretrained(
                    tmp_dir,
                )
                self.assertIn(
                    "an_additional_special_token", tokenizer_without_change_in_init.additional_special_tokens
                )
                # self.assertIn("an_additional_special_token",tokenizer_without_change_in_init.get_vocab()) # ByT5Tokenization no vocab
                self.assertEqual(
                    ["an_additional_special_token"],
                    tokenizer_without_change_in_init.convert_ids_to_tokens(
                        tokenizer_without_change_in_init.convert_tokens_to_ids(["an_additional_special_token"])
                    ),
                )

                # Now we test that we can change the value of additional_special_tokens in the from_pretrained
                new_added_tokens = added_tokens_extra_ids + [AddedToken("a_new_additional_special_token", lstrip=False)]
                tokenizer = tokenizer_class.from_pretrained(
                    tmp_dir,
                    additional_special_tokens=new_added_tokens,
                )

                self.assertIn("a_new_additional_special_token", tokenizer.additional_special_tokens)
                self.assertEqual(
                    ["a_new_additional_special_token"],
                    tokenizer.convert_ids_to_tokens(
                        tokenizer.convert_tokens_to_ids(["a_new_additional_special_token"])
                    ),
                )
    def test_decode_single_bytes(self):
        tokenizer_list = []
        if self.test_slow_tokenizer:
            tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()))
        if self.test_rust_tokenizer:
            tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()))

        for tokenizer_class, tokenizer_utils in tokenizer_list:
            with tempfile.TemporaryDirectory() as tmp_dir:
                tokenizer_utils.save_pretrained(tmp_dir)

                tokenizer = tokenizer_class.from_pretrained(tmp_dir)

                self.assertTrue(tokenizer.decode([255]) == "")

    # tokenizer can be instantiated without any pretrained files, so no need for a pretrained tokenizer list
    def test_pretrained_model_lists(self):
        pass

    # tokenizer does not have vocabulary
    def test_get_vocab(self):
        pass

    # inputs cannot be pretokenized since ids depend on the whole input string, not just single characters
    def test_pretokenized_inputs(self):
        pass

    # tests all ids in vocab => vocab doesn't exist so unnecessary to test
    def test_conversion_reversible(self):
        pass

    def test_convert_tokens_to_string_format(self):
        # The default common tokenizer tests use invalid tokens for ByT5 that can only be accepted as bytes
        tokenizers = self.get_tokenizers(fast=True, do_lower_case=True)
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                tokens = ["t", "h", "i", "s", " ", "i", "s", " ", "a", " ", "t", "e", "x", "t", "</s>"]
                string = tokenizer.convert_tokens_to_string(tokens)
                self.assertIsInstance(string, str)

    # We need a different implementation of the test of the same name defined in TokenizerTesterMixin
    # because this tokenizer doesn't have a vocab
    def test_tokenizers_common_ids_setters(self):
        tokenizers = self.get_tokenizers()
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                attributes_list = [
                    "bos_token",
                    "eos_token",
                    "unk_token",
                    "sep_token",
                    "pad_token",
                    "cls_token",
                    "mask_token",
                ]

                token_id_to_test_setters = 0
                token_to_test_setters = tokenizer.convert_ids_to_tokens(
                    token_id_to_test_setters, skip_special_tokens=False
                )

                for attr in attributes_list:
                    setattr(tokenizer, attr + "_id", None)
                    self.assertEqual(getattr(tokenizer, attr), None)
                    self.assertEqual(getattr(tokenizer, attr + "_id"), None)

                    setattr(tokenizer, attr + "_id", token_id_to_test_setters)
                    self.assertEqual(getattr(tokenizer, attr), token_to_test_setters)
                    self.assertEqual(getattr(tokenizer, attr + "_id"), token_id_to_test_setters)

                setattr(tokenizer, "additional_special_tokens_ids", [])
                self.assertListEqual(getattr(tokenizer, "additional_special_tokens"), [])
                self.assertListEqual(getattr(tokenizer, "additional_special_tokens_ids"), [])

                setattr(tokenizer, "additional_special_tokens_ids", [token_id_to_test_setters])
                self.assertListEqual(getattr(tokenizer, "additional_special_tokens"), [token_to_test_setters])
                self.assertListEqual(getattr(tokenizer, "additional_special_tokens_ids"), [token_id_to_test_setters])
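

# Illustrative note (added): ByT5 tokenizes raw UTF-8 bytes with a +3 offset
# reserved for the special ids (pad=0, eos=1, unk=2), so
#     ByT5Tokenizer.from_pretrained("google/byt5-small")("hi")["input_ids"]
# yields [107, 108, 1]  # ord("h") + 3, ord("i") + 3, then </s>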
| 4 | 1 |
import tempfile
import torch
from diffusers import (
DEISMultistepScheduler,
DPMSolverMultistepScheduler,
DPMSolverSinglestepScheduler,
UniPCMultistepScheduler,
)
from .test_schedulers import SchedulerCommonTest
class _snake_case ( __a ):
"""simple docstring"""
lowerCamelCase_ = (DEISMultistepScheduler,)
lowerCamelCase_ = (('''num_inference_steps''', 2_5),)
def lowercase_ ( self , **a ) -> List[Any]:
"""simple docstring"""
_A = {
'''num_train_timesteps''': 1_0_0_0,
'''beta_start''': 0.0_001,
'''beta_end''': 0.02,
'''beta_schedule''': '''linear''',
'''solver_order''': 2,
}
config.update(**_lowercase )
return config
def lowercase_ ( self , a=0 , **a ) -> str:
"""simple docstring"""
_A = dict(self.forward_default_kwargs )
_A = kwargs.pop('''num_inference_steps''' , _lowercase )
_A = self.dummy_sample
_A = 0.1 * sample
_A = [residual + 0.2, residual + 0.15, residual + 0.10]
for scheduler_class in self.scheduler_classes:
_A = self.get_scheduler_config(**_lowercase )
_A = scheduler_class(**_lowercase )
scheduler.set_timesteps(_lowercase )
# copy over dummy past residuals
_A = dummy_past_residuals[: scheduler.config.solver_order]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(_lowercase )
_A = scheduler_class.from_pretrained(_lowercase )
new_scheduler.set_timesteps(_lowercase )
# copy over dummy past residuals
_A = dummy_past_residuals[: new_scheduler.config.solver_order]
_A = sample, sample
for t in range(_lowercase , time_step + scheduler.config.solver_order + 1 ):
_A = scheduler.step(_lowercase , _lowercase , _lowercase , **_lowercase ).prev_sample
_A = new_scheduler.step(_lowercase , _lowercase , _lowercase , **_lowercase ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"
def lowercase_ ( self ) -> Union[str, Any]:
"""simple docstring"""
pass
def lowercase_ ( self , a=0 , **a ) -> str:
"""simple docstring"""
_A = dict(self.forward_default_kwargs )
_A = kwargs.pop('''num_inference_steps''' , _lowercase )
_A = self.dummy_sample
_A = 0.1 * sample
_A = [residual + 0.2, residual + 0.15, residual + 0.10]
for scheduler_class in self.scheduler_classes:
_A = self.get_scheduler_config()
_A = scheduler_class(**_lowercase )
scheduler.set_timesteps(_lowercase )
# copy over dummy past residuals (must be after setting timesteps)
_A = dummy_past_residuals[: scheduler.config.solver_order]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(_lowercase )
_A = scheduler_class.from_pretrained(_lowercase )
# copy over dummy past residuals
new_scheduler.set_timesteps(_lowercase )
# copy over dummy past residual (must be after setting timesteps)
_A = dummy_past_residuals[: new_scheduler.config.solver_order]
_A = scheduler.step(_lowercase , _lowercase , _lowercase , **_lowercase ).prev_sample
_A = new_scheduler.step(_lowercase , _lowercase , _lowercase , **_lowercase ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"
def lowercase_ ( self , a=None , **a ) -> List[str]:
"""simple docstring"""
if scheduler is None:
_A = self.scheduler_classes[0]
_A = self.get_scheduler_config(**_lowercase )
_A = scheduler_class(**_lowercase )
_A = self.scheduler_classes[0]
_A = self.get_scheduler_config(**_lowercase )
_A = scheduler_class(**_lowercase )
_A = 1_0
_A = self.dummy_model()
_A = self.dummy_sample_deter
scheduler.set_timesteps(_lowercase )
for i, t in enumerate(scheduler.timesteps ):
_A = model(_lowercase , _lowercase )
_A = scheduler.step(_lowercase , _lowercase , _lowercase ).prev_sample
return sample
def lowercase_ ( self ) -> List[Any]:
"""simple docstring"""
_A = dict(self.forward_default_kwargs )
_A = kwargs.pop('''num_inference_steps''' , _lowercase )
for scheduler_class in self.scheduler_classes:
_A = self.get_scheduler_config()
_A = scheduler_class(**_lowercase )
_A = self.dummy_sample
_A = 0.1 * sample
if num_inference_steps is not None and hasattr(_lowercase , '''set_timesteps''' ):
scheduler.set_timesteps(_lowercase )
elif num_inference_steps is not None and not hasattr(_lowercase , '''set_timesteps''' ):
_A = num_inference_steps
# copy over dummy past residuals (must be done after set_timesteps)
_A = [residual + 0.2, residual + 0.15, residual + 0.10]
_A = dummy_past_residuals[: scheduler.config.solver_order]
_A = scheduler.timesteps[5]
_A = scheduler.timesteps[6]
_A = scheduler.step(_lowercase , _lowercase , _lowercase , **_lowercase ).prev_sample
_A = scheduler.step(_lowercase , _lowercase , _lowercase , **_lowercase ).prev_sample
self.assertEqual(output_a.shape , sample.shape )
self.assertEqual(output_a.shape , output_a.shape )
def lowercase_ ( self ) -> Tuple:
"""simple docstring"""
_A = DEISMultistepScheduler(**self.get_scheduler_config() )
_A = self.full_loop(scheduler=_lowercase )
_A = torch.mean(torch.abs(_lowercase ) )
assert abs(result_mean.item() - 0.23_916 ) < 1e-3
_A = DPMSolverSinglestepScheduler.from_config(scheduler.config )
_A = DPMSolverMultistepScheduler.from_config(scheduler.config )
_A = UniPCMultistepScheduler.from_config(scheduler.config )
_A = DEISMultistepScheduler.from_config(scheduler.config )
_A = self.full_loop(scheduler=_lowercase )
_A = torch.mean(torch.abs(_lowercase ) )
assert abs(result_mean.item() - 0.23_916 ) < 1e-3
def lowercase_ ( self ) -> Optional[Any]:
"""simple docstring"""
for timesteps in [2_5, 5_0, 1_0_0, 9_9_9, 1_0_0_0]:
self.check_over_configs(num_train_timesteps=_lowercase )
def lowercase_ ( self ) -> Union[str, Any]:
"""simple docstring"""
self.check_over_configs(thresholding=_lowercase )
for order in [1, 2, 3]:
for solver_type in ["logrho"]:
for threshold in [0.5, 1.0, 2.0]:
for prediction_type in ["epsilon", "sample"]:
self.check_over_configs(
thresholding=_lowercase , prediction_type=_lowercase , sample_max_value=_lowercase , algorithm_type='''deis''' , solver_order=_lowercase , solver_type=_lowercase , )
def lowercase_ ( self ) -> Union[str, Any]:
"""simple docstring"""
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=_lowercase )
def lowercase_ ( self ) -> List[str]:
"""simple docstring"""
for algorithm_type in ["deis"]:
for solver_type in ["logrho"]:
for order in [1, 2, 3]:
for prediction_type in ["epsilon", "sample"]:
self.check_over_configs(
solver_order=_lowercase , solver_type=_lowercase , prediction_type=_lowercase , algorithm_type=_lowercase , )
_A = self.full_loop(
solver_order=_lowercase , solver_type=_lowercase , prediction_type=_lowercase , algorithm_type=_lowercase , )
assert not torch.isnan(_lowercase ).any(), "Samples have nan numbers"
def lowercase_ ( self ) -> int:
"""simple docstring"""
self.check_over_configs(lower_order_final=_lowercase )
self.check_over_configs(lower_order_final=_lowercase )
def lowercase_ ( self ) -> Tuple:
"""simple docstring"""
for num_inference_steps in [1, 2, 3, 5, 1_0, 5_0, 1_0_0, 9_9_9, 1_0_0_0]:
self.check_over_forward(num_inference_steps=_lowercase , time_step=0 )
def lowercase_ ( self ) -> Optional[Any]:
"""simple docstring"""
_A = self.full_loop()
_A = torch.mean(torch.abs(_lowercase ) )
assert abs(result_mean.item() - 0.23_916 ) < 1e-3
    def test_full_loop_with_v_prediction( self ):
        """simple docstring"""
        sample = self.full_loop(prediction_type='''v_prediction''' )
        result_mean = torch.mean(torch.abs(sample ) )
        assert abs(result_mean.item() - 0.091 ) < 1e-3
    def test_fp16_support( self ):
        """simple docstring"""
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(thresholding=True , dynamic_thresholding_ratio=0 )
        scheduler = scheduler_class(**scheduler_config )
        num_inference_steps = 1_0
        model = self.dummy_model()
        sample = self.dummy_sample_deter.half()
        scheduler.set_timesteps(num_inference_steps )
        for i, t in enumerate(scheduler.timesteps ):
            residual = model(sample , t )
            sample = scheduler.step(residual , t , sample ).prev_sample
        assert sample.dtype == torch.float16 | 317 | import unittest
import numpy as np
import timeout_decorator # noqa
from transformers import BlenderbotConfig, is_flax_available
from transformers.testing_utils import jax_device, require_flax, slow
from ...generation.test_flax_utils import FlaxGenerationTesterMixin
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor
if is_flax_available():
import os
# The slow tests are often failing with OOM error on GPU
# This makes JAX allocate exactly what is needed on demand, and deallocate memory that is no longer needed
# but will be slower as stated here https://jax.readthedocs.io/en/latest/gpu_memory_allocation.html
    os.environ["XLA_PYTHON_CLIENT_ALLOCATOR"] = "platform"
import jax
import jax.numpy as jnp
from transformers import BlenderbotTokenizer
from transformers.models.blenderbot.modeling_flax_blenderbot import (
FlaxBlenderbotForConditionalGeneration,
FlaxBlenderbotModel,
shift_tokens_right,
)
def prepare_blenderbot_inputs_dict( config , input_ids , decoder_input_ids=None , attention_mask=None , decoder_attention_mask=None , head_mask=None , decoder_head_mask=None , cross_attn_head_mask=None , ):
    '''simple docstring'''
    if attention_mask is None:
        attention_mask = np.where(input_ids != config.pad_token_id , 1 , 0 )
    if decoder_attention_mask is None:
        decoder_attention_mask = np.where(decoder_input_ids != config.pad_token_id , 1 , 0 )
    if head_mask is None:
        head_mask = np.ones((config.encoder_layers, config.encoder_attention_heads) )
    if decoder_head_mask is None:
        decoder_head_mask = np.ones((config.decoder_layers, config.decoder_attention_heads) )
    if cross_attn_head_mask is None:
        cross_attn_head_mask = np.ones((config.decoder_layers, config.decoder_attention_heads) )
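    # NOTE (added): the head masks built above are not returned below, and the dict
    # reuses `attention_mask` for `decoder_attention_mask`; this appears to mirror
    # the upstream flax test helper rather than being a typo introduced here.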
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": attention_mask,
}
class FlaxBlenderbotModelTester:
    """simple docstring"""
    def __init__( self , parent , batch_size=13 , seq_length=7 , is_training=True , use_labels=False , vocab_size=99 , hidden_size=16 , num_hidden_layers=2 , num_attention_heads=4 , intermediate_size=4 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=32 , eos_token_id=2 , pad_token_id=1 , bos_token_id=0 , initializer_range=0.02 , ):
        """simple docstring"""
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
        self.initializer_range = initializer_range
    def prepare_config_and_inputs( self ):
        """simple docstring"""
        input_ids = np.clip(ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size ) , 3 , self.vocab_size )
        input_ids = np.concatenate((input_ids, 2 * np.ones((self.batch_size, 1) , dtype=np.int64 )) , -1 )
        decoder_input_ids = shift_tokens_right(input_ids , 1 , 2 )
        config = BlenderbotConfig(
            vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_id=self.eos_token_id , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , initializer_range=self.initializer_range , use_cache=False , )
        inputs_dict = prepare_blenderbot_inputs_dict(config , input_ids , decoder_input_ids )
        return config, inputs_dict
    def prepare_config_and_inputs_for_common( self ):
        """simple docstring"""
        config, inputs_dict = self.prepare_config_and_inputs()
        return config, inputs_dict
    def check_use_cache_forward( self , model_class_name , config , inputs_dict ):
        """simple docstring"""
        max_decoder_length = 20
        model = model_class_name(config )
        encoder_outputs = model.encode(inputs_dict['''input_ids'''] )
        decoder_input_ids, decoder_attention_mask = (
            inputs_dict['''decoder_input_ids'''],
            inputs_dict['''decoder_attention_mask'''],
        )
        past_key_values = model.init_cache(decoder_input_ids.shape[0] , max_decoder_length , encoder_outputs )
        decoder_attention_mask = jnp.ones((decoder_input_ids.shape[0], max_decoder_length) , dtype='''i4''' )
        decoder_position_ids = jnp.broadcast_to(
            jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , )
        outputs_cache = model.decode(
            decoder_input_ids[:, :-1] , encoder_outputs , decoder_attention_mask=decoder_attention_mask , past_key_values=past_key_values , decoder_position_ids=decoder_position_ids , )
        decoder_position_ids = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype='''i4''' )
        outputs_cache_next = model.decode(
            decoder_input_ids[:, -1:] , encoder_outputs , decoder_attention_mask=decoder_attention_mask , past_key_values=outputs_cache.past_key_values , decoder_position_ids=decoder_position_ids , )
        outputs = model.decode(decoder_input_ids , encoder_outputs )
        diff = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
        self.parent.assertTrue(diff < 1E-3 , msg=f"""Max diff is {diff}""" )
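        # NOTE (added): the check above decodes all-but-last tokens against a freshly
        # initialized cache, then the final token against the updated cache, and
        # requires those cached logits to match a full uncached decode within 1e-3.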
    def check_use_cache_forward_with_attn_mask( self , model_class_name , config , inputs_dict ):
        """simple docstring"""
        max_decoder_length = 20
        model = model_class_name(config )
        encoder_outputs = model.encode(inputs_dict['''input_ids'''] )
        decoder_input_ids, decoder_attention_mask = (
            inputs_dict['''decoder_input_ids'''],
            inputs_dict['''decoder_attention_mask'''],
        )
        decoder_attention_mask_cache = jnp.concatenate(
            [
                decoder_attention_mask,
                jnp.zeros((decoder_attention_mask.shape[0], max_decoder_length - decoder_attention_mask.shape[1]) ),
            ] , axis=-1 , )
        past_key_values = model.init_cache(decoder_input_ids.shape[0] , max_decoder_length , encoder_outputs )
        decoder_position_ids = jnp.broadcast_to(
            jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , )
        outputs_cache = model.decode(
            decoder_input_ids[:, :-1] , encoder_outputs , decoder_attention_mask=decoder_attention_mask_cache , past_key_values=past_key_values , decoder_position_ids=decoder_position_ids , )
        decoder_position_ids = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype='''i4''' )
        outputs_cache_next = model.decode(
            decoder_input_ids[:, -1:] , encoder_outputs , past_key_values=outputs_cache.past_key_values , decoder_attention_mask=decoder_attention_mask_cache , decoder_position_ids=decoder_position_ids , )
        outputs = model.decode(decoder_input_ids , encoder_outputs , decoder_attention_mask=decoder_attention_mask )
        diff = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
        self.parent.assertTrue(diff < 1E-3 , msg=f"""Max diff is {diff}""" )
@require_flax
class FlaxBlenderbotHeadTests( unittest.TestCase ):
    """simple docstring"""
    vocab_size = 9_9
    def _get_config_and_data( self ):
        """simple docstring"""
        input_ids = np.array(
[
[71, 82, 18, 33, 46, 91, 2],
[68, 34, 26, 58, 30, 82, 2],
[5, 97, 17, 39, 94, 40, 2],
[76, 83, 94, 25, 70, 78, 2],
[87, 59, 41, 35, 48, 66, 2],
[55, 13, 16, 58, 5, 2, 1], # note padding
[64, 27, 31, 51, 12, 75, 2],
[52, 64, 86, 17, 83, 39, 2],
[48, 61, 9, 24, 71, 82, 2],
[26, 1, 60, 48, 22, 13, 2],
[21, 5, 62, 28, 14, 76, 2],
[45, 98, 37, 86, 59, 48, 2],
[70, 70, 50, 9, 28, 0, 2],
            ] , dtype=np.int64 , )
        batch_size = input_ids.shape[0]
        config = BlenderbotConfig(
            vocab_size=self.vocab_size , d_model=24 , encoder_layers=2 , decoder_layers=2 , encoder_attention_heads=2 , decoder_attention_heads=2 , encoder_ffn_dim=32 , decoder_ffn_dim=32 , max_position_embeddings=48 , eos_token_id=2 , pad_token_id=1 , bos_token_id=0 , )
        return config, input_ids, batch_size
    def test_lm_forward( self ):
        """simple docstring"""
        config, input_ids, batch_size = self._get_config_and_data()
        lm_model = FlaxBlenderbotForConditionalGeneration(config )
        outputs = lm_model(input_ids=input_ids )
        expected_shape = (batch_size, input_ids.shape[1], config.vocab_size)
        self.assertEqual(outputs['''logits'''].shape , expected_shape )
    def test_lm_uneven_forward( self ):
        """simple docstring"""
        config = BlenderbotConfig(
            vocab_size=self.vocab_size , d_model=14 , encoder_layers=2 , decoder_layers=2 , encoder_attention_heads=2 , decoder_attention_heads=2 , encoder_ffn_dim=8 , decoder_ffn_dim=8 , max_position_embeddings=48 , )
        lm_model = FlaxBlenderbotForConditionalGeneration(config )
        context = np.array([[71, 82, 18, 33, 46, 91, 2], [68, 34, 26, 58, 30, 2, 1]] , dtype=np.int64 )
        summary = np.array([[82, 71, 82, 18, 2], [58, 68, 2, 1, 1]] , dtype=np.int64 )
        outputs = lm_model(input_ids=context , decoder_input_ids=summary )
        expected_shape = (*summary.shape, config.vocab_size)
        self.assertEqual(outputs['''logits'''].shape , expected_shape )
    def test_shift_tokens_right( self ):
        """simple docstring"""
        input_ids = np.array([[71, 82, 18, 33, 2, 1, 1], [68, 34, 26, 58, 30, 82, 2]] , dtype=np.int64 )
        shifted = shift_tokens_right(input_ids , 1 , 2 )
        n_pad_before = np.equal(input_ids , 1 ).astype(np.float32 ).sum()
        n_pad_after = np.equal(shifted , 1 ).astype(np.float32 ).sum()
        self.assertEqual(shifted.shape , input_ids.shape )
        self.assertEqual(n_pad_after , n_pad_before - 1 )
        self.assertTrue(np.equal(shifted[:, 0] , 2 ).all() )
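        # NOTE (added): `shift_tokens_right(input_ids, pad_token_id, decoder_start_token_id)`
        # prepends the decoder start token and drops the final position, so exactly one
        # pad token disappears and every row starts with token 2, as asserted above.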
@require_flax
class FlaxBlenderbotModelTest( FlaxModelTesterMixin , unittest.TestCase , FlaxGenerationTesterMixin ):
    """simple docstring"""
    is_encoder_decoder = True
    all_model_classes = (
        (
            FlaxBlenderbotModel,
            FlaxBlenderbotForConditionalGeneration,
        )
        if is_flax_available()
        else ()
    )
    all_generative_model_classes = (FlaxBlenderbotForConditionalGeneration,) if is_flax_available() else ()
    def setUp( self ):
        """simple docstring"""
        self.model_tester = FlaxBlenderbotModelTester(self )
    def test_use_cache_forward( self ):
        """simple docstring"""
        config, inputs_dict = self.model_tester.prepare_config_and_inputs()
        for model_class in self.all_model_classes:
            self.model_tester.check_use_cache_forward(model_class , config , inputs_dict )
    def test_use_cache_forward_with_attn_mask( self ):
        """simple docstring"""
        config, inputs_dict = self.model_tester.prepare_config_and_inputs()
        for model_class in self.all_model_classes:
            self.model_tester.check_use_cache_forward_with_attn_mask(model_class , config , inputs_dict )
    def test_encode( self ):
        """simple docstring"""
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__ ):
                prepared_inputs_dict = self._prepare_for_class(inputs_dict , model_class )
                model = model_class(config )
                @jax.jit
                def encode_jitted(input_ids , attention_mask=None , **kwargs ):
                    return model.encode(input_ids=input_ids , attention_mask=attention_mask )
                with self.subTest('''JIT Enabled''' ):
                    jitted_outputs = encode_jitted(**prepared_inputs_dict ).to_tuple()
                with self.subTest('''JIT Disabled''' ):
                    with jax.disable_jit():
                        outputs = encode_jitted(**prepared_inputs_dict ).to_tuple()
                self.assertEqual(len(jitted_outputs ) , len(outputs ) )
                for jitted_output, output in zip(jitted_outputs , outputs ):
                    self.assertEqual(jitted_output.shape , output.shape )
    def test_decode( self ):
        """simple docstring"""
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__ ):
                model = model_class(config )
                encoder_outputs = model.encode(inputs_dict['''input_ids'''] , inputs_dict['''attention_mask'''] )
                prepared_inputs_dict = {
                    '''decoder_input_ids''': inputs_dict['''decoder_input_ids'''],
                    '''decoder_attention_mask''': inputs_dict['''decoder_attention_mask'''],
                    '''encoder_outputs''': encoder_outputs,
                }
                @jax.jit
                def decode_jitted(decoder_input_ids , decoder_attention_mask , encoder_outputs ):
                    return model.decode(
                        decoder_input_ids=decoder_input_ids , decoder_attention_mask=decoder_attention_mask , encoder_outputs=encoder_outputs , )
                with self.subTest('''JIT Enabled''' ):
                    jitted_outputs = decode_jitted(**prepared_inputs_dict ).to_tuple()
                with self.subTest('''JIT Disabled''' ):
                    with jax.disable_jit():
                        outputs = decode_jitted(**prepared_inputs_dict ).to_tuple()
                self.assertEqual(len(jitted_outputs ) , len(outputs ) )
                for jitted_output, output in zip(jitted_outputs , outputs ):
                    self.assertEqual(jitted_output.shape , output.shape )
    @slow
    def test_model_from_pretrained( self ):
        """simple docstring"""
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained('''facebook/blenderbot-400M-distill''' )
            # FlaxBlenderbotForSequenceClassification expects eos token in input_ids
            input_ids = np.ones((1, 1) ) * model.config.eos_token_id
            outputs = model(input_ids )
            self.assertIsNotNone(outputs )
    @unittest.skipUnless(jax_device != '''cpu''' , '''3B test too slow on CPU.''' )
    @slow
    def test_generation_from_short_input_same_as_parlai_3B( self ):
        """simple docstring"""
        FASTER_GEN_KWARGS = {'''num_beams''': 1, '''early_stopping''': True, '''min_length''': 15, '''max_length''': 25}
        TOK_DECODE_KW = {'''skip_special_tokens''': True, '''clean_up_tokenization_spaces''': True}
        model = FlaxBlenderbotForConditionalGeneration.from_pretrained('''facebook/blenderbot-3B''' , from_pt=True )
        tokenizer = BlenderbotTokenizer.from_pretrained('''facebook/blenderbot-3B''' )
        src_text = ['''Sam''']
        model_inputs = tokenizer(src_text , return_tensors='''jax''' )
        generated_utterances = model.generate(**model_inputs , **FASTER_GEN_KWARGS )
        tgt_text = '''Sam is a great name. It means "sun" in Gaelic.'''
        generated_txt = tokenizer.batch_decode(generated_utterances.sequences , **TOK_DECODE_KW )
        assert generated_txt[0].strip() == tgt_text
'''simple docstring'''
import os
from typing import List, Optional, Union
from ...tokenization_utils import PreTrainedTokenizer
from ...tokenization_utils_base import AddedToken
from ...utils import logging
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {'vocab_file': 'vocab.txt'}
PRETRAINED_VOCAB_FILES_MAP = {
    'vocab_file': {
        'facebook/esm2_t6_8M_UR50D': 'https://huggingface.co/facebook/esm2_t6_8M_UR50D/resolve/main/vocab.txt',
        'facebook/esm2_t12_35M_UR50D': 'https://huggingface.co/facebook/esm2_t12_35M_UR50D/resolve/main/vocab.txt',
    },
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    'facebook/esm2_t6_8M_UR50D': 1_0_2_4,
    'facebook/esm2_t12_35M_UR50D': 1_0_2_4,
}
def load_vocab_file( vocab_file: str ) -> List[str]:
    """simple docstring"""
    with open(vocab_file , "r" ) as f:
        lines = f.read().splitlines()
    return [l.strip() for l in lines]
class EsmTokenizer( PreTrainedTokenizer ):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    def __init__( self , vocab_file , unk_token="<unk>" , cls_token="<cls>" , pad_token="<pad>" , mask_token="<mask>" , eos_token="<eos>" , **kwargs , ):
        """simple docstring"""
        super().__init__(**kwargs )
        self.all_tokens = load_vocab_file(vocab_file )
        self._id_to_token = dict(enumerate(self.all_tokens ) )
        self._token_to_id = {tok: ind for ind, tok in enumerate(self.all_tokens )}
        self.unk_token = unk_token
        self.cls_token = cls_token
        self.pad_token = pad_token
        self.mask_token = mask_token
        self.eos_token = eos_token
        self.unique_no_split_tokens = self.all_tokens
        self._create_trie(self.unique_no_split_tokens )
    def _convert_id_to_token( self , index ):
        """simple docstring"""
        return self._id_to_token.get(index , self.unk_token )
    def _convert_token_to_id( self , token ):
        """simple docstring"""
        return self._token_to_id.get(token , self._token_to_id.get(self.unk_token ) )
    def _tokenize( self , text , **kwargs ):
        """simple docstring"""
        return text.split()
    def get_vocab_size( self , with_added_tokens=False ):
        """simple docstring"""
        return len(self._id_to_token )
    def get_vocab( self ):
        """simple docstring"""
        return {token: i for i, token in enumerate(self.all_tokens )}
    def token_to_id( self , token ):
        """simple docstring"""
        return self._token_to_id.get(token , self._token_to_id.get(self.unk_token ) )
    def id_to_token( self , index ):
        """simple docstring"""
        return self._id_to_token.get(index , self.unk_token )
    def build_inputs_with_special_tokens( self , token_ids_a , token_ids_b = None ):
        """simple docstring"""
        cls = [self.cls_token_id]
        sep = [self.eos_token_id]  # No sep token in ESM vocabulary
        if token_ids_b is None:
            if self.eos_token_id is None:
                return cls + token_ids_a
            else:
                return cls + token_ids_a + sep
        elif self.eos_token_id is None:
            raise ValueError("Cannot tokenize multiple sequences when EOS token is not set!" )
        return cls + token_ids_a + sep + token_ids_b + sep  # Multiple inputs always have an EOS token
    def get_special_tokens_mask( self , token_ids_a , token_ids_b = None , already_has_special_tokens = False ):
        """simple docstring"""
        if already_has_special_tokens:
            if token_ids_b is not None:
                raise ValueError(
                    "You should not supply a second sequence if the provided sequence of "
                    "ids is already formatted with special tokens for the model." )
            return [1 if token in self.all_special_ids else 0 for token in token_ids_a]
        mask = [1] + ([0] * len(token_ids_a )) + [1]
        if token_ids_b is not None:
            mask += [0] * len(token_ids_b ) + [1]
        return mask
    def save_vocabulary( self , save_directory , filename_prefix ):
        """simple docstring"""
        vocab_file = os.path.join(save_directory , (filename_prefix + "-" if filename_prefix else "") + "vocab.txt" )
        with open(vocab_file , "w" ) as f:
            f.write("\n".join(self.all_tokens ) )
        return (vocab_file,)
    @property
    def vocab_size( self ):
        """simple docstring"""
        return self.get_vocab_size(with_added_tokens=False )
    def _add_tokens( self , new_tokens , special_tokens = False ):
        """simple docstring"""
        return super()._add_tokens(new_tokens , special_tokens=True )
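# --- Added usage sketch (not part of the original module) ----------------------
# A minimal, hypothetical round-trip with the tokenizer above. "vocab.txt" is a
# placeholder path; the file is expected to hold one token per line, as consumed
# by `load_vocab_file`, and residues are space-separated to match `_tokenize`.
if __name__ == "__main__":
    tok = EsmTokenizer(vocab_file="vocab.txt" )
    ids = tok("M K T A Y I A K" )["input_ids"]
    print(tok.convert_ids_to_tokens(ids ) )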
| 721 |
'''simple docstring'''
import os
import unittest
from huggingface_hub.utils import are_progress_bars_disabled
import transformers.models.bart.tokenization_bart
from transformers import logging
from transformers.testing_utils import CaptureLogger, mockenv, mockenv_context
from transformers.utils.logging import disable_progress_bar, enable_progress_bar
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
    def test_set_level( self ):
        """simple docstring"""
        logger = logging.get_logger()
        # the current default level is logging.WARNING
        level_origin = logging.get_verbosity()
        logging.set_verbosity_error()
        self.assertEqual(logger.getEffectiveLevel() , logging.get_verbosity() )
        logging.set_verbosity_warning()
        self.assertEqual(logger.getEffectiveLevel() , logging.get_verbosity() )
        logging.set_verbosity_info()
        self.assertEqual(logger.getEffectiveLevel() , logging.get_verbosity() )
        logging.set_verbosity_debug()
        self.assertEqual(logger.getEffectiveLevel() , logging.get_verbosity() )
        # restore to the original level
        logging.set_verbosity(level_origin )
    def test_integration( self ):
        """simple docstring"""
        level_origin = logging.get_verbosity()
        logger = logging.get_logger("transformers.models.bart.tokenization_bart" )
        msg = "Testing 1, 2, 3"
        # should be able to log warnings (if default settings weren't overridden by `pytest --log-level-all`)
        if level_origin <= logging.WARNING:
            with CaptureLogger(logger ) as cl:
                logger.warning(msg )
            self.assertEqual(cl.out , msg + "\n" )
        # this is setting the level for all of `transformers.*` loggers
        logging.set_verbosity_error()
        # should not be able to log warnings
        with CaptureLogger(logger ) as cl:
            logger.warning(msg )
        self.assertEqual(cl.out , "" )
        # should be able to log warnings again
        logging.set_verbosity_warning()
        with CaptureLogger(logger ) as cl:
            logger.warning(msg )
        self.assertEqual(cl.out , msg + "\n" )
        # restore to the original level
        logging.set_verbosity(level_origin )
@mockenv(TRANSFORMERS_VERBOSITY="error" )
    def test_env_override( self ):
        """simple docstring"""
        transformers.utils.logging._reset_library_root_logger()
        # this action activates the env var
        _ = logging.get_logger("transformers.models.bart.tokenization_bart" )
        env_level_str = os.getenv("TRANSFORMERS_VERBOSITY" , None )
        env_level = logging.log_levels[env_level_str]
        current_level = logging.get_verbosity()
        self.assertEqual(
            env_level , current_level , f"TRANSFORMERS_VERBOSITY={env_level_str}/{env_level}, but internal verbosity is {current_level}" , )
        # restore to the original level
        os.environ["TRANSFORMERS_VERBOSITY"] = ""
        transformers.utils.logging._reset_library_root_logger()
@mockenv(TRANSFORMERS_VERBOSITY="super-error" )
    def test_env_invalid_override( self ):
        """simple docstring"""
        transformers.utils.logging._reset_library_root_logger()
        logger = logging.logging.getLogger()
        with CaptureLogger(logger ) as cl:
            # this action activates the env var
            logging.get_logger("transformers.models.bart.tokenization_bart" )
        self.assertIn("Unknown option TRANSFORMERS_VERBOSITY=super-error" , cl.out )
        # no need to restore as nothing was changed
    def test_advisory_warnings( self ):
        """simple docstring"""
        transformers.utils.logging._reset_library_root_logger()
        logger = logging.get_logger("transformers.models.bart.tokenization_bart" )
        msg = "Testing 1, 2, 3"
        with mockenv_context(TRANSFORMERS_NO_ADVISORY_WARNINGS="1" ):
            # nothing should be logged as env var disables this method
            with CaptureLogger(logger ) as cl:
                logger.warning_advice(msg )
            self.assertEqual(cl.out , "" )
        with mockenv_context(TRANSFORMERS_NO_ADVISORY_WARNINGS="" ):
            # should log normally as TRANSFORMERS_NO_ADVISORY_WARNINGS is unset
            with CaptureLogger(logger ) as cl:
                logger.warning_advice(msg )
            self.assertEqual(cl.out , msg + "\n" )
def test_set_progress_bar_enabled() -> None:
    """simple docstring"""
    disable_progress_bar()
    assert are_progress_bars_disabled()
    enable_progress_bar()
    assert not are_progress_bars_disabled()
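# --- Added usage sketch (not part of the original tests) -----------------------
# How a downstream script would typically drive the same knobs exercised above;
# all calls are the public `transformers.utils.logging` helpers imported in this file.
def _example_configure_logging() -> None:
    logging.set_verbosity_info()  # equivalent to TRANSFORMERS_VERBOSITY=info
    disable_progress_bar()        # silence tqdm bars, e.g. in CI
    logging.get_logger(__name__ ).info("logging configured" )
    enable_progress_bar()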
| 68 | 0 |
'''simple docstring'''
import unittest
from transformers import AlbertTokenizer, AlbertTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir('''fixtures/spiece.model''')
@require_sentencepiece
@require_tokenizers
class AlbertTokenizationTest( TokenizerTesterMixin , unittest.TestCase ):
    tokenizer_class = AlbertTokenizer
    rust_tokenizer_class = AlbertTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True
    test_sentencepiece_ignore_case = True
    def setUp( self ):
        """simple docstring"""
        super().setUp()
        # We have a SentencePiece fixture for testing
        tokenizer = AlbertTokenizer(SAMPLE_VOCAB )
        tokenizer.save_pretrained(self.tmpdirname )
    def get_input_output_texts( self , tokenizer ):
        """simple docstring"""
        input_text = "this is a test"
        output_text = "this is a test"
        return input_text, output_text
    def test_convert_token_and_id( self ):
        """simple docstring"""
        token = "<pad>"
        token_id = 0
        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token ) , token_id )
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id ) , token )
    def test_get_vocab( self ):
        """simple docstring"""
        vocab_keys = list(self.get_tokenizer().get_vocab().keys() )
        self.assertEqual(vocab_keys[0] , "<pad>" )
        self.assertEqual(vocab_keys[1] , "<unk>" )
        self.assertEqual(vocab_keys[-1] , "▁eloquent" )
        self.assertEqual(len(vocab_keys ) , 30_000 )
    def test_vocab_size( self ):
        """simple docstring"""
        self.assertEqual(self.get_tokenizer().vocab_size , 30_000 )
    def test_rust_and_python_full_tokenizers( self ):
        """simple docstring"""
        if not self.test_rust_tokenizer:
            return
        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()
        sequence = "I was born in 92000, and this is falsé."
        tokens = tokenizer.tokenize(sequence )
        rust_tokens = rust_tokenizer.tokenize(sequence )
        self.assertListEqual(tokens , rust_tokens )
        ids = tokenizer.encode(sequence , add_special_tokens=False )
        rust_ids = rust_tokenizer.encode(sequence , add_special_tokens=False )
        self.assertListEqual(ids , rust_ids )
        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence )
        rust_ids = rust_tokenizer.encode(sequence )
        self.assertListEqual(ids , rust_ids )
    def test_full_tokenizer( self ):
        """simple docstring"""
        tokenizer = AlbertTokenizer(SAMPLE_VOCAB , keep_accents=True )
        tokens = tokenizer.tokenize("This is a test" )
        self.assertListEqual(tokens , ["▁this", "▁is", "▁a", "▁test"] )
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens ) , [48, 25, 21, 1_289] )
        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé." )
        self.assertListEqual(
            tokens , ["▁i", "▁was", "▁born", "▁in", "▁9", "2000", ",", "▁and", "▁this", "▁is", "▁fal", "s", "é", "."] )
        ids = tokenizer.convert_tokens_to_ids(tokens )
        self.assertListEqual(ids , [31, 23, 386, 19, 561, 3_050, 15, 17, 48, 25, 8_256, 18, 1, 9] )
        back_tokens = tokenizer.convert_ids_to_tokens(ids )
        self.assertListEqual(
            back_tokens , ["▁i", "▁was", "▁born", "▁in", "▁9", "2000", ",", "▁and", "▁this", "▁is", "▁fal", "s", "<unk>", "."] , )
    def test_sequence_builders( self ):
        """simple docstring"""
        tokenizer = AlbertTokenizer(SAMPLE_VOCAB )
        text = tokenizer.encode("sequence builders" )
        text_a = tokenizer.encode("multi-sequence build" )
        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text )
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text , text_a )
        assert encoded_sentence == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id]
        assert encoded_pair == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_a + [
            tokenizer.sep_token_id
        ]
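        # NOTE (added): ALBERT follows the BERT special-token layout verified above:
        #   single sequence: [CLS] A [SEP]      pair: [CLS] A [SEP] B [SEP]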
    @slow
    def test_tokenizer_integration( self ):
        """simple docstring"""
__snake_case = {"attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], "input_ids": [[2, 21_970, 13, 5, 6_092, 167, 28, 7_103, 2_153, 673, 8, 7_028, 12_051, 18, 17, 7_103, 2_153, 673, 8, 3_515, 18_684, 8, 4_461, 6, 1_927, 297, 8, 12_060, 2_607, 18, 13, 5, 4_461, 15, 10_538, 38, 8, 135, 15, 822, 58, 15, 993, 10_363, 15, 1_460, 8_005, 4_461, 15, 993, 255, 2_328, 9, 9, 9, 6, 26, 1_112, 816, 3_260, 13, 5, 103, 2_377, 6, 17, 1_112, 816, 2_782, 13, 5, 103, 10_641, 6, 29, 84, 2_512, 2_430, 782, 18_684, 2_761, 19, 808, 2_430, 2_556, 17, 855, 1_480, 9_477, 4_091, 128, 11_712, 15, 7_103, 2_153, 673, 17, 24_883, 9_990, 9, 3], [2, 11_502, 25, 1_006, 20, 782, 8, 11_809, 855, 1_732, 19_393, 18_667, 37, 367, 21_018, 69, 1_854, 34, 11_860, 19_124, 27, 156, 225, 17, 193, 4_141, 19, 65, 9_124, 9, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [2, 14, 2_231, 886, 2_385, 17_659, 84, 14, 16_792, 1_952, 9, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], "token_type_ids": [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
        self.tokenizer_integration_test_util(
            expected_encoding=__snake_case , model_name="albert-base-v2" , revision="6b6560eaf5ff2e250b00c50f380c5389a9c2d82e" , )
| 69 |
from dataclasses import dataclass, field
from typing import Optional
@dataclass
class TrainingArguments:
    model_ckpt: Optional[str] = field(
        default="codeparrot/codeparrot", metadata={"help": "Model name or path of model to be trained."}
    )
    save_dir: Optional[str] = field(
        default="./", metadata={"help": "Save dir where model repo is cloned and models updates are saved to."}
    )
    dataset_name_train: Optional[str] = field(
        default="codeparrot/codeparrot-clean-train", metadata={"help": "Name or path of training dataset."}
    )
    dataset_name_valid: Optional[str] = field(
        default="codeparrot/codeparrot-clean-valid", metadata={"help": "Name or path of validation dataset."}
    )
    train_batch_size: Optional[int] = field(default=2, metadata={"help": "Batch size for training."})
    valid_batch_size: Optional[int] = field(default=2, metadata={"help": "Batch size for evaluation."})
    weight_decay: Optional[float] = field(default=0.1, metadata={"help": "Value of weight decay."})
    shuffle_buffer: Optional[int] = field(
        default=10_000, metadata={"help": "Size of buffer used to shuffle streaming dataset."}
    )
    learning_rate: Optional[float] = field(default=2e-4, metadata={"help": "Learning rate for training."})
    lr_scheduler_type: Optional[str] = field(default="cosine", metadata={"help": "Learning rate scheduler type."})
    num_warmup_steps: Optional[int] = field(
        default=750, metadata={"help": "Number of warmup steps in the learning rate schedule."}
    )
    gradient_accumulation_steps: Optional[int] = field(
        default=16, metadata={"help": "Number of gradient accumulation steps."}
    )
    gradient_checkpointing: Optional[bool] = field(
        default=True, metadata={"help": "Use gradient checkpointing to reduce memory footprint."}
    )
    max_train_steps: Optional[int] = field(default=50_000, metadata={"help": "Maximum number of training steps."})
    max_eval_steps: Optional[int] = field(
        default=-1, metadata={"help": "Maximum number of evaluation steps. If -1 the full dataset is evaluated."}
    )
    seq_length: Optional[int] = field(default=1_024, metadata={"help": "Sequence lengths used for training."})
    seed: Optional[int] = field(default=1, metadata={"help": "Training seed."})
    save_checkpoint_steps: Optional[int] = field(
        default=1_024,
        metadata={"help": "Interval to save checkpoints. Measured as number of forward passes not training steps."},
    )
    resume_from_checkpoint: Optional[str] = field(
        default=None, metadata={"help": "States path if the training should continue from a checkpoint folder."}
    )
    tokenized: Optional[bool] = field(default=False, metadata={"help": "If True the data is pretokenized."})
@dataclass
class EvaluationArguments:
    model_ckpt: Optional[str] = field(
        default="codeparrot/codeparrot", metadata={"help": "Model name or path of model to be evaluated."}
    )
    dataset_name: Optional[str] = field(
        default="codeparrot/codeparrot-clean-valid", metadata={"help": "Name or path of validation dataset."}
    )
    batch_size: Optional[int] = field(default=2, metadata={"help": "Batch size used for evaluation."})
    max_eval_steps: Optional[int] = field(
        default=-1, metadata={"help": "Maximum number of evaluation steps. If -1 the full dataset is evaluated."}
    )
    seq_length: Optional[int] = field(default=1_024, metadata={"help": "Length of sequences to be evaluated."})
    seed: Optional[int] = field(default=1, metadata={"help": "Random seed used for evaluation."})
@dataclass
class HumanEvalArguments:
    model_ckpt: Optional[str] = field(
        default="codeparrot/codeparrot", metadata={"help": "Model name or path of model to be evaluated."}
    )
    num_workers: Optional[int] = field(default=None, metadata={"help": "Number of workers used for code evaluation."})
    num_tasks: Optional[int] = field(
        default=None,
        metadata={"help": "The number of human-eval tasks to run. If not included all tasks are evaluated."},
    )
    do_sample: Optional[bool] = field(
        default=True, metadata={"help": "Sample from the language model's output distribution."}
    )
    temperature: Optional[float] = field(default=0.2, metadata={"help": "Sampling temperature used for generation."})
    max_new_tokens: Optional[int] = field(default=256, metadata={"help": "Maximum number of newly generated tokens."})
    top_k: Optional[int] = field(default=0, metadata={"help": "Top-k parameter used for generation."})
    top_p: Optional[float] = field(default=0.95, metadata={"help": "Top-p parameter used for nucleus sampling."})
    batch_size: Optional[int] = field(default=10, metadata={"help": "Number of generations to run in parallel."})
    n_samples: Optional[int] = field(
        default=200, metadata={"help": "Number of completions to generate for each sample."}
    )
    seed: Optional[int] = field(default=1, metadata={"help": "Random seed used for evaluation."})
    output_file: Optional[str] = field(
        default="eval_results.json", metadata={"help": "Path to save the evaluation results."}
    )
    HF_ALLOW_CODE_EVAL: Optional[str] = field(
        default="0", metadata={"help": "Allow `code_eval` to execute Python code on machine"}
    )
    device_int: Optional[int] = field(
        default=-1,
        metadata={
            "help": (
                "Determine which device to run the `text-generation` Pipeline on. -1 is CPU and any zero or positive"
                " number corresponds to which GPU device id to run on."
            )
        },
    )
@dataclass
class PreprocessingArguments:
    num_workers: Optional[int] = field(
        default=None,
        metadata={
            "help": "The number of CPU cores to use for parallel preprocessing. Default uses the maximum available."
        },
    )
    dataset_name: Optional[str] = field(
        default="transformersbook/codeparrot", metadata={"help": "Folder or name of dataset to process."}
    )
    output_dir: Optional[str] = field(
        default="codeparrot-clean", metadata={"help": "Folder to save processed dataset."}
    )
    samples_per_file: Optional[int] = field(
        default=100_000, metadata={"help": "Number of files to save per JSON output file."}
    )
    text_column: Optional[str] = field(default="content", metadata={"help": "Column containing text data to process."})
    line_max: Optional[float] = field(
        default=1_000, metadata={"help": "Maximum line length in file, otherwise file is filtered."}
    )
    line_mean: Optional[float] = field(
        default=100, metadata={"help": "Maximum mean line length in file, otherwise file is filtered."}
    )
    alpha_frac: Optional[float] = field(
        default=0.25, metadata={"help": "Maximum fraction of non-alphanumeric characters, otherwise file is filtered."}
    )
    min_token_ratio: Optional[float] = field(
        default=1.5, metadata={"help": "Minimum character token ratio for the file, otherwise file is filtered."}
    )
    filter_proba: Optional[float] = field(
        default=0.7, metadata={"help": "Probability for filtering config, test and uncommon files."}
    )
    tokenizer: Optional[str] = field(
        default="codeparrot/codeparrot", metadata={"help": "Name or path to the tokenizer."}
    )
    near_deduplication: Optional[bool] = field(
        default=False, metadata={"help": "If True, near-duplicate samples are removed."}
    )
    jaccard_threshold: Optional[float] = field(
        default=0.85, metadata={"help": "Jaccard threshold for near-duplicate samples."}
    )
@dataclass
class TokenizerTrainingArguments:
    base_tokenizer: Optional[str] = field(
        default="gpt2", metadata={"help": "Base tokenizer to build new tokenizer from."}
    )
    dataset_name: Optional[str] = field(
        default="transformersbook/codeparrot-train", metadata={"help": "Dataset to train tokenizer on."}
    )
    text_column: Optional[str] = field(default="content", metadata={"help": "Column containing text data to process."})
    n_examples: Optional[int] = field(default=200_000, metadata={"help": "Number of examples to train tokenizer on."})
    vocab_size: Optional[int] = field(default=32_768, metadata={"help": "Size of the new tokenizer's vocabulary."})
    tokenizer_name: Optional[str] = field(default="codeparrot", metadata={"help": "Name of new tokenizer."})
    push_to_hub: Optional[bool] = field(default=True, metadata={"help": "Push saved tokenizer to the hub."})
@dataclass
class PretokenizationArguments:
    tokenizer_dir: Optional[str] = field(
        default="codeparrot/codeparrot", metadata={"help": "Name or path to the tokenizer."}
    )
    dataset_name: Optional[str] = field(
        default="codeparrot/codeparrot-clean-train", metadata={"help": "Name or path to the dataset to pretokenize."}
    )
    tokenized_data_repo: Optional[str] = field(
        default="tokenized-codeparrot-train", metadata={"help": "Repo name of the pretokenized data."}
    )
    num_workers: Optional[int] = field(default=None, metadata={"help": "Number of workers used for code evaluation."})
@dataclass
class InitializationArguments:
    config_name: Optional[str] = field(
        default="gpt2-large", metadata={"help": "Configuration to use for model initialization."}
    )
    tokenizer_name: Optional[str] = field(
        default="codeparrot/codeparrot", metadata={"help": "Tokenizer attached to model."}
    )
    model_name: Optional[str] = field(default="codeparrot", metadata={"help": "Name of the created model."})
    push_to_hub: Optional[bool] = field(default=True, metadata={"help": "Push saved tokenizer to the hub."})
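# --- Added usage sketch (not part of the original module) ----------------------
# These dataclasses are meant to be consumed through `transformers.HfArgumentParser`,
# e.g. in a training driver script; a minimal sketch using the class defined above:
if __name__ == "__main__":
    from transformers import HfArgumentParser

    parser = HfArgumentParser(TrainingArguments)
    (train_args,) = parser.parse_args_into_dataclasses()
    print(train_args.learning_rate, train_args.max_train_steps)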
| 326 | 0 |
"""simple docstring"""
import re
from filelock import FileLock
try:
    import nltk
    NLTK_AVAILABLE = True
except (ImportError, ModuleNotFoundError):
    NLTK_AVAILABLE = False
if NLTK_AVAILABLE:
with FileLock(""".lock""") as lock:
nltk.download("""punkt""", quiet=True)
def SCREAMING_SNAKE_CASE_ ( snake_case : str )-> str:
    snake_case = re.sub('<n>' , '' , snake_case )  # remove pegasus newline char
    assert NLTK_AVAILABLE, "nltk must be installed to separate newlines between sentences. (pip install nltk)"
    return "\n".join(nltk.sent_tokenize(snake_case ) )
| 222 |
"""simple docstring"""
def dodecahedron_surface_area( edge : float )-> float:
    if edge <= 0 or not isinstance(edge , (int, float) ):
        raise ValueError('Length must be a positive.' )
    return 3 * ((25 + 10 * (5 ** (1 / 2))) ** (1 / 2)) * (edge**2)
def dodecahedron_volume( edge : float )-> float:
    if edge <= 0 or not isinstance(edge , (int, float) ):
        raise ValueError('Length must be a positive.' )
    return ((15 + (7 * (5 ** (1 / 2)))) / 4) * (edge**3)
if __name__ == "__main__":
import doctest
doctest.testmod()
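    # --- Added sanity checks (not part of the original module) -----------------
    # Closed-form values for a regular dodecahedron with edge length 5:
    # surface area ≈ 516.1432, volume ≈ 957.8899.
    print(dodecahedron_surface_area(5))
    print(dodecahedron_volume(5))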
| 222 | 1 |
from sympy import diff, lambdify, symbols
from sympy.functions import * # noqa: F403
def newton_raphson( function : str , starting_point : complex , variable : str = "x" , precision : float = 10**-10 , multiplicity : int = 1 , ) -> complex:
    '''simple docstring'''
    x = symbols(variable )
    func = lambdify(x , function )
    diff_function = lambdify(x , diff(function , x ) )
    prev_guess = starting_point
    while True:
        if diff_function(prev_guess ) != 0:
            next_guess = prev_guess - multiplicity * func(prev_guess ) / diff_function(
                prev_guess )
        else:
            raise ZeroDivisionError("Could not find root" ) from None
        # Precision is checked by comparing the difference of consecutive guesses
        if abs(next_guess - prev_guess ) < precision:
            return next_guess
        prev_guess = next_guess
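# NOTE (added): `multiplicity` implements the modified Newton step
# x_{n+1} = x_n - m * f(x_n) / f'(x_n), which restores quadratic convergence at a
# root of known multiplicity m (plain Newton converges only linearly there).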
# Let's Execute
if __name__ == "__main__":
# Find root of trigonometric function
# Find value of pi
print(f"The root of sin(x) = 0 is {newton_raphson('sin(x)', 2)}")
# Find root of polynomial
# Find fourth Root of 5
print(f"The root of x**4 - 5 = 0 is {newton_raphson('x**4 -5', 0.4 +5J)}")
# Find value of e
print(
'''The root of log(y) - 1 = 0 is ''',
f"{newton_raphson('log(y) - 1', 2, variable='y')}",
)
# Exponential Roots
print(
'''The root of exp(x) - 1 = 0 is''',
f"{newton_raphson('exp(x) - 1', 1_0, precision=0.005)}",
)
# Find root of cos(x)
print(f"The root of cos(x) = 0 is {newton_raphson('cos(x)', 0)}")
| 663 | from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {"configuration_fnet": ["FNET_PRETRAINED_CONFIG_ARCHIVE_MAP", "FNetConfig"]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_fnet"] = ["FNetTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_fnet_fast"] = ["FNetTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_fnet"] = [
"FNET_PRETRAINED_MODEL_ARCHIVE_LIST",
"FNetForMaskedLM",
"FNetForMultipleChoice",
"FNetForNextSentencePrediction",
"FNetForPreTraining",
"FNetForQuestionAnswering",
"FNetForSequenceClassification",
"FNetForTokenClassification",
"FNetLayer",
"FNetModel",
"FNetPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_fnet import FNET_PRETRAINED_CONFIG_ARCHIVE_MAP, FNetConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_fnet import FNetTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_fnet_fast import FNetTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_fnet import (
FNET_PRETRAINED_MODEL_ARCHIVE_LIST,
FNetForMaskedLM,
FNetForMultipleChoice,
FNetForNextSentencePrediction,
FNetForPreTraining,
FNetForQuestionAnswering,
FNetForSequenceClassification,
FNetForTokenClassification,
FNetLayer,
FNetModel,
FNetPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 85 | 0 |
import json
import os
import unittest
from typing import Tuple
from transformers import Wav2Vec2PhonemeCTCTokenizer
from transformers.models.wav2vec2.tokenization_wav2vec2 import VOCAB_FILES_NAMES
from transformers.models.wav2vec2_phoneme.tokenization_wav2vec2_phoneme import Wav2Vec2PhonemeCTCTokenizerOutput
from transformers.testing_utils import require_phonemizer
from ...test_tokenization_common import TokenizerTesterMixin
@require_phonemizer
class Wav2Vec2PhonemeCTCTokenizerTest( TokenizerTesterMixin , unittest.TestCase ):
    tokenizer_class = Wav2Vec2PhonemeCTCTokenizer
    test_rust_tokenizer = False
    def setUp( self ):
        super().setUp()
        vocab = (
"<s> <pad> </s> <unk> n s t ə l a i k d m ɛ ɾ e ɪ p o ɐ z ð f j v b ɹ ʁ ʊ iː r w ʌ u ɡ æ aɪ ʃ h ɔ ɑː "
"ŋ ɚ eɪ β uː y ɑ̃ oʊ ᵻ eː θ aʊ ts oː ɔ̃ ɣ ɜ ɑ dʒ əl x ɜː ç ʒ tʃ ɔː ɑːɹ ɛ̃ ʎ ɔːɹ ʋ aː ɕ œ ø oːɹ ɲ yː "
"ʔ iə i5 s. tɕ ?? nʲ ɛː œ̃ ɭ ɔø ʑ tʲ ɨ ɛɹ ts. rʲ ɪɹ ɭʲ i.5 ɔɪ q sʲ u5 ʊɹ iɜ a5 iɛ5 øː ʕ ja əɜ th ɑ5 "
"oɪ dʲ ə5 tɕh ts.h mʲ ɯ dʑ vʲ e̞ tʃʲ ei5 o5 onɡ5 ɑu5 iɑ5 ai5 aɪɚ kh ə1 ʐ i2 ʉ ħ t[ aɪə ʲ ju ə2 u2 oɜ "
"pː iɛɜ ou5 y5 uɜ tː uo5 d[ uoɜ tsh ɑɜ ɵ i̪5 uei5 ɟ aɜ ɑɨ i.ɜ eʊ o2 ɐ̃ ä pʲ kʲ n̩ ɒ ph ɑu2 uɨ əɪ ɫ ɬ "
"yɜ bʲ ɑ2 s̪ aiɜ χ ɐ̃ʊ̃ 1 ə4 yæɜ a2 ɨː t̪ iouɜ ũ onɡɜ aɨ iɛ2 ɔɨ ɑuɜ o̞ ei2 iou2 c kː y2 ɖ oe dˤ yɛɜ "
"əʊ S ɡʲ onɡ2 u\" eiɜ ʈ ɯᵝ iou5 dZ r̝̊ i.2 tS s^ ʝ yə5 iɑɜ uə5 pf ɨu iɑ2 ou2 ər2 fʲ ai2 r̝ uəɜ ɳ əɨ "
"ua5 uɪ ɽ bː yu5 uo2 yɛ5 l̩ ɻ ərɜ ʂ i̪2 ouɜ uaɜ a. a.ː yæ5 dː r̩ ee ɪu ər5 i̪ ɜ æi u: i.ː t^ o1 ɪ^ "
"ai ueiɜ æː ɛɪ eə i. ɴ ie ua2 ɑ1 o4 tʃː o: ɑ: u1 N i̪1 au yæ2 u. qː yəɜ y: kʰ tʃʰ iʊ sx õ uo tʰ "
"uai5 bʰ u.ː uə2 ʊə d^ s̪ː yiɜ dʰ r. oe: i1 ɟː yu2 nʲʲ i̪4 uei2 tsʲ ɸ ĩ ɑ4 t̪ː eɑ u4 e: tsː ʈʰ ɡʰ "
"ɯɯ dʒʲ ʂʲ X ɵː uaiɜ tɕʲ ã t^ː ẽː yɛ2 cː i.1 ɛʊ dˤdˤ dʒː i4 ɡː yi ɕʲ ɟʰ pʰ dʑʲ yuɜ ua1 ua4 æiː ɐɐ "
"ui iou1 ʊː a1 iou4 cʰ iɛ1 yə2 ɖʰ ẽ ʒʲ ää ər4 iːː ɪː iɑ1 ər1 œː øi ɪuː cʰcʰ əː1 iː1 ũ kʰː o̞o̞ xʲ "
"ou1 iɛ4 e̞e̞ y1 dzː dʲʲ dʰː ɯᵝɯᵝ lː uo1 i.4 i: yɛ5ʲ a4"
).split(" " )
        vocab_tokens = dict(zip(vocab , range(len(vocab ) ) ) )
        self.special_tokens_map = {"pad_token": "<pad>", "unk_token": "<unk>", "bos_token": "<s>", "eos_token": "</s>"}
        self.vocab_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
        with open(self.vocab_file , "w" , encoding="utf-8" ) as fp:
            fp.write(json.dumps(vocab_tokens ) + "\n" )
    def get_clean_sequence( self , tokenizer , with_prefix_space=False , max_length=20 , min_length=5 ) -> Tuple[str, list]:
        toks = [(i, tokenizer.decode([i] , clean_up_tokenization_spaces=False )) for i in range(len(tokenizer ) )]
        toks = list(filter(lambda t : [t[0]] == tokenizer.encode(t[1] , do_phonemize=False ) , toks ) )
        if max_length is not None and len(toks ) > max_length:
            toks = toks[:max_length]
        if min_length is not None and len(toks ) < min_length and len(toks ) > 0:
            while len(toks ) < min_length:
                toks = toks + toks
        # toks_str = [t[1] for t in toks]
        toks_ids = [t[0] for t in toks]
        # Ensure consistency
        output_txt = tokenizer.decode(toks_ids , clean_up_tokenization_spaces=False )
        if " " not in output_txt and len(toks_ids ) > 1:
            output_txt = (
                tokenizer.decode([toks_ids[0]] , clean_up_tokenization_spaces=False )
                + " "
                + tokenizer.decode(toks_ids[1:] , clean_up_tokenization_spaces=False )
            )
        if with_prefix_space:
            output_txt = " " + output_txt
        output_ids = tokenizer.encode(output_txt , add_special_tokens=False )
        return output_txt, output_ids
    def get_tokenizer( self , **kwargs ):
        kwargs.update(self.special_tokens_map )
        return Wav2Vec2PhonemeCTCTokenizer.from_pretrained(self.tmpdirname , **kwargs )
    def test_tokenizer_add_new_tokens( self ):
        tokenizer = self.tokenizer_class.from_pretrained("facebook/wav2vec2-lv-60-espeak-cv-ft" )
        # check adding a single token
        tokenizer.add_tokens("xxx" )
        token_ids = tokenizer("m xxx ɪ" , do_phonemize=False ).input_ids
        self.assertEqual(token_ids , [13, 392, 17] )  # xxx should be last token
        tokenizer.add_tokens(["aaa", "bbb", "ccc"] )
        token_ids = tokenizer("m aaa ɪ ccc" , do_phonemize=False ).input_ids
        self.assertEqual(token_ids , [13, 393, 17, 395] )  # aaa and ccc should be after xxx and 2 after aaa
        token_ids = tokenizer("maɪ c" , do_phonemize=False ).input_ids
        self.assertEqual(token_ids , [3, 200] )  # mai should be <unk> (=3)
    def test_phonemize( self ):
        tokenizer = self.tokenizer_class.from_pretrained("facebook/wav2vec2-lv-60-espeak-cv-ft" )
        input_text = "Hello how are you"
        phonemes = tokenizer.phonemize(input_text , phonemizer_lang="en-us" )
        self.assertEqual(phonemes , "h ə l oʊ h aʊ ɑːɹ j uː" )
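        # NOTE (added): `phonemize` delegates to the `phonemizer` package's espeak
        # backend, so `phonemizer_lang` selects an espeak voice and the output is a
        # space-separated string of IPA phonemes, as asserted above.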
    def test_encode( self ):
        tokenizer = self.tokenizer_class.from_pretrained("facebook/wav2vec2-lv-60-espeak-cv-ft" )
        input_text = "Hello how are you"
        phonemes = tokenizer.phonemize(input_text , phonemizer_lang="en-us" )
        self.assertEqual(tokenizer(input_text ).input_ids , tokenizer(phonemes , do_phonemize=False ).input_ids )
    def test_encode_decode( self ):
        tokenizer = self.tokenizer_class.from_pretrained("facebook/wav2vec2-lv-60-espeak-cv-ft" )
        input_text = "Hello how are you"
        phonemes = tokenizer.phonemize(input_text , phonemizer_lang="en-us" )
        phonemes_enc_dec = tokenizer.decode(tokenizer(input_text ).input_ids )
        self.assertEqual(phonemes , phonemes_enc_dec )
    def test_decode( self ):
        tokenizer = self.tokenizer_class.from_pretrained("facebook/wav2vec2-lv-60-espeak-cv-ft" )
        sample_ids = [
            [11, 5, 15, tokenizer.pad_token_id, 15, 8, 98],
            [24, 22, 5, 24, 22, 5, 77],
        ]
        tokens = tokenizer.decode(sample_ids[0] )
        batch_tokens = tokenizer.batch_decode(sample_ids )
        self.assertEqual(tokens , batch_tokens[0] )
        self.assertEqual(batch_tokens , ["k s ɾ ɾ l ɭʲ", "j ð s j ð s oːɹ"] )
    def test_phonemize_with_word_del( self ):
        tokenizer = self.tokenizer_class.from_pretrained(
            "facebook/wav2vec2-lv-60-espeak-cv-ft" , word_delimiter_token="|" )
        tokenizer.add_tokens("|" )
        input_text = "Hello how are you"
        phonemes = tokenizer.phonemize(input_text , phonemizer_lang="en-us" )
        self.assertEqual(phonemes , "h ə l oʊ | h aʊ | ɑːɹ | j uː |" )
    def test_encode_with_del( self ):
        tokenizer = self.tokenizer_class.from_pretrained(
            "facebook/wav2vec2-lv-60-espeak-cv-ft" , word_delimiter_token="|" )
        tokenizer.add_tokens("|" )
        input_text = "Hello how are you"
        phonemes = tokenizer.phonemize(input_text , phonemizer_lang="en-us" )
        self.assertEqual(tokenizer(input_text ).input_ids , tokenizer(phonemes , do_phonemize=False ).input_ids )
    def test_decode_with_del( self ):
        tokenizer = self.tokenizer_class.from_pretrained(
            "facebook/wav2vec2-lv-60-espeak-cv-ft" , word_delimiter_token="|" )
        tokenizer.add_tokens("|" )
        # fmt: off
        sample_ids = [
            [11, 5, 15, tokenizer.pad_token_id, tokenizer.word_delimiter_token_id, 15, 8, tokenizer.word_delimiter_token_id, 98],
            [tokenizer.word_delimiter_token_id, 24, 22, tokenizer.word_delimiter_token_id, 5, 24, 22, 5, 77],
        ]
        # fmt: on
        # decode with word_del_token filter
        tokens = tokenizer.decode(sample_ids[0] )
        batch_tokens = tokenizer.batch_decode(sample_ids )
        self.assertEqual(tokens , batch_tokens[0] )
        self.assertEqual(batch_tokens , ["k s ɾ ɾ l ɭʲ", "j ð s j ð s oːɹ"] )
        # decode with no word_del_token filter
        tokens = tokenizer.decode(sample_ids[0] , filter_word_delimiter_token=False )
        batch_tokens = tokenizer.batch_decode(sample_ids , filter_word_delimiter_token=False )
        self.assertEqual(tokens , batch_tokens[0] )
        self.assertEqual(batch_tokens , ["k s ɾ | ɾ l | ɭʲ", "| j ð | s j ð s oːɹ"] )
    def test_encode_decode_with_del( self ):
        tokenizer = self.tokenizer_class.from_pretrained(
            "facebook/wav2vec2-lv-60-espeak-cv-ft" , word_delimiter_token="|" )
        tokenizer.add_tokens("|" )
        input_text = "Hello how are you"
        phonemes = tokenizer.phonemize(input_text , phonemizer_lang="en-us" )
        phonemes_enc_dec = tokenizer.decode(tokenizer(input_text ).input_ids , filter_word_delimiter_token=False )
        self.assertEqual(phonemes , phonemes_enc_dec )
    def test_encode_decode_with_del_filter( self ):
        tokenizer = self.tokenizer_class.from_pretrained(
            "facebook/wav2vec2-lv-60-espeak-cv-ft" , word_delimiter_token="|" )
        tokenizer.add_tokens("|" )
        input_text = "Hello how are you"
        phonemes = tokenizer.phonemize(input_text , phonemizer_lang="en-us" )
        phonemes_enc_dec = tokenizer.decode(tokenizer(input_text ).input_ids , filter_word_delimiter_token=True )
        self.assertEqual(" ".join([p.strip() for p in phonemes.split(" |" )] ).strip() , phonemes_enc_dec )
    def test_change_phonemizer_lang( self ):
        tokenizer = self.tokenizer_class.from_pretrained(
            "facebook/wav2vec2-lv-60-espeak-cv-ft" , word_delimiter_token=None )
        input_text = "Hello how are you"
        input_ids_en = tokenizer(input_text , phonemizer_lang="en-us" ).input_ids
        input_ids_fr = tokenizer(input_text , phonemizer_lang="fr-fr" ).input_ids
        self.assertNotEqual(input_ids_en , input_ids_fr )
        text_en = tokenizer.decode(input_ids_en )
        text_fr = tokenizer.decode(input_ids_fr )
        self.assertEqual(text_en , "h ə l oʊ h aʊ ɑːɹ j uː" )
        self.assertEqual(text_fr , "ɛ l o h aʊ a ʁ j u" )
    def test_case_insensitive( self ):
        tokenizer = self.tokenizer_class.from_pretrained("facebook/wav2vec2-lv-60-espeak-cv-ft" )
        input_text_up = "Hello how Are you"
        input_text_low = "hello how are you"
        input_ids_up = tokenizer(input_text_up ).input_ids
        input_ids_low = tokenizer(input_text_low ).input_ids
        self.assertEqual(input_ids_up , input_ids_low )
    def test_tokenizer_decode_added_tokens( self ):
        tokenizer = self.tokenizer_class.from_pretrained("facebook/wav2vec2-lv-60-espeak-cv-ft" )
        tokenizer.add_tokens(["!", "?"] )
        tokenizer.add_special_tokens({"cls_token": "$$$"} )
        # fmt: off
        sample_ids = [
            [11, 5, 15, tokenizer.pad_token_id, 15, 8, 98, 392, 392, 393, 392, 392, 393, 394, 394],
            [24, 22, 5, 24, 22, 5, 77, tokenizer.pad_token_id, 394, 394],
        ]
        # fmt: on
        batch_tokens = tokenizer.batch_decode(sample_ids )
        self.assertEqual(batch_tokens , ["k s ɾ ɾ l ɭʲ!?!? $$$", "j ð s j ð s oːɹ $$$"] )
    @staticmethod
    def get_from_offsets( offsets , key ):
        retrieved_list = [d[key] for d in offsets]
        return retrieved_list
    def test_offsets( self ):
        tokenizer = self.get_tokenizer(word_delimiter_token="|" )
        tokenizer.add_tokens("|" )
        # fmt: off
        # ksssɾɾ|ɾɾ<pad>ɾɾ|<pad>ɾlll|ɭʲ -> k s ɾ ɾ | ɾ l | ɭʲ"
        sample_ids = [11, 5, 5, 5, 15, 15, tokenizer.pad_token_id, 15, 15, tokenizer.word_delimiter_token_id, tokenizer.pad_token_id, 15, 8, 8, 8, tokenizer.word_delimiter_token_id, 98]
        # fmt: on
        outputs = tokenizer.decode(sample_ids , output_char_offsets=True , filter_word_delimiter_token=False )
        # check Wav2Vec2CTCTokenizerOutput keys for char
        self.assertEqual(len(outputs.keys() ) , 2 )
        self.assertTrue("text" in outputs )
        self.assertTrue("char_offsets" in outputs )
        self.assertTrue(isinstance(outputs , Wav2Vec2PhonemeCTCTokenizerOutput ) )
        # check that order of chars is correct and identical for both outputs
        self.assertEqual(" ".join(self.get_from_offsets(outputs["char_offsets"] , "char" ) ) , outputs.text )
        self.assertListEqual(
            self.get_from_offsets(outputs["char_offsets"] , "char" ) , ["k", "s", "ɾ", "ɾ", "|", "ɾ", "l", "|", "ɭʲ"] )
        # check that offsets are actually correct for char
        # 0-1 is 11, 1-4 is 5, 4-6 is first 15, 6-7 is <pad> (thus not shown), 7-9 is second 15, 9-10 is word_delimiter_token,
        # 10-11 is <pad> (thus not shown), 11-12 is third 15, 12-15 is 8, 15-16 is word_delimiter_token, 16-17 is 98
        self.assertListEqual(
            self.get_from_offsets(outputs["char_offsets"] , "start_offset" ) , [0, 1, 4, 7, 9, 11, 12, 15, 16] )
        self.assertListEqual(
            self.get_from_offsets(outputs["char_offsets"] , "end_offset" ) , [1, 4, 6, 9, 10, 12, 15, 16, 17] )
    def test_offsets_batch(self):
        tokenizer = self.get_tokenizer(word_delimiter_token="|")

        def check_list_tuples_equal(outputs_batch, outputs_list):
            self.assertTrue(isinstance(outputs_batch, WavaVecaPhonemeCTCTokenizerOutput))
            self.assertTrue(isinstance(outputs_list[0], WavaVecaPhonemeCTCTokenizerOutput))

            # transform list to ModelOutput
            outputs_batch_a = WavaVecaPhonemeCTCTokenizerOutput(
                {k: [d[k] for d in outputs_list] for k in outputs_list[0]}
            )

            self.assertListEqual(outputs_batch["text"], outputs_batch_a["text"])

            def recursive_check(list_or_dict_a, list_or_dict_b):
                if isinstance(list_or_dict_a, list):
                    [recursive_check(la, lb) for la, lb in zip(list_or_dict_a, list_or_dict_b)]
                self.assertEqual(list_or_dict_a, list_or_dict_b)

            if "char_offsets" in outputs_batch:
                recursive_check(outputs_batch["char_offsets"], outputs_batch_a["char_offsets"])
# fmt: off
        sample_ids = [
[11, 5, 15, tokenizer.pad_token_id, 15, 4, 8, 98, 32, 32, 32, 32, 4, 33, tokenizer.word_delimiter_token_id, 32, 32, 33, 34, 34],
[24, 22, 5, tokenizer.word_delimiter_token_id, tokenizer.word_delimiter_token_id, 24, 22, 22, 22, 4, 5, 77, tokenizer.pad_token_id, 22, 22, 4, 34, 34, 34, 34],
]
# fmt: on
# We assume that `decode` works as expected. All we will check now is
# the output type is correct and the output is identical to `decode`
# char
        outputs_char_batch = tokenizer.batch_decode(sample_ids, output_char_offsets=True)
        outputs_char = [tokenizer.decode(ids, output_char_offsets=True) for ids in sample_ids]
        check_list_tuples_equal(outputs_char_batch, outputs_char)
@unittest.skip("Wav2Vec2PhonemeTokenizer always lower cases letters to correctly map to phonemes" )
def A__ ( self : Optional[int] ):
pass
@unittest.skip("Wav2Vec2PhonemeTokenizer always puts spaces between phonemes" )
def A__ ( self : Optional[int] ):
pass
@unittest.skip("encodes to text to ids, but decodes ids to phonemes -> not possible to have internal consistency" )
def A__ ( self : Any ):
pass
@unittest.skip("Wav2Vec2PhonemeModel has no max model length => no testing" )
def A__ ( self : Dict ):
pass
    def test_add_tokens_tokenizer(self):
        tokenizers = self.get_tokenizers(do_lower_case=False)
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                vocab_size = tokenizer.vocab_size
                all_size = len(tokenizer)

                self.assertNotEqual(vocab_size, 0)

                # We usually have added tokens from the start in tests because our vocab fixtures are
                # smaller than the original vocabs - let's not assert this
                # self.assertEqual(vocab_size, all_size)

                new_toks = ["aaaaa bbbbbb", "cccccccccdddddddd"]
                added_toks = tokenizer.add_tokens(new_toks)
                vocab_size_2 = tokenizer.vocab_size
                all_size_2 = len(tokenizer)

                self.assertNotEqual(vocab_size_2, 0)
                self.assertEqual(vocab_size, vocab_size_2)
                self.assertEqual(added_toks, len(new_toks))
                self.assertEqual(all_size_2, all_size + len(new_toks))

                tokens = tokenizer.encode("aaaaa bbbbbb low cccccccccdddddddd l", add_special_tokens=False)

                self.assertGreaterEqual(len(tokens), 4)
                self.assertGreater(tokens[0], tokenizer.vocab_size - 1)
                self.assertGreater(tokens[-3], tokenizer.vocab_size - 1)

                new_toks_2 = {"eos_token": ">>>>|||<||<<|<<", "pad_token": "<<<<<|||>|>>>>|>"}
                added_toks_2 = tokenizer.add_special_tokens(new_toks_2)
                vocab_size_3 = tokenizer.vocab_size
                all_size_3 = len(tokenizer)

                self.assertNotEqual(vocab_size_3, 0)
                self.assertEqual(vocab_size, vocab_size_3)
                self.assertEqual(added_toks_2, len(new_toks_2))
                self.assertEqual(all_size_3, all_size_2 + len(new_toks_2))

                tokens = tokenizer.encode(
                    ">>>>|||<||<<|<< aaaaabbbbbb low cccccccccdddddddd <<<<<|||>|>>>>|> l", add_special_tokens=False
                )

                self.assertGreaterEqual(len(tokens), 6)
                self.assertGreater(tokens[0], tokenizer.vocab_size - 1)
                self.assertGreater(tokens[0], tokens[1])
                self.assertGreater(tokens[-3], tokenizer.vocab_size - 1)
                self.assertGreater(tokens[-3], tokens[-4])
                self.assertEqual(tokens[0], tokenizer.eos_token_id)
                self.assertEqual(tokens[-3], tokenizer.pad_token_id)
@unittest.skip("The tokenizer shouldn't be used to encode input IDs (except for labels), only to decode." )
def A__ ( self : List[Any] ):
pass
@unittest.skip("The tokenizer shouldn't be used to encode input IDs (except for labels), only to decode." )
def A__ ( self : str ):
pass
    def test_convert_tokens_to_string_format(self):
        # The default common tokenizer tests assumes that the output of `convert_tokens_to_string` is a string which
        # is not the case for Wav2Vec2PhonemeCTCTokenizer.
        tokenizers = self.get_tokenizers(fast=True, do_lower_case=True)
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                tokens = ["ð", "ɪ", "s", "ɪ", "z", "ɐ", "t", "ɛ", "k", "s", "t"]
                output = tokenizer.convert_tokens_to_string(tokens)

                self.assertIsInstance(output["text"], str)
| 37 |
import unittest
import numpy as np
from transformers.testing_utils import is_flaky, require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DonutImageProcessor
class DonutImageProcessingTester(unittest.TestCase):
    def __init__(
        self, parent, batch_size=7, num_channels=3, image_size=18, min_resolution=30,
        max_resolution=400, do_resize=True, size=None, do_thumbnail=True, do_align_axis=False,
        do_pad=True, do_normalize=True, image_mean=[0.5, 0.5, 0.5], image_std=[0.5, 0.5, 0.5],
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size if size is not None else {"height": 18, "width": 20}
        self.do_thumbnail = do_thumbnail
        self.do_align_axis = do_align_axis
        self.do_pad = do_pad
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
    def prepare_image_processor_dict(self):
return {
"do_resize": self.do_resize,
"size": self.size,
"do_thumbnail": self.do_thumbnail,
"do_align_long_axis": self.do_align_axis,
"do_pad": self.do_pad,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
}
@require_torch
@require_vision
class DonutImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = DonutImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = DonutImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()
    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "do_thumbnail"))
        self.assertTrue(hasattr(image_processing, "do_align_long_axis"))
        self.assertTrue(hasattr(image_processing, "do_pad"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))
    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"height": 18, "width": 20})

        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42)
        self.assertEqual(image_processor.size, {"height": 42, "width": 42})

        # Previous config had dimensions in (width, height) order
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=(42, 84))
        self.assertEqual(image_processor.size, {"height": 84, "width": 42})
    def test_batch_feature(self):
        pass
@is_flaky()
    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )
@is_flaky()
    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )
@is_flaky()
    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )
| 37 | 1 |
"""simple docstring"""
def a__ ( lowerCAmelCase ) -> int:
if divisor % 5 == 0 or divisor % 2 == 0:
return 0
UpperCAmelCase__ : Optional[Any] = 1
UpperCAmelCase__ : Any = 1
while repunit:
UpperCAmelCase__ : Union[str, Any] = (10 * repunit + 1) % divisor
repunit_index += 1
return repunit_index
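

# Sanity check worked out by hand: A(7) = 6, because R(6) = 111111 = 3 * 7 * 11 * 13 * 37
# is the first repunit divisible by 7.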
def solution(limit: int = 1_000_000) -> int:
    # Only numbers coprime to 10 have repunit multiples, so scan odd divisors;
    # least_divisible_repunit returns 0 for multiples of 2 and 5.
    divisor = limit - 1
    if divisor % 2 == 0:
        divisor += 1
    while least_divisible_repunit(divisor) <= limit:
        divisor += 2
    return divisor
if __name__ == "__main__":
print(f'''{solution() = }''')
| 182 |
"""simple docstring"""
from __future__ import annotations
import math
class SegmentTree:
    def __init__(self, size):
        self.size = size
        # approximate the overall size of segment tree with given value
        self.segment_tree = [0 for i in range(0, 4 * size)]
        # create array to store lazy update
        self.lazy = [0 for i in range(0, 4 * size)]
        self.flag = [0 for i in range(0, 4 * size)]  # flag for lazy update

    def left(self, idx):
        return idx * 2

    def right(self, idx):
        return idx * 2 + 1

    def build(self, idx, left_element, right_element, a):
        if left_element == right_element:
            self.segment_tree[idx] = a[left_element - 1]
        else:
            mid = (left_element + right_element) // 2
            self.build(self.left(idx), left_element, mid, a)
            self.build(self.right(idx), mid + 1, right_element, a)
            self.segment_tree[idx] = max(
                self.segment_tree[self.left(idx)], self.segment_tree[self.right(idx)]
            )

    def update(self, idx, left_element, right_element, a, b, val):
        """Assign `val` to every position in [a, b], pushing pending values down lazily."""
        if self.flag[idx] is True:
            self.segment_tree[idx] = self.lazy[idx]
            self.flag[idx] = False
            if left_element != right_element:
                self.lazy[self.left(idx)] = self.lazy[idx]
                self.lazy[self.right(idx)] = self.lazy[idx]
                self.flag[self.left(idx)] = True
                self.flag[self.right(idx)] = True

        if right_element < a or left_element > b:
            return True
        if left_element >= a and right_element <= b:
            self.segment_tree[idx] = val
            if left_element != right_element:
                self.lazy[self.left(idx)] = val
                self.lazy[self.right(idx)] = val
                self.flag[self.left(idx)] = True
                self.flag[self.right(idx)] = True
            return True
        mid = (left_element + right_element) // 2
        self.update(self.left(idx), left_element, mid, a, b, val)
        self.update(self.right(idx), mid + 1, right_element, a, b, val)
        self.segment_tree[idx] = max(
            self.segment_tree[self.left(idx)], self.segment_tree[self.right(idx)]
        )
        return True

    def query(self, idx, left_element, right_element, a, b):
        """Return the maximum over [a, b], applying pending lazy values on the way down."""
        if self.flag[idx] is True:
            self.segment_tree[idx] = self.lazy[idx]
            self.flag[idx] = False
            if left_element != right_element:
                self.lazy[self.left(idx)] = self.lazy[idx]
                self.lazy[self.right(idx)] = self.lazy[idx]
                self.flag[self.left(idx)] = True
                self.flag[self.right(idx)] = True
        if right_element < a or left_element > b:
            return -math.inf
        if left_element >= a and right_element <= b:
            return self.segment_tree[idx]
        mid = (left_element + right_element) // 2
        q1 = self.query(self.left(idx), left_element, mid, a, b)
        q2 = self.query(self.right(idx), mid + 1, right_element, a, b)
        return max(q1, q2)

    def __str__(self):
        return str([self.query(1, 1, self.size, i, i) for i in range(1, self.size + 1)])
if __name__ == "__main__":
    A = [1, 2, -4, 7, 3, -5, 6, 11, -20, 9, 14, 15, 5, 2, -8]
    size = 15
    segt = SegmentTree(size)
segt.build(1, 1, size, A)
print(segt.query(1, 1, size, 4, 6))
print(segt.query(1, 1, size, 7, 11))
print(segt.query(1, 1, size, 7, 12))
    segt.update(1, 1, size, 1, 3, 111)
print(segt.query(1, 1, size, 1, 15))
    segt.update(1, 1, size, 7, 8, 235)
print(segt)
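    # Expected query results above: max over [4, 6] is 7, over [7, 11] is 14,
    # over [7, 12] is 15, and after assigning 111 to [1, 3] the max over [1, 15] is 111.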
| 182 | 1 |
from unittest.mock import Mock, patch
from file_transfer.send_file import send_file
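
# Note: with stacked `@patch` decorators the mocks are passed to the test
# bottom-up, so the first argument is the `builtins.open` mock and the second
# is the `socket.socket` mock.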
@patch('socket.socket' )
@patch('builtins.open' )
def test_send_file_running_as_expected(file, sock):
    # ===== initialization =====
    conn = Mock()
    sock.return_value.accept.return_value = conn, Mock()
    f = iter([1, None])
    conn.recv.side_effect = lambda *args: next(f)

    # ===== invoke =====
    send_file(filename="mytext.txt", testing=True)
# ===== ensurance =====
sock.assert_called_once()
sock.return_value.bind.assert_called_once()
sock.return_value.listen.assert_called_once()
sock.return_value.accept.assert_called_once()
conn.recv.assert_called_once()
file.return_value.__enter__.assert_called_once()
file.return_value.__enter__.return_value.read.assert_called()
conn.send.assert_called_once()
conn.close.assert_called_once()
sock.return_value.shutdown.assert_called_once()
sock.return_value.close.assert_called_once()
| 720 |
def different_signs(num_a: int, num_b: int) -> bool:
    # XOR of two integers is negative exactly when their sign bits differ.
    return num_a ^ num_b < 0
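

# Example: different_signs(1, -1) is True, different_signs(1, 1) is False.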
if __name__ == "__main__":
import doctest
doctest.testmod()
| 354 | 0 |
"""simple docstring"""
import collections
import json
import os
import re
from typing import TYPE_CHECKING, List, Optional, Tuple
import numpy as np
from ...tokenization_utils_fast import PreTrainedTokenizer
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "emoji_file": "emoji.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "abeja/gpt-neox-japanese-2.7b": "https://huggingface.co/abeja/gpt-neox-japanese-2.7b/resolve/main/vocab.txt",
    },
    "emoji_file": {
        "abeja/gpt-neox-japanese-2.7b": "https://huggingface.co/abeja/gpt-neox-japanese-2.7b/resolve/main/emoji.json",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "abeja/gpt-neox-japanese-2.7b": 2048,
}
def load_vocab_and_emoji(vocab_file, emoji_file):
    """Loads a vocabulary file and an emoji file into dictionaries."""
    with open(emoji_file, "r", encoding="utf-8") as f:
        emoji = json.loads(f.read())

    vocab = collections.OrderedDict()
    raw_vocab = collections.OrderedDict()
    ids_to_tokens = collections.OrderedDict()
    with open(vocab_file, "r", encoding="utf-8") as f:
        token = f.readlines()
    token = [[t.rstrip("\n")] if (t == "," or "," not in t) else t.rstrip("\n").split(",") for t in token]
    for idx, b in enumerate(token):
        ids_to_tokens[idx] = b
        raw_vocab[",".join(b)] = idx
        for wd in b:
            vocab[wd] = idx
    return vocab, raw_vocab, ids_to_tokens, emoji
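

# A line in vocab.txt may hold several comma-separated surface forms that map to
# the same id (Japanese spelling variants), which is why both `raw_vocab`
# (full line -> id) and `vocab` (each variant -> id) are built above.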
class GPTNeoXJapaneseTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    def __init__(
        self, vocab_file, emoji_file, unk_token="<|endoftext|>", pad_token="<|endoftext|>",
        bos_token="<|startoftext|>", eos_token="<|endoftext|>", do_clean_text=False, **kwargs,
    ):
        super().__init__(
            unk_token=unk_token, pad_token=pad_token, bos_token=bos_token,
            eos_token=eos_token, do_clean_text=do_clean_text, **kwargs,
        )
        if not os.path.isfile(vocab_file):
            raise ValueError(
                f"Can't find a vocabulary file at path '{vocab_file}'. To load the vocabulary from a Google pretrained"
                " model use `tokenizer = GPTNeoXJapaneseTokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`"
            )
        if not os.path.isfile(emoji_file):
            raise ValueError(
                f"Can't find a emoji file at path '{emoji_file}'. To load the emoji information from a Google"
                " pretrained model use `tokenizer = GPTNeoXJapaneseTokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`"
            )
        self.do_clean_text = do_clean_text
        self.vocab, self.raw_vocab, self.ids_to_tokens, self.emoji = load_vocab_and_emoji(vocab_file, emoji_file)
        self.subword_tokenizer = SubWordJapaneseTokenizer(
            vocab=self.vocab, ids_to_tokens=self.ids_to_tokens, emoji=self.emoji
        )
    @property
    def vocab_size(self):
        # self.vocab contains support for character fluctuation unique to Japanese, and has a large number of vocab
        return len(self.raw_vocab)

    def get_vocab(self):
        return dict(self.raw_vocab, **self.added_tokens_encoder)

    def _tokenize(self, text):
        return self.subword_tokenizer.tokenize(text, clean=self.do_clean_text)

    def _convert_token_to_id(self, token):
        """Converts a token (str) to an id using the vocab."""
        return self.vocab.get(token, self.vocab.get(self.unk_token))

    def _convert_id_to_token(self, index):
        """Converts an index (integer) to a token (str) using the vocab."""
        return self.subword_tokenizer.convert_id_to_token(index)

    def convert_tokens_to_string(self, tokens):
        """Converts a sequence of tokens (strings) into a single string."""
        out_string = "".join(tokens).strip()
        return out_string

    def _build_conversation_input_ids(self, conversation: "Conversation") -> List[int]:
        input_ids = []
        for is_user, text in conversation.iter_texts():
            input_ids.extend(self.encode(text, add_special_tokens=False) + [self.eos_token_id])
        if len(input_ids) > self.model_max_length:
            input_ids = input_ids[-self.model_max_length :]
        return input_ids
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        index = 0
        if os.path.isdir(save_directory):
            vocab_file = os.path.join(
                save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
            )
            emoji_file = os.path.join(
                save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["emoji_file"]
            )
        else:
            vocab_file = (
                (filename_prefix + "-" if filename_prefix else "") + save_directory + VOCAB_FILES_NAMES["vocab_file"]
            )
            emoji_file = (
                (filename_prefix + "-" if filename_prefix else "") + save_directory + VOCAB_FILES_NAMES["emoji_file"]
            )
        with open(vocab_file, "w", encoding="utf-8") as writer:
            for token_index, token in self.ids_to_tokens.items():
                if index != token_index:
                    logger.warning(
                        f"Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive."
                        " Please check that the vocabulary is not corrupted!"
                    )
                    index = token_index
                writer.write(",".join(token) + "\n")
                index += 1
        with open(emoji_file, "w", encoding="utf-8") as writer:
            json.dump(self.emoji, writer)
        return vocab_file, emoji_file
class SubWordJapaneseTokenizer(object):
    """
    Subword tokenizer based on https://github.com/tanreinama/Japanese-BPEEncoder_V2
    """

    def __init__(self, vocab, ids_to_tokens, emoji):
        self.vocab = vocab  # same as swe
        self.ids_to_tokens = ids_to_tokens  # same as bpe
        self.emoji = emoji
        self.maxlen = np.max([len(w) for w in self.vocab.keys()])
        self.content_repatter1 = re.compile(r"(https?|ftp)(:\/\/[-_\.!~*\'()a-zA-Z0-9;\/?:\@&=\+$,%#]+)")
        self.content_repatter2 = re.compile(r"[A-Za-z0-9\._+]*@[\-_0-9A-Za-z]+(\.[A-Za-z]+)*")
        self.content_repatter3 = re.compile(r"[\(]{0,1}[0-9]{2,4}[\)\-\(]{0,1}[0-9]{2,4}[\)\-]{0,1}[0-9]{3,4}")
        self.content_repatter4 = re.compile(
            r"([12]\d{3}[/\-年])*(0?[1-9]|1[0-2])[/\-月]((0?[1-9]|[12][0-9]|3[01])日?)*(\d{1,2}|:|\d{1,2}時|\d{1,2}分|\(日\)|\(月\)|\(火\)|\(水\)|\(木\)|\(金\)|\(土\)|㈰|㈪|㈫|㈬|㈭|㈮|㈯)*"
        )
        self.content_repatter5 = re.compile(
            r"(明治|大正|昭和|平成|令和|㍾|㍽|㍼|㍻|\u32ff)\d{1,2}年(0?[1-9]|1[0-2])月(0?[1-9]|[12][0-9]|3[01])日(\d{1,2}|:|\d{1,2}時|\d{1,2}分|\(日\)|\(月\)|\(火\)|\(水\)|\(木\)|\(金\)|\(土\)|㈰|㈪|㈫|㈬|㈭|㈮|㈯)*"
        )
        self.content_repatter6 = re.compile(
            r"((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*億)*((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*万)*((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*千)*(0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*(千円|万円|千万円|円|千ドル|万ドル|千万ドル|ドル|千ユーロ|万ユーロ|千万ユーロ|ユーロ)+(\(税込\)|\(税抜\)|\+tax)*"
        )
        keisen = "─━│┃┄┅┆┇┈┉┊┋┌┍┎┏┐┑┒┓└┕┖┗┘┙┚┛├┝┞┟┠┡┢┣┤┥┦┧┨┩┪┫┬┭┮┯┰┱┲┳┴┵┶┷┸┹┺┻┼┽┾┿╀╁╂╃╄╅╆╇╈╉╊╋╌╍╎╏═║╒╓╔╕╖╗╘╙╚╛╜╝╞╟╠╡╢╣╤╥╦╧╨╩╪╫╬╭╮╯╰╱╲╳╴╵╶╷╸╹╺╻╼╽╾╿"
        blocks = "▀▁▂▃▄▅▆▇█▉▊▋▌▍▎▏▐░▒▓▔▕▖▗▘▙▚▛▜▝▞▟"
        self.content_trans1 = str.maketrans({k: "<BLOCK>" for k in keisen + blocks})
    def __len__(self):
        return len(self.ids_to_tokens)
    def clean_text(self, content):
        content = self.content_repatter1.sub("<URL>", content)
        content = self.content_repatter2.sub("<EMAIL>", content)
        content = self.content_repatter3.sub("<TEL>", content)
        content = self.content_repatter4.sub("<DATE>", content)
        content = self.content_repatter5.sub("<DATE>", content)
        content = self.content_repatter6.sub("<PRICE>", content)
        content = content.translate(self.content_trans1)
        while "<BLOCK><BLOCK>" in content:
            content = content.replace("<BLOCK><BLOCK>", "<BLOCK>")
        return content
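
    # For instance (illustrative input), clean_text("連絡は test@example.com まで")
    # returns "連絡は <EMAIL> まで": each matched pattern is collapsed to a
    # placeholder token before subword tokenization.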
    def tokenize(self, text, clean=False):
        text = text.replace(" ", "<SP>")
        text = text.replace("\u3000", "<SP>")  # full-width space
        text = text.replace("\r\n", "<BR>")
        text = text.replace("\n", "<BR>")
        text = text.replace("\r", "<BR>")
        text = text.replace("\t", "<TAB>")
        text = text.replace("—", "ー")
        text = text.replace("−", "ー")
        for k, v in self.emoji["emoji"].items():
            if k in text:
                text = text.replace(k, v)
        if clean:
            text = self.clean_text(text)

        def check_simbol(x):
            e = x.encode()
            if len(x) == 1 and len(e) == 2:
                c = (int(e[0]) << 8) + int(e[1])
                if (
                    (c >= 0xC2A1 and c <= 0xC2BF)
                    or (c >= 0xC780 and c <= 0xC783)
                    or (c >= 0xCAB9 and c <= 0xCBBF)
                    or (c >= 0xCC80 and c <= 0xCDA2)
                ):
                    return True
            return False

        def checku2e(x):
            e = x.encode()
            if len(x) == 1 and len(e) == 3:
                c = (int(e[0]) << 16) + (int(e[1]) << 8) + int(e[2])
                if c >= 0xE28080 and c <= 0xE2B07F:
                    return True
            return False

        pos = 0
        result = []
        while pos < len(text):
            end = min(len(text), pos + self.maxlen + 1) if text[pos] == "<" else pos + 3
            candidates = []  # (token_id, token, pos)
            for e in range(end, pos, -1):
                wd = text[pos:e]
                if wd in self.vocab:
                    if wd[0] == "<" and len(wd) > 2:
                        candidates = [(self.vocab[wd], wd, e)]
                        break
                    else:
                        candidates.append((self.vocab[wd], wd, e))
            if len(candidates) > 0:
                # the smallest token_id is adopted
                _, wd, e = sorted(candidates, key=lambda x: x[0])[0]
                result.append(wd)
                pos = e
            else:
                end = pos + 1
                wd = text[pos:end]
                if check_simbol(wd):
                    result.append("<KIGOU>")
                elif checku2e(wd):
                    result.append("<U2000U2BFF>")
                else:
                    for i in wd.encode("utf-8"):
                        result.append("<|byte%d|>" % i)
                pos = end
        return result
    def convert_id_to_token(self, index, breakline="\n"):
        words = []
        byte_tokens = []
        word = self.ids_to_tokens[index][0]
        if word[:6] == "<|byte" and word[-2:] == "|>":
            byte_tokens.append(int(word[6:-2]))
        else:
            if len(byte_tokens) > 0:
                words.append(bytearray(byte_tokens).decode("utf-8", errors="replace"))
                byte_tokens = []
            if word[:7] == "<|emoji" and word[-2:] == "|>":
                words.append(self.emoji["emoji_inv"][word])
            elif word == "<SP>":
                words.append(" ")
            elif word == "<BR>":
                words.append(breakline)
            elif word == "<TAB>":
                words.append("\t")
            elif word == "<BLOCK>":
                words.append("▀")
            elif word == "<KIGOU>":
                words.append("ǀ")
            elif word == "<U2000U2BFF>":
                words.append("‖")
            else:
                words.append(word)
        if len(byte_tokens) > 0:
            words.append(bytearray(byte_tokens).decode("utf-8", errors="replace"))
        text = "".join(words)
        return text
| 644 |
"""Mosaic data augmentation: stitch four images and their YOLO-format boxes into one canvas."""
import glob
import os
import random
from string import ascii_lowercase, digits
import cv2
import numpy as np

# Parameters
OUTPUT_SIZE = (720, 1280)  # Height, Width
SCALE_RANGE = (0.4, 0.6)  # if height or width lower than this scale, drop it.
FILTER_TINY_SCALE = 1 / 100  # drop boxes whose side is below this fraction of the canvas
LABEL_DIR = ""
IMG_DIR = ""
OUTPUT_DIR = ""
NUMBER_IMAGES = 250
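
# NOTE: LABEL_DIR, IMG_DIR and OUTPUT_DIR are intentionally left empty and must
# be filled in with dataset paths before running this script.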
def main() -> None:
    img_paths, annos = get_dataset(LABEL_DIR, IMG_DIR)
    for index in range(NUMBER_IMAGES):
        idxs = random.sample(range(len(annos)), 4)
        new_image, new_annos, path = update_image_and_anno(
            img_paths,
            annos,
            idxs,
            OUTPUT_SIZE,
            SCALE_RANGE,
            filter_scale=FILTER_TINY_SCALE,
        )

        # Get random string code: '7b7ad245cdff75241935e4dd860f3bad'
        letter_code = random_chars(32)
        file_name = path.split(os.sep)[-1].rsplit(".", 1)[0]
        file_root = f"{OUTPUT_DIR}/{file_name}_MOSAIC_{letter_code}"
        cv2.imwrite(f"{file_root}.jpg", new_image, [cv2.IMWRITE_JPEG_QUALITY, 85])
        print(f"Succeeded {index + 1}/{NUMBER_IMAGES} with {file_name}")
        annos_list = []
        for anno in new_annos:
            width = anno[3] - anno[1]
            height = anno[4] - anno[2]
            x_center = anno[1] + width / 2
            y_center = anno[2] + height / 2
            obj = f"{anno[0]} {x_center} {y_center} {width} {height}"
            annos_list.append(obj)
        with open(f"{file_root}.txt", "w") as outfile:
            outfile.write("\n".join(line for line in annos_list))
def get_dataset(label_dir: str, img_dir: str) -> tuple[list, list]:
    img_paths = []
    labels = []
    for label_file in glob.glob(os.path.join(label_dir, "*.txt")):
        label_name = label_file.split(os.sep)[-1].rsplit(".", 1)[0]
        with open(label_file) as in_file:
            obj_lists = in_file.readlines()
        img_path = os.path.join(img_dir, f"{label_name}.jpg")

        boxes = []
        for obj_list in obj_lists:
            obj = obj_list.rstrip("\n").split(" ")
            xmin = float(obj[1]) - float(obj[3]) / 2
            ymin = float(obj[2]) - float(obj[4]) / 2
            xmax = float(obj[1]) + float(obj[3]) / 2
            ymax = float(obj[2]) + float(obj[4]) / 2

            boxes.append([int(obj[0]), xmin, ymin, xmax, ymax])
        if not boxes:
            continue
        img_paths.append(img_path)
        labels.append(boxes)
    return img_paths, labels
def update_image_and_anno(
    all_img_list: list,
    all_annos: list,
    idxs: list,
    output_size: tuple,
    scale_range: tuple,
    filter_scale: float = 0.0,
) -> tuple[list, list, str]:
    output_img = np.zeros([output_size[0], output_size[1], 3], dtype=np.uint8)
    scale_x = scale_range[0] + random.random() * (scale_range[1] - scale_range[0])
    scale_y = scale_range[0] + random.random() * (scale_range[1] - scale_range[0])
    divid_point_x = int(scale_x * output_size[1])
    divid_point_y = int(scale_y * output_size[0])

    new_anno = []
    path_list = []
    for i, index in enumerate(idxs):
        path = all_img_list[index]
        path_list.append(path)
        img_annos = all_annos[index]
        img = cv2.imread(path)
        if i == 0:  # top-left
            img = cv2.resize(img, (divid_point_x, divid_point_y))
            output_img[:divid_point_y, :divid_point_x, :] = img
            for bbox in img_annos:
                xmin = bbox[1] * scale_x
                ymin = bbox[2] * scale_y
                xmax = bbox[3] * scale_x
                ymax = bbox[4] * scale_y
                new_anno.append([bbox[0], xmin, ymin, xmax, ymax])
        elif i == 1:  # top-right
            img = cv2.resize(img, (output_size[1] - divid_point_x, divid_point_y))
            output_img[:divid_point_y, divid_point_x : output_size[1], :] = img
            for bbox in img_annos:
                xmin = scale_x + bbox[1] * (1 - scale_x)
                ymin = bbox[2] * scale_y
                xmax = scale_x + bbox[3] * (1 - scale_x)
                ymax = bbox[4] * scale_y
                new_anno.append([bbox[0], xmin, ymin, xmax, ymax])
        elif i == 2:  # bottom-left
            img = cv2.resize(img, (divid_point_x, output_size[0] - divid_point_y))
            output_img[divid_point_y : output_size[0], :divid_point_x, :] = img
            for bbox in img_annos:
                xmin = bbox[1] * scale_x
                ymin = scale_y + bbox[2] * (1 - scale_y)
                xmax = bbox[3] * scale_x
                ymax = scale_y + bbox[4] * (1 - scale_y)
                new_anno.append([bbox[0], xmin, ymin, xmax, ymax])
        else:  # bottom-right
            img = cv2.resize(
                img, (output_size[1] - divid_point_x, output_size[0] - divid_point_y)
            )
            output_img[divid_point_y : output_size[0], divid_point_x : output_size[1], :] = img
            for bbox in img_annos:
                xmin = scale_x + bbox[1] * (1 - scale_x)
                ymin = scale_y + bbox[2] * (1 - scale_y)
                xmax = scale_x + bbox[3] * (1 - scale_x)
                ymax = scale_y + bbox[4] * (1 - scale_y)
                new_anno.append([bbox[0], xmin, ymin, xmax, ymax])

    # Remove bounding box small than scale of filter
    if filter_scale > 0:
        new_anno = [
            anno
            for anno in new_anno
            if filter_scale < (anno[3] - anno[1]) and filter_scale < (anno[4] - anno[2])
        ]

    return output_img, new_anno, path_list[0]
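
# The bounding boxes are in normalized [0, 1] coordinates, so each quadrant can
# remap them with nothing more than a scale and an offset relative to the split
# point (scale_x, scale_y) of the mosaic canvas.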
def random_chars(number_char: int) -> str:
    """Generate a random string of `number_char` lowercase letters and digits."""
    assert number_char > 1, "The number of character should greater than 1"
    letter_code = ascii_lowercase + digits
    return "".join(random.choice(letter_code) for _ in range(number_char))
if __name__ == "__main__":
main()
print("""DONE ✅""")
| 683 | 0 |
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class BridgeTowerProcessor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "BridgeTowerImageProcessor"
    tokenizer_class = ("RobertaTokenizer", "RobertaTokenizerFast")

    def __init__(self, image_processor, tokenizer):
        super().__init__(image_processor, tokenizer)

    def __call__(
        self, images, text=None, add_special_tokens=True, padding=False, truncation=None,
        max_length=None, stride=0, pad_to_multiple_of=None, return_token_type_ids=None,
        return_attention_mask=None, return_overflowing_tokens=False, return_special_tokens_mask=False,
        return_offsets_mapping=False, return_length=False, verbose=True, return_tensors=None, **kwargs,
    ) -> BatchEncoding:
        encoding = self.tokenizer(
            text=text, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation,
            max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of,
            return_token_type_ids=return_token_type_ids, return_attention_mask=return_attention_mask,
            return_overflowing_tokens=return_overflowing_tokens,
            return_special_tokens_mask=return_special_tokens_mask,
            return_offsets_mapping=return_offsets_mapping, return_length=return_length,
            verbose=verbose, return_tensors=return_tensors, **kwargs,
        )
        # add pixel_values + pixel_mask
        encoding_image_processor = self.image_processor(
            images, return_tensors=return_tensors, do_normalize=True, do_center_crop=True, **kwargs
        )
        encoding.update(encoding_image_processor)
        return encoding

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))
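
# Usage sketch (checkpoint id shown only for illustration):
#   processor = BridgeTowerProcessor.from_pretrained("BridgeTower/bridgetower-base")
#   inputs = processor(images=image, text="a photo of a cat", return_tensors="pt")
# `inputs` then carries both the tokenizer fields and pixel_values/pixel_mask.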
| 708 |
from multiprocessing import Lock, Pipe, Process
# lock used to ensure that two processes do not access a pipe at the same time
process_lock = Lock()
def oe_process(position, value, l_send, r_send, lr_cv, rr_cv, result_pipe):
    global process_lock

    # we perform n swaps since after n swaps we know we are sorted
    # we *could* stop early if we are sorted already, but it takes as long to
    # find out we are sorted as it does to sort the list with this algorithm
    for i in range(0, 10):
        if (i + position) % 2 == 0 and r_send is not None:
            # send your value to your right neighbor
            process_lock.acquire()
            r_send[1].send(value)
            process_lock.release()

            # receive your right neighbor's value
            process_lock.acquire()
            temp = rr_cv[0].recv()
            process_lock.release()

            # take the lower value since you are on the left
            value = min(value, temp)
        elif (i + position) % 2 != 0 and l_send is not None:
            # send your value to your left neighbor
            process_lock.acquire()
            l_send[1].send(value)
            process_lock.release()

            # receive your left neighbor's value
            process_lock.acquire()
            temp = lr_cv[0].recv()
            process_lock.release()

            # take the higher value since you are on the right
            value = max(value, temp)
    # after all swaps are performed, send the values back to main
    result_pipe[1].send(value)
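

# Odd-even transposition sort of n values is guaranteed to finish after n
# phases; the hard-coded range(0, 10) above matches the 10-element list that
# main() sorts, so a different input length would need a matching phase count.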
def odd_even_transposition(arr):
    process_array_ = []
    result_pipe = []
    # initialize the list of pipes where the values will be retrieved
    for _ in arr:
        result_pipe.append(Pipe())
    # creates the processes
    # the first and last process only have one neighbor so they are made outside
    # of the loop
    temp_rs = Pipe()
    temp_rr = Pipe()
    process_array_.append(
        Process(
            target=oe_process,
            args=(0, arr[0], None, temp_rs, None, temp_rr, result_pipe[0]),
        )
    )
    temp_lr = temp_rs
    temp_ls = temp_rr

    for i in range(1, len(arr) - 1):
        temp_rs = Pipe()
        temp_rr = Pipe()
        process_array_.append(
            Process(
                target=oe_process,
                args=(i, arr[i], temp_ls, temp_rs, temp_lr, temp_rr, result_pipe[i]),
            )
        )
        temp_lr = temp_rs
        temp_ls = temp_rr

    process_array_.append(
        Process(
            target=oe_process,
            args=(
                len(arr) - 1,
                arr[len(arr) - 1],
                temp_ls,
                None,
                temp_lr,
                None,
                result_pipe[len(arr) - 1],
            ),
        )
    )

    # start the processes
    for p in process_array_:
        p.start()

    # wait for the processes to end and write their values to the list
    for p in range(0, len(result_pipe)):
        arr[p] = result_pipe[p][0].recv()
        process_array_[p].join()
    return arr
def main():
    arr = list(range(10, 0, -1))
    print("Initial List")
    print(*arr)
    arr = odd_even_transposition(arr)
    print("Sorted List\n")
    print(*arr)
if __name__ == "__main__":
main()
| 620 | 0 |
import os
from pickle import UnpicklingError
from typing import Dict, Tuple
import jax
import jax.numpy as jnp
import numpy as np
from flax.serialization import from_bytes
from flax.traverse_util import flatten_dict, unflatten_dict
import transformers
from .utils import logging
logger = logging.get_logger(__name__)
def load_pytorch_checkpoint_in_flax_state_dict(
    flax_model, pytorch_checkpoint_path, is_sharded, allow_missing_keys=False
):
    """Load pytorch checkpoints in a flax model"""
    try:
        import torch  # noqa: F401
    except ImportError:
        logger.error(
            "Loading a PyTorch model in Flax, requires both PyTorch and Flax to be installed. Please see"
            " https://pytorch.org/ and https://flax.readthedocs.io/en/latest/installation.html for installation"
            " instructions."
        )
        raise

    if not is_sharded:
        pt_path = os.path.abspath(pytorch_checkpoint_path)
        logger.info(f"Loading PyTorch weights from {pt_path}")

        pt_state_dict = torch.load(pt_path, map_location="cpu")
        logger.info(f"PyTorch checkpoint contains {sum(t.numel() for t in pt_state_dict.values()):,} parameters.")

        flax_state_dict = convert_pytorch_state_dict_to_flax(pt_state_dict, flax_model)
    else:
        # model is sharded and pytorch_checkpoint_path already contains the list of .pt shard files
        flax_state_dict = convert_pytorch_sharded_state_dict_to_flax(pytorch_checkpoint_path, flax_model)
    return flax_state_dict
def rename_key_and_reshape_tensor(
    pt_tuple_key,
    pt_tensor,
    random_flax_state_dict,
    model_prefix,
):
    """Rename PT weight names to corresponding Flax weight names and reshape tensor if necessary"""

    def is_key_or_prefix_key_in_dict(key) -> bool:
        """Checks if `key` or `(model_prefix,) + key` is in random_flax_state_dict"""
        return len(set(random_flax_state_dict) & {key, (model_prefix,) + key}) > 0

    # layer norm
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("scale",)
    if pt_tuple_key[-1] in ["weight", "gamma"] and is_key_or_prefix_key_in_dict(renamed_pt_tuple_key):
        return renamed_pt_tuple_key, pt_tensor

    # batch norm layer mean
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("mean",)
    if pt_tuple_key[-1] == "running_mean" and not is_key_or_prefix_key_in_dict(pt_tuple_key):
        return renamed_pt_tuple_key, pt_tensor

    # batch norm layer var
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("var",)
    if pt_tuple_key[-1] == "running_var" and not is_key_or_prefix_key_in_dict(pt_tuple_key):
        return renamed_pt_tuple_key, pt_tensor

    # embedding
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("embedding",)
    if pt_tuple_key[-1] == "weight" and is_key_or_prefix_key_in_dict(renamed_pt_tuple_key):
        return renamed_pt_tuple_key, pt_tensor

    # conv layer
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("kernel",)
    if pt_tuple_key[-1] == "weight" and pt_tensor.ndim == 4 and not is_key_or_prefix_key_in_dict(pt_tuple_key):
        pt_tensor = pt_tensor.transpose(2, 3, 1, 0)
        return renamed_pt_tuple_key, pt_tensor

    # linear layer
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("kernel",)
    if pt_tuple_key[-1] == "weight" and not is_key_or_prefix_key_in_dict(pt_tuple_key):
        pt_tensor = pt_tensor.T
        return renamed_pt_tuple_key, pt_tensor

    # old PyTorch layer norm weight
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("weight",)
    if pt_tuple_key[-1] == "gamma":
        return renamed_pt_tuple_key, pt_tensor

    # old PyTorch layer norm bias
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("bias",)
    if pt_tuple_key[-1] == "beta":
        return renamed_pt_tuple_key, pt_tensor

    # New `weight_norm` from https://github.com/huggingface/transformers/pull/24030
    name = None
    if pt_tuple_key[-3::2] == ("parametrizations", "original0"):
        name = pt_tuple_key[-2] + "_g"
    elif pt_tuple_key[-3::2] == ("parametrizations", "original1"):
        name = pt_tuple_key[-2] + "_v"
    if name is not None:
        renamed_pt_tuple_key = pt_tuple_key[:-3] + (name,)
        return renamed_pt_tuple_key, pt_tensor

    return pt_tuple_key, pt_tensor
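

# Example of the renaming above: a PyTorch layer-norm key
# ("encoder", "layer_norm", "weight") becomes ("encoder", "layer_norm", "scale"),
# and a 4-D conv "weight" of shape (out, in, kh, kw) is transposed to Flax's
# (kh, kw, in, out) "kernel" layout.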
def convert_pytorch_state_dict_to_flax(pt_state_dict, flax_model):
    # convert pytorch tensor to numpy
    pt_state_dict = {k: v.numpy() for k, v in pt_state_dict.items()}

    model_prefix = flax_model.base_model_prefix

    # use params dict if the model contains batch norm layers
    if "batch_stats" in flax_model.params:
        flax_model_params = flax_model.params["params"]
    else:
        flax_model_params = flax_model.params
    random_flax_state_dict = flatten_dict(flax_model_params)

    # add batch_stats keys,values to dict
    if "batch_stats" in flax_model.params:
        flax_batch_stats = flatten_dict(flax_model.params["batch_stats"])
        random_flax_state_dict.update(flax_batch_stats)

    flax_state_dict = {}

    load_model_with_head_into_base_model = (model_prefix not in flax_model_params) and (
        model_prefix in {k.split(".")[0] for k in pt_state_dict.keys()}
    )
    load_base_model_into_model_with_head = (model_prefix in flax_model_params) and (
        model_prefix not in {k.split(".")[0] for k in pt_state_dict.keys()}
    )

    # Need to change some parameters name to match Flax names
    for pt_key, pt_tensor in pt_state_dict.items():
        pt_tuple_key = tuple(pt_key.split("."))

        # remove base model prefix if necessary
        has_base_model_prefix = pt_tuple_key[0] == model_prefix
        if load_model_with_head_into_base_model and has_base_model_prefix:
            pt_tuple_key = pt_tuple_key[1:]

        # Correctly rename weight parameters
        flax_key, flax_tensor = rename_key_and_reshape_tensor(
            pt_tuple_key, pt_tensor, random_flax_state_dict, model_prefix
        )

        # add model prefix if necessary
        require_base_model_prefix = (model_prefix,) + flax_key in random_flax_state_dict
        if load_base_model_into_model_with_head and require_base_model_prefix:
            flax_key = (model_prefix,) + flax_key

        if flax_key in random_flax_state_dict:
            if flax_tensor.shape != random_flax_state_dict[flax_key].shape:
                raise ValueError(
                    f"PyTorch checkpoint seems to be incorrect. Weight {pt_key} was expected to be of shape "
                    f"{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}."
                )

        # add batch stats if the model contains batchnorm layers
        if "batch_stats" in flax_model.params:
            if "mean" in flax_key[-1] or "var" in flax_key[-1]:
                flax_state_dict[("batch_stats",) + flax_key] = jnp.asarray(flax_tensor)
                continue
            # remove num_batches_tracked key
            if "num_batches_tracked" in flax_key[-1]:
                flax_state_dict.pop(flax_key, None)
                continue

            # also add unexpected weight so that warning is thrown
            flax_state_dict[("params",) + flax_key] = jnp.asarray(flax_tensor)
        else:
            # also add unexpected weight so that warning is thrown
            flax_state_dict[flax_key] = jnp.asarray(flax_tensor)

    return unflatten_dict(flax_state_dict)
def convert_pytorch_sharded_state_dict_to_flax(shard_filenames, flax_model):
    import torch

    # Load the index
    flax_state_dict = {}
    for shard_file in shard_filenames:
        # load using msgpack utils
        pt_state_dict = torch.load(shard_file)
        pt_state_dict = {k: v.numpy() for k, v in pt_state_dict.items()}

        model_prefix = flax_model.base_model_prefix

        # use params dict if the model contains batch norm layers and then add batch_stats keys,values to dict
        if "batch_stats" in flax_model.params:
            flax_model_params = flax_model.params["params"]

            random_flax_state_dict = flatten_dict(flax_model_params)
            random_flax_state_dict.update(flatten_dict(flax_model.params["batch_stats"]))
        else:
            flax_model_params = flax_model.params
            random_flax_state_dict = flatten_dict(flax_model_params)

        load_model_with_head_into_base_model = (model_prefix not in flax_model_params) and (
            model_prefix in {k.split(".")[0] for k in pt_state_dict.keys()}
        )
        load_base_model_into_model_with_head = (model_prefix in flax_model_params) and (
            model_prefix not in {k.split(".")[0] for k in pt_state_dict.keys()}
        )
        # Need to change some parameters name to match Flax names
        for pt_key, pt_tensor in pt_state_dict.items():
            pt_tuple_key = tuple(pt_key.split("."))

            # remove base model prefix if necessary
            has_base_model_prefix = pt_tuple_key[0] == model_prefix
            if load_model_with_head_into_base_model and has_base_model_prefix:
                pt_tuple_key = pt_tuple_key[1:]

            # Correctly rename weight parameters
            flax_key, flax_tensor = rename_key_and_reshape_tensor(
                pt_tuple_key, pt_tensor, random_flax_state_dict, model_prefix
            )
            # add model prefix if necessary
            require_base_model_prefix = (model_prefix,) + flax_key in random_flax_state_dict
            if load_base_model_into_model_with_head and require_base_model_prefix:
                flax_key = (model_prefix,) + flax_key

            if flax_key in random_flax_state_dict:
                if flax_tensor.shape != random_flax_state_dict[flax_key].shape:
                    raise ValueError(
                        f"PyTorch checkpoint seems to be incorrect. Weight {pt_key} was expected to be of shape "
                        f"{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}."
                    )

            # add batch stats if the model contains batchnorm layers
            if "batch_stats" in flax_model.params:
                if "mean" in flax_key[-1]:
                    flax_state_dict[("batch_stats",) + flax_key] = jnp.asarray(flax_tensor)
                    continue
                if "var" in flax_key[-1]:
                    flax_state_dict[("batch_stats",) + flax_key] = jnp.asarray(flax_tensor)
                    continue
                # remove num_batches_tracked key
                if "num_batches_tracked" in flax_key[-1]:
                    flax_state_dict.pop(flax_key, None)
                    continue

                # also add unexpected weight so that warning is thrown
                flax_state_dict[("params",) + flax_key] = jnp.asarray(flax_tensor)
            else:
                # also add unexpected weight so that warning is thrown
                flax_state_dict[flax_key] = jnp.asarray(flax_tensor)
    return unflatten_dict(flax_state_dict)
def load_flax_checkpoint_in_pytorch_model(model, flax_checkpoint_path):
    """Load flax checkpoints in a PyTorch model"""
    flax_checkpoint_path = os.path.abspath(flax_checkpoint_path)
    logger.info(f"Loading Flax weights from {flax_checkpoint_path}")

    # import correct flax class
    flax_cls = getattr(transformers, "Flax" + model.__class__.__name__)

    # load flax weight dict
    with open(flax_checkpoint_path, "rb") as state_f:
        try:
            flax_state_dict = from_bytes(flax_cls, state_f.read())
        except UnpicklingError:
            raise EnvironmentError(f"Unable to convert {flax_checkpoint_path} to Flax deserializable object. ")

    return load_flax_weights_in_pytorch_model(model, flax_state_dict)
def load_flax_weights_in_pytorch_model(pt_model, flax_state):
    """Load flax checkpoints in a PyTorch model"""
    try:
        import torch  # noqa: F401
    except ImportError:
        logger.error(
            "Loading a Flax weights in PyTorch, requires both PyTorch and Flax to be installed. Please see"
            " https://pytorch.org/ and https://flax.readthedocs.io/en/latest/installation.html for installation"
            " instructions."
        )
        raise

    # check if we have bf16 weights
    is_type_bf16 = flatten_dict(jax.tree_util.tree_map(lambda x: x.dtype == jnp.bfloat16, flax_state)).values()
    if any(is_type_bf16):
        # convert all weights to fp32 if the are bf16 since torch.from_numpy can-not handle bf16
        # and bf16 is not fully supported in PT yet.
        logger.warning(
            "Found ``bfloat16`` weights in Flax model. Casting all ``bfloat16`` weights to ``float32`` "
            "before loading those in PyTorch model."
        )
        flax_state = jax.tree_util.tree_map(
            lambda params: params.astype(np.float32) if params.dtype == jnp.bfloat16 else params, flax_state
        )

    flax_state_dict = flatten_dict(flax_state)
    pt_model_dict = pt_model.state_dict()

    load_model_with_head_into_base_model = (pt_model.base_model_prefix in flax_state) and (
        pt_model.base_model_prefix not in {k.split(".")[0] for k in pt_model_dict.keys()}
    )
    load_base_model_into_model_with_head = (pt_model.base_model_prefix not in flax_state) and (
        pt_model.base_model_prefix in {k.split(".")[0] for k in pt_model_dict.keys()}
    )

    # keep track of unexpected & missing keys
    unexpected_keys = []
    missing_keys = set(pt_model_dict.keys())

    for flax_key_tuple, flax_tensor in flax_state_dict.items():
        has_base_model_prefix = flax_key_tuple[0] == pt_model.base_model_prefix
        require_base_model_prefix = ".".join((pt_model.base_model_prefix,) + flax_key_tuple) in pt_model_dict

        # adapt flax_key to prepare for loading from/to base model only
        if load_model_with_head_into_base_model and has_base_model_prefix:
            flax_key_tuple = flax_key_tuple[1:]
        elif load_base_model_into_model_with_head and require_base_model_prefix:
            flax_key_tuple = (pt_model.base_model_prefix,) + flax_key_tuple

        # rename flax weights to PyTorch format
        if flax_key_tuple[-1] == "kernel" and flax_tensor.ndim == 4 and ".".join(flax_key_tuple) not in pt_model_dict:
            # conv layer
            flax_key_tuple = flax_key_tuple[:-1] + ("weight",)
            flax_tensor = jnp.transpose(flax_tensor, (3, 2, 0, 1))
        elif flax_key_tuple[-1] == "kernel" and ".".join(flax_key_tuple) not in pt_model_dict:
            # linear layer
            flax_key_tuple = flax_key_tuple[:-1] + ("weight",)
            flax_tensor = flax_tensor.T
        elif flax_key_tuple[-1] in ["scale", "embedding"]:
            flax_key_tuple = flax_key_tuple[:-1] + ("weight",)

        # adding batch stats from flax batch norm to pt
        elif "mean" in flax_key_tuple[-1]:
            flax_key_tuple = flax_key_tuple[:-1] + ("running_mean",)
        elif "var" in flax_key_tuple[-1]:
            flax_key_tuple = flax_key_tuple[:-1] + ("running_var",)

        if "batch_stats" in flax_state:
            flax_key = ".".join(flax_key_tuple[1:])  # Remove the params/batch_stats header
        else:
            flax_key = ".".join(flax_key_tuple)

        # We also need to look at `pt_model_dict` and see if there are keys requiring further transformation.
        special_pt_names = {}
        # New `weight_norm` from https://github.com/huggingface/transformers/pull/24030
        for key in pt_model_dict:
            key_components = key.split(".")
            name = None
            if key_components[-3::2] == ["parametrizations", "original0"]:
                name = key_components[-2] + "_g"
            elif key_components[-3::2] == ["parametrizations", "original1"]:
                name = key_components[-2] + "_v"
            if name is not None:
                key_components = key_components[:-3] + [name]
                key_to_check = ".".join(key_components)
                special_pt_names[key_to_check] = key

        if flax_key in special_pt_names:
            flax_key = special_pt_names[flax_key]

        if flax_key in pt_model_dict:
            if flax_tensor.shape != pt_model_dict[flax_key].shape:
                raise ValueError(
                    f"Flax checkpoint seems to be incorrect. Weight {flax_key_tuple} was expected "
                    f"to be of shape {pt_model_dict[flax_key].shape}, but is {flax_tensor.shape}."
                )
            else:
                # add weight to pytorch dict
                flax_tensor = np.asarray(flax_tensor) if not isinstance(flax_tensor, np.ndarray) else flax_tensor
                pt_model_dict[flax_key] = torch.from_numpy(flax_tensor)
                # remove from missing keys
                missing_keys.remove(flax_key)
        else:
            # weight is not expected by PyTorch model
            unexpected_keys.append(flax_key)

    pt_model.load_state_dict(pt_model_dict)

    # re-transform missing_keys to list
    missing_keys = list(missing_keys)

    if len(unexpected_keys) > 0:
        logger.warning(
            "Some weights of the Flax model were not used when initializing the PyTorch model"
            f" {pt_model.__class__.__name__}: {unexpected_keys}\n- This IS expected if you are initializing"
            f" {pt_model.__class__.__name__} from a Flax model trained on another task or with another architecture"
            " (e.g. initializing a BertForSequenceClassification model from a FlaxBertForPreTraining model).\n- This"
            f" IS NOT expected if you are initializing {pt_model.__class__.__name__} from a Flax model that you expect"
            " to be exactly identical (e.g. initializing a BertForSequenceClassification model from a"
            " FlaxBertForSequenceClassification model)."
        )
    else:
        logger.warning(f"All Flax model weights were used when initializing {pt_model.__class__.__name__}.\n")

    if len(missing_keys) > 0:
        logger.warning(
            f"Some weights of {pt_model.__class__.__name__} were not initialized from the Flax model and are newly"
            f" initialized: {missing_keys}\nYou should probably TRAIN this model on a down-stream task to be able to"
            " use it for predictions and inference."
        )
    else:
        logger.warning(
            f"All the weights of {pt_model.__class__.__name__} were initialized from the Flax model.\n"
            "If your task is similar to the task the model of the checkpoint was trained on, "
            f"you can already use {pt_model.__class__.__name__} for predictions without further training."
        )

    return pt_model
| 639 |
from __future__ import annotations
class IIRFilter:
    """N-order IIR filter operating on single samples."""

    def __init__(self, order: int) -> None:
        self.order = order

        # a_{0} ... a_{k}
        self.a_coeffs = [1.0] + [0.0] * order
        # b_{0} ... b_{k}
        self.b_coeffs = [1.0] + [0.0] * order

        # x[n-1] ... x[n-k]
        self.input_history = [0.0] * self.order
        # y[n-1] ... y[n-k]
        self.output_history = [0.0] * self.order

    def set_coefficients(self, a_coeffs: list[float], b_coeffs: list[float]) -> None:
        if len(a_coeffs) < self.order:
            a_coeffs = [1.0, *a_coeffs]

        if len(a_coeffs) != self.order + 1:
            raise ValueError(
                f"Expected a_coeffs to have {self.order + 1} elements "
                f"for {self.order}-order filter, got {len(a_coeffs)}"
            )

        if len(b_coeffs) != self.order + 1:
            raise ValueError(
                f"Expected b_coeffs to have {self.order + 1} elements "
                f"for {self.order}-order filter, got {len(b_coeffs)}"
            )

        self.a_coeffs = a_coeffs
        self.b_coeffs = b_coeffs

    def process(self, sample: float) -> float:
        result = 0.0

        # Start at index 1 and do index 0 at the end.
        for i in range(1, self.order + 1):
            result += (
                self.b_coeffs[i] * self.input_history[i - 1]
                - self.a_coeffs[i] * self.output_history[i - 1]
            )

        result = (result + self.b_coeffs[0] * sample) / self.a_coeffs[0]

        self.input_history[1:] = self.input_history[:-1]
        self.output_history[1:] = self.output_history[:-1]

        self.input_history[0] = sample
        self.output_history[0] = result

        return result
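

# Minimal usage sketch: with the default pass-through coefficients
# (a = [1, 0, 0], b = [1, 0, 0]) a 2nd-order filter returns its input unchanged.
#   filt = IIRFilter(2)
#   filt.process(0.5)  # -> 0.5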
| 639 | 1 |
"""simple docstring"""
import functools
def mincost_tickets(days: list[int], costs: list[int]) -> int:
    # Validation
    if not isinstance(days, list) or not all(isinstance(day, int) for day in days):
        raise ValueError("The parameter days should be a list of integers")

    if len(costs) != 3 or not all(isinstance(cost, int) for cost in costs):
        raise ValueError("The parameter costs should be a list of three integers")

    if len(days) == 0:
        return 0

    if min(days) <= 0:
        raise ValueError("All days elements should be greater than 0")

    if max(days) >= 366:
        raise ValueError("All days elements should be less than 366")

    days_set = set(days)

    @functools.cache
    def dynamic_programming(index: int) -> int:
        if index > 365:
            return 0

        if index not in days_set:
            return dynamic_programming(index + 1)

        return min(
            costs[0] + dynamic_programming(index + 1),
            costs[1] + dynamic_programming(index + 7),
            costs[2] + dynamic_programming(index + 30),
        )

    return dynamic_programming(1)
if __name__ == "__main__":
import doctest
doctest.testmod()
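
# Usage sketch for mincost_tickets above, on the two classic examples of the
# problem (1-, 7- and 30-day passes costing 2, 7 and 15 respectively).
if __name__ == "__main__":
    print(mincost_tickets([1, 4, 6, 7, 8, 20], [2, 7, 15]))  # -> 11
    print(mincost_tickets([1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 30, 31], [2, 7, 15]))  # -> 17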
| 95 |
"""simple docstring"""
import contextlib
import copy
import random
from typing import Any, Dict, Iterable, Optional, Union
import numpy as np
import torch
from .utils import deprecate, is_transformers_available
if is_transformers_available():
import transformers
def set_seed(seed: int):
    random.seed(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
    torch.cuda.manual_seed_all(seed)
# ^^ safe to call this function even if cuda is not available
class EMAModel:
    """
    Exponential Moving Average of models weights
    """

    def __init__(
        self,
        parameters: Iterable[torch.nn.Parameter],
        decay: float = 0.9_9_9_9,
        min_decay: float = 0.0,
        update_after_step: int = 0,
        use_ema_warmup: bool = False,
        inv_gamma: Union[float, int] = 1.0,
        power: Union[float, int] = 2 / 3,
        model_cls: Optional[Any] = None,
        model_config: Dict[str, Any] = None,
        **kwargs,
    ):
        if isinstance(parameters, torch.nn.Module):
            deprecation_message = (
                "Passing a `torch.nn.Module` to `ExponentialMovingAverage` is deprecated. "
                "Please pass the parameters of the module instead."
            )
            deprecate(
                "passing a `torch.nn.Module` to `ExponentialMovingAverage`", "1.0.0", deprecation_message, standard_warn=False, )
            parameters = parameters.parameters()
            # set use_ema_warmup to True if a torch.nn.Module is passed for backwards compatibility
            use_ema_warmup = True
        if kwargs.get("max_value", None) is not None:
            deprecation_message = "The `max_value` argument is deprecated. Please use `decay` instead."
            deprecate("max_value", "1.0.0", deprecation_message, standard_warn=False)
            decay = kwargs["max_value"]
        if kwargs.get("min_value", None) is not None:
            deprecation_message = "The `min_value` argument is deprecated. Please use `min_decay` instead."
            deprecate("min_value", "1.0.0", deprecation_message, standard_warn=False)
            min_decay = kwargs["min_value"]
        parameters = list(parameters)
        self.shadow_params = [p.clone().detach() for p in parameters]
        if kwargs.get("device", None) is not None:
            deprecation_message = "The `device` argument is deprecated. Please use `to` instead."
            deprecate("device", "1.0.0", deprecation_message, standard_warn=False)
            self.to(device=kwargs["device"])
        self.temp_stored_params = None
        self.decay = decay
        self.min_decay = min_decay
        self.update_after_step = update_after_step
        self.use_ema_warmup = use_ema_warmup
        self.inv_gamma = inv_gamma
        self.power = power
        self.optimization_step = 0
        self.cur_decay_value = None  # set in `step()`
        self.model_cls = model_cls
        self.model_config = model_config
@classmethod
    def from_pretrained(cls, path, model_cls) -> "EMAModel":
        _, ema_kwargs = model_cls.load_config(path, return_unused_kwargs=True)
        model = model_cls.from_pretrained(path)
        ema_model = cls(model.parameters(), model_cls=model_cls, model_config=model.config)
        ema_model.load_state_dict(ema_kwargs)
        return ema_model
    def save_pretrained(self, path):
        if self.model_cls is None:
            raise ValueError("`save_pretrained` can only be used if `model_cls` was defined at __init__.")
        if self.model_config is None:
            raise ValueError("`save_pretrained` can only be used if `model_config` was defined at __init__.")
        model = self.model_cls.from_config(self.model_config)
        state_dict = self.state_dict()
        state_dict.pop("shadow_params", None)
        model.register_to_config(**state_dict)
        self.copy_to(model.parameters())
        model.save_pretrained(path)
    def get_decay(self, optimization_step: int) -> float:
        """Compute the decay factor for the exponential moving average."""
        step = max(0, optimization_step - self.update_after_step - 1)
        if step <= 0:
            return 0.0
        if self.use_ema_warmup:
            cur_decay_value = 1 - (1 + step / self.inv_gamma) ** -self.power
        else:
            cur_decay_value = (1 + step) / (10 + step)
        cur_decay_value = min(cur_decay_value, self.decay)
        # make sure decay is not smaller than min_decay
        cur_decay_value = max(cur_decay_value, self.min_decay)
        return cur_decay_value
@torch.no_grad()
    def step(self, parameters: Iterable[torch.nn.Parameter]):
        if isinstance(parameters, torch.nn.Module):
            deprecation_message = (
                "Passing a `torch.nn.Module` to `ExponentialMovingAverage.step` is deprecated. "
                "Please pass the parameters of the module instead."
            )
            deprecate(
                "passing a `torch.nn.Module` to `ExponentialMovingAverage.step`", "1.0.0", deprecation_message, standard_warn=False, )
            parameters = parameters.parameters()
        parameters = list(parameters)
        self.optimization_step += 1
        # Compute the decay factor for the exponential moving average.
        decay = self.get_decay(self.optimization_step)
        self.cur_decay_value = decay
        one_minus_decay = 1 - decay
        context_manager = contextlib.nullcontext
        if is_transformers_available() and transformers.deepspeed.is_deepspeed_zero3_enabled():
            import deepspeed
        for s_param, param in zip(self.shadow_params, parameters):
            if is_transformers_available() and transformers.deepspeed.is_deepspeed_zero3_enabled():
                context_manager = deepspeed.zero.GatheredParameters(param, modifier_rank=None)
            with context_manager():
                if param.requires_grad:
                    s_param.sub_(one_minus_decay * (s_param - param))
                else:
                    s_param.copy_(param)
    def copy_to(self, parameters: Iterable[torch.nn.Parameter]) -> None:
        parameters = list(parameters)
        for s_param, param in zip(self.shadow_params, parameters):
            param.data.copy_(s_param.to(param.device).data)
    def to(self, device=None, dtype=None) -> None:
        self.shadow_params = [
            p.to(device=device, dtype=dtype) if p.is_floating_point() else p.to(device=device)
            for p in self.shadow_params
        ]
    def state_dict(self) -> dict:
        return {
            "decay": self.decay,
            "min_decay": self.min_decay,
            "optimization_step": self.optimization_step,
            "update_after_step": self.update_after_step,
            "use_ema_warmup": self.use_ema_warmup,
            "inv_gamma": self.inv_gamma,
            "power": self.power,
            "shadow_params": self.shadow_params,
        }
    def store(self, parameters: Iterable[torch.nn.Parameter]) -> None:
        self.temp_stored_params = [param.detach().cpu().clone() for param in parameters]
    def restore(self, parameters: Iterable[torch.nn.Parameter]) -> None:
        if self.temp_stored_params is None:
            raise RuntimeError("This ExponentialMovingAverage has no `store()`ed weights " "to `restore()`")
        for c_param, param in zip(self.temp_stored_params, parameters):
            param.data.copy_(c_param.data)
        # Better memory-wise.
        self.temp_stored_params = None
    def load_state_dict(self, state_dict: dict) -> None:
        state_dict = copy.deepcopy(state_dict)
        self.decay = state_dict.get("decay", self.decay)
        if self.decay < 0.0 or self.decay > 1.0:
            raise ValueError("Decay must be between 0 and 1")
        self.min_decay = state_dict.get("min_decay", self.min_decay)
        if not isinstance(self.min_decay, float):
            raise ValueError("Invalid min_decay")
        self.optimization_step = state_dict.get("optimization_step", self.optimization_step)
        if not isinstance(self.optimization_step, int):
            raise ValueError("Invalid optimization_step")
        self.update_after_step = state_dict.get("update_after_step", self.update_after_step)
        if not isinstance(self.update_after_step, int):
            raise ValueError("Invalid update_after_step")
        self.use_ema_warmup = state_dict.get("use_ema_warmup", self.use_ema_warmup)
        if not isinstance(self.use_ema_warmup, bool):
            raise ValueError("Invalid use_ema_warmup")
        self.inv_gamma = state_dict.get("inv_gamma", self.inv_gamma)
        if not isinstance(self.inv_gamma, (float, int)):
            raise ValueError("Invalid inv_gamma")
        self.power = state_dict.get("power", self.power)
        if not isinstance(self.power, (float, int)):
            raise ValueError("Invalid power")
        shadow_params = state_dict.get("shadow_params", None)
        if shadow_params is not None:
            self.shadow_params = shadow_params
            if not isinstance(self.shadow_params, list):
                raise ValueError("shadow_params must be a list")
            if not all(isinstance(p, torch.Tensor) for p in self.shadow_params):
                raise ValueError("shadow_params must all be Tensors")
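
# Usage sketch for EMAModel above. The tiny Linear network and the random
# "training" updates are illustrative stand-ins for a real model/optimizer.
if __name__ == "__main__":
    net = torch.nn.Linear(4, 2)
    ema = EMAModel(net.parameters(), decay=0.999)

    for _ in range(10):  # stand-in for optimizer steps
        with torch.no_grad():
            for p in net.parameters():
                p += 0.01 * torch.randn_like(p)
        ema.step(net.parameters())

    ema.store(net.parameters())    # stash the current (training) weights
    ema.copy_to(net.parameters())  # evaluate with the EMA weights
    ema.restore(net.parameters())  # then switch back to the training weights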
| 95 | 1 |
'''simple docstring'''
import time
import warnings
from abc import ABC
from copy import deepcopy
from typing import Optional
import torch
from ..utils import add_start_docstrings, logging
logger = logging.get_logger(__name__)
STOPPING_CRITERIA_INPUTS_DOCSTRING = r'''
Args:
input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
Indices of input sequence tokens in the vocabulary.
Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
[`PreTrainedTokenizer.__call__`] for details.
[What are input IDs?](../glossary#input-ids)
scores (`torch.FloatTensor` of shape `(batch_size, config.vocab_size)`):
Prediction scores of a language modeling head. These can be scores for each vocabulary token before SoftMax
or scores for each vocabulary token after SoftMax.
kwargs (`Dict[str, Any]`, *optional*):
Additional stopping criteria specific kwargs.
Return:
`bool`. `False` indicates we should continue, `True` indicates we should stop.
'''
class StoppingCriteria(ABC):
    """Abstract base class for all stopping criteria that can be applied during generation."""

    @add_start_docstrings(STOPPING_CRITERIA_INPUTS_DOCSTRING)
    def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor, **kwargs) -> bool:
        raise NotImplementedError('StoppingCriteria needs to be subclassed')


class MaxLengthCriteria(StoppingCriteria):
    """Stops generation once the full sequence reaches `max_length` tokens."""

    def __init__(self, max_length: int, max_position_embeddings: Optional[int] = None):
        self.max_length = max_length
        self.max_position_embeddings = max_position_embeddings

    @add_start_docstrings(STOPPING_CRITERIA_INPUTS_DOCSTRING)
    def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor, **kwargs) -> bool:
        cur_len = input_ids.shape[-1]
        is_done = cur_len >= self.max_length
        if self.max_position_embeddings is not None and not is_done and cur_len >= self.max_position_embeddings:
            logger.warning_once(
                'This is a friendly reminder - the current text generation call will exceed the model\'s predefined '
                f'maximum length ({self.max_position_embeddings}). Depending on the model, you may observe '
                'exceptions, performance degradation, or nothing at all.' )
        return is_done


class MaxNewTokensCriteria(StoppingCriteria):
    """Deprecated: stops generation once `max_new_tokens` tokens have been generated."""

    def __init__(self, start_length: int, max_new_tokens: int):
        warnings.warn(
            'The class `MaxNewTokensCriteria` is deprecated. '
            f'Please use `MaxLengthCriteria(max_length={start_length + max_new_tokens})` '
            'with `max_length = start_length + max_new_tokens` instead.', FutureWarning, )
        self.start_length = start_length
        self.max_new_tokens = max_new_tokens
        self.max_length = start_length + max_new_tokens

    @add_start_docstrings(STOPPING_CRITERIA_INPUTS_DOCSTRING)
    def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor, **kwargs) -> bool:
        return input_ids.shape[-1] >= self.max_length


class MaxTimeCriteria(StoppingCriteria):
    """Stops generation once more than `max_time` seconds have elapsed."""

    def __init__(self, max_time: float, initial_timestamp: Optional[float] = None):
        self.max_time = max_time
        self.initial_timestamp = time.time() if initial_timestamp is None else initial_timestamp

    @add_start_docstrings(STOPPING_CRITERIA_INPUTS_DOCSTRING)
    def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor, **kwargs) -> bool:
        return time.time() - self.initial_timestamp > self.max_time


class StoppingCriteriaList(list):
    @add_start_docstrings(STOPPING_CRITERIA_INPUTS_DOCSTRING)
    def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor, **kwargs) -> bool:
        return any(criteria(input_ids, scores) for criteria in self)

    @property
    def max_length(self) -> Optional[int]:
        for stopping_criterium in self:
            if isinstance(stopping_criterium, MaxLengthCriteria):
                return stopping_criterium.max_length
            elif isinstance(stopping_criterium, MaxNewTokensCriteria):
                return stopping_criterium.max_length
        return None
def validate_stopping_criteria(stopping_criteria: StoppingCriteriaList, max_length: int) -> StoppingCriteriaList:
    stopping_max_length = stopping_criteria.max_length
    new_stopping_criteria = deepcopy(stopping_criteria)
    if stopping_max_length is not None and stopping_max_length != max_length:
        warnings.warn('You set different `max_length` for stopping criteria and `max_length` parameter', UserWarning)
    elif stopping_max_length is None:
        new_stopping_criteria.append(MaxLengthCriteria(max_length=max_length))
    return new_stopping_criteria
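
# Usage sketch for the criteria above, written against the public transformers
# API; the "gpt2" checkpoint is only an example. Generation stops as soon as
# either criterion fires.
if __name__ == "__main__":
    from transformers import AutoModelForCausalLM, AutoTokenizer

    tokenizer = AutoTokenizer.from_pretrained("gpt2")
    model = AutoModelForCausalLM.from_pretrained("gpt2")

    inputs = tokenizer("The quick brown fox", return_tensors="pt")
    criteria = StoppingCriteriaList(
        [
            MaxLengthCriteria(max_length=32),  # stop at 32 total tokens ...
            MaxTimeCriteria(max_time=5.0),     # ... or after 5 seconds
        ]
    )
    print(tokenizer.decode(model.generate(**inputs, stopping_criteria=criteria)[0]))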
| 286 |
'''simple docstring'''
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from diffusers import (
DDIMScheduler,
KandinskyVaaInpaintPipeline,
KandinskyVaaPriorPipeline,
UNetaDConditionModel,
VQModel,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class snake_case__ ( SCREAMING_SNAKE_CASE_ , unittest.TestCase ):
A__ = KandinskyVaaInpaintPipeline
A__ = ['''image_embeds''', '''negative_image_embeds''', '''image''', '''mask_image''']
A__ = [
'''image_embeds''',
'''negative_image_embeds''',
'''image''',
'''mask_image''',
]
A__ = [
'''generator''',
'''height''',
'''width''',
'''latents''',
'''guidance_scale''',
'''num_inference_steps''',
'''return_dict''',
'''guidance_scale''',
'''num_images_per_prompt''',
'''output_type''',
'''return_dict''',
]
A__ = False
@property
def A_ ( self : int ) -> int:
'''simple docstring'''
return 32
@property
def A_ ( self : Union[str, Any] ) -> List[str]:
'''simple docstring'''
return 32
@property
def A_ ( self : List[Any] ) -> Union[str, Any]:
'''simple docstring'''
return self.time_input_dim
@property
def A_ ( self : List[Any] ) -> Union[str, Any]:
'''simple docstring'''
return self.time_input_dim * 4
@property
def A_ ( self : Optional[int] ) -> Tuple:
'''simple docstring'''
return 100
@property
def A_ ( self : Union[str, Any] ) -> Optional[Any]:
'''simple docstring'''
torch.manual_seed(0 )
__snake_case : Optional[int] = {
'in_channels': 9,
# Out channels is double in channels because predicts mean and variance
'out_channels': 8,
'addition_embed_type': 'image',
'down_block_types': ('ResnetDownsampleBlock2D', 'SimpleCrossAttnDownBlock2D'),
'up_block_types': ('SimpleCrossAttnUpBlock2D', 'ResnetUpsampleBlock2D'),
'mid_block_type': 'UNetMidBlock2DSimpleCrossAttn',
'block_out_channels': (self.block_out_channels_a, self.block_out_channels_a * 2),
'layers_per_block': 1,
'encoder_hid_dim': self.text_embedder_hidden_size,
'encoder_hid_dim_type': 'image_proj',
'cross_attention_dim': self.cross_attention_dim,
'attention_head_dim': 4,
'resnet_time_scale_shift': 'scale_shift',
'class_embed_type': None,
}
__snake_case : Optional[Any] = UNetaDConditionModel(**__a )
return model
@property
def A_ ( self : Any ) -> Any:
'''simple docstring'''
return {
"block_out_channels": [32, 64],
"down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 12,
"out_channels": 3,
"up_block_types": [
"AttnUpDecoderBlock2D",
"UpDecoderBlock2D",
],
"vq_embed_dim": 4,
}
@property
def A_ ( self : str ) -> Union[str, Any]:
'''simple docstring'''
torch.manual_seed(0 )
__snake_case : str = VQModel(**self.dummy_movq_kwargs )
return model
def A_ ( self : List[str] ) -> Dict:
'''simple docstring'''
__snake_case : int = self.dummy_unet
__snake_case : Dict = self.dummy_movq
__snake_case : int = DDIMScheduler(
num_train_timesteps=1000 , beta_schedule='linear' , beta_start=0.0_0_0_8_5 , beta_end=0.0_1_2 , clip_sample=__a , set_alpha_to_one=__a , steps_offset=1 , prediction_type='epsilon' , thresholding=__a , )
__snake_case : Optional[Any] = {
'unet': unet,
'scheduler': scheduler,
'movq': movq,
}
return components
def A_ ( self : str , __a : Union[str, Any] , __a : List[str]=0 ) -> List[Any]:
'''simple docstring'''
__snake_case : Any = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(__a ) ).to(__a )
__snake_case : int = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(seed + 1 ) ).to(
__a )
# create init_image
__snake_case : Union[str, Any] = floats_tensor((1, 3, 64, 64) , rng=random.Random(__a ) ).to(__a )
__snake_case : List[Any] = image.cpu().permute(0 , 2 , 3 , 1 )[0]
__snake_case : Optional[Any] = Image.fromarray(np.uinta(__a ) ).convert('RGB' ).resize((256, 256) )
# create mask
__snake_case : Optional[int] = np.ones((64, 64) , dtype=np.floataa )
__snake_case : List[str] = 0
if str(__a ).startswith('mps' ):
__snake_case : Optional[int] = torch.manual_seed(__a )
else:
__snake_case : List[Any] = torch.Generator(device=__a ).manual_seed(__a )
__snake_case : Optional[Any] = {
'image': init_image,
'mask_image': mask,
'image_embeds': image_embeds,
'negative_image_embeds': negative_image_embeds,
'generator': generator,
'height': 64,
'width': 64,
'num_inference_steps': 2,
'guidance_scale': 4.0,
'output_type': 'np',
}
return inputs
def A_ ( self : List[str] ) -> int:
'''simple docstring'''
__snake_case : Tuple = 'cpu'
__snake_case : Dict = self.get_dummy_components()
__snake_case : List[Any] = self.pipeline_class(**__a )
__snake_case : List[Any] = pipe.to(__a )
pipe.set_progress_bar_config(disable=__a )
__snake_case : Optional[Any] = pipe(**self.get_dummy_inputs(__a ) )
__snake_case : Union[str, Any] = output.images
__snake_case : Optional[int] = pipe(
**self.get_dummy_inputs(__a ) , return_dict=__a , )[0]
__snake_case : Optional[int] = image[0, -3:, -3:, -1]
__snake_case : Tuple = image_from_tuple[0, -3:, -3:, -1]
print(f'''image.shape {image.shape}''' )
assert image.shape == (1, 64, 64, 3)
__snake_case : Union[str, Any] = np.array(
[0.5_0_7_7_5_9_0_3, 0.4_9_5_2_7_1_9_5, 0.4_8_8_2_4_5_4_3, 0.5_0_1_9_2_2_3_7, 0.4_8_6_4_4_9_0_6, 0.4_9_3_7_3_8_1_4, 0.4_7_8_0_5_9_8, 0.4_7_2_3_4_8_2_7, 0.4_8_3_2_7_8_4_8] )
assert (
np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
), f''' expected_slice {expected_slice}, but got {image_slice.flatten()}'''
assert (
np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
), f''' expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}'''
def A_ ( self : Tuple ) -> Optional[int]:
'''simple docstring'''
super().test_inference_batch_single_identical(expected_max_diff=3e-3 )
@slow
@require_torch_gpu
class snake_case__ ( unittest.TestCase ):
def A_ ( self : Optional[int] ) -> int:
'''simple docstring'''
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def A_ ( self : Union[str, Any] ) -> int:
'''simple docstring'''
__snake_case : Dict = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/kandinskyv22/kandinskyv22_inpaint_cat_with_hat_fp16.npy' )
__snake_case : Optional[int] = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main' '/kandinsky/cat.png' )
__snake_case : int = np.ones((768, 768) , dtype=np.floataa )
__snake_case : Optional[Any] = 0
__snake_case : Union[str, Any] = 'a hat'
__snake_case : Optional[int] = KandinskyVaaPriorPipeline.from_pretrained(
'kandinsky-community/kandinsky-2-2-prior' , torch_dtype=torch.floataa )
pipe_prior.to(__a )
__snake_case : Optional[Any] = KandinskyVaaInpaintPipeline.from_pretrained(
'kandinsky-community/kandinsky-2-2-decoder-inpaint' , torch_dtype=torch.floataa )
__snake_case : Optional[Any] = pipeline.to(__a )
pipeline.set_progress_bar_config(disable=__a )
__snake_case : Dict = torch.Generator(device='cpu' ).manual_seed(0 )
__snake_case , __snake_case : Optional[int] = pipe_prior(
__a , generator=__a , num_inference_steps=5 , negative_prompt='' , ).to_tuple()
__snake_case : List[str] = pipeline(
image=__a , mask_image=__a , image_embeds=__a , negative_image_embeds=__a , generator=__a , num_inference_steps=100 , height=768 , width=768 , output_type='np' , )
__snake_case : List[str] = output.images[0]
assert image.shape == (768, 768, 3)
assert_mean_pixel_difference(__a , __a )
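
# Editorial sketch: a minimal numpy version of the kind of check
# `assert_mean_pixel_difference` performs above. The threshold of 10 is an
# assumption for illustration, not necessarily the helper's actual default.
if __name__ == "__main__":
    def _mean_pixel_difference(image, expected, threshold=10):
        image = np.asarray(image, dtype=np.float32)
        expected = np.asarray(expected, dtype=np.float32)
        avg_diff = np.abs(image - expected).mean()
        assert avg_diff < threshold, f"Images too different, mean diff {avg_diff}"

    base = np.zeros((8, 8, 3), dtype=np.uint8)
    _mean_pixel_difference(base, base + 1)  # passes: mean difference is 1.0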
| 286 | 1 |
'''simple docstring'''
import copy
from typing import Any, Dict, List, Optional, Union
import numpy as np
from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import TensorType, logging
logger = logging.get_logger(__name__)
class WhisperFeatureExtractor(SequenceFeatureExtractor):
    r"""Constructs a Whisper feature extractor for log-mel spectrogram input features."""

    model_input_names = ["input_features"]
    def __init__(self, feature_size=80, sampling_rate=1_60_00, hop_length=1_60, chunk_length=30, n_fft=4_00, padding_value=0.0, return_attention_mask=False, **kwargs, ):
        super().__init__(
            feature_size=feature_size, sampling_rate=sampling_rate, padding_value=padding_value, return_attention_mask=return_attention_mask, **kwargs, )
        self.n_fft = n_fft
        self.hop_length = hop_length
        self.chunk_length = chunk_length
        self.n_samples = chunk_length * sampling_rate
        self.nb_max_frames = self.n_samples // hop_length
        self.sampling_rate = sampling_rate
        self.mel_filters = mel_filter_bank(
            num_frequency_bins=1 + n_fft // 2, num_mel_filters=feature_size, min_frequency=0.0, max_frequency=80_00.0, sampling_rate=sampling_rate, norm="slaney", mel_scale="slaney", )
    def _np_extract_fbank_features(self, waveform: np.array) -> np.ndarray:
        log_spec = spectrogram(
            waveform, window_function(self.n_fft, "hann"), frame_length=self.n_fft, hop_length=self.hop_length, power=2.0, mel_filters=self.mel_filters, log_mel="log10", )
        log_spec = log_spec[:, :-1]
        log_spec = np.maximum(log_spec, log_spec.max() - 8.0)
        log_spec = (log_spec + 4.0) / 4.0
        return log_spec
@staticmethod
# Copied from transformers.models.wav2vec2.feature_extraction_wav2vec2.Wav2Vec2FeatureExtractor.zero_mean_unit_var_norm
    def zero_mean_unit_var_norm(input_values: List[np.ndarray], attention_mask: List[np.ndarray], padding_value: float = 0.0) -> List[np.ndarray]:
        if attention_mask is not None:
            attention_mask = np.array(attention_mask, np.int32)
            normed_input_values = []
            for vector, length in zip(input_values, attention_mask.sum(-1)):
                normed_slice = (vector - vector[:length].mean()) / np.sqrt(vector[:length].var() + 1e-7)
                if length < normed_slice.shape[0]:
                    normed_slice[length:] = padding_value
                normed_input_values.append(normed_slice)
        else:
            normed_input_values = [(x - x.mean()) / np.sqrt(x.var() + 1e-7) for x in input_values]
        return normed_input_values
    def __call__(
        self,
        raw_speech: Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]],
        truncation: bool = True,
        pad_to_multiple_of: Optional[int] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        return_attention_mask: Optional[bool] = None,
        padding: Optional[str] = "max_length",
        max_length: Optional[int] = None,
        sampling_rate: Optional[int] = None,
        do_normalize: Optional[bool] = None,
        **kwargs,
    ) -> BatchFeature:
        if sampling_rate is not None:
            if sampling_rate != self.sampling_rate:
                raise ValueError(
                    f"""The model corresponding to this feature extractor: {self.__class__.__name__} was trained using a"""
                    f""" sampling rate of {self.sampling_rate}. Please make sure that the provided `raw_speech` input"""
                    f""" was sampled with {self.sampling_rate} and not {sampling_rate}.""" )
        else:
            logger.warning(
                """It is strongly recommended to pass the `sampling_rate` argument to this function. """
                """Failing to do so can result in silent errors that might be hard to debug.""" )
        is_batched_numpy = isinstance(raw_speech, np.ndarray) and len(raw_speech.shape) > 1
        if is_batched_numpy and len(raw_speech.shape) > 2:
            raise ValueError(f"""Only mono-channel audio is supported for input to {self}""")
        is_batched = is_batched_numpy or (
            isinstance(raw_speech, (list, tuple)) and (isinstance(raw_speech[0], (np.ndarray, tuple, list)))
        )
        if is_batched:
            raw_speech = [np.asarray([speech], dtype=np.float32).T for speech in raw_speech]
        elif not is_batched and not isinstance(raw_speech, np.ndarray):
            raw_speech = np.asarray(raw_speech, dtype=np.float32)
        elif isinstance(raw_speech, np.ndarray) and raw_speech.dtype is np.dtype(np.float64):
            raw_speech = raw_speech.astype(np.float32)
        # always return batch
        if not is_batched:
            raw_speech = [np.asarray([raw_speech]).T]
        batched_speech = BatchFeature({"""input_features""": raw_speech})
        # convert into correct format for padding
        padded_inputs = self.pad(
            batched_speech, padding=padding, max_length=max_length if max_length else self.n_samples, truncation=truncation, pad_to_multiple_of=pad_to_multiple_of, return_attention_mask=return_attention_mask or do_normalize, )
        # zero-mean and unit-variance normalization
        if do_normalize:
            padded_inputs["""input_features"""] = self.zero_mean_unit_var_norm(
                padded_inputs["""input_features"""], attention_mask=padded_inputs["""attention_mask"""], padding_value=self.padding_value, )
            padded_inputs["""input_features"""] = np.stack(padded_inputs["""input_features"""], axis=0)
        # make sure list is in array format
        input_features = padded_inputs.get("""input_features""").transpose(2, 0, 1)
        input_features = [self._np_extract_fbank_features(waveform) for waveform in input_features[0]]
        if isinstance(input_features[0], List):
            padded_inputs["""input_features"""] = [np.asarray(feature, dtype=np.float32) for feature in input_features]
        else:
            padded_inputs["""input_features"""] = input_features
        if return_attention_mask:
            # rescale from sample (48000) to feature (3000)
            padded_inputs["""attention_mask"""] = padded_inputs["""attention_mask"""][:, :: self.hop_length]
        if return_tensors is not None:
            padded_inputs = padded_inputs.convert_to_tensors(return_tensors)
        return padded_inputs
    def to_dict(self) -> Dict[str, Any]:
        output = copy.deepcopy(self.__dict__)
        output["feature_extractor_type"] = self.__class__.__name__
        if "mel_filters" in output:
            del output["mel_filters"]
        return output
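
# Usage sketch for WhisperFeatureExtractor above: one second of silence at
# 16 kHz is padded to the 30-second window and mapped to an 80-bin log-mel
# spectrogram of 3000 frames.
if __name__ == "__main__":
    feature_extractor = WhisperFeatureExtractor()
    audio = np.zeros(16_000, dtype=np.float32)
    features = feature_extractor(audio, sampling_rate=16_000, return_tensors="np")
    print(features["input_features"].shape)  # (1, 80, 3000)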
| 704 | '''simple docstring'''
import itertools
import os
from collections import Counter, defaultdict
from concurrent.futures import ThreadPoolExecutor, as_completed
import numpy as np
import datasets
from .execute import check_correctness
_CITATION = '''\
@misc{chen2021evaluating,
title={Evaluating Large Language Models Trained on Code},
author={Mark Chen and Jerry Tworek and Heewoo Jun and Qiming Yuan \
and Henrique Ponde de Oliveira Pinto and Jared Kaplan and Harri Edwards \
and Yuri Burda and Nicholas Joseph and Greg Brockman and Alex Ray \
and Raul Puri and Gretchen Krueger and Michael Petrov and Heidy Khlaaf \
and Girish Sastry and Pamela Mishkin and Brooke Chan and Scott Gray \
and Nick Ryder and Mikhail Pavlov and Alethea Power and Lukasz Kaiser \
and Mohammad Bavarian and Clemens Winter and Philippe Tillet \
and Felipe Petroski Such and Dave Cummings and Matthias Plappert \
and Fotios Chantzis and Elizabeth Barnes and Ariel Herbert-Voss \
and William Hebgen Guss and Alex Nichol and Alex Paino and Nikolas Tezak \
and Jie Tang and Igor Babuschkin and Suchir Balaji and Shantanu Jain \
and William Saunders and Christopher Hesse and Andrew N. Carr \
and Jan Leike and Josh Achiam and Vedant Misra and Evan Morikawa \
and Alec Radford and Matthew Knight and Miles Brundage and Mira Murati \
and Katie Mayer and Peter Welinder and Bob McGrew and Dario Amodei \
and Sam McCandlish and Ilya Sutskever and Wojciech Zaremba},
year={2021},
eprint={2107.03374},
archivePrefix={arXiv},
primaryClass={cs.LG}
}
'''
_DESCRIPTION = '''\
This metric implements the evaluation harness for the HumanEval problem solving dataset
described in the paper "Evaluating Large Language Models Trained on Code"
(https://arxiv.org/abs/2107.03374).
'''
_KWARGS_DESCRIPTION = '''
Calculates how good are predictions given some references, using certain scores
Args:
predictions: list of candidates to evaluate. Each candidates should be a list
of strings with several code candidates to solve the problem.
references: a list with a test for each prediction. Each test should evaluate the
correctness of a code candidate.
k: number of code candidates to consider in the evaluation (Default: [1, 10, 100])
    num_workers: number of workers used to evaluate the candidate programs (Default: 4).
timeout:
Returns:
pass_at_k: dict with pass rates for each k
results: dict with granular results of each unittest
Examples:
>>> code_eval = datasets.load_metric("code_eval")
>>> test_cases = ["assert add(2,3)==5"]
>>> candidates = [["def add(a,b): return a*b", "def add(a, b): return a+b"]]
>>> pass_at_k, results = code_eval.compute(references=test_cases, predictions=candidates, k=[1, 2])
>>> print(pass_at_k)
{\'pass@1\': 0.5, \'pass@2\': 1.0}
'''
_WARNING = '''
################################################################################
!!!WARNING!!!
################################################################################
The "code_eval" metric executes untrusted model-generated code in Python.
Although it is highly unlikely that model-generated code will do something
overtly malicious in response to this test suite, model-generated code may act
destructively due to a lack of model capability or alignment.
Users are strongly encouraged to sandbox this evaluation suite so that it
does not perform destructive actions on their host or network. For more
information on how OpenAI sandboxes its code, see the paper "Evaluating Large
Language Models Trained on Code" (https://arxiv.org/abs/2107.03374).
Once you have read this disclaimer and taken appropriate precautions,
set the environment variable HF_ALLOW_CODE_EVAL="1". Within Python you can do this
with:
>>> import os
>>> os.environ["HF_ALLOW_CODE_EVAL"] = "1"
################################################################################\
'''
_LICENSE = '''The MIT License
Copyright (c) OpenAI (https://openai.com)
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class CodeEval(datasets.Metric):
    """HumanEval code evaluation metric."""
    def _info(self):
return datasets.MetricInfo(
# This is the description that will appear on the metrics page.
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""predictions""": datasets.Sequence(datasets.Value("""string""" ) ),
"""references""": datasets.Value("""string""" ),
} ) , homepage="""https://github.com/openai/human-eval""" , codebase_urls=["""https://github.com/openai/human-eval"""] , reference_urls=["""https://github.com/openai/human-eval"""] , license=_LICENSE , )
    def _compute(self, predictions, references, k=[1, 10, 1_00], num_workers=4, timeout=3.0):
        """Returns the pass@k scores and the granular per-candidate results."""
        if os.getenv("""HF_ALLOW_CODE_EVAL""", 0) != "1":
            raise ValueError(_WARNING)
        if os.name == "nt":
            raise NotImplementedError("""This metric is currently not supported on Windows.""")
        with ThreadPoolExecutor(max_workers=num_workers) as executor:
            futures = []
            completion_id = Counter()
            n_samples = 0
            results = defaultdict(list)
            for task_id, (candidates, test_case) in enumerate(zip(predictions, references)):
                for candidate in candidates:
                    test_program = candidate + "\n" + test_case
                    args = (test_program, timeout, task_id, completion_id[task_id])
                    future = executor.submit(check_correctness, *args)
                    futures.append(future)
                    completion_id[task_id] += 1
                    n_samples += 1
            for future in as_completed(futures):
                result = future.result()
                results[result["task_id"]].append((result["""completion_id"""], result))
        total, correct = [], []
        for result in results.values():
            result.sort()
            passed = [r[1]["""passed"""] for r in result]
            total.append(len(passed))
            correct.append(sum(passed))
        total = np.array(total)
        correct = np.array(correct)
        ks = k
        pass_at_k = {f"""pass@{k}""": estimate_pass_at_k(total, correct, k).mean() for k in ks if (total >= k).all()}
        return pass_at_k, results
def estimate_pass_at_k(num_samples, num_correct, k):
    """Estimates pass@k of each problem and returns them in an array."""

    def estimator(n: int, c: int, k: int) -> float:
        """Calculates 1 - comb(n - c, k) / comb(n, k)."""
        if n - c < k:
            return 1.0
        return 1.0 - np.prod(1.0 - k / np.arange(n - c + 1, n + 1))

    if isinstance(num_samples, int):
        num_samples_it = itertools.repeat(num_samples, len(num_correct))
    else:
        assert len(num_samples) == len(num_correct)
        num_samples_it = iter(num_samples)

    return np.array([estimator(int(n), int(c), k) for n, c in zip(num_samples_it, num_correct)])
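
# Worked example for estimate_pass_at_k above: with n = 5 samples of which
# c = 2 pass, pass@k = 1 - C(n - c, k) / C(n, k), so
# pass@1 = 1 - 3/5 = 0.4 (= c/n) and pass@2 = 1 - 3/10 = 0.7.
if __name__ == "__main__":
    print(estimate_pass_at_k(np.array([5]), np.array([2]), 1))  # [0.4]
    print(estimate_pass_at_k(np.array([5]), np.array([2]), 2))  # [0.7]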
| 330 | 0 |
'''simple docstring'''
import torch
from diffusers import DDPMParallelScheduler
from .test_schedulers import SchedulerCommonTest
class DDPMParallelSchedulerTest(SchedulerCommonTest):
"""simple docstring"""
    scheduler_classes = (DDPMParallelScheduler,)
    def get_scheduler_config(self, **kwargs):
        config = {
            'num_train_timesteps': 1_000,
            'beta_start': 0.0_001,
            'beta_end': 0.02,
            'beta_schedule': 'linear',
            'variance_type': 'fixed_small',
            'clip_sample': True,
        }

        config.update(**kwargs)
        return config
def _a ( self ):
"""simple docstring"""
for timesteps in [1, 5, 100, 1_000]:
self.check_over_configs(num_train_timesteps=UpperCamelCase__ )
def _a ( self ):
"""simple docstring"""
for beta_start, beta_end in zip([0.0_001, 0.001, 0.01, 0.1] , [0.002, 0.02, 0.2, 2] ):
self.check_over_configs(beta_start=UpperCamelCase__ , beta_end=UpperCamelCase__ )
def _a ( self ):
"""simple docstring"""
for schedule in ["linear", "squaredcos_cap_v2"]:
self.check_over_configs(beta_schedule=UpperCamelCase__ )
def _a ( self ):
"""simple docstring"""
for variance in ["fixed_small", "fixed_large", "other"]:
self.check_over_configs(variance_type=UpperCamelCase__ )
def _a ( self ):
"""simple docstring"""
for clip_sample in [True, False]:
self.check_over_configs(clip_sample=UpperCamelCase__ )
def _a ( self ):
"""simple docstring"""
self.check_over_configs(thresholding=UpperCamelCase__ )
for threshold in [0.5, 1.0, 2.0]:
for prediction_type in ["epsilon", "sample", "v_prediction"]:
self.check_over_configs(
thresholding=UpperCamelCase__ , prediction_type=UpperCamelCase__ , sample_max_value=UpperCamelCase__ , )
def _a ( self ):
"""simple docstring"""
for prediction_type in ["epsilon", "sample", "v_prediction"]:
self.check_over_configs(prediction_type=UpperCamelCase__ )
def _a ( self ):
"""simple docstring"""
for t in [0, 500, 999]:
self.check_over_forward(time_step=UpperCamelCase__ )
def _a ( self ):
"""simple docstring"""
a_ = self.scheduler_classes[0]
a_ = self.get_scheduler_config()
a_ = scheduler_class(**UpperCamelCase__ )
assert torch.sum(torch.abs(scheduler._get_variance(0 ) - 0.0 ) ) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(487 ) - 0.00_979 ) ) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(999 ) - 0.02 ) ) < 1e-5
def _a ( self ):
"""simple docstring"""
a_ = self.scheduler_classes[0]
a_ = self.get_scheduler_config()
a_ = scheduler_class(**UpperCamelCase__ )
a_ = len(UpperCamelCase__ )
a_ = self.dummy_model()
a_ = self.dummy_sample_deter
a_ = self.dummy_sample_deter + 0.1
a_ = self.dummy_sample_deter - 0.1
a_ = samplea.shape[0]
a_ = torch.stack([samplea, samplea, samplea] , dim=0 )
a_ = torch.arange(UpperCamelCase__ )[0:3, None].repeat(1 , UpperCamelCase__ )
a_ = model(samples.flatten(0 , 1 ) , timesteps.flatten(0 , 1 ) )
a_ = scheduler.batch_step_no_noise(UpperCamelCase__ , timesteps.flatten(0 , 1 ) , samples.flatten(0 , 1 ) )
a_ = torch.sum(torch.abs(UpperCamelCase__ ) )
a_ = torch.mean(torch.abs(UpperCamelCase__ ) )
assert abs(result_sum.item() - 1_153.1_833 ) < 1e-2
assert abs(result_mean.item() - 0.5_005 ) < 1e-3
def _a ( self ):
"""simple docstring"""
a_ = self.scheduler_classes[0]
a_ = self.get_scheduler_config()
a_ = scheduler_class(**UpperCamelCase__ )
a_ = len(UpperCamelCase__ )
a_ = self.dummy_model()
a_ = self.dummy_sample_deter
a_ = torch.manual_seed(0 )
for t in reversed(range(UpperCamelCase__ ) ):
# 1. predict noise residual
a_ = model(UpperCamelCase__ , UpperCamelCase__ )
# 2. predict previous mean of sample x_t-1
a_ = scheduler.step(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , generator=UpperCamelCase__ ).prev_sample
a_ = pred_prev_sample
a_ = torch.sum(torch.abs(UpperCamelCase__ ) )
a_ = torch.mean(torch.abs(UpperCamelCase__ ) )
assert abs(result_sum.item() - 258.9_606 ) < 1e-2
assert abs(result_mean.item() - 0.3_372 ) < 1e-3
def _a ( self ):
"""simple docstring"""
a_ = self.scheduler_classes[0]
a_ = self.get_scheduler_config(prediction_type='v_prediction' )
a_ = scheduler_class(**UpperCamelCase__ )
a_ = len(UpperCamelCase__ )
a_ = self.dummy_model()
a_ = self.dummy_sample_deter
a_ = torch.manual_seed(0 )
for t in reversed(range(UpperCamelCase__ ) ):
# 1. predict noise residual
a_ = model(UpperCamelCase__ , UpperCamelCase__ )
# 2. predict previous mean of sample x_t-1
a_ = scheduler.step(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , generator=UpperCamelCase__ ).prev_sample
a_ = pred_prev_sample
a_ = torch.sum(torch.abs(UpperCamelCase__ ) )
a_ = torch.mean(torch.abs(UpperCamelCase__ ) )
assert abs(result_sum.item() - 202.0_296 ) < 1e-2
assert abs(result_mean.item() - 0.2_631 ) < 1e-3
def _a ( self ):
"""simple docstring"""
a_ = self.scheduler_classes[0]
a_ = self.get_scheduler_config()
a_ = scheduler_class(**UpperCamelCase__ )
a_ = [100, 87, 50, 1, 0]
scheduler.set_timesteps(timesteps=UpperCamelCase__ )
a_ = scheduler.timesteps
for i, timestep in enumerate(UpperCamelCase__ ):
if i == len(UpperCamelCase__ ) - 1:
a_ = -1
else:
a_ = timesteps[i + 1]
a_ = scheduler.previous_timestep(UpperCamelCase__ )
a_ = prev_t.item()
self.assertEqual(UpperCamelCase__ , UpperCamelCase__ )
def _a ( self ):
"""simple docstring"""
a_ = self.scheduler_classes[0]
a_ = self.get_scheduler_config()
a_ = scheduler_class(**UpperCamelCase__ )
a_ = [100, 87, 50, 51, 0]
        with self.assertRaises(ValueError , msg='`custom_timesteps` must be in descending order.' ):
scheduler.set_timesteps(timesteps=UpperCamelCase__ )
def _a ( self ):
"""simple docstring"""
a_ = self.scheduler_classes[0]
a_ = self.get_scheduler_config()
a_ = scheduler_class(**UpperCamelCase__ )
a_ = [100, 87, 50, 1, 0]
a_ = len(UpperCamelCase__ )
        with self.assertRaises(ValueError , msg='Can only pass one of `num_inference_steps` or `custom_timesteps`.' ):
scheduler.set_timesteps(num_inference_steps=UpperCamelCase__ , timesteps=UpperCamelCase__ )
def _a ( self ):
"""simple docstring"""
a_ = self.scheduler_classes[0]
a_ = self.get_scheduler_config()
a_ = scheduler_class(**UpperCamelCase__ )
a_ = [scheduler.config.num_train_timesteps]
        with self.assertRaises(
            ValueError , msg=f'`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}' , ):
scheduler.set_timesteps(timesteps=UpperCamelCase__ )
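
# Editorial sketch of the denoising loop the tests above exercise, written
# against the public diffusers API (plain DDPMScheduler here; the parallel
# variant adds batch_step_no_noise on top of the same step interface). The
# zero "residual" is a stand-in for a trained model's output.
if __name__ == "__main__":
    from diffusers import DDPMScheduler

    scheduler = DDPMScheduler(num_train_timesteps=1000, beta_schedule="linear")
    sample = torch.randn(1, 3, 8, 8)
    generator = torch.manual_seed(0)

    for t in scheduler.timesteps:
        residual = torch.zeros_like(sample)  # model(sample, t) in real code
        sample = scheduler.step(residual, t, sample, generator=generator).prev_sample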
| 536 |
'''simple docstring'''
def valid_coloring(neighbours: list[int], colored_vertices: list[int], color: int) -> bool:
    """Check that no already-colored neighbour of the current vertex has this color."""
    return not any(
        neighbour == 1 and colored_vertices[i] == color
        for i, neighbour in enumerate(neighbours) )


def util_color(graph: list[list[int]], max_colors: int, colored_vertices: list[int], index: int) -> bool:
    """Recursively try to color vertex `index` and all vertices after it."""
    # Base Case
    if index == len(graph):
        return True

    # Recursive Step
    for i in range(max_colors):
        if valid_coloring(graph[index], colored_vertices, i):
            # Color current vertex
            colored_vertices[index] = i

            # Validate coloring
            if util_color(graph, max_colors, colored_vertices, index + 1):
                return True

            # Backtrack
            colored_vertices[index] = -1
    return False


def color(graph: list[list[int]], max_colors: int) -> list[int]:
    """Return a valid coloring using at most `max_colors` colors, or an empty list."""
    colored_vertices = [-1] * len(graph)

    if util_color(graph, max_colors, colored_vertices, 0):
        return colored_vertices

    return []
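
# Usage sketch for color() above: a 5-vertex graph as an adjacency matrix,
# colored with at most 3 colors.
if __name__ == "__main__":
    graph = [
        [0, 1, 0, 0, 0],
        [1, 0, 1, 0, 1],
        [0, 1, 0, 1, 0],
        [0, 0, 1, 0, 1],
        [0, 1, 0, 1, 0],
    ]
    print(color(graph, 3))  # [0, 1, 0, 1, 0] -- adjacent vertices differ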
| 536 | 1 |
"""simple docstring"""
import torch
from torch import nn
from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin
class UnCLIPTextProjModel(ModelMixin, ConfigMixin):
    """
    Utility class for CLIP embeddings: combines the image and text embeddings into a format usable by the decoder.
    """

    @register_to_config
    def __init__(self, *, clip_extra_context_tokens: int = 4, clip_embeddings_dim: int = 7_68, time_embed_dim: int, cross_attention_dim, ):
        super().__init__()

        self.learned_classifier_free_guidance_embeddings = nn.Parameter(torch.zeros(clip_embeddings_dim))

        # parameters for additional clip time embeddings
        self.embedding_proj = nn.Linear(clip_embeddings_dim, time_embed_dim)
        self.clip_image_embeddings_project_to_time_embeddings = nn.Linear(clip_embeddings_dim, time_embed_dim)

        # parameters for encoder hidden states
        self.clip_extra_context_tokens = clip_extra_context_tokens
        self.clip_extra_context_tokens_proj = nn.Linear(
            clip_embeddings_dim, self.clip_extra_context_tokens * cross_attention_dim)
        self.encoder_hidden_states_proj = nn.Linear(clip_embeddings_dim, cross_attention_dim)
        self.text_encoder_hidden_states_norm = nn.LayerNorm(cross_attention_dim)

    def forward(self, *, image_embeddings, prompt_embeds, text_encoder_hidden_states, do_classifier_free_guidance):
        if do_classifier_free_guidance:
            # Add the classifier free guidance embeddings to the image embeddings
            image_embeddings_batch_size = image_embeddings.shape[0]
            classifier_free_guidance_embeddings = self.learned_classifier_free_guidance_embeddings.unsqueeze(0)
            classifier_free_guidance_embeddings = classifier_free_guidance_embeddings.expand(
                image_embeddings_batch_size, -1)
            image_embeddings = torch.cat([classifier_free_guidance_embeddings, image_embeddings], dim=0)

        # The image embeddings batch size and the text embeddings batch size are equal
        assert image_embeddings.shape[0] == prompt_embeds.shape[0]

        batch_size = prompt_embeds.shape[0]

        # "Specifically, we modify the architecture described in Nichol et al. (2021) by projecting and
        # adding CLIP embeddings to the existing timestep embedding, ...
        time_projected_prompt_embeds = self.embedding_proj(prompt_embeds)
        time_projected_image_embeddings = self.clip_image_embeddings_project_to_time_embeddings(image_embeddings)
        additive_clip_time_embeddings = time_projected_image_embeddings + time_projected_prompt_embeds

        # ... and by projecting CLIP embeddings into four
        # extra tokens of context that are concatenated to the sequence of outputs from the GLIDE text encoder"
        clip_extra_context_tokens = self.clip_extra_context_tokens_proj(image_embeddings)
        clip_extra_context_tokens = clip_extra_context_tokens.reshape(batch_size, -1, self.clip_extra_context_tokens)
        clip_extra_context_tokens = clip_extra_context_tokens.permute(0, 2, 1)

        text_encoder_hidden_states = self.encoder_hidden_states_proj(text_encoder_hidden_states)
        text_encoder_hidden_states = self.text_encoder_hidden_states_norm(text_encoder_hidden_states)
        text_encoder_hidden_states = torch.cat([clip_extra_context_tokens, text_encoder_hidden_states], dim=1)

        return text_encoder_hidden_states, additive_clip_time_embeddings
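
# Shape-check sketch for UnCLIPTextProjModel above, with small illustrative
# dimensions: with 4 extra context tokens the hidden states grow from 77 to
# 81 tokens along the sequence axis.
if __name__ == "__main__":
    model = UnCLIPTextProjModel(
        clip_extra_context_tokens=4, clip_embeddings_dim=32, time_embed_dim=64, cross_attention_dim=16
    )
    hidden, time_emb = model(
        image_embeddings=torch.randn(2, 32),
        prompt_embeds=torch.randn(2, 32),
        text_encoder_hidden_states=torch.randn(2, 77, 16),
        do_classifier_free_guidance=False,
    )
    print(hidden.shape, time_emb.shape)  # torch.Size([2, 81, 16]) torch.Size([2, 64])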
| 378 |
"""simple docstring"""
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_DEFAULT_MEAN,
IMAGENET_DEFAULT_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
logger = logging.get_logger(__name__)
class PoolFormerImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]
    def __init__(self, do_resize=True, size=None, crop_pct=0.9, resample=PILImageResampling.BICUBIC, do_center_crop=True, crop_size=None, rescale_factor=1 / 2_55, do_rescale=True, do_normalize=True, image_mean=None, image_std=None, **kwargs, ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"""shortest_edge""": 2_24}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {"""height""": 2_24, """width""": 2_24}
        crop_size = get_size_dict(crop_size, param_name="""crop_size""")
        self.do_resize = do_resize
        self.size = size
        self.crop_pct = crop_pct
        self.resample = resample
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_DEFAULT_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_DEFAULT_STD
    def resize(self, image: np.ndarray, size: Dict[str, int], crop_pct: Optional[float] = None, resample: PILImageResampling = PILImageResampling.BICUBIC, data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs, ) -> np.ndarray:
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" not in size and ("height" not in size or "width" not in size):
            raise ValueError(f'''size must contain \'height\' and \'width\' or \'shortest_edge\' as keys. Got {size.keys()}''')
        if crop_pct is not None:
            if "shortest_edge" in size:
                scale_size = int(size["""shortest_edge"""] / crop_pct)
            elif "height" in size and "width" in size:
                if size["height"] == size["width"]:
                    scale_size = int(size["""height"""] / crop_pct)
                else:
                    scale_size = (int(size["""height"""] / crop_pct), int(size["""width"""] / crop_pct))
            else:
                raise ValueError("""Invalid size for resize: {}""".format(size))
            output_size = get_resize_output_image_size(image, size=scale_size, default_to_square=False)
        else:
            if "shortest_edge" in size:
                output_size = get_resize_output_image_size(image, size=size["""shortest_edge"""], default_to_square=False)
            elif "height" in size and "width" in size:
                output_size = (size["""height"""], size["""width"""])
            else:
                raise ValueError("""Invalid size for resize: {}""".format(size))
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)
    def center_crop(self, image: np.ndarray, size: Dict[str, int], data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs, ) -> np.ndarray:
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f'''size must contain \'height\' and \'width\' as keys. Got {size.keys()}''')
        return center_crop(image, size=(size["""height"""], size["""width"""]), data_format=data_format, **kwargs)
    def rescale(self, image: np.ndarray, scale: Union[int, float], data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs, ):
        return rescale(image, scale=scale, data_format=data_format, **kwargs)
    def normalize(self, image: np.ndarray, mean: Union[float, List[float]], std: Union[float, List[float]], data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs, ) -> np.ndarray:
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)
    def preprocess(self, images: ImageInput, do_resize: bool = None, size: Dict[str, int] = None, crop_pct: int = None, resample: PILImageResampling = None, do_center_crop: bool = None, crop_size: Dict[str, int] = None, do_rescale: bool = None, rescale_factor: float = None, do_normalize: bool = None, image_mean: Optional[Union[float, List[float]]] = None, image_std: Optional[Union[float, List[float]]] = None, return_tensors: Optional[Union[str, TensorType]] = None, data_format: ChannelDimension = ChannelDimension.FIRST, **kwargs, ) -> PIL.Image.Image:
        do_resize = do_resize if do_resize is not None else self.do_resize
        crop_pct = crop_pct if crop_pct is not None else self.crop_pct
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        size = size if size is not None else self.size
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="""crop_size""")
        images = make_list_of_images(images)
        if not valid_images(images):
            raise ValueError(
                """Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, """
                """torch.Tensor, tf.Tensor or jax.ndarray.""" )
        if do_resize and size is None or resample is None:
            raise ValueError("""Size and resample must be specified if do_resize is True.""")
        if do_center_crop and crop_pct is None:
            raise ValueError("""Crop_pct must be specified if do_center_crop is True.""")
        if do_rescale and rescale_factor is None:
            raise ValueError("""Rescale factor must be specified if do_rescale is True.""")
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("""Image mean and std must be specified if do_normalize is True.""")
        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]
        if do_resize:
            images = [self.resize(image=image, size=size, crop_pct=crop_pct, resample=resample) for image in images]
        if do_center_crop:
            images = [self.center_crop(image=image, size=crop_size) for image in images]
        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]
        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]
        images = [to_channel_dimension_format(image, data_format) for image in images]
        data = {"""pixel_values""": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
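
# Quick arithmetic sketch of the crop_pct logic in resize() above: to
# center-crop 224 pixels covering 90% of the shorter side, the image is first
# resized so its shortest edge is int(224 / 0.9) = 248.
if __name__ == "__main__":
    shortest_edge, crop_pct = 224, 0.9
    print(int(shortest_edge / crop_pct))  # 248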
| 378 | 1 |
'''simple docstring'''
import logging
import math
from functools import partial
from typing import Any, Callable, Dict, Iterable, List, Optional, Sequence, Tuple, Union
import torch
from .tensor_utils import tensor_tree_map, tree_map
def _fetch_dims(tree: Union[dict, list, tuple, torch.Tensor]) -> List[Tuple[int, ...]]:
    shapes = []
    if isinstance(tree, dict):
        for v in tree.values():
            shapes.extend(_fetch_dims(v))
    elif isinstance(tree, (list, tuple)):
        for t in tree:
            shapes.extend(_fetch_dims(t))
    elif isinstance(tree, torch.Tensor):
        shapes.append(tree.shape)
    else:
        raise ValueError('Not supported')

    return shapes
@torch.jit.ignore
def _flat_idx_to_idx(flat_idx: int, dims: Tuple[int, ...]) -> Tuple[int, ...]:
    idx = []
    for d in reversed(dims):
        idx.append(flat_idx % d)
        flat_idx = flat_idx // d

    return tuple(reversed(idx))
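
# Quick sanity check for _flat_idx_to_idx above: flat index 5 over batch dims
# (2, 3) is the multi-index (1, 2), since 5 == 1 * 3 + 2.
assert _flat_idx_to_idx(5, (2, 3)) == (1, 2)
assert _flat_idx_to_idx(0, (2, 3)) == (0, 0)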
@torch.jit.ignore
def _get_minimal_slice_set(
    start: Sequence[int],
    end: Sequence[int],
    dims: Sequence[int],
    start_edges: Optional[Sequence[bool]] = None,
    end_edges: Optional[Sequence[bool]] = None,
) -> List[Tuple[slice, ...]]:
    """Return a minimal ordered list of slice tuples covering [start, end] (inclusive)."""

    def reduce_edge_list(l: List[bool]) -> None:
        tally = True
        for i in range(len(l)):
            reversed_idx = -1 * (i + 1)
            l[reversed_idx] &= tally
            tally = l[reversed_idx]

    if start_edges is None:
        start_edges = [s == 0 for s in start]
        reduce_edge_list(start_edges)
    if end_edges is None:
        end_edges = [e == (d - 1) for e, d in zip(end, dims)]
        reduce_edge_list(end_edges)

    # Base cases. Either start/end are empty and we're done, or the final,
    # one-dimensional tensor can be simply sliced
    if len(start) == 0:
        return [()]
    elif len(start) == 1:
        return [(slice(start[0], end[0] + 1),)]

    slices = []
    path_list = []

    # Dimensions common to start and end can be selected directly
    for s, e in zip(start, end):
        if s == e:
            path_list.append(slice(s, s + 1))
        else:
            break

    path = tuple(path_list)
    divergence_idx = len(path)

    # start == end, and we're done
    if divergence_idx == len(start):
        return [path]

    def upper() -> Tuple[Tuple[slice, ...], ...]:
        assert start_edges is not None
        assert end_edges is not None
        sdi = start[divergence_idx]
        return tuple(
            path + (slice(sdi, sdi + 1),) + s
            for s in _get_minimal_slice_set(
                start[divergence_idx + 1 :], [d - 1 for d in dims[divergence_idx + 1 :]], dims[divergence_idx + 1 :], start_edges=start_edges[divergence_idx + 1 :], end_edges=[True for _ in end_edges[divergence_idx + 1 :]], ) )

    def lower() -> Tuple[Tuple[slice, ...], ...]:
        assert start_edges is not None
        assert end_edges is not None
        edi = end[divergence_idx]
        return tuple(
            path + (slice(edi, edi + 1),) + s
            for s in _get_minimal_slice_set(
                [0 for _ in start[divergence_idx + 1 :]], end[divergence_idx + 1 :], dims[divergence_idx + 1 :], start_edges=[True for _ in start_edges[divergence_idx + 1 :]], end_edges=end_edges[divergence_idx + 1 :], ) )

    # If both start and end are at the edges of the subtree rooted at
    # divergence_idx, we can just select the whole subtree at once
    if start_edges[divergence_idx] and end_edges[divergence_idx]:
        slices.append(path + (slice(start[divergence_idx], end[divergence_idx] + 1),))
    # If just start is at the edge, we can grab almost all of the subtree,
    # treating only the ragged bottom edge as an edge case
    elif start_edges[divergence_idx]:
        slices.append(path + (slice(start[divergence_idx], end[divergence_idx]),))
        slices.extend(lower())
    # Analogous to the previous case, but the top is ragged this time
    elif end_edges[divergence_idx]:
        slices.extend(upper())
        slices.append(path + (slice(start[divergence_idx] + 1, end[divergence_idx] + 1),))
    # If both sides of the range are ragged, we need to handle both sides
    # separately. If there's contiguous meat in between them, we can index it
    # in one big chunk
    else:
        slices.extend(upper())
        middle_ground = end[divergence_idx] - start[divergence_idx]
        if middle_ground > 1:
            slices.append(path + (slice(start[divergence_idx] + 1, end[divergence_idx]),))
        slices.extend(lower())

    return slices
@torch.jit.ignore
def _chunk_slice(t: torch.Tensor, flat_start: int, flat_end: int, no_batch_dims: int) -> torch.Tensor:
    """
    Equivalent to t.reshape((-1,) + t.shape[no_batch_dims:])[flat_start:flat_end],
    but without materializing the reshaped tensor.
    """
    batch_dims = t.shape[:no_batch_dims]
    start_idx = list(_flat_idx_to_idx(flat_start, batch_dims))
    # _get_minimal_slice_set is inclusive
    end_idx = list(_flat_idx_to_idx(flat_end - 1, batch_dims))

    # Get an ordered list of slices to perform
    slices = _get_minimal_slice_set(
        start_idx,
        end_idx,
        batch_dims,
    )

    sliced_tensors = [t[s] for s in slices]

    return torch.cat([s.view((-1,) + t.shape[no_batch_dims:]) for s in sliced_tensors])
def chunk_layer(
    layer: Callable,
    inputs: Dict[str, Any],
    chunk_size: int,
    no_batch_dims: int,
    low_mem: bool = False,
    _out: Any = None,
    _add_into_out: bool = False,
) -> Any:
    """
    Run `layer` over `chunk_size`-sized pieces of the flattened batch dimensions of
    `inputs` and reassemble the results; equivalent to a single unchunked call, but
    with a lower peak memory footprint.
    """
    if not (len(inputs) > 0):
        raise ValueError("Must provide at least one input")

    initial_dims = [shape[:no_batch_dims] for shape in _fetch_dims(inputs)]
    orig_batch_dims = tuple([max(s) for s in zip(*initial_dims)])

    def _prep_inputs(t: torch.Tensor) -> torch.Tensor:
        if not low_mem:
            if not sum(t.shape[:no_batch_dims]) == no_batch_dims:
                t = t.expand(orig_batch_dims + t.shape[no_batch_dims:])
            t = t.reshape(-1, *t.shape[no_batch_dims:])
        else:
            t = t.expand(orig_batch_dims + t.shape[no_batch_dims:])
        return t

    prepped_inputs = tensor_tree_map(_prep_inputs, inputs)
    prepped_outputs = None
    if _out is not None:
        prepped_outputs = tensor_tree_map(lambda t: t.view([-1] + list(t.shape[no_batch_dims:])), _out)

    flat_batch_dim = 1
    for d in orig_batch_dims:
        flat_batch_dim *= d

    no_chunks = flat_batch_dim // chunk_size + (flat_batch_dim % chunk_size != 0)

    def _select_chunk(t: torch.Tensor) -> torch.Tensor:
        return t[i : i + chunk_size] if t.shape[0] != 1 else t

    i = 0
    out = prepped_outputs
    for _ in range(no_chunks):
        # Chunk the input
        if not low_mem:
            select_chunk = _select_chunk
        else:
            select_chunk = partial(
                _chunk_slice,
                flat_start=i,
                flat_end=min(flat_batch_dim, i + chunk_size),
                no_batch_dims=len(orig_batch_dims),
            )

        chunks = tensor_tree_map(select_chunk, prepped_inputs)

        # Run the layer on the chunk
        output_chunk = layer(**chunks)

        # Allocate space for the output
        if out is None:
            out = tensor_tree_map(lambda t: t.new_zeros((flat_batch_dim,) + t.shape[1:]), output_chunk)

        # Put the chunk in its pre-allocated space
        if isinstance(output_chunk, dict):

            def assign(d1: dict, d2: dict) -> None:
                for k, v in d1.items():
                    if isinstance(v, dict):
                        assign(v, d2[k])
                    else:
                        if _add_into_out:
                            v[i : i + chunk_size] += d2[k]
                        else:
                            v[i : i + chunk_size] = d2[k]

            assign(out, output_chunk)
        elif isinstance(output_chunk, tuple):
            for x1, x2 in zip(out, output_chunk):
                if _add_into_out:
                    x1[i : i + chunk_size] += x2
                else:
                    x1[i : i + chunk_size] = x2
        elif isinstance(output_chunk, torch.Tensor):
            if _add_into_out:
                out[i : i + chunk_size] += output_chunk
            else:
                out[i : i + chunk_size] = output_chunk
        else:
            raise ValueError("Not supported")

        i += chunk_size

    out = tensor_tree_map(lambda t: t.view(orig_batch_dims + t.shape[1:]), out)

    return out
class ChunkSizeTuner:
    def __init__(
        self,
        # Heuristically, runtimes grow extremely quickly past this upper bound
        max_chunk_size: int = 512,
    ):
        self.max_chunk_size = max_chunk_size
        self.cached_chunk_size: Optional[int] = None
        self.cached_arg_data: Optional[tuple] = None

    def _determine_favorable_chunk_size(self, fn: Callable, args: tuple, min_chunk_size: int) -> int:
        logging.info("Tuning chunk size...")

        if min_chunk_size >= self.max_chunk_size:
            return min_chunk_size

        candidates: List[int] = [2**l for l in range(int(math.log(self.max_chunk_size, 2)) + 1)]
        candidates = [c for c in candidates if c > min_chunk_size]
        candidates = [min_chunk_size] + candidates
        candidates[-1] += 4

        def test_chunk_size(chunk_size: int) -> bool:
            try:
                with torch.no_grad():
                    fn(*args, chunk_size=chunk_size)
                return True
            except RuntimeError:
                return False

        min_viable_chunk_size_index = 0
        i = len(candidates) - 1
        while i > min_viable_chunk_size_index:
            viable = test_chunk_size(candidates[i])
            if not viable:
                i = (min_viable_chunk_size_index + i) // 2
            else:
                min_viable_chunk_size_index = i
                i = (i + len(candidates) - 1) // 2

        return candidates[min_viable_chunk_size_index]

    def _compare_arg_caches(self, ac1: Iterable, ac2: Iterable) -> bool:
        consistent = True
        for a1, a2 in zip(ac1, ac2):
            assert type(a1) == type(a2)
            if isinstance(a1, (list, tuple)):
                consistent &= self._compare_arg_caches(a1, a2)
            elif isinstance(a1, dict):
                a1_items = [v for _, v in sorted(a1.items(), key=lambda x: x[0])]
                a2_items = [v for _, v in sorted(a2.items(), key=lambda x: x[0])]
                consistent &= self._compare_arg_caches(a1_items, a2_items)
            else:
                consistent &= a1 == a2

        return consistent

    def tune_chunk_size(
        self,
        representative_fn: Callable,
        args: tuple,
        min_chunk_size: int,
    ) -> int:
        consistent = True
        arg_data: tuple = tree_map(lambda a: a.shape if isinstance(a, torch.Tensor) else a, args, object)
        if self.cached_arg_data is not None:
            # If args have changed shape/value, we need to re-tune
            assert len(self.cached_arg_data) == len(arg_data)
            consistent = self._compare_arg_caches(self.cached_arg_data, arg_data)
        else:
            # Otherwise, we can reuse the precomputed value
            consistent = False

        if not consistent:
            self.cached_chunk_size = self._determine_favorable_chunk_size(
                representative_fn,
                args,
                min_chunk_size,
            )
            self.cached_arg_data = arg_data

        assert self.cached_chunk_size is not None

        return self.cached_chunk_size
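# Minimal usage sketch (added; the toy layer and shapes are assumptions for
# illustration). chunk_layer flattens the leading `no_batch_dims` dimensions of
# every input, applies `layer` to `chunk_size`-sized pieces, and reassembles the
# result so it matches an unchunked call:
#
#   layer = lambda x: x * 2.0                      # invoked as layer(**chunk)
#   inputs = {"x": torch.randn(4, 8, 16)}
#   out = chunk_layer(layer, inputs, chunk_size=8, no_batch_dims=2)
#   assert out.shape == (4, 8, 16)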
| 24 |
"""
Project Euler Problem 129: https://projecteuler.net/problem=129

A(n) is the length of the smallest repunit (1, 11, 111, ...) that is divisible
by n. Find the least value of n for which A(n) first exceeds one million.
"""


def least_divisible_repunit(divisor: int) -> int:
    """Return A(divisor): the length of the smallest repunit divisible by divisor."""
    if divisor % 5 == 0 or divisor % 2 == 0:
        return 0
    repunit = 1
    repunit_index = 1
    while repunit:
        repunit = (10 * repunit + 1) % divisor
        repunit_index += 1
    return repunit_index
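# Worked check (added): 111111 = 7 * 15873 and no shorter repunit is divisible
# by 7, so least_divisible_repunit(7) == 6.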
def solution(limit: int = 1_000_000) -> int:
    """Return the least value of n for which A(n) first exceeds the given limit."""
    divisor = limit - 1
    if divisor % 2 == 0:
        divisor += 1
    while least_divisible_repunit(divisor) <= limit:
        divisor += 2
    return divisor
if __name__ == "__main__":
print(F"""{solution() = }""")
| 24 | 1 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_speech_available,
is_tf_available,
is_torch_available,
)
_import_structure = {
"configuration_speech_to_text": ["SPEECH_TO_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP", "Speech2TextConfig"],
"processing_speech_to_text": ["Speech2TextProcessor"],
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowercase = ["Speech2TextTokenizer"]
try:
if not is_speech_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowercase = ["Speech2TextFeatureExtractor"]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_speech_to_text"] = [
        "TF_SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFSpeech2TextForConditionalGeneration",
        "TFSpeech2TextModel",
        "TFSpeech2TextPreTrainedModel",
    ]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_speech_to_text"] = [
        "SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "Speech2TextForConditionalGeneration",
        "Speech2TextModel",
        "Speech2TextPreTrainedModel",
    ]
if TYPE_CHECKING:
    from .configuration_speech_to_text import SPEECH_TO_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP, Speech2TextConfig
    from .processing_speech_to_text import Speech2TextProcessor
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .tokenization_speech_to_text import Speech2TextTokenizer
try:
if not is_speech_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .feature_extraction_speech_to_text import Speech2TextFeatureExtractor
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_tf_speech_to_text import (
            TF_SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFSpeech2TextForConditionalGeneration,
            TFSpeech2TextModel,
            TFSpeech2TextPreTrainedModel,
        )
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_speech_to_text import (
            SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
            Speech2TextForConditionalGeneration,
            Speech2TextModel,
            Speech2TextPreTrainedModel,
        )
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
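# Note (added): `_LazyModule` defers the heavy framework imports declared in
# `_import_structure` above until the corresponding attribute is first accessed.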
| 707 |
import numpy as np


def sigmoid(vector: np.ndarray) -> np.ndarray:
    """Elementwise logistic sigmoid: 1 / (1 + exp(-x))."""
    return 1 / (1 + np.exp(-vector))


def gaussian_error_linear_unit(vector: np.ndarray) -> np.ndarray:
    """Sigmoid approximation of the GELU activation: x * sigmoid(1.702 * x)."""
    return vector * sigmoid(1.702 * vector)
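# Quick check (added): sigmoid(np.array([0.0])) == array([0.5]) and
# gaussian_error_linear_unit(np.array([0.0])) == array([0.]).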
if __name__ == "__main__":
import doctest
doctest.testmod()
| 526 | 0 |
'''simple docstring'''
import unittest
import numpy as np
import torch
from torch import nn
from transformers import (
CLIPImageProcessor,
CLIPTextConfig,
CLIPTextModelWithProjection,
CLIPTokenizer,
CLIPVisionConfig,
CLIPVisionModelWithProjection,
)
from diffusers import KandinskyV22PriorPipeline, PriorTransformer, UnCLIPScheduler
from diffusers.utils import torch_device
from diffusers.utils.testing_utils import enable_full_determinism, skip_mps
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class KandinskyV22PriorPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = KandinskyV22PriorPipeline
    params = ["prompt"]
    batch_params = ["prompt", "negative_prompt"]
    required_optional_params = [
        "num_images_per_prompt",
        "generator",
        "num_inference_steps",
        "latents",
        "negative_prompt",
        "guidance_scale",
        "output_type",
        "return_dict",
    ]
    test_xformers_attention = False
    @property
    def text_embedder_hidden_size(self):
        return 32

    @property
    def time_input_dim(self):
        return 32

    @property
    def block_out_channels_0(self):
        return self.time_input_dim

    @property
    def time_embed_dim(self):
        return self.time_input_dim * 4

    @property
    def cross_attention_dim(self):
        return 100

    @property
    def dummy_tokenizer(self):
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
        return tokenizer

    @property
    def dummy_text_encoder(self):
        torch.manual_seed(0)
        config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=self.text_embedder_hidden_size,
            projection_dim=self.text_embedder_hidden_size,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
        )
        return CLIPTextModelWithProjection(config)

    @property
    def dummy_prior(self):
        torch.manual_seed(0)
        model_kwargs = {
            "num_attention_heads": 2,
            "attention_head_dim": 12,
            "embedding_dim": self.text_embedder_hidden_size,
            "num_layers": 1,
        }
        model = PriorTransformer(**model_kwargs)
        # clip_std and clip_mean is initialized to be 0 so PriorTransformer.post_process_latents
        # will always return 0 - set clip_std to be 1 so it won't return 0
        model.clip_std = nn.Parameter(torch.ones(model.clip_std.shape))
        return model

    @property
    def dummy_image_encoder(self):
        torch.manual_seed(0)
        config = CLIPVisionConfig(
            hidden_size=self.text_embedder_hidden_size,
            image_size=224,
            projection_dim=self.text_embedder_hidden_size,
            intermediate_size=37,
            num_attention_heads=4,
            num_channels=3,
            num_hidden_layers=5,
            patch_size=14,
        )
        return CLIPVisionModelWithProjection(config)

    @property
    def dummy_image_processor(self):
        image_processor = CLIPImageProcessor(
            crop_size=224,
            do_center_crop=True,
            do_normalize=True,
            do_resize=True,
            image_mean=[0.48145466, 0.4578275, 0.40821073],
            image_std=[0.26862954, 0.26130258, 0.27577711],
            resample=3,
            size=224,
        )
        return image_processor
    def get_dummy_components(self):
        prior = self.dummy_prior
        image_encoder = self.dummy_image_encoder
        text_encoder = self.dummy_text_encoder
        tokenizer = self.dummy_tokenizer
        image_processor = self.dummy_image_processor

        scheduler = UnCLIPScheduler(
            variance_type="fixed_small_log",
            prediction_type="sample",
            num_train_timesteps=1000,
            clip_sample=True,
            clip_sample_range=10.0,
        )

        components = {
            "prior": prior,
            "image_encoder": image_encoder,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "scheduler": scheduler,
            "image_processor": image_processor,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "horse",
            "generator": generator,
            "guidance_scale": 4.0,
            "num_inference_steps": 2,
            "output_type": "np",
        }
        return inputs
    def test_kandinsky_prior(self):
        device = "cpu"

        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        output = pipe(**self.get_dummy_inputs(device))
        image = output.image_embeds
        image_from_tuple = pipe(**self.get_dummy_inputs(device), return_dict=False)[0]

        image_slice = image[0, -10:]
        image_from_tuple_slice = image_from_tuple[0, -10:]

        assert image.shape == (1, 32)

        expected_slice = np.array(
            [-0.0532, 1.7120, 0.3656, -1.0852, -0.8946, -1.1756, 0.4348, 0.2482, 0.5146, -0.1156]
        )

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
    @skip_mps
    def test_inference_batch_single_identical(self):
        test_max_difference = torch_device == "cpu"
        relax_max_difference = True
        test_mean_pixel_difference = False

        self._test_inference_batch_single_identical(
            test_max_difference=test_max_difference,
            relax_max_difference=relax_max_difference,
            test_mean_pixel_difference=test_mean_pixel_difference,
        )

    @skip_mps
    def test_attention_slicing_forward_pass(self):
        test_max_difference = torch_device == "cpu"
        test_mean_pixel_difference = False

        self._test_attention_slicing_forward_pass(
            test_max_difference=test_max_difference,
            test_mean_pixel_difference=test_mean_pixel_difference,
        )
| 90 |
"""
Burrows-Wheeler transform (BWT): a reversible permutation of a string that groups
similar characters together, commonly used as a preprocessing step for compression.
"""
from __future__ import annotations
from typing import TypedDict
class BWTTransformDict(TypedDict):
    bwt_string: str
    idx_original_string: int
def all_rotations(s: str) -> list[str]:
    """Return the list of all rotations of the given string."""
    if not isinstance(s, str):
        raise TypeError("The parameter s type must be str.")

    return [s[i:] + s[:i] for i in range(len(s))]
def bwt_transform(s: str) -> BWTTransformDict:
    """Compute the Burrows-Wheeler transform of s and the index of s in the sorted rotations."""
    if not isinstance(s, str):
        raise TypeError("The parameter s type must be str.")
    if not s:
        raise ValueError("The parameter s must not be empty.")

    rotations = all_rotations(s)
    rotations.sort()  # sort the list of rotations in alphabetically order
    # make a string composed of the last char of each rotation
    response: BWTTransformDict = {
        "bwt_string": "".join([word[-1] for word in rotations]),
        "idx_original_string": rotations.index(s),
    }
    return response
def reverse_bwt(bwt_string: str, idx_original_string: int) -> str:
    """Invert the Burrows-Wheeler transform given the BWT string and original index."""
    if not isinstance(bwt_string, str):
        raise TypeError("The parameter bwt_string type must be str.")
    if not bwt_string:
        raise ValueError("The parameter bwt_string must not be empty.")
    try:
        idx_original_string = int(idx_original_string)
    except ValueError:
        raise TypeError(
            "The parameter idx_original_string type must be int or passive"
            " of cast to int."
        )
    if idx_original_string < 0:
        raise ValueError("The parameter idx_original_string must not be lower than 0.")
    if idx_original_string >= len(bwt_string):
        raise ValueError(
            "The parameter idx_original_string must be lower than len(bwt_string)."
        )

    ordered_rotations = [""] * len(bwt_string)
    for _ in range(len(bwt_string)):
        for i in range(len(bwt_string)):
            ordered_rotations[i] = bwt_string[i] + ordered_rotations[i]
        ordered_rotations.sort()
    return ordered_rotations[idx_original_string]
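# Worked example (added): bwt_transform("^BANANA") yields bwt_string "BNN^AAA"
# with idx_original_string 6, and reverse_bwt("BNN^AAA", 6) recovers "^BANANA".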
if __name__ == "__main__":
__UpperCamelCase : Any = """Provide a string that I will generate its BWT transform: """
__UpperCamelCase : Dict = input(entry_msg).strip()
__UpperCamelCase : Optional[int] = bwt_transform(s)
print(
f"""Burrows Wheeler transform for string '{s}' results """
f"""in '{result["bwt_string"]}'"""
)
__UpperCamelCase : Dict = reverse_bwt(result["""bwt_string"""], result["""idx_original_string"""])
print(
f"""Reversing Burrows Wheeler transform for entry '{result["bwt_string"]}' """
f"""we get original string '{original_string}'"""
) | 448 | 0 |
import argparse
import os
import jax as jnp
import numpy as onp
import torch
import torch.nn as nn
from music_spectrogram_diffusion import inference
from t5x import checkpoints

from diffusers import DDPMScheduler, OnnxRuntimeModel, SpectrogramDiffusionPipeline
from diffusers.pipelines.spectrogram_diffusion import SpectrogramContEncoder, SpectrogramNotesEncoder, T5FilmDecoder

MODEL = "base_with_context"
def load_notes_encoder(weights, model):
    model.token_embedder.weight = nn.Parameter(torch.FloatTensor(weights["token_embedder"]["embedding"]))
    model.position_encoding.weight = nn.Parameter(
        torch.FloatTensor(weights["Embed_0"]["embedding"]), requires_grad=False
    )
    for lyr_num, lyr in enumerate(model.encoders):
        ly_weight = weights[f"layers_{lyr_num}"]
        lyr.layer[0].layer_norm.weight = nn.Parameter(
            torch.FloatTensor(ly_weight["pre_attention_layer_norm"]["scale"])
        )

        attention_weights = ly_weight["attention"]
        lyr.layer[0].SelfAttention.q.weight = nn.Parameter(torch.FloatTensor(attention_weights["query"]["kernel"].T))
        lyr.layer[0].SelfAttention.k.weight = nn.Parameter(torch.FloatTensor(attention_weights["key"]["kernel"].T))
        lyr.layer[0].SelfAttention.v.weight = nn.Parameter(torch.FloatTensor(attention_weights["value"]["kernel"].T))
        lyr.layer[0].SelfAttention.o.weight = nn.Parameter(torch.FloatTensor(attention_weights["out"]["kernel"].T))

        lyr.layer[1].layer_norm.weight = nn.Parameter(torch.FloatTensor(ly_weight["pre_mlp_layer_norm"]["scale"]))
        lyr.layer[1].DenseReluDense.wi_0.weight = nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wi_0"]["kernel"].T))
        lyr.layer[1].DenseReluDense.wi_1.weight = nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wi_1"]["kernel"].T))
        lyr.layer[1].DenseReluDense.wo.weight = nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wo"]["kernel"].T))

    model.layer_norm.weight = nn.Parameter(torch.FloatTensor(weights["encoder_norm"]["scale"]))

    return model
def load_continuous_encoder(weights, model):
    model.input_proj.weight = nn.Parameter(torch.FloatTensor(weights["input_proj"]["kernel"].T))
    model.position_encoding.weight = nn.Parameter(
        torch.FloatTensor(weights["Embed_0"]["embedding"]), requires_grad=False
    )

    for lyr_num, lyr in enumerate(model.encoders):
        ly_weight = weights[f"layers_{lyr_num}"]
        attention_weights = ly_weight["attention"]

        lyr.layer[0].SelfAttention.q.weight = nn.Parameter(torch.FloatTensor(attention_weights["query"]["kernel"].T))
        lyr.layer[0].SelfAttention.k.weight = nn.Parameter(torch.FloatTensor(attention_weights["key"]["kernel"].T))
        lyr.layer[0].SelfAttention.v.weight = nn.Parameter(torch.FloatTensor(attention_weights["value"]["kernel"].T))
        lyr.layer[0].SelfAttention.o.weight = nn.Parameter(torch.FloatTensor(attention_weights["out"]["kernel"].T))
        lyr.layer[0].layer_norm.weight = nn.Parameter(
            torch.FloatTensor(ly_weight["pre_attention_layer_norm"]["scale"])
        )

        lyr.layer[1].DenseReluDense.wi_0.weight = nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wi_0"]["kernel"].T))
        lyr.layer[1].DenseReluDense.wi_1.weight = nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wi_1"]["kernel"].T))
        lyr.layer[1].DenseReluDense.wo.weight = nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wo"]["kernel"].T))
        lyr.layer[1].layer_norm.weight = nn.Parameter(torch.FloatTensor(ly_weight["pre_mlp_layer_norm"]["scale"]))

    model.layer_norm.weight = nn.Parameter(torch.FloatTensor(weights["encoder_norm"]["scale"]))

    return model
def load_decoder(weights, model):
    model.conditioning_emb[0].weight = nn.Parameter(torch.FloatTensor(weights["time_emb_dense0"]["kernel"].T))
    model.conditioning_emb[2].weight = nn.Parameter(torch.FloatTensor(weights["time_emb_dense1"]["kernel"].T))

    model.position_encoding.weight = nn.Parameter(
        torch.FloatTensor(weights["Embed_0"]["embedding"]), requires_grad=False
    )

    model.continuous_inputs_projection.weight = nn.Parameter(
        torch.FloatTensor(weights["continuous_inputs_projection"]["kernel"].T)
    )

    for lyr_num, lyr in enumerate(model.decoders):
        ly_weight = weights[f"layers_{lyr_num}"]
        lyr.layer[0].layer_norm.weight = nn.Parameter(
            torch.FloatTensor(ly_weight["pre_self_attention_layer_norm"]["scale"])
        )

        lyr.layer[0].FiLMLayer.scale_bias.weight = nn.Parameter(
            torch.FloatTensor(ly_weight["FiLMLayer_0"]["DenseGeneral_0"]["kernel"].T)
        )

        attention_weights = ly_weight["self_attention"]
        lyr.layer[0].attention.to_q.weight = nn.Parameter(torch.FloatTensor(attention_weights["query"]["kernel"].T))
        lyr.layer[0].attention.to_k.weight = nn.Parameter(torch.FloatTensor(attention_weights["key"]["kernel"].T))
        lyr.layer[0].attention.to_v.weight = nn.Parameter(torch.FloatTensor(attention_weights["value"]["kernel"].T))
        lyr.layer[0].attention.to_out[0].weight = nn.Parameter(torch.FloatTensor(attention_weights["out"]["kernel"].T))

        attention_weights = ly_weight["MultiHeadDotProductAttention_0"]
        lyr.layer[1].attention.to_q.weight = nn.Parameter(torch.FloatTensor(attention_weights["query"]["kernel"].T))
        lyr.layer[1].attention.to_k.weight = nn.Parameter(torch.FloatTensor(attention_weights["key"]["kernel"].T))
        lyr.layer[1].attention.to_v.weight = nn.Parameter(torch.FloatTensor(attention_weights["value"]["kernel"].T))
        lyr.layer[1].attention.to_out[0].weight = nn.Parameter(torch.FloatTensor(attention_weights["out"]["kernel"].T))

        lyr.layer[1].layer_norm.weight = nn.Parameter(
            torch.FloatTensor(ly_weight["pre_cross_attention_layer_norm"]["scale"])
        )

        lyr.layer[2].layer_norm.weight = nn.Parameter(torch.FloatTensor(ly_weight["pre_mlp_layer_norm"]["scale"]))
        lyr.layer[2].film.scale_bias.weight = nn.Parameter(
            torch.FloatTensor(ly_weight["FiLMLayer_1"]["DenseGeneral_0"]["kernel"].T)
        )

        lyr.layer[2].DenseReluDense.wi_0.weight = nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wi_0"]["kernel"].T))
        lyr.layer[2].DenseReluDense.wi_1.weight = nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wi_1"]["kernel"].T))
        lyr.layer[2].DenseReluDense.wo.weight = nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wo"]["kernel"].T))

    model.decoder_norm.weight = nn.Parameter(torch.FloatTensor(weights["decoder_norm"]["scale"]))
    model.spec_out.weight = nn.Parameter(torch.FloatTensor(weights["spec_out_dense"]["kernel"].T))

    return model
def main(args):
    t5_checkpoint = checkpoints.load_t5x_checkpoint(args.checkpoint_path)
    t5_checkpoint = jnp.tree_util.tree_map(onp.array, t5_checkpoint)

    gin_overrides = [
        "from __gin__ import dynamic_registration",
        "from music_spectrogram_diffusion.models.diffusion import diffusion_utils",
        "diffusion_utils.ClassifierFreeGuidanceConfig.eval_condition_weight = 2.0",
        "diffusion_utils.DiffusionConfig.classifier_free_guidance = @diffusion_utils.ClassifierFreeGuidanceConfig()",
    ]

    gin_file = os.path.join(args.checkpoint_path, "..", "config.gin")
    gin_config = inference.parse_training_gin_file(gin_file, gin_overrides)
    synth_model = inference.InferenceModel(args.checkpoint_path, gin_config)

    scheduler = DDPMScheduler(beta_schedule="squaredcos_cap_v2", variance_type="fixed_large")

    notes_encoder = SpectrogramNotesEncoder(
        max_length=synth_model.sequence_length["inputs"], vocab_size=synth_model.model.module.config.vocab_size, d_model=synth_model.model.module.config.emb_dim, dropout_rate=synth_model.model.module.config.dropout_rate, num_layers=synth_model.model.module.config.num_encoder_layers, num_heads=synth_model.model.module.config.num_heads, d_kv=synth_model.model.module.config.head_dim, d_ff=synth_model.model.module.config.mlp_dim, feed_forward_proj="gated-gelu", )

    continuous_encoder = SpectrogramContEncoder(
        input_dims=synth_model.audio_codec.n_dims, targets_context_length=synth_model.sequence_length["targets_context"], d_model=synth_model.model.module.config.emb_dim, dropout_rate=synth_model.model.module.config.dropout_rate, num_layers=synth_model.model.module.config.num_encoder_layers, num_heads=synth_model.model.module.config.num_heads, d_kv=synth_model.model.module.config.head_dim, d_ff=synth_model.model.module.config.mlp_dim, feed_forward_proj="gated-gelu", )

    decoder = T5FilmDecoder(
        input_dims=synth_model.audio_codec.n_dims, targets_length=synth_model.sequence_length["targets_context"], max_decoder_noise_time=synth_model.model.module.config.max_decoder_noise_time, d_model=synth_model.model.module.config.emb_dim, num_layers=synth_model.model.module.config.num_decoder_layers, num_heads=synth_model.model.module.config.num_heads, d_kv=synth_model.model.module.config.head_dim, d_ff=synth_model.model.module.config.mlp_dim, dropout_rate=synth_model.model.module.config.dropout_rate, )

    notes_encoder = load_notes_encoder(t5_checkpoint["target"]["token_encoder"], notes_encoder)
    continuous_encoder = load_continuous_encoder(t5_checkpoint["target"]["continuous_encoder"], continuous_encoder)
    decoder = load_decoder(t5_checkpoint["target"]["decoder"], decoder)

    melgan = OnnxRuntimeModel.from_pretrained("kashif/soundstream_mel_decoder")

    pipe = SpectrogramDiffusionPipeline(
        notes_encoder=notes_encoder, continuous_encoder=continuous_encoder, decoder=decoder, scheduler=scheduler, melgan=melgan, )
    if args.save:
        pipe.save_pretrained(args.output_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument("""--output_path""", default=None, type=str, required=True, help="""Path to the converted model.""")
parser.add_argument(
"""--save""", default=True, type=bool, required=False, help="""Whether to save the converted model or not."""
)
parser.add_argument(
"""--checkpoint_path""",
default=f'''{MODEL}/checkpoint_500000''',
type=str,
required=False,
help="""Path to the original jax model checkpoint.""",
)
    args = parser.parse_args()
    main(args)
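# Example invocation (added; paths are hypothetical):
#   python convert_music_spectrogram_to_diffusers.py \
#       --checkpoint_path base_with_context/checkpoint_500000 \
#       --output_path ./spectrogram-diffusion --save True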
| 701 |
def decimal_isolate(number: float, digit_amount: int) -> float:
    """Isolate the decimal part of a number, rounded to digit_amount digits if positive."""
    if digit_amount > 0:
        return round(number - int(number), digit_amount)
    return number - int(number)
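# Example (added): decimal_isolate(35.345, 1) == round(35.345 - 35, 1) == 0.3.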
if __name__ == "__main__":
print(decimal_isolate(1.5_3, 0))
print(decimal_isolate(3_5.3_4_5, 1))
print(decimal_isolate(3_5.3_4_5, 2))
print(decimal_isolate(3_5.3_4_5, 3))
print(decimal_isolate(-1_4.7_8_9, 3))
print(decimal_isolate(0, 2))
print(decimal_isolate(-1_4.1_2_3, 1))
print(decimal_isolate(-1_4.1_2_3, 2))
print(decimal_isolate(-1_4.1_2_3, 3))
| 372 | 0 |
import functools
import gc
import inspect
import torch
from .imports import is_npu_available, is_xpu_available
def release_memory(*objects):
    """Set the given objects to None and flush the accelerator cache."""
    if not isinstance(objects, list):
        objects = list(objects)
    for i in range(len(objects)):
        objects[i] = None
    gc.collect()
    if is_xpu_available():
        torch.xpu.empty_cache()
    elif is_npu_available():
        torch.npu.empty_cache()
    else:
        torch.cuda.empty_cache()
    return objects
def should_reduce_batch_size(exception: Exception) -> bool:
    """Return True if the exception looks like an out-of-memory error."""
    _statements = [
        "CUDA out of memory.",  # CUDA OOM
        "cuDNN error: CUDNN_STATUS_NOT_SUPPORTED.",  # CUDNN SNAFU
        "DefaultCPUAllocator: can't allocate memory",  # CPU OOM
    ]
    if isinstance(exception, RuntimeError) and len(exception.args) == 1:
        return any(err in exception.args[0] for err in _statements)
    return False
def find_executable_batch_size(function: callable = None, starting_batch_size: int = 128):
    """Decorator that retries `function` with a halved batch size whenever an OOM is raised."""
    if function is None:
        return functools.partial(find_executable_batch_size, starting_batch_size=starting_batch_size)

    batch_size = starting_batch_size

    def decorator(*args, **kwargs):
        nonlocal batch_size
        gc.collect()
        if is_xpu_available():
            torch.xpu.empty_cache()
        elif is_npu_available():
            torch.npu.empty_cache()
        else:
            torch.cuda.empty_cache()
        params = list(inspect.signature(function).parameters.keys())
        # Guard against user error
        if len(params) < (len(args) + 1):
            arg_str = ", ".join([f"{arg}={value}" for arg, value in zip(params[1:], args[1:])])
            raise TypeError(
                f"Batch size was passed into `{function.__name__}` as the first argument when called."
                f"Remove this as the decorator already does so: `{function.__name__}({arg_str})`"
            )
        while True:
            if batch_size == 0:
                raise RuntimeError("No executable batch size found, reached zero.")
            try:
                return function(batch_size, *args, **kwargs)
            except Exception as e:
                if should_reduce_batch_size(e):
                    gc.collect()
                    if is_xpu_available():
                        torch.xpu.empty_cache()
                    elif is_npu_available():
                        torch.npu.empty_cache()
                    else:
                        torch.cuda.empty_cache()
                    batch_size //= 2
                else:
                    raise

    return decorator
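# Minimal usage sketch (added; `train` and its body are hypothetical):
#
#   @find_executable_batch_size(starting_batch_size=128)
#   def train(batch_size):
#       ...  # if this raises a CUDA OOM, batch_size is halved and train is retried
#
#   train()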
| 21 |
import argparse
import pytorch_lightning as pl
import torch
from torch import nn
from transformers import LongformerForQuestionAnswering, LongformerModel
class LightningModel(pl.LightningModule):
    def __init__(self, model):
        super().__init__()
        self.model = model
        self.num_labels = 2
        self.qa_outputs = nn.Linear(self.model.config.hidden_size, self.num_labels)

    # implement only because lightning requires it
    def forward(self):
        pass
def convert_longformer_qa_checkpoint_to_pytorch(
    longformer_model: str, longformer_question_answering_ckpt_path: str, pytorch_dump_folder_path: str
):
    # load longformer model from model identifier
    longformer = LongformerModel.from_pretrained(longformer_model)
    lightning_model = LightningModel(longformer)

    ckpt = torch.load(longformer_question_answering_ckpt_path, map_location=torch.device("cpu"))
    lightning_model.load_state_dict(ckpt["state_dict"])

    # init longformer question answering model
    longformer_for_qa = LongformerForQuestionAnswering.from_pretrained(longformer_model)

    # transfer weights
    longformer_for_qa.longformer.load_state_dict(lightning_model.model.state_dict())
    longformer_for_qa.qa_outputs.load_state_dict(lightning_model.qa_outputs.state_dict())
    longformer_for_qa.eval()

    # save model
    longformer_for_qa.save_pretrained(pytorch_dump_folder_path)

    print(f"Conversion successful. Model saved under {pytorch_dump_folder_path}")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--longformer_model""",
default=None,
type=str,
required=True,
help="""model identifier of longformer. Should be either `longformer-base-4096` or `longformer-large-4096`.""",
)
parser.add_argument(
"""--longformer_question_answering_ckpt_path""",
default=None,
type=str,
required=True,
help="""Path the official PyTorch Lightning Checkpoint.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
    args = parser.parse_args()
convert_longformer_qa_checkpoint_to_pytorch(
args.longformer_model, args.longformer_question_answering_ckpt_path, args.pytorch_dump_folder_path
)
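# Example invocation (added; paths are hypothetical):
#   python convert_longformer_original_pytorch_lightning_to_pytorch.py \
#       --longformer_model longformer-base-4096 \
#       --longformer_question_answering_ckpt_path ./lightning.ckpt \
#       --pytorch_dump_folder_path ./longformer-qa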
| 112 | 0 |
'''Fine-tune a Transformers model for audio classification.'''
import logging
import os
import sys
import warnings
from dataclasses import dataclass, field
from random import randint
from typing import Optional
import datasets
import evaluate
import numpy as np
from datasets import DatasetDict, load_dataset
import transformers
from transformers import (
AutoConfig,
AutoFeatureExtractor,
AutoModelForAudioClassification,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version
logger = logging.getLogger(__name__)
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version('4.31.0')
require_version('datasets>=1.14.0', 'To fix: pip install -r examples/pytorch/audio-classification/requirements.txt')
def random_subsample(wav: np.ndarray, max_length: float, sample_rate: int = 16_000):
    """Randomly sample chunks of `max_length` seconds from the input audio."""
    sample_length = int(round(sample_rate * max_length))
    if len(wav) <= sample_length:
        return wav
    random_offset = randint(0, len(wav) - sample_length - 1)
    return wav[random_offset : random_offset + sample_length]
@dataclass
class DataTrainingArguments:
    """
    Arguments pertaining to what data we are going to input our model for training and eval.
    """

    dataset_name: Optional[str] = field(default=None, metadata={"help": "Name of a dataset from the datasets package"})
    dataset_config_name: Optional[str] = field(
        default=None, metadata={"help": "The configuration name of the dataset to use (via the datasets library)."}
    )
    train_file: Optional[str] = field(
        default=None, metadata={"help": "A file containing the training audio paths and labels."}
    )
    eval_file: Optional[str] = field(
        default=None, metadata={"help": "A file containing the validation audio paths and labels."}
    )
    train_split_name: str = field(
        default="train",
        metadata={
            "help": "The name of the training data set split to use (via the datasets library). Defaults to 'train'"
        },
    )
    eval_split_name: str = field(
        default="validation",
        metadata={
            "help": (
                "The name of the training data set split to use (via the datasets library). Defaults to 'validation'"
            )
        },
    )
    audio_column_name: str = field(
        default="audio",
        metadata={"help": "The name of the dataset column containing the audio data. Defaults to 'audio'"},
    )
    label_column_name: str = field(
        default="label", metadata={"help": "The name of the dataset column containing the labels. Defaults to 'label'"}
    )
    max_train_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of training examples to this "
                "value if set."
            )
        },
    )
    max_eval_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of evaluation examples to this "
                "value if set."
            )
        },
    )
    max_length_seconds: float = field(
        default=20,
        metadata={"help": "Audio clips will be randomly cut to this length during training if the value is set."},
    )
@dataclass
class ModelArguments:
    """
    Arguments pertaining to which model/config/tokenizer we are going to fine-tune from.
    """

    model_name_or_path: str = field(
        default="facebook/wav2vec2-base",
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"},
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    cache_dir: Optional[str] = field(
        default=None, metadata={"help": "Where do you want to store the pretrained models downloaded from the Hub"}
    )
    model_revision: str = field(
        default="main",
        metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."},
    )
    feature_extractor_name: Optional[str] = field(
        default=None, metadata={"help": "Name or path of preprocessor config."}
    )
    freeze_feature_encoder: bool = field(
        default=True, metadata={"help": "Whether to freeze the feature encoder layers of the model."}
    )
    attention_mask: bool = field(
        default=True, metadata={"help": "Whether to generate an attention mask in the feature extractor."}
    )
    use_auth_token: bool = field(
        default=False,
        metadata={
            "help": (
                "Will use the token generated when running `huggingface-cli login` (necessary to use this script "
                "with private models)."
            )
        },
    )
    freeze_feature_extractor: Optional[bool] = field(
        default=None, metadata={"help": "Whether to freeze the feature extractor layers of the model."}
    )
    ignore_mismatched_sizes: bool = field(
        default=False,
        metadata={"help": "Will enable to load a pretrained model whose head dimensions are different."},
    )

    def __post_init__(self):
        if not self.freeze_feature_extractor and self.freeze_feature_encoder:
            warnings.warn(
                "The argument `--freeze_feature_extractor` is deprecated and "
                "will be removed in a future version. Use `--freeze_feature_encoder` "
                "instead. Setting `freeze_feature_encoder==True`.",
                FutureWarning,
            )
        if self.freeze_feature_extractor and not self.freeze_feature_encoder:
            raise ValueError(
                "The argument `--freeze_feature_extractor` is deprecated and "
                "should not be used in combination with `--freeze_feature_encoder`. "
                "Only make use of `--freeze_feature_encoder`."
            )
def main():
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    # Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
    # information sent is the one passed as arguments along with your Python/PyTorch versions.
    send_example_telemetry("run_audio_classification", model_args, data_args)
# Setup logging
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''' , datefmt='''%m/%d/%Y %H:%M:%S''' , handlers=[logging.StreamHandler(sys.stdout )] , )
if training_args.should_log:
# The default of training_args.log_level is passive, so we set log level at info here to have that default.
transformers.utils.logging.set_verbosity_info()
    log_level = training_args.get_process_log_level()
    logger.setLevel(log_level)
    transformers.utils.logging.set_verbosity(log_level)
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
logger.warning(
F"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu} "
+ F"distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}" )
logger.info(F"Training/evaluation parameters {training_args}" )
# Set seed before initializing model.
set_seed(training_args.seed )
# Detecting last checkpoint.
    last_checkpoint = None
    if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
        last_checkpoint = get_last_checkpoint(training_args.output_dir)
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
F"Output directory ({training_args.output_dir}) already exists and is not empty. "
'''Use --overwrite_output_dir to train from scratch.''' )
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
F"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change "
'''the `--output_dir` or add `--overwrite_output_dir` to train from scratch.''' )
# Initialize our dataset and prepare it for the audio classification task.
    raw_datasets = DatasetDict()
    raw_datasets["train"] = load_dataset(
        data_args.dataset_name,
        data_args.dataset_config_name,
        split=data_args.train_split_name,
        use_auth_token=True if model_args.use_auth_token else None,
    )
    raw_datasets["eval"] = load_dataset(
        data_args.dataset_name,
        data_args.dataset_config_name,
        split=data_args.eval_split_name,
        use_auth_token=True if model_args.use_auth_token else None,
    )
if data_args.audio_column_name not in raw_datasets["train"].column_names:
raise ValueError(
F"--audio_column_name {data_args.audio_column_name} not found in dataset '{data_args.dataset_name}'. "
'''Make sure to set `--audio_column_name` to the correct audio column - one of '''
F"{', '.join(raw_datasets['train'].column_names )}." )
if data_args.label_column_name not in raw_datasets["train"].column_names:
raise ValueError(
F"--label_column_name {data_args.label_column_name} not found in dataset '{data_args.dataset_name}'. "
'''Make sure to set `--label_column_name` to the correct text column - one of '''
F"{', '.join(raw_datasets['train'].column_names )}." )
# Setting `return_attention_mask=True` is the way to get a correctly masked mean-pooling over
# transformer outputs in the classifier, but it doesn't always lead to better accuracy
    feature_extractor = AutoFeatureExtractor.from_pretrained(
model_args.feature_extractor_name or model_args.model_name_or_path , return_attention_mask=model_args.attention_mask , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
# `datasets` takes care of automatically loading and resampling the audio,
# so we just need to set the correct target sampling rate.
    raw_datasets = raw_datasets.cast_column(
        data_args.audio_column_name, datasets.features.Audio(sampling_rate=feature_extractor.sampling_rate)
    )

    model_input_name = feature_extractor.model_input_names[0]
    def train_transforms(batch):
        """Apply train_transforms across a batch."""
        subsampled_wavs = []
        for audio in batch[data_args.audio_column_name]:
            wav = random_subsample(
                audio["array"], max_length=data_args.max_length_seconds, sample_rate=feature_extractor.sampling_rate
            )
            subsampled_wavs.append(wav)
        inputs = feature_extractor(subsampled_wavs, sampling_rate=feature_extractor.sampling_rate)
        output_batch = {model_input_name: inputs.get(model_input_name)}
        output_batch["labels"] = list(batch[data_args.label_column_name])

        return output_batch

    def val_transforms(batch):
        """Apply val_transforms across a batch."""
        wavs = [audio["array"] for audio in batch[data_args.audio_column_name]]
        inputs = feature_extractor(wavs, sampling_rate=feature_extractor.sampling_rate)
        output_batch = {model_input_name: inputs.get(model_input_name)}
        output_batch["labels"] = list(batch[data_args.label_column_name])

        return output_batch
# Prepare label mappings.
# We'll include these in the model's config to get human readable labels in the Inference API.
    labels = raw_datasets["train"].features[data_args.label_column_name].names
    label2id, id2label = {}, {}
    for i, label in enumerate(labels):
        label2id[label] = str(i)
        id2label[str(i)] = label
# Load the accuracy metric from the datasets package
    metric = evaluate.load("accuracy")
# Define our compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with
# `predictions` and `label_ids` fields) and has to return a dictionary string to float.
    def compute_metrics(eval_pred):
        """Compute accuracy on a batch of predictions."""
        predictions = np.argmax(eval_pred.predictions, axis=1)
        return metric.compute(predictions=predictions, references=eval_pred.label_ids)

    config = AutoConfig.from_pretrained(
        model_args.config_name or model_args.model_name_or_path,
        num_labels=len(labels),
        label2id=label2id,
        id2label=id2label,
        finetuning_task="audio-classification",
        cache_dir=model_args.cache_dir,
        revision=model_args.model_revision,
        use_auth_token=True if model_args.use_auth_token else None,
    )
    model = AutoModelForAudioClassification.from_pretrained(
        model_args.model_name_or_path,
        from_tf=bool(".ckpt" in model_args.model_name_or_path),
        config=config,
        cache_dir=model_args.cache_dir,
        revision=model_args.model_revision,
        use_auth_token=True if model_args.use_auth_token else None,
        ignore_mismatched_sizes=model_args.ignore_mismatched_sizes,
    )
# freeze the convolutional waveform encoder
if model_args.freeze_feature_encoder:
model.freeze_feature_encoder()
if training_args.do_train:
if data_args.max_train_samples is not None:
            raw_datasets["train"] = (
raw_datasets['''train'''].shuffle(seed=training_args.seed ).select(range(data_args.max_train_samples ) )
)
# Set the training transforms
raw_datasets["train"].set_transform(__magic_name__ , output_all_columns=__magic_name__ )
if training_args.do_eval:
if data_args.max_eval_samples is not None:
            raw_datasets["eval"] = (
raw_datasets['''eval'''].shuffle(seed=training_args.seed ).select(range(data_args.max_eval_samples ) )
)
# Set the validation transforms
raw_datasets["eval"].set_transform(__magic_name__ , output_all_columns=__magic_name__ )
# Initialize our trainer
    trainer = Trainer(
        model=model,
        args=training_args,
        train_dataset=raw_datasets["train"] if training_args.do_train else None,
        eval_dataset=raw_datasets["eval"] if training_args.do_eval else None,
        compute_metrics=compute_metrics,
        tokenizer=feature_extractor,
    )
# Training
if training_args.do_train:
        checkpoint = None
        if training_args.resume_from_checkpoint is not None:
            checkpoint = training_args.resume_from_checkpoint
        elif last_checkpoint is not None:
            checkpoint = last_checkpoint
        train_result = trainer.train(resume_from_checkpoint=checkpoint)
trainer.save_model()
trainer.log_metrics('''train''' , train_result.metrics )
trainer.save_metrics('''train''' , train_result.metrics )
trainer.save_state()
# Evaluation
if training_args.do_eval:
        metrics = trainer.evaluate()
        trainer.log_metrics("eval", metrics)
        trainer.save_metrics("eval", metrics)
# Write model card and (optionally) push to hub
    kwargs = {
        "finetuned_from": model_args.model_name_or_path,
        "tasks": "audio-classification",
        "dataset": data_args.dataset_name,
        "tags": ["audio-classification"],
    }
    if training_args.push_to_hub:
        trainer.push_to_hub(**kwargs)
    else:
        trainer.create_model_card(**kwargs)
if __name__ == "__main__":
main()
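# Example invocation (added; the dataset and model choices are illustrative):
#   python run_audio_classification.py --model_name_or_path facebook/wav2vec2-base \
#       --dataset_name superb --dataset_config_name ks \
#       --output_dir ./wav2vec2-base-keyword-spotting --do_train --do_eval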
| 705 |
def lucas_lehmer_test(p: int) -> bool:
    """
    Lucas-Lehmer primality test: for a prime exponent p, the Mersenne number
    2**p - 1 is prime iff this returns True.
    """
    if p < 2:
        raise ValueError("p should not be less than 2!")
    elif p == 2:
        return True

    s = 4
    m = (1 << p) - 1
    for _ in range(p - 2):
        s = ((s * s) - 2) % m
    return s == 0
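# Worked check (added): 2**7 - 1 = 127 is prime, so lucas_lehmer_test(7) is True,
# while 2**11 - 1 = 2047 = 23 * 89, so lucas_lehmer_test(11) is False.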
if __name__ == "__main__":
print(lucas_lehmer_test(7))
print(lucas_lehmer_test(11))
| 84 | 0 |
"""simple docstring"""
import math
from typing import Optional
import numpy as np
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

ENCODEC_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/encodec_24khz": "https://huggingface.co/facebook/encodec_24khz/resolve/main/config.json",
    "facebook/encodec_48khz": "https://huggingface.co/facebook/encodec_48khz/resolve/main/config.json",
}
class EncodecConfig(PretrainedConfig):
    """Configuration class for the EnCodec neural audio codec."""

    model_type = "encodec"

    def __init__(
        self,
        target_bandwidths=[1.5, 3.0, 6.0, 12.0, 24.0],
        sampling_rate=24_000,
        audio_channels=1,
        normalize=False,
        chunk_length_s=None,
        overlap=None,
        hidden_size=128,
        num_filters=32,
        num_residual_layers=1,
        upsampling_ratios=[8, 5, 4, 2],
        norm_type="weight_norm",
        kernel_size=7,
        last_kernel_size=7,
        residual_kernel_size=3,
        dilation_growth_rate=2,
        use_causal_conv=True,
        pad_mode="reflect",
        compress=2,
        num_lstm_layers=2,
        trim_right_ratio=1.0,
        codebook_size=1024,
        codebook_dim=None,
        use_conv_shortcut=True,
        **kwargs,
    ):
        self.target_bandwidths = target_bandwidths
        self.sampling_rate = sampling_rate
        self.audio_channels = audio_channels
        self.normalize = normalize
        self.chunk_length_s = chunk_length_s
        self.overlap = overlap
        self.hidden_size = hidden_size
        self.num_filters = num_filters
        self.num_residual_layers = num_residual_layers
        self.upsampling_ratios = upsampling_ratios
        self.norm_type = norm_type
        self.kernel_size = kernel_size
        self.last_kernel_size = last_kernel_size
        self.residual_kernel_size = residual_kernel_size
        self.dilation_growth_rate = dilation_growth_rate
        self.use_causal_conv = use_causal_conv
        self.pad_mode = pad_mode
        self.compress = compress
        self.num_lstm_layers = num_lstm_layers
        self.trim_right_ratio = trim_right_ratio
        self.codebook_size = codebook_size
        self.codebook_dim = codebook_dim if codebook_dim is not None else hidden_size
        self.use_conv_shortcut = use_conv_shortcut

        if self.norm_type not in ["weight_norm", "time_group_norm"]:
            raise ValueError(
                f'self.norm_type must be one of `"weight_norm"`, `"time_group_norm"`), got {self.norm_type}'
            )

        super().__init__(**kwargs)

    # This is a property because you might want to change the chunk_length_s on the fly
    @property
    def chunk_length(self) -> Optional[int]:
        if self.chunk_length_s is None:
            return None
        else:
            return int(self.chunk_length_s * self.sampling_rate)

    # This is a property because you might want to change the chunk_length_s on the fly
    @property
    def chunk_stride(self) -> Optional[int]:
        if self.chunk_length_s is None or self.overlap is None:
            return None
        else:
            return max(1, int((1.0 - self.overlap) * self.chunk_length))

    @property
    def frame_rate(self) -> int:
        hop_length = np.prod(self.upsampling_ratios)
        return math.ceil(self.sampling_rate / hop_length)

    @property
    def num_quantizers(self) -> int:
        return int(1000 * self.target_bandwidths[-1] // (self.frame_rate * 10))
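# Example (added): with the 24 kHz defaults above, the hop length is
# 8 * 5 * 4 * 2 = 320 samples, so frame_rate == ceil(24000 / 320) == 75 and
# num_quantizers == int(1000 * 24.0 // (75 * 10)) == 32.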
| 549 |
"""simple docstring"""
UNIVERSAL_GAS_CONSTANT = 8.3144598  # J / (mol * K)
def rms_speed_of_molecule(temperature: float, molar_mass: float) -> float:
    """Root-mean-square speed of gas molecules, with temperature in K and molar mass in kg/mol."""
    if temperature < 0:
        raise Exception("Temperature cannot be less than 0 K")
    if molar_mass <= 0:
        raise Exception("Molar mass cannot be less than or equal to 0 kg/mol")
    else:
        return (3 * UNIVERSAL_GAS_CONSTANT * temperature / molar_mass) ** 0.5
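# Worked example (added): in SI units the molar mass of N2 is 0.028 kg/mol, so at
# 300 K v_rms = sqrt(3 * 8.3144598 * 300 / 0.028) ≈ 517 m/s. (The demo below
# passes 28, so its output illustrates the formula rather than real N2.)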
if __name__ == "__main__":
import doctest
# run doctest
doctest.testmod()
# example
    temperature = 300
    molar_mass = 28
    vrms = rms_speed_of_molecule(temperature, molar_mass)
print(F'''Vrms of Nitrogen gas at 300 K is {vrms} m/s''')
| 549 | 1 |
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
# Register SEW's fairseq modules
from sew_asapp import tasks # noqa: F401
from transformers import (
SEWConfig,
SEWForCTC,
SEWModel,
    Wav2Vec2CTCTokenizer,
    Wav2Vec2FeatureExtractor,
    Wav2Vec2Processor,
logging,
)
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

MAPPING = {
'post_extract_proj': 'feature_projection',
'encoder.pos_conv.0': 'encoder.pos_conv_embed.conv',
'self_attn.k_proj': 'encoder.layers.*.attention.k_proj',
'self_attn.v_proj': 'encoder.layers.*.attention.v_proj',
'self_attn.q_proj': 'encoder.layers.*.attention.q_proj',
'self_attn.out_proj': 'encoder.layers.*.attention.out_proj',
'self_attn_layer_norm': 'encoder.layers.*.layer_norm',
'fc1': 'encoder.layers.*.feed_forward.intermediate_dense',
'fc2': 'encoder.layers.*.feed_forward.output_dense',
'final_layer_norm': 'encoder.layers.*.final_layer_norm',
'encoder.upsample.0': 'encoder.upsample.projection',
'encoder.layer_norm': 'encoder.layer_norm',
'w2v_model.layer_norm': 'layer_norm',
'w2v_encoder.proj': 'lm_head',
'mask_emb': 'masked_spec_embed',
}
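# The MAPPING dict above translates fairseq parameter name fragments (keys) to
# their Transformers counterparts (values); "*" is a layer-index placeholder
# filled in by recursively_load_weights below.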
def set_recursively(hf_pointer, key, value, full_name, weight_type):
    for attribute in key.split("."):
        hf_pointer = getattr(hf_pointer, attribute)

    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape

    assert hf_shape == value.shape, (
        f"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
        f" {value.shape} for {full_name}"
    )

    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    else:
        hf_pointer.data = value

    logger.info(f"{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.")
def recursively_load_weights(fairseq_model, hf_model, is_finetuned):
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()

    feature_extractor = hf_model.sew.feature_extractor if is_finetuned else hf_model.feature_extractor

    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name,
                value,
                feature_extractor,
                unused_weights,
                hf_model.config.feat_extract_norm == "group",
            )
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                mapped_key = "sew." + mapped_key if (is_finetuned and mapped_key != "lm_head") else mapped_key

                if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key)[0].split(".")[-2]
                        mapped_key = mapped_key.replace("*", layer_index)
                    if "weight_g" in name:
                        weight_type = "weight_g"
                    elif "weight_v" in name:
                        weight_type = "weight_v"
                    elif "weight" in name:
                        weight_type = "weight"
                    elif "bias" in name:
                        weight_type = "bias"
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type)
                continue
        if not is_used:
            unused_weights.append(name)

    logger.warning(f"Unused weights: {unused_weights}")
def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    name = full_name.split("conv_layers.")[-1]
    items = name.split(".")
    layer_id = int(items[0])
    type_id = int(items[1])

    if type_id == 0:
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
                f"{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was"
                " found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
    else:
        unused_weights.append(full_name)
def convert_config( model , is_finetuned ):
    """simple docstring"""
    config = SEWConfig()
    if is_finetuned:
        fs_config = model.w2v_encoder.w2v_model.cfg
    else:
        fs_config = model.cfg
    config.conv_bias = fs_config.conv_bias
    conv_layers = eval(fs_config.conv_feature_layers )
    config.conv_dim = [x[0] for x in conv_layers]
    config.conv_kernel = [x[1] for x in conv_layers]
    config.conv_stride = [x[2] for x in conv_layers]
    config.feat_extract_activation = """gelu"""
    config.feat_extract_norm = """layer""" if fs_config.extractor_mode == """layer_norm""" else """group"""
    config.final_dropout = 0.0
    config.hidden_act = fs_config.activation_fn.name
    config.hidden_size = fs_config.encoder_embed_dim
    config.initializer_range = 0.02
    config.intermediate_size = fs_config.encoder_ffn_embed_dim
    config.layer_norm_eps = 1e-5
    config.layerdrop = fs_config.encoder_layerdrop
    config.num_attention_heads = fs_config.encoder_attention_heads
    config.num_conv_pos_embedding_groups = fs_config.conv_pos_groups
    config.num_conv_pos_embeddings = fs_config.conv_pos
    config.num_feat_extract_layers = len(conv_layers )
    config.num_hidden_layers = fs_config.encoder_layers
    config.squeeze_factor = fs_config.squeeze_factor
    # take care of any params that are overridden by the Wav2VecCtc model
    if is_finetuned:
        fs_config = model.cfg
        config.final_dropout = fs_config.final_dropout
        config.layerdrop = fs_config.layerdrop
    config.activation_dropout = fs_config.activation_dropout
    config.apply_spec_augment = fs_config.mask_prob > 0 or fs_config.mask_channel_prob > 0
    config.attention_dropout = fs_config.attention_dropout
    config.feat_proj_dropout = fs_config.dropout_input
    config.hidden_dropout = fs_config.dropout
    config.mask_feature_length = fs_config.mask_channel_length
    config.mask_feature_prob = fs_config.mask_channel_prob
    config.mask_time_length = fs_config.mask_length
    config.mask_time_prob = fs_config.mask_prob
    config.feature_extractor_type = """Wav2Vec2FeatureExtractor"""
    config.tokenizer_class = """Wav2Vec2CTCTokenizer"""
    return config
@torch.no_grad()
def convert_sew_checkpoint( checkpoint_path , pytorch_dump_folder_path , config_path=None , dict_path=None , is_finetuned=True ):
    """simple docstring"""
    if is_finetuned:
        model , _ , _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
            [checkpoint_path] , arg_overrides={"""data""": """/""".join(dict_path.split("""/""" )[:-1] )} )
    else:
        model , _ , _ = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] )
    if config_path is not None:
        config = SEWConfig.from_pretrained(config_path )
    else:
        config = convert_config(model[0] , is_finetuned )
    model = model[0].eval()
    return_attention_mask = True if config.feat_extract_norm == """layer""" else False
    feature_extractor = WavaVecaFeatureExtractor(
        feature_size=1 , sampling_rate=16_000 , padding_value=0 , do_normalize=True , return_attention_mask=return_attention_mask , )
    if is_finetuned:
        if dict_path:
            target_dict = Dictionary.load(dict_path )
            # important change bos & pad token id since CTC symbol is <pad> and
            # not <s> as in fairseq
            target_dict.indices[target_dict.bos_word] = target_dict.pad_index
            target_dict.indices[target_dict.pad_word] = target_dict.bos_index
            config.bos_token_id = target_dict.pad_index
            config.pad_token_id = target_dict.bos_index
            config.eos_token_id = target_dict.eos_index
            config.vocab_size = len(target_dict.symbols )
            vocab_path = os.path.join(pytorch_dump_folder_path , """vocab.json""" )
            if not os.path.isdir(pytorch_dump_folder_path ):
                logger.error("""--pytorch_dump_folder_path ({}) should be a directory""".format(pytorch_dump_folder_path ) )
                return
            os.makedirs(pytorch_dump_folder_path , exist_ok=True )
            with open(vocab_path , """w""" , encoding="""utf-8""" ) as vocab_handle:
                json.dump(target_dict.indices , vocab_handle )
            tokenizer = WavaVecaCTCTokenizer(
                vocab_path , unk_token=target_dict.unk_word , pad_token=target_dict.pad_word , bos_token=target_dict.bos_word , eos_token=target_dict.eos_word , word_delimiter_token="""|""" , do_lower_case=False , )
            processor = WavaVecaProcessor(feature_extractor=feature_extractor , tokenizer=tokenizer )
            processor.save_pretrained(pytorch_dump_folder_path )
        hf_model = SEWForCTC(config )
    else:
        hf_model = SEWModel(config )
        feature_extractor.save_pretrained(pytorch_dump_folder_path )
    recursively_load_weights(model , hf_model , is_finetuned )
    hf_model.save_pretrained(pytorch_dump_folder_path )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to fairseq checkpoint')
parser.add_argument('--dict_path', default=None, type=str, help='Path to dict of fine-tuned model')
parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert')
parser.add_argument(
'--is_finetuned', action='store_true', help='Whether the model to convert is a fine-tuned model or not'
)
    args = parser.parse_args()
convert_sew_checkpoint(
args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, args.is_finetuned
)
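# A minimal, self-contained sketch of the key-renaming scheme the script above
# relies on: fairseq parameter names are matched against a MAPPING table whose
# "*" wildcard stands for the layer index. The two entries below are
# illustrative assumptions, not the real table used by the conversion script.
def demo_rename(name):
    mapping = {
        "self_attn.k_proj": "encoder.layers.*.attention.k_proj",
        "post_extract_proj": "feature_projection",
    }
    for key, mapped_key in mapping.items():
        if key in name:
            if "*" in mapped_key:
                # the layer index sits just before the matched key, e.g.
                # "encoder.layers.3.self_attn.k_proj.weight" -> "3"
                layer_index = name.split(key)[0].split(".")[-2]
                mapped_key = mapped_key.replace("*", layer_index)
            return mapped_key
    return None

assert demo_rename("encoder.layers.3.self_attn.k_proj.weight") == "encoder.layers.3.attention.k_proj"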
| 458 |
import inspect
import unittest
import numpy as np
from tests.test_modeling_common import floats_tensor
from transformers import MaskaFormerConfig, is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MaskaFormerForUniversalSegmentation, MaskaFormerModel
if is_vision_available():
from transformers import MaskaFormerImageProcessor
if is_vision_available():
from PIL import Image
class _UpperCamelCase :
'''simple docstring'''
def __init__( self : int , _lowerCamelCase : List[str] , _lowerCamelCase : Tuple=2 , _lowerCamelCase : Optional[int]=True , _lowerCamelCase : Union[str, Any]=False , _lowerCamelCase : Dict=1_0 , _lowerCamelCase : Union[str, Any]=3 , _lowerCamelCase : Tuple=3_2 * 8 , _lowerCamelCase : int=3_2 * 8 , _lowerCamelCase : str=4 , _lowerCamelCase : List[Any]=6_4 , ):
'''simple docstring'''
__lowerCamelCase : Union[str, Any] = parent
__lowerCamelCase : Optional[int] = batch_size
__lowerCamelCase : List[str] = is_training
__lowerCamelCase : Dict = use_auxiliary_loss
__lowerCamelCase : Optional[int] = num_queries
__lowerCamelCase : Optional[Any] = num_channels
__lowerCamelCase : Dict = min_size
__lowerCamelCase : Optional[Any] = max_size
__lowerCamelCase : str = num_labels
__lowerCamelCase : Optional[Any] = hidden_dim
__lowerCamelCase : Optional[Any] = hidden_dim
def _snake_case ( self : int ):
'''simple docstring'''
__lowerCamelCase : List[Any] = floats_tensor([self.batch_size, self.num_channels, self.min_size, self.max_size] ).to(
_lowerCamelCase )
__lowerCamelCase : Optional[Any] = torch.ones([self.batch_size, self.min_size, self.max_size] , device=_lowerCamelCase )
__lowerCamelCase : int = (
torch.rand([self.batch_size, self.num_labels, self.min_size, self.max_size] , device=_lowerCamelCase ) > 0.5
).float()
__lowerCamelCase : List[Any] = (torch.rand((self.batch_size, self.num_labels) , device=_lowerCamelCase ) > 0.5).long()
__lowerCamelCase : Optional[Any] = self.get_config()
return config, pixel_values, pixel_mask, mask_labels, class_labels
def _snake_case ( self : Dict ):
'''simple docstring'''
__lowerCamelCase : Union[str, Any] = MaskaFormerConfig(
hidden_size=self.hidden_dim , )
__lowerCamelCase : List[Any] = self.num_queries
__lowerCamelCase : List[str] = self.num_labels
__lowerCamelCase : List[str] = [1, 1, 1, 1]
__lowerCamelCase : Optional[int] = self.num_channels
__lowerCamelCase : Optional[int] = 6_4
__lowerCamelCase : int = 1_2_8
__lowerCamelCase : Any = self.hidden_dim
__lowerCamelCase : List[str] = self.hidden_dim
__lowerCamelCase : List[Any] = self.hidden_dim
return config
def _snake_case ( self : str ):
'''simple docstring'''
__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase : int = self.prepare_config_and_inputs()
__lowerCamelCase : Optional[int] = {"""pixel_values""": pixel_values, """pixel_mask""": pixel_mask}
return config, inputs_dict
    def check_output_hidden_state( self , output , config ):
        '''simple docstring'''
        encoder_hidden_states = output.encoder_hidden_states
        pixel_decoder_hidden_states = output.pixel_decoder_hidden_states
        transformer_decoder_hidden_states = output.transformer_decoder_hidden_states
        # assertTrue would swallow the second value as a message; assertEqual
        # performs the intended length checks
        self.parent.assertEqual(len(encoder_hidden_states ) , len(config.backbone_config.depths ) )
        self.parent.assertEqual(len(pixel_decoder_hidden_states ) , len(config.backbone_config.depths ) )
        self.parent.assertEqual(len(transformer_decoder_hidden_states ) , config.decoder_layers )
def _snake_case ( self : Tuple , _lowerCamelCase : List[Any] , _lowerCamelCase : int , _lowerCamelCase : str , _lowerCamelCase : List[str]=False ):
'''simple docstring'''
with torch.no_grad():
__lowerCamelCase : Any = MaskaFormerModel(config=_lowerCamelCase )
model.to(_lowerCamelCase )
model.eval()
__lowerCamelCase : Dict = model(pixel_values=_lowerCamelCase , pixel_mask=_lowerCamelCase )
__lowerCamelCase : Optional[Any] = model(_lowerCamelCase , output_hidden_states=_lowerCamelCase )
self.parent.assertEqual(
output.transformer_decoder_last_hidden_state.shape , (self.batch_size, self.num_queries, self.hidden_dim) , )
# let's ensure the other two hidden state exists
self.parent.assertTrue(output.pixel_decoder_last_hidden_state is not None )
self.parent.assertTrue(output.encoder_last_hidden_state is not None )
if output_hidden_states:
self.check_output_hidden_state(_lowerCamelCase , _lowerCamelCase )
def _snake_case ( self : Union[str, Any] , _lowerCamelCase : Optional[int] , _lowerCamelCase : Dict , _lowerCamelCase : List[str] , _lowerCamelCase : Union[str, Any] , _lowerCamelCase : Tuple ):
'''simple docstring'''
__lowerCamelCase : List[Any] = MaskaFormerForUniversalSegmentation(config=_lowerCamelCase )
model.to(_lowerCamelCase )
model.eval()
def comm_check_on_output(_lowerCamelCase : Optional[int] ):
# let's still check that all the required stuff is there
self.parent.assertTrue(result.transformer_decoder_last_hidden_state is not None )
self.parent.assertTrue(result.pixel_decoder_last_hidden_state is not None )
self.parent.assertTrue(result.encoder_last_hidden_state is not None )
# okay, now we need to check the logits shape
# due to the encoder compression, masks have a //4 spatial size
self.parent.assertEqual(
result.masks_queries_logits.shape , (self.batch_size, self.num_queries, self.min_size // 4, self.max_size // 4) , )
# + 1 for null class
self.parent.assertEqual(
result.class_queries_logits.shape , (self.batch_size, self.num_queries, self.num_labels + 1) )
with torch.no_grad():
__lowerCamelCase : Optional[int] = model(pixel_values=_lowerCamelCase , pixel_mask=_lowerCamelCase )
__lowerCamelCase : Tuple = model(_lowerCamelCase )
comm_check_on_output(_lowerCamelCase )
__lowerCamelCase : str = model(
pixel_values=_lowerCamelCase , pixel_mask=_lowerCamelCase , mask_labels=_lowerCamelCase , class_labels=_lowerCamelCase )
comm_check_on_output(_lowerCamelCase )
self.parent.assertTrue(result.loss is not None )
self.parent.assertEqual(result.loss.shape , torch.Size([1] ) )
@require_torch
class _UpperCamelCase ( A,A,unittest.TestCase ):
'''simple docstring'''
a_ : Union[str, Any] = (MaskaFormerModel, MaskaFormerForUniversalSegmentation) if is_torch_available() else ()
a_ : Union[str, Any] = {"feature-extraction": MaskaFormerModel} if is_torch_available() else {}
a_ : Optional[Any] = False
a_ : int = False
a_ : List[str] = False
a_ : List[Any] = False
def _snake_case ( self : Optional[Any] ):
'''simple docstring'''
__lowerCamelCase : Dict = MaskaFormerModelTester(self )
__lowerCamelCase : int = ConfigTester(self , config_class=_lowerCamelCase , has_text_modality=_lowerCamelCase )
def _snake_case ( self : Optional[Any] ):
'''simple docstring'''
self.config_tester.run_common_tests()
def _snake_case ( self : Any ):
'''simple docstring'''
__lowerCamelCase , __lowerCamelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.create_and_check_maskaformer_model(_lowerCamelCase , **_lowerCamelCase , output_hidden_states=_lowerCamelCase )
def _snake_case ( self : int ):
'''simple docstring'''
__lowerCamelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_maskaformer_instance_segmentation_head_model(*_lowerCamelCase )
@unittest.skip(reason="""Mask2Former does not use inputs_embeds""" )
def _snake_case ( self : Dict ):
'''simple docstring'''
pass
@unittest.skip(reason="""Mask2Former does not have a get_input_embeddings method""" )
def _snake_case ( self : Tuple ):
'''simple docstring'''
pass
@unittest.skip(reason="""Mask2Former is not a generative model""" )
def _snake_case ( self : Optional[int] ):
'''simple docstring'''
pass
@unittest.skip(reason="""Mask2Former does not use token embeddings""" )
def _snake_case ( self : str ):
'''simple docstring'''
pass
@require_torch_multi_gpu
@unittest.skip(
reason="""Mask2Former has some layers using `add_module` which doesn't work well with `nn.DataParallel`""" )
def _snake_case ( self : int ):
'''simple docstring'''
pass
@unittest.skip("""Will be fixed soon by reducing the size of the model used for common tests.""" )
def _snake_case ( self : Any ):
'''simple docstring'''
pass
def _snake_case ( self : Any ):
'''simple docstring'''
__lowerCamelCase , __lowerCamelCase : Dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__lowerCamelCase : List[str] = model_class(_lowerCamelCase )
__lowerCamelCase : List[str] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__lowerCamelCase : Any = [*signature.parameters.keys()]
__lowerCamelCase : List[Any] = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , _lowerCamelCase )
@slow
def _snake_case ( self : Union[str, Any] ):
'''simple docstring'''
for model_name in ["facebook/mask2former-swin-small-coco-instance"]:
__lowerCamelCase : Dict = MaskaFormerModel.from_pretrained(_lowerCamelCase )
self.assertIsNotNone(_lowerCamelCase )
def _snake_case ( self : Optional[Any] ):
'''simple docstring'''
__lowerCamelCase : List[Any] = (self.model_tester.min_size,) * 2
__lowerCamelCase : Optional[Any] = {
"""pixel_values""": torch.randn((2, 3, *size) , device=_lowerCamelCase ),
"""mask_labels""": torch.randn((2, 1_0, *size) , device=_lowerCamelCase ),
"""class_labels""": torch.zeros(2 , 1_0 , device=_lowerCamelCase ).long(),
}
__lowerCamelCase : Any = self.model_tester.get_config()
__lowerCamelCase : Tuple = MaskaFormerForUniversalSegmentation(_lowerCamelCase ).to(_lowerCamelCase )
__lowerCamelCase : Union[str, Any] = model(**_lowerCamelCase )
self.assertTrue(outputs.loss is not None )
def _snake_case ( self : str ):
'''simple docstring'''
__lowerCamelCase , __lowerCamelCase : Any = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.create_and_check_maskaformer_model(_lowerCamelCase , **_lowerCamelCase , output_hidden_states=_lowerCamelCase )
def _snake_case ( self : Tuple ):
'''simple docstring'''
__lowerCamelCase , __lowerCamelCase : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__lowerCamelCase : List[str] = model_class(_lowerCamelCase ).to(_lowerCamelCase )
__lowerCamelCase : int = model(**_lowerCamelCase , output_attentions=_lowerCamelCase )
self.assertTrue(outputs.attentions is not None )
def _snake_case ( self : Any ):
'''simple docstring'''
if not self.model_tester.is_training:
return
__lowerCamelCase : str = self.all_model_classes[1]
__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase : Dict = self.model_tester.prepare_config_and_inputs()
__lowerCamelCase : List[Any] = model_class(_lowerCamelCase )
model.to(_lowerCamelCase )
model.train()
__lowerCamelCase : Tuple = model(_lowerCamelCase , mask_labels=_lowerCamelCase , class_labels=_lowerCamelCase ).loss
loss.backward()
def _snake_case ( self : Optional[int] ):
'''simple docstring'''
__lowerCamelCase : List[Any] = self.all_model_classes[1]
__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase : Optional[int] = self.model_tester.prepare_config_and_inputs()
__lowerCamelCase : Optional[int] = True
__lowerCamelCase : Any = True
__lowerCamelCase : Any = model_class(_lowerCamelCase ).to(_lowerCamelCase )
model.train()
__lowerCamelCase : Optional[Any] = model(_lowerCamelCase , mask_labels=_lowerCamelCase , class_labels=_lowerCamelCase )
__lowerCamelCase : Union[str, Any] = outputs.encoder_hidden_states[0]
encoder_hidden_states.retain_grad()
__lowerCamelCase : Any = outputs.pixel_decoder_hidden_states[0]
pixel_decoder_hidden_states.retain_grad()
__lowerCamelCase : Optional[Any] = outputs.transformer_decoder_hidden_states[0]
transformer_decoder_hidden_states.retain_grad()
__lowerCamelCase : List[str] = outputs.attentions[0]
attentions.retain_grad()
outputs.loss.backward(retain_graph=_lowerCamelCase )
self.assertIsNotNone(encoder_hidden_states.grad )
self.assertIsNotNone(pixel_decoder_hidden_states.grad )
self.assertIsNotNone(transformer_decoder_hidden_states.grad )
self.assertIsNotNone(attentions.grad )
__UpperCamelCase : str = 1E-4
def _UpperCAmelCase ( ):
"""simple docstring"""
__lowerCamelCase : Union[str, Any] = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
return image
@require_vision
@slow
class _UpperCamelCase ( unittest.TestCase ):
'''simple docstring'''
@cached_property
def _snake_case ( self : Union[str, Any] ):
'''simple docstring'''
return "facebook/mask2former-swin-small-coco-instance"
@cached_property
def _snake_case ( self : Dict ):
'''simple docstring'''
return MaskaFormerImageProcessor.from_pretrained(self.model_checkpoints ) if is_vision_available() else None
def _snake_case ( self : List[Any] ):
'''simple docstring'''
__lowerCamelCase : int = MaskaFormerModel.from_pretrained(self.model_checkpoints ).to(_lowerCamelCase )
__lowerCamelCase : str = self.default_image_processor
__lowerCamelCase : Dict = prepare_img()
__lowerCamelCase : List[Any] = image_processor(_lowerCamelCase , return_tensors="""pt""" ).to(_lowerCamelCase )
__lowerCamelCase : Optional[int] = inputs["""pixel_values"""].shape
# check size is divisible by 32
self.assertTrue((inputs_shape[-1] % 3_2) == 0 and (inputs_shape[-2] % 3_2) == 0 )
# check size
self.assertEqual(_lowerCamelCase , (1, 3, 3_8_4, 3_8_4) )
with torch.no_grad():
__lowerCamelCase : str = model(**_lowerCamelCase )
__lowerCamelCase : Dict = torch.tensor(
[[-0.2_790, -1.0_717, -1.1_668], [-0.5_128, -0.3_128, -0.4_987], [-0.5_832, 0.1_971, -0.0_197]] ).to(_lowerCamelCase )
self.assertTrue(
torch.allclose(
outputs.encoder_last_hidden_state[0, 0, :3, :3] , _lowerCamelCase , atol=_lowerCamelCase ) )
__lowerCamelCase : Union[str, Any] = torch.tensor(
[[0.8_973, 1.1_847, 1.1_776], [1.1_934, 1.5_040, 1.5_128], [1.1_153, 1.4_486, 1.4_951]] ).to(_lowerCamelCase )
self.assertTrue(
torch.allclose(
outputs.pixel_decoder_last_hidden_state[0, 0, :3, :3] , _lowerCamelCase , atol=_lowerCamelCase ) )
__lowerCamelCase : Tuple = torch.tensor(
[[2.1_152, 1.7_000, -0.8_603], [1.5_808, 1.8_004, -0.9_353], [1.6_043, 1.7_495, -0.5_999]] ).to(_lowerCamelCase )
self.assertTrue(
torch.allclose(
outputs.transformer_decoder_last_hidden_state[0, :3, :3] , _lowerCamelCase , atol=_lowerCamelCase ) )
def _snake_case ( self : int ):
'''simple docstring'''
__lowerCamelCase : int = MaskaFormerForUniversalSegmentation.from_pretrained(self.model_checkpoints ).to(_lowerCamelCase ).eval()
__lowerCamelCase : Any = self.default_image_processor
__lowerCamelCase : Union[str, Any] = prepare_img()
__lowerCamelCase : int = image_processor(_lowerCamelCase , return_tensors="""pt""" ).to(_lowerCamelCase )
__lowerCamelCase : int = inputs["""pixel_values"""].shape
# check size is divisible by 32
self.assertTrue((inputs_shape[-1] % 3_2) == 0 and (inputs_shape[-2] % 3_2) == 0 )
# check size
self.assertEqual(_lowerCamelCase , (1, 3, 3_8_4, 3_8_4) )
with torch.no_grad():
__lowerCamelCase : Dict = model(**_lowerCamelCase )
# masks_queries_logits
__lowerCamelCase : Dict = outputs.masks_queries_logits
self.assertEqual(
masks_queries_logits.shape , (1, model.config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4) )
__lowerCamelCase : Any = [
[-8.7_839, -9.0_056, -8.8_121],
[-7.4_104, -7.0_313, -6.5_401],
[-6.6_105, -6.3_427, -6.4_675],
]
__lowerCamelCase : List[str] = torch.tensor(_lowerCamelCase ).to(_lowerCamelCase )
self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3] , _lowerCamelCase , atol=_lowerCamelCase ) )
# class_queries_logits
__lowerCamelCase : Union[str, Any] = outputs.class_queries_logits
self.assertEqual(class_queries_logits.shape , (1, model.config.num_queries, model.config.num_labels + 1) )
__lowerCamelCase : List[Any] = torch.tensor(
[
[1.8_324, -8.0_835, -4.1_922],
[0.8_450, -9.0_050, -3.6_053],
[0.3_045, -7.7_293, -3.0_275],
] ).to(_lowerCamelCase )
self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3] , _lowerCamelCase , atol=_lowerCamelCase ) )
def _snake_case ( self : Optional[int] ):
'''simple docstring'''
__lowerCamelCase : Tuple = MaskaFormerForUniversalSegmentation.from_pretrained(self.model_checkpoints ).to(_lowerCamelCase ).eval()
__lowerCamelCase : Dict = self.default_image_processor
__lowerCamelCase : List[Any] = image_processor(
[np.zeros((3, 8_0_0, 1_3_3_3) ), np.zeros((3, 8_0_0, 1_3_3_3) )] , segmentation_maps=[np.zeros((3_8_4, 3_8_4) ).astype(np.floataa ), np.zeros((3_8_4, 3_8_4) ).astype(np.floataa )] , return_tensors="""pt""" , )
__lowerCamelCase : int = inputs["""pixel_values"""].to(_lowerCamelCase )
__lowerCamelCase : Optional[Any] = [el.to(_lowerCamelCase ) for el in inputs["""mask_labels"""]]
__lowerCamelCase : Union[str, Any] = [el.to(_lowerCamelCase ) for el in inputs["""class_labels"""]]
with torch.no_grad():
__lowerCamelCase : Dict = model(**_lowerCamelCase )
self.assertTrue(outputs.loss is not None )
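# A minimal, dependency-light sketch of the retain_grad pattern exercised by
# the training-gradient test above: keep the gradient of an intermediate
# (non-leaf) activation and check that backward() populates it. The toy model
# and shapes are illustrative assumptions.
import torch

def demo_retain_grad():
    x = torch.randn(2, 4, requires_grad=True)
    hidden = torch.nn.Linear(4, 4)(x)  # intermediate, non-leaf tensor
    hidden.retain_grad()               # ask autograd to keep its gradient
    hidden.pow(2).sum().backward()
    assert hidden.grad is not None     # mirrors self.assertIsNotNone(...grad)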
| 458 | 1 |
"""simple docstring"""
from google.protobuf import descriptor as _descriptor
from google.protobuf import descriptor_pool as _descriptor_pool
from google.protobuf import symbol_database as _symbol_database
from google.protobuf.internal import builder as _builder
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(
b'''\n\x19sentencepiece_model.proto\x12\rsentencepiece"\x80\x0c\n\x0bTrainerSpec\x12\r\n\x05input\x18\x01 \x03(\t\x12\x14\n\x0cinput_format\x18\x07 \x01(\t\x12\x14\n\x0cmodel_prefix\x18\x02 \x01(\t\x12\x41\n\nmodel_type\x18\x03 \x01(\x0e\x32$.sentencepiece.TrainerSpec.ModelType:\x07UNIGRAM\x12\x18\n\nvocab_size\x18\x04 \x01(\x05:\x04\x38\x30\x30\x30\x12\x17\n\x0f\x61\x63\x63\x65pt_language\x18\x05 \x03(\t\x12 \n\x15self_test_sample_size\x18\x06 \x01(\x05:\x01\x30\x12*\n\x1b\x65nable_differential_privacy\x18\x32 \x01(\x08:\x05\x66\x61lse\x12+\n differential_privacy_noise_level\x18\x33 \x01(\x02:\x01\x30\x12\x32\n\'differential_privacy_clipping_threshold\x18\x34 \x01(\x04:\x01\x30\x12"\n\x12\x63haracter_coverage\x18\n \x01(\x02:\x06\x30.9995\x12\x1e\n\x13input_sentence_size\x18\x0b \x01(\x04:\x01\x30\x12$\n\x16shuffle_input_sentence\x18\x13 \x01(\x08:\x04true\x12 \n\x14mining_sentence_size\x18\x0c \x01(\x05\x42\x02\x18\x01\x12"\n\x16training_sentence_size\x18\r \x01(\x05\x42\x02\x18\x01\x12(\n\x17seed_sentencepiece_size\x18\x0e \x01(\x05:\x07\x31\x30\x30\x30\x30\x30\x30\x12\x1e\n\x10shrinking_factor\x18\x0f \x01(\x02:\x04\x30.75\x12!\n\x13max_sentence_length\x18\x12 \x01(\x05:\x04\x34\x31\x39\x32\x12\x17\n\x0bnum_threads\x18\x10 \x01(\x05:\x02\x31\x36\x12\x1d\n\x12num_sub_iterations\x18\x11 \x01(\x05:\x01\x32\x12$\n\x18max_sentencepiece_length\x18\x14 \x01(\x05:\x02\x31\x36\x12%\n\x17split_by_unicode_script\x18\x15 \x01(\x08:\x04true\x12\x1d\n\x0fsplit_by_number\x18\x17 \x01(\x08:\x04true\x12!\n\x13split_by_whitespace\x18\x16 \x01(\x08:\x04true\x12)\n\x1atreat_whitespace_as_suffix\x18\x18 \x01(\x08:\x05\x66\x61lse\x12+\n\x1c\x61llow_whitespace_only_pieces\x18\x1a \x01(\x08:\x05\x66\x61lse\x12\x1b\n\x0csplit_digits\x18\x19 \x01(\x08:\x05\x66\x61lse\x12#\n\x19pretokenization_delimiter\x18\x35 \x01(\t:\x00\x12\x17\n\x0f\x63ontrol_symbols\x18\x1e \x03(\t\x12\x1c\n\x14user_defined_symbols\x18\x1f \x03(\t\x12\x16\n\x0erequired_chars\x18$ \x01(\t\x12\x1c\n\rbyte_fallback\x18# \x01(\x08:\x05\x66\x61lse\x12+\n\x1dvocabulary_output_piece_score\x18  \x01(\x08:\x04true\x12\x1e\n\x10hard_vocab_limit\x18! \x01(\x08:\x04true\x12\x1c\n\ruse_all_vocab\x18" \x01(\x08:\x05\x66\x61lse\x12\x11\n\x06unk_id\x18( \x01(\x05:\x01\x30\x12\x11\n\x06\x62os_id\x18) \x01(\x05:\x01\x31\x12\x11\n\x06\x65os_id\x18* \x01(\x05:\x01\x32\x12\x12\n\x06pad_id\x18+ \x01(\x05:\x02-1\x12\x18\n\tunk_piece\x18- \x01(\t:\x05<unk>\x12\x16\n\tbos_piece\x18. \x01(\t:\x03<s>\x12\x17\n\teos_piece\x18/ \x01(\t:\x04</s>\x12\x18\n\tpad_piece\x18\x30 \x01(\t:\x05<pad>\x12\x1a\n\x0bunk_surface\x18, \x01(\t:\x05 \xe2\x81\x87 \x12+\n\x1ctrain_extremely_large_corpus\x18\x31 \x01(\x08:\x05\x66\x61lse"5\n\tModelType\x12\x0b\n\x07UNIGRAM\x10\x01\x12\x07\n\x03\x42PE\x10\x02\x12\x08\n\x04WORD\x10\x03\x12\x08\n\x04\x43HAR\x10\x04*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02"\xd1\x01\n\x0eNormalizerSpec\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x1c\n\x14precompiled_charsmap\x18\x02 \x01(\x0c\x12\x1e\n\x10\x61\x64\x64_dummy_prefix\x18\x03 \x01(\x08:\x04true\x12&\n\x18remove_extra_whitespaces\x18\x04 \x01(\x08:\x04true\x12 \n\x12\x65scape_whitespaces\x18\x05 \x01(\x08:\x04true\x12\x1e\n\x16normalization_rule_tsv\x18\x06 \x01(\t*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02"y\n\x0cSelfTestData\x12\x33\n\x07samples\x18\x01 \x03(\x0b\x32".sentencepiece.SelfTestData.Sample\x1a)\n\x06Sample\x12\r\n\x05input\x18\x01 \x01(\t\x12\x10\n\x08\x65xpected\x18\x02 \x01(\t*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02"\xfe\x03\n\nModelProto\x12\x37\n\x06pieces\x18\x01 \x03(\x0b\x32\'.sentencepiece.ModelProto.SentencePiece\x12\x30\n\x0ctrainer_spec\x18\x02 \x01(\x0b\x32\x1a.sentencepiece.TrainerSpec\x12\x36\n\x0fnormalizer_spec\x18\x03 \x01(\x0b\x32\x1d.sentencepiece.NormalizerSpec\x12\x33\n\x0eself_test_data\x18\x04 \x01(\x0b\x32\x1b.sentencepiece.SelfTestData\x12\x38\n\x11\x64\x65normalizer_spec\x18\x05 \x01(\x0b\x32\x1d.sentencepiece.NormalizerSpec\x1a\xd2\x01\n\rSentencePiece\x12\r\n\x05piece\x18\x01 \x01(\t\x12\r\n\x05score\x18\x02 \x01(\x02\x12\x42\n\x04type\x18\x03 \x01(\x0e\x32,.sentencepiece.ModelProto.SentencePiece.Type:\x06NORMAL"T\n\x04Type\x12\n\n\x06NORMAL\x10\x01\x12\x0b\n\x07UNKNOWN\x10\x02\x12\x0b\n\x07\x43ONTROL\x10\x03\x12\x10\n\x0cUSER_DEFINED\x10\x04\x12\x08\n\x04\x42YTE\x10\x06\x12\n\n\x06UNUSED\x10\x05*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02\x42\x02H\x03'''
)
_globals = globals()
_builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, _globals)
_builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, '''sentencepiece_model_pb2''', _globals)
if _descriptor._USE_C_DESCRIPTORS is False:
    DESCRIPTOR._options = None
    DESCRIPTOR._serialized_options = b'''H\003'''
# (generated by protobuf compiler, but `_TRAINERSPEC` is not defined)
# _TRAINERSPEC.fields_by_name["mining_sentence_size"]._options = None
# _TRAINERSPEC.fields_by_name["mining_sentence_size"]._serialized_options = b"\030\001"
# _TRAINERSPEC.fields_by_name["training_sentence_size"]._options = None
# _TRAINERSPEC.fields_by_name["training_sentence_size"]._serialized_options = b"\030\001"
    _globals['''_TRAINERSPEC''']._serialized_start = 45
    _globals['''_TRAINERSPEC''']._serialized_end = 1581
    _globals['''_TRAINERSPEC_MODELTYPE''']._serialized_start = 1517
    _globals['''_TRAINERSPEC_MODELTYPE''']._serialized_end = 1570
    _globals['''_NORMALIZERSPEC''']._serialized_start = 1584
    _globals['''_NORMALIZERSPEC''']._serialized_end = 1793
    _globals['''_SELFTESTDATA''']._serialized_start = 1795
    _globals['''_SELFTESTDATA''']._serialized_end = 1916
    _globals['''_SELFTESTDATA_SAMPLE''']._serialized_start = 1864
    _globals['''_SELFTESTDATA_SAMPLE''']._serialized_end = 1905
    _globals['''_MODELPROTO''']._serialized_start = 1919
    _globals['''_MODELPROTO''']._serialized_end = 2429
    _globals['''_MODELPROTO_SENTENCEPIECE''']._serialized_start = 2208
    _globals['''_MODELPROTO_SENTENCEPIECE''']._serialized_end = 2418
    _globals['''_MODELPROTO_SENTENCEPIECE_TYPE''']._serialized_start = 2323
    _globals['''_MODELPROTO_SENTENCEPIECE_TYPE''']._serialized_end = 2407
# @@protoc_insertion_point(module_scope)
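# A hedged sketch of how a generated module like this is typically used: parse
# a serialized SentencePiece model and inspect its pieces. ModelProto is
# injected into this module's globals by BuildTopDescriptorsAndMessages above;
# the file path below is an assumption -- point it at any real "spiece.model".
def inspect_sentencepiece_model(path="spiece.model"):
    m = ModelProto()
    with open(path, "rb") as f:
        m.ParseFromString(f.read())
    print("vocab size:", len(m.pieces))
    print("first pieces:", [p.piece for p in m.pieces[:5]])
    print("trainer model type:", m.trainer_spec.model_type)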
| 580 |
"""simple docstring"""
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    from ...utils.dummy_torch_and_transformers_objects import *  # noqa F403
else:
from .pipeline_kandinsky import KandinskyPipeline
from .pipeline_kandinsky_imgaimg import KandinskyImgaImgPipeline
from .pipeline_kandinsky_inpaint import KandinskyInpaintPipeline
from .pipeline_kandinsky_prior import KandinskyPriorPipeline, KandinskyPriorPipelineOutput
from .text_encoder import MultilingualCLIP
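# The guard above follows a common optional-dependency pattern: probe for the
# heavy backends and, if they are missing, expose dummy objects that raise a
# helpful error only when used. A generic, self-contained sketch of the same
# idea (the names below are illustrative, not diffusers internals):
import importlib.util

if importlib.util.find_spec("torch") is not None:
    _DEMO_BACKEND_AVAILABLE = True
else:
    _DEMO_BACKEND_AVAILABLE = False

    class _DemoPipeline:
        """Stand-in that fails loudly at call time instead of import time."""

        def __init__(self, *args, **kwargs):
            raise ImportError("_DemoPipeline requires `torch` to be installed.")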
| 580 | 1 |
"""simple docstring"""
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
a_ = logging.get_logger(__name__)
a_ = {
'asapp/sew-d-tiny-100k': 'https://huggingface.co/asapp/sew-d-tiny-100k/resolve/main/config.json',
# See all SEW-D models at https://huggingface.co/models?filter=sew-d
}
class __lowercase ( _UpperCAmelCase):
"""simple docstring"""
_A : str = 'sew-d'
    def __init__(self , vocab_size=32 , hidden_size=7_68 , num_hidden_layers=12 , num_attention_heads=12 , intermediate_size=30_72 , squeeze_factor=2 , max_position_embeddings=5_12 , position_buckets=2_56 , share_att_key=True , relative_attention=True , pos_att_type=("p2c", "c2p") , norm_rel_ebd="layer_norm" , hidden_act="gelu_python" , hidden_dropout=0.1 , activation_dropout=0.1 , attention_dropout=0.1 , feat_proj_dropout=0.0 , final_dropout=0.1 , initializer_range=0.02 , layer_norm_eps=1e-7 , feature_layer_norm_eps=1e-5 , feat_extract_norm="group" , feat_extract_activation="gelu" , conv_dim=(64, 1_28, 1_28, 1_28, 1_28, 2_56, 2_56, 2_56, 2_56, 5_12, 5_12, 5_12, 5_12) , conv_stride=(5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1) , conv_kernel=(10, 3, 1, 3, 1, 3, 1, 3, 1, 2, 1, 2, 1) , conv_bias=False , num_conv_pos_embeddings=1_28 , num_conv_pos_embedding_groups=16 , apply_spec_augment=True , mask_time_prob=0.05 , mask_time_length=10 , mask_time_min_masks=2 , mask_feature_prob=0.0 , mask_feature_length=10 , mask_feature_min_masks=0 , ctc_loss_reduction="mean" , ctc_zero_infinity=False , use_weighted_layer_sum=False , classifier_proj_size=2_56 , pad_token_id=0 , bos_token_id=1 , eos_token_id=2 , **kwargs , ):
        super().__init__(**kwargs , pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id )
        self.hidden_size = hidden_size
        self.feat_extract_norm = feat_extract_norm
        self.feat_extract_activation = feat_extract_activation
        self.conv_dim = list(conv_dim )
        self.conv_stride = list(conv_stride )
        self.conv_kernel = list(conv_kernel )
        self.conv_bias = conv_bias
        self.num_conv_pos_embeddings = num_conv_pos_embeddings
        self.num_conv_pos_embedding_groups = num_conv_pos_embedding_groups
        self.num_feat_extract_layers = len(self.conv_dim )
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.squeeze_factor = squeeze_factor
        self.max_position_embeddings = max_position_embeddings
        self.position_buckets = position_buckets
        self.share_att_key = share_att_key
        self.relative_attention = relative_attention
        self.norm_rel_ebd = norm_rel_ebd
        self.pos_att_type = list(pos_att_type )
        self.hidden_act = hidden_act
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.feat_proj_dropout = feat_proj_dropout
        self.final_dropout = final_dropout
        self.layer_norm_eps = layer_norm_eps
        self.feature_layer_norm_eps = feature_layer_norm_eps
        self.initializer_range = initializer_range
        self.vocab_size = vocab_size
        if (
            (len(self.conv_stride ) != self.num_feat_extract_layers)
            or (len(self.conv_kernel ) != self.num_feat_extract_layers)
            or (len(self.conv_dim ) != self.num_feat_extract_layers)
        ):
            raise ValueError(
                """Configuration for convolutional layers is incorrect. """
                """It is required that `len(config.conv_dim)` == `len(config.conv_stride)` == `len(config.conv_kernel)`, """
                f'but is `len(config.conv_dim) = {len(self.conv_dim )}`, `len(config.conv_stride)'
                f'= {len(self.conv_stride )}`, `len(config.conv_kernel) = {len(self.conv_kernel )}`.' )
        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.apply_spec_augment = apply_spec_augment
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length
        self.mask_feature_min_masks = mask_feature_min_masks
        # ctc loss
        self.ctc_loss_reduction = ctc_loss_reduction
        self.ctc_zero_infinity = ctc_zero_infinity
        # sequence classification
        self.use_weighted_layer_sum = use_weighted_layer_sum
        self.classifier_proj_size = classifier_proj_size
@property
def __UpperCamelCase (self ):
return functools.reduce(operator.mul , self.conv_stride , 1 )
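# The property above multiplies the conv strides together: with the default
# stride tuple (5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1) one output frame covers
# 5 * 2**6 = 320 input samples. A standalone check of the same arithmetic:
import functools
import operator

_demo_conv_stride = (5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1)
assert functools.reduce(operator.mul, _demo_conv_stride, 1) == 320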
| 717 |
"""simple docstring"""
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from tokenizers import processors
from ...tokenization_utils import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_mbart import MBartTokenizer
else:
a_ = None
a_ = logging.get_logger(__name__)
a_ = {'''vocab_file''': '''sentencepiece.bpe.model''', '''tokenizer_file''': '''tokenizer.json'''}
a_ = {
'''vocab_file''': {
'''facebook/mbart-large-en-ro''': (
'''https://huggingface.co/facebook/mbart-large-en-ro/resolve/main/sentencepiece.bpe.model'''
),
'''facebook/mbart-large-cc25''': (
'''https://huggingface.co/facebook/mbart-large-cc25/resolve/main/sentencepiece.bpe.model'''
),
},
'''tokenizer_file''': {
'''facebook/mbart-large-en-ro''': '''https://huggingface.co/facebook/mbart-large-en-ro/resolve/main/tokenizer.json''',
'''facebook/mbart-large-cc25''': '''https://huggingface.co/facebook/mbart-large-cc25/resolve/main/tokenizer.json''',
},
}
a_ = {
'''facebook/mbart-large-en-ro''': 1024,
'''facebook/mbart-large-cc25''': 1024,
}
# fmt: off
a_ = ['''ar_AR''', '''cs_CZ''', '''de_DE''', '''en_XX''', '''es_XX''', '''et_EE''', '''fi_FI''', '''fr_XX''', '''gu_IN''', '''hi_IN''', '''it_IT''', '''ja_XX''', '''kk_KZ''', '''ko_KR''', '''lt_LT''', '''lv_LV''', '''my_MM''', '''ne_NP''', '''nl_XX''', '''ro_RO''', '''ru_RU''', '''si_LK''', '''tr_TR''', '''vi_VN''', '''zh_CN''']
class __lowercase ( _UpperCAmelCase):
"""simple docstring"""
_A : Dict = VOCAB_FILES_NAMES
_A : Union[str, Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_A : Optional[int] = PRETRAINED_VOCAB_FILES_MAP
_A : str = ["""input_ids""", """attention_mask"""]
_A : Tuple = MBartTokenizer
_A : List[int] = []
_A : List[int] = []
    def __init__(self , vocab_file=None , tokenizer_file=None , bos_token="<s>" , eos_token="</s>" , sep_token="</s>" , cls_token="<s>" , unk_token="<unk>" , pad_token="<pad>" , mask_token="<mask>" , src_lang=None , tgt_lang=None , additional_special_tokens=None , **kwargs , ):
        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token , lstrip=True , rstrip=False ) if isinstance(mask_token , str ) else mask_token
        super().__init__(
            vocab_file=vocab_file , tokenizer_file=tokenizer_file , bos_token=bos_token , eos_token=eos_token , sep_token=sep_token , cls_token=cls_token , unk_token=unk_token , pad_token=pad_token , mask_token=mask_token , src_lang=src_lang , tgt_lang=tgt_lang , additional_special_tokens=additional_special_tokens , **kwargs , )
        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True
        _additional_special_tokens = FAIRSEQ_LANGUAGE_CODES.copy()
        if additional_special_tokens is not None:
            # Only add those special tokens if they are not already there.
            _additional_special_tokens.extend(
                [t for t in additional_special_tokens if t not in _additional_special_tokens] )
        self.add_special_tokens({"""additional_special_tokens""": _additional_special_tokens} )
        self.lang_code_to_id = {
            lang_code: self.convert_tokens_to_ids(lang_code ) for lang_code in FAIRSEQ_LANGUAGE_CODES
        }
        self._src_lang = src_lang if src_lang is not None else """en_XX"""
        self.cur_lang_code = self.convert_tokens_to_ids(self._src_lang )
        self.tgt_lang = tgt_lang
        self.set_src_lang_special_tokens(self._src_lang )
    @property
    def src_lang(self ) -> str:
        return self._src_lang

    @src_lang.setter
    def src_lang(self , new_src_lang ) -> None:
        self._src_lang = new_src_lang
        self.set_src_lang_special_tokens(self._src_lang )
    def build_inputs_with_special_tokens(self , token_ids_0 , token_ids_1 = None ):
        if token_ids_1 is None:
            return self.prefix_tokens + token_ids_0 + self.suffix_tokens
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return self.prefix_tokens + token_ids_0 + token_ids_1 + self.suffix_tokens

    def create_token_type_ids_from_sequences(self , token_ids_0 , token_ids_1 = None ):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep ) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep ) * [0]
    def _build_translation_inputs(self , raw_inputs , return_tensors , src_lang , tgt_lang , **extra_kwargs ):
        if src_lang is None or tgt_lang is None:
            raise ValueError("""Translation requires a `src_lang` and a `tgt_lang` for this model""" )
        self.src_lang = src_lang
        inputs = self(raw_inputs , add_special_tokens=True , return_tensors=return_tensors , **extra_kwargs )
        tgt_lang_id = self.convert_tokens_to_ids(tgt_lang )
        inputs["""forced_bos_token_id"""] = tgt_lang_id
        return inputs
def __UpperCamelCase (self , lowercase__ , lowercase__ = "en_XX" , lowercase__ = None , lowercase__ = "ro_RO" , **lowercase__ , ):
snake_case_ : List[str] = src_lang
snake_case_ : int = tgt_lang
return super().prepare_seqaseq_batch(lowercase__ , lowercase__ , **lowercase__ )
    def _switch_to_input_mode(self ):
        return self.set_src_lang_special_tokens(self.src_lang )

    def _switch_to_target_mode(self ):
        return self.set_tgt_lang_special_tokens(self.tgt_lang )
    def set_src_lang_special_tokens(self , src_lang ) -> None:
        self.cur_lang_code = self.convert_tokens_to_ids(src_lang )
        self.prefix_tokens = []
        self.suffix_tokens = [self.eos_token_id, self.cur_lang_code]
        prefix_tokens_str = self.convert_ids_to_tokens(self.prefix_tokens )
        suffix_tokens_str = self.convert_ids_to_tokens(self.suffix_tokens )
        self._tokenizer.post_processor = processors.TemplateProcessing(
            single=prefix_tokens_str + ["""$A"""] + suffix_tokens_str , pair=prefix_tokens_str + ["""$A""", """$B"""] + suffix_tokens_str , special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str , self.prefix_tokens + self.suffix_tokens ) ) , )

    def set_tgt_lang_special_tokens(self , lang ) -> None:
        self.cur_lang_code = self.convert_tokens_to_ids(lang )
        self.prefix_tokens = []
        self.suffix_tokens = [self.eos_token_id, self.cur_lang_code]
        prefix_tokens_str = self.convert_ids_to_tokens(self.prefix_tokens )
        suffix_tokens_str = self.convert_ids_to_tokens(self.suffix_tokens )
        self._tokenizer.post_processor = processors.TemplateProcessing(
            single=prefix_tokens_str + ["""$A"""] + suffix_tokens_str , pair=prefix_tokens_str + ["""$A""", """$B"""] + suffix_tokens_str , special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str , self.prefix_tokens + self.suffix_tokens ) ) , )
    def save_vocabulary(self , save_directory , filename_prefix = None ):
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                """Your fast tokenizer does not have the necessary information to save the vocabulary for a slow """
                """tokenizer.""" )
        if not os.path.isdir(save_directory ):
            logger.error(f'Vocabulary path ({save_directory}) should be a directory.' )
            return
        out_vocab_file = os.path.join(
            save_directory , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
        if os.path.abspath(self.vocab_file ) != os.path.abspath(out_vocab_file ):
            copyfile(self.vocab_file , out_vocab_file )
        return (out_vocab_file,)
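# A usage sketch for the language-code handling above. It needs network access
# to fetch the checkpoint, so treat it as illustrative rather than a unit test;
# MBartTokenizerFast is the public transformers name for this tokenizer. For
# classic MBart the language code is appended as a suffix token after EOS,
# which is exactly what set_src_lang_special_tokens arranges.
def demo_mbart_language_codes():
    from transformers import MBartTokenizerFast

    tok = MBartTokenizerFast.from_pretrained("facebook/mbart-large-en-ro")
    tok.src_lang = "en_XX"  # re-runs set_src_lang_special_tokens via the setter
    enc = tok("UN Chief says there is no military solution", return_tensors="pt")
    assert enc["input_ids"][0][-1].item() == tok.convert_tokens_to_ids("en_XX")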
| 48 | 0 |
from copy import deepcopy
import torch
import torch.nn.functional as F
from torch.optim import AdamW
from torch.optim.lr_scheduler import LambdaLR
from torch.utils.data import DataLoader
from accelerate.accelerator import Accelerator
from accelerate.state import GradientState
from accelerate.test_utils import RegressionDataset, RegressionModel
from accelerate.utils import DistributedType, is_torch_version, set_seed
def check_model_parameters( model_a, model_b, did_step, iteration )-> Dict:
"""simple docstring"""
for param, grad_param in zip(model_a.parameters(), model_b.parameters() ):
if not param.requires_grad:
continue
if not did_step:
# Grads should not be in sync
assert (
torch.allclose(param.grad, grad_param.grad ) is False
), f'Gradients in sync when they should not be at iteration {iteration}:\nmodel_a grad ({param.grad}) == model_b grad ({grad_param.grad})'
else:
# Grads should be in sync
assert (
torch.allclose(param.grad, grad_param.grad ) is True
), f'Gradients not in sync when they should be at iteration {iteration}:\nmodel_a grad ({param.grad}) != model_b grad ({grad_param.grad})'
def step_model( model, input, target, accelerator, do_backward=False )-> List[Any]:
    """simple docstring"""
    model.train()
    output = model(input )
    loss = F.mse_loss(output, target.to(output.device ) )
    if not do_backward:
        loss /= accelerator.gradient_accumulation_steps
        loss.backward()
    else:
        accelerator.backward(loss )
def __UpperCAmelCase ( UpperCAmelCase, UpperCAmelCase=False )-> Tuple:
"""simple docstring"""
set_seed(42 )
lowercase = RegressionModel()
lowercase = deepcopy(UpperCAmelCase )
lowercase = RegressionDataset(length=80 )
lowercase = DataLoader(UpperCAmelCase, batch_size=16 )
model.to(accelerator.device )
if sched:
lowercase = AdamW(params=model.parameters(), lr=1e-3 )
lowercase = AdamW(params=ddp_model.parameters(), lr=1e-3 )
lowercase = LambdaLR(UpperCAmelCase, lr_lambda=lambda UpperCAmelCase : epoch**0.65 )
lowercase = LambdaLR(UpperCAmelCase, lr_lambda=lambda UpperCAmelCase : epoch**0.65 )
# Make a copy of `model`
if sched:
lowercase ,lowercase ,lowercase ,lowercase = accelerator.prepare(UpperCAmelCase, UpperCAmelCase, UpperCAmelCase, UpperCAmelCase )
else:
lowercase ,lowercase = accelerator.prepare(UpperCAmelCase, UpperCAmelCase )
if sched:
return (model, opt, sched, dataloader, ddp_model, ddp_opt, ddp_sched)
return model, ddp_model, dataloader
def __UpperCAmelCase ( UpperCAmelCase )-> Optional[int]:
"""simple docstring"""
lowercase ,lowercase ,lowercase = get_training_setup(UpperCAmelCase )
# Use a single batch
lowercase ,lowercase = next(iter(UpperCAmelCase ) ).values()
for iteration in range(3 ):
# Gather the distributed inputs and targs for the base model
lowercase ,lowercase = accelerator.gather((ddp_input, ddp_target) )
lowercase ,lowercase = input.to(accelerator.device ), target.to(accelerator.device )
# Perform our initial ground truth step in non "DDP"
step_model(UpperCAmelCase, UpperCAmelCase, UpperCAmelCase, UpperCAmelCase )
# Do "gradient accumulation" (noop)
if iteration % 2 == 0:
# Accumulate grads locally
with accelerator.no_sync(UpperCAmelCase ):
step_model(UpperCAmelCase, UpperCAmelCase, UpperCAmelCase, UpperCAmelCase )
else:
# Sync grads
step_model(UpperCAmelCase, UpperCAmelCase, UpperCAmelCase, UpperCAmelCase )
# Since `no_sync` is a noop, `ddp_model` and `model` grads should always be in sync
check_model_parameters(UpperCAmelCase, UpperCAmelCase, UpperCAmelCase, UpperCAmelCase )
for param, ddp_param in zip(model.parameters(), ddp_model.parameters() ):
if not param.requires_grad:
continue
assert torch.allclose(
param.grad, ddp_param.grad ), f'Gradients not in sync when they should be:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})'
# Shuffle ddp_input on each iteration
torch.manual_seed(1337 + iteration )
lowercase = ddp_input[torch.randperm(len(UpperCAmelCase ) )]
def __UpperCAmelCase ( UpperCAmelCase )-> List[Any]:
"""simple docstring"""
lowercase ,lowercase ,lowercase = get_training_setup(UpperCAmelCase )
# Use a single batch
lowercase ,lowercase = next(iter(UpperCAmelCase ) ).values()
for iteration in range(3 ):
# Gather the distributed inputs and targs for the base model
lowercase ,lowercase = accelerator.gather((ddp_input, ddp_target) )
lowercase ,lowercase = input.to(accelerator.device ), target.to(accelerator.device )
# Perform our initial ground truth step in non "DDP"
step_model(UpperCAmelCase, UpperCAmelCase, UpperCAmelCase, UpperCAmelCase )
# Do "gradient accumulation" (noop)
if iteration % 2 == 0:
# Accumulate grads locally
with accelerator.no_sync(UpperCAmelCase ):
step_model(UpperCAmelCase, UpperCAmelCase, UpperCAmelCase, UpperCAmelCase )
else:
# Sync grads
step_model(UpperCAmelCase, UpperCAmelCase, UpperCAmelCase, UpperCAmelCase )
# DDP model and model should only be in sync when not (iteration % 2 == 0)
for param, ddp_param in zip(model.parameters(), ddp_model.parameters() ):
if not param.requires_grad:
continue
if iteration % 2 == 0:
# Grads should not be in sync
assert (
torch.allclose(param.grad, ddp_param.grad ) is False
), f'Gradients in sync when they should not be:\nModel grad ({param.grad}) == DDP grad ({ddp_param.grad})'
else:
# Grads should be in sync
assert (
torch.allclose(param.grad, ddp_param.grad ) is True
), f'Gradients not in sync when they should be:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})'
# Shuffle ddp_input on each iteration
torch.manual_seed(1337 + iteration )
lowercase = ddp_input[torch.randperm(len(UpperCAmelCase ) )]
def __UpperCAmelCase ( UpperCAmelCase=False, UpperCAmelCase=False )-> List[str]:
"""simple docstring"""
lowercase = Accelerator(
split_batches=UpperCAmelCase, dispatch_batches=UpperCAmelCase, gradient_accumulation_steps=2 )
# Test that context manager behaves properly
lowercase ,lowercase ,lowercase = get_training_setup(UpperCAmelCase )
for iteration, batch in enumerate(UpperCAmelCase ):
lowercase ,lowercase = batch.values()
# Gather the distributed inputs and targs for the base model
lowercase ,lowercase = accelerator.gather((ddp_input, ddp_target) )
lowercase ,lowercase = input.to(accelerator.device ), target.to(accelerator.device )
# Perform our initial ground truth step in non "DDP"
step_model(UpperCAmelCase, UpperCAmelCase, UpperCAmelCase, UpperCAmelCase, UpperCAmelCase )
# Do "gradient accumulation" (noop)
with accelerator.accumulate(UpperCAmelCase ):
step_model(UpperCAmelCase, UpperCAmelCase, UpperCAmelCase, UpperCAmelCase )
# DDP model and model should only be in sync when not (iteration % 2 == 0)
for param, ddp_param in zip(model.parameters(), ddp_model.parameters() ):
if not param.requires_grad:
continue
if ((iteration + 1) % 2 == 0) or (iteration == len(UpperCAmelCase ) - 1):
# Grads should be in sync
assert (
torch.allclose(param.grad, ddp_param.grad ) is True
), f'Gradients not in sync when they should be at iteration {iteration}:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})'
else:
# Grads should not be in sync
assert (
torch.allclose(param.grad, ddp_param.grad ) is False
), f'Gradients in sync when they should not be at iteration {iteration}:\nModel grad ({param.grad}) == DDP grad ({ddp_param.grad})'
# Shuffle ddp_input on each iteration
torch.manual_seed(1337 + iteration )
lowercase = ddp_input[torch.randperm(len(UpperCAmelCase ) )]
GradientState._reset_state()
def __UpperCAmelCase ( UpperCAmelCase=False, UpperCAmelCase=False )-> str:
"""simple docstring"""
lowercase = Accelerator(
split_batches=UpperCAmelCase, dispatch_batches=UpperCAmelCase, gradient_accumulation_steps=2 )
# Test that context manager behaves properly
lowercase ,lowercase ,lowercase ,lowercase ,lowercase ,lowercase ,lowercase = get_training_setup(UpperCAmelCase, UpperCAmelCase )
for iteration, batch in enumerate(UpperCAmelCase ):
lowercase ,lowercase = batch.values()
# Gather the distributed inputs and targs for the base model
lowercase ,lowercase = accelerator.gather((ddp_input, ddp_target) )
lowercase ,lowercase = input.to(accelerator.device ), target.to(accelerator.device )
# Perform our initial ground truth step in non "DDP"
model.train()
ddp_model.train()
step_model(UpperCAmelCase, UpperCAmelCase, UpperCAmelCase, UpperCAmelCase, UpperCAmelCase )
opt.step()
if ((iteration + 1) % 2 == 0) or ((iteration + 1) == len(UpperCAmelCase )):
if split_batches:
sched.step()
else:
for _ in range(accelerator.num_processes ):
sched.step()
opt.zero_grad()
# Perform gradient accumulation under wrapper
with accelerator.accumulate(UpperCAmelCase ):
step_model(UpperCAmelCase, UpperCAmelCase, UpperCAmelCase, UpperCAmelCase )
ddp_opt.step()
ddp_sched.step()
ddp_opt.zero_grad()
# Learning rates should be the same
        assert (
            opt.param_groups[0]["lr"] == ddp_opt.param_groups[0]["lr"]
        ), f"Learning rates found in each optimizer did not align\nopt: {opt.param_groups[0]['lr']}\nDDP opt: {ddp_opt.param_groups[0]['lr']}\n"
lowercase = (((iteration + 1) % 2) == 0) or ((iteration + 1) == len(UpperCAmelCase ))
if accelerator.num_processes > 1:
check_model_parameters(UpperCAmelCase, UpperCAmelCase, UpperCAmelCase, UpperCAmelCase )
# Shuffle ddp_input on each iteration
torch.manual_seed(1337 + iteration )
GradientState._reset_state()
def __UpperCAmelCase ( )-> Tuple:
"""simple docstring"""
lowercase = Accelerator()
lowercase = RegressionDataset(length=80 )
lowercase = DataLoader(UpperCAmelCase, batch_size=16 )
lowercase = RegressionDataset(length=96 )
lowercase = DataLoader(UpperCAmelCase, batch_size=16 )
lowercase ,lowercase = accelerator.prepare(UpperCAmelCase, UpperCAmelCase )
assert accelerator.gradient_state.active_dataloader is None
for iteration, _ in enumerate(UpperCAmelCase ):
assert id(accelerator.gradient_state.active_dataloader ) == id(UpperCAmelCase )
if iteration < len(UpperCAmelCase ) - 1:
assert not accelerator.gradient_state.end_of_dataloader
if iteration == 1:
for batch_num, _ in enumerate(UpperCAmelCase ):
assert id(accelerator.gradient_state.active_dataloader ) == id(UpperCAmelCase )
if batch_num < len(UpperCAmelCase ) - 1:
assert not accelerator.gradient_state.end_of_dataloader
else:
assert accelerator.gradient_state.end_of_dataloader
else:
assert accelerator.gradient_state.end_of_dataloader
assert accelerator.gradient_state.active_dataloader is None
def __UpperCAmelCase ( )-> int:
"""simple docstring"""
lowercase = Accelerator()
lowercase = accelerator.state
if state.local_process_index == 0:
print('''**Test `accumulate` gradient accumulation with dataloader break**''' )
test_dataloader_break()
if state.distributed_type == DistributedType.NO:
if state.local_process_index == 0:
print('''**Test NOOP `no_sync` context manager**''' )
test_noop_sync(UpperCAmelCase )
if state.distributed_type in (DistributedType.MULTI_GPU, DistributedType.MULTI_CPU):
if state.local_process_index == 0:
print('''**Test Distributed `no_sync` context manager**''' )
test_distributed_sync(UpperCAmelCase )
if state.distributed_type == DistributedType.MULTI_GPU:
for split_batch in [True, False]:
for dispatch_batches in [True, False]:
if state.local_process_index == 0:
print(
'''**Test `accumulate` gradient accumulation, ''', f'`split_batches={split_batch}` and `dispatch_batches={dispatch_batches}`**', )
test_gradient_accumulation(UpperCAmelCase, UpperCAmelCase )
# Currently will break on torch 2.0 +, need to investigate why
if is_torch_version('''<''', '''2.0''' ) or state.distributed_type == DistributedType.NO:
if state.local_process_index == 0:
print(
'''**Test `accumulate` gradient accumulation with optimizer and scheduler, ''', '''`split_batches=False`, `dispatch_batches=False`**''', )
test_gradient_accumulation_with_opt_and_scheduler()
if state.distributed_type == DistributedType.MULTI_GPU:
for split_batch in [True, False]:
for dispatch_batches in [True, False]:
if not split_batch and not dispatch_batches:
continue
if state.local_process_index == 0:
print(
'''**Test `accumulate` gradient accumulation with optimizer and scheduler, ''', f'`split_batches={split_batch}` and `dispatch_batches={dispatch_batches}`**', )
test_gradient_accumulation_with_opt_and_scheduler(UpperCAmelCase, UpperCAmelCase )
def __UpperCAmelCase ( UpperCAmelCase )-> int:
"""simple docstring"""
main()
if __name__ == "__main__":
main()
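# The user-facing shape of what these tests exercise, as a hedged sketch. The
# objects are assumed to have already gone through accelerator.prepare(), the
# "x"/"y" batch keys follow RegressionDataset above, and F is
# torch.nn.functional as imported at the top of this file.
def train_with_accumulation(accelerator, model, optimizer, dataloader):
    for batch in dataloader:
        with accelerator.accumulate(model):  # grads sync only on step boundaries
            output = model(batch["x"])
            loss = F.mse_loss(output, batch["y"].to(output.device))
            accelerator.backward(loss)
            optimizer.step()
            optimizer.zero_grad()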
| 604 |
def solution(length: int = 50) -> int:
    """simple docstring"""
    # Project Euler 116: count the ways to replace black squares in a row of
    # `length` with coloured oblong tiles of one fixed size (red = 2,
    # green = 3, blue = 4), using at least one tile per colouring.
    different_colour_ways_number = [[0] * 3 for _ in range(length + 1)]
    for row_length in range(length + 1):
        for tile_length in range(2, 5):
            # place the first tile at every offset; the remainder can be
            # filled in f(remainder) ways with tiles, plus one all-black way
            for tile_start in range(row_length - tile_length + 1):
                different_colour_ways_number[row_length][tile_length - 2] += (
                    different_colour_ways_number[row_length - tile_start - tile_length][
                        tile_length - 2
                    ]
                    + 1
                )
    return sum(different_colour_ways_number[length])
if __name__ == "__main__":
print(F"{solution() = }")
| 604 | 1 |
'''simple docstring'''
from queue import Queue
from typing import TYPE_CHECKING, Optional
if TYPE_CHECKING:
from ..models.auto import AutoTokenizer
class BaseStreamer:
    def put( self , value ):
        raise NotImplementedError()

    def end( self ):
        raise NotImplementedError()
class TextStreamer( BaseStreamer ):
    def __init__( self , tokenizer: "AutoTokenizer" , skip_prompt: bool = False , **decode_kwargs ):
        self.tokenizer = tokenizer
        self.skip_prompt = skip_prompt
        self.decode_kwargs = decode_kwargs
        # variables used in the streaming process
        self.token_cache = []
        self.print_len = 0
        self.next_tokens_are_prompt = True
    def put( self , value ):
        if len(value.shape ) > 1 and value.shape[0] > 1:
            raise ValueError("""TextStreamer only supports batch size 1""" )
        elif len(value.shape ) > 1:
            value = value[0]
        if self.skip_prompt and self.next_tokens_are_prompt:
            self.next_tokens_are_prompt = False
            return
        # Add the new token to the cache and decodes the entire thing.
        self.token_cache.extend(value.tolist() )
        text = self.tokenizer.decode(self.token_cache , **self.decode_kwargs )
        # After the symbol for a new line, we flush the cache.
        if text.endswith("""\n""" ):
            printable_text = text[self.print_len :]
            self.token_cache = []
            self.print_len = 0
        # If the last token is a CJK character, we print the characters.
        elif len(text ) > 0 and self._is_chinese_char(ord(text[-1] ) ):
            printable_text = text[self.print_len :]
            self.print_len += len(printable_text )
        # Otherwise, prints until the last space char (simple heuristic to avoid printing incomplete words,
        # which may change with the subsequent token -- there are probably smarter ways to do this!)
        else:
            printable_text = text[self.print_len : text.rfind(""" """ ) + 1]
            self.print_len += len(printable_text )
        self.on_finalized_text(printable_text )
    def end( self ):
        # Flush the cache, if it exists
        if len(self.token_cache ) > 0:
            text = self.tokenizer.decode(self.token_cache , **self.decode_kwargs )
            printable_text = text[self.print_len :]
            self.token_cache = []
            self.print_len = 0
        else:
            printable_text = """"""
        self.next_tokens_are_prompt = True
        self.on_finalized_text(printable_text , stream_end=True )

    def on_finalized_text( self , text: str , stream_end: bool = False ):
        print(text , flush=True , end="""""" if not stream_end else None )
    def _is_chinese_char( self , cp ):
# This defines a "chinese character" as anything in the CJK Unicode block:
# https://en.wikipedia.org/wiki/CJK_Unified_Ideographs_(Unicode_block)
#
# Note that the CJK Unicode block is NOT all Japanese and Korean characters,
# despite its name. The modern Korean Hangul alphabet is a different block,
# as is Japanese Hiragana and Katakana. Those alphabets are used to write
# space-separated words, so they are not treated specially and handled
# like the all of the other languages.
if (
(cp >= 0X4e00 and cp <= 0X9fff)
or (cp >= 0X3400 and cp <= 0X4dbf) #
or (cp >= 0X20000 and cp <= 0X2a6df) #
or (cp >= 0X2a700 and cp <= 0X2b73f) #
or (cp >= 0X2b740 and cp <= 0X2b81f) #
or (cp >= 0X2b820 and cp <= 0X2ceaf) #
or (cp >= 0Xf900 and cp <= 0Xfaff)
or (cp >= 0X2f800 and cp <= 0X2fa1f) #
): #
return True
return False
class UpperCamelCase_ ( __magic_name__ ):
def __init__( self , A , A = False , A = None , **A ) -> Dict:
super().__init__(A , A , **A )
UpperCAmelCase : Optional[int] = Queue()
UpperCAmelCase : Dict = None
UpperCAmelCase : List[str] = timeout
def _lowercase( self , A , A = False ) -> Union[str, Any]:
self.text_queue.put(A , timeout=self.timeout )
if stream_end:
self.text_queue.put(self.stop_signal , timeout=self.timeout )
def __iter__( self ) -> List[str]:
return self
def _lowercase( self ) -> Union[str, Any]:
UpperCAmelCase : Optional[int] = self.text_queue.get(timeout=self.timeout )
if value == self.stop_signal:
raise StopIteration()
else:
return value
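

# A hedged usage sketch, not part of the original file (the model name and
# generation kwargs are illustrative only):
#
#   from threading import Thread
#   from transformers import AutoModelForCausalLM, AutoTokenizer
#
#   tok = AutoTokenizer.from_pretrained("gpt2")
#   model = AutoModelForCausalLM.from_pretrained("gpt2")
#   streamer = TextIteratorStreamer(tok)
#   inputs = tok(["A sequence: one,"], return_tensors="pt")
#   Thread(target=model.generate, kwargs=dict(**inputs, streamer=streamer, max_new_tokens=10)).start()
#   for new_text in streamer:
#       print(new_text, end="")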
| 672 |
'''Slow integration test for the VersatileDiffusion image-variation pipeline.'''
import unittest
import numpy as np
import torch
from diffusers import VersatileDiffusionImageVariationPipeline
from diffusers.utils.testing_utils import load_image, require_torch_gpu, slow, torch_device
torch.backends.cuda.matmul.allow_tf32 = False


class VersatileDiffusionImageVariationPipelineFastTests(unittest.TestCase):
    pass


@slow
@require_torch_gpu
class VersatileDiffusionImageVariationPipelineIntegrationTests(unittest.TestCase):
    def test_inference_image_variations(self):
        pipe = VersatileDiffusionImageVariationPipeline.from_pretrained("shi-labs/versatile-diffusion")
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        image_prompt = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg"
        )
        generator = torch.manual_seed(0)
        image = pipe(
            image=image_prompt,
            generator=generator,
            guidance_scale=7.5,
            num_inference_steps=50,
            output_type="numpy",
        ).images

        image_slice = image[0, 253:256, 253:256, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.0441, 0.0469, 0.0507, 0.0575, 0.0632, 0.0650, 0.0865, 0.0909, 0.0945])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
| 672 | 1 |
from typing import List, Union
import numpy as np
from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging, requires_backends
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
import torch
from ..models.auto.modeling_auto import MODEL_FOR_DEPTH_ESTIMATION_MAPPING
logger = logging.get_logger(__name__)


@add_end_docstrings(PIPELINE_INIT_ARGS)
class DepthEstimationPipeline(Pipeline):
    """
    Depth estimation pipeline using any `AutoModelForDepthEstimation`. This pipeline predicts the depth of an image.
    """

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        requires_backends(self, "vision")
        self.check_model_type(MODEL_FOR_DEPTH_ESTIMATION_MAPPING)

    def __call__(self, images: Union[str, List[str], "Image.Image", List["Image.Image"]], **kwargs):
        return super().__call__(images, **kwargs)

    def _sanitize_parameters(self, **kwargs):
        return {}, {}, {}

    def preprocess(self, image):
        image = load_image(image)
        self.image_size = image.size
        model_inputs = self.image_processor(images=image, return_tensors=self.framework)
        return model_inputs

    def _forward(self, model_inputs):
        model_outputs = self.model(**model_inputs)
        return model_outputs

    def postprocess(self, model_outputs):
        predicted_depth = model_outputs.predicted_depth
        prediction = torch.nn.functional.interpolate(
            predicted_depth.unsqueeze(1), size=self.image_size[::-1], mode="bicubic", align_corners=False
        )
        output = prediction.squeeze().cpu().numpy()
        formatted = (output * 255 / np.max(output)).astype("uint8")
        depth = Image.fromarray(formatted)

        output_dict = {}
        output_dict["predicted_depth"] = predicted_depth
        output_dict["depth"] = depth
        return output_dict
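

# A hedged usage sketch (checkpoint and file names are illustrative, not from
# this file):
#
#   from transformers import pipeline
#
#   depth_estimator = pipeline("depth-estimation", model="Intel/dpt-large")
#   out = depth_estimator("path/to/image.jpg")
#   out["depth"].save("depth.png")          # PIL image
#   print(out["predicted_depth"].shape)     # raw torch tensor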
| 37 |
"""simple docstring"""
from __future__ import annotations
class __magic_name__ :
def __init__( self , __magic_name__ ):
"""simple docstring"""
_lowerCAmelCase = order
# a_{0} ... a_{k}
_lowerCAmelCase = [1.0] + [0.0] * order
# b_{0} ... b_{k}
_lowerCAmelCase = [1.0] + [0.0] * order
# x[n-1] ... x[n-k]
_lowerCAmelCase = [0.0] * self.order
# y[n-1] ... y[n-k]
_lowerCAmelCase = [0.0] * self.order
def _lowerCamelCase ( self , __magic_name__ , __magic_name__ ):
"""simple docstring"""
if len(__magic_name__ ) < self.order:
_lowerCAmelCase = [1.0, *a_coeffs]
if len(__magic_name__ ) != self.order + 1:
_lowerCAmelCase = (
F'''Expected a_coeffs to have {self.order + 1} elements '''
F'''for {self.order}-order filter, got {len(__magic_name__ )}'''
)
raise ValueError(__magic_name__ )
if len(__magic_name__ ) != self.order + 1:
_lowerCAmelCase = (
F'''Expected b_coeffs to have {self.order + 1} elements '''
F'''for {self.order}-order filter, got {len(__magic_name__ )}'''
)
raise ValueError(__magic_name__ )
_lowerCAmelCase = a_coeffs
_lowerCAmelCase = b_coeffs
def _lowerCamelCase ( self , __magic_name__ ):
"""simple docstring"""
_lowerCAmelCase = 0.0
# Start at index 1 and do index 0 at the end.
for i in range(1 , self.order + 1 ):
result += (
self.b_coeffs[i] * self.input_history[i - 1]
- self.a_coeffs[i] * self.output_history[i - 1]
)
_lowerCAmelCase = (result + self.b_coeffs[0] * sample) / self.a_coeffs[0]
_lowerCAmelCase = self.input_history[:-1]
_lowerCAmelCase = self.output_history[:-1]
_lowerCAmelCase = sample
_lowerCAmelCase = result
return result
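

# A minimal sketch: with the default coefficients (a = b = [1.0, 0, ...]) the
# filter reduces to the identity, so every sample passes through unchanged.
#
#   filt = IIRFilter(2)
#   assert filt.process(0.5) == 0.5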
| 589 | 0 |
from unittest.mock import patch
import pyspark
from datasets.packaged_modules.spark.spark import (
Spark,
SparkExamplesIterable,
_generate_iterable_examples,
)
from ..utils import (
require_dill_gt_0_3_2,
require_not_windows,
)
def _get_expected_row_ids_and_row_dicts_for_partition_order(df, partition_order):
    expected_row_ids_and_row_dicts = []
    for part_id in partition_order:
        partition = df.where(f"SPARK_PARTITION_ID() = {part_id}").collect()
        for row_idx, row in enumerate(partition):
            expected_row_ids_and_row_dicts.append((f"{part_id}_{row_idx}", row.asDict()))
    return expected_row_ids_and_row_dicts


@require_not_windows
@require_dill_gt_0_3_2
def test_repartition_df_if_needed():
    spark = pyspark.sql.SparkSession.builder.master("local[*]").appName("pyspark").getOrCreate()
    df = spark.range(100).repartition(1)
    spark_builder = Spark(df)
    # The id ints will be converted to Pyarrow int64s, so each row will be 8 bytes. Setting a max_shard_size of 16 means
    # that each partition can hold 2 rows.
    spark_builder._repartition_df_if_needed(max_shard_size=16)
    # Given that the dataframe has 100 rows and each partition has 2 rows, we expect 50 partitions.
    assert spark_builder.df.rdd.getNumPartitions() == 50


@require_not_windows
@require_dill_gt_0_3_2
def test_generate_iterable_examples():
    spark = pyspark.sql.SparkSession.builder.master("local[*]").appName("pyspark").getOrCreate()
    df = spark.range(10).repartition(2)
    partition_order = [1, 0]
    generate_fn = _generate_iterable_examples(df, partition_order)  # Reverse the partitions.
    expected_row_ids_and_row_dicts = _get_expected_row_ids_and_row_dicts_for_partition_order(df, partition_order)

    for i, (row_id, row_dict) in enumerate(generate_fn()):
        expected_row_id, expected_row_dict = expected_row_ids_and_row_dicts[i]
        assert row_id == expected_row_id
        assert row_dict == expected_row_dict


@require_not_windows
@require_dill_gt_0_3_2
def test_spark_examples_iterable():
    spark = pyspark.sql.SparkSession.builder.master("local[*]").appName("pyspark").getOrCreate()
    df = spark.range(10).repartition(1)
    it = SparkExamplesIterable(df)
    assert it.n_shards == 1
    for i, (row_id, row_dict) in enumerate(it):
        assert row_id == f"0_{i}"
        assert row_dict == {"id": i}


@require_not_windows
@require_dill_gt_0_3_2
def test_spark_examples_iterable_shuffle():
    spark = pyspark.sql.SparkSession.builder.master("local[*]").appName("pyspark").getOrCreate()
    df = spark.range(30).repartition(3)
    # Mock the generator so that shuffle reverses the partition indices.
    with patch("numpy.random.Generator") as generator_mock:
        generator_mock.shuffle.side_effect = lambda x: x.reverse()
        expected_row_ids_and_row_dicts = _get_expected_row_ids_and_row_dicts_for_partition_order(df, [2, 1, 0])

        shuffled_it = SparkExamplesIterable(df).shuffle_data_sources(generator_mock)
        assert shuffled_it.n_shards == 3
        for i, (row_id, row_dict) in enumerate(shuffled_it):
            expected_row_id, expected_row_dict = expected_row_ids_and_row_dicts[i]
            assert row_id == expected_row_id
            assert row_dict == expected_row_dict


@require_not_windows
@require_dill_gt_0_3_2
def test_spark_examples_iterable_shard():
    spark = pyspark.sql.SparkSession.builder.master("local[*]").appName("pyspark").getOrCreate()
    df = spark.range(20).repartition(4)

    # Partitions 0 and 2
    shard_it_1 = SparkExamplesIterable(df).shard_data_sources(worker_id=0, num_workers=2)
    assert shard_it_1.n_shards == 2
    expected_row_ids_and_row_dicts_1 = _get_expected_row_ids_and_row_dicts_for_partition_order(df, [0, 2])
    for i, (row_id, row_dict) in enumerate(shard_it_1):
        expected_row_id, expected_row_dict = expected_row_ids_and_row_dicts_1[i]
        assert row_id == expected_row_id
        assert row_dict == expected_row_dict

    # Partitions 1 and 3
    shard_it_2 = SparkExamplesIterable(df).shard_data_sources(worker_id=1, num_workers=2)
    assert shard_it_2.n_shards == 2
    expected_row_ids_and_row_dicts_2 = _get_expected_row_ids_and_row_dicts_for_partition_order(df, [1, 3])
    for i, (row_id, row_dict) in enumerate(shard_it_2):
        expected_row_id, expected_row_dict = expected_row_ids_and_row_dicts_2[i]
        assert row_id == expected_row_id
        assert row_dict == expected_row_dict


@require_not_windows
@require_dill_gt_0_3_2
def test_repartition_df_if_needed_max_num_df_rows():
    spark = pyspark.sql.SparkSession.builder.master("local[*]").appName("pyspark").getOrCreate()
    df = spark.range(100).repartition(1)
    spark_builder = Spark(df)
    # Choose a small max_shard_size for maximum partitioning.
    spark_builder._repartition_df_if_needed(max_shard_size=1)
    # The new number of partitions should not be greater than the number of rows.
    assert spark_builder.df.rdd.getNumPartitions() == 100
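

# A hedged usage sketch of the Spark integration these tests exercise
# (`Dataset.from_spark` is the public entry point):
#
#   from datasets import Dataset
#
#   df = spark.createDataFrame([{"id": i} for i in range(10)])
#   ds = Dataset.from_spark(df)  # materializes the dataframe as a HF dataset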
| 707 |
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch
if is_torch_available():
import torch
from transformers.activations import gelu_new, gelu_python, get_activation
@require_torch
class TestActivations(unittest.TestCase):
    def test_gelu_versions(self):
        x = torch.tensor([-100, -1, -0.1, 0, 0.1, 1.0, 100])
        torch_builtin = get_activation("gelu")
        self.assertTrue(torch.allclose(gelu_python(x), torch_builtin(x)))
        self.assertFalse(torch.allclose(gelu_python(x), gelu_new(x)))

    def test_gelu_10(self):
        x = torch.tensor([-100, -1, -0.1, 0, 0.1, 1.0, 100])
        torch_builtin = get_activation("gelu")
        gelu10 = get_activation("gelu_10")

        y_gelu = torch_builtin(x)
        y_gelu_10 = gelu10(x)

        clipped_mask = torch.where(y_gelu_10 < 10.0, 1, 0)

        self.assertTrue(torch.max(y_gelu_10).item() == 10.0)
        self.assertTrue(torch.allclose(y_gelu * clipped_mask, y_gelu_10 * clipped_mask))

    def test_get_activation(self):
        get_activation("gelu")
        get_activation("gelu_10")
        get_activation("gelu_fast")
        get_activation("gelu_new")
        get_activation("gelu_python")
        get_activation("gelu_pytorch_tanh")
        get_activation("linear")
        get_activation("mish")
        get_activation("quick_gelu")
        get_activation("relu")
        get_activation("sigmoid")
        get_activation("silu")
        get_activation("swish")
        get_activation("tanh")
        with self.assertRaises(KeyError):
            get_activation("bogus")
        with self.assertRaises(KeyError):
            get_activation(None)

    def test_activations_are_distinct_objects(self):
        act1 = get_activation("gelu")
        act1.a = 1
        act2 = get_activation("gelu")
        self.assertEqual(act1.a, 1)
        with self.assertRaises(AttributeError):
            _ = act2.a
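

# The public API under test, in two lines (names taken from the assertions above):
#
#   act = get_activation("silu")
#   y = act(torch.randn(4))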
| 25 | 0 |
"""simple docstring"""
import math
def __A ( a_ :int) -> bool:
if 1 < number < 4:
# 2 and 3 are primes
return True
elif number < 2 or number % 2 == 0 or number % 3 == 0:
# Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
return False
# All primes number are in format of 6k +/- 1
for i in range(5 , int(math.sqrt(a_) + 1) , 6):
if number % i == 0 or number % (i + 2) == 0:
return False
return True
def __A ( a_ :float = 0.1) -> int:
__a : List[str] = 3
__a : List[Any] = 3
while primes / (2 * j - 1) >= ratio:
for i in range(j * j + j + 1 , (j + 2) * (j + 2) , j + 1):
primes += is_prime(a_)
j += 2
return j
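

# Note on the loop above: the ring with odd side length j + 2 has its corners
# at (j + 2)**2 - k * (j + 1) for k = 0..3; the range() enumerates the three
# non-square corners (the fourth, (j + 2)**2 itself, is a perfect square and
# never prime).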
if __name__ == "__main__":
import doctest
doctest.testmod() | 52 |
"""simple docstring"""
def __A ( a_ :Tuple , a_ :Union[str, Any] , a_ :int=False) -> List[str]:
if isinstance(a_ , a_) and isinstance(a_ , a_):
__a : List[str] = len(set_a.intersection(a_))
if alternative_union:
__a : List[str] = len(a_) + len(a_)
else:
__a : int = len(set_a.union(a_))
return intersection / union
if isinstance(a_ , (list, tuple)) and isinstance(a_ , (list, tuple)):
__a : Union[str, Any] = [element for element in set_a if element in set_b]
if alternative_union:
__a : Union[str, Any] = len(a_) + len(a_)
return len(a_) / union
else:
__a : List[Any] = set_a + [element for element in set_b if element not in set_a]
return len(a_) / len(a_)
return len(a_) / len(a_)
return None
if __name__ == "__main__":
A = {'''a''', '''b''', '''c''', '''d''', '''e'''}
A = {'''c''', '''d''', '''e''', '''f''', '''h''', '''i'''}
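    # Worked numbers: the intersection is {'c', 'd', 'e'} (size 3) and the
    # union has 8 elements, so the printed similarity is 3 / 8 = 0.375.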
print(jaccard_similarity(set_a, set_b)) | 52 | 1 |
'''Forward (explicit) Euler method for first-order ODEs.'''
from collections.abc import Callable

import numpy as np


def explicit_euler(
    ode_func: Callable, y0: float, x0: float, step_size: float, x_end: float
) -> np.ndarray:
    """Solve dy/dx = ode_func(x, y) on [x0, x_end] with the explicit Euler method."""
    n = int(np.ceil((x_end - x0) / step_size))
    y = np.zeros((n + 1,))
    y[0] = y0
    x = x0

    for k in range(n):
        y[k + 1] = y[k] + step_size * ode_func(x, y[k])
        x += step_size

    return y
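

# A minimal sketch: integrating y' = y from x = 0 to 1 with y(0) = 1 should
# approach e ~= 2.718 as step_size shrinks (about 2.7048 with step 0.01):
#
#   ys = explicit_euler(lambda x, y: y, 1.0, 0.0, 0.01, 1.0)
#   print(ys[-1])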
if __name__ == "__main__":
import doctest
doctest.testmod()
| 715 |
import re
import string
from collections import Counter
import sacrebleu
import sacremoses
from packaging import version
import datasets
_CITATION = '''
@inproceedings{xu-etal-2016-optimizing,
title = {Optimizing Statistical Machine Translation for Text Simplification},
authors={Xu, Wei and Napoles, Courtney and Pavlick, Ellie and Chen, Quanze and Callison-Burch, Chris},
journal = {Transactions of the Association for Computational Linguistics},
volume = {4},
year={2016},
url = {https://www.aclweb.org/anthology/Q16-1029},
    pages = {401--415},
}
@inproceedings{post-2018-call,
title = "A Call for Clarity in Reporting {BLEU} Scores",
author = "Post, Matt",
booktitle = "Proceedings of the Third Conference on Machine Translation: Research Papers",
month = oct,
year = "2018",
address = "Belgium, Brussels",
publisher = "Association for Computational Linguistics",
url = "https://www.aclweb.org/anthology/W18-6319",
pages = "186--191",
}
'''
_DESCRIPTION = '''\
WIKI_SPLIT is the combination of three metrics SARI, EXACT and SACREBLEU
It can be used to evaluate the quality of machine-generated texts.
'''
_KWARGS_DESCRIPTION = '''
Calculates sari score (between 0 and 100) given a list of source and predicted
sentences, and a list of lists of reference sentences. It also computes the BLEU score as well as the exact match score.
Args:
sources: list of source sentences where each sentence should be a string.
predictions: list of predicted sentences where each sentence should be a string.
references: list of lists of reference sentences where each sentence should be a string.
Returns:
sari: sari score
sacrebleu: sacrebleu score
exact: exact score
Examples:
>>> sources=["About 95 species are currently accepted ."]
>>> predictions=["About 95 you now get in ."]
>>> references=[["About 95 species are currently known ."]]
>>> wiki_split = datasets.load_metric("wiki_split")
>>> results = wiki_split.compute(sources=sources, predictions=predictions, references=references)
>>> print(results)
{\'sari\': 21.805555555555557, \'sacrebleu\': 14.535768424205482, \'exact\': 0.0}
'''
def normalize_answer(s):
    """Lower text and remove punctuation, articles and extra whitespace."""

    def remove_articles(text):
        regex = re.compile(r"\b(a|an|the)\b", re.UNICODE)
        return re.sub(regex, " ", text)

    def white_space_fix(text):
        return " ".join(text.split())

    def remove_punc(text):
        exclude = set(string.punctuation)
        return "".join(ch for ch in text if ch not in exclude)

    def lower(text):
        return text.lower()

    return white_space_fix(remove_articles(remove_punc(lower(s))))
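

# For instance, normalize_answer("The Cat!") == "cat": case, punctuation and
# articles are stripped before whitespace is collapsed.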
def compute_exact(a_gold, a_pred):
    return int(normalize_answer(a_gold) == normalize_answer(a_pred))


def compute_em(predictions, references):
    scores = [any(compute_exact(ref, pred) for ref in refs) for pred, refs in zip(predictions, references)]
    return (sum(scores) / len(scores)) * 100
def SARIngram(sgrams, cgrams, rgramslist, numref):
    rgramsall = [rgram for rgrams in rgramslist for rgram in rgrams]
    rgramcounter = Counter(rgramsall)

    sgramcounter = Counter(sgrams)
    sgramcounter_rep = Counter()
    for sgram, scount in sgramcounter.items():
        sgramcounter_rep[sgram] = scount * numref

    cgramcounter = Counter(cgrams)
    cgramcounter_rep = Counter()
    for cgram, ccount in cgramcounter.items():
        cgramcounter_rep[cgram] = ccount * numref

    # KEEP
    keepgramcounter_rep = sgramcounter_rep & cgramcounter_rep
    keepgramcountergood_rep = keepgramcounter_rep & rgramcounter
    keepgramcounterall_rep = sgramcounter_rep & rgramcounter

    keeptmpscore1 = 0
    keeptmpscore2 = 0
    for keepgram in keepgramcountergood_rep:
        keeptmpscore1 += keepgramcountergood_rep[keepgram] / keepgramcounter_rep[keepgram]
        # Fix an alleged bug [2] in the keep score computation.
        # keeptmpscore2 += keepgramcountergood_rep[keepgram] / keepgramcounterall_rep[keepgram]
        keeptmpscore2 += keepgramcountergood_rep[keepgram]

    # Define 0/0=1 instead of 0 to give higher scores for predictions that match
    # a target exactly.
    keepscore_precision = 1
    keepscore_recall = 1
    if len(keepgramcounter_rep) > 0:
        keepscore_precision = keeptmpscore1 / len(keepgramcounter_rep)
    if len(keepgramcounterall_rep) > 0:
        # Fix an alleged bug [2] in the keep score computation.
        # keepscore_recall = keeptmpscore2 / len(keepgramcounterall_rep)
        keepscore_recall = keeptmpscore2 / sum(keepgramcounterall_rep.values())
    keepscore = 0
    if keepscore_precision > 0 or keepscore_recall > 0:
        keepscore = 2 * keepscore_precision * keepscore_recall / (keepscore_precision + keepscore_recall)

    # DELETION
    delgramcounter_rep = sgramcounter_rep - cgramcounter_rep
    delgramcountergood_rep = delgramcounter_rep - rgramcounter
    delgramcounterall_rep = sgramcounter_rep - rgramcounter
    deltmpscore1 = 0
    deltmpscore2 = 0
    for delgram in delgramcountergood_rep:
        deltmpscore1 += delgramcountergood_rep[delgram] / delgramcounter_rep[delgram]
        deltmpscore2 += delgramcountergood_rep[delgram] / delgramcounterall_rep[delgram]
    # Define 0/0=1 instead of 0 to give higher scores for predictions that match
    # a target exactly.
    delscore_precision = 1
    if len(delgramcounter_rep) > 0:
        delscore_precision = deltmpscore1 / len(delgramcounter_rep)

    # ADDITION
    addgramcounter = set(cgramcounter) - set(sgramcounter)
    addgramcountergood = set(addgramcounter) & set(rgramcounter)
    addgramcounterall = set(rgramcounter) - set(sgramcounter)

    addtmpscore = 0
    for addgram in addgramcountergood:
        addtmpscore += 1

    # Define 0/0=1 instead of 0 to give higher scores for predictions that match
    # a target exactly.
    addscore_precision = 1
    addscore_recall = 1
    if len(addgramcounter) > 0:
        addscore_precision = addtmpscore / len(addgramcounter)
    if len(addgramcounterall) > 0:
        addscore_recall = addtmpscore / len(addgramcounterall)
    addscore = 0
    if addscore_precision > 0 or addscore_recall > 0:
        addscore = 2 * addscore_precision * addscore_recall / (addscore_precision + addscore_recall)

    return (keepscore, delscore_precision, addscore)
def SARIsent(ssent, csent, rsents):
    numref = len(rsents)

    s1grams = ssent.split(" ")
    c1grams = csent.split(" ")
    s2grams = []
    c2grams = []
    s3grams = []
    c3grams = []
    s4grams = []
    c4grams = []

    r1gramslist = []
    r2gramslist = []
    r3gramslist = []
    r4gramslist = []
    for rsent in rsents:
        r1grams = rsent.split(" ")
        r2grams = []
        r3grams = []
        r4grams = []
        r1gramslist.append(r1grams)
        for i in range(0, len(r1grams) - 1):
            if i < len(r1grams) - 1:
                r2gram = r1grams[i] + " " + r1grams[i + 1]
                r2grams.append(r2gram)
            if i < len(r1grams) - 2:
                r3gram = r1grams[i] + " " + r1grams[i + 1] + " " + r1grams[i + 2]
                r3grams.append(r3gram)
            if i < len(r1grams) - 3:
                r4gram = r1grams[i] + " " + r1grams[i + 1] + " " + r1grams[i + 2] + " " + r1grams[i + 3]
                r4grams.append(r4gram)
        r2gramslist.append(r2grams)
        r3gramslist.append(r3grams)
        r4gramslist.append(r4grams)

    for i in range(0, len(s1grams) - 1):
        if i < len(s1grams) - 1:
            s2gram = s1grams[i] + " " + s1grams[i + 1]
            s2grams.append(s2gram)
        if i < len(s1grams) - 2:
            s3gram = s1grams[i] + " " + s1grams[i + 1] + " " + s1grams[i + 2]
            s3grams.append(s3gram)
        if i < len(s1grams) - 3:
            s4gram = s1grams[i] + " " + s1grams[i + 1] + " " + s1grams[i + 2] + " " + s1grams[i + 3]
            s4grams.append(s4gram)

    for i in range(0, len(c1grams) - 1):
        if i < len(c1grams) - 1:
            c2gram = c1grams[i] + " " + c1grams[i + 1]
            c2grams.append(c2gram)
        if i < len(c1grams) - 2:
            c3gram = c1grams[i] + " " + c1grams[i + 1] + " " + c1grams[i + 2]
            c3grams.append(c3gram)
        if i < len(c1grams) - 3:
            c4gram = c1grams[i] + " " + c1grams[i + 1] + " " + c1grams[i + 2] + " " + c1grams[i + 3]
            c4grams.append(c4gram)

    (keep1score, del1score, add1score) = SARIngram(s1grams, c1grams, r1gramslist, numref)
    (keep2score, del2score, add2score) = SARIngram(s2grams, c2grams, r2gramslist, numref)
    (keep3score, del3score, add3score) = SARIngram(s3grams, c3grams, r3gramslist, numref)
    (keep4score, del4score, add4score) = SARIngram(s4grams, c4grams, r4gramslist, numref)
    avgkeepscore = sum([keep1score, keep2score, keep3score, keep4score]) / 4
    avgdelscore = sum([del1score, del2score, del3score, del4score]) / 4
    avgaddscore = sum([add1score, add2score, add3score, add4score]) / 4
    finalscore = (avgkeepscore + avgdelscore + avgaddscore) / 3
    return finalscore
def normalize(sentence, lowercase: bool = True, tokenizer: str = "13a", return_str: bool = True):
    # Normalization is required for the ASSET dataset (one of the primary
    # datasets in sentence simplification) to allow using space
    # to split the sentence. Even though Wiki-Auto and TURK datasets,
    # do not require normalization, we do it for consistency.
    # Code adapted from the EASSE library [1] written by the authors of the ASSET dataset.
    # [1] https://github.com/feralvam/easse/blob/580bba7e1378fc8289c663f864e0487188fe8067/easse/utils/preprocessing.py#L7
    if lowercase:
        sentence = sentence.lower()

    if tokenizer in ["13a", "intl"]:
        if version.parse(sacrebleu.__version__).major >= 2:
            normalized_sent = sacrebleu.metrics.bleu._get_tokenizer(tokenizer)()(sentence)
        else:
            normalized_sent = sacrebleu.TOKENIZERS[tokenizer]()(sentence)
    elif tokenizer == "moses":
        normalized_sent = sacremoses.MosesTokenizer().tokenize(sentence, return_str=True, escape=False)
    elif tokenizer == "penn":
        normalized_sent = sacremoses.MosesTokenizer().penn_tokenize(sentence, return_str=True)
    else:
        normalized_sent = sentence

    if not return_str:
        normalized_sent = normalized_sent.split()

    return normalized_sent
def compute_sari(sources, predictions, references):
    if not (len(sources) == len(predictions) == len(references)):
        raise ValueError("Sources length must match predictions and references lengths.")
    sari_score = 0
    for src, pred, refs in zip(sources, predictions, references):
        sari_score += SARIsent(normalize(src), normalize(pred), [normalize(sent) for sent in refs])
    sari_score = sari_score / len(predictions)
    return 100 * sari_score
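

# SARI averages the keep/delete/add F1 components over 1- to 4-grams for each
# sentence, then averages over the corpus; the factor of 100 puts the score on
# a 0-100 scale.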
def A__ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_="exp" , SCREAMING_SNAKE_CASE_=None , SCREAMING_SNAKE_CASE_=False , SCREAMING_SNAKE_CASE_=False , SCREAMING_SNAKE_CASE_=False , ) -> Dict:
lowerCamelCase : Optional[int] =len(references[0] )
if any(len(SCREAMING_SNAKE_CASE_ ) != references_per_prediction for refs in references ):
raise ValueError('''Sacrebleu requires the same number of references for each prediction''' )
lowerCamelCase : Optional[int] =[[refs[i] for refs in references] for i in range(SCREAMING_SNAKE_CASE_ )]
lowerCamelCase : Union[str, Any] =sacrebleu.corpus_bleu(
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , smooth_method=SCREAMING_SNAKE_CASE_ , smooth_value=SCREAMING_SNAKE_CASE_ , force=SCREAMING_SNAKE_CASE_ , lowercase=SCREAMING_SNAKE_CASE_ , use_effective_order=SCREAMING_SNAKE_CASE_ , )
return output.score
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION)
class WikiSplit(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("string", id="sequence"),
                    "references": datasets.Sequence(datasets.Value("string", id="sequence"), id="references"),
                }
            ),
            codebase_urls=[
                "https://github.com/huggingface/transformers/blob/master/src/transformers/data/metrics/squad_metrics.py",
                "https://github.com/cocoxu/simplification/blob/master/SARI.py",
                "https://github.com/tensorflow/tensor2tensor/blob/master/tensor2tensor/utils/sari_hook.py",
                "https://github.com/mjpost/sacreBLEU",
            ],
            reference_urls=[
                "https://www.aclweb.org/anthology/Q16-1029.pdf",
                "https://github.com/mjpost/sacreBLEU",
                "https://en.wikipedia.org/wiki/BLEU",
                "https://towardsdatascience.com/evaluating-text-output-in-nlp-bleu-at-your-own-risk-e8609665a213",
            ],
        )

    def _compute(self, sources, predictions, references):
        result = {}
        result.update({"sari": compute_sari(sources=sources, predictions=predictions, references=references)})
        result.update({"sacrebleu": compute_sacrebleu(predictions=predictions, references=references)})
        result.update({"exact": compute_em(predictions=predictions, references=references)})
        return result
| 262 | 0 |
def binary_multiply(a: int, b: int) -> int:
    """Multiply `a` by `b` using only bit shifts and addition."""
    res = 0
    while b > 0:
        if b & 1:
            res += a

        a += a
        b >>= 1

    return res


def binary_mod_multiply(a: int, b: int, modulus: int) -> int:
    """Multiply `a` by `b` modulo `modulus`, keeping intermediates reduced."""
    res = 0
    while b > 0:
        if b & 1:
            res = ((res % modulus) + (a % modulus)) % modulus

        a += a
        b >>= 1

    return res
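

# Quick sanity checks for the two helpers above (names as restored here):
#
#   assert binary_multiply(3, 9) == 27
#   assert binary_mod_multiply(3, 9, 5) == 2  # 27 % 5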
| 63 |
'''Nearest-neighbour image resizing.'''
import numpy as np
from cv2 import destroyAllWindows, imread, imshow, waitKey
class NearestNeighbour:
    """
    Simplest and fastest version of image resizing.
    Source: https://en.wikipedia.org/wiki/Nearest-neighbor_interpolation
    """

    def __init__(self, img, dst_width: int, dst_height: int):
        if dst_width < 0 or dst_height < 0:
            raise ValueError("Destination width/height should be > 0")

        self.img = img
        self.src_w = img.shape[1]
        self.src_h = img.shape[0]
        self.dst_w = dst_width
        self.dst_h = dst_height

        self.ratio_x = self.src_w / self.dst_w
        self.ratio_y = self.src_h / self.dst_h

        self.output = np.ones((self.dst_h, self.dst_w, 3), np.uint8) * 255

    def process(self):
        """Fill the output image by sampling the nearest source pixel."""
        for i in range(self.dst_h):
            for j in range(self.dst_w):
                self.output[i][j] = self.img[self.get_y(i)][self.get_x(j)]

    def get_x(self, x: int) -> int:
        """Get the source X coordinate for a destination X coordinate."""
        return int(self.ratio_x * x)

    def get_y(self, y: int) -> int:
        """Get the source Y coordinate for a destination Y coordinate."""
        return int(self.ratio_y * y)
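

# A minimal sketch without OpenCV (numpy only), upscaling a 2x2 image:
#
#   tiny = np.arange(12, dtype=np.uint8).reshape(2, 2, 3)
#   n = NearestNeighbour(tiny, 4, 4)
#   n.process()
#   print(n.output.shape)  # (4, 4, 3)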
if __name__ == "__main__":
    dst_w, dst_h = 800, 600
    im = imread('image_data/lena.jpg', 1)
    n = NearestNeighbour(im, dst_w, dst_h)
n.process()
imshow(
f'''Image resized from: {im.shape[1]}x{im.shape[0]} to {dst_w}x{dst_h}''', n.output
)
waitKey(0)
destroyAllWindows()
| 365 | 0 |
from ...processing_utils import ProcessorMixin
class TvltProcessor(ProcessorMixin):
    r"""
    Constructs a TVLT processor which wraps a TVLT image processor and a TVLT feature extractor into a single processor.
    """

    attributes = ["image_processor", "feature_extractor"]
    image_processor_class = "TvltImageProcessor"
    feature_extractor_class = "TvltFeatureExtractor"

    def __init__(self, image_processor, feature_extractor):
        super().__init__(image_processor=image_processor, feature_extractor=feature_extractor)

        self.image_processor = image_processor
        self.feature_extractor = feature_extractor

    def __call__(
        self,
        images=None,
        audio=None,
        images_mixed=None,
        sampling_rate=None,
        mask_audio=False,
        mask_pixel=False,
        *args,
        **kwargs,
    ):
        """
        Forwards `images` to the image processor and `audio` to the feature extractor, merging their outputs.
        """
        if images is None and audio is None:
            raise ValueError("You need to specify either an `images` or `audio` input to process.")

        images_mixed_dict = None
        if images is not None:
            images_dict = self.image_processor(images, mask_pixel=mask_pixel, *args, **kwargs)
        if images_mixed is not None:
            images_mixed_dict = self.image_processor(images_mixed, is_mixed=True, *args, **kwargs)
        if audio is not None:
            audio_dict = self.feature_extractor(
                audio, *args, sampling_rate=sampling_rate, mask_audio=mask_audio, **kwargs
            )

        output_dict = {}
        if audio is not None:
            output_dict.update(audio_dict)
        if images is not None:
            output_dict.update(images_dict)
        if images_mixed_dict is not None:
            output_dict.update(images_mixed_dict)
        return output_dict

    @property
    def model_input_names(self):
        image_processor_input_names = self.image_processor.model_input_names
        feature_extractor_input_names = self.feature_extractor.model_input_names
        return list(dict.fromkeys(image_processor_input_names + feature_extractor_input_names))
| 431 |
import fire
from utils import calculate_rouge, save_json
def calculate_rouge_path(pred_path, tgt_path, save_path=None, **kwargs):
    """Kwargs will be passed to calculate_rouge."""
    pred_lns = [x.strip() for x in open(pred_path).readlines()]
    tgt_lns = [x.strip() for x in open(tgt_path).readlines()][: len(pred_lns)]
    metrics = calculate_rouge(pred_lns, tgt_lns, **kwargs)
    if save_path is not None:
        save_json(metrics, save_path, indent=None)
    return metrics  # these print nicely
if __name__ == "__main__":
fire.Fire(calculate_rouge_path)
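
# Typical invocation via python-fire (file and path names are illustrative):
#
#   python rouge_cli.py predictions.txt targets.txt --save_path rouge.json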
| 431 | 1 |
'''Lazy import structure for the PoolFormer model.'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
'configuration_poolformer': [
'POOLFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP',
'PoolFormerConfig',
'PoolFormerOnnxConfig',
]
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["feature_extraction_poolformer"] = ["PoolFormerFeatureExtractor"]
    _import_structure["image_processing_poolformer"] = ["PoolFormerImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_poolformer"] = [
'POOLFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'PoolFormerForImageClassification',
'PoolFormerModel',
'PoolFormerPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_poolformer import (
POOLFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
PoolFormerConfig,
PoolFormerOnnxConfig,
)
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_poolformer import PoolFormerFeatureExtractor
from .image_processing_poolformer import PoolFormerImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_poolformer import (
POOLFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
PoolFormerForImageClassification,
PoolFormerModel,
PoolFormerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure)
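    # The _LazyModule pattern defers the heavy torch/vision imports until one
    # of the exported names is actually accessed, keeping top-level imports fast.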
| 603 |
'''Lazy import structure for the Transformer-XL model.'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_import_structure = {
'configuration_transfo_xl': ['TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP', 'TransfoXLConfig'],
'tokenization_transfo_xl': ['TransfoXLCorpus', 'TransfoXLTokenizer'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_transfo_xl"] = [
'TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST',
'AdaptiveEmbedding',
'TransfoXLForSequenceClassification',
'TransfoXLLMHeadModel',
'TransfoXLModel',
'TransfoXLPreTrainedModel',
'load_tf_weights_in_transfo_xl',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_transfo_xl"] = [
'TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFAdaptiveEmbedding',
'TFTransfoXLForSequenceClassification',
'TFTransfoXLLMHeadModel',
'TFTransfoXLMainLayer',
'TFTransfoXLModel',
'TFTransfoXLPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_transfo_xl import TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP, TransfoXLConfig
from .tokenization_transfo_xl import TransfoXLCorpus, TransfoXLTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_transfo_xl import (
TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST,
AdaptiveEmbedding,
TransfoXLForSequenceClassification,
TransfoXLLMHeadModel,
TransfoXLModel,
TransfoXLPreTrainedModel,
load_tf_weights_in_transfo_xl,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_transfo_xl import (
TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST,
TFAdaptiveEmbedding,
TFTransfoXLForSequenceClassification,
TFTransfoXLLMHeadModel,
TFTransfoXLMainLayer,
TFTransfoXLModel,
TFTransfoXLPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 603 | 1 |
import logging
import os
from .state import PartialState
class MultiProcessAdapter(logging.LoggerAdapter):
    """
    An adapter for logging in multiprocess setups: calls are logged on the main
    process only, unless `main_process_only=False` is passed.
    """

    @staticmethod
    def _should_log(main_process_only):
        """Check if the log should be performed on this process."""
        state = PartialState()
        return not main_process_only or (main_process_only and state.is_main_process)

    def log(self, level, msg, *args, **kwargs):
        """Delegates the logger call after checking if we should log."""
        if PartialState._shared_state == {}:
            raise RuntimeError(
                "You must initialize the accelerate state by calling either `PartialState()` or `Accelerator()` before using the logging utility."
            )
        main_process_only = kwargs.pop("main_process_only", True)
        in_order = kwargs.pop("in_order", False)

        if self.isEnabledFor(level):
            if self._should_log(main_process_only):
                msg, kwargs = self.process(msg, kwargs)
                self.logger.log(level, msg, *args, **kwargs)

            elif in_order:
                state = PartialState()
                for i in range(state.num_processes):
                    if i == state.process_index:
                        msg, kwargs = self.process(msg, kwargs)
                        self.logger.log(level, msg, *args, **kwargs)
                    state.wait_for_everyone()


def get_logger(name: str, log_level: str = None):
    """Returns a `logging.Logger` for `name`, wrapped in a `MultiProcessAdapter`."""
    if log_level is None:
        log_level = os.environ.get("ACCELERATE_LOG_LEVEL", None)
    logger = logging.getLogger(name)
    if log_level is not None:
        logger.setLevel(log_level.upper())
        logger.root.setLevel(log_level.upper())
    return MultiProcessAdapter(logger, {})
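

# A hedged usage sketch:
#
#   logger = get_logger(__name__, log_level="INFO")
#   logger.info("printed once, on the main process")
#   logger.info("printed on every rank", main_process_only=False)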
| 443 |
from typing import List, Optional, Union
import torch
from ...models import UNet2DConditionModel, VQModel
from ...pipelines import DiffusionPipeline
from ...pipelines.pipeline_utils import ImagePipelineOutput
from ...schedulers import DDPMScheduler
from ...utils import (
is_accelerate_available,
is_accelerate_version,
logging,
randn_tensor,
replace_example_docstring,
)
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name
UpperCAmelCase_ : str = '\n Examples:\n ```py\n >>> import torch\n >>> import numpy as np\n\n >>> from diffusers import KandinskyV22PriorPipeline, KandinskyV22ControlnetPipeline\n >>> from transformers import pipeline\n >>> from diffusers.utils import load_image\n\n\n >>> def make_hint(image, depth_estimator):\n ... image = depth_estimator(image)["depth"]\n ... image = np.array(image)\n ... image = image[:, :, None]\n ... image = np.concatenate([image, image, image], axis=2)\n ... detected_map = torch.from_numpy(image).float() / 255.0\n ... hint = detected_map.permute(2, 0, 1)\n ... return hint\n\n\n >>> depth_estimator = pipeline("depth-estimation")\n\n >>> pipe_prior = KandinskyV22PriorPipeline.from_pretrained(\n ... "kandinsky-community/kandinsky-2-2-prior", torch_dtype=torch.float16\n ... )\n >>> pipe_prior = pipe_prior.to("cuda")\n\n >>> pipe = KandinskyV22ControlnetPipeline.from_pretrained(\n ... "kandinsky-community/kandinsky-2-2-controlnet-depth", torch_dtype=torch.float16\n ... )\n >>> pipe = pipe.to("cuda")\n\n\n >>> img = load_image(\n ... "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"\n ... "/kandinsky/cat.png"\n ... ).resize((768, 768))\n\n >>> hint = make_hint(img, depth_estimator).unsqueeze(0).half().to("cuda")\n\n >>> prompt = "A robot, 4k photo"\n >>> negative_prior_prompt = "lowres, text, error, cropped, worst quality, low quality, jpeg artifacts, ugly, duplicate, morbid, mutilated, out of frame, extra fingers, mutated hands, poorly drawn hands, poorly drawn face, mutation, deformed, blurry, dehydrated, bad anatomy, bad proportions, extra limbs, cloned face, disfigured, gross proportions, malformed limbs, missing arms, missing legs, extra arms, extra legs, fused fingers, too many fingers, long neck, username, watermark, signature"\n\n >>> generator = torch.Generator(device="cuda").manual_seed(43)\n\n >>> image_emb, zero_image_emb = pipe_prior(\n ... prompt=prompt, negative_prompt=negative_prior_prompt, generator=generator\n ... ).to_tuple()\n\n >>> images = pipe(\n ... image_embeds=image_emb,\n ... negative_image_embeds=zero_image_emb,\n ... hint=hint,\n ... num_inference_steps=50,\n ... generator=generator,\n ... height=768,\n ... width=768,\n ... ).images\n\n >>> images[0].save("robot_cat.png")\n ```\n'
# Keep a conventional alias for the long example docstring defined just above
# (the string literal itself retains its original variable name).
EXAMPLE_DOC_STRING = UpperCAmelCase_


def downscale_height_and_width(height, width, scale_factor=8):
    new_height = height // scale_factor**2
    if height % scale_factor**2 != 0:
        new_height += 1
    new_width = width // scale_factor**2
    if width % scale_factor**2 != 0:
        new_width += 1
    return new_height * scale_factor, new_width * scale_factor
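

# Worked example: downscale_height_and_width(768, 768) computes 768 // 64 = 12
# with no remainder, so it returns (12 * 8, 12 * 8) = (96, 96) -- the latent
# resolution corresponding to a 768x768 image.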
class KandinskyV22ControlnetPipeline(DiffusionPipeline):
    """
    Pipeline for image generation with Kandinsky 2.2 and ControlNet guidance.
    """

    def __init__(
        self,
        unet: UNet2DConditionModel,
        scheduler: DDPMScheduler,
        movq: VQModel,
    ):
        super().__init__()

        self.register_modules(
            unet=unet,
            scheduler=scheduler,
            movq=movq,
        )
        self.movq_scale_factor = 2 ** (len(self.movq.config.block_out_channels) - 1)

    def prepare_latents(self, shape, dtype, device, generator, latents, scheduler):
        if latents is None:
            latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
        else:
            if latents.shape != shape:
                raise ValueError(f"""Unexpected latents shape, got {latents.shape}, expected {shape}""")
            latents = latents.to(device)

        latents = latents * scheduler.init_noise_sigma
        return latents

    def enable_sequential_cpu_offload(self, gpu_id=0):
        r"""
        Offloads all models to CPU using accelerate, significantly reducing memory usage.
        """
        if is_accelerate_available():
            from accelerate import cpu_offload
        else:
            raise ImportError("Please install accelerate via `pip install accelerate`")

        device = torch.device(f"""cuda:{gpu_id}""")

        models = [
            self.unet,
            self.movq,
        ]
        for cpu_offloaded_model in models:
            if cpu_offloaded_model is not None:
                cpu_offload(cpu_offloaded_model, device)

    def enable_model_cpu_offload(self, gpu_id=0):
        r"""
        Offloads all models to CPU using accelerate, keeping hooks so forward passes still work.
        """
        if is_accelerate_available() and is_accelerate_version(">=", "0.17.0.dev0"):
            from accelerate import cpu_offload_with_hook
        else:
            raise ImportError("`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.")

        device = torch.device(f"""cuda:{gpu_id}""")

        if self.device.type != "cpu":
            self.to("cpu", silence_dtype_warnings=True)
            torch.cuda.empty_cache()  # otherwise we don't see the memory savings (but they probably exist)

        hook = None
        for cpu_offloaded_model in [self.unet, self.movq]:
            _, hook = cpu_offload_with_hook(cpu_offloaded_model, device, prev_module_hook=hook)

        # We'll offload the last model manually.
        self.final_offload_hook = hook

    @property
    # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
    def _execution_device(self):
        if not hasattr(self.unet, "_hf_hook"):
            return self.device
        for module in self.unet.modules():
            if (
                hasattr(module, "_hf_hook")
                and hasattr(module._hf_hook, "execution_device")
                and module._hf_hook.execution_device is not None
            ):
                return torch.device(module._hf_hook.execution_device)
        return self.device

    @torch.no_grad()
    @replace_example_docstring(EXAMPLE_DOC_STRING)
    def __call__(
        self,
        image_embeds: Union[torch.FloatTensor, List[torch.FloatTensor]],
        negative_image_embeds: Union[torch.FloatTensor, List[torch.FloatTensor]],
        hint: torch.FloatTensor,
        height: int = 512,
        width: int = 512,
        num_inference_steps: int = 100,
        guidance_scale: float = 4.0,
        num_images_per_prompt: int = 1,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        latents: Optional[torch.FloatTensor] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
    ):
        device = self._execution_device

        do_classifier_free_guidance = guidance_scale > 1.0

        if isinstance(image_embeds, list):
            image_embeds = torch.cat(image_embeds, dim=0)
        if isinstance(negative_image_embeds, list):
            negative_image_embeds = torch.cat(negative_image_embeds, dim=0)
        if isinstance(hint, list):
            hint = torch.cat(hint, dim=0)

        batch_size = image_embeds.shape[0] * num_images_per_prompt

        if do_classifier_free_guidance:
            image_embeds = image_embeds.repeat_interleave(num_images_per_prompt, dim=0)
            negative_image_embeds = negative_image_embeds.repeat_interleave(num_images_per_prompt, dim=0)
            hint = hint.repeat_interleave(num_images_per_prompt, dim=0)

            image_embeds = torch.cat([negative_image_embeds, image_embeds], dim=0).to(
                dtype=self.unet.dtype, device=device
            )
            hint = torch.cat([hint, hint], dim=0).to(dtype=self.unet.dtype, device=device)

        self.scheduler.set_timesteps(num_inference_steps, device=device)
        timesteps_tensor = self.scheduler.timesteps

        num_channels_latents = self.movq.config.latent_channels

        height, width = downscale_height_and_width(height, width, self.movq_scale_factor)

        # create initial latent
        latents = self.prepare_latents(
            (batch_size, num_channels_latents, height, width),
            image_embeds.dtype,
            device,
            generator,
            latents,
            self.scheduler,
        )

        for i, t in enumerate(self.progress_bar(timesteps_tensor)):
            # expand the latents if we are doing classifier free guidance
            latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents

            added_cond_kwargs = {"image_embeds": image_embeds, "hint": hint}
            noise_pred = self.unet(
                sample=latent_model_input,
                timestep=t,
                encoder_hidden_states=None,
                added_cond_kwargs=added_cond_kwargs,
                return_dict=False,
            )[0]

            if do_classifier_free_guidance:
                noise_pred, variance_pred = noise_pred.split(latents.shape[1], dim=1)
                noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
                _, variance_pred_text = variance_pred.chunk(2)
                noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
                noise_pred = torch.cat([noise_pred, variance_pred_text], dim=1)

            if not (
                hasattr(self.scheduler.config, "variance_type")
                and self.scheduler.config.variance_type in ["learned", "learned_range"]
            ):
                noise_pred, _ = noise_pred.split(latents.shape[1], dim=1)

            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(
                noise_pred,
                t,
                latents,
                generator=generator,
            )[0]
        # post-processing
        image = self.movq.decode(latents, force_not_quantize=True)["sample"]

        if output_type not in ["pt", "np", "pil"]:
            raise ValueError(f"""Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}""")

        if output_type in ["np", "pil"]:
            image = image * 0.5 + 0.5
            image = image.clamp(0, 1)
            image = image.cpu().permute(0, 2, 3, 1).float().numpy()

        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image,)

        return ImagePipelineOutput(images=image)
| 443 | 1 |
"""simple docstring"""
from collections.abc import Sequence
def UpperCamelCase ( SCREAMING_SNAKE_CASE_ = None ) ->int:
if nums is None or not nums:
raise ValueError('''Input sequence should not be empty''' )
_lowerCamelCase : str = nums[0]
for i in range(1 , len(SCREAMING_SNAKE_CASE_ ) ):
_lowerCamelCase : Optional[int] = nums[i]
_lowerCamelCase : Union[str, Any] = max(SCREAMING_SNAKE_CASE_ , ans + num , SCREAMING_SNAKE_CASE_ )
return ans
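

# For instance, max_subsequence_sum([1, -2, 3, 4, -2]) == 7 (the subarray [3, 4]).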
if __name__ == "__main__":
import doctest
doctest.testmod()
# Try on a sample input from the user
    n = int(input('Enter number of elements : ').strip())
    array = list(map(int, input('\nEnter the numbers : ').strip().split()))[:n]
print(max_subsequence_sum(array))
| 434 | """simple docstring"""
import argparse
import json
import os
import sys
import tempfile
import unittest
from argparse import Namespace
from dataclasses import dataclass, field
from enum import Enum
from pathlib import Path
from typing import List, Literal, Optional
import yaml
from transformers import HfArgumentParser, TrainingArguments
from transformers.hf_argparser import make_choice_type_function, string_to_bool
# Since Python 3.10, we can use the builtin `|` operator for Union types
# See PEP 604: https://peps.python.org/pep-0604
is_python_no_less_than_3_10 = sys.version_info >= (3, 10)
def list_field(default=None, metadata=None):
    return field(default_factory=lambda: default, metadata=metadata)
@dataclass
class BasicExample:
    foo: int
    bar: float
    baz: str
    flag: bool


@dataclass
class WithDefaultExample:
    foo: int = 42
    baz: str = field(default="toto", metadata={"help": "help message"})


@dataclass
class WithDefaultBoolExample:
    foo: bool = False
    baz: bool = True
    opt: Optional[bool] = None


class BasicEnum(Enum):
    titi = "titi"
    toto = "toto"


class MixedTypeEnum(Enum):
    titi = "titi"
    toto = "toto"
    fourtytwo = 42


@dataclass
class EnumExample:
    foo: BasicEnum = "toto"

    def __post_init__(self):
        self.foo = BasicEnum(self.foo)


@dataclass
class MixedTypeEnumExample:
    foo: MixedTypeEnum = "toto"

    def __post_init__(self):
        self.foo = MixedTypeEnum(self.foo)


@dataclass
class OptionalExample:
    foo: Optional[int] = None
    bar: Optional[float] = field(default=None, metadata={"help": "help message"})
    baz: Optional[str] = None
    ces: Optional[List[str]] = list_field(default=[])
    des: Optional[List[int]] = list_field(default=[])


@dataclass
class ListExample:
    foo_int: List[int] = list_field(default=[])
    bar_int: List[int] = list_field(default=[1, 2, 3])
    foo_str: List[str] = list_field(default=["Hallo", "Bonjour", "Hello"])
    foo_float: List[float] = list_field(default=[0.1, 0.2, 0.3])


@dataclass
class RequiredExample:
    required_list: List[int] = field()
    required_str: str = field()
    required_enum: BasicEnum = field()

    def __post_init__(self):
        self.required_enum = BasicEnum(self.required_enum)


@dataclass
class StringLiteralAnnotationExample:
    foo: int
    required_enum: "BasicEnum" = field()
    opt: "Optional[bool]" = None
    baz: "str" = field(default="toto", metadata={"help": "help message"})
    foo_str: "List[str]" = list_field(default=["Hallo", "Bonjour", "Hello"])


if is_python_no_less_than_3_10:

    @dataclass
    class WithDefaultBoolExamplePep604:
        foo: bool = False
        baz: bool = True
        opt: bool | None = None

    @dataclass
    class OptionalExamplePep604:
        foo: int | None = None
        bar: float | None = field(default=None, metadata={"help": "help message"})
        baz: str | None = None
        ces: list[str] | None = list_field(default=[])
        des: list[int] | None = list_field(default=[])
class _UpperCAmelCase ( unittest.TestCase ):
"""simple docstring"""
def a__ ( self , _lowercase , _lowercase ) -> Optional[Any]:
self.assertEqual(len(a._actions ) , len(b._actions ) )
for x, y in zip(a._actions , b._actions ):
_lowerCamelCase : List[str] = {k: v for k, v in vars(_lowercase ).items() if k != '''container'''}
_lowerCamelCase : str = {k: v for k, v in vars(_lowercase ).items() if k != '''container'''}
# Choices with mixed type have custom function as "type"
# So we need to compare results directly for equality
if xx.get('''choices''' , _lowercase ) and yy.get('''choices''' , _lowercase ):
for expected_choice in yy["choices"] + xx["choices"]:
self.assertEqual(xx['''type'''](_lowercase ) , yy['''type'''](_lowercase ) )
del xx["type"], yy["type"]
self.assertEqual(_lowercase , _lowercase )
def a__ ( self ) -> Optional[Any]:
_lowerCamelCase : str = HfArgumentParser(_lowercase )
_lowerCamelCase : Dict = argparse.ArgumentParser()
expected.add_argument('''--foo''' , type=_lowercase , required=_lowercase )
expected.add_argument('''--bar''' , type=_lowercase , required=_lowercase )
expected.add_argument('''--baz''' , type=_lowercase , required=_lowercase )
expected.add_argument('''--flag''' , type=_lowercase , default=_lowercase , const=_lowercase , nargs='''?''' )
self.argparsersEqual(_lowercase , _lowercase )
_lowerCamelCase : str = ['''--foo''', '''1''', '''--baz''', '''quux''', '''--bar''', '''0.5''']
((_lowerCamelCase), ) : Union[str, Any] = parser.parse_args_into_dataclasses(_lowercase , look_for_args_file=_lowercase )
self.assertFalse(example.flag )
def a__ ( self ) -> Optional[int]:
_lowerCamelCase : Dict = HfArgumentParser(_lowercase )
_lowerCamelCase : List[str] = argparse.ArgumentParser()
expected.add_argument('''--foo''' , default=42 , type=_lowercase )
expected.add_argument('''--baz''' , default='''toto''' , type=_lowercase , help='''help message''' )
self.argparsersEqual(_lowercase , _lowercase )
    def test_with_default_bool(self):
        expected = argparse.ArgumentParser()
        expected.add_argument("--foo", type=string_to_bool, default=False, const=True, nargs="?")
        expected.add_argument("--baz", type=string_to_bool, default=True, const=True, nargs="?")
        # A boolean no_* argument always has to come after its "default: True" regular counter-part
        # and its default must be set to False
        expected.add_argument("--no_baz", action="store_false", default=False, dest="baz")
        expected.add_argument("--opt", type=string_to_bool, default=None)

        dataclass_types = [WithDefaultBoolExample]
        if is_python_no_less_than_3_10:
            dataclass_types.append(WithDefaultBoolExamplePep604)

        for dataclass_type in dataclass_types:
            parser = HfArgumentParser(dataclass_type)
            self.argparsersEqual(parser, expected)

            args = parser.parse_args([])
            self.assertEqual(args, Namespace(foo=False, baz=True, opt=None))

            args = parser.parse_args(["--foo", "--no_baz"])
            self.assertEqual(args, Namespace(foo=True, baz=False, opt=None))

            args = parser.parse_args(["--foo", "--baz"])
            self.assertEqual(args, Namespace(foo=True, baz=True, opt=None))

            args = parser.parse_args(["--foo", "True", "--baz", "True", "--opt", "True"])
            self.assertEqual(args, Namespace(foo=True, baz=True, opt=True))

            args = parser.parse_args(["--foo", "False", "--baz", "False", "--opt", "False"])
            self.assertEqual(args, Namespace(foo=False, baz=False, opt=False))
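
    # Note on the pattern tested above: the `--no_baz` flag is argparse's
    # store_false idiom. It writes False into the same `dest` ("baz") as the
    # default-True `--baz` counterpart, which is why the two arguments must be
    # declared in this order.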
    def test_with_enum(self):
        parser = HfArgumentParser(MixedTypeEnumExample)

        expected = argparse.ArgumentParser()
        expected.add_argument(
            "--foo",
            default="toto",
            choices=["titi", "toto", 42],
            type=make_choice_type_function(["titi", "toto", 42]),
        )
        self.argparsersEqual(parser, expected)

        args = parser.parse_args([])
        self.assertEqual(args.foo, "toto")
        enum_ex = parser.parse_args_into_dataclasses([])[0]
        self.assertEqual(enum_ex.foo, MixedTypeEnum.toto)

        args = parser.parse_args(["--foo", "titi"])
        self.assertEqual(args.foo, "titi")
        enum_ex = parser.parse_args_into_dataclasses(["--foo", "titi"])[0]
        self.assertEqual(enum_ex.foo, MixedTypeEnum.titi)

        args = parser.parse_args(["--foo", "42"])
        self.assertEqual(args.foo, 42)
        enum_ex = parser.parse_args_into_dataclasses(["--foo", "42"])[0]
        self.assertEqual(enum_ex.foo, MixedTypeEnum.fourtytwo)
    def test_with_literal(self):
        @dataclass
        class LiteralExample:
            # Literal is expected to be imported from typing at the top of this file
            foo: Literal["titi", "toto", 42] = "toto"

        parser = HfArgumentParser(LiteralExample)

        expected = argparse.ArgumentParser()
        expected.add_argument(
            "--foo",
            default="toto",
            choices=("titi", "toto", 42),
            type=make_choice_type_function(["titi", "toto", 42]),
        )
        self.argparsersEqual(parser, expected)

        args = parser.parse_args([])
        self.assertEqual(args.foo, "toto")

        args = parser.parse_args(["--foo", "titi"])
        self.assertEqual(args.foo, "titi")

        args = parser.parse_args(["--foo", "42"])
        self.assertEqual(args.foo, 42)
    def test_with_list(self):
        parser = HfArgumentParser(ListExample)

        expected = argparse.ArgumentParser()
        expected.add_argument("--foo_int", nargs="+", default=[], type=int)
        expected.add_argument("--bar_int", nargs="+", default=[1, 2, 3], type=int)
        expected.add_argument("--foo_str", nargs="+", default=["Hallo", "Bonjour", "Hello"], type=str)
        expected.add_argument("--foo_float", nargs="+", default=[0.1, 0.2, 0.3], type=float)
        self.argparsersEqual(parser, expected)

        args = parser.parse_args([])
        self.assertEqual(
            args,
            Namespace(foo_int=[], bar_int=[1, 2, 3], foo_str=["Hallo", "Bonjour", "Hello"], foo_float=[0.1, 0.2, 0.3]),
        )

        args = parser.parse_args("--foo_int 1 --bar_int 2 3 --foo_str a b c --foo_float 0.1 0.7".split())
        self.assertEqual(args, Namespace(foo_int=[1], bar_int=[2, 3], foo_str=["a", "b", "c"], foo_float=[0.1, 0.7]))
    def test_with_optional(self):
        expected = argparse.ArgumentParser()
        expected.add_argument("--foo", default=None, type=int)
        expected.add_argument("--bar", default=None, type=float, help="help message")
        expected.add_argument("--baz", default=None, type=str)
        expected.add_argument("--ces", nargs="+", default=[], type=str)
        expected.add_argument("--des", nargs="+", default=[], type=int)

        dataclass_types = [OptionalExample]
        if is_python_no_less_than_3_10:
            dataclass_types.append(OptionalExamplePep604)

        for dataclass_type in dataclass_types:
            parser = HfArgumentParser(dataclass_type)
            self.argparsersEqual(parser, expected)

            args = parser.parse_args([])
            self.assertEqual(args, Namespace(foo=None, bar=None, baz=None, ces=[], des=[]))

            args = parser.parse_args("--foo 12 --bar 3.14 --baz 42 --ces a b c --des 1 2 3".split())
            self.assertEqual(args, Namespace(foo=12, bar=3.14, baz="42", ces=["a", "b", "c"], des=[1, 2, 3]))
    def test_with_required(self):
        parser = HfArgumentParser(RequiredExample)

        expected = argparse.ArgumentParser()
        expected.add_argument("--required_list", nargs="+", type=int, required=True)
        expected.add_argument("--required_str", type=str, required=True)
        expected.add_argument(
            "--required_enum",
            type=make_choice_type_function(["titi", "toto"]),
            choices=["titi", "toto"],
            required=True,
        )
        self.argparsersEqual(parser, expected)
    def test_with_string_literal_annotation(self):
        parser = HfArgumentParser(StringLiteralAnnotationExample)

        expected = argparse.ArgumentParser()
        expected.add_argument("--foo", type=int, required=True)
        expected.add_argument(
            "--required_enum",
            type=make_choice_type_function(["titi", "toto"]),
            choices=["titi", "toto"],
            required=True,
        )
        expected.add_argument("--opt", type=string_to_bool, default=None)
        expected.add_argument("--baz", default="toto", type=str, help="help message")
        expected.add_argument("--foo_str", nargs="+", default=["Hallo", "Bonjour", "Hello"], type=str)
        self.argparsersEqual(parser, expected)
    def test_parse_dict(self):
        parser = HfArgumentParser(BasicExample)

        args_dict = {
            "foo": 12,
            "bar": 3.14,
            "baz": "42",
            "flag": True,
        }

        parsed_args = parser.parse_dict(args_dict)[0]
        args = BasicExample(**args_dict)
        self.assertEqual(parsed_args, args)
    def test_parse_dict_extra_key(self):
        parser = HfArgumentParser(BasicExample)

        args_dict = {
            "foo": 12,
            "bar": 3.14,
            "baz": "42",
            "flag": True,
            "extra": 42,
        }

        self.assertRaises(ValueError, parser.parse_dict, args_dict, allow_extra_keys=False)
    def test_parse_json(self):
        parser = HfArgumentParser(BasicExample)

        args_dict_for_json = {
            "foo": 12,
            "bar": 3.14,
            "baz": "42",
            "flag": True,
        }
        with tempfile.TemporaryDirectory() as tmp_dir:
            temp_local_path = os.path.join(tmp_dir, "temp_json")
            os.mkdir(temp_local_path)
            with open(temp_local_path + ".json", "w+") as f:
                json.dump(args_dict_for_json, f)
            # parse_yaml_file also handles JSON, since YAML is a superset of JSON
            parsed_args = parser.parse_yaml_file(Path(temp_local_path + ".json"))[0]

        args = BasicExample(**args_dict_for_json)
        self.assertEqual(parsed_args, args)
    def test_parse_yaml(self):
        parser = HfArgumentParser(BasicExample)

        args_dict_for_yaml = {
            "foo": 12,
            "bar": 3.14,
            "baz": "42",
            "flag": True,
        }
        with tempfile.TemporaryDirectory() as tmp_dir:
            temp_local_path = os.path.join(tmp_dir, "temp_yaml")
            os.mkdir(temp_local_path)
            with open(temp_local_path + ".yaml", "w+") as f:
                yaml.dump(args_dict_for_yaml, f)
            parsed_args = parser.parse_yaml_file(Path(temp_local_path + ".yaml"))[0]

        args = BasicExample(**args_dict_for_yaml)
        self.assertEqual(parsed_args, args)
    def test_integration_training_args(self):
        # TrainingArguments is expected to be imported from transformers at the top of this file
        parser = HfArgumentParser(TrainingArguments)
        self.assertIsNotNone(parser)
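
# A minimal usage sketch of the pattern exercised by the tests above: an
# HfArgumentParser built from a dataclass behaves like a regular argparse
# parser. `QuickExample` is a hypothetical fixture, not one of the dataclasses
# defined in this file; `dataclass` and `field` are assumed to be imported at
# the top of the file, as the fixtures above already require.
if __name__ == "__main__":

    @dataclass
    class QuickExample:
        foo: int = 7
        baz: str = field(default="toto", metadata={"help": "help message"})

    quick_parser = HfArgumentParser(QuickExample)
    (quick_example,) = quick_parser.parse_args_into_dataclasses(["--foo", "3"])
    assert quick_example.foo == 3 and quick_example.baz == "toto"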
| 434 | 1 |
__snake_case = [
    999, 800, 799, 600, 599, 500, 400, 399, 377, 355, 333, 311, 288, 266,
    244, 222, 200, 199, 177, 155, 133, 111, 88, 66, 44, 22, 0,
]
__snake_case = [
    999, 976, 952, 928, 905, 882, 858, 857, 810, 762, 715, 714, 572, 429,
    428, 286, 285, 238, 190, 143, 142, 118, 95, 71, 47, 24, 0,
]
__snake_case = [
    999, 988, 977, 966, 955, 944, 933, 922, 911, 900, 899, 879, 859, 840,
    820, 800, 799, 766, 733, 700, 699, 650, 600, 599, 500, 499, 400, 399,
    350, 300, 299, 266, 233, 200, 199, 179, 159, 140, 120, 100, 99, 88,
    77, 66, 55, 44, 33, 22, 11, 0,
]
__snake_case = [
    999, 995, 992, 989, 985, 981, 978, 975, 971, 967, 964, 961, 957, 956,
    951, 947, 942, 937, 933, 928, 923, 919, 914, 913, 908, 903, 897, 892,
    887, 881, 876, 871, 870, 864, 858, 852, 846, 840, 834, 828, 827, 820,
    813, 806, 799, 792, 785, 784, 777, 770, 763, 756, 749, 742, 741, 733,
    724, 716, 707, 699, 698, 688, 677, 666, 656, 655, 645, 634, 623, 613,
    612, 598, 584, 570, 569, 555, 541, 527, 526, 505, 484, 483, 462, 440,
    439, 396, 395, 352, 351, 308, 307, 264, 263, 220, 219, 176, 132, 88,
    44, 0,
]
__snake_case = [
    999, 997, 995, 992, 990, 988, 986, 984, 981, 979, 977, 975, 972, 970,
    968, 966, 964, 961, 959, 957, 956, 954, 951, 949, 946, 944, 941, 939,
    936, 934, 931, 929, 926, 924, 921, 919, 916, 914, 913, 910, 907, 905,
    902, 899, 896, 893, 891, 888, 885, 882, 879, 877, 874, 871, 870, 867,
    864, 861, 858, 855, 852, 849, 846, 843, 840, 837, 834, 831, 828, 827,
    824, 821, 817, 814, 811, 808, 804, 801, 798, 795, 791, 788, 785, 784,
    780, 777, 774, 770, 766, 763, 760, 756, 752, 749, 746, 742, 741, 737,
    733, 730, 726, 722, 718, 714, 710, 707, 703, 699, 698, 694, 690, 685,
    681, 677, 673, 669, 664, 660, 656, 655, 650, 646, 641, 636, 632, 627,
    622, 618, 613, 612, 607, 602, 596, 591, 586, 580, 575, 570, 569, 563,
    557, 551, 545, 539, 533, 527, 526, 519, 512, 505, 498, 491, 484, 483,
    474, 466, 457, 449, 440, 439, 428, 418, 407, 396, 395, 381, 366, 352,
    351, 330, 308, 307, 286, 264, 263, 242, 220, 219, 176, 175, 132, 131,
    88, 44, 0,
]
__snake_case = [
    999, 991, 982, 974, 966, 958, 950, 941, 933, 925, 916, 908, 900, 899,
    874, 850, 825, 800, 799, 700, 600, 500, 400, 300, 200, 100, 0,
]
__snake_case = [
    999, 992, 985, 978, 971, 964, 957, 949, 942, 935, 928, 921, 914, 907,
    900, 899, 879, 859, 840, 820, 800, 799, 766, 733, 700, 699, 650, 600,
    599, 500, 499, 400, 399, 300, 299, 200, 199, 100, 99, 0,
]
__snake_case = [
    999, 996, 992, 989, 985, 982, 979, 975, 972, 968, 965, 961, 958, 955,
    951, 948, 944, 941, 938, 934, 931, 927, 924, 920, 917, 914, 910, 907,
    903, 900, 899, 891, 884, 876, 869, 861, 853, 846, 838, 830, 823, 815,
    808, 800, 799, 788, 777, 766, 755, 744, 733, 722, 711, 700, 699, 688,
    677, 666, 655, 644, 633, 622, 611, 600, 599, 585, 571, 557, 542, 528,
    514, 500, 499, 485, 471, 457, 442, 428, 414, 400, 399, 379, 359, 340,
    320, 300, 299, 279, 259, 240, 220, 200, 199, 166, 133, 100, 99, 66,
    33, 0,
]
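
# A rough sketch of how a comparable schedule could be generated rather than
# hard-coded. This is an assumption about the lists' structure (monotonically
# decreasing timesteps from 999 down to 0), not the procedure that actually
# produced them; `num_train_timesteps` and `num_steps` are illustrative names.
import numpy as np


def evenly_spaced_timesteps(num_train_timesteps: int = 1000, num_steps: int = 27) -> list:
    """Return `num_steps` evenly spaced, strictly decreasing integer timesteps."""
    return np.linspace(num_train_timesteps - 1, 0, num_steps).round().astype(int).tolist()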
| 460 |
import argparse
import json
import os
from pathlib import Path
import requests
import torch
from transformers import JukeboxConfig, JukeboxModel
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

PREFIX = "https://openaipublic.azureedge.net/jukebox/models/"
MODEL_MAPPING = {
"jukebox-1b-lyrics": [
"5b/vqvae.pth.tar",
"5b/prior_level_0.pth.tar",
"5b/prior_level_1.pth.tar",
"1b_lyrics/prior_level_2.pth.tar",
],
"jukebox-5b-lyrics": [
"5b/vqvae.pth.tar",
"5b/prior_level_0.pth.tar",
"5b/prior_level_1.pth.tar",
"5b_lyrics/prior_level_2.pth.tar",
],
}
def replace_key(key):
    if key.endswith(".model.1.bias") and len(key.split(".")) > 10:
        key = key.replace(".model.1.bias", ".conv1d_1.bias")
    elif key.endswith(".model.1.weight") and len(key.split(".")) > 10:
        key = key.replace(".model.1.weight", ".conv1d_1.weight")
    elif key.endswith(".model.3.bias") and len(key.split(".")) > 10:
        key = key.replace(".model.3.bias", ".conv1d_2.bias")
    elif key.endswith(".model.3.weight") and len(key.split(".")) > 10:
        key = key.replace(".model.3.weight", ".conv1d_2.weight")

    if "conditioner_blocks.0." in key:
        key = key.replace("conditioner_blocks.0", "conditioner_blocks")

    if "prime_prior" in key:
        key = key.replace("prime_prior", "encoder")

    if ".emb." in key and "total" not in key and "absolute" not in key and "relative" not in key:
        key = key.replace(".emb.", ".")

    if key.endswith("k"):  # replace vqvae.X.k with vqvae.X.codebook
        return key.replace(".k", ".codebook")
    if "y_emb." in key:
        return key.replace("y_emb.", "metadata_embedding.")
    if "x_emb.emb." in key:
        key = key.replace("0.x_emb.emb", "embed_tokens")
    if "prime_state_ln" in key:
        return key.replace("prime_state_ln", "encoder.final_layer_norm")
    if ".ln" in key:
        return key.replace(".ln", ".layer_norm")
    if "_ln" in key:
        return key.replace("_ln", "_layer_norm")
    if "prime_state_proj" in key:
        return key.replace("prime_state_proj", "encoder.proj_in")
    if "prime_x_out" in key:
        return key.replace("prime_x_out", "encoder.lm_head")
    if "prior.x_out" in key:
        return key.replace("x_out", "fc_proj_out")
    if "x_emb" in key:
        return key.replace("x_emb", "embed_tokens")

    return key
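
# Worked examples of the renaming rules above (the input keys are hypothetical
# checkpoint entries; the outputs follow directly from replace_key):
#   "y_emb.weight"                -> "metadata_embedding.weight"
#   "prime_state_ln.bias"         -> "encoder.final_layer_norm.bias"
#   "bottleneck.level_blocks.0.k" -> "bottleneck.level_blocks.0.codebook"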
def fix_jukebox_keys(state_dict, model_state_dict, key_prefix, mapping):
    new_dict = {}
    import re

    re_encoder_block_conv_in = re.compile(r"encoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).(bias|weight)")
    re_encoder_block_resnet = re.compile(
        r"encoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)"
    )
    re_encoder_block_proj_out = re.compile(r"encoders.(\d*).level_blocks.(\d*).model.(\d*).(bias|weight)")

    re_decoder_block_conv_out = re.compile(r"decoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).(bias|weight)")
    re_decoder_block_resnet = re.compile(
        r"decoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)"
    )
    re_decoder_block_proj_in = re.compile(r"decoders.(\d*).level_blocks.(\d*).model.(\d*).(bias|weight)")

    re_prior_cond_conv_out = re.compile(r"conditioner_blocks.(\d*).cond.model.(\d*).(\d).(bias|weight)")
    re_prior_cond_resnet = re.compile(
        r"conditioner_blocks.(\d*).cond.model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)"
    )
    re_prior_cond_proj_in = re.compile(r"conditioner_blocks.(\d*).cond.model.(\d*).(bias|weight)")

    for original_key, value in state_dict.items():
        # rename vqvae.encoder keys
        if re_encoder_block_conv_in.fullmatch(original_key):
            regex_match = re_encoder_block_conv_in.match(original_key)
            groups = regex_match.groups()
            block_index = int(groups[2]) * 2 + int(groups[3])
            re_new_key = f"encoders.{groups[0]}.level_blocks.{groups[1]}.downsample_block.{block_index}.{groups[-1]}"
            key = re_encoder_block_conv_in.sub(re_new_key, original_key)

        elif re_encoder_block_resnet.fullmatch(original_key):
            regex_match = re_encoder_block_resnet.match(original_key)
            groups = regex_match.groups()
            block_index = int(groups[2]) * 2 + int(groups[3])
            conv_index = {"1": 1, "3": 2}[groups[-2]]
            prefix = f"encoders.{groups[0]}.level_blocks.{groups[1]}.downsample_block.{block_index}."
            resnet_block = f"resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}"
            re_new_key = prefix + resnet_block
            key = re_encoder_block_resnet.sub(re_new_key, original_key)

        elif re_encoder_block_proj_out.fullmatch(original_key):
            regex_match = re_encoder_block_proj_out.match(original_key)
            groups = regex_match.groups()
            re_new_key = f"encoders.{groups[0]}.level_blocks.{groups[1]}.proj_out.{groups[-1]}"
            key = re_encoder_block_proj_out.sub(re_new_key, original_key)

        # rename vqvae.decoder keys
        elif re_decoder_block_conv_out.fullmatch(original_key):
            regex_match = re_decoder_block_conv_out.match(original_key)
            groups = regex_match.groups()
            block_index = int(groups[2]) * 2 + int(groups[3]) - 2
            re_new_key = f"decoders.{groups[0]}.level_blocks.{groups[1]}.upsample_block.{block_index}.{groups[-1]}"
            key = re_decoder_block_conv_out.sub(re_new_key, original_key)

        elif re_decoder_block_resnet.fullmatch(original_key):
            regex_match = re_decoder_block_resnet.match(original_key)
            groups = regex_match.groups()
            block_index = int(groups[2]) * 2 + int(groups[3]) - 2
            conv_index = {"1": 1, "3": 2}[groups[-2]]
            prefix = f"decoders.{groups[0]}.level_blocks.{groups[1]}.upsample_block.{block_index}."
            resnet_block = f"resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}"
            re_new_key = prefix + resnet_block
            key = re_decoder_block_resnet.sub(re_new_key, original_key)

        elif re_decoder_block_proj_in.fullmatch(original_key):
            regex_match = re_decoder_block_proj_in.match(original_key)
            groups = regex_match.groups()
            re_new_key = f"decoders.{groups[0]}.level_blocks.{groups[1]}.proj_in.{groups[-1]}"
            key = re_decoder_block_proj_in.sub(re_new_key, original_key)

        # rename prior cond.model to upsampler.upsample_block and resnet
        elif re_prior_cond_conv_out.fullmatch(original_key):
            regex_match = re_prior_cond_conv_out.match(original_key)
            groups = regex_match.groups()
            block_index = int(groups[1]) * 2 + int(groups[2]) - 2
            re_new_key = f"conditioner_blocks.upsampler.upsample_block.{block_index}.{groups[-1]}"
            key = re_prior_cond_conv_out.sub(re_new_key, original_key)

        elif re_prior_cond_resnet.fullmatch(original_key):
            regex_match = re_prior_cond_resnet.match(original_key)
            groups = regex_match.groups()
            block_index = int(groups[1]) * 2 + int(groups[2]) - 2
            conv_index = {"1": 1, "3": 2}[groups[-2]]
            prefix = f"conditioner_blocks.upsampler.upsample_block.{block_index}."
            resnet_block = f"resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}"
            re_new_key = prefix + resnet_block
            key = re_prior_cond_resnet.sub(re_new_key, original_key)

        elif re_prior_cond_proj_in.fullmatch(original_key):
            regex_match = re_prior_cond_proj_in.match(original_key)
            groups = regex_match.groups()
            re_new_key = f"conditioner_blocks.upsampler.proj_in.{groups[-1]}"
            key = re_prior_cond_proj_in.sub(re_new_key, original_key)

        # keep original key
        else:
            key = original_key

        key = replace_key(key)

        if f"{key_prefix}.{key}" not in model_state_dict or key is None:
            print(f"failed converting {original_key} to {key}, does not match")

        # handle mismatched shape
        elif value.shape != model_state_dict[f"{key_prefix}.{key}"].shape:
            val = model_state_dict[f"{key_prefix}.{key}"]
            print(f"{original_key}-> {key} : \nshape {val.shape} and {value.shape}, do not match")
            key = original_key

        mapping[key] = original_key
        new_dict[key] = value

    return new_dict
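
# Worked example for the first rewrite rule above, on a hypothetical key:
#   original key : "encoders.0.level_blocks.1.model.2.3.weight"
#   groups       : ("0", "1", "2", "3", "weight"); block_index = 2 * 2 + 3 = 7
#   renamed key  : "encoders.0.level_blocks.1.downsample_block.7.weight"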
@torch.no_grad()
def convert_openai_checkpoint(model_name=None, pytorch_dump_folder_path=None):
    """
    Download the original Jukebox checkpoints if needed, rename their keys, and
    load them into the Hugging Face JukeboxModel layout.
    """
    for file in MODEL_MAPPING[model_name]:
        if not os.path.isfile(f"{pytorch_dump_folder_path}/{file.split('/')[-1]}"):
            r = requests.get(f"{PREFIX}{file}", allow_redirects=True)
            os.makedirs(f"{pytorch_dump_folder_path}/", exist_ok=True)
            open(f"{pytorch_dump_folder_path}/{file.split('/')[-1]}", "wb").write(r.content)

    model_to_convert = MODEL_MAPPING[model_name.split("/")[-1]]

    config = JukeboxConfig.from_pretrained(model_name)
    model = JukeboxModel(config)

    weight_dict = []
    mapping = {}
    for i, dict_name in enumerate(model_to_convert):
        old_dic = torch.load(f"{pytorch_dump_folder_path}/{dict_name.split('/')[-1]}")["model"]

        new_dic = {}
        for k in old_dic.keys():
            if k.endswith(".b"):
                new_dic[k.replace("b", "bias")] = old_dic[k]
            elif k.endswith(".w"):
                new_dic[k.replace("w", "weight")] = old_dic[k]
            elif "level_2" not in dict_name and "cond.model." in k:
                new_dic[k.replace(".blocks.", ".model.")] = old_dic[k]
            else:
                new_dic[k] = old_dic[k]

        key_prefix = "vqvae" if i == 0 else f"priors.{3 - i}"
        new_dic = fix_jukebox_keys(new_dic, model.state_dict(), key_prefix, mapping)
        weight_dict.append(new_dic)

    vqvae_state_dict = weight_dict.pop(0)
    model.vqvae.load_state_dict(vqvae_state_dict)
    for i in range(len(weight_dict)):
        model.priors[i].load_state_dict(weight_dict[2 - i])

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    with open(f"{pytorch_dump_folder_path}/mapping.json", "w") as txtfile:
        json.dump(mapping, txtfile)

    print(f"Saving model {model_name} to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)

    return weight_dict
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--model_name",
        default="jukebox-5b-lyrics",
        type=str,
        help="Name of the model you'd like to convert.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path",
        default="jukebox-5b-lyrics-converted",
        type=str,
        help="Path to the output PyTorch model directory.",
    )
    args = parser.parse_args()
    convert_openai_checkpoint(args.model_name, args.pytorch_dump_folder_path)
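    # Example invocation (the script file name is illustrative):
    #   python convert_jukebox.py --model_name jukebox-1b-lyrics \
    #       --pytorch_dump_folder_path jukebox-1b-lyrics-converted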
| 460 | 1 |