| code | code_codestyle | style_context | style_context_codestyle | label |
|---|---|---|---|---|
| stringlengths 86–54.5k | int64 0–371 | stringlengths 87–49.2k | int64 0–349 | int64 0–1 |
"""simple docstring"""
import warnings
from ...utils import logging
from .image_processing_owlvit import OwlViTImageProcessor
_A = logging.get_logger(__name__)
class lowerCamelCase ( lowerCAmelCase__ ):
'''simple docstring'''
def __init__(self , *_lowerCamelCase , **_lowerCamelCase ):
"""simple docstring"""
warnings.warn(
"""The class OwlViTFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"""
""" use OwlViTImageProcessor instead.""" , _lowerCamelCase , )
super().__init__(*_lowerCamelCase , **_lowerCamelCase )
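A brief sketch of the shim's observable behavior, assuming `OwlViTFeatureExtractor` is still exported from the top-level `transformers` namespace (true for v4.x; treat the import path as an assumption): construction succeeds and returns a fully functional image processor, but a `FutureWarning` is emitted first.

```python
import warnings

from transformers import OwlViTFeatureExtractor  # assumed top-level export

with warnings.catch_warnings(record=True) as caught:
    warnings.simplefilter("always")
    extractor = OwlViTFeatureExtractor()  # still behaves like OwlViTImageProcessor
assert any(issubclass(w.category, FutureWarning) for w in caught)
```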
"""simple docstring"""
from __future__ import annotations
def a__ ( lowerCAmelCase , lowerCAmelCase = None , lowerCAmelCase = None ) -> None:
if start is None:
UpperCAmelCase__ : Dict = 0
if end is None:
UpperCAmelCase__ : List[str] = len(lowerCAmelCase ) - 1
if start >= end:
return
UpperCAmelCase__ : int = (start + end) // 2
slowsort(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase )
slowsort(lowerCAmelCase , mid + 1 , lowerCAmelCase )
if sequence[end] < sequence[mid]:
UpperCAmelCase__ , UpperCAmelCase__ : Tuple = sequence[mid], sequence[end]
slowsort(lowerCAmelCase , lowerCAmelCase , end - 1 )
if __name__ == "__main__":
from doctest import testmod
testmod()
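A quick way to see why slowsort "multiplies and surrenders": its recurrence is roughly T(n) = 2·T(n/2) + T(n-1) + O(1), which grows faster than any polynomial. A small instrumented run (a hypothetical helper, not part of the module above) makes the blow-up visible:

```python
calls = 0

def counting_slowsort(seq, start=0, end=None):
    global calls
    calls += 1
    if end is None:
        end = len(seq) - 1
    if start >= end:
        return
    mid = (start + end) // 2
    counting_slowsort(seq, start, mid)
    counting_slowsort(seq, mid + 1, end)
    if seq[end] < seq[mid]:
        seq[end], seq[mid] = seq[mid], seq[end]
    counting_slowsort(seq, start, end - 1)

for n in (4, 8, 12, 16):
    calls = 0
    counting_slowsort(list(range(n, 0, -1)))
    print(n, calls)  # call counts grow super-polynomially in n
```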
import os
import tempfile
import unittest

import numpy as np

from diffusers.utils import is_flax_available
from diffusers.utils.testing_utils import require_flax, slow


if is_flax_available():
    import jax
    import jax.numpy as jnp
    from flax.jax_utils import replicate
    from flax.training.common_utils import shard

    from diffusers import FlaxDDIMScheduler, FlaxDiffusionPipeline, FlaxStableDiffusionPipeline


@require_flax
class DownloadTests(unittest.TestCase):
    def test_download_only_pytorch(self):
        with tempfile.TemporaryDirectory() as tmpdirname:
            # pipeline has Flax weights
            _ = FlaxDiffusionPipeline.from_pretrained(
                "hf-internal-testing/tiny-stable-diffusion-pipe", safety_checker=None, cache_dir=tmpdirname
            )

            all_root_files = [t[-1] for t in os.walk(os.path.join(tmpdirname, os.listdir(tmpdirname)[0], "snapshots"))]
            files = [item for sublist in all_root_files for item in sublist]

            # None of the downloaded files should be a PyTorch file even if we have some here:
            # https://huggingface.co/hf-internal-testing/tiny-stable-diffusion-pipe/blob/main/unet/diffusion_pytorch_model.bin
            assert not any(f.endswith(".bin") for f in files)


@slow
@require_flax
class FlaxPipelineTests(unittest.TestCase):
    def test_dummy_all_tpus(self):
        pipeline, params = FlaxStableDiffusionPipeline.from_pretrained(
            "hf-internal-testing/tiny-stable-diffusion-pipe", safety_checker=None
        )

        prompt = (
            "A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of"
            " field, close up, split lighting, cinematic"
        )

        prng_seed = jax.random.PRNGKey(0)
        num_inference_steps = 4

        num_samples = jax.device_count()
        prompt = num_samples * [prompt]

        prompt_ids = pipeline.prepare_inputs(prompt)

        # shard inputs and rng
        params = replicate(params)
        prng_seed = jax.random.split(prng_seed, num_samples)
        prompt_ids = shard(prompt_ids)

        images = pipeline(prompt_ids, params, prng_seed, num_inference_steps, jit=True).images
        assert images.shape == (num_samples, 1, 64, 64, 3)
        if jax.device_count() == 8:
            assert np.abs(np.abs(images[0, 0, :2, :2, -2:], dtype=np.float32).sum() - 4.1514745) < 1e-3
            assert np.abs(np.abs(images, dtype=np.float32).sum() - 49947.875) < 5e-1

        images_pil = pipeline.numpy_to_pil(np.asarray(images.reshape((num_samples,) + images.shape[-3:])))
        assert len(images_pil) == num_samples

    def test_stable_diffusion_v1_4(self):
        pipeline, params = FlaxStableDiffusionPipeline.from_pretrained(
            "CompVis/stable-diffusion-v1-4", revision="flax", safety_checker=None
        )

        prompt = (
            "A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of"
            " field, close up, split lighting, cinematic"
        )

        prng_seed = jax.random.PRNGKey(0)
        num_inference_steps = 50

        num_samples = jax.device_count()
        prompt = num_samples * [prompt]

        prompt_ids = pipeline.prepare_inputs(prompt)

        # shard inputs and rng
        params = replicate(params)
        prng_seed = jax.random.split(prng_seed, num_samples)
        prompt_ids = shard(prompt_ids)

        images = pipeline(prompt_ids, params, prng_seed, num_inference_steps, jit=True).images
        assert images.shape == (num_samples, 1, 512, 512, 3)
        if jax.device_count() == 8:
            assert np.abs(np.abs(images[0, 0, :2, :2, -2:], dtype=np.float32).sum() - 0.05652401) < 1e-3
            assert np.abs(np.abs(images, dtype=np.float32).sum() - 2383808.2) < 5e-1

    def test_stable_diffusion_v1_4_bfloat_16(self):
        pipeline, params = FlaxStableDiffusionPipeline.from_pretrained(
            "CompVis/stable-diffusion-v1-4", revision="bf16", dtype=jnp.bfloat16, safety_checker=None
        )

        prompt = (
            "A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of"
            " field, close up, split lighting, cinematic"
        )

        prng_seed = jax.random.PRNGKey(0)
        num_inference_steps = 50

        num_samples = jax.device_count()
        prompt = num_samples * [prompt]

        prompt_ids = pipeline.prepare_inputs(prompt)

        # shard inputs and rng
        params = replicate(params)
        prng_seed = jax.random.split(prng_seed, num_samples)
        prompt_ids = shard(prompt_ids)

        images = pipeline(prompt_ids, params, prng_seed, num_inference_steps, jit=True).images
        assert images.shape == (num_samples, 1, 512, 512, 3)
        if jax.device_count() == 8:
            assert np.abs(np.abs(images[0, 0, :2, :2, -2:], dtype=np.float32).sum() - 0.04003906) < 1e-3
            assert np.abs(np.abs(images, dtype=np.float32).sum() - 2373516.75) < 5e-1

    def test_stable_diffusion_v1_4_bfloat_16_with_safety(self):
        pipeline, params = FlaxStableDiffusionPipeline.from_pretrained(
            "CompVis/stable-diffusion-v1-4", revision="bf16", dtype=jnp.bfloat16
        )

        prompt = (
            "A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of"
            " field, close up, split lighting, cinematic"
        )

        prng_seed = jax.random.PRNGKey(0)
        num_inference_steps = 50

        num_samples = jax.device_count()
        prompt = num_samples * [prompt]

        prompt_ids = pipeline.prepare_inputs(prompt)

        # shard inputs and rng
        params = replicate(params)
        prng_seed = jax.random.split(prng_seed, num_samples)
        prompt_ids = shard(prompt_ids)

        images = pipeline(prompt_ids, params, prng_seed, num_inference_steps, jit=True).images
        assert images.shape == (num_samples, 1, 512, 512, 3)
        if jax.device_count() == 8:
            assert np.abs(np.abs(images[0, 0, :2, :2, -2:], dtype=np.float32).sum() - 0.04003906) < 1e-3
            assert np.abs(np.abs(images, dtype=np.float32).sum() - 2373516.75) < 5e-1

    def test_stable_diffusion_v1_4_bfloat_16_ddim(self):
        scheduler = FlaxDDIMScheduler(
            beta_start=0.00085,
            beta_end=0.012,
            beta_schedule="scaled_linear",
            set_alpha_to_one=False,
            steps_offset=1,
        )

        pipeline, params = FlaxStableDiffusionPipeline.from_pretrained(
            "CompVis/stable-diffusion-v1-4",
            revision="bf16",
            dtype=jnp.bfloat16,
            scheduler=scheduler,
            safety_checker=None,
        )
        scheduler_state = scheduler.create_state()

        params["scheduler"] = scheduler_state

        prompt = (
            "A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of"
            " field, close up, split lighting, cinematic"
        )

        prng_seed = jax.random.PRNGKey(0)
        num_inference_steps = 50

        num_samples = jax.device_count()
        prompt = num_samples * [prompt]

        prompt_ids = pipeline.prepare_inputs(prompt)

        # shard inputs and rng
        params = replicate(params)
        prng_seed = jax.random.split(prng_seed, num_samples)
        prompt_ids = shard(prompt_ids)

        images = pipeline(prompt_ids, params, prng_seed, num_inference_steps, jit=True).images
        assert images.shape == (num_samples, 1, 512, 512, 3)
        if jax.device_count() == 8:
            assert np.abs(np.abs(images[0, 0, :2, :2, -2:], dtype=np.float32).sum() - 0.045043945) < 1e-3
            assert np.abs(np.abs(images, dtype=np.float32).sum() - 2347693.5) < 5e-1

    def test_jax_memory_efficient_attention(self):
        prompt = (
            "A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of"
            " field, close up, split lighting, cinematic"
        )

        num_samples = jax.device_count()
        prompt = num_samples * [prompt]
        prng_seed = jax.random.split(jax.random.PRNGKey(0), num_samples)

        pipeline, params = FlaxStableDiffusionPipeline.from_pretrained(
            "CompVis/stable-diffusion-v1-4",
            revision="bf16",
            dtype=jnp.bfloat16,
            safety_checker=None,
        )

        params = replicate(params)
        prompt_ids = pipeline.prepare_inputs(prompt)
        prompt_ids = shard(prompt_ids)
        images = pipeline(prompt_ids, params, prng_seed, jit=True).images
        assert images.shape == (num_samples, 1, 512, 512, 3)
        slice = images[2, 0, 256, 10:17, 1]

        # With memory efficient attention
        pipeline, params = FlaxStableDiffusionPipeline.from_pretrained(
            "CompVis/stable-diffusion-v1-4",
            revision="bf16",
            dtype=jnp.bfloat16,
            safety_checker=None,
            use_memory_efficient_attention=True,
        )

        params = replicate(params)
        prompt_ids = pipeline.prepare_inputs(prompt)
        prompt_ids = shard(prompt_ids)
        images_eff = pipeline(prompt_ids, params, prng_seed, jit=True).images
        assert images_eff.shape == (num_samples, 1, 512, 512, 3)
        slice_eff = images_eff[2, 0, 256, 10:17, 1]  # fixed: compare the new images, not the original ones

        # I checked the results visually and they are very similar. However, I saw that the max diff is `1` and the `sum`
        # over the 8 images is exactly `256`, which is very suspicious. Testing a random slice for now.
        assert abs(slice_eff - slice).max() < 1e-2
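All of the slow tests above repeat one data-parallel idiom: parameters are `replicate`d across devices, inputs are `shard`ed along the batch axis, and the PRNG key is `split` into one key per device so each device denoises with different noise. A minimal standalone sketch of that idiom with toy shapes and no diffusers dependency (the `forward` function is a stand-in for the pipeline):

```python
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard

num_devices = jax.device_count()

params = {"w": jnp.ones((4, 4))}        # stand-in for pipeline params
batch = jnp.ones((num_devices * 2, 4))  # global batch, divisible by device count

p_params = replicate(params)            # adds a leading device axis to every leaf
p_batch = shard(batch)                  # (num_devices, 2, 4)
rngs = jax.random.split(jax.random.PRNGKey(0), num_devices)  # one key per device


@jax.pmap
def forward(params, x, rng):
    noise = jax.random.normal(rng, x.shape)
    return x @ params["w"] + noise


out = forward(p_params, p_batch, rngs)  # (num_devices, 2, 4)
```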
import argparse
import hashlib
import io
import os
import urllib.request
import warnings

import torch
from torch import nn
from tqdm import tqdm

from transformers import WhisperConfig, WhisperForConditionalGeneration


_MODELS = {
    "tiny.en": "https://openaipublic.azureedge.net/main/whisper/models/d3dd57d32accea0b295c96e26691aa14d8822fac7d9d27d5dc00b4ca2826dd03/tiny.en.pt",
    "tiny": "https://openaipublic.azureedge.net/main/whisper/models/65147644a518d12f04e32d6f3b26facc3f8dd46e5390956a9424a650c0ce22b9/tiny.pt",
    "base.en": "https://openaipublic.azureedge.net/main/whisper/models/25a8566e1d0c1e2231d1c762132cd20e0f96a85d16145c3a00adf5d1ac670ead/base.en.pt",
    "base": "https://openaipublic.azureedge.net/main/whisper/models/ed3a0b6b1c0edf879ad9b11b1af5a0e6ab5db9205f891f668f8b0e6c6326e34e/base.pt",
    "small.en": "https://openaipublic.azureedge.net/main/whisper/models/f953ad0fd29cacd07d5a9eda5624af0f6bcf2258be67c92b79389873d91e0872/small.en.pt",
    "small": "https://openaipublic.azureedge.net/main/whisper/models/9ecf779972d90ba49c06d968637d720dd632c55bbf19d441fb42bf17a411e794/small.pt",
    "medium.en": "https://openaipublic.azureedge.net/main/whisper/models/d7440d1dc186f76616474e0ff0b3b6b879abc9d1a4926b7adfa41db2d497ab4f/medium.en.pt",
    "medium": "https://openaipublic.azureedge.net/main/whisper/models/345ae4da62f9b3d59415adc60127b97c714f32e89e936602e85993674d08dcb1/medium.pt",
    "large": "https://openaipublic.azureedge.net/main/whisper/models/e4b87e7e0bf463eb8e6956e646f1e277e901512310def2c24bf0e11bd3c28e9a/large.pt",
    "large-v2": "https://openaipublic.azureedge.net/main/whisper/models/81f7c96c852ee8fc832187b0132e569d6c3065a3252ed18e56effd0b6a73e524/large-v2.pt",
}


def remove_ignore_keys_(state_dict):
    ignore_keys = ["layers", "blocks"]
    for k in ignore_keys:
        state_dict.pop(k, None)


WHISPER_MAPPING = {
    "blocks": "layers",
    "mlp.0": "fc1",
    "mlp.2": "fc2",
    "mlp_ln": "final_layer_norm",
    ".attn.query": ".self_attn.q_proj",
    ".attn.key": ".self_attn.k_proj",
    ".attn.value": ".self_attn.v_proj",
    ".attn_ln": ".self_attn_layer_norm",
    ".attn.out": ".self_attn.out_proj",
    ".cross_attn.query": ".encoder_attn.q_proj",
    ".cross_attn.key": ".encoder_attn.k_proj",
    ".cross_attn.value": ".encoder_attn.v_proj",
    ".cross_attn_ln": ".encoder_attn_layer_norm",
    ".cross_attn.out": ".encoder_attn.out_proj",
    "decoder.ln.": "decoder.layer_norm.",
    "encoder.ln.": "encoder.layer_norm.",
    "token_embedding": "embed_tokens",
    "encoder.positional_embedding": "encoder.embed_positions.weight",
    "decoder.positional_embedding": "decoder.embed_positions.weight",
    "ln_post": "layer_norm",
}


def rename_keys(s_dict):
    keys = list(s_dict.keys())
    for key in keys:
        new_key = key
        for k, v in WHISPER_MAPPING.items():
            if k in key:
                new_key = new_key.replace(k, v)

        print(f"{key} -> {new_key}")

        s_dict[new_key] = s_dict.pop(key)
    return s_dict


def make_linear_from_emb(emb):
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer


def _download(url: str, root: str = os.path.join(os.path.expanduser("~"), ".cache", "whisper")) -> bytes:
    # A default for `root` is added so the one-argument call below works; the snapshot
    # declared a second parameter but never passed it.
    os.makedirs(root, exist_ok=True)
    filename = os.path.basename(url)

    expected_sha256 = url.split("/")[-2]
    download_target = os.path.join(root, filename)

    if os.path.exists(download_target) and not os.path.isfile(download_target):
        raise RuntimeError(f"{download_target} exists and is not a regular file")

    if os.path.isfile(download_target):
        model_bytes = open(download_target, "rb").read()
        if hashlib.sha256(model_bytes).hexdigest() == expected_sha256:
            return model_bytes
        else:
            warnings.warn(f"{download_target} exists, but the SHA256 checksum does not match; re-downloading the file")

    with urllib.request.urlopen(url) as source, open(download_target, "wb") as output:
        with tqdm(
            total=int(source.info().get("Content-Length")), ncols=80, unit="iB", unit_scale=True, unit_divisor=1024
        ) as loop:
            while True:
                buffer = source.read(8192)
                if not buffer:
                    break

                output.write(buffer)
                loop.update(len(buffer))

    model_bytes = open(download_target, "rb").read()
    if hashlib.sha256(model_bytes).hexdigest() != expected_sha256:
        raise RuntimeError(
            "Model has been downloaded but the SHA256 checksum does not match. Please retry loading the model."
        )

    return model_bytes


def convert_openai_whisper_to_tfms(checkpoint_path, pytorch_dump_folder_path):
    if ".pt" not in checkpoint_path:
        # _download returns raw bytes, so deserialize them before indexing into the checkpoint
        original_checkpoint = torch.load(io.BytesIO(_download(_MODELS[checkpoint_path])), map_location="cpu")
    else:
        original_checkpoint = torch.load(checkpoint_path, map_location="cpu")
    dimensions = original_checkpoint["dims"]
    state_dict = original_checkpoint["model_state_dict"]
    proj_out_weights = state_dict["decoder.token_embedding.weight"]
    remove_ignore_keys_(state_dict)
    rename_keys(state_dict)
    tie_embeds = True
    ffn_dim = state_dict["decoder.layers.0.fc1.weight"].shape[0]

    config = WhisperConfig(
        vocab_size=dimensions["n_vocab"],
        encoder_ffn_dim=ffn_dim,
        decoder_ffn_dim=ffn_dim,
        num_mel_bins=dimensions["n_mels"],
        d_model=dimensions["n_audio_state"],
        max_target_positions=dimensions["n_text_ctx"],
        encoder_layers=dimensions["n_audio_layer"],
        encoder_attention_heads=dimensions["n_audio_head"],
        decoder_layers=dimensions["n_text_layer"],
        decoder_attention_heads=dimensions["n_text_head"],  # was n_text_state, which is the hidden size, not the head count
        max_source_positions=dimensions["n_audio_ctx"],
    )

    model = WhisperForConditionalGeneration(config)
    missing, unexpected = model.model.load_state_dict(state_dict, strict=False)
    if len(missing) > 0 and not set(missing) <= {
        "encoder.embed_positions.weights",
        "decoder.embed_positions.weights",
    }:
        raise ValueError(
            "Only `encoder.embed_positions.weights` and `decoder.embed_positions.weights` are allowed to be missing,"
            f" but all the following weights are missing {missing}"
        )

    if tie_embeds:
        model.proj_out = make_linear_from_emb(model.model.decoder.embed_tokens)
    else:
        model.proj_out.weight.data = proj_out_weights

    model.save_pretrained(pytorch_dump_folder_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument("--checkpoint_path", type=str, help="Path to the downloaded checkpoints")
    parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    args = parser.parse_args()

    convert_openai_whisper_to_tfms(args.checkpoint_path, args.pytorch_dump_folder_path)
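For reference, a hypothetical way to drive this script (the filename is an assumption; any `_MODELS` key, or a path to a local `.pt` checkpoint, is accepted):

```python
# From a shell (assumed filename):
#   python convert_openai_to_hf.py --checkpoint_path tiny --pytorch_dump_folder_path ./whisper-tiny
# Or as a library call with the same effect; "tiny" is downloaded and SHA256-verified first:
convert_openai_whisper_to_tfms("tiny", "./whisper-tiny")
```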
"""simple docstring"""
import collections
import tempfile
import unittest
import numpy as np
from transformers.testing_utils import (
is_pt_flax_cross_test,
require_flax,
require_torch,
require_vision,
slow,
torch_device,
)
from transformers.utils import is_flax_available, is_torch_available, is_vision_available
from ...test_modeling_flax_common import floats_tensor, ids_tensor, random_attention_mask
from ..bert.test_modeling_flax_bert import FlaxBertModelTester
from ..clip.test_modeling_flax_clip import FlaxCLIPVisionModelTester
from ..vit.test_modeling_flax_vit import FlaxViTModelTester
if is_flax_available():
from transformers import (
FlaxBertModel,
FlaxCLIPVisionModel,
FlaxVisionTextDualEncoderModel,
FlaxViTModel,
VisionTextDualEncoderConfig,
VisionTextDualEncoderProcessor,
)
from transformers.modeling_flax_pytorch_utils import (
convert_pytorch_state_dict_to_flax,
load_flax_weights_in_pytorch_model,
)
if is_torch_available():
import torch
from transformers import VisionTextDualEncoderModel
if is_vision_available():
from PIL import Image
def snake_case_ ( A_ : Optional[int] ):
'''simple docstring'''
if isinstance(A_, collections.abc.Iterable ):
return x
return (x, x)
@require_flax
class __snake_case :
def SCREAMING_SNAKE_CASE ( self : Dict , __lowerCAmelCase : int , __lowerCAmelCase : str ):
"""simple docstring"""
pass
def SCREAMING_SNAKE_CASE ( self : Dict ):
"""simple docstring"""
pass
def SCREAMING_SNAKE_CASE ( self : List[Any] ):
"""simple docstring"""
pass
def SCREAMING_SNAKE_CASE ( self : Any , __lowerCAmelCase : np.ndarray , __lowerCAmelCase : np.ndarray , __lowerCAmelCase : float ):
"""simple docstring"""
_lowerCamelCase : List[Any] = np.abs((a - b) ).max()
self.assertLessEqual(__lowerCAmelCase , __lowerCAmelCase , f'''Difference between torch and flax is {diff} (>= {tol}).''' )
def SCREAMING_SNAKE_CASE ( self : Optional[int] , __lowerCAmelCase : Any , __lowerCAmelCase : Optional[int] , __lowerCAmelCase : str , __lowerCAmelCase : Dict , __lowerCAmelCase : int=None , **__lowerCAmelCase : str ):
"""simple docstring"""
_lowerCamelCase : Optional[Any] = VisionTextDualEncoderConfig.from_vision_text_configs(__lowerCAmelCase , __lowerCAmelCase )
_lowerCamelCase : Optional[Any] = FlaxVisionTextDualEncoderModel(__lowerCAmelCase )
_lowerCamelCase : Union[str, Any] = model(input_ids=__lowerCAmelCase , pixel_values=__lowerCAmelCase , attention_mask=__lowerCAmelCase )
self.assertEqual(output['''text_embeds'''].shape , (input_ids.shape[0], config.projection_dim) )
self.assertEqual(output['''image_embeds'''].shape , (pixel_values.shape[0], config.projection_dim) )
def SCREAMING_SNAKE_CASE ( self : int , __lowerCAmelCase : Any , __lowerCAmelCase : Any , __lowerCAmelCase : List[Any] , __lowerCAmelCase : str , __lowerCAmelCase : Any=None , **__lowerCAmelCase : Union[str, Any] ):
"""simple docstring"""
_lowerCamelCase , _lowerCamelCase : Any = self.get_vision_text_model(__lowerCAmelCase , __lowerCAmelCase )
_lowerCamelCase : Union[str, Any] = {'''vision_model''': vision_model, '''text_model''': text_model}
_lowerCamelCase : Tuple = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(**__lowerCAmelCase )
_lowerCamelCase : Union[str, Any] = model(input_ids=__lowerCAmelCase , pixel_values=__lowerCAmelCase , attention_mask=__lowerCAmelCase )
self.assertEqual(output['''text_embeds'''].shape , (input_ids.shape[0], model.config.projection_dim) )
self.assertEqual(output['''image_embeds'''].shape , (pixel_values.shape[0], model.config.projection_dim) )
def SCREAMING_SNAKE_CASE ( self : Optional[Any] , __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : str , __lowerCAmelCase : Tuple , __lowerCAmelCase : Optional[int] , __lowerCAmelCase : Optional[int]=None , **__lowerCAmelCase : Dict ):
"""simple docstring"""
_lowerCamelCase , _lowerCamelCase : Optional[int] = self.get_vision_text_model(__lowerCAmelCase , __lowerCAmelCase )
_lowerCamelCase : List[Any] = {'''vision_model''': vision_model, '''text_model''': text_model}
_lowerCamelCase : Any = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(**__lowerCAmelCase )
_lowerCamelCase : Optional[int] = model(input_ids=__lowerCAmelCase , pixel_values=__lowerCAmelCase , attention_mask=__lowerCAmelCase )
_lowerCamelCase : Dict = output[0]
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(__lowerCAmelCase )
_lowerCamelCase : Optional[int] = FlaxVisionTextDualEncoderModel.from_pretrained(__lowerCAmelCase )
_lowerCamelCase : Any = model(input_ids=__lowerCAmelCase , pixel_values=__lowerCAmelCase , attention_mask=__lowerCAmelCase )
_lowerCamelCase : Optional[Any] = after_output[0]
_lowerCamelCase : Union[str, Any] = np.amax(np.abs(out_a - out_a ) )
self.assertLessEqual(__lowerCAmelCase , 1E-3 )
def SCREAMING_SNAKE_CASE ( self : Optional[int] , __lowerCAmelCase : List[Any] , __lowerCAmelCase : Tuple , __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : Tuple , __lowerCAmelCase : Union[str, Any]=None , **__lowerCAmelCase : int ):
"""simple docstring"""
_lowerCamelCase , _lowerCamelCase : Dict = self.get_vision_text_model(__lowerCAmelCase , __lowerCAmelCase )
_lowerCamelCase : Optional[Any] = {'''vision_model''': vision_model, '''text_model''': text_model}
_lowerCamelCase : Any = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(**__lowerCAmelCase )
_lowerCamelCase : Any = model(
input_ids=__lowerCAmelCase , pixel_values=__lowerCAmelCase , attention_mask=__lowerCAmelCase , output_attentions=__lowerCAmelCase )
_lowerCamelCase : str = output.vision_model_output.attentions
self.assertEqual(len(__lowerCAmelCase ) , vision_config.num_hidden_layers )
# in ViT, the seq_len equals the number of patches + 1 (we add 1 for the [CLS] token)
_lowerCamelCase : str = to_atuple(vision_model.config.image_size )
_lowerCamelCase : List[Any] = to_atuple(vision_model.config.patch_size )
_lowerCamelCase : Dict = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
_lowerCamelCase : Optional[int] = num_patches + 1
self.assertEqual(vision_attentions[0].shape[-3:] , (vision_config.num_attention_heads, seq_len, seq_len) )
_lowerCamelCase : Optional[Any] = output.text_model_output.attentions
self.assertEqual(len(__lowerCAmelCase ) , text_config.num_hidden_layers )
self.assertEqual(
text_attentions[0].shape[-3:] , (text_config.num_attention_heads, input_ids.shape[-1], input_ids.shape[-1]) , )
def SCREAMING_SNAKE_CASE ( self : List[Any] , __lowerCAmelCase : int , __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : str ):
"""simple docstring"""
pt_model.to(__lowerCAmelCase )
pt_model.eval()
# prepare inputs
_lowerCamelCase : str = inputs_dict
_lowerCamelCase : Optional[int] = {k: torch.tensor(v.tolist() ) for k, v in flax_inputs.items()}
with torch.no_grad():
_lowerCamelCase : List[Any] = pt_model(**__lowerCAmelCase ).to_tuple()
_lowerCamelCase : List[Any] = fx_model(**__lowerCAmelCase ).to_tuple()
self.assertEqual(len(__lowerCAmelCase ) , len(__lowerCAmelCase ) , '''Output lengths differ between Flax and PyTorch''' )
for fx_output, pt_output in zip(fx_outputs[:4] , pt_outputs[:4] ):
self.assert_almost_equals(__lowerCAmelCase , pt_output.numpy() , 4E-2 )
# PT -> Flax
with tempfile.TemporaryDirectory() as tmpdirname:
pt_model.save_pretrained(__lowerCAmelCase )
_lowerCamelCase : int = FlaxVisionTextDualEncoderModel.from_pretrained(__lowerCAmelCase , from_pt=__lowerCAmelCase )
_lowerCamelCase : List[Any] = fx_model_loaded(**__lowerCAmelCase ).to_tuple()
self.assertEqual(len(__lowerCAmelCase ) , len(__lowerCAmelCase ) , '''Output lengths differ between Flax and PyTorch''' )
for fx_output_loaded, pt_output in zip(fx_outputs_loaded[:4] , pt_outputs[:4] ):
self.assert_almost_equals(__lowerCAmelCase , pt_output.numpy() , 4E-2 )
# Flax -> PT
with tempfile.TemporaryDirectory() as tmpdirname:
fx_model.save_pretrained(__lowerCAmelCase )
_lowerCamelCase : List[Any] = VisionTextDualEncoderModel.from_pretrained(__lowerCAmelCase , from_flax=__lowerCAmelCase )
pt_model_loaded.to(__lowerCAmelCase )
pt_model_loaded.eval()
with torch.no_grad():
_lowerCamelCase : Dict = pt_model_loaded(**__lowerCAmelCase ).to_tuple()
self.assertEqual(len(__lowerCAmelCase ) , len(__lowerCAmelCase ) , '''Output lengths differ between Flax and PyTorch''' )
for fx_output, pt_output_loaded in zip(fx_outputs[:4] , pt_outputs_loaded[:4] ):
self.assert_almost_equals(__lowerCAmelCase , pt_output_loaded.numpy() , 4E-2 )
def SCREAMING_SNAKE_CASE ( self : Optional[Any] , __lowerCAmelCase : List[Any] , __lowerCAmelCase : Any , __lowerCAmelCase : Any ):
"""simple docstring"""
_lowerCamelCase : Tuple = VisionTextDualEncoderConfig.from_vision_text_configs(__lowerCAmelCase , __lowerCAmelCase )
_lowerCamelCase : List[Any] = VisionTextDualEncoderModel(__lowerCAmelCase )
_lowerCamelCase : int = FlaxVisionTextDualEncoderModel(__lowerCAmelCase )
_lowerCamelCase : Any = convert_pytorch_state_dict_to_flax(pt_model.state_dict() , __lowerCAmelCase )
_lowerCamelCase : Tuple = fx_state
self.check_pt_flax_equivalence(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
def SCREAMING_SNAKE_CASE ( self : Dict , __lowerCAmelCase : int , __lowerCAmelCase : Dict , __lowerCAmelCase : str ):
"""simple docstring"""
_lowerCamelCase : Union[str, Any] = VisionTextDualEncoderConfig.from_vision_text_configs(__lowerCAmelCase , __lowerCAmelCase )
_lowerCamelCase : List[str] = VisionTextDualEncoderModel(__lowerCAmelCase )
_lowerCamelCase : str = FlaxVisionTextDualEncoderModel(__lowerCAmelCase )
_lowerCamelCase : Optional[Any] = load_flax_weights_in_pytorch_model(__lowerCAmelCase , fx_model.params )
self.check_pt_flax_equivalence(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
def SCREAMING_SNAKE_CASE ( self : str ):
"""simple docstring"""
_lowerCamelCase : Optional[Any] = self.prepare_config_and_inputs()
self.check_model_from_pretrained_configs(**__lowerCAmelCase )
def SCREAMING_SNAKE_CASE ( self : Any ):
"""simple docstring"""
_lowerCamelCase : Union[str, Any] = self.prepare_config_and_inputs()
self.check_vision_text_dual_encoder_from_pretrained(**__lowerCAmelCase )
def SCREAMING_SNAKE_CASE ( self : int ):
"""simple docstring"""
_lowerCamelCase : str = self.prepare_config_and_inputs()
self.check_save_load(**__lowerCAmelCase )
def SCREAMING_SNAKE_CASE ( self : List[Any] ):
"""simple docstring"""
_lowerCamelCase : Tuple = self.prepare_config_and_inputs()
self.check_vision_text_output_attention(**__lowerCAmelCase )
@is_pt_flax_cross_test
def SCREAMING_SNAKE_CASE ( self : List[Any] ):
"""simple docstring"""
_lowerCamelCase : List[str] = self.prepare_config_and_inputs()
_lowerCamelCase : Optional[Any] = config_inputs_dict.pop('''vision_config''' )
_lowerCamelCase : Dict = config_inputs_dict.pop('''text_config''' )
_lowerCamelCase : Tuple = config_inputs_dict
self.check_equivalence_pt_to_flax(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
self.check_equivalence_flax_to_pt(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
@slow
def SCREAMING_SNAKE_CASE ( self : Tuple ):
"""simple docstring"""
_lowerCamelCase , _lowerCamelCase : Optional[int] = self.get_pretrained_model_and_inputs()
_lowerCamelCase : int = model_a(**__lowerCAmelCase )
_lowerCamelCase : List[Any] = outputs[0]
with tempfile.TemporaryDirectory() as tmp_dirname:
model_a.save_pretrained(__lowerCAmelCase )
_lowerCamelCase : Dict = FlaxVisionTextDualEncoderModel.from_pretrained(__lowerCAmelCase )
_lowerCamelCase : List[Any] = model_a(**__lowerCAmelCase )
_lowerCamelCase : Tuple = after_outputs[0]
_lowerCamelCase : Dict = np.amax(np.abs(out_a - out_a ) )
self.assertLessEqual(__lowerCAmelCase , 1E-5 )
@require_flax
class __snake_case ( _lowercase , unittest.TestCase):
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ):
"""simple docstring"""
_lowerCamelCase : Optional[int] = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(
'''hf-internal-testing/tiny-random-vit''' , '''hf-internal-testing/tiny-bert''' , vision_from_pt=__lowerCAmelCase , text_from_pt=__lowerCAmelCase , )
_lowerCamelCase : Optional[Any] = 1_3
_lowerCamelCase : Any = floats_tensor(
[
batch_size,
model.config.vision_config.num_channels,
model.config.vision_config.image_size,
model.config.vision_config.image_size,
] )
_lowerCamelCase : Optional[int] = ids_tensor([batch_size, 4] , model.config.text_config.vocab_size )
_lowerCamelCase : Tuple = random_attention_mask([batch_size, 4] )
_lowerCamelCase : Dict = {'''pixel_values''': pixel_values, '''input_ids''': input_ids, '''attention_mask''': attention_mask}
return model, inputs
def SCREAMING_SNAKE_CASE ( self : Dict , __lowerCAmelCase : Dict , __lowerCAmelCase : Dict ):
"""simple docstring"""
_lowerCamelCase : List[str] = FlaxViTModel(__lowerCAmelCase )
_lowerCamelCase : List[str] = FlaxBertModel(__lowerCAmelCase )
return vision_model, text_model
def SCREAMING_SNAKE_CASE ( self : Optional[int] ):
"""simple docstring"""
_lowerCamelCase : Optional[int] = FlaxViTModelTester(self )
_lowerCamelCase : Optional[Any] = FlaxBertModelTester(self )
_lowerCamelCase : Optional[int] = vit_model_tester.prepare_config_and_inputs()
_lowerCamelCase : Optional[Any] = bert_model_tester.prepare_config_and_inputs()
_lowerCamelCase , _lowerCamelCase : List[Any] = vision_config_and_inputs
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase : int = text_config_and_inputs
# make sure that cross attention layers are added
return {
"text_config": text_config,
"vision_config": vision_config,
"pixel_values": pixel_values,
"attention_mask": attention_mask,
"input_ids": input_ids,
"token_type_ids": token_type_ids,
}
@require_torch
class __snake_case ( _lowercase , unittest.TestCase):
def SCREAMING_SNAKE_CASE ( self : int ):
"""simple docstring"""
_lowerCamelCase : Tuple = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(
'''hf-internal-testing/tiny-random-clip''' , '''hf-internal-testing/tiny-bert''' , vision_from_pt=__lowerCAmelCase , text_from_pt=__lowerCAmelCase , )
_lowerCamelCase : List[str] = 1_3
_lowerCamelCase : Any = floats_tensor(
[
batch_size,
model.config.vision_config.num_channels,
model.config.vision_config.image_size,
model.config.vision_config.image_size,
] )
_lowerCamelCase : List[str] = ids_tensor([batch_size, 4] , model.config.text_config.vocab_size )
_lowerCamelCase : str = random_attention_mask([batch_size, 4] )
_lowerCamelCase : List[Any] = {'''pixel_values''': pixel_values, '''input_ids''': input_ids, '''attention_mask''': attention_mask}
return model, inputs
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] , __lowerCAmelCase : Any , __lowerCAmelCase : Optional[Any] ):
"""simple docstring"""
_lowerCamelCase : List[str] = FlaxCLIPVisionModel(__lowerCAmelCase )
_lowerCamelCase : str = FlaxBertModel(__lowerCAmelCase )
return vision_model, text_model
def SCREAMING_SNAKE_CASE ( self : int ):
"""simple docstring"""
_lowerCamelCase : str = FlaxCLIPVisionModelTester(self )
_lowerCamelCase : str = FlaxBertModelTester(self )
_lowerCamelCase : Dict = clip_model_tester.prepare_config_and_inputs()
_lowerCamelCase : List[Any] = bert_model_tester.prepare_config_and_inputs()
_lowerCamelCase , _lowerCamelCase : Optional[int] = vision_config_and_inputs
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase : Union[str, Any] = text_config_and_inputs
# make sure that cross attention layers are added
return {
"text_config": text_config,
"vision_config": vision_config,
"pixel_values": pixel_values,
"attention_mask": attention_mask,
"input_ids": input_ids,
"token_type_ids": token_type_ids,
}
@require_flax
@require_vision
class __snake_case ( unittest.TestCase):
@slow
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ):
"""simple docstring"""
_lowerCamelCase : Optional[int] = FlaxVisionTextDualEncoderModel.from_pretrained('''clip-italian/clip-italian''' , logit_scale_init_value=1.0 )
_lowerCamelCase : Optional[Any] = VisionTextDualEncoderProcessor.from_pretrained('''clip-italian/clip-italian''' )
_lowerCamelCase : Dict = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
_lowerCamelCase : Union[str, Any] = processor(
text=['''una foto di un gatto''', '''una foto di un cane'''] , images=__lowerCAmelCase , padding=__lowerCAmelCase , return_tensors='''np''' )
_lowerCamelCase : List[str] = model(**__lowerCAmelCase )
# verify the logits
self.assertEqual(outputs.logits_per_image.shape , (inputs.pixel_values.shape[0], inputs.input_ids.shape[0]) )
self.assertEqual(
outputs.logits_per_text.shape , (inputs.input_ids.shape[0], inputs.pixel_values.shape[0]) , )
_lowerCamelCase : List[str] = np.array([[1.2_28_47_27, 0.3_10_41_22]] )
self.assertTrue(np.allclose(outputs.logits_per_image , __lowerCAmelCase , atol=1E-3 ) )
"""Processor class for MGP-STR."""

import warnings

from transformers import AutoTokenizer
from transformers.utils import is_torch_available
from transformers.utils.generic import ExplicitEnum

from ...processing_utils import ProcessorMixin


if is_torch_available():
    import torch


class DecodeType(ExplicitEnum):
    CHARACTER = "char"
    BPE = "bpe"
    WORDPIECE = "wp"


SUPPORTED_ANNOTATION_FORMATS = (DecodeType.CHARACTER, DecodeType.BPE, DecodeType.WORDPIECE)


class MgpstrProcessor(ProcessorMixin):
    """Wraps an image processor and the MGP-STR character tokenizer into a single processor.

    BPE and WordPiece tokenizers are instantiated internally for the model's auxiliary decoding heads.
    """

    attributes = ["image_processor", "char_tokenizer"]
    image_processor_class = "ViTImageProcessor"
    char_tokenizer_class = "MgpstrTokenizer"

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        self.char_tokenizer = tokenizer
        self.bpe_tokenizer = AutoTokenizer.from_pretrained("gpt2")
        self.wp_tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")

        super().__init__(image_processor, tokenizer)

    def __call__(self, text=None, images=None, return_tensors=None, **kwargs):
        if images is None and text is None:
            raise ValueError("You need to specify either an `images` or `text` input to process.")

        if images is not None:
            inputs = self.image_processor(images, return_tensors=return_tensors, **kwargs)
        if text is not None:
            encodings = self.char_tokenizer(text, return_tensors=return_tensors, **kwargs)

        if text is None:
            return inputs
        elif images is None:
            return encodings
        else:
            inputs["labels"] = encodings["input_ids"]
            return inputs

    def batch_decode(self, sequences):
        # `sequences` holds the logits of the three decoding heads; for each sample we keep
        # the decoded string of the head with the highest confidence score.
        char_preds, bpe_preds, wp_preds = sequences
        batch_size = char_preds.size(0)

        char_strs, char_scores = self._decode_helper(char_preds, "char")
        bpe_strs, bpe_scores = self._decode_helper(bpe_preds, "bpe")
        wp_strs, wp_scores = self._decode_helper(wp_preds, "wp")

        final_strs = []
        final_scores = []
        for i in range(batch_size):
            scores = [char_scores[i], bpe_scores[i], wp_scores[i]]
            strs = [char_strs[i], bpe_strs[i], wp_strs[i]]
            max_score_index = scores.index(max(scores))
            final_strs.append(strs[max_score_index])
            final_scores.append(scores[max_score_index])

        out = {}
        out["generated_text"] = final_strs
        out["scores"] = final_scores
        out["char_preds"] = char_strs
        out["bpe_preds"] = bpe_strs
        out["wp_preds"] = wp_strs
        return out

    def _decode_helper(self, pred_logits, format):
        if format == DecodeType.CHARACTER:
            decoder = self.char_decode
            eos_token = 1  # char eos token id
            eos_str = "[s]"
        elif format == DecodeType.BPE:
            decoder = self.bpe_decode
            eos_token = 2  # bpe eos token id
            eos_str = "#"
        elif format == DecodeType.WORDPIECE:
            decoder = self.wp_decode
            eos_token = 102  # wordpiece eos token id
            eos_str = "[SEP]"
        else:
            raise ValueError(f"Format {format} is not supported.")

        dec_strs, conf_scores = [], []
        batch_size = pred_logits.size(0)
        batch_max_length = pred_logits.size(1)
        _, preds_index = pred_logits.topk(1, dim=-1, largest=True, sorted=True)
        preds_index = preds_index.view(-1, batch_max_length)[:, 1:]
        preds_str = decoder(preds_index)
        preds_max_prob, _ = torch.nn.functional.softmax(pred_logits, dim=2).max(dim=2)
        preds_max_prob = preds_max_prob[:, 1:]

        for index in range(batch_size):
            pred_eos = preds_str[index].find(eos_str)
            pred = preds_str[index][:pred_eos]
            pred_index = preds_index[index].cpu().tolist()
            pred_eos_index = pred_index.index(eos_token) if eos_token in pred_index else -1
            pred_max_prob = preds_max_prob[index][: pred_eos_index + 1]
            # confidence of a prediction is the product of per-step max probabilities
            confidence_score = pred_max_prob.cumprod(dim=0)[-1] if pred_max_prob.nelement() != 0 else 0.0
            dec_strs.append(pred)
            conf_scores.append(confidence_score)

        return dec_strs, conf_scores

    def char_decode(self, sequences):
        decode_strs = [seq.replace(" ", "") for seq in self.char_tokenizer.batch_decode(sequences)]
        return decode_strs

    def bpe_decode(self, sequences):
        return self.bpe_tokenizer.batch_decode(sequences)

    def wp_decode(self, sequences):
        decode_strs = [seq.replace(" ", "") for seq in self.wp_tokenizer.batch_decode(sequences)]
        return decode_strs
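A hypothetical end-to-end sketch of the processor around the MGP-STR model, following the pattern in the Transformers docs (the checkpoint name `alibaba-damo/mgp-str-base` and the sample image URL are assumptions taken from those docs):

```python
import requests
from PIL import Image

from transformers import MgpstrForSceneTextRecognition, MgpstrProcessor

processor = MgpstrProcessor.from_pretrained("alibaba-damo/mgp-str-base")
model = MgpstrForSceneTextRecognition.from_pretrained("alibaba-damo/mgp-str-base")

# a cropped image containing a single word
url = "https://i.postimg.cc/ZKwLg2Gw/367-14.png"
image = Image.open(requests.get(url, stream=True).raw).convert("RGB")

pixel_values = processor(images=image, return_tensors="pt").pixel_values
outputs = model(pixel_values)

# batch_decode picks, per sample, the head (char/bpe/wp) with the highest confidence
generated = processor.batch_decode(outputs.logits)
print(generated["generated_text"])
```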
from __future__ import absolute_import, division, print_function, unicode_literals

from torch import nn
from torch.nn import CrossEntropyLoss, MSELoss

from transformers import RobertaConfig
from transformers.file_utils import add_start_docstrings, add_start_docstrings_to_model_forward
from transformers.models.roberta.modeling_roberta import (
    ROBERTA_INPUTS_DOCSTRING,
    ROBERTA_START_DOCSTRING,
    RobertaEmbeddings,
)

from .modeling_highway_bert import BertPreTrainedModel, DeeBertModel, HighwayException, entropy


@add_start_docstrings(
    "The RoBERTa Model transformer with early exiting (DeeRoBERTa). ",
    ROBERTA_START_DOCSTRING,
)
class DeeRobertaModel(DeeBertModel):
    config_class = RobertaConfig
    base_model_prefix = "roberta"

    def __init__(self, config):
        super().__init__(config)

        self.embeddings = RobertaEmbeddings(config)
        self.init_weights()


@add_start_docstrings(
    """RoBERTa Model (with early exiting - DeeRoBERTa) with a classifier on top,
    also takes care of multi-layer training. """,
    ROBERTA_START_DOCSTRING,
)
class DeeRobertaForSequenceClassification(BertPreTrainedModel):
    config_class = RobertaConfig
    base_model_prefix = "roberta"

    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels
        self.num_layers = config.num_hidden_layers

        self.roberta = DeeRobertaModel(config)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        self.classifier = nn.Linear(config.hidden_size, self.config.num_labels)

    @add_start_docstrings_to_model_forward(ROBERTA_INPUTS_DOCSTRING)
    def forward(
        self,
        input_ids=None,
        attention_mask=None,
        token_type_ids=None,
        position_ids=None,
        head_mask=None,
        inputs_embeds=None,
        labels=None,
        output_layer=-1,
        train_highway=False,
    ):
        exit_layer = self.num_layers
        try:
            outputs = self.roberta(
                input_ids,
                attention_mask=attention_mask,
                token_type_ids=token_type_ids,
                position_ids=position_ids,
                head_mask=head_mask,
                inputs_embeds=inputs_embeds,
            )

            pooled_output = outputs[1]

            pooled_output = self.dropout(pooled_output)
            logits = self.classifier(pooled_output)
            outputs = (logits,) + outputs[2:]  # add hidden states and attention if they are here
        except HighwayException as e:
            # an intermediate highway exit fired: recover its outputs and exit layer
            outputs = e.message
            exit_layer = e.exit_layer
            logits = outputs[0]

        if not self.training:
            original_entropy = entropy(logits)
            highway_entropy = []
            highway_logits_all = []
        if labels is not None:
            if self.num_labels == 1:
                # We are doing regression
                loss_fct = MSELoss()
                loss = loss_fct(logits.view(-1), labels.view(-1))
            else:
                loss_fct = CrossEntropyLoss()
                loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))

            # work with highway exits
            highway_losses = []
            for highway_exit in outputs[-1]:
                highway_logits = highway_exit[0]
                if not self.training:
                    highway_logits_all.append(highway_logits)
                    highway_entropy.append(highway_exit[2])
                if self.num_labels == 1:
                    # We are doing regression
                    loss_fct = MSELoss()
                    highway_loss = loss_fct(highway_logits.view(-1), labels.view(-1))
                else:
                    loss_fct = CrossEntropyLoss()
                    highway_loss = loss_fct(highway_logits.view(-1, self.num_labels), labels.view(-1))
                highway_losses.append(highway_loss)

            if train_highway:
                outputs = (sum(highway_losses[:-1]),) + outputs
                # exclude the final highway, of course
            else:
                outputs = (loss,) + outputs
        if not self.training:
            outputs = outputs + ((original_entropy, highway_entropy), exit_layer)
            if output_layer >= 0:
                outputs = (
                    (outputs[0],) + (highway_logits_all[output_layer],) + outputs[2:]
                )  # use the highway of the last layer

        return outputs  # (loss), logits, (hidden_states), (attentions), entropy
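For context, the `entropy` helper imported above is what the highway exits threshold against at inference time: a confident intermediate classifier has low entropy, so the forward pass can stop early. A minimal sketch of such an entropy-of-logits function (an illustration only; the actual implementation lives in `modeling_highway_bert`, so treat this as an assumption about its behavior, not the repo's exact code):

```python
import torch


def entropy(logits: torch.Tensor) -> torch.Tensor:
    """Shannon entropy of a categorical distribution given unnormalized logits."""
    # log-softmax over the label dimension, computed in a numerically stable way
    log_probs = torch.log_softmax(logits, dim=-1)
    probs = log_probs.exp()
    return -(probs * log_probs).sum(dim=-1)


# Example: confident logits have low entropy, so an early-exit ramp would stop here.
confident = torch.tensor([[10.0, -10.0]])
uncertain = torch.tensor([[0.1, 0.0]])
print(entropy(confident), entropy(uncertain))  # ~0.0 vs ~0.69 nats
```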
import inspect
import unittest

from transformers import ConvNextConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available

from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin


if is_torch_available():
    import torch

    from transformers import ConvNextBackbone, ConvNextForImageClassification, ConvNextModel
    from transformers.models.convnext.modeling_convnext import CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST


if is_vision_available():
    from PIL import Image

    from transformers import AutoImageProcessor


class ConvNextModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=32,
        num_channels=3,
        num_stages=4,
        hidden_sizes=[10, 20, 30, 40],
        depths=[2, 2, 3, 2],
        is_training=True,
        use_labels=True,
        intermediate_size=37,
        hidden_act="gelu",
        num_labels=10,
        initializer_range=0.02,
        out_features=["stage2", "stage3", "stage4"],
        out_indices=[2, 3, 4],
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.num_stages = num_stages
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.num_labels = num_labels
        self.initializer_range = initializer_range
        self.out_features = out_features
        self.out_indices = out_indices
        self.scope = scope

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)

        config = self.get_config()

        return config, pixel_values, labels

    def get_config(self):
        return ConvNextConfig(
            num_channels=self.num_channels,
            hidden_sizes=self.hidden_sizes,
            depths=self.depths,
            num_stages=self.num_stages,
            hidden_act=self.hidden_act,
            is_decoder=False,
            initializer_range=self.initializer_range,
            out_features=self.out_features,
            out_indices=self.out_indices,
            num_labels=self.num_labels,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = ConvNextModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        # expected last hidden states: B, C, H // 32, W // 32
        self.parent.assertEqual(
            result.last_hidden_state.shape,
            (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32),
        )

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        model = ConvNextForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_backbone(self, config, pixel_values, labels):
        model = ConvNextBackbone(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)

        # verify hidden states
        self.parent.assertEqual(len(result.feature_maps), len(config.out_features))
        self.parent.assertListEqual(list(result.feature_maps[0].shape), [self.batch_size, self.hidden_sizes[1], 4, 4])

        # verify channels
        self.parent.assertEqual(len(model.channels), len(config.out_features))
        self.parent.assertListEqual(model.channels, config.hidden_sizes[1:])

        # verify backbone works with out_features=None
        config.out_features = None
        model = ConvNextBackbone(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)

        # verify feature maps
        self.parent.assertEqual(len(result.feature_maps), 1)
        self.parent.assertListEqual(list(result.feature_maps[0].shape), [self.batch_size, self.hidden_sizes[-1], 1, 1])

        # verify channels
        self.parent.assertEqual(len(model.channels), 1)
        self.parent.assertListEqual(model.channels, [config.hidden_sizes[-1]])

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict


@require_torch
class ConvNextModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            ConvNextModel,
            ConvNextForImageClassification,
            ConvNextBackbone,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {"feature-extraction": ConvNextModel, "image-classification": ConvNextForImageClassification}
        if is_torch_available()
        else {}
    )

    fx_compatible = True
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False

    def setUp(self):
        self.model_tester = ConvNextModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ConvNextConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def create_and_test_config_common_properties(self):
        return

    @unittest.skip(reason="ConvNext does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="ConvNext does not support input and output embeddings")
    def test_model_common_attributes(self):
        pass

    @unittest.skip(reason="ConvNext does not use feedforward chunking")
    def test_feed_forward_chunking(self):
        pass

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_backbone(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_backbone(*config_and_inputs)

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states

            expected_num_stages = self.model_tester.num_stages
            self.assertEqual(len(hidden_states), expected_num_stages + 1)

            # ConvNext's feature maps are of shape (batch_size, num_channels, height, width)
            self.assertListEqual(
                list(hidden_states[0].shape[-2:]),
                [self.model_tester.image_size // 4, self.model_tester.image_size // 4],
            )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            check_hidden_states_output(inputs_dict, config, model_class)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = ConvNextModel.from_pretrained(model_name)
            self.assertIsNotNone(model)


# We will verify our results on an image of cute cats
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image


@require_torch
@require_vision
class ConvNextModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return AutoImageProcessor.from_pretrained("facebook/convnext-tiny-224") if is_vision_available() else None

    @slow
    def test_inference_image_classification_head(self):
        model = ConvNextForImageClassification.from_pretrained("facebook/convnext-tiny-224").to(torch_device)

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([-0.0260, -0.4739, 0.1911]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))


@require_torch
class ConvNextBackboneTest(unittest.TestCase, BackboneTesterMixin):
    all_model_classes = (ConvNextBackbone,) if is_torch_available() else ()
    config_class = ConvNextConfig

    has_attentions = False

    def setUp(self):
        self.model_tester = ConvNextModelTester(self)
'''simple docstring'''
import torch
from diffusers import DDPMScheduler
from .test_schedulers import SchedulerCommonTest
class DDPMSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (DDPMScheduler,)

    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 1000,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
            "variance_type": "fixed_small",
            "clip_sample": True,
        }
        config.update(**kwargs)
        return config

    def test_timesteps(self):
        for timesteps in [1, 5, 100, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_betas(self):
        for beta_start, beta_end in zip([0.0001, 0.001, 0.01, 0.1], [0.002, 0.02, 0.2, 2]):
            self.check_over_configs(beta_start=beta_start, beta_end=beta_end)

    def test_schedules(self):
        for schedule in ["linear", "squaredcos_cap_v2"]:
            self.check_over_configs(beta_schedule=schedule)

    def test_variance_type(self):
        for variance in ["fixed_small", "fixed_large", "other"]:
            self.check_over_configs(variance_type=variance)

    def test_clip_sample(self):
        for clip_sample in [True, False]:
            self.check_over_configs(clip_sample=clip_sample)

    def test_thresholding(self):
        self.check_over_configs(thresholding=False)
        for threshold in [0.5, 1.0, 2.0]:
            for prediction_type in ["epsilon", "sample", "v_prediction"]:
                self.check_over_configs(
                    thresholding=True,
                    prediction_type=prediction_type,
                    sample_max_value=threshold,
                )

    def test_prediction_type(self):
        for prediction_type in ["epsilon", "sample", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)

    def test_time_indices(self):
        for t in [0, 500, 999]:
            self.check_over_forward(time_step=t)

    def test_variance(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        assert torch.sum(torch.abs(scheduler._get_variance(0) - 0.0)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(487) - 0.00979)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(999) - 0.02)) < 1e-5
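        # The values asserted above are the DDPM posterior variance
        #     beta_tilde_t = (1 - alpha_bar_{t-1}) / (1 - alpha_bar_t) * beta_t
        # under the linear beta schedule configured above. A standalone numpy
        # re-derivation -- an illustrative sketch, not part of the original test:
        #
        #     import numpy as np
        #     betas = np.linspace(0.0001, 0.02, 1000)
        #     alpha_bar = np.cumprod(1.0 - betas)
        #     t = 487
        #     (1 - alpha_bar[t - 1]) / (1 - alpha_bar[t]) * betas[t]   # ~0.00979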
    def test_full_loop_no_noise(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        num_trained_timesteps = len(scheduler)

        model = self.dummy_model()
        sample = self.dummy_sample_deter
        generator = torch.manual_seed(0)

        for t in reversed(range(num_trained_timesteps)):
            # 1. predict noise residual
            residual = model(sample, t)

            # 2. predict previous mean of sample x_t-1
            pred_prev_sample = scheduler.step(residual, t, sample, generator=generator).prev_sample

            # if t > 0:
            #     noise = self.dummy_sample_deter
            #     variance = scheduler.get_variance(t) ** (0.5) * noise
            #
            # sample = pred_prev_sample + variance
            sample = pred_prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 258.9606) < 1e-2
        assert abs(result_mean.item() - 0.3372) < 1e-3

    def test_full_loop_with_v_prediction(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(prediction_type="v_prediction")
        scheduler = scheduler_class(**scheduler_config)

        num_trained_timesteps = len(scheduler)

        model = self.dummy_model()
        sample = self.dummy_sample_deter
        generator = torch.manual_seed(0)

        for t in reversed(range(num_trained_timesteps)):
            # 1. predict noise residual
            residual = model(sample, t)

            # 2. predict previous mean of sample x_t-1
            pred_prev_sample = scheduler.step(residual, t, sample, generator=generator).prev_sample

            # if t > 0:
            #     noise = self.dummy_sample_deter
            #     variance = scheduler.get_variance(t) ** (0.5) * noise
            #
            # sample = pred_prev_sample + variance
            sample = pred_prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 202.0296) < 1e-2
        assert abs(result_mean.item() - 0.2631) < 1e-3

    def test_custom_timesteps(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = [100, 87, 50, 1, 0]
        scheduler.set_timesteps(timesteps=timesteps)

        scheduler_timesteps = scheduler.timesteps

        for i, timestep in enumerate(scheduler_timesteps):
            if i == len(scheduler_timesteps) - 1:
                expected_prev_t = -1
            else:
                expected_prev_t = timesteps[i + 1]

            prev_t = scheduler.previous_timestep(timestep)
            prev_t = prev_t.item()

            self.assertEqual(prev_t, expected_prev_t)

    def test_custom_timesteps_increasing_order(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = [100, 87, 50, 51, 0]

        with self.assertRaises(ValueError, msg="`custom_timesteps` must be in descending order."):
            scheduler.set_timesteps(timesteps=timesteps)

    def test_custom_timesteps_passing_both_num_inference_steps_and_timesteps(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = [100, 87, 50, 1, 0]
        num_inference_steps = len(timesteps)

        with self.assertRaises(ValueError, msg="Can only pass one of `num_inference_steps` or `custom_timesteps`."):
            scheduler.set_timesteps(num_inference_steps=num_inference_steps, timesteps=timesteps)

    def test_custom_timesteps_too_large(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = [scheduler.config.num_train_timesteps]

        with self.assertRaises(
            ValueError,
            msg=f"`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}",
        ):
            scheduler.set_timesteps(timesteps=timesteps)
"""simple docstring"""
import argparse
import collections
import json
import os
import re
import string
import sys
import numpy as np
ARTICLES_REGEX = re.compile(r"\b(a|an|the)\b", re.UNICODE)

OPTS = None


def parse_args():
    parser = argparse.ArgumentParser("Official evaluation script for SQuAD version 2.0.")
    parser.add_argument("data_file", metavar="data.json", help="Input data JSON file.")
    parser.add_argument("pred_file", metavar="pred.json", help="Model predictions.")
    parser.add_argument(
        "--out-file", "-o", metavar="eval.json", help="Write accuracy metrics to file (default is stdout)."
    )
    parser.add_argument(
        "--na-prob-file", "-n", metavar="na_prob.json", help="Model estimates of probability of no answer."
    )
    parser.add_argument(
        "--na-prob-thresh",
        "-t",
        type=float,
        default=1.0,
        help='Predict "" if no-answer probability exceeds this (default = 1.0).',
    )
    parser.add_argument(
        "--out-image-dir", "-p", metavar="out_images", default=None, help="Save precision-recall curves to directory."
    )
    parser.add_argument("--verbose", "-v", action="store_true")
    if len(sys.argv) == 1:
        parser.print_help()
        sys.exit(1)
    return parser.parse_args()


def make_qid_to_has_ans(dataset):
    qid_to_has_ans = {}
    for article in dataset:
        for p in article["paragraphs"]:
            for qa in p["qas"]:
                qid_to_has_ans[qa["id"]] = bool(qa["answers"]["text"])
    return qid_to_has_ans


def normalize_answer(s):
    """Lower text and remove punctuation, articles and extra whitespace."""

    def remove_articles(text):
        return ARTICLES_REGEX.sub(" ", text)

    def white_space_fix(text):
        return " ".join(text.split())

    def remove_punc(text):
        exclude = set(string.punctuation)
        return "".join(ch for ch in text if ch not in exclude)

    def lower(text):
        return text.lower()

    return white_space_fix(remove_articles(remove_punc(lower(s))))


def get_tokens(s):
    if not s:
        return []
    return normalize_answer(s).split()


def compute_exact(a_gold, a_pred):
    return int(normalize_answer(a_gold) == normalize_answer(a_pred))


def compute_f1(a_gold, a_pred):
    gold_toks = get_tokens(a_gold)
    pred_toks = get_tokens(a_pred)
    common = collections.Counter(gold_toks) & collections.Counter(pred_toks)
    num_same = sum(common.values())
    if len(gold_toks) == 0 or len(pred_toks) == 0:
        # If either is no-answer, then F1 is 1 if they agree, 0 otherwise
        return int(gold_toks == pred_toks)
    if num_same == 0:
        return 0
    precision = 1.0 * num_same / len(pred_toks)
    recall = 1.0 * num_same / len(gold_toks)
    f1 = (2 * precision * recall) / (precision + recall)
    return f1
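# A worked instance of the token-overlap F1 above (illustrative strings,
# chosen so that article stripping in normalize_answer does not interfere):
#     gold tokens: ["cat", "sat", "mat"], pred tokens: ["cat", "sat", "down"]
#     num_same = 2, precision = 2/3, recall = 2/3
#     F1 = 2 * (2/3) * (2/3) / ((2/3) + (2/3)) = 2/3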
def get_raw_scores(dataset, preds):
    exact_scores = {}
    f1_scores = {}
    for article in dataset:
        for p in article["paragraphs"]:
            for qa in p["qas"]:
                qid = qa["id"]
                gold_answers = [t for t in qa["answers"]["text"] if normalize_answer(t)]
                if not gold_answers:
                    # For unanswerable questions, only correct answer is empty string
                    gold_answers = [""]
                if qid not in preds:
                    print(f"Missing prediction for {qid}")
                    continue
                a_pred = preds[qid]
                # Take max over all gold answers
                exact_scores[qid] = max(compute_exact(a, a_pred) for a in gold_answers)
                f1_scores[qid] = max(compute_f1(a, a_pred) for a in gold_answers)
    return exact_scores, f1_scores


def apply_no_ans_threshold(scores, na_probs, qid_to_has_ans, na_prob_thresh):
    new_scores = {}
    for qid, s in scores.items():
        pred_na = na_probs[qid] > na_prob_thresh
        if pred_na:
            new_scores[qid] = float(not qid_to_has_ans[qid])
        else:
            new_scores[qid] = s
    return new_scores


def make_eval_dict(exact_scores, f1_scores, qid_list=None):
    if not qid_list:
        total = len(exact_scores)
        return collections.OrderedDict(
            [
                ("exact", 100.0 * sum(exact_scores.values()) / total),
                ("f1", 100.0 * sum(f1_scores.values()) / total),
                ("total", total),
            ]
        )
    else:
        total = len(qid_list)
        return collections.OrderedDict(
            [
                ("exact", 100.0 * sum(exact_scores[k] for k in qid_list) / total),
                ("f1", 100.0 * sum(f1_scores[k] for k in qid_list) / total),
                ("total", total),
            ]
        )


def merge_eval(main_eval, new_eval, prefix):
    for k in new_eval:
        main_eval[f"{prefix}_{k}"] = new_eval[k]


def plot_pr_curve(precisions, recalls, out_image, title):
    plt.step(recalls, precisions, color="b", alpha=0.2, where="post")
    plt.fill_between(recalls, precisions, step="post", alpha=0.2, color="b")
    plt.xlabel("Recall")
    plt.ylabel("Precision")
    plt.xlim([0.0, 1.05])
    plt.ylim([0.0, 1.05])
    plt.title(title)
    plt.savefig(out_image)
    plt.clf()


def make_precision_recall_eval(scores, na_probs, num_true_pos, qid_to_has_ans, out_image=None, title=None):
    qid_list = sorted(na_probs, key=lambda k: na_probs[k])
    true_pos = 0.0
    cur_p = 1.0
    cur_r = 0.0
    precisions = [1.0]
    recalls = [0.0]
    avg_prec = 0.0
    for i, qid in enumerate(qid_list):
        if qid_to_has_ans[qid]:
            true_pos += scores[qid]
        cur_p = true_pos / float(i + 1)
        cur_r = true_pos / float(num_true_pos)
        if i == len(qid_list) - 1 or na_probs[qid] != na_probs[qid_list[i + 1]]:
            # i.e., if we can put a threshold after this point
            avg_prec += cur_p * (cur_r - recalls[-1])
            precisions.append(cur_p)
            recalls.append(cur_r)
    if out_image:
        plot_pr_curve(precisions, recalls, out_image, title)
    return {"ap": 100.0 * avg_prec}


def run_precision_recall_analysis(main_eval, exact_raw, f1_raw, na_probs, qid_to_has_ans, out_image_dir):
    if out_image_dir and not os.path.exists(out_image_dir):
        os.makedirs(out_image_dir)
    num_true_pos = sum(1 for v in qid_to_has_ans.values() if v)
    if num_true_pos == 0:
        return
    pr_exact = make_precision_recall_eval(
        exact_raw,
        na_probs,
        num_true_pos,
        qid_to_has_ans,
        out_image=os.path.join(out_image_dir, "pr_exact.png"),
        title="Precision-Recall curve for Exact Match score",
    )
    pr_f1 = make_precision_recall_eval(
        f1_raw,
        na_probs,
        num_true_pos,
        qid_to_has_ans,
        out_image=os.path.join(out_image_dir, "pr_f1.png"),
        title="Precision-Recall curve for F1 score",
    )
    oracle_scores = {k: float(v) for k, v in qid_to_has_ans.items()}
    pr_oracle = make_precision_recall_eval(
        oracle_scores,
        na_probs,
        num_true_pos,
        qid_to_has_ans,
        out_image=os.path.join(out_image_dir, "pr_oracle.png"),
        title="Oracle Precision-Recall curve (binary task of HasAns vs. NoAns)",
    )
    merge_eval(main_eval, pr_exact, "pr_exact")
    merge_eval(main_eval, pr_f1, "pr_f1")
    merge_eval(main_eval, pr_oracle, "pr_oracle")


def histogram_na_prob(na_probs, qid_list, image_dir, name):
    if not qid_list:
        return
    x = [na_probs[k] for k in qid_list]
    weights = np.ones_like(x) / float(len(x))
    plt.hist(x, weights=weights, bins=20, range=(0.0, 1.0))
    plt.xlabel("Model probability of no-answer")
    plt.ylabel("Proportion of dataset")
    plt.title(f"Histogram of no-answer probability: {name}")
    plt.savefig(os.path.join(image_dir, f"na_prob_hist_{name}.png"))
    plt.clf()


def find_best_thresh(preds, scores, na_probs, qid_to_has_ans):
    num_no_ans = sum(1 for k in qid_to_has_ans if not qid_to_has_ans[k])
    cur_score = num_no_ans
    best_score = cur_score
    best_thresh = 0.0
    qid_list = sorted(na_probs, key=lambda k: na_probs[k])
    for i, qid in enumerate(qid_list):
        if qid not in scores:
            continue
        if qid_to_has_ans[qid]:
            diff = scores[qid]
        else:
            if preds[qid]:
                diff = -1
            else:
                diff = 0
        cur_score += diff
        if cur_score > best_score:
            best_score = cur_score
            best_thresh = na_probs[qid]
    return 100.0 * best_score / len(scores), best_thresh


def find_all_best_thresh(main_eval, preds, exact_raw, f1_raw, na_probs, qid_to_has_ans):
    best_exact, exact_thresh = find_best_thresh(preds, exact_raw, na_probs, qid_to_has_ans)
    best_f1, f1_thresh = find_best_thresh(preds, f1_raw, na_probs, qid_to_has_ans)
    main_eval["best_exact"] = best_exact
    main_eval["best_exact_thresh"] = exact_thresh
    main_eval["best_f1"] = best_f1
    main_eval["best_f1_thresh"] = f1_thresh


def main():
    with open(OPTS.data_file) as f:
        dataset_json = json.load(f)
        dataset = dataset_json["data"]
    with open(OPTS.pred_file) as f:
        preds = json.load(f)
    if OPTS.na_prob_file:
        with open(OPTS.na_prob_file) as f:
            na_probs = json.load(f)
    else:
        na_probs = {k: 0.0 for k in preds}
    qid_to_has_ans = make_qid_to_has_ans(dataset)  # maps qid to True/False
    has_ans_qids = [k for k, v in qid_to_has_ans.items() if v]
    no_ans_qids = [k for k, v in qid_to_has_ans.items() if not v]
    exact_raw, f1_raw = get_raw_scores(dataset, preds)
    exact_thresh = apply_no_ans_threshold(exact_raw, na_probs, qid_to_has_ans, OPTS.na_prob_thresh)
    f1_thresh = apply_no_ans_threshold(f1_raw, na_probs, qid_to_has_ans, OPTS.na_prob_thresh)
    out_eval = make_eval_dict(exact_thresh, f1_thresh)
    if has_ans_qids:
        has_ans_eval = make_eval_dict(exact_thresh, f1_thresh, qid_list=has_ans_qids)
        merge_eval(out_eval, has_ans_eval, "HasAns")
    if no_ans_qids:
        no_ans_eval = make_eval_dict(exact_thresh, f1_thresh, qid_list=no_ans_qids)
        merge_eval(out_eval, no_ans_eval, "NoAns")
    if OPTS.na_prob_file:
        find_all_best_thresh(out_eval, preds, exact_raw, f1_raw, na_probs, qid_to_has_ans)
    if OPTS.na_prob_file and OPTS.out_image_dir:
        run_precision_recall_analysis(out_eval, exact_raw, f1_raw, na_probs, qid_to_has_ans, OPTS.out_image_dir)
        histogram_na_prob(na_probs, has_ans_qids, OPTS.out_image_dir, "hasAns")
        histogram_na_prob(na_probs, no_ans_qids, OPTS.out_image_dir, "noAns")
    if OPTS.out_file:
        with open(OPTS.out_file, "w") as f:
            json.dump(out_eval, f)
    else:
        print(json.dumps(out_eval, indent=2))


if __name__ == "__main__":
    OPTS = parse_args()
    if OPTS.out_image_dir:
        import matplotlib

        matplotlib.use("Agg")
        import matplotlib.pyplot as plt
    main()
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available
_import_structure = {}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_bartpho"] = ["BartphoTokenizer"]
if TYPE_CHECKING:
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_bartpho import BartphoTokenizer
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)


class TimmBackboneConfig(PretrainedConfig):
    model_type = "timm_backbone"

    def __init__(self, backbone=None, num_channels=3, features_only=True, use_pretrained_backbone=True, out_indices=None, **kwargs):
        super().__init__(**kwargs)
        self.backbone = backbone
        self.num_channels = num_channels
        self.features_only = features_only
        self.use_pretrained_backbone = use_pretrained_backbone
        self.use_timm_backbone = True
        self.out_indices = out_indices if out_indices is not None else (-1,)
def kth_permutation(k, n):
    """Return the k-th (0-indexed) lexicographic permutation of range(n)."""
    # Factorials from 1! up to (n - 1)! drive the factorial number system.
    factorials = [1]
    for i in range(2, n):
        factorials.append(factorials[-1] * i)
    assert 0 <= k < factorials[-1] * n, "k out of bounds"

    permutation = []
    elements = list(range(n))

    # Find permutation
    while factorials:
        factorial = factorials.pop()
        number, k = divmod(k, factorial)
        permutation.append(elements[number])
        elements.remove(elements[number])

    permutation.append(elements[0])
    return permutation
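# Quick sanity sketch (assuming k is the 0-based lexicographic rank, which is
# what the divmod decomposition above implies):
#     kth_permutation(0, 3) -> [0, 1, 2]
#     kth_permutation(1, 3) -> [0, 2, 1]
#     kth_permutation(5, 3) -> [2, 1, 0]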
if __name__ == "__main__":
import doctest
doctest.testmod()
"""simple docstring"""
def heaps(arr: list) -> list:
    """Generate all permutations of `arr` with Heap's algorithm."""
    if len(arr) <= 1:
        return [tuple(arr)]

    res = []

    def generate(k: int, arr: list) -> None:
        if k == 1:
            res.append(tuple(arr[:]))
            return
        generate(k - 1, arr)
        for i in range(k - 1):
            if k % 2 == 0:  # k is even: swap the i-th and last element of the prefix
                arr[i], arr[k - 1] = arr[k - 1], arr[i]
            else:  # k is odd: always swap the first and last element of the prefix
                arr[0], arr[k - 1] = arr[k - 1], arr[0]
            generate(k - 1, arr)

    generate(len(arr), arr)
    return res
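# Minimal check -- Heap's algorithm yields n! distinct permutations:
#     from math import factorial
#     perms = heaps([1, 2, 3])
#     assert len(perms) == len(set(perms)) == factorial(3) == 6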
if __name__ == "__main__":
A_ = input('''Enter numbers separated by a comma:\n''').strip()
A_ = [int(item) for item in user_input.split(''',''')]
print(heaps(arr))
'''simple docstring'''
def bead_sort(sequence: list) -> list:
    """Sort non-negative integers by simulating beads falling under gravity."""
    if any(not isinstance(x, int) or x < 0 for x in sequence):
        raise TypeError("Sequence must be list of non-negative integers")
    for _ in range(len(sequence)):
        for i, (rod_upper, rod_lower) in enumerate(zip(sequence, sequence[1:])):
            if rod_upper > rod_lower:
                sequence[i] -= rod_upper - rod_lower
                sequence[i + 1] += rod_upper - rod_lower
    return sequence
if __name__ == "__main__":
assert bead_sort([5, 4, 3, 2, 1]) == [1, 2, 3, 4, 5]
assert bead_sort([7, 9, 4, 3, 5]) == [3, 4, 5, 7, 9]
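# How one adjacent-pair step behaves: on [3, 1], rod_upper=3 > rod_lower=1,
# so 2 units "fall" and the pair becomes [1, 3]. Running len(sequence) outer
# passes is enough for the largest value to settle at the end.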
'''simple docstring'''
import json
import os
import unittest
from transformers import BatchEncoding, MvpTokenizer, MvpTokenizerFast
from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, require_torch
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin, filter_roberta_detectors
@require_tokenizers
class MvpTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = MvpTokenizer
    rust_tokenizer_class = MvpTokenizerFast
    test_rust_tokenizer = True
    from_pretrained_filter = filter_roberta_detectors

    def setUp(self):
        super().setUp()
        vocab = [
            "l",
            "o",
            "w",
            "e",
            "r",
            "s",
            "t",
            "i",
            "d",
            "n",
            "\u0120",
            "\u0120l",
            "\u0120n",
            "\u0120lo",
            "\u0120low",
            "er",
            "\u0120lowest",
            "\u0120newer",
            "\u0120wider",
            "<unk>",
        ]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))

    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return self.tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return self.rust_tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        return "lower newer", "lower newer"

    @cached_property
    def default_tokenizer(self):
        return MvpTokenizer.from_pretrained("RUCAIBox/mvp")

    @cached_property
    def default_tokenizer_fast(self):
        return MvpTokenizerFast.from_pretrained("RUCAIBox/mvp")

    @require_torch
    def test_prepare_batch(self):
        src_text = ["A long paragraph for summarization.", "Another paragraph for summarization."]
        expected_src_tokens = [0, 250, 251, 17_818, 13, 39_186, 1_938, 4, 2]

        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            batch = tokenizer(src_text, max_length=len(expected_src_tokens), padding=True, return_tensors="pt")
            self.assertIsInstance(batch, BatchEncoding)

            self.assertEqual((2, 9), batch.input_ids.shape)
            self.assertEqual((2, 9), batch.attention_mask.shape)
            result = batch.input_ids.tolist()[0]
            self.assertListEqual(expected_src_tokens, result)
            # Test that special tokens are reset

    @require_torch
    def test_prepare_batch_empty_target_text(self):
        src_text = ["A long paragraph for summarization.", "Another paragraph for summarization."]
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            batch = tokenizer(src_text, padding=True, return_tensors="pt")
            # check if input_ids are returned and no labels
            self.assertIn("input_ids", batch)
            self.assertIn("attention_mask", batch)
            self.assertNotIn("labels", batch)
            self.assertNotIn("decoder_attention_mask", batch)

    @require_torch
    def test_tokenizer_as_target_length(self):
        tgt_text = [
            "Summary of the text.",
            "Another summary.",
        ]
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            targets = tokenizer(text_target=tgt_text, max_length=32, padding="max_length", return_tensors="pt")
            self.assertEqual(32, targets["input_ids"].shape[1])

    @require_torch
    def test_prepare_batch_not_longer_than_maxlen(self):
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            batch = tokenizer(
                ["I am a small frog" * 1_024, "I am a small frog"], padding=True, truncation=True, return_tensors="pt"
            )
            self.assertIsInstance(batch, BatchEncoding)
            self.assertEqual(batch.input_ids.shape, (2, 1_024))

    @require_torch
    def test_special_tokens(self):
        src_text = ["A long paragraph for summarization."]
        tgt_text = [
            "Summary of the text.",
        ]
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            inputs = tokenizer(src_text, text_target=tgt_text, return_tensors="pt")

            input_ids = inputs["input_ids"]
            labels = inputs["labels"]

            self.assertTrue((input_ids[:, 0] == tokenizer.bos_token_id).all().item())
            self.assertTrue((labels[:, 0] == tokenizer.bos_token_id).all().item())
            self.assertTrue((input_ids[:, -1] == tokenizer.eos_token_id).all().item())
            self.assertTrue((labels[:, -1] == tokenizer.eos_token_id).all().item())

    def test_pretokenized_inputs(self):
        pass

    def test_embeded_special_tokens(self):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                sentence = "A, <mask> AllenNLP sentence."
                tokens_r = tokenizer_r.encode_plus(sentence, add_special_tokens=True, return_token_type_ids=True)
                tokens_p = tokenizer_p.encode_plus(sentence, add_special_tokens=True, return_token_type_ids=True)

                # token_type_ids should put 0 everywhere
                self.assertEqual(sum(tokens_r["token_type_ids"]), sum(tokens_p["token_type_ids"]))

                # attention_mask should put 1 everywhere, so sum over length should be 1
                self.assertEqual(
                    sum(tokens_r["attention_mask"]) / len(tokens_r["attention_mask"]),
                    sum(tokens_p["attention_mask"]) / len(tokens_p["attention_mask"]),
                )

                tokens_r_str = tokenizer_r.convert_ids_to_tokens(tokens_r["input_ids"])
                tokens_p_str = tokenizer_p.convert_ids_to_tokens(tokens_p["input_ids"])

                # Rust correctly handles the space before the mask while python doesnt
                self.assertSequenceEqual(tokens_p["input_ids"], [0, 250, 6, 50_264, 3_823, 487, 21_992, 3_645, 4, 2])
                self.assertSequenceEqual(tokens_r["input_ids"], [0, 250, 6, 50_264, 3_823, 487, 21_992, 3_645, 4, 2])

                self.assertSequenceEqual(
                    tokens_p_str, ["<s>", "A", ",", "<mask>", "ĠAllen", "N", "LP", "Ġsentence", ".", "</s>"]
                )
                self.assertSequenceEqual(
                    tokens_r_str, ["<s>", "A", ",", "<mask>", "ĠAllen", "N", "LP", "Ġsentence", ".", "</s>"]
                )
'''simple docstring'''
from timeit import timeit
def get_set_bits_count_using_brian_kernighans_algorithm(number: int) -> int:
    if number < 0:
        raise ValueError("the value of input must not be negative")
    result = 0
    while number:
        number &= number - 1  # clears the lowest set bit
        result += 1
    return result


def get_set_bits_count_using_modulo_operator(number: int) -> int:
    if number < 0:
        raise ValueError("the value of input must not be negative")
    result = 0
    while number:
        if number % 2 == 1:
            result += 1
        number >>= 1
    return result


def benchmark() -> None:
    def do_benchmark(number: int) -> None:
        setup = "import __main__ as z"
        print(f"Benchmark when {number = }:")
        print(f"{get_set_bits_count_using_modulo_operator(number) = }")
        timing = timeit(f"z.get_set_bits_count_using_modulo_operator({number})", setup=setup)
        print(f"timeit() runs in {timing} seconds")
        print(f"{get_set_bits_count_using_brian_kernighans_algorithm(number) = }")
        timing = timeit(
            f"z.get_set_bits_count_using_brian_kernighans_algorithm({number})",
            setup=setup,
        )
        print(f"timeit() runs in {timing} seconds")

    for number in (25, 37, 58, 0):
        do_benchmark(number)
        print()
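# Why Kernighan's trick needs only one iteration per set bit:
# n & (n - 1) clears the lowest set bit, e.g. 0b10100 & 0b10011 == 0b10000.
#     get_set_bits_count_using_brian_kernighans_algorithm(0b10100)  # -> 2
#     get_set_bits_count_using_modulo_operator(0b10100)             # -> 2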
if __name__ == "__main__":
import doctest
doctest.testmod()
    benchmark()
import argparse
import torch
from ...utils import logging
from . import AlbertConfig, AlbertForPreTraining, load_tf_weights_in_albert
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, albert_config_file, pytorch_dump_path):
    config = AlbertConfig.from_json_file(albert_config_file)
    print(f"""Building PyTorch model from configuration: {config}""")
    model = AlbertForPreTraining(config)

    # Load weights from tf checkpoint
    load_tf_weights_in_albert(model, config, tf_checkpoint_path)

    # Save pytorch-model
    print(f"""Save PyTorch model to {pytorch_dump_path}""")
    torch.save(model.state_dict(), pytorch_dump_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--tf_checkpoint_path''', default=None, type=str, required=True, help='''Path to the TensorFlow checkpoint path.'''
)
parser.add_argument(
'''--albert_config_file''',
default=None,
type=str,
required=True,
help=(
'''The config json file corresponding to the pre-trained ALBERT model. \n'''
'''This specifies the model architecture.'''
),
)
parser.add_argument(
'''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
    args = parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.albert_config_file, args.pytorch_dump_path)
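    # Example invocation (script name and paths are illustrative, not from the source):
    #     python convert_albert_tf_checkpoint_to_pytorch.py \
    #         --tf_checkpoint_path ./albert_base/model.ckpt-best \
    #         --albert_config_file ./albert_base/albert_config.json \
    #         --pytorch_dump_path ./albert_base/pytorch_model.bin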
'''simple docstring'''
from importlib import import_module
from .logging import get_logger
logger = get_logger(__name__)


class _PatchedModuleObj:
    """Patched module object that copies a module's attributes."""

    def __init__(self, module, attrs=None):
        attrs = attrs or []
        if module is not None:
            for key in module.__dict__:
                if key in attrs or not key.startswith("__"):
                    setattr(self, key, getattr(module, key))
        self._original_module = module._original_module if isinstance(module, _PatchedModuleObj) else module


class patch_submodule:
    _active_patches = []

    def __init__(self, obj, target: str, new, attrs=None):
        self.obj = obj
        self.target = target
        self.new = new
        self.key = target.split(".")[0]
        self.original = {}
        self.attrs = attrs or []

    def __enter__(self):
        *submodules, target_attr = self.target.split(".")

        # Patch modules:
        # it's used to patch attributes of submodules like "os.path.join";
        # in this case we need to patch "os" and "os.path"
        for i in range(len(submodules)):
            try:
                submodule = import_module(".".join(submodules[: i + 1]))
            except ModuleNotFoundError:
                continue
            # We iterate over all the globals in self.obj in case we find "os" or "os.path"
            for attr in self.obj.__dir__():
                obj_attr = getattr(self.obj, attr)
                # We don't check for the name of the global, but rather if its value *is* "os" or "os.path".
                # This allows to patch renamed modules like "from os import path as ospath".
                if obj_attr is submodule or (
                    isinstance(obj_attr, _PatchedModuleObj) and obj_attr._original_module is submodule
                ):
                    self.original[attr] = obj_attr
                    # patch at top level
                    setattr(self.obj, attr, _PatchedModuleObj(obj_attr, attrs=self.attrs))
                    patched = getattr(self.obj, attr)
                    # construct lower levels patches
                    for key in submodules[i + 1 :]:
                        setattr(patched, key, _PatchedModuleObj(getattr(patched, key, None), attrs=self.attrs))
                        patched = getattr(patched, key)
                    # finally set the target attribute
                    setattr(patched, target_attr, self.new)

        # Patch attribute itself:
        # it's used for builtins like "open",
        # and also to patch "os.path.join" we may also need to patch "join"
        # itself if it was imported as "from os.path import join".
        if submodules:  # if it's an attribute of a submodule like "os.path.join"
            try:
                attr_value = getattr(import_module(".".join(submodules)), target_attr)
            except (AttributeError, ModuleNotFoundError):
                return
            # We iterate over all the globals in self.obj in case we find "os.path.join"
            for attr in self.obj.__dir__():
                # We don't check for the name of the global, but rather if its value *is* "os.path.join".
                # This allows to patch renamed attributes like "from os.path import join as pjoin".
                if getattr(self.obj, attr) is attr_value:
                    self.original[attr] = getattr(self.obj, attr)
                    setattr(self.obj, attr, self.new)
        elif target_attr in globals()["__builtins__"]:  # if it's a builtin like "open"
            self.original[target_attr] = globals()["__builtins__"][target_attr]
            setattr(self.obj, target_attr, self.new)
        else:
            raise RuntimeError(f"Tried to patch attribute {target_attr} instead of a submodule.")

    def __exit__(self, *exc_info):
        for attr in list(self.original):
            setattr(self.obj, attr, self.original.pop(attr))

    def start(self):
        """Activate a patch."""
        self.__enter__()
        self._active_patches.append(self)

    def stop(self):
        """Stop an active patch."""
        try:
            self._active_patches.remove(self)
        except ValueError:
            # If the patch hasn't been started this will fail
            return None
        return self.__exit__()
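# Hedged usage sketch (illustrative target; `mod` stands for any module object
# whose globals import os or os.path -- not an example from the source):
#
#     with patch_submodule(mod, "os.path.join", lambda *parts: "/".join(parts)):
#         ...  # code running here sees the patched join through `mod`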
'''simple docstring'''
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from .tokenization_electra import ElectraTokenizer
VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}
PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''google/electra-small-generator''': (
'''https://huggingface.co/google/electra-small-generator/resolve/main/vocab.txt'''
),
'''google/electra-base-generator''': '''https://huggingface.co/google/electra-base-generator/resolve/main/vocab.txt''',
'''google/electra-large-generator''': (
'''https://huggingface.co/google/electra-large-generator/resolve/main/vocab.txt'''
),
'''google/electra-small-discriminator''': (
'''https://huggingface.co/google/electra-small-discriminator/resolve/main/vocab.txt'''
),
'''google/electra-base-discriminator''': (
'''https://huggingface.co/google/electra-base-discriminator/resolve/main/vocab.txt'''
),
'''google/electra-large-discriminator''': (
'''https://huggingface.co/google/electra-large-discriminator/resolve/main/vocab.txt'''
),
},
'''tokenizer_file''': {
'''google/electra-small-generator''': (
'''https://huggingface.co/google/electra-small-generator/resolve/main/tokenizer.json'''
),
'''google/electra-base-generator''': (
'''https://huggingface.co/google/electra-base-generator/resolve/main/tokenizer.json'''
),
'''google/electra-large-generator''': (
'''https://huggingface.co/google/electra-large-generator/resolve/main/tokenizer.json'''
),
'''google/electra-small-discriminator''': (
'''https://huggingface.co/google/electra-small-discriminator/resolve/main/tokenizer.json'''
),
'''google/electra-base-discriminator''': (
'''https://huggingface.co/google/electra-base-discriminator/resolve/main/tokenizer.json'''
),
'''google/electra-large-discriminator''': (
'''https://huggingface.co/google/electra-large-discriminator/resolve/main/tokenizer.json'''
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'''google/electra-small-generator''': 5_1_2,
'''google/electra-base-generator''': 5_1_2,
'''google/electra-large-generator''': 5_1_2,
'''google/electra-small-discriminator''': 5_1_2,
'''google/electra-base-discriminator''': 5_1_2,
'''google/electra-large-discriminator''': 5_1_2,
}
PRETRAINED_INIT_CONFIGURATION = {
'''google/electra-small-generator''': {'''do_lower_case''': True},
'''google/electra-base-generator''': {'''do_lower_case''': True},
'''google/electra-large-generator''': {'''do_lower_case''': True},
'''google/electra-small-discriminator''': {'''do_lower_case''': True},
'''google/electra-base-discriminator''': {'''do_lower_case''': True},
'''google/electra-large-discriminator''': {'''do_lower_case''': True},
}
class ElectraTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = ElectraTokenizer
    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        tokenize_chinese_chars=True,
        strip_accents=None,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            **kwargs,
        )

        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)

        self.do_lower_case = do_lower_case
    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]
        return output
    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]
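    # Worked layout for the segment ids above (standard BERT-style scheme):
    #     single sequence:  [CLS] A [SEP]         -> (len(A) + 2) * [0]
    #     sequence pair:    [CLS] A [SEP] B [SEP] -> (len(A) + 2) * [0] + (len(B) + 1) * [1]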
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
def selection_sort(collection: list) -> list:
    """Sort `collection` in place by repeatedly selecting the minimum element."""
    length = len(collection)
    for i in range(length - 1):
        least = i
        for k in range(i + 1, length):
            if collection[k] < collection[least]:
                least = k
        if least != i:
            collection[least], collection[i] = (collection[i], collection[least])
    return collection
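# Note that selection_sort mutates its argument; pass a copy to keep the
# original, as in this illustrative check:
#     data = [5, 2, 9, 1]
#     assert selection_sort(data[:]) == [1, 2, 5, 9]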
if __name__ == "__main__":
lowercase_ = input("""Enter numbers separated by a comma:\n""").strip()
lowercase_ = [int(item) for item in user_input.split(""",""")]
print(selection_sort(unsorted))
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_import_structure = {"configuration_vit_mae": ["VIT_MAE_PRETRAINED_CONFIG_ARCHIVE_MAP", "ViTMAEConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_vit_mae"] = [
'''VIT_MAE_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''ViTMAEForPreTraining''',
'''ViTMAELayer''',
'''ViTMAEModel''',
'''ViTMAEPreTrainedModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_vit_mae"] = [
'''TFViTMAEForPreTraining''',
'''TFViTMAEModel''',
'''TFViTMAEPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_vit_mae import VIT_MAE_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTMAEConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vit_mae import (
VIT_MAE_PRETRAINED_MODEL_ARCHIVE_LIST,
ViTMAEForPreTraining,
ViTMAELayer,
ViTMAEModel,
ViTMAEPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_vit_mae import TFViTMAEForPreTraining, TFViTMAEModel, TFViTMAEPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

XLM_ROBERTA_XL_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"facebook/xlm-roberta-xl": "https://huggingface.co/facebook/xlm-roberta-xl/resolve/main/config.json",
"facebook/xlm-roberta-xxl": "https://huggingface.co/facebook/xlm-roberta-xxl/resolve/main/config.json",
# See all XLM-RoBERTa-XL models at https://huggingface.co/models?filter=xlm-roberta-xl
}
class XLMRobertaXLConfig(PretrainedConfig):
    model_type = "xlm-roberta-xl"

    def __init__(
        self,
        vocab_size=250_880,
        hidden_size=2_560,
        num_hidden_layers=36,
        num_attention_heads=32,
        intermediate_size=10_240,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=514,
        type_vocab_size=1,
        initializer_range=0.02,
        layer_norm_eps=1e-05,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout


class XLMRobertaXLOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
from __future__ import annotations
from collections.abc import Generator
import requests
from bs4 import BeautifulSoup

url = "https://www.indeed.co.in/jobs?q=mobile+app+development&l="


def fetch_jobs(location: str = "mumbai") -> Generator[tuple[str, str], None, None]:
    """Yield (job_title, company_name) pairs scraped from Indeed."""
    soup = BeautifulSoup(requests.get(url + location).content, "html.parser")
    # This attribute finds out all the specifics listed in a job
    for job in soup.find_all("div", attrs={"data-tn-component": "organicJob"}):
        job_title = job.find("a", attrs={"data-tn-element": "jobTitle"}).text.strip()
        company_name = job.find("span", {"class": "company"}).text.strip()
        yield job_title, company_name
if __name__ == "__main__":
for i, job in enumerate(fetch_jobs("Bangalore"), 1):
print(f'''Job {i:>2} is {job[0]} at {job[1]}''')
import os
SYMBOLS = {"I": 1, "V": 5, "X": 10, "L": 50, "C": 100, "D": 500, "M": 1000}
def parse_roman_numerals(numerals: str) -> int:
    """Convert a Roman numeral string to an integer, honouring subtractive pairs."""
    total_value = 0
    index = 0
    while index < len(numerals) - 1:
        current_value = SYMBOLS[numerals[index]]
        next_value = SYMBOLS[numerals[index + 1]]
        if current_value < next_value:
            total_value -= current_value
        else:
            total_value += current_value
        index += 1
    total_value += SYMBOLS[numerals[index]]
    return total_value
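# Subtractive pairs are handled by the one-symbol lookahead above, e.g.:
#     parse_roman_numerals("XIX")   == 10 - 1 + 10              == 19
#     parse_roman_numerals("MCMXC") == 1000 - 100 + 1000 - 10 + 100 == 1990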
def generate_roman_numerals(num: int) -> str:
    """Generate the shortest (canonical) Roman numeral for `num`."""
    numerals = ""

    m_count = num // 1000
    numerals += m_count * "M"
    num %= 1000

    c_count = num // 100
if c_count == 9:
numerals += "CM"
c_count -= 9
elif c_count == 4:
numerals += "CD"
c_count -= 4
if c_count >= 5:
numerals += "D"
c_count -= 5
numerals += c_count * "C"
num %= 1_00
    x_count = num // 10
if x_count == 9:
numerals += "XC"
x_count -= 9
elif x_count == 4:
numerals += "XL"
x_count -= 4
if x_count >= 5:
numerals += "L"
x_count -= 5
numerals += x_count * "X"
num %= 10
if num == 9:
numerals += "IX"
num -= 9
elif num == 4:
numerals += "IV"
num -= 4
if num >= 5:
numerals += "V"
num -= 5
numerals += num * "I"
return numerals
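# Round-trip sketch: re-generating from a parsed value yields the minimal
# form, which is exactly how solution() measures the character savings:
#     generate_roman_numerals(parse_roman_numerals("IIIIIIIIIIIIIIII")) == "XVI"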
def solution(roman_numerals_filename: str = "/p089_roman.txt") -> int:
    """Total characters saved by rewriting each numeral in minimal form."""
    savings = 0
    with open(os.path.dirname(__file__) + roman_numerals_filename) as filea:
        lines = filea.readlines()
    for line in lines:
        original = line.strip()
        num = parse_roman_numerals(original)
        shorter = generate_roman_numerals(num)
        savings += len(original) - len(shorter)
    return savings
if __name__ == "__main__":
print(f'{solution() = }')
import argparse
import copy
def generate_neighbours(path):
    """Parse an edge-list file into {node: [[neighbour, distance], ...]}."""
    dict_of_neighbours = {}
    with open(path) as f:
        for line in f:
            if line.split()[0] not in dict_of_neighbours:
                _list = []
                _list.append([line.split()[1], line.split()[2]])
                dict_of_neighbours[line.split()[0]] = _list
            else:
                dict_of_neighbours[line.split()[0]].append(
                    [line.split()[1], line.split()[2]]
                )
            if line.split()[1] not in dict_of_neighbours:
                _list = []
                _list.append([line.split()[0], line.split()[2]])
                dict_of_neighbours[line.split()[1]] = _list
            else:
                dict_of_neighbours[line.split()[1]].append(
                    [line.split()[0], line.split()[2]]
                )
    return dict_of_neighbours
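# Assumed input format (one weighted edge per line): "node1 node2 distance".
# An illustrative file containing:
#     a b 20
#     a c 18
#     b c 10
# would yield {"a": [["b", "20"], ["c", "18"]], "b": [["a", "20"], ["c", "10"]],
# "c": [["a", "18"], ["b", "10"]]} -- distances stay strings here and are
# int()-converted at the use sites below.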
def generate_first_solution(path, dict_of_neighbours):
    """Greedy nearest-neighbour tour starting from the first node in the file."""
    with open(path) as f:
        start_node = f.read(1)
    end_node = start_node

    first_solution = []
    visiting = start_node
    distance_of_first_solution = 0
    while visiting not in first_solution:
        minim = 10_000
        for k in dict_of_neighbours[visiting]:
            if int(k[1]) < int(minim) and k[0] not in first_solution:
                minim = k[1]
                best_node = k[0]

        first_solution.append(visiting)
        distance_of_first_solution = distance_of_first_solution + int(minim)
        visiting = best_node

    first_solution.append(end_node)

    position = 0
    for k in dict_of_neighbours[first_solution[-2]]:
        if k[0] == start_node:
            break
        position += 1

    distance_of_first_solution = (
        distance_of_first_solution
        + int(dict_of_neighbours[first_solution[-2]][position][1])
        - 10_000
    )
    return first_solution, distance_of_first_solution
def find_neighborhood(solution, dict_of_neighbours):
    """All 2-swap neighbours of `solution`, each with its tour cost appended."""
    neighborhood_of_solution = []
    for n in solution[1:-1]:
        n_index = solution.index(n)
        for kn in solution[1:-1]:
            kn_index = solution.index(kn)
            if n == kn:
                continue

            _tmp = copy.deepcopy(solution)
            _tmp[n_index] = kn
            _tmp[kn_index] = n

            distance = 0
            for k in _tmp[:-1]:
                next_node = _tmp[_tmp.index(k) + 1]
                for i in dict_of_neighbours[k]:
                    if i[0] == next_node:
                        distance = distance + int(i[1])
            _tmp.append(distance)

            if _tmp not in neighborhood_of_solution:
                neighborhood_of_solution.append(_tmp)

    index_of_last_item_in_the_list = len(neighborhood_of_solution[0]) - 1

    neighborhood_of_solution.sort(key=lambda x: x[index_of_last_item_in_the_list])
    return neighborhood_of_solution
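# Note the representation used above: every entry of the returned neighbourhood
# is a full tour with its total cost appended as the last element, which is why
# the tabu search below reads the cost at index len(neighbour) - 1 and strips it
# with [:-1] before storing the tour.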
def tabu_search(first_solution, distance_of_first_solution, dict_of_neighbours, iters, size):
    """Tabu search over 2-swap neighbourhoods, forbidding recently used swaps."""
    count = 1
    solution = first_solution
    tabu_list = []
    best_cost = distance_of_first_solution
    best_solution_ever = solution

    while count <= iters:
        neighborhood = find_neighborhood(solution, dict_of_neighbours)
        index_of_best_solution = 0
        best_solution = neighborhood[index_of_best_solution]
        best_cost_index = len(best_solution) - 1

        found = False
        while not found:
            i = 0
            while i < len(best_solution):
                if best_solution[i] != solution[i]:
                    first_exchange_node = best_solution[i]
                    second_exchange_node = solution[i]
                    break
                i = i + 1

            if [first_exchange_node, second_exchange_node] not in tabu_list and [
                second_exchange_node,
                first_exchange_node,
            ] not in tabu_list:
                tabu_list.append([first_exchange_node, second_exchange_node])
                found = True
                solution = best_solution[:-1]
                cost = neighborhood[index_of_best_solution][best_cost_index]
                if cost < best_cost:
                    best_cost = cost
                    best_solution_ever = solution
            else:
                index_of_best_solution = index_of_best_solution + 1
                best_solution = neighborhood[index_of_best_solution]

        if len(tabu_list) >= size:
            tabu_list.pop(0)

        count = count + 1

    return best_solution_ever, best_cost
def main(args=None):
    dict_of_neighbours = generate_neighbours(args.File)

    first_solution, distance_of_first_solution = generate_first_solution(
        args.File, dict_of_neighbours
    )

    best_sol, best_cost = tabu_search(
        first_solution,
        distance_of_first_solution,
        dict_of_neighbours,
        args.Iterations,
        args.Size,
    )

    print(f"""Best solution: {best_sol}, with total distance: {best_cost}.""")
if __name__ == "__main__":
    parser = argparse.ArgumentParser(description='Tabu Search')
parser.add_argument(
'-f',
'--File',
type=str,
help='Path to the file containing the data',
required=True,
)
parser.add_argument(
'-i',
'--Iterations',
type=int,
help='How many iterations the algorithm should perform',
required=True,
)
parser.add_argument(
'-s', '--Size', type=int, help='Size of the tabu list', required=True
)
# Pass the arguments to main method
main(parser.parse_args())
"""simple docstring"""
from typing import Optional
from .. import Features, NamedSplit
from ..packaged_modules.text.text import Text
from ..utils.typing import NestedDataStructureLike, PathLike
from .abc import AbstractDatasetReader
class TextDatasetReader(AbstractDatasetReader):
    def __init__(
        self,
        path_or_paths: NestedDataStructureLike[PathLike],
        split: Optional[NamedSplit] = None,
        features: Optional[Features] = None,
        cache_dir: str = None,
        keep_in_memory: bool = False,
        streaming: bool = False,
        num_proc: Optional[int] = None,
        **kwargs,
    ):
        super().__init__(
            path_or_paths,
            split=split,
            features=features,
            cache_dir=cache_dir,
            keep_in_memory=keep_in_memory,
            streaming=streaming,
            num_proc=num_proc,
            **kwargs,
        )
        path_or_paths = path_or_paths if isinstance(path_or_paths, dict) else {self.split: path_or_paths}
        self.builder = Text(
            cache_dir=cache_dir,
            data_files=path_or_paths,
            features=features,
            **kwargs,
        )

    def read(self):
        # Build iterable dataset
        if self.streaming:
            dataset = self.builder.as_streaming_dataset(split=self.split)
        # Build regular (map-style) dataset
        else:
            download_config = None
            download_mode = None
            verification_mode = None
            base_path = None

            self.builder.download_and_prepare(
                download_config=download_config,
                download_mode=download_mode,
                verification_mode=verification_mode,
                base_path=base_path,
                num_proc=self.num_proc,
            )
            dataset = self.builder.as_dataset(
                split=self.split, verification_mode=verification_mode, in_memory=self.keep_in_memory
            )
        return dataset
"""simple docstring"""
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_rembert import RemBertTokenizer
else:
    RemBertTokenizer = None
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.model", "tokenizer_file": "tokenizer.json"}
PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''google/rembert''': '''https://huggingface.co/google/rembert/resolve/main/sentencepiece.model''',
},
'''tokenizer_file''': {
'''google/rembert''': '''https://huggingface.co/google/rembert/resolve/main/tokenizer.json''',
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'''google/rembert''': 2_56,
}
SPIECE_UNDERLINE = "▁"
class RemBertTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = RemBertTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        remove_space=True,
        keep_accents=False,
        bos_token="[CLS]",
        eos_token="[SEP]",
        unk_token="<unk>",
        sep_token="[SEP]",
        pad_token="<pad>",
        cls_token="[CLS]",
        mask_token="[MASK]",
        **kwargs,
    ):
        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            remove_space=remove_space,
            keep_accents=keep_accents,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            **kwargs,
        )

        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True

    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return cls + token_ids_0 + sep
        return cls + token_ids_0 + sep + token_ids_1 + sep

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ):
        if already_has_special_tokens:
            if token_ids_1 is not None:
                raise ValueError(
                    "You should not supply a second sequence if the provided sequence of "
                    "ids is already formatted with special tokens for the model."
                )
            return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_0]

        if token_ids_1 is not None:
            return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1]

    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error("Vocabulary path ({}) should be a directory".format(save_directory))
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        return (out_vocab_file,)
import unittest
from transformers import (
MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING,
TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING,
TextaTextGenerationPipeline,
pipeline,
)
from transformers.testing_utils import is_pipeline_test, require_tf, require_torch
from transformers.utils import is_torch_available
from .test_pipelines_common import ANY
if is_torch_available():
import torch
@is_pipeline_test
class __lowercase ( unittest.TestCase ):
"""simple docstring"""
    model_mapping = MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
    tf_model_mapping = TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
    def get_test_pipeline( self , model , tokenizer , processor ):
        '''simple docstring'''
        generator = Text2TextGenerationPipeline(model=model , tokenizer=tokenizer )
        return generator, ["Something to write", "Something else"]
    def run_pipeline_test( self , generator , _ ):
        '''simple docstring'''
        outputs = generator("""Something there""" )
        self.assertEqual(outputs , [{"""generated_text""": ANY(str )}] )
        # These are encoder decoder, they don't just append to incoming string
        self.assertFalse(outputs[0]["""generated_text"""].startswith("""Something there""" ) )

        outputs = generator(["""This is great !""", """Something else"""] , num_return_sequences=2 , do_sample=True )
        self.assertEqual(
            outputs , [
                [{"""generated_text""": ANY(str )}, {"""generated_text""": ANY(str )}],
                [{"""generated_text""": ANY(str )}, {"""generated_text""": ANY(str )}],
            ] , )

        outputs = generator(
            ["""This is great !""", """Something else"""] , num_return_sequences=2 , batch_size=2 , do_sample=True )
        self.assertEqual(
            outputs , [
                [{"""generated_text""": ANY(str )}, {"""generated_text""": ANY(str )}],
                [{"""generated_text""": ANY(str )}, {"""generated_text""": ANY(str )}],
            ] , )

        with self.assertRaises(ValueError ):
            generator(4 )
    @require_torch
    def test_small_model_pt( self ):
        '''simple docstring'''
        generator = pipeline("""text2text-generation""" , model="""patrickvonplaten/t5-tiny-random""" , framework="""pt""" )
        # do_sample=False necessary for reproducibility
        outputs = generator("""Something there""" , do_sample=False )
        self.assertEqual(outputs , [{"""generated_text""": """"""}] )

        num_return_sequences = 3
        outputs = generator(
            """Something there""" , num_return_sequences=num_return_sequences , num_beams=num_return_sequences , )
        target_outputs = [
            {"""generated_text""": """Beide Beide Beide Beide Beide Beide Beide Beide Beide"""},
            {"""generated_text""": """Beide Beide Beide Beide Beide Beide Beide Beide"""},
            {"""generated_text""": """"""},
        ]
        self.assertEqual(outputs , target_outputs )

        outputs = generator("""This is a test""" , do_sample=True , num_return_sequences=2 , return_tensors=True )
        self.assertEqual(
            outputs , [
                {"""generated_token_ids""": ANY(torch.Tensor )},
                {"""generated_token_ids""": ANY(torch.Tensor )},
            ] , )

        generator.tokenizer.pad_token_id = generator.model.config.eos_token_id
        generator.tokenizer.pad_token = """<pad>"""
        outputs = generator(
            ["""This is a test""", """This is a second test"""] , do_sample=True , num_return_sequences=2 , batch_size=2 , return_tensors=True , )
        self.assertEqual(
            outputs , [
                [
                    {"""generated_token_ids""": ANY(torch.Tensor )},
                    {"""generated_token_ids""": ANY(torch.Tensor )},
                ],
                [
                    {"""generated_token_ids""": ANY(torch.Tensor )},
                    {"""generated_token_ids""": ANY(torch.Tensor )},
                ],
            ] , )
    @require_tf
    def test_small_model_tf( self ):
        '''simple docstring'''
        generator = pipeline("""text2text-generation""" , model="""patrickvonplaten/t5-tiny-random""" , framework="""tf""" )
        # do_sample=False necessary for reproducibility
        outputs = generator("""Something there""" , do_sample=False )
        self.assertEqual(outputs , [{"""generated_text""": """"""}] )
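
# Usage sketch (added; not part of the original tests): the pipeline under test is
# the standard text2text-generation pipeline, e.g.
#   generator = pipeline("text2text-generation", model="t5-small")
#   generator("translate English to German: Hello")
# The exact generated string depends on the checkpoint; "t5-small" is only an example.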
| 252 | """simple docstring"""
import argparse
JS_FILE = 'docs/source/_static/js/custom.js'
def update_custom_js( version ):
    with open(JS_FILE , encoding='utf-8' , newline='\n' ) as f:
        lines = f.readlines()
    index = 0

    # First let's put the right version
    while not lines[index].startswith('const stableVersion =' ):
        index += 1
    lines[index] = f'''const stableVersion = "v{version}"\n'''

    # Then update the dictionary
    while not lines[index].startswith('const versionMapping = {' ):
        index += 1
    # We go until the end
    while not lines[index].startswith('}' ):
        index += 1
    # We add the new version at the end
    lines[index - 1] += f'''  "v{version}": "v{version}",\n'''

    with open(JS_FILE , 'w' , encoding='utf-8' , newline='\n' ) as f:
        f.writelines(lines )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument('--version', help='Release version.')
    args = parser.parse_args()
update_custom_js(args.version)
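# Example invocation (added for illustration; the script filename is assumed):
#   python update_custom_js.py --version 4.30.0
# which rewrites the `const stableVersion = ...` line to v4.30.0 and appends
# `"v4.30.0": "v4.30.0",` to the `versionMapping` dictionary in custom.js.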
| 98 | 0 |
'''simple docstring'''
import sys
N = (
'''73167176531330624919225119674426574742355349194934'''
'''96983520312774506326239578318016984801869478851843'''
'''85861560789112949495459501737958331952853208805511'''
'''12540698747158523863050715693290963295227443043557'''
'''66896648950445244523161731856403098711121722383113'''
'''62229893423380308135336276614282806444486645238749'''
'''30358907296290491560440772390713810515859307960866'''
'''70172427121883998797908792274921901699720888093776'''
'''65727333001053367881220235421809751254540594752243'''
'''52584907711670556013604839586446706324415722155397'''
'''53697817977846174064955149290862569321978468622482'''
'''83972241375657056057490261407972968652414535100474'''
'''82166370484403199890008895243450658541227588666881'''
'''16427171479924442928230863465674813919123162824586'''
'''17866458359124566529476545682848912883142607690042'''
'''24219022671055626321111109370544217506941658960408'''
'''07198403850962455444362981230987879927244284909188'''
'''84580156166097919133875499200524063689912560717606'''
'''05886116467109405077541002256983155200055935729725'''
'''71636269561882670428252483600823257530420752963450'''
)
def solution( n = N ):
    """simple docstring"""
    largest_product = -sys.maxsize - 1
    for i in range(len(n ) - 12 ):
        product = 1
        for j in range(13 ):
            product *= int(n[i + j] )

        if product > largest_product:
            largest_product = product
    return largest_product
if __name__ == "__main__":
print(F"""{solution() = }""")
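# Worked example (added for illustration): for the digit string "123456" and 3-digit
# windows, the products are 1*2*3=6, 2*3*4=24, 3*4*5=60 and 4*5*6=120, so the answer
# would be 120; solution() does the same with 13-digit windows over the constant N.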
| 220 |
'''simple docstring'''
def hamming_distance( string_a , string_b ):
    """simple docstring"""
    if len(string_a ) != len(string_b ):
        raise ValueError("""String lengths must match!""" )
    count = 0
    for chara, charb in zip(string_a , string_b ):
        if chara != charb:
            count += 1
    return count
if __name__ == "__main__":
import doctest
doctest.testmod()
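

# Added illustration (not in the original file): "karolin" and "kathrin" differ at
# exactly three positions (r/t, o/h, l/r).
if __name__ == "__main__":
    assert hamming_distance("karolin", "kathrin") == 3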
| 220 | 1 |
def search( list_data: list , key: int , left: int = 0 , right: int = 0 ) -> int:
    right = right or len(list_data ) - 1
    if left > right:
        return -1
    elif list_data[left] == key:
        return left
    elif list_data[right] == key:
        return right
    else:
        return search(list_data , key , left + 1 , right - 1 )
if __name__ == "__main__":
import doctest
doctest.testmod()
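

# Added illustration (not in the original file): the recursion checks both ends and
# walks inward, returning the index of `key` or -1 when it is absent.
if __name__ == "__main__":
    assert search([1, 2, 4, 8, 16], 8) == 3
    assert search([1, 2, 4, 8, 16], 5) == -1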
| 119 |
import argparse
import json
import os
import numpy as np
import PIL
import requests
import tensorflow.keras.applications.efficientnet as efficientnet
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from tensorflow.keras.preprocessing import image
from transformers import (
EfficientNetConfig,
EfficientNetForImageClassification,
EfficientNetImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

model_classes = {
    '''b0''': efficientnet.EfficientNetB0,
    '''b1''': efficientnet.EfficientNetB1,
    '''b2''': efficientnet.EfficientNetB2,
    '''b3''': efficientnet.EfficientNetB3,
    '''b4''': efficientnet.EfficientNetB4,
    '''b5''': efficientnet.EfficientNetB5,
    '''b6''': efficientnet.EfficientNetB6,
    '''b7''': efficientnet.EfficientNetB7,
}
CONFIG_MAP = {
'''b0''': {
'''hidden_dim''': 1_280,
'''width_coef''': 1.0,
'''depth_coef''': 1.0,
'''image_size''': 224,
'''dropout_rate''': 0.2,
'''dw_padding''': [],
},
'''b1''': {
'''hidden_dim''': 1_280,
'''width_coef''': 1.0,
'''depth_coef''': 1.1,
'''image_size''': 240,
'''dropout_rate''': 0.2,
'''dw_padding''': [16],
},
'''b2''': {
'''hidden_dim''': 1_408,
'''width_coef''': 1.1,
'''depth_coef''': 1.2,
'''image_size''': 260,
'''dropout_rate''': 0.3,
'''dw_padding''': [5, 8, 16],
},
'''b3''': {
'''hidden_dim''': 1_536,
'''width_coef''': 1.2,
'''depth_coef''': 1.4,
'''image_size''': 300,
'''dropout_rate''': 0.3,
'''dw_padding''': [5, 18],
},
'''b4''': {
'''hidden_dim''': 1_792,
'''width_coef''': 1.4,
'''depth_coef''': 1.8,
'''image_size''': 380,
'''dropout_rate''': 0.4,
'''dw_padding''': [6],
},
'''b5''': {
'''hidden_dim''': 2_048,
'''width_coef''': 1.6,
'''depth_coef''': 2.2,
'''image_size''': 456,
'''dropout_rate''': 0.4,
'''dw_padding''': [13, 27],
},
'''b6''': {
'''hidden_dim''': 2_304,
'''width_coef''': 1.8,
'''depth_coef''': 2.6,
'''image_size''': 528,
'''dropout_rate''': 0.5,
'''dw_padding''': [31],
},
'''b7''': {
'''hidden_dim''': 2_560,
'''width_coef''': 2.0,
'''depth_coef''': 3.1,
'''image_size''': 600,
'''dropout_rate''': 0.5,
'''dw_padding''': [18],
},
}
def get_efficientnet_config( model_name ):
    config = EfficientNetConfig()
    config.hidden_dim = CONFIG_MAP[model_name]['hidden_dim']
    config.width_coefficient = CONFIG_MAP[model_name]['width_coef']
    config.depth_coefficient = CONFIG_MAP[model_name]['depth_coef']
    config.image_size = CONFIG_MAP[model_name]['image_size']
    config.dropout_rate = CONFIG_MAP[model_name]['dropout_rate']
    config.depthwise_padding = CONFIG_MAP[model_name]['dw_padding']

    repo_id = 'huggingface/label-files'
    filename = 'imagenet-1k-id2label.json'
    config.num_labels = 1000
    id2label = json.load(open(hf_hub_download(repo_id , filename , repo_type='dataset' ) , 'r' ) )
    id2label = {int(k ): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}
    return config
def prepare_img():
    url = 'http://images.cocodataset.org/val2017/000000039769.jpg'
    im = Image.open(requests.get(url , stream=True ).raw )
    return im
def convert_image_processor( model_name ):
    size = CONFIG_MAP[model_name]['image_size']
    preprocessor = EfficientNetImageProcessor(
        size={'height': size, 'width': size} , image_mean=[0.485, 0.456, 0.406] , image_std=[0.47853944, 0.4732864, 0.47434163] , do_center_crop=False , )
    return preprocessor
def rename_keys( original_param_names ):
    block_names = [v.split('_' )[0].split('block' )[1] for v in original_param_names if v.startswith('block' )]
    block_names = sorted(set(block_names ) )
    num_blocks = len(block_names )
    block_name_mapping = {b: str(i ) for b, i in zip(block_names , range(num_blocks ) )}

    rename_keys = []
rename_keys.append(('stem_conv/kernel:0', 'embeddings.convolution.weight') )
rename_keys.append(('stem_bn/gamma:0', 'embeddings.batchnorm.weight') )
rename_keys.append(('stem_bn/beta:0', 'embeddings.batchnorm.bias') )
rename_keys.append(('stem_bn/moving_mean:0', 'embeddings.batchnorm.running_mean') )
rename_keys.append(('stem_bn/moving_variance:0', 'embeddings.batchnorm.running_var') )
for b in block_names:
        hf_b = block_name_mapping[b]
rename_keys.append((F"""block{b}_expand_conv/kernel:0""", F"""encoder.blocks.{hf_b}.expansion.expand_conv.weight""") )
rename_keys.append((F"""block{b}_expand_bn/gamma:0""", F"""encoder.blocks.{hf_b}.expansion.expand_bn.weight""") )
rename_keys.append((F"""block{b}_expand_bn/beta:0""", F"""encoder.blocks.{hf_b}.expansion.expand_bn.bias""") )
rename_keys.append(
(F"""block{b}_expand_bn/moving_mean:0""", F"""encoder.blocks.{hf_b}.expansion.expand_bn.running_mean""") )
rename_keys.append(
(F"""block{b}_expand_bn/moving_variance:0""", F"""encoder.blocks.{hf_b}.expansion.expand_bn.running_var""") )
rename_keys.append(
(F"""block{b}_dwconv/depthwise_kernel:0""", F"""encoder.blocks.{hf_b}.depthwise_conv.depthwise_conv.weight""") )
rename_keys.append((F"""block{b}_bn/gamma:0""", F"""encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.weight""") )
rename_keys.append((F"""block{b}_bn/beta:0""", F"""encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.bias""") )
rename_keys.append(
(F"""block{b}_bn/moving_mean:0""", F"""encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.running_mean""") )
rename_keys.append(
(F"""block{b}_bn/moving_variance:0""", F"""encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.running_var""") )
rename_keys.append((F"""block{b}_se_reduce/kernel:0""", F"""encoder.blocks.{hf_b}.squeeze_excite.reduce.weight""") )
rename_keys.append((F"""block{b}_se_reduce/bias:0""", F"""encoder.blocks.{hf_b}.squeeze_excite.reduce.bias""") )
rename_keys.append((F"""block{b}_se_expand/kernel:0""", F"""encoder.blocks.{hf_b}.squeeze_excite.expand.weight""") )
rename_keys.append((F"""block{b}_se_expand/bias:0""", F"""encoder.blocks.{hf_b}.squeeze_excite.expand.bias""") )
rename_keys.append(
(F"""block{b}_project_conv/kernel:0""", F"""encoder.blocks.{hf_b}.projection.project_conv.weight""") )
rename_keys.append((F"""block{b}_project_bn/gamma:0""", F"""encoder.blocks.{hf_b}.projection.project_bn.weight""") )
rename_keys.append((F"""block{b}_project_bn/beta:0""", F"""encoder.blocks.{hf_b}.projection.project_bn.bias""") )
rename_keys.append(
(F"""block{b}_project_bn/moving_mean:0""", F"""encoder.blocks.{hf_b}.projection.project_bn.running_mean""") )
rename_keys.append(
(F"""block{b}_project_bn/moving_variance:0""", F"""encoder.blocks.{hf_b}.projection.project_bn.running_var""") )
rename_keys.append(('top_conv/kernel:0', 'encoder.top_conv.weight') )
rename_keys.append(('top_bn/gamma:0', 'encoder.top_bn.weight') )
rename_keys.append(('top_bn/beta:0', 'encoder.top_bn.bias') )
rename_keys.append(('top_bn/moving_mean:0', 'encoder.top_bn.running_mean') )
rename_keys.append(('top_bn/moving_variance:0', 'encoder.top_bn.running_var') )
    key_mapping = {}
    for item in rename_keys:
        if item[0] in original_param_names:
            key_mapping[item[0]] = 'efficientnet.' + item[1]

    key_mapping['predictions/kernel:0'] = 'classifier.weight'
    key_mapping['predictions/bias:0'] = 'classifier.bias'
    return key_mapping
def replace_params( hf_params , tf_params , key_mapping ):
    for key, value in tf_params.items():
        if "normalization" in key:
            continue

        hf_key = key_mapping[key]
        if "_conv" in key and "kernel" in key:
            new_hf_value = torch.from_numpy(value ).permute(3 , 2 , 0 , 1 )
        elif "depthwise_kernel" in key:
            new_hf_value = torch.from_numpy(value ).permute(2 , 3 , 0 , 1 )
        elif "kernel" in key:
            new_hf_value = torch.from_numpy(np.transpose(value ) )
        else:
            new_hf_value = torch.from_numpy(value )

        # Replace HF parameters with original TF model parameters
        assert hf_params[hf_key].shape == new_hf_value.shape
        hf_params[hf_key].copy_(new_hf_value )
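

# Added note (not in the original script): TF stores conv kernels as
# (height, width, in_channels, out_channels) while PyTorch expects
# (out_channels, in_channels, height, width), hence the permute(3, 2, 0, 1) above.
# For a hypothetical 3x3 kernel with 16 input and 32 output channels:
#   tf_kernel.shape == (3, 3, 16, 32)
#   torch.from_numpy(tf_kernel).permute(3, 2, 0, 1).shape == torch.Size([32, 16, 3, 3])
# Depthwise kernels are stored as (H, W, channels, depth_multiplier), which is why a
# different permute(2, 3, 0, 1) is used for them.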
@torch.no_grad()
def convert_efficientnet_checkpoint( model_name , pytorch_dump_folder_path , save_model , push_to_hub ):
    original_model = model_classes[model_name](
        include_top=True , weights='imagenet' , input_tensor=None , input_shape=None , pooling=None , classes=1000 , classifier_activation='softmax' , )

    tf_params = original_model.trainable_variables
    tf_non_train_params = original_model.non_trainable_variables
    tf_params = {param.name: param.numpy() for param in tf_params}
    for param in tf_non_train_params:
        tf_params[param.name] = param.numpy()
    tf_param_names = list(tf_params.keys() )

    # Load HuggingFace model
    config = get_efficientnet_config(model_name )
    hf_model = EfficientNetForImageClassification(config ).eval()
    hf_params = hf_model.state_dict()

    # Create src-to-dst parameter name mapping dictionary
    print('Converting parameters...' )
    key_mapping = rename_keys(tf_param_names )
    replace_params(hf_params , tf_params , key_mapping )

    # Initialize preprocessor and preprocess input image
    preprocessor = convert_image_processor(model_name )
    inputs = preprocessor(images=prepare_img() , return_tensors='pt' )

    # HF model inference
    hf_model.eval()
    with torch.no_grad():
        outputs = hf_model(**inputs )
    hf_logits = outputs.logits.detach().numpy()

    # Original model inference
    original_model.trainable = False
    image_size = CONFIG_MAP[model_name]['image_size']
    img = prepare_img().resize((image_size, image_size) , resample=PIL.Image.NEAREST )
    x = image.img_to_array(img )
    x = np.expand_dims(x , axis=0 )
    original_logits = original_model.predict(x )

    # Check whether original and HF model outputs match -> np.allclose
    assert np.allclose(original_logits , hf_logits , atol=1E-3 ), "The predicted logits are not the same."
    print('Model outputs match!' )

    if save_model:
        # Create folder to save model
        if not os.path.isdir(pytorch_dump_folder_path ):
            os.mkdir(pytorch_dump_folder_path )
        # Save converted model and image processor
        hf_model.save_pretrained(pytorch_dump_folder_path )
        preprocessor.save_pretrained(pytorch_dump_folder_path )

    if push_to_hub:
        # Push model and image processor to hub
        print(F"""Pushing converted {model_name} to the hub...""" )
        model_name = F"""efficientnet-{model_name}"""
        preprocessor.push_to_hub(model_name )
        hf_model.push_to_hub(model_name )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--model_name''',
default='''b0''',
type=str,
help='''Version name of the EfficientNet model you want to convert, select from [b0, b1, b2, b3, b4, b5, b6, b7].''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''',
default='''hf_model''',
type=str,
help='''Path to the output PyTorch model directory.''',
)
parser.add_argument('''--save_model''', action='''store_true''', help='''Save model to local''')
parser.add_argument('''--push_to_hub''', action='''store_true''', help='''Push model and image processor to the hub''')
    args = parser.parse_args()
convert_efficientnet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.save_model, args.push_to_hub)
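    # Example invocation (added for illustration; the script filename is assumed):
    #   python convert_efficientnet_to_pytorch.py --model_name b3 --pytorch_dump_folder_path hf_model --save_model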
| 119 | 1 |
"""simple docstring"""
import json
import os
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from requests.exceptions import HTTPError
from transformers.utils import (
CONFIG_NAME,
FLAX_WEIGHTS_NAME,
TF2_WEIGHTS_NAME,
TRANSFORMERS_CACHE,
WEIGHTS_NAME,
cached_file,
get_file_from_repo,
has_file,
)
UpperCamelCase = "hf-internal-testing/tiny-random-bert"
UpperCamelCase = os.path.join(TRANSFORMERS_CACHE, '''models--hf-internal-testing--tiny-random-bert''')
UpperCamelCase = "9b8c223d42b2188cb49d29af482996f9d0f3e5a6"
class GetFromCacheTests( unittest.TestCase ):
    def test_cached_file( self ):
        archive_file = cached_file(RANDOM_BERT , CONFIG_NAME )
        # Should have downloaded the file in here
        self.assertTrue(os.path.isdir(CACHE_DIR ) )
        # Cache should contain at least those three subfolders:
        for subfolder in ["blobs", "refs", "snapshots"]:
            self.assertTrue(os.path.isdir(os.path.join(CACHE_DIR , subfolder ) ) )
        with open(os.path.join(CACHE_DIR , "refs" , "main" ) ) as f:
            main_commit = f.read()
        self.assertEqual(archive_file , os.path.join(CACHE_DIR , "snapshots" , main_commit , CONFIG_NAME ) )
        self.assertTrue(os.path.isfile(archive_file ) )

        # File is cached at the same place the second time.
        new_archive_file = cached_file(RANDOM_BERT , CONFIG_NAME )
        self.assertEqual(archive_file , new_archive_file )

        # Using a specific revision to test the full commit hash.
        archive_file = cached_file(RANDOM_BERT , CONFIG_NAME , revision="9b8c223" )
        self.assertEqual(archive_file , os.path.join(CACHE_DIR , "snapshots" , FULL_COMMIT_HASH , CONFIG_NAME ) )
    def test_cached_file_errors( self ):
        with self.assertRaisesRegex(EnvironmentError , "is not a valid model identifier" ):
            _ = cached_file("tiny-random-bert" , CONFIG_NAME )

        with self.assertRaisesRegex(EnvironmentError , "is not a valid git identifier" ):
            _ = cached_file(RANDOM_BERT , CONFIG_NAME , revision="aaaa" )

        with self.assertRaisesRegex(EnvironmentError , "does not appear to have a file named" ):
            _ = cached_file(RANDOM_BERT , "conf" )
    def test_non_existence_is_cached( self ):
        with self.assertRaisesRegex(EnvironmentError , "does not appear to have a file named" ):
            _ = cached_file(RANDOM_BERT , "conf" )

        with open(os.path.join(CACHE_DIR , "refs" , "main" ) ) as f:
            main_commit = f.read()
        self.assertTrue(os.path.isfile(os.path.join(CACHE_DIR , ".no_exist" , main_commit , "conf" ) ) )

        path = cached_file(RANDOM_BERT , "conf" , _raise_exceptions_for_missing_entries=False )
        self.assertIsNone(path )

        path = cached_file(RANDOM_BERT , "conf" , local_files_only=True , _raise_exceptions_for_missing_entries=False )
        self.assertIsNone(path )

        response_mock = mock.Mock()
        response_mock.status_code = 500
        response_mock.headers = {}
        response_mock.raise_for_status.side_effect = HTTPError
        response_mock.json.return_value = {}
        # Under the mock environment we get a 500 error when trying to reach the tokenizer.
        with mock.patch("requests.Session.request" , return_value=response_mock ) as mock_head:
            path = cached_file(RANDOM_BERT , "conf" , _raise_exceptions_for_connection_errors=False )
            self.assertIsNone(path )
            # This check we did call the fake head request
            mock_head.assert_called()
    def test_has_file( self ):
        self.assertTrue(has_file("hf-internal-testing/tiny-bert-pt-only" , WEIGHTS_NAME ) )
        self.assertFalse(has_file("hf-internal-testing/tiny-bert-pt-only" , TF2_WEIGHTS_NAME ) )
        self.assertFalse(has_file("hf-internal-testing/tiny-bert-pt-only" , FLAX_WEIGHTS_NAME ) )
    def test_get_file_from_repo_distant( self ):
        # `get_file_from_repo` returns None if the file does not exist.
        self.assertIsNone(get_file_from_repo("bert-base-cased" , "ahah.txt" ) )

        # The function raises if the repository does not exist.
        with self.assertRaisesRegex(EnvironmentError , "is not a valid model identifier" ):
            get_file_from_repo("bert-base-case" , CONFIG_NAME )

        # The function raises if the revision does not exist.
        with self.assertRaisesRegex(EnvironmentError , "is not a valid git identifier" ):
            get_file_from_repo("bert-base-cased" , CONFIG_NAME , revision="ahaha" )

        resolved_file = get_file_from_repo("bert-base-cased" , CONFIG_NAME )
        # The name is the cached name which is not very easy to test, so instead we load the content.
        config = json.loads(open(resolved_file , "r" ).read() )
        self.assertEqual(config["hidden_size"] , 768 )
    def test_get_file_from_repo_local( self ):
        with tempfile.TemporaryDirectory() as tmp_dir:
            filename = Path(tmp_dir ) / "a.txt"
            filename.touch()
            self.assertEqual(get_file_from_repo(tmp_dir , "a.txt" ) , str(filename ) )
            self.assertIsNone(get_file_from_repo(tmp_dir , "b.txt" ) )
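

# Usage sketch (added; not part of the test file): `cached_file` resolves a file from
# a hub repo to a local snapshot path, downloading and caching it on first use, e.g.
#   from transformers.utils import cached_file, CONFIG_NAME
#   resolved = cached_file("bert-base-cased", CONFIG_NAME)  # local path to config.json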
| 350 | import os
from argparse import ArgumentParser
from typing import List
import torch.utils.data
from datasets import Dataset, IterableDataset
from datasets.distributed import split_dataset_by_node
NUM_SHARDS = 4
NUM_ITEMS_PER_SHARD = 3
class FailedTestError( RuntimeError ):
    pass
def gen( shards: List[str] ):
    for shard in shards:
        for i in range(NUM_ITEMS_PER_SHARD ):
            yield {"i": i, "shard": shard}
def main():
    rank = int(os.environ["RANK"] )
    world_size = int(os.environ["WORLD_SIZE"] )

    parser = ArgumentParser()
    parser.add_argument("--streaming" , type=bool )
    parser.add_argument("--local_rank" , type=int )
    parser.add_argument("--num_workers" , type=int , default=0 )
    args = parser.parse_args()
    streaming = args.streaming
    num_workers = args.num_workers

    gen_kwargs = {"shards": [f'''shard_{shard_idx}''' for shard_idx in range(NUM_SHARDS )]}
    ds = IterableDataset.from_generator(gen , gen_kwargs=gen_kwargs )
    if not streaming:
        ds = Dataset.from_list(list(ds ) )

    ds = split_dataset_by_node(ds , rank=rank , world_size=world_size )
    dataloader = torch.utils.data.DataLoader(ds , num_workers=num_workers )

    full_size = NUM_SHARDS * NUM_ITEMS_PER_SHARD
    expected_local_size = full_size // world_size
    expected_local_size += int(rank < (full_size % world_size) )

    local_size = sum(1 for _ in dataloader )
    if local_size != expected_local_size:
        raise FailedTestError(f'''local_size {local_size} != expected_local_size {expected_local_size}''' )
if __name__ == "__main__":
main()
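# Worked example (added for illustration): with NUM_SHARDS = 4 and
# NUM_ITEMS_PER_SHARD = 3 the full dataset has 12 items. For world_size = 3 every
# rank expects 12 // 3 = 4 items; for world_size = 5, 12 // 5 = 2 with remainder 2,
# so ranks 0-1 expect 3 items and ranks 2-4 expect 2.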
| 333 | 0 |
'''simple docstring'''
import argparse
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
########################################################################
# This is a fully working simple example to use Accelerate
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32
def get_dataloaders(accelerator: Accelerator , batch_size: int = 16 ):
    tokenizer = AutoTokenizer.from_pretrained('''bert-base-cased''' )
    datasets = load_dataset('''glue''' , '''mrpc''' )

    def tokenize_function(examples ):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples['''sentence1'''] , examples['''sentence2'''] , truncation=True , max_length=None )
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    # starting with the main process first:
    with accelerator.main_process_first():
        tokenized_datasets = datasets.map(
            tokenize_function , batched=True , remove_columns=['''idx''', '''sentence1''', '''sentence2'''] , )

    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column('''label''' , '''labels''' )

    def collate_fn(examples ):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16
        if accelerator.mixed_precision == "fp8":
            pad_to_multiple_of = 16
        elif accelerator.mixed_precision != "no":
            pad_to_multiple_of = 8
        else:
            pad_to_multiple_of = None

        return tokenizer.pad(
            examples , padding='''longest''' , max_length=max_length , pad_to_multiple_of=pad_to_multiple_of , return_tensors='''pt''' , )

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets['''train'''] , shuffle=True , collate_fn=collate_fn , batch_size=batch_size , drop_last=True )
    eval_dataloader = DataLoader(
        tokenized_datasets['''validation'''] , shuffle=False , collate_fn=collate_fn , batch_size=EVAL_BATCH_SIZE , drop_last=(accelerator.mixed_precision == '''fp8''') , )

    return train_dataloader, eval_dataloader
def training_function(config , args ):
    # Initialize accelerator
    accelerator = Accelerator(cpu=args.cpu , mixed_precision=args.mixed_precision )
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config['''lr''']
    num_epochs = int(config['''num_epochs'''] )
    seed = int(config['''seed'''] )
    batch_size = int(config['''batch_size'''] )

    metric = evaluate.load('''glue''' , '''mrpc''' )

    # If the batch size is too big we use gradient accumulation
    gradient_accumulation_steps = 1
    if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU:
        gradient_accumulation_steps = batch_size // MAX_GPU_BATCH_SIZE
        batch_size = MAX_GPU_BATCH_SIZE

    set_seed(seed )
    train_dataloader, eval_dataloader = get_dataloaders(accelerator , batch_size )

    # Instantiate the model (we build the model here so that the seed also control new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained('''bert-base-cased''' , return_dict=True )

    # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
    # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
    # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
    model = model.to(accelerator.device )

    # Instantiate optimizer
    optimizer = AdamW(params=model.parameters() , lr=lr )

    # Instantiate scheduler
    lr_scheduler = get_linear_schedule_with_warmup(
        optimizer=optimizer , num_warmup_steps=100 , num_training_steps=(len(train_dataloader ) * num_epochs) // gradient_accumulation_steps , )

    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model , optimizer , train_dataloader , eval_dataloader , lr_scheduler )

    # Now we train the model
    for epoch in range(num_epochs ):
        model.train()
        for step, batch in enumerate(train_dataloader ):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device )
            outputs = model(**batch )
            loss = outputs.loss
            loss = loss / gradient_accumulation_steps
            accelerator.backward(loss )
            if step % gradient_accumulation_steps == 0:
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()

        model.eval()
        for step, batch in enumerate(eval_dataloader ):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device )
            with torch.no_grad():
                outputs = model(**batch )
            predictions = outputs.logits.argmax(dim=-1 )
            predictions, references = accelerator.gather_for_metrics((predictions, batch['''labels''']) )
            metric.add_batch(
                predictions=predictions , references=references , )

        eval_metric = metric.compute()
        # Use accelerator.print to print only on the main process.
        accelerator.print(F"epoch {epoch}:" , eval_metric )
def main():
    parser = argparse.ArgumentParser(description='''Simple example of training script.''' )
    parser.add_argument(
        '''--mixed_precision''' , type=str , default=None , choices=['''no''', '''fp16''', '''bf16''', '''fp8'''] , help='''Whether to use mixed precision. Choose'''
        '''between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10.'''
        '''and an Nvidia Ampere GPU.''' , )
    parser.add_argument('''--cpu''' , action='''store_true''' , help='''If passed, will train on the CPU.''' )
    args = parser.parse_args()
    config = {'''lr''': 2E-5, '''num_epochs''': 3, '''seed''': 42, '''batch_size''': 16}
    training_function(config , args )
if __name__ == "__main__":
main()
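# Typical invocations (added for illustration; assumes this script is saved as
# nlp_example.py):
#   python nlp_example.py --cpu
#   accelerate launch nlp_example.py --mixed_precision fp16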
| 34 |
from . import __version__
# Backward compatibility imports, to make sure all those objects can be found in file_utils
from .utils import (
CLOUDFRONT_DISTRIB_PREFIX,
CONFIG_NAME,
DISABLE_TELEMETRY,
DUMMY_INPUTS,
DUMMY_MASK,
ENV_VARS_TRUE_AND_AUTO_VALUES,
ENV_VARS_TRUE_VALUES,
FEATURE_EXTRACTOR_NAME,
FLAX_WEIGHTS_NAME,
HF_MODULES_CACHE,
HUGGINGFACE_CO_PREFIX,
HUGGINGFACE_CO_RESOLVE_ENDPOINT,
MODEL_CARD_NAME,
MULTIPLE_CHOICE_DUMMY_INPUTS,
PYTORCH_PRETRAINED_BERT_CACHE,
PYTORCH_TRANSFORMERS_CACHE,
S3_BUCKET_PREFIX,
SENTENCEPIECE_UNDERLINE,
SPIECE_UNDERLINE,
TF2_WEIGHTS_NAME,
TF_WEIGHTS_NAME,
TORCH_FX_REQUIRED_VERSION,
TRANSFORMERS_CACHE,
TRANSFORMERS_DYNAMIC_MODULE_NAME,
USE_JAX,
USE_TF,
USE_TORCH,
WEIGHTS_INDEX_NAME,
WEIGHTS_NAME,
ContextManagers,
DummyObject,
EntryNotFoundError,
ExplicitEnum,
ModelOutput,
PaddingStrategy,
PushToHubMixin,
RepositoryNotFoundError,
RevisionNotFoundError,
TensorType,
_LazyModule,
add_code_sample_docstrings,
add_end_docstrings,
add_start_docstrings,
add_start_docstrings_to_model_forward,
cached_property,
copy_func,
default_cache_path,
define_sagemaker_information,
get_cached_models,
get_file_from_repo,
get_full_repo_name,
get_torch_version,
has_file,
http_user_agent,
is_apex_available,
is_bsa_available,
is_coloredlogs_available,
is_datasets_available,
is_detectrona_available,
is_faiss_available,
is_flax_available,
is_ftfy_available,
is_in_notebook,
is_ipex_available,
is_librosa_available,
is_offline_mode,
is_onnx_available,
is_pandas_available,
is_phonemizer_available,
is_protobuf_available,
is_psutil_available,
is_pyanvml_available,
is_pyctcdecode_available,
is_pytesseract_available,
is_pytorch_quantization_available,
is_rjieba_available,
is_sagemaker_dp_enabled,
is_sagemaker_mp_enabled,
is_scipy_available,
is_sentencepiece_available,
is_seqio_available,
is_sklearn_available,
is_soundfile_availble,
is_spacy_available,
is_speech_available,
is_tensor,
is_tensorflow_probability_available,
is_tfaonnx_available,
is_tf_available,
is_timm_available,
is_tokenizers_available,
is_torch_available,
is_torch_bfaa_available,
is_torch_cuda_available,
is_torch_fx_available,
is_torch_fx_proxy,
is_torch_mps_available,
is_torch_tfaa_available,
is_torch_tpu_available,
is_torchaudio_available,
is_training_run_on_sagemaker,
is_vision_available,
replace_return_docstrings,
requires_backends,
to_numpy,
to_py_obj,
torch_only_method,
)
| 107 | 0 |
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_tf
if is_tf_available():
import tensorflow as tf
from tensorflow.python.eager import context
from tensorflow.python.framework import ops
from transformers import GradientAccumulator, create_optimizer
@require_tf
class GradientAccumulatorTest( unittest.TestCase ):
    def assertListAlmostEqual( self , list1 , list2 , tol ):
        self.assertEqual(len(list1 ) , len(list2 ) )
        for a, b in zip(list1 , list2 ):
            self.assertAlmostEqual(a , b , delta=tol )
    def testGradientAccumulator( self ):
        accumulator = GradientAccumulator()
        accumulator([tf.constant([1.0, 2.0] )] )
        accumulator([tf.constant([-2.0, 1.0] )] )
        accumulator([tf.constant([-1.0, 2.0] )] )
        with self.assertRaises(ValueError ):
            accumulator([tf.constant([1.0, 1.0] ), tf.constant([2.0, 2.0] )] )
        self.assertEqual(accumulator.step , 3 )
        self.assertEqual(len(accumulator.gradients ) , 1 )
        self.assertListAlmostEqual(accumulator.gradients[0].numpy().tolist() , [-2.0, 5.0] , tol=1e-2 )
        accumulator.reset()
        self.assertEqual(accumulator.step , 0 )
        self.assertListAlmostEqual(accumulator.gradients[0].numpy().tolist() , [0.0, 0.0] , tol=1e-2 )
    def testGradientAccumulatorDistributionStrategy( self ):
        context._context = None
        ops.enable_eager_execution_internal()
        physical_devices = tf.config.list_physical_devices('CPU' )
        if len(physical_devices ) == 1:
            tf.config.set_logical_device_configuration(
                physical_devices[0] , [tf.config.LogicalDeviceConfiguration(), tf.config.LogicalDeviceConfiguration()] )
        devices = tf.config.list_logical_devices(device_type='CPU' )
        strategy = tf.distribute.MirroredStrategy(devices=devices[:2] )

        with strategy.scope():
            accumulator = GradientAccumulator()
            variable = tf.Variable([4.0, 3.0] )
            optimizer, _ = create_optimizer(5e-5 , 10 , 5 )
            gradient_placeholder = tf.Variable([0.0, 0.0] , trainable=False )

        def accumulate_on_replica(gradient ):
            accumulator([gradient] )

        def apply_on_replica():
            optimizer.apply_gradients(list(zip(accumulator.gradients , [variable] ) ) )

        @tf.function
        def accumulate(grad1 , grad2 ):
            with strategy.scope():
                local_variables = strategy.experimental_local_results(gradient_placeholder )
                local_variables[0].assign(grad1 )
                local_variables[1].assign(grad2 )
                strategy.run(accumulate_on_replica , args=(gradient_placeholder,) )

        @tf.function
        def apply_grad():
            with strategy.scope():
                strategy.run(apply_on_replica )

        def _check_local_values(grad1 , grad2 ):
            values = strategy.experimental_local_results(accumulator._gradients[0] )
            self.assertListAlmostEqual(values[0].value() , grad1 , tol=1e-2 )
            self.assertListAlmostEqual(values[1].value() , grad2 , tol=1e-2 )
accumulate([1.0, 2.0] , [-1.0, 1.0] )
accumulate([3.0, -1.0] , [-1.0, -1.0] )
accumulate([-2.0, 2.0] , [3.0, -2.0] )
self.assertEqual(accumulator.step , 3 )
_check_local_values([2.0, 3.0] , [1.0, -2.0] )
apply_grad()
self.assertListAlmostEqual(variable.value() , [4.0, 3.0] , tol=1e-2 )
accumulator.reset()
self.assertEqual(accumulator.step , 0 )
_check_local_values([0.0, 0.0] , [0.0, 0.0] )
| 370 |
from math import sqrt
def sum_of_divisors( n ):
    '''simple docstring'''
    total = 0
    for i in range(1 , int(sqrt(n ) + 1 ) ):
        if n % i == 0 and i != sqrt(n ):
            total += i + n // i
        elif i == sqrt(n ):
            total += i
    return total - n


def solution( limit = 10000 ):
    '''simple docstring'''
    total = sum(
        i
        for i in range(1 , limit )
        if sum_of_divisors(sum_of_divisors(i ) ) == i and sum_of_divisors(i ) != i )
    return total
if __name__ == "__main__":
print(solution(int(str(input()).strip())))
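# Worked example (added for illustration): 220 and 284 form the classic amicable
# pair, since sum_of_divisors(220) == 284 and sum_of_divisors(284) == 220, so both
# are counted by solution(); perfect numbers such as 6, where sum_of_divisors(6) == 6,
# are excluded by the `sum_of_divisors(i) != i` condition.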
| 200 | 0 |
"""simple docstring"""
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import numpy as np
import tensorflow as tf
from transformers import (
TF_FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
FlaubertConfig,
TFFlaubertForMultipleChoice,
TFFlaubertForQuestionAnsweringSimple,
TFFlaubertForSequenceClassification,
TFFlaubertForTokenClassification,
TFFlaubertModel,
TFFlaubertWithLMHeadModel,
)
class TFFlaubertModelTester:
    def __init__( self , parent , ):
        self.parent = parent
        self.batch_size = 13
        self.seq_length = 7
        self.is_training = True
        self.use_input_lengths = True
        self.use_token_type_ids = True
        self.use_labels = True
        self.gelu_activation = True
        self.sinusoidal_embeddings = False
        self.causal = False
        self.asm = False
        self.n_langs = 2
        self.vocab_size = 99
        self.n_special = 0
        self.hidden_size = 32
        self.num_hidden_layers = 2
        self.num_attention_heads = 4
        self.hidden_dropout_prob = 0.1
        self.attention_probs_dropout_prob = 0.1
        self.max_position_embeddings = 512
        self.type_vocab_size = 16
        self.type_sequence_label_size = 2
        self.initializer_range = 0.02
        self.num_labels = 3
        self.num_choices = 4
        self.summary_type = '''last'''
        self.use_proj = True
        self.scope = None
        self.bos_token_id = 0
    def prepare_config_and_inputs( self ):
        input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        input_mask = random_attention_mask([self.batch_size, self.seq_length] , dtype=tf.float32 )

        input_lengths = None
        if self.use_input_lengths:
            input_lengths = (
                ids_tensor([self.batch_size] , vocab_size=2 ) + self.seq_length - 2
            )  # small variation of seq_length

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length] , self.n_langs )

        sequence_labels = None
        token_labels = None
        is_impossible_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size] , self.type_sequence_label_size )
            token_labels = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
            is_impossible_labels = ids_tensor([self.batch_size] , 2 , dtype=tf.float32 )
            choice_labels = ids_tensor([self.batch_size] , self.num_choices )

        config = FlaubertConfig(
            vocab_size=self.vocab_size , n_special=self.n_special , emb_dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , gelu_activation=self.gelu_activation , sinusoidal_embeddings=self.sinusoidal_embeddings , asm=self.asm , causal=self.causal , n_langs=self.n_langs , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , summary_type=self.summary_type , use_proj=self.use_proj , bos_token_id=self.bos_token_id , )
return (
config,
input_ids,
token_type_ids,
input_lengths,
sequence_labels,
token_labels,
is_impossible_labels,
choice_labels,
input_mask,
)
    def create_and_check_flaubert_model( self , config , input_ids , token_type_ids , input_lengths , sequence_labels , token_labels , is_impossible_labels , choice_labels , input_mask , ):
        model = TFFlaubertModel(config=config )
        inputs = {'''input_ids''': input_ids, '''lengths''': input_lengths, '''langs''': token_type_ids}
        result = model(inputs )

        inputs = [input_ids, input_mask]
        result = model(inputs )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
    def create_and_check_flaubert_lm_head( self , config , input_ids , token_type_ids , input_lengths , sequence_labels , token_labels , is_impossible_labels , choice_labels , input_mask , ):
        model = TFFlaubertWithLMHeadModel(config )
        inputs = {'''input_ids''': input_ids, '''lengths''': input_lengths, '''langs''': token_type_ids}
        result = model(inputs )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
    def create_and_check_flaubert_qa( self , config , input_ids , token_type_ids , input_lengths , sequence_labels , token_labels , is_impossible_labels , choice_labels , input_mask , ):
        model = TFFlaubertForQuestionAnsweringSimple(config )
        inputs = {'''input_ids''': input_ids, '''lengths''': input_lengths}
        result = model(inputs )
        self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
        self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
    def create_and_check_flaubert_sequence_classif( self , config , input_ids , token_type_ids , input_lengths , sequence_labels , token_labels , is_impossible_labels , choice_labels , input_mask , ):
        model = TFFlaubertForSequenceClassification(config )
        inputs = {'''input_ids''': input_ids, '''lengths''': input_lengths}
        result = model(inputs )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
    def create_and_check_flaubert_for_token_classification( self , config , input_ids , token_type_ids , input_lengths , sequence_labels , token_labels , is_impossible_labels , choice_labels , input_mask , ):
        config.num_labels = self.num_labels
        model = TFFlaubertForTokenClassification(config=config )
        inputs = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids}
        result = model(inputs )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
    def create_and_check_flaubert_for_multiple_choice( self , config , input_ids , token_type_ids , input_lengths , sequence_labels , token_labels , is_impossible_labels , choice_labels , input_mask , ):
        config.num_choices = self.num_choices
        model = TFFlaubertForMultipleChoice(config=config )
        multiple_choice_inputs_ids = tf.tile(tf.expand_dims(input_ids , 1 ) , (1, self.num_choices, 1) )
        multiple_choice_input_mask = tf.tile(tf.expand_dims(input_mask , 1 ) , (1, self.num_choices, 1) )
        multiple_choice_token_type_ids = tf.tile(tf.expand_dims(token_type_ids , 1 ) , (1, self.num_choices, 1) )
        inputs = {
            '''input_ids''': multiple_choice_inputs_ids,
            '''attention_mask''': multiple_choice_input_mask,
            '''token_type_ids''': multiple_choice_token_type_ids,
        }
        result = model(inputs )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
    def prepare_config_and_inputs_for_common( self ):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_lengths,
            sequence_labels,
            token_labels,
            is_impossible_labels,
            choice_labels,
            input_mask,
        ) = config_and_inputs
        inputs_dict = {
            '''input_ids''': input_ids,
            '''token_type_ids''': token_type_ids,
            '''langs''': token_type_ids,
            '''lengths''': input_lengths,
        }
        return config, inputs_dict
@require_tf
class TFFlaubertModelTest( TFModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    all_model_classes = (
        (
            TFFlaubertModel,
            TFFlaubertWithLMHeadModel,
            TFFlaubertForSequenceClassification,
            TFFlaubertForQuestionAnsweringSimple,
            TFFlaubertForTokenClassification,
            TFFlaubertForMultipleChoice,
        )
        if is_tf_available()
        else ()
    )
    all_generative_model_classes = (
        (TFFlaubertWithLMHeadModel,) if is_tf_available() else ()
    )  # TODO (PVP): Check other models whether language generation is also applicable
    pipeline_model_mapping = (
        {
            "feature-extraction": TFFlaubertModel,
            "fill-mask": TFFlaubertWithLMHeadModel,
            "question-answering": TFFlaubertForQuestionAnsweringSimple,
            "text-classification": TFFlaubertForSequenceClassification,
            "token-classification": TFFlaubertForTokenClassification,
            "zero-shot": TFFlaubertForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )
    test_head_masking = False
    test_onnx = False

    def is_pipeline_test_to_skip(
        self , pipeline_test_casse_name , config_class , model_architecture , tokenizer_name , processor_name ):
        if (
            pipeline_test_casse_name == "QAPipelineTests"
            and tokenizer_name is not None
            and not tokenizer_name.endswith('''Fast''' )
        ):
            # `QAPipelineTests` fails for a few models when the slower tokenizer are used.
            # (The slower tokenizers were never used for pipeline tests before the pipeline testing rework)
            # TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer
            return True

        return False

    def setUp( self ):
        self.model_tester = TFFlaubertModelTester(self )
        self.config_tester = ConfigTester(self , config_class=FlaubertConfig , emb_dim=37 )
    def test_config( self ):
        self.config_tester.run_common_tests()

    def test_flaubert_model( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_model(*config_and_inputs )

    def test_flaubert_lm_head( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_lm_head(*config_and_inputs )

    def test_flaubert_qa( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_qa(*config_and_inputs )

    def test_flaubert_sequence_classif( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_sequence_classif(*config_and_inputs )

    def test_flaubert_token_classification( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_for_token_classification(*config_and_inputs )

    def test_flaubert_multiple_choice( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_for_multiple_choice(*config_and_inputs )

    @slow
    def test_model_from_pretrained( self ):
        for model_name in TF_FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFFlaubertModel.from_pretrained(model_name )
            self.assertIsNotNone(model )
@require_tf
@require_sentencepiece
@require_tokenizers
class TFFlaubertModelIntegrationTest( unittest.TestCase ):
    @slow
    def test_output_embeds_base_model( self ):
        model = TFFlaubertModel.from_pretrained('''jplu/tf-flaubert-small-cased''' )

        input_ids = tf.convert_to_tensor(
            [[0, 158, 735, 2592, 1424, 6727, 82, 1]] , dtype=tf.int32 , )  # "J'aime flaubert !"

        output = model(input_ids )[0]
        expected_shape = tf.TensorShape((1, 8, 512) )
        self.assertEqual(output.shape , expected_shape )

        # compare the actual values for a slice.
        expected_slice = tf.convert_to_tensor(
            [
                [
                    [-1.8768773, -1.566555, 0.27072418],
                    [-1.6920038, -0.5873505, 1.9329599],
                    [-2.9563985, -1.6993835, 1.7972052],
                ]
            ] , dtype=tf.float32 , )

        self.assertTrue(np.allclose(output[:, :3, :3].numpy() , expected_slice.numpy() , atol=1e-4 ) )
| 33 |
"""simple docstring"""
from __future__ import annotations
arr = [-10, -5, 0, 5, 5.1, 11, 13, 21, 3, 4, -21, -10, -5, -1, 0]
expect = [-5, 0, 5, 5.1, 11, 13, 21, -1, 4, -1, -10, -5, -1, 0, -1]
def next_greatest_element_slow( arr: list[float] ) -> list[float]:
    result = []
    arr_size = len(arr )
    for i in range(arr_size ):
        next_element: float = -1
        for j in range(i + 1 , arr_size ):
            if arr[i] < arr[j]:
                next_element = arr[j]
                break
        result.append(next_element )
    return result


def next_greatest_element_fast( arr: list[float] ) -> list[float]:
    result = []
    for i, outer in enumerate(arr ):
        next_element: float = -1
        for inner in arr[i + 1 :]:
            if outer < inner:
                next_element = inner
                break
        result.append(next_element )
    return result


def next_greatest_element( arr: list[float] ) -> list[float]:
    arr_size = len(arr )
    stack: list[float] = []
    result: list[float] = [-1] * arr_size

    for index in reversed(range(arr_size ) ):
        if stack:
            while stack[-1] <= arr[index]:
                stack.pop()
                if not stack:
                    break
        if stack:
            result[index] = stack[-1]
        stack.append(arr[index] )
    return result
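

# Added illustration (not part of the original file): for arr = [2, 7, 3, 5, 4, 6, 8]
# all three implementations return [7, 8, 5, 6, 6, 8, -1]; the stack-based variant
# computes it in a single O(n) pass instead of the O(n^2) nested loops.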
if __name__ == "__main__":
from doctest import testmod
from timeit import timeit
testmod()
print(next_greatest_element_slow(arr))
print(next_greatest_element_fast(arr))
print(next_greatest_element(arr))
    setup = (
'''from __main__ import arr, next_greatest_element_slow, '''
'''next_greatest_element_fast, next_greatest_element'''
)
print(
'''next_greatest_element_slow():''',
timeit('''next_greatest_element_slow(arr)''', setup=setup),
)
print(
'''next_greatest_element_fast():''',
timeit('''next_greatest_element_fast(arr)''', setup=setup),
)
print(
''' next_greatest_element():''',
timeit('''next_greatest_element(arr)''', setup=setup),
)
| 33 | 1 |
'''simple docstring'''
import re
import jax.numpy as jnp
from flax.traverse_util import flatten_dict, unflatten_dict
from jax.random import PRNGKey
from ..utils import logging
_lowerCamelCase = logging.get_logger(__name__)
def rename_key( key ):
    """simple docstring"""
    regex = r"\w+[.]\d+"
    pats = re.findall(regex , key )
    for pat in pats:
        key = key.replace(pat , "_".join(pat.split("." ) ) )
    return key
def rename_key_and_reshape_tensor( pt_tuple_key , pt_tensor , random_flax_state_dict ):
    """simple docstring"""
    # layer norm
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("scale",)
    if (
        any("norm" in str_ for str_ in pt_tuple_key )
        and (pt_tuple_key[-1] == "bias")
        and (pt_tuple_key[:-1] + ("bias",) not in random_flax_state_dict)
        and (pt_tuple_key[:-1] + ("scale",) in random_flax_state_dict)
    ):
        renamed_pt_tuple_key = pt_tuple_key[:-1] + ("scale",)
        return renamed_pt_tuple_key, pt_tensor
    elif pt_tuple_key[-1] in ["weight", "gamma"] and pt_tuple_key[:-1] + ("scale",) in random_flax_state_dict:
        renamed_pt_tuple_key = pt_tuple_key[:-1] + ("scale",)
        return renamed_pt_tuple_key, pt_tensor

    # embedding
    if pt_tuple_key[-1] == "weight" and pt_tuple_key[:-1] + ("embedding",) in random_flax_state_dict:
        renamed_pt_tuple_key = pt_tuple_key[:-1] + ("embedding",)
        return renamed_pt_tuple_key, pt_tensor

    # conv layer
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("kernel",)
    if pt_tuple_key[-1] == "weight" and pt_tensor.ndim == 4:
        pt_tensor = pt_tensor.transpose(2 , 3 , 1 , 0 )
        return renamed_pt_tuple_key, pt_tensor

    # linear layer
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("kernel",)
    if pt_tuple_key[-1] == "weight":
        pt_tensor = pt_tensor.T
        return renamed_pt_tuple_key, pt_tensor

    # old PyTorch layer norm weight
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("weight",)
    if pt_tuple_key[-1] == "gamma":
        return renamed_pt_tuple_key, pt_tensor

    # old PyTorch layer norm bias
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("bias",)
    if pt_tuple_key[-1] == "beta":
        return renamed_pt_tuple_key, pt_tensor

    return pt_tuple_key, pt_tensor
def convert_pytorch_state_dict_to_flax( pt_state_dict , flax_model , init_key=42 ):
    """simple docstring"""
    # Step 1: Convert pytorch tensors to numpy
    pt_state_dict = {k: v.numpy() for k, v in pt_state_dict.items()}

    # Step 2: Since the model is stateless, get random Flax params
    random_flax_params = flax_model.init_weights(PRNGKey(init_key ) )

    random_flax_state_dict = flatten_dict(random_flax_params )
    flax_state_dict = {}

    # Need to change some parameters name to match Flax names
    for pt_key, pt_tensor in pt_state_dict.items():
        renamed_pt_key = rename_key(pt_key )
        pt_tuple_key = tuple(renamed_pt_key.split("." ) )

        # Correctly rename weight parameters
        flax_key, flax_tensor = rename_key_and_reshape_tensor(pt_tuple_key , pt_tensor , random_flax_state_dict )

        if flax_key in random_flax_state_dict:
            if flax_tensor.shape != random_flax_state_dict[flax_key].shape:
                raise ValueError(
                    F'''PyTorch checkpoint seems to be incorrect. Weight {pt_key} was expected to be of shape '''
                    F'''{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}.''' )

        # also add unexpected weight so that warning is thrown
        flax_state_dict[flax_key] = jnp.asarray(flax_tensor )

    return unflatten_dict(flax_state_dict )
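

# Added illustration (not part of the original module): rename_key rewrites
# "<name>.<digit>" segments with underscores so PyTorch keys line up with Flax ones:
#   rename_key("down_blocks.0.attentions.1.weight") == "down_blocks_0.attentions_1.weight"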
| 67 |
'''simple docstring'''
import os
from pathlib import Path
def load_cuda_kernels():
    """simple docstring"""
    from torch.utils.cpp_extension import load

    root = Path(__file__ ).resolve().parent.parent.parent / "kernels" / "deformable_detr"
    src_files = [
        root / filename
        for filename in [
            "vision.cpp",
            os.path.join("cpu" , "ms_deform_attn_cpu.cpp" ),
            os.path.join("cuda" , "ms_deform_attn_cuda.cu" ),
        ]
    ]

    load(
        "MultiScaleDeformableAttention" , src_files , with_cuda=True , extra_include_paths=[str(root )] , extra_cflags=["-DWITH_CUDA=1"] , extra_cuda_cflags=[
            "-DCUDA_HAS_FP16=1",
            "-D__CUDA_NO_HALF_OPERATORS__",
            "-D__CUDA_NO_HALF_CONVERSIONS__",
            "-D__CUDA_NO_HALF2_OPERATORS__",
        ] , )

    import MultiScaleDeformableAttention as MSDA

    return MSDA
| 67 | 1 |
import base64


def base85_encode( string: str ) -> bytes:
    return base64.a85encode(string.encode('utf-8' ) )


def base85_decode( a85encoded: bytes ) -> str:
    return base64.a85decode(a85encoded ).decode('utf-8' )
if __name__ == "__main__":
import doctest
doctest.testmod()
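

# Added illustration (not in the original file): an encode/decode round-trip using
# the Ascii85 helpers above.
if __name__ == "__main__":
    sample = "some text to round-trip"
    assert base85_decode(base85_encode(sample)) == sample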
| 327 |
import logging
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
from seqaseq_trainer import SeqaSeqTrainer
from seqaseq_training_args import SeqaSeqTrainingArguments
import transformers
from transformers import (
AutoConfig,
AutoModelForSeqaSeqLM,
AutoTokenizer,
HfArgumentParser,
MBartTokenizer,
MBartTokenizerFast,
set_seed,
)
from transformers.trainer_utils import EvaluationStrategy, is_main_process
from transformers.training_args import ParallelMode
from utils import (
SeqaSeqDataCollator,
SeqaSeqDataset,
assert_all_frozen,
build_compute_metrics_fn,
check_output_dir,
freeze_embeds,
freeze_params,
lmap,
save_json,
use_task_specific_params,
write_txt_file,
)
_SCREAMING_SNAKE_CASE = logging.getLogger(__name__)
@dataclass
class ModelArguments:
    model_name_or_path: str = field(
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"} )
    config_name: Optional[str] = field(
        default=None , metadata={"help": "Pretrained config name or path if not the same as model_name"} )
    tokenizer_name: Optional[str] = field(
        default=None , metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"} )
    cache_dir: Optional[str] = field(
        default=None , metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"} , )
    freeze_encoder: bool = field(default=False , metadata={"help": "Whether to freeze the encoder."} )
    freeze_embeds: bool = field(default=False , metadata={"help": "Whether to freeze the embeddings."} )
@dataclass
class DataTrainingArguments:
    data_dir: str = field(
        metadata={"help": "The input data dir. Should contain the .tsv files (or other data files) for the task."} )
    task: Optional[str] = field(
        default="summarization" , metadata={"help": "Task name, summarization (or summarization_{dataset} for pegasus) or translation"} , )
    max_source_length: Optional[int] = field(
        default=1024 , metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        } , )
    max_target_length: Optional[int] = field(
        default=128 , metadata={
            "help": (
                "The maximum total sequence length for target text after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        } , )
    val_max_target_length: Optional[int] = field(
        default=142 , metadata={
            "help": (
                "The maximum total sequence length for validation target text after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded. "
                "This argument is also used to override the ``max_length`` param of ``model.generate``, which is used "
                "during ``evaluate`` and ``predict``."
            )
        } , )
    test_max_target_length: Optional[int] = field(
        default=142 , metadata={
            "help": (
                "The maximum total sequence length for test target text after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        } , )
    n_train: Optional[int] = field(default=-1 , metadata={"help": "# training examples. -1 means use all."} )
    n_val: Optional[int] = field(default=-1 , metadata={"help": "# validation examples. -1 means use all."} )
    n_test: Optional[int] = field(default=-1 , metadata={"help": "# test examples. -1 means use all."} )
    src_lang: Optional[str] = field(default=None , metadata={"help": "Source language id for translation."} )
    tgt_lang: Optional[str] = field(default=None , metadata={"help": "Target language id for translation."} )
    eval_beams: Optional[int] = field(default=None , metadata={"help": "# num_beams to use for evaluation."} )
    ignore_pad_token_for_loss: bool = field(
        default=True , metadata={"help": "If only pad tokens should be ignored. This assumes that `config.pad_token_id` is defined."} , )
def handle_metrics(split , metrics , output_dir ):
    logger.info(f"""***** {split} metrics *****""" )
    for key in sorted(metrics.keys() ):
        logger.info(f"""  {key} = {metrics[key]}""" )
    save_json(metrics , os.path.join(output_dir , f"""{split}_results.json""" ) )
def main():
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, Seq2SeqTrainingArguments) )

    if len(sys.argv ) == 2 and sys.argv[1].endswith('.json' ):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args , data_args , training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
    else:
        model_args , data_args , training_args = parser.parse_args_into_dataclasses()

    check_output_dir(training_args )
# Setup logging
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s' , datefmt='%m/%d/%Y %H:%M:%S' , level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN , )
logger.warning(
        'Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s' , training_args.local_rank , training_args.device , training_args.n_gpu , bool(training_args.parallel_mode == ParallelMode.DISTRIBUTED ) , training_args.fp16 , )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank ):
transformers.utils.logging.set_verbosity_info()
    logger.info('Training/evaluation parameters %s' , training_args )
# Set seed
set_seed(training_args.seed )
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , )

    extra_model_params = ('encoder_layerdrop', 'decoder_layerdrop', 'dropout', 'attention_dropout')
    for p in extra_model_params:
        if getattr(training_args , p , None ):
            assert hasattr(config , p ), f"""({config.__class__.__name__}) doesn't have a `{p}` attribute"""
            setattr(config , p , getattr(training_args , p ) )

    tokenizer = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , )
    model = AutoModelForSeq2SeqLM.from_pretrained(
        model_args.model_name_or_path , from_tf='.ckpt' in model_args.model_name_or_path , config=config , cache_dir=model_args.cache_dir , )
# use task specific params
    use_task_specific_params(model , data_args.task )

    # set num_beams for evaluation
    if data_args.eval_beams is None:
        data_args.eval_beams = model.config.num_beams

    # set decoder_start_token_id for MBart
    if model.config.decoder_start_token_id is None and isinstance(tokenizer , (MBartTokenizer, MBartTokenizerFast) ):
        assert (
            data_args.tgt_lang is not None and data_args.src_lang is not None
        ), "mBart requires --tgt_lang and --src_lang"
        if isinstance(tokenizer , MBartTokenizer ):
            model.config.decoder_start_token_id = tokenizer.lang_code_to_id[data_args.tgt_lang]
        else:
            model.config.decoder_start_token_id = tokenizer.convert_tokens_to_ids(data_args.tgt_lang )

    if model_args.freeze_embeds:
        freeze_embeds(model )
    if model_args.freeze_encoder:
        freeze_params(model.get_encoder() )
assert_all_frozen(model.get_encoder() )
    dataset_class = Seq2SeqDataset

    # Get datasets
    train_dataset = (
        dataset_class(
            tokenizer , type_path='train' , data_dir=data_args.data_dir , n_obs=data_args.n_train , max_target_length=data_args.max_target_length , max_source_length=data_args.max_source_length , prefix=model.config.prefix or '' , )
        if training_args.do_train
        else None
    )
    eval_dataset = (
        dataset_class(
            tokenizer , type_path='val' , data_dir=data_args.data_dir , n_obs=data_args.n_val , max_target_length=data_args.val_max_target_length , max_source_length=data_args.max_source_length , prefix=model.config.prefix or '' , )
        if training_args.do_eval or training_args.evaluation_strategy != EvaluationStrategy.NO
        else None
    )
    test_dataset = (
        dataset_class(
            tokenizer , type_path='test' , data_dir=data_args.data_dir , n_obs=data_args.n_test , max_target_length=data_args.test_max_target_length , max_source_length=data_args.max_source_length , prefix=model.config.prefix or '' , )
        if training_args.do_predict
        else None
    )

    # Initialize our Trainer
    compute_metrics_fn = (
        build_compute_metrics_fn(data_args.task , tokenizer ) if training_args.predict_with_generate else None
    )
    trainer = Seq2SeqTrainer(
        model=model , args=training_args , data_args=data_args , train_dataset=train_dataset , eval_dataset=eval_dataset , data_collator=Seq2SeqDataCollator(
            tokenizer , data_args , model.config.decoder_start_token_id , training_args.tpu_num_cores ) , compute_metrics=compute_metrics_fn , tokenizer=tokenizer , )

    all_metrics = {}
# Training
    if training_args.do_train:
        logger.info('*** Train ***' )

        train_result = trainer.train(
            model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path ) else None )
        metrics = train_result.metrics
        metrics['train_n_objs'] = data_args.n_train

        trainer.save_model() # this also saves the tokenizer

        if trainer.is_world_process_zero():
            handle_metrics('train' , metrics , training_args.output_dir )
            all_metrics.update(metrics )

            # Need to save the state, since Trainer.save_model saves only the tokenizer with the model
            trainer.state.save_to_json(os.path.join(training_args.output_dir , 'trainer_state.json' ) )
# For convenience, we also re-save the tokenizer to the same directory,
# so that you can share your model easily on huggingface.co/models =)
tokenizer.save_pretrained(training_args.output_dir )
# Evaluation
if training_args.do_eval:
logger.info('*** Evaluate ***' )
        metrics = trainer.evaluate(metric_key_prefix='val' )
        metrics['val_n_objs'] = data_args.n_val
        metrics['val_loss'] = round(metrics['val_loss'] , 4 )

        if trainer.is_world_process_zero():
            handle_metrics('val' , metrics , training_args.output_dir )
            all_metrics.update(metrics )
if training_args.do_predict:
logger.info('*** Predict ***' )
        test_output = trainer.predict(test_dataset=test_dataset , metric_key_prefix='test' )
        metrics = test_output.metrics
        metrics['test_n_objs'] = data_args.n_test

        if trainer.is_world_process_zero():
            metrics['test_loss'] = round(metrics['test_loss'] , 4 )
            handle_metrics('test' , metrics , training_args.output_dir )
            all_metrics.update(metrics )

            if training_args.predict_with_generate:
                test_preds = tokenizer.batch_decode(
                    test_output.predictions , skip_special_tokens=True , clean_up_tokenization_spaces=True )
                test_preds = lmap(str.strip , test_preds )
                write_txt_file(test_preds , os.path.join(training_args.output_dir , 'test_generations.txt' ) )

    if trainer.is_world_process_zero():
        save_json(all_metrics , os.path.join(training_args.output_dir , 'all_results.json' ) )

    return all_metrics
def _mp_fn(index ):
    # For xla_spawn (TPUs)
    main()
if __name__ == "__main__":
main()
| 327 | 1 |
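The script above builds its configuration with HfArgumentParser. A self-contained sketch of that pattern using a hypothetical dataclass (not one of the script's own argument classes):

from dataclasses import dataclass, field
from transformers import HfArgumentParser

@dataclass
class DemoArguments:
    model_name_or_path: str = field(metadata={"help": "Model id or path"})
    max_source_length: int = field(default=1024)

parser = HfArgumentParser(DemoArguments)
(demo_args,) = parser.parse_args_into_dataclasses(args=["--model_name_or_path", "t5-small"])
print(demo_args.model_name_or_path, demo_args.max_source_length)  # t5-small 1024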
'''simple docstring'''
INSTALL_CONTENT = '''
# Transformers installation
! pip install transformers datasets
# To install from source instead of the last release, comment the command above and uncomment the following one.
# ! pip install git+https://github.com/huggingface/transformers.git
'''
notebook_first_cells = [{'type': 'code', 'content': INSTALL_CONTENT}]
black_avoid_patterns = {
'''{processor_class}''': '''FakeProcessorClass''',
'''{model_class}''': '''FakeModelClass''',
'''{object_class}''': '''FakeObjectClass''',
}
| 352 |
'''simple docstring'''
import math
def solution(n: int = 100 ) -> int:
    """simple docstring"""
    sum_of_squares = sum(i * i for i in range(1 , n + 1 ) )
    square_of_sum = int(math.pow(sum(range(1 , n + 1 ) ) , 2 ) )
    return square_of_sum - sum_of_squares
if __name__ == "__main__":
print(f'{solution() = }')
| 52 | 0 |
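A quick sanity check of the function above against the known value for n = 10: the square of the sum is 55**2 = 3025, the sum of the squares is 385, and the difference is 2640.

assert solution(10) == 3025 - 385 == 2640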
import argparse
import logging
import os
from pathlib import Path
from typing import Any, Dict
import pytorch_lightning as pl
from pytorch_lightning.utilities import rank_zero_info
from transformers import (
AdamW,
AutoConfig,
AutoModel,
AutoModelForPreTraining,
AutoModelForQuestionAnswering,
    AutoModelForSeq2SeqLM,
AutoModelForSequenceClassification,
AutoModelForTokenClassification,
AutoModelWithLMHead,
AutoTokenizer,
PretrainedConfig,
PreTrainedTokenizer,
)
from transformers.optimization import (
Adafactor,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
)
from transformers.utils.versions import require_version
logger = logging.getLogger(__name__)
require_version('pytorch_lightning>=1.0.4')
MODEL_MODES = {
'base': AutoModel,
'sequence-classification': AutoModelForSequenceClassification,
'question-answering': AutoModelForQuestionAnswering,
'pretraining': AutoModelForPreTraining,
'token-classification': AutoModelForTokenClassification,
'language-modeling': AutoModelWithLMHead,
    'summarization': AutoModelForSeq2SeqLM,
    'translation': AutoModelForSeq2SeqLM,
}
# update this and the import above to support new schedulers from transformers.optimization
arg_to_scheduler = {
'linear': get_linear_schedule_with_warmup,
'cosine': get_cosine_schedule_with_warmup,
'cosine_w_restarts': get_cosine_with_hard_restarts_schedule_with_warmup,
'polynomial': get_polynomial_decay_schedule_with_warmup,
# '': get_constant_schedule, # not supported for now
# '': get_constant_schedule_with_warmup, # not supported for now
}
arg_to_scheduler_choices = sorted(arg_to_scheduler.keys())
arg_to_scheduler_metavar = '{' + ', '.join(arg_to_scheduler_choices) + '}'
class BaseTransformer(pl.LightningModule ):
    '''simple docstring'''
    def __init__(self , hparams , num_labels=None , mode="base" , config=None , tokenizer=None , model=None , **config_kwargs , ):
        super().__init__()
        # TODO: move to self.save_hyperparameters()
        # self.save_hyperparameters()
        # can also expand arguments into trainer signature for easier reading

        self.save_hyperparameters(hparams )
        self.step_count = 0
        self.output_dir = Path(self.hparams.output_dir )
        cache_dir = self.hparams.cache_dir if self.hparams.cache_dir else None
        if config is None:
            self.config = AutoConfig.from_pretrained(
                self.hparams.config_name if self.hparams.config_name else self.hparams.model_name_or_path , **({'num_labels': num_labels} if num_labels is not None else {}) , cache_dir=cache_dir , **config_kwargs , )
        else:
            self.config = config

        extra_model_params = ('encoder_layerdrop', 'decoder_layerdrop', 'dropout', 'attention_dropout')
        for p in extra_model_params:
            if getattr(self.hparams , p , None ):
                assert hasattr(self.config , p ), f"model config doesn't have a `{p}` attribute"
                setattr(self.config , p , getattr(self.hparams , p ) )

        if tokenizer is None:
            self.tokenizer = AutoTokenizer.from_pretrained(
                self.hparams.tokenizer_name if self.hparams.tokenizer_name else self.hparams.model_name_or_path , cache_dir=cache_dir , )
        else:
            self.tokenizer = tokenizer
        self.model_type = MODEL_MODES[mode]
        if model is None:
            self.model = self.model_type.from_pretrained(
                self.hparams.model_name_or_path , from_tf=bool('.ckpt' in self.hparams.model_name_or_path ) , config=self.config , cache_dir=cache_dir , )
        else:
            self.model = model
    def load_hf_checkpoint(self , *args , **kwargs ):
        self.model = self.model_type.from_pretrained(*args , **kwargs )
    def get_lr_scheduler(self ):
        get_schedule_func = arg_to_scheduler[self.hparams.lr_scheduler]
        scheduler = get_schedule_func(
            self.opt , num_warmup_steps=self.hparams.warmup_steps , num_training_steps=self.total_steps() )
        scheduler = {'scheduler': scheduler, 'interval': 'step', 'frequency': 1}
        return scheduler
    def configure_optimizers(self ):
        """Prepare optimizer and schedule (linear warmup and decay)."""
        model = self.model
        no_decay = ['bias', 'LayerNorm.weight']
        optimizer_grouped_parameters = [
            {
                'params': [
                    p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay )
                ],  # check this named parameters
                'weight_decay': self.hparams.weight_decay,
            },
            {
                'params': [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay )],
                'weight_decay': 0.0,
            },
        ]
        if self.hparams.adafactor:
            optimizer = Adafactor(
                optimizer_grouped_parameters , lr=self.hparams.learning_rate , scale_parameter=False , relative_step=False )

        else:
            optimizer = AdamW(
                optimizer_grouped_parameters , lr=self.hparams.learning_rate , eps=self.hparams.adam_epsilon )
        self.opt = optimizer

        scheduler = self.get_lr_scheduler()

        return [optimizer], [scheduler]
    def test_step(self , batch , batch_nb ):
        return self.validation_step(batch , batch_nb )

    def test_epoch_end(self , outputs ):
        return self.validation_end(outputs )
    def total_steps(self ) -> int:
        """The number of total training steps that will be run. Used for lr scheduler purposes."""
        num_devices = max(1 , self.hparams.gpus )  # TODO: consider num_tpu_cores
        effective_batch_size = self.hparams.train_batch_size * self.hparams.accumulate_grad_batches * num_devices
        return (self.dataset_size / effective_batch_size) * self.hparams.max_epochs
    def setup(self , stage ):
        if stage == "test":
            self.dataset_size = len(self.test_dataloader().dataset )
        else:
            self.train_loader = self.get_dataloader('train' , self.hparams.train_batch_size , shuffle=True )
            self.dataset_size = len(self.train_dataloader().dataset )
    def get_dataloader(self , type_path , batch_size , shuffle=False ):
        raise NotImplementedError('You must implement this for your task' )

    def train_dataloader(self ):
        return self.train_loader

    def val_dataloader(self ):
        return self.get_dataloader('dev' , self.hparams.eval_batch_size , shuffle=False )

    def test_dataloader(self ):
        return self.get_dataloader('test' , self.hparams.eval_batch_size , shuffle=False )

    def _feature_file(self , mode ):
        return os.path.join(
            self.hparams.data_dir , 'cached_{}_{}_{}'.format(
                mode , list(filter(None , self.hparams.model_name_or_path.split('/' ) ) ).pop() , str(self.hparams.max_seq_length ) , ) , )
    @pl.utilities.rank_zero_only
    def on_save_checkpoint(self , checkpoint ) -> None:
        save_path = self.output_dir.joinpath('best_tfmr' )
        self.model.config.save_step = self.step_count
        self.model.save_pretrained(save_path )
        self.tokenizer.save_pretrained(save_path )
    @staticmethod
    def add_model_specific_args(parser , root_dir ):
        parser.add_argument(
            '--model_name_or_path' , default=None , type=str , required=True , help='Path to pretrained model or model identifier from huggingface.co/models' , )
        parser.add_argument(
            '--config_name' , default='' , type=str , help='Pretrained config name or path if not the same as model_name' )
        parser.add_argument(
            '--tokenizer_name' , default=None , type=str , help='Pretrained tokenizer name or path if not the same as model_name' , )
        parser.add_argument(
            '--cache_dir' , default=str(Path(__file__ ).parent / 'test_run' / 'cache' ) , type=str , help='Where do you want to store the pre-trained models downloaded from huggingface.co' , )
        parser.add_argument(
            '--encoder_layerdrop' , type=float , help='Encoder layer dropout probability (Optional). Goes into model.config' , )
        parser.add_argument(
            '--decoder_layerdrop' , type=float , help='Decoder layer dropout probability (Optional). Goes into model.config' , )
        parser.add_argument(
            '--dropout' , type=float , help='Dropout probability (Optional). Goes into model.config' , )
        parser.add_argument(
            '--attention_dropout' , type=float , help='Attention dropout probability (Optional). Goes into model.config' , )
        parser.add_argument('--learning_rate' , default=5e-5 , type=float , help='The initial learning rate for Adam.' )
        parser.add_argument(
            '--lr_scheduler' , default='linear' , choices=arg_to_scheduler_choices , metavar=arg_to_scheduler_metavar , type=str , help='Learning rate scheduler' , )
        parser.add_argument('--weight_decay' , default=0.0 , type=float , help='Weight decay if we apply some.' )
        parser.add_argument('--adam_epsilon' , default=1e-8 , type=float , help='Epsilon for Adam optimizer.' )
        parser.add_argument('--warmup_steps' , default=0 , type=int , help='Linear warmup over warmup_steps.' )
        parser.add_argument('--num_workers' , default=4 , type=int , help='kwarg passed to DataLoader' )
        parser.add_argument('--num_train_epochs' , dest='max_epochs' , default=3 , type=int )
        parser.add_argument('--train_batch_size' , default=32 , type=int )
        parser.add_argument('--eval_batch_size' , default=32 , type=int )
        parser.add_argument('--adafactor' , action='store_true' )
class InitCallback(pl.Callback ):
    '''simple docstring'''
    def on_sanity_check_start(self , trainer , pl_module ):
        if (
            trainer.is_global_zero and trainer.global_rank == 0
        ): # we initialize the retriever only on the master worker with RAY. In new pytorch-lightning, accelerators are removed.
            pl_module.model.rag.retriever.init_retrieval() # better to use hook functions.
class CheckParamCallback(pl.Callback ):
    '''simple docstring'''
    def on_after_backward(self , trainer , pl_module ):
        for name, param in pl_module.model.rag.named_parameters():
            if param.grad is None:
                print(name )
class LoggingCallback(pl.Callback ):
    '''simple docstring'''
    def on_batch_end(self , trainer , pl_module ):
        lr_scheduler = trainer.lr_schedulers[0]['scheduler']
        lrs = {f"lr_group_{i}": lr for i, lr in enumerate(lr_scheduler.get_lr() )}
        pl_module.logger.log_metrics(lrs )

    def on_validation_end(self , trainer , pl_module ):
        rank_zero_info('***** Validation results *****' )
        metrics = trainer.callback_metrics
        # Log results
        for key in sorted(metrics ):
            if key not in ["log", "progress_bar"]:
                rank_zero_info('{} = {}\n'.format(key , str(metrics[key] ) ) )

    def on_test_end(self , trainer , pl_module ):
        rank_zero_info('***** Test results *****' )
        metrics = trainer.callback_metrics
        # Log and save results to file
        output_test_results_file = os.path.join(pl_module.hparams.output_dir , 'test_results.txt' )
        with open(output_test_results_file , 'w' ) as writer:
            for key in sorted(metrics ):
                if key not in ["log", "progress_bar"]:
                    rank_zero_info('{} = {}\n'.format(key , str(metrics[key] ) ) )
                    writer.write('{} = {}\n'.format(key , str(metrics[key] ) ) )
def add_generic_args(parser , root_dir ) -> None:
    '''simple docstring'''
    parser.add_argument(
        '--output_dir' , default=str(Path(__file__ ).parent / 'test_run' / 'model_checkpoints' ) , type=str , help='The output directory where the model predictions and checkpoints will be written.' , )
    parser.add_argument(
        '--fp16' , action='store_true' , help='Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit' , )
    parser.add_argument(
        '--fp16_opt_level' , type=str , default='O2' , help=(
            'For fp16: Apex AMP optimization level selected in [\'O0\', \'O1\', \'O2\', and \'O3\'].'
            'See details at https://nvidia.github.io/apex/amp.html'
        ) , )
    parser.add_argument('--n_tpu_cores' , dest='tpu_cores' , type=int )
    parser.add_argument('--max_grad_norm' , dest='gradient_clip_val' , default=1.0 , type=float , help='Max gradient norm' )
    parser.add_argument('--do_train' , action='store_true' , help='Whether to run training.' )
    parser.add_argument('--do_predict' , action='store_true' , help='Whether to run predictions on the test set.' )
    parser.add_argument(
        '--gradient_accumulation_steps' , dest='accumulate_grad_batches' , type=int , default=1 , help='Number of updates steps to accumulate before performing a backward/update pass.' , )
    parser.add_argument('--seed' , type=int , default=42 , help='random seed for initialization' )
    parser.add_argument(
        '--data_dir' , default=str(Path(__file__ ).parent / 'test_run' / 'dummy-train-data' ) , type=str , help='The input data dir. Should contain the training files for the CoNLL-2003 NER task.' , )
def generic_train(model: BaseTransformer , args: argparse.Namespace , early_stopping_callback=None , logger=True , extra_callbacks=[] , checkpoint_callback=None , logging_callback=None , **extra_train_kwargs , ):
    '''simple docstring'''
    pl.seed_everything(args.seed )

    # init model
    odir = Path(model.hparams.output_dir )
    odir.mkdir(exist_ok=True )

    # add custom checkpoints
    if checkpoint_callback is None:
        checkpoint_callback = pl.callbacks.ModelCheckpoint(
            filepath=args.output_dir , prefix='checkpoint' , monitor='val_loss' , mode='min' , save_top_k=1 )
    if early_stopping_callback:
        extra_callbacks.append(early_stopping_callback )
    if logging_callback is None:
        logging_callback = LoggingCallback()

    train_params = {}

    if args.fp16:
        train_params['precision'] = 16

    if args.gpus > 1:
        train_params['accelerator'] = 'auto'
        train_params['strategy'] = 'ddp'

    train_params['accumulate_grad_batches'] = args.accumulate_grad_batches
    train_params['profiler'] = None
    train_params['devices'] = 'auto'

    trainer = pl.Trainer.from_argparse_args(
        args , weights_summary=None , callbacks=[logging_callback] + extra_callbacks + [InitCallback()] + [checkpoint_callback] , logger=logger , val_check_interval=1 , num_sanity_val_steps=2 , **train_params , )

    if args.do_train:
        trainer.fit(model )

    else:
        print('RAG modeling tests with new set functions successfully executed!' )
    return trainer
| 29 |
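The module above is a generic Lightning wrapper; task scripts specialize it by subclassing. A hedged sketch of such a subclass, where the method bodies are illustrative placeholders rather than code from any specific task script:

class DemoTaskModule(BaseTransformer):
    mode = "summarization"

    def __init__(self, hparams, **kwargs):
        super().__init__(hparams, mode=self.mode, **kwargs)

    def training_step(self, batch, batch_idx):
        # compute the loss with the wrapped Hugging Face model
        return self.model(**batch).loss

    def get_dataloader(self, type_path, batch_size, shuffle=False):
        raise NotImplementedError("Supply a task-specific DataLoader here.")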
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from .tokenization_electra import ElectraTokenizer
UpperCAmelCase__ = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}
UpperCAmelCase__ = {
"vocab_file": {
"google/electra-small-generator": (
"https://huggingface.co/google/electra-small-generator/resolve/main/vocab.txt"
),
"google/electra-base-generator": "https://huggingface.co/google/electra-base-generator/resolve/main/vocab.txt",
"google/electra-large-generator": (
"https://huggingface.co/google/electra-large-generator/resolve/main/vocab.txt"
),
"google/electra-small-discriminator": (
"https://huggingface.co/google/electra-small-discriminator/resolve/main/vocab.txt"
),
"google/electra-base-discriminator": (
"https://huggingface.co/google/electra-base-discriminator/resolve/main/vocab.txt"
),
"google/electra-large-discriminator": (
"https://huggingface.co/google/electra-large-discriminator/resolve/main/vocab.txt"
),
},
"tokenizer_file": {
"google/electra-small-generator": (
"https://huggingface.co/google/electra-small-generator/resolve/main/tokenizer.json"
),
"google/electra-base-generator": (
"https://huggingface.co/google/electra-base-generator/resolve/main/tokenizer.json"
),
"google/electra-large-generator": (
"https://huggingface.co/google/electra-large-generator/resolve/main/tokenizer.json"
),
"google/electra-small-discriminator": (
"https://huggingface.co/google/electra-small-discriminator/resolve/main/tokenizer.json"
),
"google/electra-base-discriminator": (
"https://huggingface.co/google/electra-base-discriminator/resolve/main/tokenizer.json"
),
"google/electra-large-discriminator": (
"https://huggingface.co/google/electra-large-discriminator/resolve/main/tokenizer.json"
),
},
}
UpperCAmelCase__ = {
"google/electra-small-generator": 512,
"google/electra-base-generator": 512,
"google/electra-large-generator": 512,
"google/electra-small-discriminator": 512,
"google/electra-base-discriminator": 512,
"google/electra-large-discriminator": 512,
}
UpperCAmelCase__ = {
"google/electra-small-generator": {"do_lower_case": True},
"google/electra-base-generator": {"do_lower_case": True},
"google/electra-large-generator": {"do_lower_case": True},
"google/electra-small-discriminator": {"do_lower_case": True},
"google/electra-base-discriminator": {"do_lower_case": True},
"google/electra-large-discriminator": {"do_lower_case": True},
}
class lowercase_ ( lowercase ):
'''simple docstring'''
__snake_case = VOCAB_FILES_NAMES
__snake_case = PRETRAINED_VOCAB_FILES_MAP
__snake_case = PRETRAINED_INIT_CONFIGURATION
__snake_case = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__snake_case = ElectraTokenizer
    def __init__(self , vocab_file=None , tokenizer_file=None , do_lower_case=True , unk_token="[UNK]" , sep_token="[SEP]" , pad_token="[PAD]" , cls_token="[CLS]" , mask_token="[MASK]" , tokenize_chinese_chars=True , strip_accents=None , **kwargs , ):
        """simple docstring"""
        super().__init__(
            vocab_file , tokenizer_file=tokenizer_file , do_lower_case=do_lower_case , unk_token=unk_token , sep_token=sep_token , pad_token=pad_token , cls_token=cls_token , mask_token=mask_token , tokenize_chinese_chars=tokenize_chinese_chars , strip_accents=strip_accents , **kwargs , )

        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
        if (
            normalizer_state.get('lowercase' , do_lower_case ) != do_lower_case
            or normalizer_state.get('strip_accents' , strip_accents ) != strip_accents
            or normalizer_state.get('handle_chinese_chars' , tokenize_chinese_chars ) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers , normalizer_state.pop('type' ) )
            normalizer_state['lowercase'] = do_lower_case
            normalizer_state['strip_accents'] = strip_accents
            normalizer_state['handle_chinese_chars'] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state )

        self.do_lower_case = do_lower_case
    def build_inputs_with_special_tokens(self , token_ids_a , token_ids_b=None ):
        """simple docstring"""
        output = [self.cls_token_id] + token_ids_a + [self.sep_token_id]

        if token_ids_b:
            output += token_ids_b + [self.sep_token_id]

        return output

    def create_token_type_ids_from_sequences(self , token_ids_a: List[int] , token_ids_b: Optional[List[int]] = None ) -> List[int]:
        """simple docstring"""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_b is None:
            return len(cls + token_ids_a + sep ) * [0]
        return len(cls + token_ids_a + sep ) * [0] + len(token_ids_b + sep ) * [1]

    def save_vocabulary(self , save_directory: str , filename_prefix: Optional[str] = None ) -> Tuple[str]:
        """simple docstring"""
        files = self._tokenizer.model.save(save_directory , name=filename_prefix )
        return tuple(files )
| 0 | 0 |
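Exercising the special-token helpers above; this sketch downloads the google/electra-small-discriminator checkpoint on first use:

from transformers import ElectraTokenizerFast

tok = ElectraTokenizerFast.from_pretrained("google/electra-small-discriminator")
ids_a = tok.convert_tokens_to_ids(tok.tokenize("hello world"))
ids_b = tok.convert_tokens_to_ids(tok.tokenize("goodbye"))
pair = tok.build_inputs_with_special_tokens(ids_a, ids_b)          # [CLS] a [SEP] b [SEP]
segments = tok.create_token_type_ids_from_sequences(ids_a, ids_b)  # 0s for a, 1s for b
assert len(pair) == len(segments)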
"""simple docstring"""
import argparse
import json
import os
import time
import zipfile
from get_ci_error_statistics import download_artifact, get_artifacts_links
from transformers import logging
logger = logging.get_logger(__name__)
def extract_warnings_from_single_artifact(artifact_path , targets ):
    """Extract warnings from a downloaded artifact (in .zip format)."""
    selected_warnings = set()
    buffer = []

    def parse_line(fp ):
        for line in fp:
            if isinstance(line , bytes ):
                line = line.decode('UTF-8' )
            if "warnings summary (final)" in line:
                continue
            # This means we are outside the body of a warning
            elif not line.startswith(' ' ):
                # process a single warning and move it to `selected_warnings`.
                if len(buffer ) > 0:
                    warning = '\n'.join(buffer )
                    # Only keep the warnings specified in `targets`
                    if any(f": {x}: " in warning for x in targets ):
                        selected_warnings.add(warning )
                    buffer.clear()
                continue
            else:
                line = line.strip()
                buffer.append(line )

    if from_gh:
        for filename in os.listdir(artifact_path ):
            file_path = os.path.join(artifact_path , filename )
            if not os.path.isdir(file_path ):
                # read the file
                if filename != "warnings.txt":
                    continue
                with open(file_path ) as fp:
                    parse_line(fp )
    else:
        try:
            with zipfile.ZipFile(artifact_path ) as z:
                for filename in z.namelist():
                    if not os.path.isdir(filename ):
                        # read the file
                        if filename != "warnings.txt":
                            continue
                        with z.open(filename ) as fp:
                            parse_line(fp )
        except Exception:
            logger.warning(
                f"{artifact_path} is either an invalid zip file or something else wrong. This file is skipped." )

    return selected_warnings
def extract_warnings(artifact_dir , targets ):
    """Extract warnings from all artifact files."""
    selected_warnings = set()
    paths = [os.path.join(artifact_dir , p ) for p in os.listdir(artifact_dir ) if (p.endswith('.zip' ) or from_gh)]
    for p in paths:
        selected_warnings.update(extract_warnings_from_single_artifact(p , targets ) )

    return selected_warnings
if __name__ == "__main__":
    def list_str(values ):
        return values.split(',' )
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument('--workflow_run_id', type=str, required=True, help='A GitHub Actions workflow run id.')
parser.add_argument(
'--output_dir',
type=str,
required=True,
help='Where to store the downloaded artifacts and other result files.',
)
parser.add_argument('--token', default=None, type=str, help='A token that has actions:read permission.')
# optional parameters
parser.add_argument(
'--targets',
default='DeprecationWarning,UserWarning,FutureWarning',
type=list_str,
help='Comma-separated list of target warning(s) which we want to extract.',
)
parser.add_argument(
'--from_gh',
action='store_true',
help='If running from a GitHub action workflow and collecting warnings from its artifacts.',
)
    args = parser.parse_args()

    from_gh = args.from_gh
if from_gh:
# The artifacts have to be downloaded using `actions/download-artifact@v3`
pass
else:
os.makedirs(args.output_dir, exist_ok=True)
# get download links
        artifacts = get_artifacts_links(args.workflow_run_id, token=args.token)
with open(os.path.join(args.output_dir, 'artifacts.json'), 'w', encoding='UTF-8') as fp:
json.dump(artifacts, fp, ensure_ascii=False, indent=4)
# download artifacts
for idx, (name, url) in enumerate(artifacts.items()):
print(name)
print(url)
print('=' * 80)
download_artifact(name, url, args.output_dir, args.token)
# Be gentle to GitHub
time.sleep(1)
# extract warnings from artifacts
    selected_warnings = extract_warnings(args.output_dir, args.targets)
    selected_warnings = sorted(selected_warnings)
with open(os.path.join(args.output_dir, 'selected_warnings.json'), 'w', encoding='UTF-8') as fp:
json.dump(selected_warnings, fp, ensure_ascii=False, indent=4)
| 361 |
from timeit import timeit

test_data = {
'MALAYALAM': True,
'String': False,
'rotor': True,
'level': True,
'A': True,
'BB': True,
'ABC': False,
'amanaplanacanalpanama': True, # "a man a plan a canal panama"
}
# Ensure our test data is valid
assert all((key == key[::-1]) is value for key, value in test_data.items())
def is_palindrome(s: str ) -> bool:
    start_i = 0
    end_i = len(s ) - 1
    while start_i < end_i:
        if s[start_i] == s[end_i]:
            start_i += 1
            end_i -= 1
        else:
            return False
    return True


def is_palindrome_traversal(s: str ) -> bool:
    end = len(s ) // 2
    n = len(s )
    # We need to traverse till half of the length of string
    # as we can get access of the i'th last element from
    # i'th index.
    # eg: [0,1,2,3,4,5] => 4th index can be accessed
    # with the help of 1st index (i==n-i-1)
    # where n is length of string
    return all(s[i] == s[n - i - 1] for i in range(end ) )


def is_palindrome_recursive(s: str ) -> bool:
    if len(s ) <= 2:
        return True
    if s[0] == s[len(s ) - 1]:
        return is_palindrome_recursive(s[1:-1] )
    else:
        return False


def is_palindrome_slice(s: str ) -> bool:
    return s == s[::-1]


def benchmark_function(name: str ) -> None:
    stmt = f"all({name}(key) is value for key, value in test_data.items())"
    setup = f"from __main__ import test_data, {name}"
    number = 500000
    result = timeit(stmt=stmt , setup=setup , number=number )
    print(f"{name:<35} finished {number:,} runs in {result:.5f} seconds" )
if __name__ == "__main__":
for key, value in test_data.items():
assert is_palindrome(key) is is_palindrome_recursive(key)
assert is_palindrome(key) is is_palindrome_slice(key)
print(F"""{key:21} {value}""")
print('a man a plan a canal panama')
# finished 500,000 runs in 0.46793 seconds
benchmark_function('is_palindrome_slice')
# finished 500,000 runs in 0.85234 seconds
benchmark_function('is_palindrome')
# finished 500,000 runs in 1.32028 seconds
benchmark_function('is_palindrome_recursive')
# finished 500,000 runs in 2.08679 seconds
benchmark_function('is_palindrome_traversal')
| 50 | 0 |
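All four implementations above agree; for example, on strings outside test_data:

for fn in (is_palindrome, is_palindrome_traversal, is_palindrome_recursive, is_palindrome_slice):
    assert fn("racecar") is True
    assert fn("racecars") is False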
import inspect
import os
import re
from transformers.configuration_utils import PretrainedConfig
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_config_docstrings.py
lowercase__ :List[str] = "src/transformers"
# This is to make sure the transformers module imported is the one in the repo.
lowercase__ :str = direct_transformers_import(PATH_TO_TRANSFORMERS)
lowercase__ :Union[str, Any] = transformers.models.auto.configuration_auto.CONFIG_MAPPING
lowercase__ :int = {
# used to compute the property `self.chunk_length`
"EncodecConfig": ["overlap"],
# used as `self.bert_model = BertModel(config, ...)`
"DPRConfig": True,
# not used in modeling files, but it's an important information
"FSMTConfig": ["langs"],
# used internally in the configuration class file
"GPTNeoConfig": ["attention_types"],
# used internally in the configuration class file
"EsmConfig": ["is_folding_model"],
# used during training (despite we don't have training script for these models yet)
"Mask2FormerConfig": ["ignore_value"],
# `ignore_value` used during training (despite we don't have training script for these models yet)
# `norm` used in conversion script (despite not using in the modeling file)
"OneFormerConfig": ["ignore_value", "norm"],
# used during preprocessing and collation, see `collating_graphormer.py`
"GraphormerConfig": ["spatial_pos_max"],
# used internally in the configuration class file
"T5Config": ["feed_forward_proj"],
# used internally in the configuration class file
# `tokenizer_class` get default value `T5Tokenizer` intentionally
"MT5Config": ["feed_forward_proj", "tokenizer_class"],
"UMT5Config": ["feed_forward_proj", "tokenizer_class"],
# used internally in the configuration class file
"LongT5Config": ["feed_forward_proj"],
# used internally in the configuration class file
"SwitchTransformersConfig": ["feed_forward_proj"],
# having default values other than `1e-5` - we can't fix them without breaking
"BioGptConfig": ["layer_norm_eps"],
# having default values other than `1e-5` - we can't fix them without breaking
"GLPNConfig": ["layer_norm_eps"],
# having default values other than `1e-5` - we can't fix them without breaking
"SegformerConfig": ["layer_norm_eps"],
# having default values other than `1e-5` - we can't fix them without breaking
"CvtConfig": ["layer_norm_eps"],
# having default values other than `1e-5` - we can't fix them without breaking
"PerceiverConfig": ["layer_norm_eps"],
# used internally to calculate the feature size
"InformerConfig": ["num_static_real_features", "num_time_features"],
# used internally to calculate the feature size
"TimeSeriesTransformerConfig": ["num_static_real_features", "num_time_features"],
# used internally to calculate the feature size
"AutoformerConfig": ["num_static_real_features", "num_time_features"],
# used internally to calculate `mlp_dim`
"SamVisionConfig": ["mlp_ratio"],
# For (head) training, but so far not implemented
"ClapAudioConfig": ["num_classes"],
# Not used, but providing useful information to users
"SpeechT5HifiGanConfig": ["sampling_rate"],
}
# TODO (ydshieh): Check the failing cases, try to fix them or move some cases to the above block once we are sure
SPECIAL_CASES_TO_ALLOW.update(
{
"CLIPSegConfig": True,
"DeformableDetrConfig": True,
"DetaConfig": True,
"DinatConfig": True,
"DonutSwinConfig": True,
"EfficientFormerConfig": True,
"FSMTConfig": True,
"JukeboxConfig": True,
"LayoutLMv2Config": True,
"MaskFormerSwinConfig": True,
"MT5Config": True,
"NatConfig": True,
"OneFormerConfig": True,
"PerceiverConfig": True,
"RagConfig": True,
"SpeechT5Config": True,
"SwinConfig": True,
"Swin2SRConfig": True,
"Swinv2Config": True,
"SwitchTransformersConfig": True,
"TableTransformerConfig": True,
"TapasConfig": True,
"TransfoXLConfig": True,
"UniSpeechConfig": True,
"UniSpeechSatConfig": True,
"WavLMConfig": True,
"WhisperConfig": True,
# TODO: @Arthur (for `alignment_head` and `alignment_layer`)
"JukeboxPriorConfig": True,
# TODO: @Younes (for `is_decoder`)
"Pix2StructTextConfig": True,
}
)
def check_attribute_being_used(config_class , attributes , default_value , source_strings ):
    '''simple docstring'''
    attribute_used = False
    for attribute in attributes:
        for modeling_source in source_strings:
            # check if we can find `config.xxx`, `getattr(config, "xxx", ...)` or `getattr(self.config, "xxx", ...)`
            if (
                f'config.{attribute}' in modeling_source
                or f'getattr(config, "{attribute}"' in modeling_source
                or f'getattr(self.config, "{attribute}"' in modeling_source
            ):
                attribute_used = True
            # Deal with multi-line cases
            elif (
                re.search(
                    rf'getattr[ \t\v\n\r\f]*\([ \t\v\n\r\f]*(self\.)?config,[ \t\v\n\r\f]*"{attribute}"' , modeling_source , )
                is not None
            ):
                attribute_used = True
            # `SequenceSummary` is called with `SequenceSummary(config)`
            elif attribute in [
                "summary_type",
                "summary_use_proj",
                "summary_activation",
                "summary_last_dropout",
                "summary_proj_to_labels",
                "summary_first_dropout",
            ]:
                if "SequenceSummary" in modeling_source:
                    attribute_used = True
            if attribute_used:
                break
        if attribute_used:
            break
# common and important attributes, even if they do not always appear in the modeling files
    attributes_to_allow = [
        'bos_index',
        'eos_index',
        'pad_index',
        'unk_index',
        'mask_index',
        'image_size',
        'use_cache',
        'out_features',
        'out_indices',
    ]
    attributes_used_in_generation = ['encoder_no_repeat_ngram_size']

    # Special cases to be allowed
    case_allowed = True
    if not attribute_used:
        case_allowed = False
        for attribute in attributes:
            # Allow if the default value in the configuration class is different from the one in `PretrainedConfig`
            if attribute in ["is_encoder_decoder"] and default_value is True:
                case_allowed = True
            elif attribute in ["tie_word_embeddings"] and default_value is False:
                case_allowed = True
            # Allow cases without checking the default value in the configuration class
            elif attribute in attributes_to_allow + attributes_used_in_generation:
                case_allowed = True
            elif attribute.endswith('_token_id' ):
                case_allowed = True
            # configuration class specific cases
            if not case_allowed:
                allowed_cases = SPECIAL_CASES_TO_ALLOW.get(config_class.__name__ , [] )
                case_allowed = allowed_cases is True or attribute in allowed_cases

    return attribute_used or case_allowed
def check_config_attributes_being_used(config_class ):
    '''simple docstring'''
    signature = dict(inspect.signature(config_class.__init__ ).parameters )
    parameter_names = [x for x in list(signature.keys() ) if x not in ['self', 'kwargs']]
    parameter_defaults = [signature[param].default for param in parameter_names]

    # If `attribute_map` exists, an attribute can have different names to be used in the modeling files, and as long
    # as one variant is used, the test should pass
    reversed_attribute_map = {}
    if len(config_class.attribute_map ) > 0:
        reversed_attribute_map = {v: k for k, v in config_class.attribute_map.items()}

    # Get the path to modeling source files
    config_source_file = inspect.getsourcefile(config_class )
    model_dir = os.path.dirname(config_source_file )
    # Let's check against all frameworks: as long as one framework uses an attribute, we are good.
    modeling_paths = [os.path.join(model_dir , fn ) for fn in os.listdir(model_dir ) if fn.startswith('modeling_' )]

    # Get the source code strings
    modeling_sources = []
    for path in modeling_paths:
        if os.path.isfile(path ):
            with open(path ) as fp:
                modeling_sources.append(fp.read() )

    unused_attributes = []
    for config_param, default_value in zip(parameter_names , parameter_defaults ):
        # `attributes` here is all the variant names for `config_param`
        attributes = [config_param]
        # some configuration classes have non-empty `attribute_map`, and both names could be used in the
        # corresponding modeling files. As long as one of them appears, it is fine.
        if config_param in reversed_attribute_map:
            attributes.append(reversed_attribute_map[config_param] )

        if not check_attribute_being_used(config_class , attributes , default_value , modeling_sources ):
            unused_attributes.append(attributes[0] )

    return sorted(unused_attributes )
def check_config_attributes():
    '''simple docstring'''
    configs_with_unused_attributes = {}
    for _config_class in list(CONFIG_MAPPING.values() ):
        # Skip deprecated models
        if "models.deprecated" in _config_class.__module__:
            continue
        # Some config classes are not in `CONFIG_MAPPING` (e.g. `CLIPVisionConfig`, `Blip2VisionConfig`, etc.)
        config_classes_in_module = [
            cls
            for name, cls in inspect.getmembers(
                inspect.getmodule(_config_class ) , lambda x: inspect.isclass(x )
                and issubclass(x , PretrainedConfig )
                and inspect.getmodule(x ) == inspect.getmodule(_config_class ) , )
        ]
        for config_class in config_classes_in_module:
            unused_attributes = check_config_attributes_being_used(config_class )
            if len(unused_attributes ) > 0:
                configs_with_unused_attributes[config_class.__name__] = unused_attributes

    if len(configs_with_unused_attributes ) > 0:
        error = 'The following configuration classes contain unused attributes in the corresponding modeling files:\n'
        for name, attributes in configs_with_unused_attributes.items():
            error += f'{name}: {attributes}\n'

        raise ValueError(error )
if __name__ == "__main__":
check_config_attributes()
| 101 |
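A miniature demonstration of the multi-line getattr pattern the checker above searches for:

import re

src = 'hidden = getattr(\n    self.config, "hidden_size", 64)'
pat = r'getattr[ \t\v\n\r\f]*\([ \t\v\n\r\f]*(self\.)?config,[ \t\v\n\r\f]*"hidden_size"'
assert re.search(pat, src) is not None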
import inspect
import unittest
import warnings
from math import ceil, floor
from transformers import LevitConfig
from transformers.file_utils import cached_property, is_torch_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING,
MODEL_MAPPING,
LevitForImageClassification,
LevitForImageClassificationWithTeacher,
LevitModel,
)
from transformers.models.levit.modeling_levit import LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import LevitImageProcessor
class LevitConfigTester(ConfigTester ):
    """simple docstring"""
    def create_and_test_config_common_properties(self ):
        '''simple docstring'''
        config = self.config_class(**self.inputs_dict )
        self.parent.assertTrue(hasattr(config, 'hidden_sizes' ) )
        self.parent.assertTrue(hasattr(config, 'num_attention_heads' ) )
class LevitModelTester:
    """simple docstring"""
    def __init__(self, parent, batch_size=13, image_size=64, num_channels=3, kernel_size=3, stride=2, padding=1, patch_size=16, hidden_sizes=[128, 256, 384], num_attention_heads=[4, 6, 8], depths=[2, 3, 4], key_dim=[16, 16, 16], drop_path_rate=0, mlp_ratio=[2, 2, 2], attention_ratio=[2, 2, 2], initializer_range=0.02, is_training=True, use_labels=True, num_labels=2, ):
        '''simple docstring'''
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.kernel_size = kernel_size
        self.stride = stride
        self.padding = padding
        self.hidden_sizes = hidden_sizes
        self.num_attention_heads = num_attention_heads
        self.depths = depths
        self.key_dim = key_dim
        self.drop_path_rate = drop_path_rate
        self.patch_size = patch_size
        self.attention_ratio = attention_ratio
        self.mlp_ratio = mlp_ratio
        self.initializer_range = initializer_range
        self.down_ops = [
            ['Subsample', key_dim[0], hidden_sizes[0] // key_dim[0], 4, 2, 2],
            ['Subsample', key_dim[0], hidden_sizes[1] // key_dim[0], 4, 2, 2],
        ]
        self.is_training = is_training
        self.use_labels = use_labels
        self.num_labels = num_labels
    def prepare_config_and_inputs(self ):
        '''simple docstring'''
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels )

        config = self.get_config()
        return config, pixel_values, labels
    def get_config(self ):
'''simple docstring'''
return LevitConfig(
image_size=self.image_size, num_channels=self.num_channels, kernel_size=self.kernel_size, stride=self.stride, padding=self.padding, patch_size=self.patch_size, hidden_sizes=self.hidden_sizes, num_attention_heads=self.num_attention_heads, depths=self.depths, key_dim=self.key_dim, drop_path_rate=self.drop_path_rate, mlp_ratio=self.mlp_ratio, attention_ratio=self.attention_ratio, initializer_range=self.initializer_range, down_ops=self.down_ops, )
    def create_and_check_model(self, config, pixel_values, labels ):
        '''simple docstring'''
        model = LevitModel(config=config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values )
        image_size = (self.image_size, self.image_size)
        height , width = image_size[0], image_size[1]
        for _ in range(4 ):
            height = floor(((height + 2 * self.padding - self.kernel_size) / self.stride) + 1 )
            width = floor(((width + 2 * self.padding - self.kernel_size) / self.stride) + 1 )
        self.parent.assertEqual(
            result.last_hidden_state.shape, (self.batch_size, ceil(height / 4 ) * ceil(width / 4 ), self.hidden_sizes[-1]), )
    def create_and_check_for_image_classification(self, config, pixel_values, labels ):
        '''simple docstring'''
        config.num_labels = self.num_labels
        model = LevitForImageClassification(config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values, labels=labels )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels) )
    def prepare_config_and_inputs_for_common(self ):
        '''simple docstring'''
        config_and_inputs = self.prepare_config_and_inputs()
        config , pixel_values , labels = config_and_inputs
        inputs_dict = {'pixel_values': pixel_values}
        return config, inputs_dict
@require_torch
class LevitModelTest(ModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    """simple docstring"""
    all_model_classes = (
        (LevitModel, LevitForImageClassification, LevitForImageClassificationWithTeacher)
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            'feature-extraction': LevitModel,
            'image-classification': (LevitForImageClassification, LevitForImageClassificationWithTeacher),
        }
        if is_torch_available()
        else {}
    )

    test_pruning = False
    test_torchscript = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False
    def setUp(self ):
        '''simple docstring'''
        self.model_tester = LevitModelTester(self )
        self.config_tester = LevitConfigTester(self, config_class=LevitConfig, has_text_modality=False, hidden_size=37 )
    def test_config(self ):
        '''simple docstring'''
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
    def create_and_test_config_common_properties(self ):
        '''simple docstring'''
        return
    @unittest.skip(reason='Levit does not use inputs_embeds' )
    def test_inputs_embeds(self ):
        '''simple docstring'''
        pass
    @unittest.skip(reason='Levit does not support input and output embeddings' )
    def test_model_common_attributes(self ):
        '''simple docstring'''
        pass
    @unittest.skip(reason='Levit does not output attentions' )
    def test_attention_outputs(self ):
        '''simple docstring'''
        pass
    def test_forward_signature(self ):
        '''simple docstring'''
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config )
            signature = inspect.signature(model.forward )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ['pixel_values']
            self.assertListEqual(arg_names[:1], expected_arg_names )
    def test_hidden_states_output(self ):
        '''simple docstring'''
        def check_hidden_states_output(inputs_dict , config , model_class ):
            model = model_class(config )
            model.to(torch_device )
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict , model_class ) )

            hidden_states = outputs.hidden_states

            expected_num_layers = len(self.model_tester.depths ) + 1
            self.assertEqual(len(hidden_states ), expected_num_layers )

            image_size = (self.model_tester.image_size, self.model_tester.image_size)
            height , width = image_size[0], image_size[1]
            for _ in range(4 ):
                height = floor(
                    (
                        (height + 2 * self.model_tester.padding - self.model_tester.kernel_size)
                        / self.model_tester.stride
                    )
                    + 1 )
                width = floor(
                    (
                        (width + 2 * self.model_tester.padding - self.model_tester.kernel_size)
                        / self.model_tester.stride
                    )
                    + 1 )
            # verify the first hidden states (first block)
            self.assertListEqual(
                list(hidden_states[0].shape[-2:] ), [
                    height * width,
                    self.model_tester.hidden_sizes[0],
                ], )

        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict['output_hidden_states'] = True
            check_hidden_states_output(inputs_dict , config , model_class )

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            check_hidden_states_output(inputs_dict , config , model_class )
    @unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.' )
    def test_model_is_small(self ):
        '''simple docstring'''
        pass
    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False ):
        '''simple docstring'''
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels )

        if return_labels:
            if model_class.__name__ == "LevitForImageClassificationWithTeacher":
                del inputs_dict["labels"]

        return inputs_dict
    def test_model(self ):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs )

    def test_for_image_classification(self ):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs )
    def test_training(self ):
        '''simple docstring'''
        if not self.model_tester.is_training:
            return

        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True

        for model_class in self.all_model_classes:
            # LevitForImageClassificationWithTeacher supports inference-only
            if (
                model_class in get_values(MODEL_MAPPING )
                or model_class.__name__ == "LevitForImageClassificationWithTeacher"
            ):
                continue
            model = model_class(config )
            model.to(torch_device )
            model.train()
            inputs = self._prepare_for_class(inputs_dict , model_class , return_labels=True )
            loss = model(**inputs ).loss
            loss.backward()
    def test_training_gradient_checkpointing(self ):
        '''simple docstring'''
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        if not self.model_tester.is_training:
            return

        config.use_cache = False
        config.return_dict = True

        for model_class in self.all_model_classes:
            if model_class in get_values(MODEL_MAPPING ) or not model_class.supports_gradient_checkpointing:
                continue
            # LevitForImageClassificationWithTeacher supports inference-only
            if model_class.__name__ == "LevitForImageClassificationWithTeacher":
                continue
            model = model_class(config )
            model.gradient_checkpointing_enable()
            model.to(torch_device )
            model.train()
            inputs = self._prepare_for_class(inputs_dict , model_class , return_labels=True )
            loss = model(**inputs ).loss
            loss.backward()
def lowercase__ ( self : List[str] ):
'''simple docstring'''
lowercase__ , lowercase__ = self.model_tester.prepare_config_and_inputs_for_common()
lowercase__ = [
{'''title''': '''multi_label_classification''', '''num_labels''': 2, '''dtype''': torch.float},
{'''title''': '''single_label_classification''', '''num_labels''': 1, '''dtype''': torch.long},
{'''title''': '''regression''', '''num_labels''': 1, '''dtype''': torch.float},
]
for model_class in self.all_model_classes:
if (
model_class
not in [
*get_values(lowerCamelCase ),
]
or model_class.__name__ == "LevitForImageClassificationWithTeacher"
):
continue
for problem_type in problem_types:
with self.subTest(msg=F"""Testing {model_class} with {problem_type['title']}""" ):
lowercase__ = problem_type['''title''']
lowercase__ = problem_type['''num_labels''']
lowercase__ = model_class(lowerCamelCase )
model.to(lowerCamelCase )
model.train()
lowercase__ = self._prepare_for_class(lowerCamelCase, lowerCamelCase, return_labels=lowerCamelCase )
if problem_type["num_labels"] > 1:
lowercase__ = inputs['''labels'''].unsqueeze(1 ).repeat(1, problem_type['''num_labels'''] )
lowercase__ = inputs['''labels'''].to(problem_type['''dtype'''] )
                    # This tests that we do not trigger the warning from PyTorch "Using a target size that is different
                    # to the input size. This will likely lead to incorrect results due to broadcasting. Please ensure
                    # they have the same size.", which is a symptom that something is wrong for the regression problem.
                    # See https://github.com/huggingface/transformers/issues/11780
with warnings.catch_warnings(record=lowerCamelCase ) as warning_list:
lowercase__ = model(**lowerCamelCase ).loss
for w in warning_list:
if "Using a target size that is different to the input size" in str(w.message ):
raise ValueError(
F"""Something is going wrong in the regression problem: intercepted {w.message}""" )
loss.backward()
@slow
    def test_model_from_pretrained( self ):
        for model_name in LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = LevitModel.from_pretrained(model_name )
            self.assertIsNotNone(model )
def prepare_img():
    image = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
    return image
@require_torch
@require_vision
class _UpperCAmelCase ( unittest.TestCase ):
"""simple docstring"""
@cached_property
    def default_image_processor( self ):
        return LevitImageProcessor.from_pretrained(LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
@slow
    def test_inference_image_classification_head( self ):
        model = LevitForImageClassificationWithTeacher.from_pretrained(LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[0] ).to(
            torch_device )

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image , return_tensors='pt' ).to(torch_device )

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs )

        # verify the logits
        expected_shape = torch.Size((1, 1_000) )
        self.assertEqual(outputs.logits.shape , expected_shape )

        expected_slice = torch.tensor([1.0448, -0.3745, -1.8317] ).to(torch_device )
        self.assertTrue(torch.allclose(outputs.logits[0, :3] , expected_slice , atol=1e-4 ) )
| 207 | 0 |
def is_palindrome(n: int) -> bool:
    """Return True if `n` reads the same forwards and backwards."""
    return str(n) == str(n)[::-1]


def sum_reverse(n: int) -> int:
    """Return `n` plus the number formed by reversing its digits."""
    return int(n) + int(str(n)[::-1])


def solution(limit: int = 10_000) -> int:
    """
    Count the candidate Lychrel numbers below `limit` (Project Euler 55): numbers
    that do not produce a palindrome within fifty reverse-and-add iterations.
    """
    lychrel_nums = []
    for num in range(1, limit):
        iterations = 0
        a = num
        while iterations < 50:
            a = sum_reverse(a)
            iterations += 1
            if is_palindrome(a):
                break
        else:
            lychrel_nums.append(num)
    return len(lychrel_nums)
if __name__ == "__main__":
print(F'''{solution() = }''')
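
# Quick sanity check (added, not part of the original solution): 349 needs three
# reverse-and-add steps to reach a palindrome (349 + 943 = 1292, 1292 + 2921 = 4213,
# 4213 + 3124 = 7337), so it is not a Lychrel candidate. For the default limit of
# 10_000 the expected count is 249 (Project Euler, problem 55).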
| 82 |
import unittest
from transformers import (
MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
Pipeline,
ZeroShotClassificationPipeline,
pipeline,
)
from transformers.testing_utils import is_pipeline_test, nested_simplify, require_tf, require_torch, slow
from .test_pipelines_common import ANY
# These 2 model types require different inputs than those of the usual text models.
_TO_SKIP = {'LayoutLMv2Config', 'LayoutLMv3Config'}
@is_pipeline_test
class ZeroShotClassificationPipelineTests( unittest.TestCase ):
    model_mapping = MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
    tf_model_mapping = TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING

    if model_mapping is not None:
        model_mapping = {config: model for config, model in model_mapping.items() if config.__name__ not in _TO_SKIP}
    if tf_model_mapping is not None:
        tf_model_mapping = {
            config: model for config, model in tf_model_mapping.items() if config.__name__ not in _TO_SKIP
        }
    def get_test_pipeline(self, model, tokenizer, processor ):
        classifier = ZeroShotClassificationPipeline(
            model=model, tokenizer=tokenizer, candidate_labels=["""polics""", """health"""] )
        return classifier, ["Who are you voting for in 2020?", "My stomach hurts."]
    def run_pipeline_test(self, classifier, SCREAMING_SNAKE_CASE_ ):  # second argument holds the example inputs
UpperCAmelCase_: Dict = classifier("""Who are you voting for in 2020?""", candidate_labels="""politics""" )
self.assertEqual(SCREAMING_SNAKE_CASE_, {"""sequence""": ANY(SCREAMING_SNAKE_CASE_ ), """labels""": [ANY(SCREAMING_SNAKE_CASE_ )], """scores""": [ANY(SCREAMING_SNAKE_CASE_ )]} )
# No kwarg
UpperCAmelCase_: Optional[int] = classifier("""Who are you voting for in 2020?""", ["""politics"""] )
self.assertEqual(SCREAMING_SNAKE_CASE_, {"""sequence""": ANY(SCREAMING_SNAKE_CASE_ ), """labels""": [ANY(SCREAMING_SNAKE_CASE_ )], """scores""": [ANY(SCREAMING_SNAKE_CASE_ )]} )
UpperCAmelCase_: Optional[int] = classifier("""Who are you voting for in 2020?""", candidate_labels=["""politics"""] )
self.assertEqual(SCREAMING_SNAKE_CASE_, {"""sequence""": ANY(SCREAMING_SNAKE_CASE_ ), """labels""": [ANY(SCREAMING_SNAKE_CASE_ )], """scores""": [ANY(SCREAMING_SNAKE_CASE_ )]} )
UpperCAmelCase_: List[Any] = classifier("""Who are you voting for in 2020?""", candidate_labels="""politics, public health""" )
self.assertEqual(
SCREAMING_SNAKE_CASE_, {"""sequence""": ANY(SCREAMING_SNAKE_CASE_ ), """labels""": [ANY(SCREAMING_SNAKE_CASE_ ), ANY(SCREAMING_SNAKE_CASE_ )], """scores""": [ANY(SCREAMING_SNAKE_CASE_ ), ANY(SCREAMING_SNAKE_CASE_ )]} )
        self.assertAlmostEqual(sum(nested_simplify(SCREAMING_SNAKE_CASE_["""scores"""] ) ), 1.0 )
UpperCAmelCase_: Tuple = classifier("""Who are you voting for in 2020?""", candidate_labels=["""politics""", """public health"""] )
self.assertEqual(
SCREAMING_SNAKE_CASE_, {"""sequence""": ANY(SCREAMING_SNAKE_CASE_ ), """labels""": [ANY(SCREAMING_SNAKE_CASE_ ), ANY(SCREAMING_SNAKE_CASE_ )], """scores""": [ANY(SCREAMING_SNAKE_CASE_ ), ANY(SCREAMING_SNAKE_CASE_ )]} )
        self.assertAlmostEqual(sum(nested_simplify(SCREAMING_SNAKE_CASE_["""scores"""] ) ), 1.0 )
UpperCAmelCase_: str = classifier(
"""Who are you voting for in 2020?""", candidate_labels="""politics""", hypothesis_template="""This text is about {}""" )
self.assertEqual(SCREAMING_SNAKE_CASE_, {"""sequence""": ANY(SCREAMING_SNAKE_CASE_ ), """labels""": [ANY(SCREAMING_SNAKE_CASE_ )], """scores""": [ANY(SCREAMING_SNAKE_CASE_ )]} )
# https://github.com/huggingface/transformers/issues/13846
UpperCAmelCase_: Union[str, Any] = classifier(["""I am happy"""], ["""positive""", """negative"""] )
self.assertEqual(
SCREAMING_SNAKE_CASE_, [
{"""sequence""": ANY(SCREAMING_SNAKE_CASE_ ), """labels""": [ANY(SCREAMING_SNAKE_CASE_ ), ANY(SCREAMING_SNAKE_CASE_ )], """scores""": [ANY(SCREAMING_SNAKE_CASE_ ), ANY(SCREAMING_SNAKE_CASE_ )]}
for i in range(1 )
], )
UpperCAmelCase_: Dict = classifier(["""I am happy""", """I am sad"""], ["""positive""", """negative"""] )
self.assertEqual(
SCREAMING_SNAKE_CASE_, [
{"""sequence""": ANY(SCREAMING_SNAKE_CASE_ ), """labels""": [ANY(SCREAMING_SNAKE_CASE_ ), ANY(SCREAMING_SNAKE_CASE_ )], """scores""": [ANY(SCREAMING_SNAKE_CASE_ ), ANY(SCREAMING_SNAKE_CASE_ )]}
for i in range(2 )
], )
with self.assertRaises(SCREAMING_SNAKE_CASE_ ):
classifier("""""", candidate_labels="""politics""" )
with self.assertRaises(SCREAMING_SNAKE_CASE_ ):
classifier(SCREAMING_SNAKE_CASE_, candidate_labels="""politics""" )
with self.assertRaises(SCREAMING_SNAKE_CASE_ ):
classifier("""Who are you voting for in 2020?""", candidate_labels="""""" )
with self.assertRaises(SCREAMING_SNAKE_CASE_ ):
classifier("""Who are you voting for in 2020?""", candidate_labels=SCREAMING_SNAKE_CASE_ )
with self.assertRaises(SCREAMING_SNAKE_CASE_ ):
classifier(
"""Who are you voting for in 2020?""", candidate_labels="""politics""", hypothesis_template="""Not formatting template""", )
with self.assertRaises(SCREAMING_SNAKE_CASE_ ):
classifier(
"""Who are you voting for in 2020?""", candidate_labels="""politics""", hypothesis_template=SCREAMING_SNAKE_CASE_, )
self.run_entailment_id(SCREAMING_SNAKE_CASE_ )
    def run_entailment_id(self, zero_shot_classifier: Pipeline ):
        config = zero_shot_classifier.model.config
        original_label2id = config.label2id
        original_entailment = zero_shot_classifier.entailment_id

        config.label2id = {"""LABEL_0""": 0, """LABEL_1""": 1, """LABEL_2""": 2}
        self.assertEqual(zero_shot_classifier.entailment_id, -1 )

        config.label2id = {"""entailment""": 0, """neutral""": 1, """contradiction""": 2}
        self.assertEqual(zero_shot_classifier.entailment_id, 0 )

        config.label2id = {"""ENTAIL""": 0, """NON-ENTAIL""": 1}
        self.assertEqual(zero_shot_classifier.entailment_id, 0 )

        config.label2id = {"""ENTAIL""": 2, """NEUTRAL""": 1, """CONTR""": 0}
        self.assertEqual(zero_shot_classifier.entailment_id, 2 )

        config.label2id = original_label2id
        self.assertEqual(original_entailment, zero_shot_classifier.entailment_id )
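    # Background (added): the zero-shot pipeline casts each candidate label into an NLI
    # hypothesis (via hypothesis_template) and scores it with the entailment logit, so
    # entailment_id must resolve which index in config.label2id means "entailment";
    # uninformative LABEL_k names fall back to -1 (the last logit), as asserted above.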
@require_torch
    def test_truncation( self ):
        zero_shot_classifier = pipeline(
            """zero-shot-classification""", model="""sshleifer/tiny-distilbert-base-cased-distilled-squad""", framework="""pt""", )
# There was a regression in 4.10 for this
# Adding a test so we don't make the mistake again.
# https://github.com/huggingface/transformers/issues/13381#issuecomment-912343499
zero_shot_classifier(
"""Who are you voting for in 2020?""" * 100, candidate_labels=["""politics""", """public health""", """science"""] )
@require_torch
    def test_small_model_pt( self ):
        zero_shot_classifier = pipeline(
            """zero-shot-classification""", model="""sshleifer/tiny-distilbert-base-cased-distilled-squad""", framework="""pt""", )
        outputs = zero_shot_classifier(
            """Who are you voting for in 2020?""", candidate_labels=["""politics""", """public health""", """science"""] )

        self.assertEqual(
            nested_simplify(outputs ), {
"""sequence""": """Who are you voting for in 2020?""",
"""labels""": ["""science""", """public health""", """politics"""],
"""scores""": [0.3_3_3, 0.3_3_3, 0.3_3_3],
}, )
@require_tf
    def test_small_model_tf( self ):
        zero_shot_classifier = pipeline(
            """zero-shot-classification""", model="""sshleifer/tiny-distilbert-base-cased-distilled-squad""", framework="""tf""", )
        outputs = zero_shot_classifier(
            """Who are you voting for in 2020?""", candidate_labels=["""politics""", """public health""", """science"""] )

        self.assertEqual(
            nested_simplify(outputs ), {
"""sequence""": """Who are you voting for in 2020?""",
"""labels""": ["""science""", """public health""", """politics"""],
"""scores""": [0.3_3_3, 0.3_3_3, 0.3_3_3],
}, )
@slow
@require_torch
    def test_large_model_pt( self ):
        zero_shot_classifier = pipeline("""zero-shot-classification""", model="""roberta-large-mnli""", framework="""pt""" )
        outputs = zero_shot_classifier(
            """Who are you voting for in 2020?""", candidate_labels=["""politics""", """public health""", """science"""] )

        self.assertEqual(
            nested_simplify(outputs ), {
"""sequence""": """Who are you voting for in 2020?""",
"""labels""": ["""politics""", """public health""", """science"""],
"""scores""": [0.9_7_6, 0.0_1_5, 0.0_0_9],
}, )
        outputs = zero_shot_classifier(
"""The dominant sequence transduction models are based on complex recurrent or convolutional neural networks"""
""" in an encoder-decoder configuration. The best performing models also connect the encoder and decoder"""
""" through an attention mechanism. We propose a new simple network architecture, the Transformer, based"""
""" solely on attention mechanisms, dispensing with recurrence and convolutions entirely. Experiments on two"""
""" machine translation tasks show these models to be superior in quality while being more parallelizable"""
""" and requiring significantly less time to train. Our model achieves 28.4 BLEU on the WMT 2014"""
""" English-to-German translation task, improving over the existing best results, including ensembles by"""
""" over 2 BLEU. On the WMT 2014 English-to-French translation task, our model establishes a new"""
""" single-model state-of-the-art BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small"""
""" fraction of the training costs of the best models from the literature. We show that the Transformer"""
""" generalizes well to other tasks by applying it successfully to English constituency parsing both with"""
""" large and limited training data.""", candidate_labels=["""machine learning""", """statistics""", """translation""", """vision"""], multi_label=SCREAMING_SNAKE_CASE_, )
        self.assertEqual(
            nested_simplify(outputs ), {
"""sequence""": (
"""The dominant sequence transduction models are based on complex recurrent or convolutional neural"""
""" networks in an encoder-decoder configuration. The best performing models also connect the"""
""" encoder and decoder through an attention mechanism. We propose a new simple network"""
""" architecture, the Transformer, based solely on attention mechanisms, dispensing with recurrence"""
""" and convolutions entirely. Experiments on two machine translation tasks show these models to be"""
""" superior in quality while being more parallelizable and requiring significantly less time to"""
""" train. Our model achieves 28.4 BLEU on the WMT 2014 English-to-German translation task,"""
""" improving over the existing best results, including ensembles by over 2 BLEU. On the WMT 2014"""
""" English-to-French translation task, our model establishes a new single-model state-of-the-art"""
""" BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small fraction of the training"""
""" costs of the best models from the literature. We show that the Transformer generalizes well to"""
""" other tasks by applying it successfully to English constituency parsing both with large and"""
""" limited training data."""
),
"""labels""": ["""translation""", """machine learning""", """vision""", """statistics"""],
"""scores""": [0.8_1_7, 0.7_1_3, 0.0_1_8, 0.0_1_8],
}, )
@slow
@require_tf
    def test_large_model_tf( self ):
        zero_shot_classifier = pipeline("""zero-shot-classification""", model="""roberta-large-mnli""", framework="""tf""" )
        outputs = zero_shot_classifier(
            """Who are you voting for in 2020?""", candidate_labels=["""politics""", """public health""", """science"""] )

        self.assertEqual(
            nested_simplify(outputs ), {
"""sequence""": """Who are you voting for in 2020?""",
"""labels""": ["""politics""", """public health""", """science"""],
"""scores""": [0.9_7_6, 0.0_1_5, 0.0_0_9],
}, )
        outputs = zero_shot_classifier(
"""The dominant sequence transduction models are based on complex recurrent or convolutional neural networks"""
""" in an encoder-decoder configuration. The best performing models also connect the encoder and decoder"""
""" through an attention mechanism. We propose a new simple network architecture, the Transformer, based"""
""" solely on attention mechanisms, dispensing with recurrence and convolutions entirely. Experiments on two"""
""" machine translation tasks show these models to be superior in quality while being more parallelizable"""
""" and requiring significantly less time to train. Our model achieves 28.4 BLEU on the WMT 2014"""
""" English-to-German translation task, improving over the existing best results, including ensembles by"""
""" over 2 BLEU. On the WMT 2014 English-to-French translation task, our model establishes a new"""
""" single-model state-of-the-art BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small"""
""" fraction of the training costs of the best models from the literature. We show that the Transformer"""
""" generalizes well to other tasks by applying it successfully to English constituency parsing both with"""
""" large and limited training data.""", candidate_labels=["""machine learning""", """statistics""", """translation""", """vision"""], multi_label=SCREAMING_SNAKE_CASE_, )
        self.assertEqual(
            nested_simplify(outputs ), {
"""sequence""": (
"""The dominant sequence transduction models are based on complex recurrent or convolutional neural"""
""" networks in an encoder-decoder configuration. The best performing models also connect the"""
""" encoder and decoder through an attention mechanism. We propose a new simple network"""
""" architecture, the Transformer, based solely on attention mechanisms, dispensing with recurrence"""
""" and convolutions entirely. Experiments on two machine translation tasks show these models to be"""
""" superior in quality while being more parallelizable and requiring significantly less time to"""
""" train. Our model achieves 28.4 BLEU on the WMT 2014 English-to-German translation task,"""
""" improving over the existing best results, including ensembles by over 2 BLEU. On the WMT 2014"""
""" English-to-French translation task, our model establishes a new single-model state-of-the-art"""
""" BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small fraction of the training"""
""" costs of the best models from the literature. We show that the Transformer generalizes well to"""
""" other tasks by applying it successfully to English constituency parsing both with large and"""
""" limited training data."""
),
"""labels""": ["""translation""", """machine learning""", """vision""", """statistics"""],
"""scores""": [0.8_1_7, 0.7_1_3, 0.0_1_8, 0.0_1_8],
}, )
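

# Minimal usage sketch (added; not part of the test suite above):
#     from transformers import pipeline
#     classifier = pipeline("zero-shot-classification", model="roberta-large-mnli")
#     classifier("Who are you voting for in 2020?", candidate_labels=["politics", "science"])
# returns {"sequence": ..., "labels": [...], "scores": [...]} with the labels sorted by
# descending score, as the expected outputs above show.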
| 82 | 1 |
from __future__ import annotations
import unittest
from transformers import RoFormerConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFRoFormerForCausalLM,
TFRoFormerForMaskedLM,
TFRoFormerForMultipleChoice,
TFRoFormerForQuestionAnswering,
TFRoFormerForSequenceClassification,
TFRoFormerForTokenClassification,
TFRoFormerModel,
)
from transformers.models.roformer.modeling_tf_roformer import (
TFRoFormerSelfAttention,
TFRoFormerSinusoidalPositionalEmbedding,
)
class TFRoFormerModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = 13
        self.seq_length = 7
        self.is_training = True
        self.use_input_mask = True
        self.use_token_type_ids = True
        self.use_labels = True
        self.vocab_size = 99
        self.hidden_size = 32
        self.num_hidden_layers = 2
        self.num_attention_heads = 4
        self.intermediate_size = 37
        self.hidden_act = "gelu"
        self.hidden_dropout_prob = 0.1
        self.attention_probs_dropout_prob = 0.1
        self.max_position_embeddings = 512
        self.type_vocab_size = 16
        self.type_sequence_label_size = 2
        self.initializer_range = 0.02
        self.num_labels = 3
        self.num_choices = 4
        self.scope = None

    def prepare_config_and_inputs( self ):
        input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length] )

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size] , self.type_sequence_label_size )
            token_labels = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
            choice_labels = ids_tensor([self.batch_size] , self.num_choices )

        config = RoFormerConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            initializer_range=self.initializer_range,
            return_dict=True,
        )

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def create_and_check_model( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        model = TFRoFormerModel(config=config )
        inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}

        inputs = [input_ids, input_mask]
        result = model(inputs )

        result = model(input_ids )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )

    def create_and_check_lm_head( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        config.is_decoder = True
        model = TFRoFormerForCausalLM(config=config )
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        prediction_scores = model(inputs )["logits"]
        self.parent.assertListEqual(
            list(prediction_scores.numpy().shape ) , [self.batch_size, self.seq_length, self.vocab_size] )

    def create_and_check_for_masked_lm( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        model = TFRoFormerForMaskedLM(config=config )
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )

    def create_and_check_for_sequence_classification( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        config.num_labels = self.num_labels
        model = TFRoFormerForSequenceClassification(config=config )
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )

    def create_and_check_for_multiple_choice( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        config.num_choices = self.num_choices
        model = TFRoFormerForMultipleChoice(config=config )
        multiple_choice_inputs_ids = tf.tile(tf.expand_dims(input_ids , 1 ) , (1, self.num_choices, 1) )
        multiple_choice_input_mask = tf.tile(tf.expand_dims(input_mask , 1 ) , (1, self.num_choices, 1) )
        multiple_choice_token_type_ids = tf.tile(tf.expand_dims(token_type_ids , 1 ) , (1, self.num_choices, 1) )
        inputs = {
            "input_ids": multiple_choice_inputs_ids,
            "attention_mask": multiple_choice_input_mask,
            "token_type_ids": multiple_choice_token_type_ids,
        }
        result = model(inputs )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )

    def create_and_check_for_token_classification( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        config.num_labels = self.num_labels
        model = TFRoFormerForTokenClassification(config=config )
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )

    def create_and_check_for_question_answering( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        model = TFRoFormerForQuestionAnswering(config=config )
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs )
        self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
        self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )

    def prepare_config_and_inputs_for_common( self ):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_tf
class TFRoFormerModelTest(TFModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    all_model_classes = (
(
TFRoFormerModel,
TFRoFormerForCausalLM,
TFRoFormerForMaskedLM,
TFRoFormerForQuestionAnswering,
TFRoFormerForSequenceClassification,
TFRoFormerForTokenClassification,
TFRoFormerForMultipleChoice,
)
if is_tf_available()
else ()
)
    pipeline_model_mapping = (
{
'''feature-extraction''': TFRoFormerModel,
'''fill-mask''': TFRoFormerForMaskedLM,
'''question-answering''': TFRoFormerForQuestionAnswering,
'''text-classification''': TFRoFormerForSequenceClassification,
'''text-generation''': TFRoFormerForCausalLM,
'''token-classification''': TFRoFormerForTokenClassification,
'''zero-shot''': TFRoFormerForSequenceClassification,
}
if is_tf_available()
else {}
)
    test_head_masking = False
    test_onnx = False
    def is_pipeline_test_to_skip(
        self , pipeline_test_casse_name , config_class , model_architecture , tokenizer_name , processor_name ):
        if pipeline_test_casse_name == "TextGenerationPipelineTests":
            return True

        return False

    def setUp( self ):
        self.model_tester = TFRoFormerModelTester(self )
        self.config_tester = ConfigTester(self , config_class=RoFormerConfig , hidden_size=37 )
    def test_config( self ):
        self.config_tester.run_common_tests()

    def test_model( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs )

    def test_for_masked_lm( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs )

    def test_for_causal_lm( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_lm_head(*config_and_inputs )

    def test_for_multiple_choice( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs )

    def test_for_question_answering( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs )

    def test_for_sequence_classification( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs )

    def test_for_token_classification( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs )
@slow
    def test_model_from_pretrained( self ):
        model = TFRoFormerModel.from_pretrained("junnyu/roformer_chinese_base" )
        self.assertIsNotNone(model )
@require_tf
class TFRoFormerModelIntegrationTest(unittest.TestCase ):
    @slow
    def test_inference_masked_lm( self ):
        model = TFRoFormerForMaskedLM.from_pretrained("junnyu/roformer_chinese_base" )
        input_ids = tf.constant([[0, 1, 2, 3, 4, 5]] )
        output = model(input_ids )[0]

        # TODO Replace vocab size
        vocab_size = 50000

        expected_shape = [1, 6, vocab_size]
        self.assertEqual(output.shape , expected_shape )

        print(output[:, :3, :3] )

        # TODO Replace values below with what was printed above.
        expected_slice = tf.constant(
            [
                [
                    [-0.12053341, -1.0264901, 0.29221946],
                    [-1.5133783, 0.197433, 0.15190607],
                    [-5.0135403, -3.900256, -0.84038764],
                ]
            ] )
        tf.debugging.assert_near(output[:, :3, :3] , expected_slice , atol=1e-4 )
@require_tf
class TFRoFormerSinusoidalPositionalEmbeddingTest(unittest.TestCase ):
    tolerance = 1e-4

    def test_basic( self ):
        input_ids = tf.constant([[4, 10]] )
        emb1 = TFRoFormerSinusoidalPositionalEmbedding(num_positions=6 , embedding_dim=6 )
        emb = emb1(input_ids.shape )
        desired_weights = tf.constant(
            [[0.0000, 0.0000, 0.0000, 1.0000, 1.0000, 1.0000], [0.8415, 0.0464, 0.0022, 0.5403, 0.9989, 1.0000]] )

        tf.debugging.assert_near(emb , desired_weights , atol=self.tolerance )

    def test_positional_emb_weights_against_roformer( self ):
        desired_weights = tf.constant(
            [
                [0.0000, 0.0000, 0.0000, 0.0000, 0.0000],
                [0.8415, 0.8219, 0.8020, 0.7819, 0.7617],
                [0.9093, 0.9364, 0.9581, 0.9749, 0.9870],
            ] )
        emb1 = TFRoFormerSinusoidalPositionalEmbedding(num_positions=512 , embedding_dim=512 )
        emb1([2, 16, 512] )
        weights = emb1.weight[:3, :5]

        tf.debugging.assert_near(weights , desired_weights , atol=self.tolerance )
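
# Illustration (added, not part of the original tests): the expected tensors above follow
# the classic sinusoidal formula with a (sin | cos) half-split layout. A minimal NumPy
# sketch, assuming the same layout as TFRoFormerSinusoidalPositionalEmbedding:
#
#     import numpy as np
#
#     def sinusoidal_table(n_pos, dim):
#         inv_freq = 1.0 / (10000 ** (np.arange(0, dim, 2) / dim))
#         angles = np.arange(n_pos)[:, None] * inv_freq[None, :]
#         return np.concatenate([np.sin(angles), np.cos(angles)], axis=-1)
#
# sinusoidal_table(2, 6) reproduces the 2x6 expected tensor in test_basic.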
@require_tf
class TFRoFormerSelfAttentionRotaryPositionEmbeddingTest(unittest.TestCase ):
    tolerance = 1e-4

    def test_apply_rotary_position_embeddings( self ):
        # shape (2, 12, 16, 64): (batch_size, num_heads, seq_len, head_dim)
        query_layer = tf.reshape(tf.range(2 * 12 * 16 * 64 , dtype=tf.float32 ) , shape=(2, 12, 16, 64) ) / 100

        key_layer = -tf.reshape(tf.range(2 * 12 * 16 * 64 , dtype=tf.float32 ) , shape=(2, 12, 16, 64) ) / 100

        embed_positions = TFRoFormerSinusoidalPositionalEmbedding(num_positions=32 , embedding_dim=64 )
        sinusoidal_pos = embed_positions([2, 16, 768] )[None, None, :, :]

        query_layer, key_layer = TFRoFormerSelfAttention.apply_rotary_position_embeddings(
            sinusoidal_pos , query_layer , key_layer )
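        # Note (added): rotary embeddings rotate each feature pair by a position-dependent
        # angle m * theta_i:
        #     q'[2i]     = q[2i] * cos(m * theta_i) - q[2i + 1] * sin(m * theta_i)
        #     q'[2i + 1] = q[2i] * sin(m * theta_i) + q[2i + 1] * cos(m * theta_i)
        # which is what the hard-coded expectations below encode.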
        expected_query = tf.constant(
[
[0.0000, 0.0100, 0.0200, 0.0300, 0.0400, 0.0500, 0.0600, 0.0700],
[-0.2012, 0.8897, 0.0263, 0.9401, 0.2074, 0.9463, 0.3481, 0.9343],
[-1.7057, 0.6271, -1.2145, 1.3897, -0.6303, 1.7647, -0.1173, 1.8985],
[-2.1731, -1.6397, -2.7358, 0.2854, -2.1840, 1.7183, -1.3018, 2.4871],
[0.2717, -3.6173, -2.9206, -2.1988, -3.6638, 0.3858, -2.9155, 2.2980],
[3.9859, -2.1580, -0.7984, -4.4904, -4.1181, -2.0252, -4.4782, 1.1253],
] )
        expected_key = tf.constant(
[
[0.0000, -0.0100, -0.0200, -0.0300, -0.0400, -0.0500, -0.0600, -0.0700],
[0.2012, -0.8897, -0.0263, -0.9401, -0.2074, -0.9463, -0.3481, -0.9343],
[1.7057, -0.6271, 1.2145, -1.3897, 0.6303, -1.7647, 0.1173, -1.8985],
[2.1731, 1.6397, 2.7358, -0.2854, 2.1840, -1.7183, 1.3018, -2.4871],
[-0.2717, 3.6173, 2.9206, 2.1988, 3.6638, -0.3858, 2.9155, -2.2980],
[-3.9859, 2.1580, 0.7984, 4.4904, 4.1181, 2.0252, 4.4782, -1.1253],
] )
        tf.debugging.assert_near(query_layer[0, 0, :6, :8] , expected_query , atol=self.tolerance )
        tf.debugging.assert_near(key_layer[0, 0, :6, :8] , expected_key , atol=self.tolerance )
| 193 |
import argparse
import torch
from transformers import BertConfig, BertForPreTraining, load_tf_weights_in_bert
from transformers.utils import logging
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch( tf_checkpoint_path , bert_config_file , pytorch_dump_path ):
    # Initialise PyTorch model
    config = BertConfig.from_json_file(bert_config_file )
    print(f"Building PyTorch model from configuration: {config}" )
    model = BertForPreTraining(config )

    # Load weights from tf checkpoint
    load_tf_weights_in_bert(model , config , tf_checkpoint_path )

    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}" )
    torch.save(model.state_dict() , pytorch_dump_path )
if __name__ == "__main__":
a__: str = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--tf_checkpoint_path', default=None, type=str, required=True, help='Path to the TensorFlow checkpoint path.'
)
parser.add_argument(
'--bert_config_file',
default=None,
type=str,
required=True,
help=(
'The config json file corresponding to the pre-trained BERT model. \n'
'This specifies the model architecture.'
),
)
parser.add_argument(
'--pytorch_dump_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
a__: Any = parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.bert_config_file, args.pytorch_dump_path)
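

# Example invocation (added; the file names are hypothetical):
#     python convert_bert_original_tf_checkpoint_to_pytorch.py \
#         --tf_checkpoint_path ./bert_model.ckpt \
#         --bert_config_file ./bert_config.json \
#         --pytorch_dump_path ./pytorch_model.bin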
| 193 | 1 |
# Lint as: python3
import os
import re
import urllib.parse
from pathlib import Path
from typing import Callable, List, Optional, Union
from zipfile import ZipFile
from ..utils.file_utils import cached_path, hf_github_url
from ..utils.logging import get_logger
from ..utils.version import Version
logger = get_logger(__name__)


class MockDownloadManager:
    dummy_file_name = "dummy_data"
    datasets_scripts_dir = "datasets"
    is_streaming = False

    def __init__( self , dataset_name: str , config: str , version: Union[Version, str] , cache_dir: Optional[str] = None , use_local_dummy_data: bool = False , load_existing_dummy_data: bool = True , download_callbacks: Optional[List[Callable]] = None , ):
        self.downloaded_size = 0
        self.dataset_name = dataset_name
        self.cache_dir = cache_dir
        self.use_local_dummy_data = use_local_dummy_data
        self.config = config
        # download_callbacks take a single url as input
        self.download_callbacks: List[Callable] = download_callbacks or []
        # if False, it doesn't load existing files and it returns the paths of the dummy files relative
        # to the dummy_data zip file root
        self.load_existing_dummy_data = load_existing_dummy_data

        # TODO(PVP, QL) might need to make this more general
        self.version_name = str(version )
        # to be downloaded
        self._dummy_file = None
        self._bucket_url = None
    @property
    def dummy_file(self ):
        if self._dummy_file is None:
            self._dummy_file = self.download_dummy_data()
        return self._dummy_file

    @property
    def dummy_data_folder(self ):
        if self.config is not None:
            # structure is dummy / config_name / version_name
            return os.path.join("dummy" , self.config.name , self.version_name )
        # structure is dummy / version_name
        return os.path.join("dummy" , self.version_name )

    @property
    def dummy_zip_file(self ):
        return os.path.join(self.dummy_data_folder , "dummy_data.zip" )

    def download_dummy_data(self ):
        path_to_dummy_data_dir = (
            self.local_path_to_dummy_data if self.use_local_dummy_data is True else self.github_path_to_dummy_data
        )
        local_path = cached_path(
            path_to_dummy_data_dir , cache_dir=self.cache_dir , extract_compressed_file=True , force_extract=True )
        return os.path.join(local_path , self.dummy_file_name )
    @property
    def local_path_to_dummy_data(self ):
        return os.path.join(self.datasets_scripts_dir , self.dataset_name , self.dummy_zip_file )

    @property
    def github_path_to_dummy_data(self ):
        if self._bucket_url is None:
            self._bucket_url = hf_github_url(self.dataset_name , self.dummy_zip_file.replace(os.sep , "/" ) )
        return self._bucket_url

    @property
    def manual_dir(self ):
        # return full path if it is a dir
        if os.path.isdir(self.dummy_file ):
            return self.dummy_file
        # else cut off path to file -> example `xsum`.
        return "/".join(self.dummy_file.replace(os.sep , "/" ).split("/" )[:-1] )
    def download_and_extract(self , data_url , *args ):
        if self.load_existing_dummy_data:
            # dummy data is downloaded and tested
            dummy_file = self.dummy_file
        else:
            # dummy data cannot be downloaded and only the path to dummy file is returned
            dummy_file = self.dummy_file_name

        # special case when data_url is a dict
        if isinstance(data_url , dict ):
            return self.create_dummy_data_dict(dummy_file , data_url )
        elif isinstance(data_url , (list, tuple) ):
            return self.create_dummy_data_list(dummy_file , data_url )
        else:
            return self.create_dummy_data_single(dummy_file , data_url )

    def download(self , data_url , *args ):
        return self.download_and_extract(data_url )

    def download_custom(self , data_url , custom_download ):
        return self.download_and_extract(data_url )

    def extract(self , path , *args , **kwargs ):
        return path

    def get_recorded_sizes_checksums(self ):
        return {}
    def create_dummy_data_dict(self , path_to_dummy_data , data_url ):
        dummy_data_dict = {}
        for key, single_urls in data_url.items():
            for download_callback in self.download_callbacks:
                if isinstance(single_urls , list ):
                    for single_url in single_urls:
                        download_callback(single_url )
                else:
                    single_url = single_urls
                    download_callback(single_url )
            # we force the name of each key to be the last file / folder name of the url path
            # if the url has arguments, we need to encode them with urllib.parse.quote_plus
            if isinstance(single_urls , list ):
                value = [os.path.join(path_to_dummy_data , urllib.parse.quote_plus(Path(x ).name ) ) for x in single_urls]
            else:
                single_url = single_urls
                value = os.path.join(path_to_dummy_data , urllib.parse.quote_plus(Path(single_url ).name ) )
            dummy_data_dict[key] = value

        # make sure that values are unique
        if all(isinstance(i , str ) for i in dummy_data_dict.values() ) and len(set(dummy_data_dict.values() ) ) < len(
            dummy_data_dict.values() ):
            # append key to value to make its name unique
            dummy_data_dict = {key: value + key for key, value in dummy_data_dict.items()}
        return dummy_data_dict
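    # Example (added, hypothetical values): a dict like {"train": "https://host/data/train.json?dl=1"}
    # comes back as {"train": "<dummy_file>/train.json%3Fdl%3D1"}: the URL's last path
    # component is percent-encoded with urllib.parse.quote_plus and joined under the
    # dummy data folder, so dataset scripts can open the local dummy file transparently.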
    def create_dummy_data_list(self , path_to_dummy_data , data_url ):
        dummy_data_list = []
        # trick: if there are many shards named like `data.txt-000001-of-00300`, only use the first one
        is_tf_records = all(bool(re.findall("[0-9]{3,}-of-[0-9]{3,}" , url ) ) for url in data_url )
        is_pubmed_records = all(
            url.startswith("https://ftp.ncbi.nlm.nih.gov/pubmed/baseline/pubmed" ) for url in data_url )
        if data_url and (is_tf_records or is_pubmed_records):
            data_url = [data_url[0]] * len(data_url )
        for single_url in data_url:
            for download_callback in self.download_callbacks:
                download_callback(single_url )
            # we force the name of each key to be the last file / folder name of the url path
            # if the url has arguments, we need to encode them with urllib.parse.quote_plus
            value = os.path.join(path_to_dummy_data , urllib.parse.quote_plus(single_url.split("/" )[-1] ) )
            dummy_data_list.append(value )
        return dummy_data_list
    def create_dummy_data_single(self , path_to_dummy_data , data_url ):
        for download_callback in self.download_callbacks:
            download_callback(data_url )
        # we force the name of each key to be the last file / folder name of the url path
        # if the url has arguments, we need to encode them with urllib.parse.quote_plus
        value = os.path.join(path_to_dummy_data , urllib.parse.quote_plus(data_url.split("/" )[-1] ) )
        if os.path.exists(value ) or not self.load_existing_dummy_data:
            return value
        else:
            # Backward compatibility, maybe deprecate at one point.
            # For many datasets with single url calls to dl_manager.download_and_extract,
            # the dummy_data.zip file is actually the zipped downloaded file
            # while now we expected the dummy_data.zip file to be a directory containing
            # the downloaded file.
            return path_to_dummy_data

    def delete_extracted_files(self ):
        pass

    def manage_extracted_files(self ):
        pass
    def iter_archive(self , path ):
        def _iter_archive_members(path ):
            # this preserves the order of the members inside the ZIP archive
            dummy_parent_path = Path(self.dummy_file ).parent
            relative_path = path.relative_to(dummy_parent_path )
            with ZipFile(self.local_path_to_dummy_data ) as zip_file:
                members = zip_file.namelist()
            for member in members:
                if member.startswith(relative_path.as_posix() ):
                    yield dummy_parent_path.joinpath(member )

        path = Path(path )
        file_paths = _iter_archive_members(path ) if self.use_local_dummy_data else path.rglob("*" )
        for file_path in file_paths:
            if file_path.is_file() and not file_path.name.startswith((".", "__") ):
                yield file_path.relative_to(path ).as_posix(), file_path.open("rb" )
    def iter_files(self , paths ):
        if not isinstance(paths , list ):
            paths = [paths]
        for path in paths:
            if os.path.isfile(path ):
                if os.path.basename(path ).startswith((".", "__") ):
                    return
                yield path
            else:
                for dirpath, dirnames, filenames in os.walk(path ):
                    if os.path.basename(dirpath ).startswith((".", "__") ):
                        continue
                    dirnames.sort()
                    for filename in sorted(filenames ):
                        if filename.startswith((".", "__") ):
                            continue
                        yield os.path.join(dirpath , filename )
| 356 |
import os
import re
import shutil
from argparse import ArgumentParser, Namespace
from datasets.commands import BaseDatasetsCLICommand
from datasets.utils.logging import get_logger
HIGHLIGHT_MESSAGE_PRE = '<<<<<<< This should probably be modified because it mentions: '

HIGHLIGHT_MESSAGE_POST = '=======\n>>>>>>>\n'

TO_HIGHLIGHT = [
    'TextEncoderConfig',
    'ByteTextEncoder',
    'SubwordTextEncoder',
    'encoder_config',
    'maybe_build_from_corpus',
    'manual_dir',
]

TO_CONVERT = [
# (pattern, replacement)
# Order is important here for some replacements
(R'tfds\.core', R'datasets'),
(R'tf\.io\.gfile\.GFile', R'open'),
(R'tf\.([\w\d]+)', R'datasets.Value(\'\1\')'),
(R'tfds\.features\.Text\(\)', R'datasets.Value(\'string\')'),
(R'tfds\.features\.Text\(', R'datasets.Value(\'string\'),'),
(R'features\s*=\s*tfds.features.FeaturesDict\(', R'features=datasets.Features('),
(R'tfds\.features\.FeaturesDict\(', R'dict('),
(R'The TensorFlow Datasets Authors', R'The TensorFlow Datasets Authors and the HuggingFace Datasets Authors'),
(R'tfds\.', R'datasets.'),
(R'dl_manager\.manual_dir', R'self.config.data_dir'),
(R'self\.builder_config', R'self.config'),
]
def convert_command_factory(args: Namespace ):
    """Factory used by the argument parser to build a ConvertCommand from the parsed args."""
    return ConvertCommand(args.tfds_path , args.datasets_directory )


class ConvertCommand( BaseDatasetsCLICommand ):
@staticmethod
    def register_subcommand( parser: ArgumentParser ):
        train_parser = parser.add_parser(
            'convert' , help='Convert a TensorFlow Datasets dataset to a HuggingFace Datasets dataset.' , )
        train_parser.add_argument(
            '--tfds_path' , type=str , required=True , help='Path to a TensorFlow Datasets folder to convert or a single tfds file to convert.' , )
        train_parser.add_argument(
            '--datasets_directory' , type=str , required=True , help='Path to the HuggingFace Datasets folder.' )
        train_parser.set_defaults(func=convert_command_factory )

    def __init__( self , tfds_path: str , datasets_directory: str , *args ):
        self._logger = get_logger('datasets-cli/converting' )

        self._tfds_path = tfds_path
        self._datasets_directory = datasets_directory
    def run( self ):
"""simple docstring"""
if os.path.isdir(self._tfds_path ):
lowercase__ = os.path.abspath(self._tfds_path )
elif os.path.isfile(self._tfds_path ):
lowercase__ = os.path.dirname(self._tfds_path )
else:
raise ValueError('''--tfds_path is neither a directory nor a file. Please check path.''' )
lowercase__ = os.path.abspath(self._datasets_directory )
self._logger.info(f'Converting datasets from {abs_tfds_path} to {abs_datasets_path}' )
lowercase__ = []
lowercase__ = []
lowercase__ = {}
if os.path.isdir(self._tfds_path ):
lowercase__ = os.listdir(UpperCamelCase_ )
else:
lowercase__ = [os.path.basename(self._tfds_path )]
for f_name in file_names:
self._logger.info(f'Looking at file {f_name}' )
lowercase__ = os.path.join(UpperCamelCase_ , UpperCamelCase_ )
lowercase__ = os.path.join(UpperCamelCase_ , UpperCamelCase_ )
if not os.path.isfile(UpperCamelCase_ ) or "__init__" in f_name or "_test" in f_name or ".py" not in f_name:
self._logger.info('''Skipping file''' )
continue
with open(UpperCamelCase_ , encoding='''utf-8''' ) as f:
lowercase__ = f.readlines()
lowercase__ = []
lowercase__ = False
lowercase__ = False
lowercase__ = []
for line in lines:
lowercase__ = line
# Convert imports
if "import tensorflow.compat.v2 as tf" in out_line:
continue
elif "@tfds.core" in out_line:
continue
elif "builder=self" in out_line:
continue
elif "import tensorflow_datasets.public_api as tfds" in out_line:
lowercase__ = '''import datasets\n'''
elif "import tensorflow" in out_line:
# order is important here
lowercase__ = ''''''
continue
elif "from absl import logging" in out_line:
lowercase__ = '''from datasets import logging\n'''
elif "getLogger" in out_line:
lowercase__ = out_line.replace('''getLogger''' , '''get_logger''' )
elif any(expression in out_line for expression in TO_HIGHLIGHT ):
lowercase__ = True
                    filtered = list(filter(lambda e : e in out_line , TO_HIGHLIGHT ) )
                    out_lines.append(HIGHLIGHT_MESSAGE_PRE + str(filtered ) + '\n' )
                    out_lines.append(out_line )
                    out_lines.append(HIGHLIGHT_MESSAGE_POST )
continue
else:
for pattern, replacement in TO_CONVERT:
lowercase__ = re.sub(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ )
# Take care of saving utilities (to later move them together with main script)
if "tensorflow_datasets" in out_line:
                    match = re.match(r'''from\stensorflow_datasets.*import\s([^\.\r\n]+)''' , out_line )
tfds_imports.extend(imp.strip() for imp in match.group(1 ).split(''',''' ) )
lowercase__ = '''from . import ''' + match.group(1 )
# Check we have not forget anything
if "tf." in out_line or "tfds." in out_line or "tensorflow_datasets" in out_line:
raise ValueError(f'Error converting {out_line.strip()}' )
if "GeneratorBasedBuilder" in out_line or "BeamBasedBuilder" in out_line:
lowercase__ = True
out_lines.append(UpperCamelCase_ )
if is_builder or "wmt" in f_name:
# We create a new directory for each dataset
lowercase__ = f_name.replace('''.py''' , '''''' )
lowercase__ = os.path.join(UpperCamelCase_ , UpperCamelCase_ )
lowercase__ = os.path.join(UpperCamelCase_ , UpperCamelCase_ )
os.makedirs(UpperCamelCase_ , exist_ok=UpperCamelCase_ )
self._logger.info(f'Adding directory {output_dir}' )
imports_to_builder_map.update({imp: output_dir for imp in tfds_imports} )
else:
# Utilities will be moved at the end
utils_files.append(UpperCamelCase_ )
if needs_manual_update:
with_manual_update.append(UpperCamelCase_ )
with open(UpperCamelCase_ , '''w''' , encoding='''utf-8''' ) as f:
f.writelines(UpperCamelCase_ )
self._logger.info(f'Converted in {output_file}' )
for utils_file in utils_files:
try:
lowercase__ = os.path.basename(UpperCamelCase_ )
lowercase__ = imports_to_builder_map[f_name.replace('''.py''' , '''''' )]
self._logger.info(f'Moving {dest_folder} to {utils_file}' )
shutil.copy(UpperCamelCase_ , UpperCamelCase_ )
except KeyError:
self._logger.error(f'Cannot find destination folder for {utils_file}. Please copy manually.' )
if with_manual_update:
for file_path in with_manual_update:
self._logger.warning(
f'You need to manually update file {file_path} to remove configurations using \'TextEncoderConfig\'.' )
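

# Example (added, illustrating the TO_CONVERT pairs above):
#     re.sub(r"tfds\.features\.Text\(\)", r"datasets.Value('string')", "tfds.features.Text()")
# yields "datasets.Value('string')", and "tf.int64" becomes "datasets.Value('int64')"
# via the generic tf\.([\w\d]+) rule; the pair order matters because the more specific
# patterns must apply before the generic ones.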
| 93 | 0 |
"""simple docstring"""
import torch
from diffusers import EulerDiscreteScheduler
from diffusers.utils import torch_device
from .test_schedulers import SchedulerCommonTest
class EulerDiscreteSchedulerTest(SchedulerCommonTest ):
    scheduler_classes = (EulerDiscreteScheduler,)
    num_inference_steps = 10

    def get_scheduler_config(self , **kwargs ):
        config = {
            'num_train_timesteps': 1100,
            'beta_start': 0.0001,
            'beta_end': 0.02,
            'beta_schedule': 'linear',
        }

        config.update(**kwargs )
        return config
    def test_timesteps(self ):
        for timesteps in [10, 50, 100, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps )

    def test_betas(self ):
        for beta_start, beta_end in zip([0.00001, 0.0001, 0.001] , [0.0002, 0.002, 0.02] ):
            self.check_over_configs(beta_start=beta_start , beta_end=beta_end )

    def test_schedules(self ):
        for schedule in ["linear", "scaled_linear"]:
            self.check_over_configs(beta_schedule=schedule )

    def test_prediction_type(self ):
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type )
    def test_full_loop_no_noise(self ):
        scheduler_class = self.scheduler_classes[0]
        config = self.get_scheduler_config()
        scheduler = scheduler_class(**config )

        scheduler.set_timesteps(self.num_inference_steps )

        generator = torch.manual_seed(0 )

        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma
        sample = sample.to(torch_device )

        for i, t in enumerate(scheduler.timesteps ):
            sample = scheduler.scale_model_input(sample , t )

            model_output = model(sample , t )

            output = scheduler.step(model_output , t , sample , generator=generator )
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample ) )
        result_mean = torch.mean(torch.abs(sample ) )

        assert abs(result_sum.item() - 10.0807 ) < 1e-2
        assert abs(result_mean.item() - 0.0131 ) < 1e-3
    def test_full_loop_with_v_prediction(self ):
        scheduler_class = self.scheduler_classes[0]
        config = self.get_scheduler_config(prediction_type='v_prediction' )
        scheduler = scheduler_class(**config )

        scheduler.set_timesteps(self.num_inference_steps )

        generator = torch.manual_seed(0 )

        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma
        sample = sample.to(torch_device )

        for i, t in enumerate(scheduler.timesteps ):
            sample = scheduler.scale_model_input(sample , t )

            model_output = model(sample , t )

            output = scheduler.step(model_output , t , sample , generator=generator )
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample ) )
        result_mean = torch.mean(torch.abs(sample ) )

        assert abs(result_sum.item() - 0.0002 ) < 1e-2
        assert abs(result_mean.item() - 2.2676e-06 ) < 1e-3
    def test_full_loop_device(self ):
        scheduler_class = self.scheduler_classes[0]
        config = self.get_scheduler_config()
        scheduler = scheduler_class(**config )

        scheduler.set_timesteps(self.num_inference_steps , device=torch_device )

        generator = torch.manual_seed(0 )

        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma.cpu()
        sample = sample.to(torch_device )

        for t in scheduler.timesteps:
            sample = scheduler.scale_model_input(sample , t )

            model_output = model(sample , t )

            output = scheduler.step(model_output , t , sample , generator=generator )
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample ) )
        result_mean = torch.mean(torch.abs(sample ) )

        assert abs(result_sum.item() - 10.0807 ) < 1e-2
        assert abs(result_mean.item() - 0.0131 ) < 1e-3
    def test_full_loop_device_karras_sigmas(self ):
        scheduler_class = self.scheduler_classes[0]
        config = self.get_scheduler_config()
        scheduler = scheduler_class(**config , use_karras_sigmas=True )

        scheduler.set_timesteps(self.num_inference_steps , device=torch_device )

        generator = torch.manual_seed(0 )

        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma.cpu()
        sample = sample.to(torch_device )

        for t in scheduler.timesteps:
            sample = scheduler.scale_model_input(sample , t )

            model_output = model(sample , t )

            output = scheduler.step(model_output , t , sample , generator=generator )
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample ) )
        result_mean = torch.mean(torch.abs(sample ) )

        assert abs(result_sum.item() - 124.52299499511719 ) < 1e-2
        assert abs(result_mean.item() - 0.16213932633399963 ) < 1e-3
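

# Note (added): with use_karras_sigmas=True the noise levels follow Karras et al. (2022),
#     sigma_i = (sigma_max**(1/rho) + i/(n-1) * (sigma_min**(1/rho) - sigma_max**(1/rho)))**rho
# with rho = 7 by default, which redistributes steps toward low noise levels and is why
# the expected sums above differ from the linear-schedule tests.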
| 220 |
"""simple docstring"""
import copy
import json
import os
import tempfile
from transformers import is_torch_available
from .test_configuration_utils import config_common_kwargs
class ConfigTester:
    def __init__( self , parent , config_class=None , has_text_modality=True , common_properties=None , **kwargs ):
        self.parent = parent
        self.config_class = config_class
        self.has_text_modality = has_text_modality
        self.inputs_dict = kwargs
        self.common_properties = common_properties

    def create_and_test_config_common_properties( self ):
        config = self.config_class(**self.inputs_dict )
        common_properties = (
            ['hidden_size', 'num_attention_heads', 'num_hidden_layers']
            if self.common_properties is None
            else self.common_properties
        )

        # Add common fields for text models
        if self.has_text_modality:
            common_properties.extend(['vocab_size'] )

        # Test that config has the common properties as getters
        for prop in common_properties:
            self.parent.assertTrue(hasattr(config , prop ) , msg=F'`{prop}` does not exist' )

        # Test that config has the common properties as setter
        for idx, name in enumerate(common_properties ):
            try:
                setattr(config , name , idx )
                self.parent.assertEqual(
                    getattr(config , name ) , idx , msg=F'`{name} value {idx} expected, but was {getattr(config , name )}' )
            except NotImplementedError:
                # Some models might not be able to implement setters for common_properties
                # In that case, a NotImplementedError is raised
                pass

        # Test if config class can be called with Config(prop_name=..)
        for idx, name in enumerate(common_properties ):
            try:
                config = self.config_class(**{name: idx} )
                self.parent.assertEqual(
                    getattr(config , name ) , idx , msg=F'`{name} value {idx} expected, but was {getattr(config , name )}' )
            except NotImplementedError:
                # Some models might not be able to implement setters for common_properties
                # In that case, a NotImplementedError is raised
                pass

    def create_and_test_config_to_json_string( self ):
        config = self.config_class(**self.inputs_dict )
        obj = json.loads(config.to_json_string() )
        for key, value in self.inputs_dict.items():
            self.parent.assertEqual(obj[key] , value )

    def create_and_test_config_to_json_file( self ):
        config_first = self.config_class(**self.inputs_dict )

        with tempfile.TemporaryDirectory() as tmpdirname:
            json_file_path = os.path.join(tmpdirname , 'config.json' )
            config_first.to_json_file(json_file_path )
            config_second = self.config_class.from_json_file(json_file_path )

        self.parent.assertEqual(config_second.to_dict() , config_first.to_dict() )

    def create_and_test_config_from_and_save_pretrained( self ):
        config_first = self.config_class(**self.inputs_dict )

        with tempfile.TemporaryDirectory() as tmpdirname:
            config_first.save_pretrained(tmpdirname )
            config_second = self.config_class.from_pretrained(tmpdirname )

        self.parent.assertEqual(config_second.to_dict() , config_first.to_dict() )

    def create_and_test_config_from_and_save_pretrained_subfolder( self ):
        config_first = self.config_class(**self.inputs_dict )
        subfolder = 'test'
        with tempfile.TemporaryDirectory() as tmpdirname:
            sub_tmpdirname = os.path.join(tmpdirname , subfolder )
            config_first.save_pretrained(sub_tmpdirname )
            config_second = self.config_class.from_pretrained(tmpdirname , subfolder=subfolder )

        self.parent.assertEqual(config_second.to_dict() , config_first.to_dict() )

    def create_and_test_config_with_num_labels( self ):
        config = self.config_class(**self.inputs_dict , num_labels=5 )
        self.parent.assertEqual(len(config.id2label ) , 5 )
        self.parent.assertEqual(len(config.label2id ) , 5 )

        config.num_labels = 3
        self.parent.assertEqual(len(config.id2label ) , 3 )
        self.parent.assertEqual(len(config.label2id ) , 3 )

    def check_config_can_be_init_without_params( self ):
        if self.config_class.is_composition:
            return
        config = self.config_class()
        self.parent.assertIsNotNone(config )

    def check_config_arguments_init( self ):
        kwargs = copy.deepcopy(config_common_kwargs )
        config = self.config_class(**kwargs )
        wrong_values = []
        for key, value in config_common_kwargs.items():
            if key == "torch_dtype":
                if not is_torch_available():
                    continue
                else:
                    import torch

                    if config.torch_dtype != torch.float16:
                        wrong_values.append(('torch_dtype', config.torch_dtype, torch.float16) )
            elif getattr(config , key ) != value:
                wrong_values.append((key, getattr(config , key ), value) )

        if len(wrong_values ) > 0:
            errors = '\n'.join([F'- {v[0]}: got {v[1]} instead of {v[2]}' for v in wrong_values] )
            raise ValueError(F'The following keys were not properly set in the config:\n{errors}' )

    def run_common_tests( self ):
self.create_and_test_config_common_properties()
self.create_and_test_config_to_json_string()
self.create_and_test_config_to_json_file()
self.create_and_test_config_from_and_save_pretrained()
self.create_and_test_config_from_and_save_pretrained_subfolder()
self.create_and_test_config_with_num_labels()
self.check_config_can_be_init_without_params()
self.check_config_arguments_init()
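

# Minimal usage sketch (added; `MyConfig` is a placeholder for a real config class):
#     tester = ConfigTester(self, config_class=MyConfig, hidden_size=37)
#     tester.run_common_tests()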
| 220 | 1 |
import tempfile
import unittest
from make_student import create_student_by_copying_alternating_layers
from transformers import AutoConfig
from transformers.file_utils import cached_property
from transformers.testing_utils import require_torch
TINY_BART = "sshleifer/bart-tiny-random"
TINY_T5 = "patrickvonplaten/t5-tiny-random"


@require_torch
class MakeStudentTester(unittest.TestCase):
    @cached_property
    def teacher_config(self):
        return AutoConfig.from_pretrained(TINY_BART)

    def test_valid_t5(self):
        student, *_ = create_student_by_copying_alternating_layers(TINY_T5, tempfile.mkdtemp(), e=1, d=1)
        self.assertEqual(student.config.num_hidden_layers, 1)

    def test_asymmetric_t5(self):
        student, *_ = create_student_by_copying_alternating_layers(TINY_T5, tempfile.mkdtemp(), e=1, d=None)

    def test_same_decoder_small_encoder(self):
        student, *_ = create_student_by_copying_alternating_layers(TINY_BART, tempfile.mkdtemp(), e=1, d=None)
        self.assertEqual(student.config.encoder_layers, 1)
        self.assertEqual(student.config.decoder_layers, self.teacher_config.encoder_layers)

    def test_small_enc_small_dec(self):
        student, *_ = create_student_by_copying_alternating_layers(TINY_BART, tempfile.mkdtemp(), e=1, d=1)
        self.assertEqual(student.config.encoder_layers, 1)
        self.assertEqual(student.config.decoder_layers, 1)

    def test_raises_assert(self):
        with self.assertRaises(AssertionError):
            create_student_by_copying_alternating_layers(TINY_BART, tempfile.mkdtemp(), e=None, d=None)
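# Illustrative sketch (not part of the original tests): calling the helper
# directly. `e`/`d` set the student's encoder/decoder depth, and `d=None` keeps
# the teacher's decoder depth; the trailing return values (assumed here to be
# the indices of the copied teacher layers) are captured with `*`.
if __name__ == "__main__":
    student, *copied_layer_ids = create_student_by_copying_alternating_layers(
        TINY_BART, tempfile.mkdtemp(), e=1, d=1
    )
    print(student.config.encoder_layers, student.config.decoder_layers, copied_layer_ids)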
| 358 |
import argparse
import os
import transformers
from .convert_slow_tokenizer import SLOW_TO_FAST_CONVERTERS
from .utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

TOKENIZER_CLASSES = {name: getattr(transformers, name + "Fast") for name in SLOW_TO_FAST_CONVERTERS}


def convert_slow_checkpoint_to_fast(tokenizer_name, checkpoint_name, dump_path, force_download):
    if tokenizer_name is not None and tokenizer_name not in TOKENIZER_CLASSES:
        raise ValueError(f"Unrecognized tokenizer name, should be one of {list(TOKENIZER_CLASSES.keys())}.")

    if tokenizer_name is None:
        tokenizer_names = TOKENIZER_CLASSES
    else:
        tokenizer_names = {tokenizer_name: getattr(transformers, tokenizer_name + "Fast")}

    logger.info(f"Loading tokenizer classes: {tokenizer_names}")

    for tokenizer_name in tokenizer_names:
        tokenizer_class = TOKENIZER_CLASSES[tokenizer_name]

        add_prefix = True
        if checkpoint_name is None:
            checkpoint_names = list(tokenizer_class.max_model_input_sizes.keys())
        else:
            checkpoint_names = [checkpoint_name]

        logger.info(f"For tokenizer {tokenizer_class.__class__.__name__} loading checkpoints: {checkpoint_names}")

        for checkpoint in checkpoint_names:
            logger.info(f"Loading {tokenizer_class.__class__.__name__} {checkpoint}")

            # Load tokenizer
            tokenizer = tokenizer_class.from_pretrained(checkpoint, force_download=force_download)

            # Save fast tokenizer
            logger.info(f"Save fast tokenizer to {dump_path} with prefix {checkpoint} add_prefix {add_prefix}")

            # For organization names we create sub-directories
            if "/" in checkpoint:
                checkpoint_directory, checkpoint_prefix_name = checkpoint.split("/")
                dump_path_full = os.path.join(dump_path, checkpoint_directory)
            elif add_prefix:
                checkpoint_prefix_name = checkpoint
                dump_path_full = dump_path
            else:
                checkpoint_prefix_name = None
                dump_path_full = dump_path

            logger.info(f"=> {dump_path_full} with prefix {checkpoint_prefix_name}, add_prefix {add_prefix}")

            if checkpoint in list(tokenizer.pretrained_vocab_files_map.values())[0]:
                file_path = list(tokenizer.pretrained_vocab_files_map.values())[0][checkpoint]
                next_char = file_path.split(checkpoint)[-1][0]
                if next_char == "/":
                    dump_path_full = os.path.join(dump_path_full, checkpoint_prefix_name)
                    checkpoint_prefix_name = None

                logger.info(f"=> {dump_path_full} with prefix {checkpoint_prefix_name}, add_prefix {add_prefix}")

            file_names = tokenizer.save_pretrained(
                dump_path_full, legacy_format=False, filename_prefix=checkpoint_prefix_name
            )
            logger.info(f"=> File names {file_names}")

            for file_name in file_names:
                if not file_name.endswith("tokenizer.json"):
                    os.remove(file_name)
                    logger.info(f"=> removing {file_name}")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--dump_path", default=None, type=str, required=True, help="Path to output generated fast tokenizer files."
)
parser.add_argument(
"--tokenizer_name",
default=None,
type=str,
help=(
F"Optional tokenizer type selected in the list of {list(TOKENIZER_CLASSES.keys())}. If not given, will "
"download and convert all the checkpoints from AWS."
),
)
parser.add_argument(
"--checkpoint_name",
default=None,
type=str,
help="Optional checkpoint name. If not given, will download and convert the canonical checkpoints from AWS.",
)
parser.add_argument(
"--force_download",
action="store_true",
help="Re-download checkpoints.",
)
    args = parser.parse_args()
convert_slow_checkpoint_to_fast(args.tokenizer_name, args.checkpoint_name, args.dump_path, args.force_download)
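# Example invocation (illustrative; the script filename and output directory are
# assumptions, and each selected checkpoint is downloaded before re-serialization):
#
#   python convert_slow_tokenizers_checkpoints_to_fast.py \
#       --tokenizer_name BertTokenizer \
#       --checkpoint_name bert-base-uncased \
#       --dump_path ./fast_tokenizers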
| 84 | 0 |
import importlib.metadata
import operator
import re
import sys
from typing import Optional
from packaging import version
ops = {
    "<": operator.lt,
    "<=": operator.le,
    "==": operator.eq,
    "!=": operator.ne,
    ">=": operator.ge,
    ">": operator.gt,
}
def _compare_versions(op, got_ver, want_ver, requirement, pkg, hint):
    if got_ver is None or want_ver is None:
        raise ValueError(
            f"Unable to compare versions for {requirement}: need={want_ver} found={got_ver}. This is unusual. Consider"
            f" reinstalling {pkg}."
        )
    if not ops[op](version.parse(got_ver), version.parse(want_ver)):
        raise ImportError(
            f"{requirement} is required for a normal functioning of this module, but found {pkg}=={got_ver}.{hint}"
        )


def require_version(requirement: str, hint: Optional[str] = None) -> None:
    hint = f"\n{hint}" if hint is not None else ""

    # non-versioned check
    if re.match(r"^[\w_\-\d]+$", requirement):
        pkg, op, want_ver = requirement, None, None
    else:
        match = re.findall(r"^([^!=<>\s]+)([\s!=<>]{1,2}.+)", requirement)
        if not match:
            raise ValueError(
                "requirement needs to be in the pip package format, e.g., package_a==1.23, or package_b>=1.23, but"
                f" got {requirement}"
            )
        pkg, want_full = match[0]
        want_range = want_full.split(",")  # there could be multiple requirements
        wanted = {}
        for w in want_range:
            match = re.findall(r"^([\s!=<>]{1,2})(.+)", w)
            if not match:
                raise ValueError(
                    "requirement needs to be in the pip package format, e.g., package_a==1.23, or package_b>=1.23,"
                    f" but got {requirement}"
                )
            op, want_ver = match[0]
            wanted[op] = want_ver
            if op not in ops:
                raise ValueError(f"{requirement}: need one of {list(ops.keys())}, but got {op}")

    # special case
    if pkg == "python":
        got_ver = ".".join([str(x) for x in sys.version_info[:3]])
        for op, want_ver in wanted.items():
            _compare_versions(op, got_ver, want_ver, requirement, pkg, hint)
        return

    # check if any version is installed
    try:
        got_ver = importlib.metadata.version(pkg)
    except importlib.metadata.PackageNotFoundError:
        raise importlib.metadata.PackageNotFoundError(
            f"The '{requirement}' distribution was not found and is required by this application. {hint}"
        )

    # check that the right version is installed if version number or a range was provided
    if want_ver is not None:
        for op, want_ver in wanted.items():
            _compare_versions(op, got_ver, want_ver, requirement, pkg, hint)


def require_version_core(requirement):
    """require_version wrapper which emits a core-specific hint on failure."""
    hint = "Try: pip install transformers -U or pip install -e '.[dev]' if you're working with git main"
    return require_version(requirement, hint)
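# Illustrative sketch (not part of the original module): typical call sites.
# The pins below are placeholders, not real requirements of anything.
if __name__ == "__main__":
    require_version("packaging>=20.0", hint="pip install -U packaging")  # ranged pin with a hint
    require_version("packaging")      # presence-only check
    require_version("python>=3.7.0")  # interpreter version special case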
| 274 |
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import CLIPImageProcessor, CLIPProcessor
@require_vision
class CLIPProcessorTest(unittest.TestCase):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()

        # fmt: off
        vocab = ["l", "o", "w", "e", "r", "s", "t", "i", "d", "n", "lo", "l</w>", "w</w>", "r</w>", "t</w>", "low</w>", "er</w>", "lowest</w>", "newer</w>", "wider", "<unk>", "<|startoftext|>", "<|endoftext|>"]
        # fmt: on
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "l o", "lo w</w>", "e r</w>", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))

        image_processor_map = {
            "do_resize": True,
            "size": 20,
            "do_center_crop": True,
            "crop_size": 18,
            "do_normalize": True,
            "image_mean": [0.48145466, 0.4578275, 0.40821073],
            "image_std": [0.26862954, 0.26130258, 0.27577711],
        }
        self.image_processor_file = os.path.join(self.tmpdirname, IMAGE_PROCESSOR_NAME)
        with open(self.image_processor_file, "w", encoding="utf-8") as fp:
            json.dump(image_processor_map, fp)

    def get_tokenizer(self, **kwargs):
        return CLIPTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        return CLIPTokenizerFast.from_pretrained(self.tmpdirname, **kwargs)

    def get_image_processor(self, **kwargs):
        return CLIPImageProcessor.from_pretrained(self.tmpdirname, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        """Prepares a list of PIL images for the processor tests."""
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs

    def test_save_load_pretrained_default(self):
        tokenizer_slow = self.get_tokenizer()
        tokenizer_fast = self.get_rust_tokenizer()
        image_processor = self.get_image_processor()

        processor_slow = CLIPProcessor(tokenizer=tokenizer_slow, image_processor=image_processor)
        processor_slow.save_pretrained(self.tmpdirname)
        processor_slow = CLIPProcessor.from_pretrained(self.tmpdirname, use_fast=False)

        processor_fast = CLIPProcessor(tokenizer=tokenizer_fast, image_processor=image_processor)
        processor_fast.save_pretrained(self.tmpdirname)
        processor_fast = CLIPProcessor.from_pretrained(self.tmpdirname)

        self.assertEqual(processor_slow.tokenizer.get_vocab(), tokenizer_slow.get_vocab())
        self.assertEqual(processor_fast.tokenizer.get_vocab(), tokenizer_fast.get_vocab())
        self.assertEqual(tokenizer_slow.get_vocab(), tokenizer_fast.get_vocab())
        self.assertIsInstance(processor_slow.tokenizer, CLIPTokenizer)
        self.assertIsInstance(processor_fast.tokenizer, CLIPTokenizerFast)

        self.assertEqual(processor_slow.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertEqual(processor_fast.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertIsInstance(processor_slow.image_processor, CLIPImageProcessor)
        self.assertIsInstance(processor_fast.image_processor, CLIPImageProcessor)

    def test_save_load_pretrained_additional_features(self):
        processor = CLIPProcessor(tokenizer=self.get_tokenizer(), image_processor=self.get_image_processor())
        processor.save_pretrained(self.tmpdirname)

        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False, padding_value=1.0)

        processor = CLIPProcessor.from_pretrained(
            self.tmpdirname, bos_token="(BOS)", eos_token="(EOS)", do_normalize=False, padding_value=1.0
        )

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.tokenizer, CLIPTokenizerFast)

        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, CLIPImageProcessor)

    def test_image_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = CLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)

        image_input = self.prepare_image_inputs()

        input_image_proc = image_processor(image_input, return_tensors="np")
        input_processor = processor(images=image_input, return_tensors="np")

        for key in input_image_proc.keys():
            self.assertAlmostEqual(input_image_proc[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_tokenizer(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = CLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"

        encoded_processor = processor(text=input_str)
        encoded_tok = tokenizer(input_str)

        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key])

    def test_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = CLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), ["input_ids", "attention_mask", "pixel_values"])

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_tokenizer_decode(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = CLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)

        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]

        decoded_processor = processor.batch_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)

        self.assertListEqual(decoded_tok, decoded_processor)

    def test_model_input_names(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = CLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), processor.model_input_names)
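# Illustrative round-trip sketch (not part of the original suite): the same
# save/reload cycle the tests exercise, written out plainly. The checkpoint
# name is just a public example and downloading it requires network access.
if __name__ == "__main__":
    processor = CLIPProcessor.from_pretrained("openai/clip-vit-base-patch32")
    save_dir = tempfile.mkdtemp()
    processor.save_pretrained(save_dir)
    reloaded = CLIPProcessor.from_pretrained(save_dir)
    assert reloaded.tokenizer.get_vocab() == processor.tokenizer.get_vocab()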
| 274 | 1 |
'''simple docstring'''
from arguments import InitializationArguments
from transformers import AutoConfig, AutoModelForCausalLM, AutoTokenizer, HfArgumentParser
# Configuration
parser = HfArgumentParser(InitializationArguments)
args = parser.parse_args()

# Load codeparrot tokenizer trained for Python code tokenization
tokenizer = AutoTokenizer.from_pretrained(args.tokenizer_name)

# Config: "scale_attn_by_layer_idx" and "reorder_and_upcast_attn" are Mistral stability tweaks
config_kwargs = {
    "vocab_size": len(tokenizer),
    "scale_attn_by_inverse_layer_idx": True,
    "reorder_and_upcast_attn": True,
}

# Load model config (GPT-2 large in this case)
config = AutoConfig.from_pretrained(args.config_name, **config_kwargs)

# Initialize new model with config
model = AutoModelForCausalLM.from_config(config)

# Save model to the hub
model.save_pretrained(args.model_name, push_to_hub=args.push_to_hub)
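# Example invocation (illustrative; the script filename and argument values are
# placeholders — the accepted flags are defined by InitializationArguments):
#
#   python initialize_model.py \
#       --config_name gpt2-large \
#       --tokenizer_name codeparrot/codeparrot \
#       --model_name codeparrot-model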
| 106 | '''simple docstring'''
import argparse
import logging
import os
import sys
import numpy as np
import onnxruntime
import torch
from bart_onnx.generation_onnx import BARTBeamSearchGenerator
from bart_onnx.reduce_onnx_size import remove_dup_initializers
import transformers
from transformers import BartForConditionalGeneration, BartTokenizer
logging.basicConfig(
format='%(asctime)s | %(levelname)s | %(name)s | [%(filename)s:%(lineno)d] %(message)s',
datefmt='%Y-%m-%d %H:%M:%S',
level=os.environ.get('LOGLEVEL', 'INFO').upper(),
stream=sys.stdout,
)
logger = logging.getLogger(__name__)

model_dict = {"facebook/bart-base": BartForConditionalGeneration}
tokenizer_dict = {"facebook/bart-base": BartTokenizer}
def parse_args():
    parser = argparse.ArgumentParser(description="Export Bart model + Beam Search to ONNX graph.")
    parser.add_argument(
        "--validation_file", type=str, default=None, help="A csv or a json file containing the validation data."
    )
    parser.add_argument(
        "--max_length",
        type=int,
        default=5,
        help="The maximum total input sequence length after tokenization.",
    )
    parser.add_argument(
        "--num_beams",
        type=int,
        default=None,
        help=(
            "Number of beams to use for evaluation. This argument will be "
            "passed to ``model.generate``, which is used during ``evaluate`` and ``predict``."
        ),
    )
    parser.add_argument(
        "--model_name_or_path",
        type=str,
        help="Path to pretrained model or model identifier from huggingface.co/models.",
        required=True,
    )
    parser.add_argument(
        "--config_name",
        type=str,
        default=None,
        help="Pretrained config name or path if not the same as model_name",
    )
    parser.add_argument(
        "--device",
        type=str,
        default="cpu",
        help="Device where the model will be run",
    )
    parser.add_argument("--output_file_path", type=str, default=None, help="Where to store the final ONNX file.")

    args = parser.parse_args()
    return args
def load_model_tokenizer(model_name, device="cpu"):
    huggingface_model = model_dict[model_name].from_pretrained(model_name).to(device)
    tokenizer = tokenizer_dict[model_name].from_pretrained(model_name)

    if model_name in ["facebook/bart-base"]:
        huggingface_model.config.no_repeat_ngram_size = 0
        huggingface_model.config.forced_bos_token_id = None
        huggingface_model.config.min_length = 0

    return huggingface_model, tokenizer
def export_and_validate_model(model, tokenizer, onnx_file_path, num_beams, max_length):
    model.eval()

    ort_sess = None
    bart_script_model = torch.jit.script(BARTBeamSearchGenerator(model))

    with torch.no_grad():
        ARTICLE_TO_SUMMARIZE = "My friends are cool but they eat too many carbs."
        inputs = tokenizer([ARTICLE_TO_SUMMARIZE], max_length=1024, return_tensors="pt").to(model.device)

        summary_ids = model.generate(
            inputs["input_ids"],
            attention_mask=inputs["attention_mask"],
            num_beams=num_beams,
            max_length=max_length,
            early_stopping=True,
            decoder_start_token_id=model.config.decoder_start_token_id,
        )

        torch.onnx.export(
            bart_script_model,
            (
                inputs["input_ids"],
                inputs["attention_mask"],
                num_beams,
                max_length,
                model.config.decoder_start_token_id,
            ),
            onnx_file_path,
            opset_version=14,
            input_names=["input_ids", "attention_mask", "num_beams", "max_length", "decoder_start_token_id"],
            output_names=["output_ids"],
            dynamic_axes={
                "input_ids": {0: "batch", 1: "seq"},
                "output_ids": {0: "batch", 1: "seq_out"},
            },
            example_outputs=summary_ids,
        )

        logger.info("Model exported to {}".format(onnx_file_path))

        new_onnx_file_path = remove_dup_initializers(os.path.abspath(onnx_file_path))

        logger.info("Deduplicated and optimized model written to {}".format(new_onnx_file_path))

        ort_sess = onnxruntime.InferenceSession(new_onnx_file_path)
        ort_out = ort_sess.run(
            None,
            {
                "input_ids": inputs["input_ids"].cpu().numpy(),
                "attention_mask": inputs["attention_mask"].cpu().numpy(),
                "num_beams": np.array(num_beams),
                "max_length": np.array(max_length),
                "decoder_start_token_id": np.array(model.config.decoder_start_token_id),
            },
        )

        np.testing.assert_allclose(summary_ids.cpu().numpy(), ort_out[0], rtol=1e-3, atol=1e-3)

        logger.info("Model outputs from torch and ONNX Runtime are similar.")
        logger.info("Success.")
def main():
    args = parse_args()
    max_length = 5
    num_beams = 4

    # Make one log on every process with the configuration for debugging.
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        level=logging.INFO,
    )
    logger.setLevel(logging.INFO)
    transformers.utils.logging.set_verbosity_error()

    device = torch.device(args.device)

    model, tokenizer = load_model_tokenizer(args.model_name_or_path, device)
    if model.config.decoder_start_token_id is None:
        raise ValueError("Make sure that `config.decoder_start_token_id` is correctly defined")

    model.to(device)

    if args.max_length:
        max_length = args.max_length

    if args.num_beams:
        num_beams = args.num_beams

    if args.output_file_path:
        output_name = args.output_file_path
    else:
        output_name = "BART.onnx"

    logger.info("Exporting model to ONNX")
    export_and_validate_model(model, tokenizer, output_name, num_beams, max_length)
if __name__ == "__main__":
main()
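# Example invocation (illustrative; the script filename is an assumption):
#
#   python run_onnx_exporter.py \
#       --model_name_or_path facebook/bart-base \
#       --num_beams 4 \
#       --max_length 5 \
#       --output_file_path bart.onnx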
| 106 | 1 |
from abc import ABC, abstractmethod
from typing import List, Optional
class Constraint(ABC):
    def __init__(self):
        # test for the above condition
        self.test()

    def test(self):
        counter = 0
        completed = False
        while not completed:
            if counter == 1:
                self.reset()
            advance = self.advance()
            if not self.does_advance(advance):
                raise Exception(
                    "Custom Constraint is not defined correctly. self.does_advance(self.advance()) must be true."
                )

            stepped, completed, reset = self.update(advance)
            counter += 1

            if counter > 10000:
                raise Exception("update() does not fulfill the constraint.")

        if self.remaining() != 0:
            raise Exception("Custom Constraint is not defined correctly.")

    @abstractmethod
    def advance(self):
        raise NotImplementedError(
            f"{self.__class__} is an abstract class. Only classes inheriting this class can be called."
        )

    @abstractmethod
    def does_advance(self, token_id: int):
        raise NotImplementedError(
            f"{self.__class__} is an abstract class. Only classes inheriting this class can be called."
        )

    @abstractmethod
    def update(self, token_id: int):
        raise NotImplementedError(
            f"{self.__class__} is an abstract class. Only classes inheriting this class can be called."
        )

    @abstractmethod
    def reset(self):
        raise NotImplementedError(
            f"{self.__class__} is an abstract class. Only classes inheriting this class can be called."
        )

    @abstractmethod
    def remaining(self):
        raise NotImplementedError(
            f"{self.__class__} is an abstract class. Only classes inheriting this class can be called."
        )

    @abstractmethod
    def copy(self, stateful=False):
        raise NotImplementedError(
            f"{self.__class__} is an abstract class. Only classes inheriting this class can be called."
        )
class PhrasalConstraint(Constraint):
    def __init__(self, token_ids: List[int]):
        super(Constraint, self).__init__()

        if not isinstance(token_ids, list) or len(token_ids) == 0:
            raise ValueError(f"`token_ids` has to be a non-empty list, but is {token_ids}.")
        if any((not isinstance(token_id, int) or token_id < 0) for token_id in token_ids):
            raise ValueError(f"Each list in `token_ids` has to be a list of positive integers, but is {token_ids}.")

        self.token_ids = token_ids

        self.seqlen = len(self.token_ids)
        self.fulfilled_idx = -1  # the index of the currently fulfilled step
        self.completed = False

    def advance(self):
        if self.completed:
            return None
        return self.token_ids[self.fulfilled_idx + 1]

    def does_advance(self, token_id: int):
        if not isinstance(token_id, int):
            raise ValueError(f"`token_id` has to be an `int`, but is {token_id} of type {type(token_id)}")

        if self.completed:
            return False

        return token_id == self.token_ids[self.fulfilled_idx + 1]

    def update(self, token_id: int):
        if not isinstance(token_id, int):
            raise ValueError(f"`token_id` has to be an `int`, but is {token_id} of type {type(token_id)}")

        stepped = False
        completed = False
        reset = False

        if self.does_advance(token_id):
            self.fulfilled_idx += 1
            stepped = True
            if self.fulfilled_idx == (self.seqlen - 1):
                completed = True
            self.completed = completed
        else:
            # failed to make progress.
            reset = True
            self.reset()
        return stepped, completed, reset

    def reset(self):
        self.completed = False
        self.fulfilled_idx = 0

    def remaining(self):
        return self.seqlen - (self.fulfilled_idx + 1)

    def copy(self, stateful=False):
        new_constraint = PhrasalConstraint(self.token_ids)

        if stateful:
            new_constraint.seq_len = self.seqlen
            new_constraint.fulfilled_idx = self.fulfilled_idx
            new_constraint.completed = self.completed

        return new_constraint
class DisjunctiveTrie:
    def __init__(self, nested_token_ids: List[List[int]], no_subsets=True):
        self.max_height = max([len(one) for one in nested_token_ids])

        root = {}
        for token_ids in nested_token_ids:
            level = root
            for tidx, token_id in enumerate(token_ids):
                if token_id not in level:
                    level[token_id] = {}
                level = level[token_id]

        if no_subsets and self.has_subsets(root, nested_token_ids):
            raise ValueError(
                "Each list in `nested_token_ids` can't be a complete subset of another list, but is"
                f" {nested_token_ids}."
            )

        self.trie = root

    def next_tokens(self, current_seq):
        start = self.trie

        for current_token in current_seq:
            start = start[current_token]

        next_tokens = list(start.keys())

        return next_tokens

    def reached_leaf(self, current_seq):
        next_tokens = self.next_tokens(current_seq)

        return len(next_tokens) == 0

    def count_leaves(self, root):
        next_nodes = list(root.values())
        if len(next_nodes) == 0:
            return 1
        else:
            return sum([self.count_leaves(nn) for nn in next_nodes])

    def has_subsets(self, trie, nested_token_ids):
        leaf_count = self.count_leaves(trie)
        return len(nested_token_ids) != leaf_count
class DisjunctiveConstraint(Constraint):
    def __init__(self, nested_token_ids: List[List[int]]):
        super(Constraint, self).__init__()

        if not isinstance(nested_token_ids, list) or len(nested_token_ids) == 0:
            raise ValueError(f"`nested_token_ids` has to be a non-empty list, but is {nested_token_ids}.")
        if any(not isinstance(token_ids, list) for token_ids in nested_token_ids):
            raise ValueError(f"`nested_token_ids` has to be a list of lists, but is {nested_token_ids}.")
        if any(
            any((not isinstance(token_id, int) or token_id < 0) for token_id in token_ids)
            for token_ids in nested_token_ids
        ):
            raise ValueError(
                f"Each list in `nested_token_ids` has to be a list of positive integers, but is {nested_token_ids}."
            )

        self.trie = DisjunctiveTrie(nested_token_ids)
        self.token_ids = nested_token_ids

        self.seqlen = self.trie.max_height
        self.current_seq = []
        self.completed = False

    def advance(self):
        token_list = self.trie.next_tokens(self.current_seq)

        if len(token_list) == 0:
            return None
        else:
            return token_list

    def does_advance(self, token_id: int):
        if not isinstance(token_id, int):
            raise ValueError(f"`token_id` is supposed to be type `int`, but is {token_id} of type {type(token_id)}")

        next_tokens = self.trie.next_tokens(self.current_seq)

        return token_id in next_tokens

    def update(self, token_id: int):
        if not isinstance(token_id, int):
            raise ValueError(f"`token_id` is supposed to be type `int`, but is {token_id} of type {type(token_id)}")

        stepped = False
        completed = False
        reset = False

        if self.does_advance(token_id):
            self.current_seq.append(token_id)
            stepped = True
        else:
            reset = True
            self.reset()

        completed = self.trie.reached_leaf(self.current_seq)
        self.completed = completed

        return stepped, completed, reset

    def reset(self):
        self.completed = False
        self.current_seq = []

    def remaining(self):
        if self.completed:
            # since this can be completed without reaching max height
            return 0
        else:
            return self.seqlen - len(self.current_seq)

    def copy(self, stateful=False):
        new_constraint = DisjunctiveConstraint(self.token_ids)

        if stateful:
            new_constraint.seq_len = self.seqlen
            new_constraint.current_seq = self.current_seq
            new_constraint.completed = self.completed

        return new_constraint
class ConstraintListState:
    def __init__(self, constraints: List[Constraint]):
        self.constraints = constraints

        # max # of steps required to fulfill a given constraint
        self.max_seqlen = max([c.seqlen for c in constraints])
        self.n_constraints = len(constraints)
        self.completed = False

        self.init_state()

    def init_state(self):
        self.complete_constraints = []
        self.inprogress_constraint = None
        self.pending_constraints = [constraint.copy(stateful=False) for constraint in self.constraints]

    def get_bank(self):
        add = 0
        if self.inprogress_constraint:
            # extra points for having a constraint mid-fulfilled
            add += self.max_seqlen - self.inprogress_constraint.remaining()

        return (len(self.complete_constraints) * self.max_seqlen) + add

    def advance(self):
        token_list = []
        if self.inprogress_constraint is None:
            for constraint in self.pending_constraints:  # "pending" == "unfulfilled yet"
                advance = constraint.advance()
                if isinstance(advance, int):
                    token_list.append(advance)
                elif isinstance(advance, list):
                    token_list.extend(advance)
        else:
            advance = self.inprogress_constraint.advance()
            if isinstance(advance, int):
                token_list.append(advance)
            elif isinstance(advance, list):
                token_list.extend(advance)

        if len(token_list) == 0:
            return None
        else:
            return token_list

    def reset(self, token_ids: Optional[List[int]]):
        self.init_state()

        if token_ids is not None:
            for token in token_ids:
                # completes or steps **one** constraint
                complete, stepped = self.add(token)

                # the entire list of constraints are fulfilled
                if self.completed:
                    break

    def add(self, token_id: int):
        if not isinstance(token_id, int):
            raise ValueError(f"`token_id` should be an `int`, but is `{token_id}`.")

        complete, stepped = False, False

        if self.completed:
            complete = True
            stepped = False
            return complete, stepped

        if self.inprogress_constraint is not None:
            # In the middle of fulfilling a constraint. If the `token_id` *does* make incremental progress on the
            # current job, simply update the state
            stepped, complete, reset = self.inprogress_constraint.update(token_id)
            if reset:
                # 1. If the next token breaks the progress, then we must restart.
                #     e.g. constraint = "I love pies" and sequence so far is "I love" but `token_id` == "books".

                #     But that doesn't mean we self.init_state(), since we only reset the state for this particular
                #     constraint, not the full list of constraints.

                self.pending_constraints.append(self.inprogress_constraint.copy(stateful=False))
                self.inprogress_constraint = None

            if complete:
                # 2. If the next token completes the constraint, move it to completed list, set
                #     inprogress to None. If there are no pending constraints either, then this full list of constraints
                #     is complete.

                self.complete_constraints.append(self.inprogress_constraint)
                self.inprogress_constraint = None

                if len(self.pending_constraints) == 0:
                    # we're done!
                    self.completed = True

        else:
            # Not in the middle of fulfilling a constraint. So does this `token_id` help us step towards any of our list
            # of constraints?

            for cidx, pending_constraint in enumerate(self.pending_constraints):
                if pending_constraint.does_advance(token_id):
                    stepped, complete, reset = pending_constraint.update(token_id)

                    if not stepped:
                        raise Exception(
                            "`constraint.update(token_id)` is not yielding incremental progress, "
                            "even though `constraint.does_advance(token_id)` is true."
                        )

                    if complete:
                        self.complete_constraints.append(pending_constraint)
                        self.inprogress_constraint = None

                    if not complete and stepped:
                        self.inprogress_constraint = pending_constraint

                    if complete or stepped:
                        # If we made any progress at all, then it's at least not a "pending constraint".

                        self.pending_constraints = (
                            self.pending_constraints[:cidx] + self.pending_constraints[cidx + 1 :]
                        )

                        if len(self.pending_constraints) == 0 and self.inprogress_constraint is None:
                            # If there's no longer any pending after this and no inprogress either, then we must be
                            # complete.

                            self.completed = True

                        break  # prevent accidentally stepping through multiple constraints with just one token.

        return complete, stepped

    def copy(self, stateful=True):
        new_state = ConstraintListState(self.constraints)  # we never actually modify self.constraints objects
        # throughout this process, so they stay at their initialization state.

        if stateful:
            new_state.complete_constraints = [
                constraint.copy(stateful=True) for constraint in self.complete_constraints
            ]
            if self.inprogress_constraint is not None:
                new_state.inprogress_constraint = self.inprogress_constraint.copy(stateful=True)
            new_state.pending_constraints = [constraint.copy() for constraint in self.pending_constraints]

        return new_state
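# Illustrative sketch (not part of the original module): these classes back
# constrained beam search in `transformers`, where `PhrasalConstraint` forces an
# exact token sequence and `DisjunctiveConstraint` forces any one of several.
# Model and text below are example placeholders; kept commented to avoid a
# circular import from inside the library.
#
#     from transformers import AutoModelForSeq2SeqLM, AutoTokenizer
#
#     tokenizer = AutoTokenizer.from_pretrained("t5-small")
#     model = AutoModelForSeq2SeqLM.from_pretrained("t5-small")
#     force_ids = tokenizer("sehr gut", add_special_tokens=False).input_ids
#     inputs = tokenizer("translate English to German: it is very good", return_tensors="pt")
#     out = model.generate(**inputs, constraints=[PhrasalConstraint(force_ids)], num_beams=4)
#     print(tokenizer.decode(out[0], skip_special_tokens=True))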
| 275 |
'''simple docstring'''
import unittest
from knapsack import greedy_knapsack as kp
class TestClass(unittest.TestCase):
    """
    Test cases for the greedy knapsack implementation.
    """

    def test_sorted(self):
        # kp.calc_profit takes (profit, weight, max_weight) and the result
        # should match the expected maximum profit.
        profit = [10, 20, 30, 40, 50, 60]
        weight = [2, 4, 6, 8, 10, 12]
        max_weight = 100
        self.assertEqual(kp.calc_profit(profit, weight, max_weight), 210)

    def test_negative_max_weight(self):
        self.assertRaisesRegex(ValueError, "max_weight must greater than zero.")

    def test_negative_weight_value(self):
        self.assertRaisesRegex(ValueError, "Weight can not be negative.")

    def test_negative_profit_value(self):
        self.assertRaisesRegex(ValueError, "Profit can not be negative.")

    def test_null_max_weight(self):
        self.assertRaisesRegex(ValueError, "max_weight must greater than zero.")

    def test_unequal_list_length(self):
        self.assertRaisesRegex(IndexError, "The length of profit and weight must be same.")
if __name__ == "__main__":
unittest.main()
| 200 | 0 |
from math import sqrt
def solution(limit: int = 1000000) -> int:
    """
    Return the smallest cuboid size M for which the number of axis-aligned
    cuboids with integer shortest surface-path between opposite corners
    exceeds `limit` (Project Euler problem 86).
    """
    num_cuboids: int = 0
    max_cuboid_size: int = 0
    sum_shortest_sides: int
    while num_cuboids <= limit:
        max_cuboid_size += 1
        for sum_shortest_sides in range(2, 2 * max_cuboid_size + 1):
            if sqrt(sum_shortest_sides**2 + max_cuboid_size**2).is_integer():
                num_cuboids += (
                    min(max_cuboid_size, sum_shortest_sides // 2)
                    - max(1, sum_shortest_sides - max_cuboid_size)
                    + 1
                )

    return max_cuboid_size
if __name__ == "__main__":
print(F"{solution() = }")
| 359 |
import numpy
class TwoHiddenLayerNeuralNetwork:
    def __init__(self, input_array: numpy.ndarray, output_array: numpy.ndarray) -> None:
        self.input_array = input_array

        # Random initial weights are assigned where first argument is the
        # number of nodes in previous layer and second argument is the
        # number of nodes in the next layer.

        # self.input_array.shape[1] is used to represent number of nodes in input layer.
        # First hidden layer consists of 4 nodes.
        self.input_layer_and_first_hidden_layer_weights = numpy.random.rand(self.input_array.shape[1], 4)

        # Random initial values for the first hidden layer.
        # First hidden layer has 4 nodes.
        # Second hidden layer has 3 nodes.
        self.first_hidden_layer_and_second_hidden_layer_weights = numpy.random.rand(4, 3)

        # Random initial values for the second hidden layer.
        # Second hidden layer has 3 nodes.
        # Output layer has 1 node.
        self.second_hidden_layer_and_output_layer_weights = numpy.random.rand(3, 1)

        # Real output values provided.
        self.output_array = output_array

        # Predicted output values by the neural network.
        # Predicted_output array initially consists of zeroes.
        self.predicted_output = numpy.zeros(output_array.shape)

    def feedforward(self) -> numpy.ndarray:
        self.layer_between_input_and_first_hidden_layer = sigmoid(
            numpy.dot(self.input_array, self.input_layer_and_first_hidden_layer_weights)
        )

        # layer_between_first_hidden_layer_and_second_hidden_layer is the layer
        # connecting the first hidden set of nodes with the second hidden set of nodes.
        self.layer_between_first_hidden_layer_and_second_hidden_layer = sigmoid(
            numpy.dot(
                self.layer_between_input_and_first_hidden_layer,
                self.first_hidden_layer_and_second_hidden_layer_weights,
            )
        )

        # layer_between_second_hidden_layer_and_output is the layer connecting
        # second hidden layer with the output node.
        self.layer_between_second_hidden_layer_and_output = sigmoid(
            numpy.dot(
                self.layer_between_first_hidden_layer_and_second_hidden_layer,
                self.second_hidden_layer_and_output_layer_weights,
            )
        )

        return self.layer_between_second_hidden_layer_and_output

    def back_propagation(self) -> None:
        updated_second_hidden_layer_and_output_layer_weights = numpy.dot(
            self.layer_between_first_hidden_layer_and_second_hidden_layer.T,
            2 * (self.output_array - self.predicted_output) * sigmoid_derivative(self.predicted_output),
        )
        updated_first_hidden_layer_and_second_hidden_layer_weights = numpy.dot(
            self.layer_between_input_and_first_hidden_layer.T,
            numpy.dot(
                2 * (self.output_array - self.predicted_output) * sigmoid_derivative(self.predicted_output),
                self.second_hidden_layer_and_output_layer_weights.T,
            )
            * sigmoid_derivative(self.layer_between_first_hidden_layer_and_second_hidden_layer),
        )
        updated_input_layer_and_first_hidden_layer_weights = numpy.dot(
            self.input_array.T,
            numpy.dot(
                numpy.dot(
                    2 * (self.output_array - self.predicted_output) * sigmoid_derivative(self.predicted_output),
                    self.second_hidden_layer_and_output_layer_weights.T,
                )
                * sigmoid_derivative(self.layer_between_first_hidden_layer_and_second_hidden_layer),
                self.first_hidden_layer_and_second_hidden_layer_weights.T,
            )
            * sigmoid_derivative(self.layer_between_input_and_first_hidden_layer),
        )

        self.input_layer_and_first_hidden_layer_weights += updated_input_layer_and_first_hidden_layer_weights
        self.first_hidden_layer_and_second_hidden_layer_weights += (
            updated_first_hidden_layer_and_second_hidden_layer_weights
        )
        self.second_hidden_layer_and_output_layer_weights += updated_second_hidden_layer_and_output_layer_weights

    def train(self, output: numpy.ndarray, iterations: int, give_loss: bool) -> None:
        for iteration in range(1, iterations + 1):
            self.predicted_output = self.feedforward()
            self.back_propagation()
            if give_loss:
                loss = numpy.mean(numpy.square(output - self.feedforward()))
                print(f"Iteration {iteration} Loss: {loss}")

    def predict(self, input_arr: numpy.ndarray) -> int:
        # Input values for which the prediction is to be made.
        self.array = input_arr

        self.layer_between_input_and_first_hidden_layer = sigmoid(
            numpy.dot(self.array, self.input_layer_and_first_hidden_layer_weights)
        )
        self.layer_between_first_hidden_layer_and_second_hidden_layer = sigmoid(
            numpy.dot(
                self.layer_between_input_and_first_hidden_layer,
                self.first_hidden_layer_and_second_hidden_layer_weights,
            )
        )
        self.layer_between_second_hidden_layer_and_output = sigmoid(
            numpy.dot(
                self.layer_between_first_hidden_layer_and_second_hidden_layer,
                self.second_hidden_layer_and_output_layer_weights,
            )
        )

        return int(self.layer_between_second_hidden_layer_and_output > 0.6)


def sigmoid(value: numpy.ndarray) -> numpy.ndarray:
    return 1 / (1 + numpy.exp(-value))


def sigmoid_derivative(value: numpy.ndarray) -> numpy.ndarray:
    # `value` is expected to already be a sigmoid activation: s * (1 - s).
    return (value) * (1 - (value))


def example() -> int:
    # Input values.
    test_input = numpy.array(
        (
            [0, 0, 0],
            [0, 0, 1],
            [0, 1, 0],
            [0, 1, 1],
            [1, 0, 0],
            [1, 0, 1],
            [1, 1, 0],
            [1, 1, 1],
        ),
        dtype=numpy.float64,
    )

    # True output values for the given input values.
    output = numpy.array(([0], [1], [1], [0], [1], [0], [0], [1]), dtype=numpy.float64)

    # Calling neural network class.
    neural_network = TwoHiddenLayerNeuralNetwork(input_array=test_input, output_array=output)

    # Calling training function.
    # Set give_loss to True if you want to see loss in every iteration.
    neural_network.train(output=output, iterations=10, give_loss=False)
    return neural_network.predict(numpy.array(([1, 1, 1]), dtype=numpy.float64))
if __name__ == "__main__":
example()
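# Numerical sanity check (illustrative, not part of the original file):
# sigmoid_derivative() expects the *activated* value, i.e. it computes
# s * (1 - s) for s = sigmoid(x); back_propagation() relies on this identity.
if __name__ == "__main__":
    xs = numpy.linspace(-3.0, 3.0, 7)
    eps = 1e-6
    analytic = sigmoid_derivative(sigmoid(xs))
    numeric = (sigmoid(xs + eps) - sigmoid(xs - eps)) / (2 * eps)
    assert numpy.allclose(analytic, numeric, atol=1e-6)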
| 41 | 0 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
    "configuration_git": ["GIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "GitConfig", "GitVisionConfig"],
    "processing_git": ["GitProcessor"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_git"] = [
        "GIT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "GitForCausalLM",
        "GitModel",
        "GitPreTrainedModel",
        "GitVisionModel",
    ]
if TYPE_CHECKING:
from .configuration_git import GIT_PRETRAINED_CONFIG_ARCHIVE_MAP, GitConfig, GitVisionConfig
from .processing_git import GitProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_git import (
GIT_PRETRAINED_MODEL_ARCHIVE_LIST,
GitForCausalLM,
GitModel,
GitPreTrainedModel,
GitVisionModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
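# Illustrative note (not part of the original file): with the lazy module in
# place, importing GIT symbols defers the heavy torch-backed import until first
# attribute access. The checkpoint name below is only an example.
#
#     from transformers import AutoProcessor, GitForCausalLM
#
#     processor = AutoProcessor.from_pretrained("microsoft/git-base")
#     model = GitForCausalLM.from_pretrained("microsoft/git-base")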
import warnings
from ...utils import logging
from .image_processing_chinese_clip import ChineseCLIPImageProcessor
logger = logging.get_logger(__name__)


class ChineseCLIPFeatureExtractor(ChineseCLIPImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class ChineseCLIPFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use ChineseCLIPImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
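# Illustrative sketch (not part of the original file): instantiating the shim
# emits the FutureWarning; new code should construct the image processor
# directly. Kept commented since this module lives inside the library.
#
#     import warnings
#
#     with warnings.catch_warnings(record=True) as caught:
#         warnings.simplefilter("always")
#         ChineseCLIPFeatureExtractor()
#     assert any(issubclass(w.category, FutureWarning) for w in caught)
#
#     image_processor = ChineseCLIPImageProcessor()  # preferred replacement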
import argparse
from collections import OrderedDict
from pathlib import Path
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from torchvision.transforms import functional as F
from transformers import DetrImageProcessor, TableTransformerConfig, TableTransformerForObjectDetection
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
# here we list all keys to be renamed (original name on the left, our name on the right)
rename_keys = []
for i in range(6):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append(
(f"""transformer.encoder.layers.{i}.self_attn.out_proj.weight""", f"""encoder.layers.{i}.self_attn.out_proj.weight""")
)
rename_keys.append(
(f"""transformer.encoder.layers.{i}.self_attn.out_proj.bias""", f"""encoder.layers.{i}.self_attn.out_proj.bias""")
)
rename_keys.append((f"""transformer.encoder.layers.{i}.linear1.weight""", f"""encoder.layers.{i}.fc1.weight"""))
rename_keys.append((f"""transformer.encoder.layers.{i}.linear1.bias""", f"""encoder.layers.{i}.fc1.bias"""))
rename_keys.append((f"""transformer.encoder.layers.{i}.linear2.weight""", f"""encoder.layers.{i}.fc2.weight"""))
rename_keys.append((f"""transformer.encoder.layers.{i}.linear2.bias""", f"""encoder.layers.{i}.fc2.bias"""))
rename_keys.append(
(f"""transformer.encoder.layers.{i}.norm1.weight""", f"""encoder.layers.{i}.self_attn_layer_norm.weight""")
)
rename_keys.append((f"""transformer.encoder.layers.{i}.norm1.bias""", f"""encoder.layers.{i}.self_attn_layer_norm.bias"""))
rename_keys.append((f"""transformer.encoder.layers.{i}.norm2.weight""", f"""encoder.layers.{i}.final_layer_norm.weight"""))
rename_keys.append((f"""transformer.encoder.layers.{i}.norm2.bias""", f"""encoder.layers.{i}.final_layer_norm.bias"""))
# decoder layers: 2 times output projection, 2 feedforward neural networks and 3 layernorms
rename_keys.append(
(f"""transformer.decoder.layers.{i}.self_attn.out_proj.weight""", f"""decoder.layers.{i}.self_attn.out_proj.weight""")
)
rename_keys.append(
(f"""transformer.decoder.layers.{i}.self_attn.out_proj.bias""", f"""decoder.layers.{i}.self_attn.out_proj.bias""")
)
rename_keys.append(
(
f"""transformer.decoder.layers.{i}.multihead_attn.out_proj.weight""",
f"""decoder.layers.{i}.encoder_attn.out_proj.weight""",
)
)
rename_keys.append(
(
f"""transformer.decoder.layers.{i}.multihead_attn.out_proj.bias""",
f"""decoder.layers.{i}.encoder_attn.out_proj.bias""",
)
)
rename_keys.append((f"""transformer.decoder.layers.{i}.linear1.weight""", f"""decoder.layers.{i}.fc1.weight"""))
rename_keys.append((f"""transformer.decoder.layers.{i}.linear1.bias""", f"""decoder.layers.{i}.fc1.bias"""))
rename_keys.append((f"""transformer.decoder.layers.{i}.linear2.weight""", f"""decoder.layers.{i}.fc2.weight"""))
rename_keys.append((f"""transformer.decoder.layers.{i}.linear2.bias""", f"""decoder.layers.{i}.fc2.bias"""))
rename_keys.append(
(f"""transformer.decoder.layers.{i}.norm1.weight""", f"""decoder.layers.{i}.self_attn_layer_norm.weight""")
)
rename_keys.append((f"""transformer.decoder.layers.{i}.norm1.bias""", f"""decoder.layers.{i}.self_attn_layer_norm.bias"""))
rename_keys.append(
(f"""transformer.decoder.layers.{i}.norm2.weight""", f"""decoder.layers.{i}.encoder_attn_layer_norm.weight""")
)
rename_keys.append(
(f"""transformer.decoder.layers.{i}.norm2.bias""", f"""decoder.layers.{i}.encoder_attn_layer_norm.bias""")
)
rename_keys.append((f"""transformer.decoder.layers.{i}.norm3.weight""", f"""decoder.layers.{i}.final_layer_norm.weight"""))
rename_keys.append((f"""transformer.decoder.layers.{i}.norm3.bias""", f"""decoder.layers.{i}.final_layer_norm.bias"""))
# convolutional projection + query embeddings + layernorm of encoder + layernorm of decoder + class and bounding box heads
rename_keys.extend(
[
('input_proj.weight', 'input_projection.weight'),
('input_proj.bias', 'input_projection.bias'),
('query_embed.weight', 'query_position_embeddings.weight'),
('transformer.encoder.norm.weight', 'encoder.layernorm.weight'),
('transformer.encoder.norm.bias', 'encoder.layernorm.bias'),
('transformer.decoder.norm.weight', 'decoder.layernorm.weight'),
('transformer.decoder.norm.bias', 'decoder.layernorm.bias'),
('class_embed.weight', 'class_labels_classifier.weight'),
('class_embed.bias', 'class_labels_classifier.bias'),
('bbox_embed.layers.0.weight', 'bbox_predictor.layers.0.weight'),
('bbox_embed.layers.0.bias', 'bbox_predictor.layers.0.bias'),
('bbox_embed.layers.1.weight', 'bbox_predictor.layers.1.weight'),
('bbox_embed.layers.1.bias', 'bbox_predictor.layers.1.bias'),
('bbox_embed.layers.2.weight', 'bbox_predictor.layers.2.weight'),
('bbox_embed.layers.2.bias', 'bbox_predictor.layers.2.bias'),
]
)
def rename_key(state_dict, old, new):
    val = state_dict.pop(old)
    state_dict[new] = val


def rename_backbone_keys(state_dict):
    new_state_dict = OrderedDict()
    for key, value in state_dict.items():
        if "backbone.0.body" in key:
            new_key = key.replace("backbone.0.body", "backbone.conv_encoder.model")
            new_state_dict[new_key] = value
        else:
            new_state_dict[key] = value

    return new_state_dict


def read_in_q_k_v(state_dict):
    prefix = ""

    # first: transformer encoder
    for i in range(6):
        # read in weights + bias of input projection layer (in PyTorch's MultiHeadAttention, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_weight")
        in_proj_bias = state_dict.pop(f"{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"encoder.layers.{i}.self_attn.q_proj.weight"] = in_proj_weight[:256, :]
        state_dict[f"encoder.layers.{i}.self_attn.q_proj.bias"] = in_proj_bias[:256]
        state_dict[f"encoder.layers.{i}.self_attn.k_proj.weight"] = in_proj_weight[256:512, :]
        state_dict[f"encoder.layers.{i}.self_attn.k_proj.bias"] = in_proj_bias[256:512]
        state_dict[f"encoder.layers.{i}.self_attn.v_proj.weight"] = in_proj_weight[-256:, :]
        state_dict[f"encoder.layers.{i}.self_attn.v_proj.bias"] = in_proj_bias[-256:]

    # next: transformer decoder (which is a bit more complex because it also includes cross-attention)
    for i in range(6):
        # read in weights + bias of input projection layer of self-attention
        in_proj_weight = state_dict.pop(f"{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_weight")
        in_proj_bias = state_dict.pop(f"{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"decoder.layers.{i}.self_attn.q_proj.weight"] = in_proj_weight[:256, :]
        state_dict[f"decoder.layers.{i}.self_attn.q_proj.bias"] = in_proj_bias[:256]
        state_dict[f"decoder.layers.{i}.self_attn.k_proj.weight"] = in_proj_weight[256:512, :]
        state_dict[f"decoder.layers.{i}.self_attn.k_proj.bias"] = in_proj_bias[256:512]
        state_dict[f"decoder.layers.{i}.self_attn.v_proj.weight"] = in_proj_weight[-256:, :]
        state_dict[f"decoder.layers.{i}.self_attn.v_proj.bias"] = in_proj_bias[-256:]
        # read in weights + bias of input projection layer of cross-attention
        in_proj_weight_cross_attn = state_dict.pop(
            f"{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_weight"
        )
        in_proj_bias_cross_attn = state_dict.pop(
            f"{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_bias"
        )
        # next, add query, keys and values (in that order) of cross-attention to the state dict
        state_dict[f"decoder.layers.{i}.encoder_attn.q_proj.weight"] = in_proj_weight_cross_attn[:256, :]
        state_dict[f"decoder.layers.{i}.encoder_attn.q_proj.bias"] = in_proj_bias_cross_attn[:256]
        state_dict[f"decoder.layers.{i}.encoder_attn.k_proj.weight"] = in_proj_weight_cross_attn[256:512, :]
        state_dict[f"decoder.layers.{i}.encoder_attn.k_proj.bias"] = in_proj_bias_cross_attn[256:512]
        state_dict[f"decoder.layers.{i}.encoder_attn.v_proj.weight"] = in_proj_weight_cross_attn[-256:, :]
        state_dict[f"decoder.layers.{i}.encoder_attn.v_proj.bias"] = in_proj_bias_cross_attn[-256:]


def resize(image, checkpoint_url):
    width, height = image.size
    current_max_size = max(width, height)
    target_max_size = 800 if "detection" in checkpoint_url else 1000
    scale = target_max_size / current_max_size
    resized_image = image.resize((int(round(scale * width)), int(round(scale * height))))

    return resized_image


def normalize(image):
    image = F.to_tensor(image)
    image = F.normalize(image, mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])

    return image
@torch.no_grad()
def convert_table_transformer_checkpoint(checkpoint_url, pytorch_dump_folder_path, push_to_hub):
    """
    Copy/paste/tweak the original Table Transformer weights to our structure.
    """
    logger.info("Converting model...")

    # load original state dict
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")

    # rename keys
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    state_dict = rename_backbone_keys(state_dict)

    # query, key and value matrices need special treatment
    read_in_q_k_v(state_dict)

    # important: we need to prepend a prefix to each of the base model keys as the head models use different attributes for them
    prefix = "model."
    for key in state_dict.copy().keys():
        if not key.startswith("class_labels_classifier") and not key.startswith("bbox_predictor"):
            val = state_dict.pop(key)
            state_dict[prefix + key] = val

    # create HuggingFace model and load state dict
    config = TableTransformerConfig(
        backbone="resnet18",
        mask_loss_coefficient=1,
        dice_loss_coefficient=1,
        ce_loss_coefficient=1,
        bbox_loss_coefficient=5,
        giou_loss_coefficient=2,
        eos_coefficient=0.4,
        class_cost=1,
        bbox_cost=5,
        giou_cost=2,
    )

    if "detection" in checkpoint_url:
        config.num_queries = 15
        config.num_labels = 2
        id2label = {0: "table", 1: "table rotated"}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}
    else:
        config.num_queries = 125
        config.num_labels = 6
        id2label = {
            0: "table",
            1: "table column",
            2: "table row",
            3: "table column header",
            4: "table projected row header",
            5: "table spanning cell",
        }
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}

    image_processor = DetrImageProcessor(
        format="coco_detection", max_size=800 if "detection" in checkpoint_url else 1000
    )
    model = TableTransformerForObjectDetection(config)
    model.load_state_dict(state_dict)
    model.eval()

    # verify our conversion
    filename = "example_pdf.png" if "detection" in checkpoint_url else "example_table.png"
    file_path = hf_hub_download(repo_id="nielsr/example-pdf", repo_type="dataset", filename=filename)
    image = Image.open(file_path).convert("RGB")
    pixel_values = normalize(resize(image, checkpoint_url)).unsqueeze(0)

    outputs = model(pixel_values)

    if "detection" in checkpoint_url:
        expected_shape = (1, 15, 3)
        expected_logits = torch.tensor(
            [[-6.7897, -16.9985, 6.7937], [-8.0186, -22.2192, 6.9677], [-7.3117, -21.0708, 7.4055]]
        )
        expected_boxes = torch.tensor([[0.4867, 0.1767, 0.6732], [0.6718, 0.4479, 0.3830], [0.4716, 0.1760, 0.6364]])
    else:
        expected_shape = (1, 125, 7)
        expected_logits = torch.tensor(
            [[-18.1430, -8.3214, 4.8274], [-18.4685, -7.1361, -4.2667], [-26.3693, -9.3429, -4.9962]]
        )
        expected_boxes = torch.tensor([[0.4983, 0.5595, 0.9440], [0.4916, 0.6315, 0.5954], [0.6108, 0.8637, 0.1135]])

    assert outputs.logits.shape == expected_shape
    assert torch.allclose(outputs.logits[0, :3, :3], expected_logits, atol=1e-4)
    assert torch.allclose(outputs.pred_boxes[0, :3, :3], expected_boxes, atol=1e-4)
    print("Looks ok!")

    if pytorch_dump_folder_path is not None:
        # Save model and image processor
        logger.info(f"Saving PyTorch model and image processor to {pytorch_dump_folder_path}...")
        Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
        model.save_pretrained(pytorch_dump_folder_path)
        image_processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        # Push model to HF hub
        logger.info("Pushing model to the hub...")
        model_name = (
            "microsoft/table-transformer-detection"
            if "detection" in checkpoint_url
            else "microsoft/table-transformer-structure-recognition"
        )
        model.push_to_hub(model_name)
        image_processor.push_to_hub(model_name)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'--checkpoint_url',
default='https://pubtables1m.blob.core.windows.net/model/pubtables1m_detection_detr_r18.pth',
type=str,
choices=[
'https://pubtables1m.blob.core.windows.net/model/pubtables1m_detection_detr_r18.pth',
'https://pubtables1m.blob.core.windows.net/model/pubtables1m_structure_detr_r18.pth',
],
help='URL of the Table Transformer checkpoint you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the folder to output PyTorch model.'
)
parser.add_argument(
'--push_to_hub', action='store_true', help='Whether or not to push the converted model to the 🤗 hub.'
)
lowerCAmelCase = parser.parse_args()
convert_table_transformer_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub)
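# Minimal invocation sketch for the converter above (the script filename is
# illustrative; the flags match the argparse definitions):
#   python convert_table_transformer.py \
#       --checkpoint_url https://pubtables1m.blob.core.windows.net/model/pubtables1m_detection_detr_r18.pth \
#       --pytorch_dump_folder_path ./table-transformer-detection \
#       --push_to_hub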
| 359 |
from abc import ABC, abstractmethod
from typing import List, Optional
class _a ( UpperCamelCase__ ):
def __init__( self: Optional[int] ) -> Union[str, Any]:
"""simple docstring"""
self.test()
def lowerCamelCase_ ( self: List[Any] ) -> List[Any]:
"""simple docstring"""
lowercase__ = 0
lowercase__ = False
while not completed:
if counter == 1:
self.reset()
lowercase__ = self.advance()
if not self.does_advance(UpperCamelCase_ ):
raise Exception(
'''Custom Constraint is not defined correctly. self.does_advance(self.advance()) must be true.''' )
lowercase__ , lowercase__ , lowercase__ = self.update(UpperCamelCase_ )
counter += 1
if counter > 10_000:
raise Exception('''update() does not fulfill the constraint.''' )
if self.remaining() != 0:
raise Exception('''Custom Constraint is not defined correctly.''' )
@abstractmethod
def lowerCamelCase_ ( self: int ) -> Any:
"""simple docstring"""
raise NotImplementedError(
f'{self.__class__} is an abstract class. Only classes inheriting this class can be called.' )
@abstractmethod
def lowerCamelCase_ ( self: Tuple , UpperCamelCase_: int ) -> Optional[int]:
"""simple docstring"""
raise NotImplementedError(
f'{self.__class__} is an abstract class. Only classes inheriting this class can be called.' )
@abstractmethod
def lowerCamelCase_ ( self: Optional[int] , UpperCamelCase_: int ) -> str:
"""simple docstring"""
raise NotImplementedError(
f'{self.__class__} is an abstract class. Only classes inheriting this class can be called.' )
@abstractmethod
def lowerCamelCase_ ( self: Tuple ) -> List[Any]:
"""simple docstring"""
raise NotImplementedError(
f'{self.__class__} is an abstract class. Only classes inheriting this class can be called.' )
@abstractmethod
def lowerCamelCase_ ( self: Any ) -> Tuple:
"""simple docstring"""
raise NotImplementedError(
f'{self.__class__} is an abstract class. Only classes inheriting this class can be called.' )
@abstractmethod
def lowerCamelCase_ ( self: Tuple , UpperCamelCase_: int=False ) -> Any:
"""simple docstring"""
raise NotImplementedError(
f'{self.__class__} is an abstract class. Only classes inheriting this class can be called.' )
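# A hedged, standalone sketch of the protocol the self-test above exercises.
# The call sites inside the test loop reveal the expected method names
# (reset/advance/does_advance/update/remaining) even though the definitions
# were renamed; the class below is illustrative and not part of the original file.
class _SingleTokenConstraintSketch:
    """Fulfilled by emitting any one token from an allowed set."""

    def __init__(self, allowed_token_ids):
        self.allowed_token_ids = list(allowed_token_ids)
        self.seqlen = 1
        self.completed = False

    def reset(self):
        self.completed = False

    def advance(self):
        # the next token that would make progress; None once fulfilled
        return None if self.completed else self.allowed_token_ids[0]

    def does_advance(self, token_id):
        return not self.completed and token_id in self.allowed_token_ids

    def update(self, token_id):
        # returns (stepped, completed, reset), mirroring the contract above
        if self.does_advance(token_id):
            self.completed = True
            return True, True, False
        self.reset()
        return False, False, True

    def remaining(self):
        return 0 if self.completed else 1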
class _a ( UpperCamelCase__ ):
def __init__( self: str , UpperCamelCase_: List[int] ) -> Tuple:
"""simple docstring"""
super(UpperCamelCase_ , self ).__init__()
if not isinstance(UpperCamelCase_ , UpperCamelCase_ ) or len(UpperCamelCase_ ) == 0:
raise ValueError(f'`token_ids` has to be a non-empty list, but is {token_ids}.' )
if any((not isinstance(UpperCamelCase_ , UpperCamelCase_ ) or token_id < 0) for token_id in token_ids ):
raise ValueError(f'Each list in `token_ids` has to be a list of positive integers, but is {token_ids}.' )
lowercase__ = token_ids
lowercase__ = len(self.token_ids )
lowercase__ = -1 # the index of the currently fulfilled step
lowercase__ = False
def lowerCamelCase_ ( self: Tuple ) -> Tuple:
"""simple docstring"""
if self.completed:
return None
return self.token_ids[self.fulfilled_idx + 1]
def lowerCamelCase_ ( self: Union[str, Any] , UpperCamelCase_: int ) -> Optional[Any]:
"""simple docstring"""
if not isinstance(UpperCamelCase_ , UpperCamelCase_ ):
raise ValueError(f'`token_id` has to be an `int`, but is {token_id} of type {type(UpperCamelCase_ )}' )
if self.completed:
return False
return token_id == self.token_ids[self.fulfilled_idx + 1]
def lowerCamelCase_ ( self: Optional[int] , UpperCamelCase_: int ) -> Dict:
"""simple docstring"""
if not isinstance(UpperCamelCase_ , UpperCamelCase_ ):
raise ValueError(f'`token_id` has to be an `int`, but is {token_id} of type {type(UpperCamelCase_ )}' )
lowercase__ = False
lowercase__ = False
lowercase__ = False
if self.does_advance(UpperCamelCase_ ):
self.fulfilled_idx += 1
lowercase__ = True
if self.fulfilled_idx == (self.seqlen - 1):
lowercase__ = True
lowercase__ = completed
else:
# failed to make progress.
lowercase__ = True
self.reset()
return stepped, completed, reset
def lowerCamelCase_ ( self: Tuple ) -> int:
"""simple docstring"""
lowercase__ = False
lowercase__ = 0
def lowerCamelCase_ ( self: Any ) -> Dict:
"""simple docstring"""
return self.seqlen - (self.fulfilled_idx + 1)
def lowerCamelCase_ ( self: Any , UpperCamelCase_: Union[str, Any]=False ) -> Tuple:
"""simple docstring"""
lowercase__ = PhrasalConstraint(self.token_ids )
if stateful:
lowercase__ = self.seqlen
lowercase__ = self.fulfilled_idx
lowercase__ = self.completed
return new_constraint
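# Behaviour sketch for the phrasal constraint above (token ids illustrative);
# update() returns (stepped, completed, reset) triples:
#   c = PhrasalConstraint([5, 7, 9])
#   c.update(5) -> (True, False, False)   c.remaining() == 2
#   c.update(7) -> (True, False, False)   c.remaining() == 1
#   c.update(9) -> (True, True,  False)   c.remaining() == 0
# Feeding a token that does not match the next phrase position instead
# returns (False, False, True) and resets progress to the start.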
class _a :
def __init__( self: Union[str, Any] , UpperCamelCase_: List[List[int]] , UpperCamelCase_: int=True ) -> int:
"""simple docstring"""
lowercase__ = max([len(UpperCamelCase_ ) for one in nested_token_ids] )
lowercase__ = {}
for token_ids in nested_token_ids:
lowercase__ = root
for tidx, token_id in enumerate(UpperCamelCase_ ):
if token_id not in level:
lowercase__ = {}
lowercase__ = level[token_id]
if no_subsets and self.has_subsets(UpperCamelCase_ , UpperCamelCase_ ):
raise ValueError(
'''Each list in `nested_token_ids` can\'t be a complete subset of another list, but is'''
f' {nested_token_ids}.' )
lowercase__ = root
def lowerCamelCase_ ( self: int , UpperCamelCase_: str ) -> Any:
"""simple docstring"""
lowercase__ = self.trie
for current_token in current_seq:
lowercase__ = start[current_token]
lowercase__ = list(start.keys() )
return next_tokens
def lowerCamelCase_ ( self: Any , UpperCamelCase_: Tuple ) -> Optional[Any]:
"""simple docstring"""
lowercase__ = self.next_tokens(UpperCamelCase_ )
return len(UpperCamelCase_ ) == 0
def lowerCamelCase_ ( self: List[str] , UpperCamelCase_: Optional[Any] ) -> Any:
"""simple docstring"""
lowercase__ = list(root.values() )
if len(UpperCamelCase_ ) == 0:
return 1
else:
return sum([self.count_leaves(UpperCamelCase_ ) for nn in next_nodes] )
def lowerCamelCase_ ( self: Any , UpperCamelCase_: Union[str, Any] , UpperCamelCase_: int ) -> Tuple:
"""simple docstring"""
lowercase__ = self.count_leaves(UpperCamelCase_ )
return len(UpperCamelCase_ ) != leaf_count
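# Behaviour sketch for the trie above (token ids illustrative):
#   trie = DisjunctiveTrie([[1, 2, 3], [1, 2, 4]])
#   trie.next_tokens([1, 2])      -> [3, 4]   children after the shared prefix
#   trie.reached_leaf([1, 2, 3])  -> True     a full branch was consumed
# Building it from [[1, 2], [1, 2, 3]] raises ValueError, because [1, 2] is a
# complete subset of [1, 2, 3] and the subset check above rejects that.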
class _a ( UpperCamelCase__ ):
def __init__( self: Optional[int] , UpperCamelCase_: List[List[int]] ) -> List[Any]:
"""simple docstring"""
super(UpperCamelCase_ , self ).__init__()
if not isinstance(UpperCamelCase_ , UpperCamelCase_ ) or len(UpperCamelCase_ ) == 0:
raise ValueError(f'`nested_token_ids` has to be a non-empty list, but is {nested_token_ids}.' )
if any(not isinstance(UpperCamelCase_ , UpperCamelCase_ ) for token_ids in nested_token_ids ):
raise ValueError(f'`nested_token_ids` has to be a list of lists, but is {nested_token_ids}.' )
if any(
any((not isinstance(UpperCamelCase_ , UpperCamelCase_ ) or token_id < 0) for token_id in token_ids )
for token_ids in nested_token_ids ):
raise ValueError(
f'Each list in `nested_token_ids` has to be a list of positive integers, but is {nested_token_ids}.' )
lowercase__ = DisjunctiveTrie(UpperCamelCase_ )
lowercase__ = nested_token_ids
lowercase__ = self.trie.max_height
lowercase__ = []
lowercase__ = False
def lowerCamelCase_ ( self: Union[str, Any] ) -> str:
"""simple docstring"""
lowercase__ = self.trie.next_tokens(self.current_seq )
if len(UpperCamelCase_ ) == 0:
return None
else:
return token_list
def lowerCamelCase_ ( self: str , UpperCamelCase_: int ) -> Tuple:
"""simple docstring"""
if not isinstance(UpperCamelCase_ , UpperCamelCase_ ):
raise ValueError(f'`token_id` is supposed to be type `int`, but is {token_id} of type {type(UpperCamelCase_ )}' )
lowercase__ = self.trie.next_tokens(self.current_seq )
return token_id in next_tokens
def lowerCamelCase_ ( self: int , UpperCamelCase_: int ) -> Dict:
"""simple docstring"""
if not isinstance(UpperCamelCase_ , UpperCamelCase_ ):
raise ValueError(f'`token_id` is supposed to be type `int`, but is {token_id} of type {type(UpperCamelCase_ )}' )
lowercase__ = False
lowercase__ = False
lowercase__ = False
if self.does_advance(UpperCamelCase_ ):
self.current_seq.append(UpperCamelCase_ )
lowercase__ = True
else:
lowercase__ = True
self.reset()
lowercase__ = self.trie.reached_leaf(self.current_seq )
lowercase__ = completed
return stepped, completed, reset
def lowerCamelCase_ ( self: List[str] ) -> List[Any]:
"""simple docstring"""
lowercase__ = False
lowercase__ = []
def lowerCamelCase_ ( self: List[str] ) -> Optional[Any]:
"""simple docstring"""
if self.completed:
# since this can be completed without reaching max height
return 0
else:
return self.seqlen - len(self.current_seq )
def lowerCamelCase_ ( self: Any , UpperCamelCase_: Optional[Any]=False ) -> str:
"""simple docstring"""
lowercase__ = DisjunctiveConstraint(self.token_ids )
if stateful:
lowercase__ = self.seqlen
lowercase__ = self.current_seq
lowercase__ = self.completed
return new_constraint
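# Usage sketch for the disjunctive constraint above (token ids illustrative):
# it is satisfied by completing *any one* branch of its trie.
#   c = DisjunctiveConstraint([[1, 2, 3], [1, 4]])
#   c.advance()  -> [1]       the only legal first token
#   c.update(1)  -> (True, False, False)
#   c.advance()  -> [2, 4]    either branch may continue
#   c.update(4)  -> (True, True, False)   the short branch completes it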
class _a :
def __init__( self: int , UpperCamelCase_: List[Constraint] ) -> Dict:
"""simple docstring"""
lowercase__ = constraints
# max # of steps required to fulfill a given constraint
lowercase__ = max([c.seqlen for c in constraints] )
lowercase__ = len(UpperCamelCase_ )
lowercase__ = False
self.init_state()
def lowerCamelCase_ ( self: List[str] ) -> str:
"""simple docstring"""
lowercase__ = []
lowercase__ = None
lowercase__ = [constraint.copy(stateful=UpperCamelCase_ ) for constraint in self.constraints]
def lowerCamelCase_ ( self: str ) -> Any:
"""simple docstring"""
lowercase__ = 0
if self.inprogress_constraint:
# extra points for having a constraint mid-fulfilled
add += self.max_seqlen - self.inprogress_constraint.remaining()
return (len(self.complete_constraints ) * self.max_seqlen) + add
def lowerCamelCase_ ( self: Union[str, Any] ) -> List[Any]:
"""simple docstring"""
lowercase__ = []
if self.inprogress_constraint is None:
for constraint in self.pending_constraints: # "pending" == "unfulfilled yet"
lowercase__ = constraint.advance()
if isinstance(UpperCamelCase_ , UpperCamelCase_ ):
token_list.append(UpperCamelCase_ )
elif isinstance(UpperCamelCase_ , UpperCamelCase_ ):
token_list.extend(UpperCamelCase_ )
else:
lowercase__ = self.inprogress_constraint.advance()
if isinstance(UpperCamelCase_ , UpperCamelCase_ ):
token_list.append(UpperCamelCase_ )
elif isinstance(UpperCamelCase_ , UpperCamelCase_ ):
token_list.extend(UpperCamelCase_ )
if len(UpperCamelCase_ ) == 0:
return None
else:
return token_list
def lowerCamelCase_ ( self: Optional[Any] , UpperCamelCase_: Optional[List[int]] ) -> Optional[int]:
"""simple docstring"""
self.init_state()
if token_ids is not None:
for token in token_ids:
# completes or steps **one** constraint
lowercase__ , lowercase__ = self.add(UpperCamelCase_ )
# the entire list of constraints are fulfilled
if self.completed:
break
def lowerCamelCase_ ( self: Optional[Any] , UpperCamelCase_: int ) -> int:
"""simple docstring"""
if not isinstance(UpperCamelCase_ , UpperCamelCase_ ):
raise ValueError(f'`token_id` should be an `int`, but is `{token_id}`.' )
lowercase__ , lowercase__ = False, False
if self.completed:
lowercase__ = True
lowercase__ = False
return complete, stepped
if self.inprogress_constraint is not None:
            # In the middle of fulfilling a constraint. If the `token_id` *does* make incremental progress on the
            # current job, simply update the state
lowercase__ , lowercase__ , lowercase__ = self.inprogress_constraint.update(UpperCamelCase_ )
if reset:
# 1. If the next token breaks the progress, then we must restart.
# e.g. constraint = "I love pies" and sequence so far is "I love" but `token_id` == "books".
# But that doesn't mean we self.init_state(), since we only reset the state for this particular
# constraint, not the full list of constraints.
self.pending_constraints.append(self.inprogress_constraint.copy(stateful=UpperCamelCase_ ) )
lowercase__ = None
if complete:
# 2. If the next token completes the constraint, move it to completed list, set
# inprogress to None. If there are no pending constraints either, then this full list of constraints
# is complete.
self.complete_constraints.append(self.inprogress_constraint )
lowercase__ = None
if len(self.pending_constraints ) == 0:
# we're done!
lowercase__ = True
else:
            # Not in the middle of fulfilling a constraint. So does this `token_id` help us step towards any of the
            # constraints in our list?
for cidx, pending_constraint in enumerate(self.pending_constraints ):
if pending_constraint.does_advance(UpperCamelCase_ ):
lowercase__ , lowercase__ , lowercase__ = pending_constraint.update(UpperCamelCase_ )
if not stepped:
raise Exception(
'''`constraint.update(token_id)` is not yielding incremental progress, '''
'''even though `constraint.does_advance(token_id)` is true.''' )
if complete:
self.complete_constraints.append(UpperCamelCase_ )
lowercase__ = None
if not complete and stepped:
lowercase__ = pending_constraint
if complete or stepped:
# If we made any progress at all, then it's at least not a "pending constraint".
lowercase__ = (
self.pending_constraints[:cidx] + self.pending_constraints[cidx + 1 :]
)
if len(self.pending_constraints ) == 0 and self.inprogress_constraint is None:
# If there's no longer any pending after this and no inprogress either, then we must be
# complete.
lowercase__ = True
break # prevent accidentally stepping through multiple constraints with just one token.
return complete, stepped
def lowerCamelCase_ ( self: Any , UpperCamelCase_: Optional[Any]=True ) -> Dict:
"""simple docstring"""
        lowercase__ = ConstraintListState(self.constraints ) # we never mutate the self.constraints objects
        # themselves throughout this process, so they are still in their initialization state.
if stateful:
lowercase__ = [
constraint.copy(stateful=UpperCamelCase_ ) for constraint in self.complete_constraints
]
if self.inprogress_constraint is not None:
lowercase__ = self.inprogress_constraint.copy(stateful=UpperCamelCase_ )
lowercase__ = [constraint.copy() for constraint in self.pending_constraints]
return new_state
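# Orchestration sketch for the tracker above (token ids illustrative): it
# advances at most one constraint per token, and its scoring method banks
# len(complete_constraints) * max_seqlen plus any in-progress credit.
#   state = ConstraintListState([PhrasalConstraint([5, 7]), PhrasalConstraint([9])])
#   state.add(5)   -> (False, True)   stepped into the first phrase
#   state.add(7)   -> (True,  True)   first constraint completed
#   state.add(9)   -> (True,  True)   second completes; state.completed is True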
| 93 | 0 |
import os
def A_ ( ) -> int:
    with open(os.path.dirname(__file__ ) + "/grid.txt" ) as f:
UpperCamelCase : Optional[Any] = [] # noqa: E741
for _ in range(20 ):
l.append([int(_lowerCAmelCase ) for x in f.readline().split()] )
UpperCamelCase : Optional[Any] = 0
# right
for i in range(20 ):
for j in range(17 ):
UpperCamelCase : int = l[i][j] * l[i][j + 1] * l[i][j + 2] * l[i][j + 3]
if temp > maximum:
UpperCamelCase : Union[str, Any] = temp
# down
for i in range(17 ):
for j in range(20 ):
UpperCamelCase : str = l[i][j] * l[i + 1][j] * l[i + 2][j] * l[i + 3][j]
if temp > maximum:
UpperCamelCase : Optional[Any] = temp
# diagonal 1
for i in range(17 ):
for j in range(17 ):
UpperCamelCase : Union[str, Any] = l[i][j] * l[i + 1][j + 1] * l[i + 2][j + 2] * l[i + 3][j + 3]
if temp > maximum:
UpperCamelCase : List[Any] = temp
# diagonal 2
for i in range(17 ):
for j in range(3 , 20 ):
UpperCamelCase : int = l[i][j] * l[i + 1][j - 1] * l[i + 2][j - 2] * l[i + 3][j - 3]
if temp > maximum:
UpperCamelCase : Optional[int] = temp
return maximum
if __name__ == "__main__":
print(solution())
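# Input format note (an assumption based on the reader above): grid.txt holds
# the 20x20 grid from what appears to be Project Euler problem 11, one row per
# line with 20 space-separated two-digit integers, e.g.
#   08 02 22 97 38 15 00 40 00 75 04 05 07 78 52 12 50 77 91 08
# solution() returns the greatest product of four adjacent numbers in any
# direction (right, down, or either diagonal).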
| 52 |
"""simple docstring"""
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCAmelCase__ = logging.get_logger(__name__)
lowerCAmelCase__ = {
'''microsoft/unispeech-large-1500h-cv''': (
'''https://huggingface.co/microsoft/unispeech-large-1500h-cv/resolve/main/config.json'''
),
# See all UniSpeech models at https://huggingface.co/models?filter=unispeech
}
class __snake_case ( _lowercase):
snake_case__ : List[str] = "unispeech"
def __init__( self : List[str] , __lowerCAmelCase : List[Any]=3_2 , __lowerCAmelCase : str=7_6_8 , __lowerCAmelCase : int=1_2 , __lowerCAmelCase : int=1_2 , __lowerCAmelCase : int=3_0_7_2 , __lowerCAmelCase : Tuple="gelu" , __lowerCAmelCase : Dict=0.1 , __lowerCAmelCase : Dict=0.1 , __lowerCAmelCase : str=0.1 , __lowerCAmelCase : Tuple=0.0 , __lowerCAmelCase : Optional[int]=0.0 , __lowerCAmelCase : List[Any]=0.1 , __lowerCAmelCase : Optional[int]=0.1 , __lowerCAmelCase : Tuple=0.02 , __lowerCAmelCase : Dict=1E-5 , __lowerCAmelCase : Optional[int]="group" , __lowerCAmelCase : Dict="gelu" , __lowerCAmelCase : int=(5_1_2, 5_1_2, 5_1_2, 5_1_2, 5_1_2, 5_1_2, 5_1_2) , __lowerCAmelCase : Optional[int]=(5, 2, 2, 2, 2, 2, 2) , __lowerCAmelCase : Union[str, Any]=(1_0, 3, 3, 3, 3, 2, 2) , __lowerCAmelCase : List[Any]=False , __lowerCAmelCase : List[str]=1_2_8 , __lowerCAmelCase : Any=1_6 , __lowerCAmelCase : Optional[int]=False , __lowerCAmelCase : Optional[Any]=True , __lowerCAmelCase : Union[str, Any]=0.05 , __lowerCAmelCase : Union[str, Any]=1_0 , __lowerCAmelCase : List[Any]=2 , __lowerCAmelCase : Dict=0.0 , __lowerCAmelCase : Optional[int]=1_0 , __lowerCAmelCase : Dict=0 , __lowerCAmelCase : List[str]=3_2_0 , __lowerCAmelCase : List[Any]=2 , __lowerCAmelCase : Dict=0.1 , __lowerCAmelCase : Tuple=1_0_0 , __lowerCAmelCase : Dict=2_5_6 , __lowerCAmelCase : str=2_5_6 , __lowerCAmelCase : List[Any]=0.1 , __lowerCAmelCase : Dict="mean" , __lowerCAmelCase : Union[str, Any]=False , __lowerCAmelCase : Dict=False , __lowerCAmelCase : Optional[Any]=2_5_6 , __lowerCAmelCase : Dict=8_0 , __lowerCAmelCase : int=0 , __lowerCAmelCase : Optional[int]=1 , __lowerCAmelCase : Dict=2 , __lowerCAmelCase : Any=0.5 , **__lowerCAmelCase : Optional[Any] , ):
"""simple docstring"""
super().__init__(**__lowerCAmelCase , pad_token_id=__lowerCAmelCase , bos_token_id=__lowerCAmelCase , eos_token_id=__lowerCAmelCase )
_lowerCamelCase : Dict = hidden_size
_lowerCamelCase : Any = feat_extract_norm
_lowerCamelCase : List[Any] = feat_extract_activation
_lowerCamelCase : Any = list(__lowerCAmelCase )
_lowerCamelCase : Tuple = list(__lowerCAmelCase )
_lowerCamelCase : int = list(__lowerCAmelCase )
_lowerCamelCase : List[str] = conv_bias
_lowerCamelCase : List[str] = num_conv_pos_embeddings
_lowerCamelCase : Tuple = num_conv_pos_embedding_groups
_lowerCamelCase : List[str] = len(self.conv_dim )
_lowerCamelCase : Tuple = num_hidden_layers
_lowerCamelCase : List[Any] = intermediate_size
_lowerCamelCase : Dict = hidden_act
_lowerCamelCase : Union[str, Any] = num_attention_heads
_lowerCamelCase : Tuple = hidden_dropout
_lowerCamelCase : List[Any] = attention_dropout
_lowerCamelCase : Optional[int] = activation_dropout
_lowerCamelCase : Optional[Any] = feat_proj_dropout
_lowerCamelCase : Optional[int] = final_dropout
_lowerCamelCase : Any = layerdrop
_lowerCamelCase : Any = layer_norm_eps
_lowerCamelCase : List[Any] = initializer_range
_lowerCamelCase : List[str] = num_ctc_classes
_lowerCamelCase : List[Any] = vocab_size
_lowerCamelCase : Optional[Any] = do_stable_layer_norm
_lowerCamelCase : Tuple = use_weighted_layer_sum
_lowerCamelCase : List[Any] = classifier_proj_size
if (
(len(self.conv_stride ) != self.num_feat_extract_layers)
or (len(self.conv_kernel ) != self.num_feat_extract_layers)
or (len(self.conv_dim ) != self.num_feat_extract_layers)
):
raise ValueError(
'''Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =='''
''' `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ='''
f''' {len(self.conv_dim )}`, `len(config.conv_stride) = {len(self.conv_stride )}`,'''
f''' `len(config.conv_kernel) = {len(self.conv_kernel )}`.''' )
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
_lowerCamelCase : Any = apply_spec_augment
_lowerCamelCase : Dict = mask_time_prob
_lowerCamelCase : List[str] = mask_time_length
_lowerCamelCase : Optional[Any] = mask_time_min_masks
_lowerCamelCase : List[str] = mask_feature_prob
_lowerCamelCase : int = mask_feature_length
_lowerCamelCase : Dict = mask_feature_min_masks
# parameters for pretraining with codevector quantized representations
_lowerCamelCase : Optional[Any] = num_codevectors_per_group
_lowerCamelCase : int = num_codevector_groups
_lowerCamelCase : List[Any] = contrastive_logits_temperature
_lowerCamelCase : List[str] = feat_quantizer_dropout
_lowerCamelCase : Dict = num_negatives
_lowerCamelCase : Optional[int] = codevector_dim
_lowerCamelCase : List[Any] = proj_codevector_dim
_lowerCamelCase : List[Any] = diversity_loss_weight
# ctc loss
_lowerCamelCase : Union[str, Any] = ctc_loss_reduction
_lowerCamelCase : Any = ctc_zero_infinity
# pretraining loss
_lowerCamelCase : str = replace_prob
@property
def SCREAMING_SNAKE_CASE ( self : int ):
"""simple docstring"""
return functools.reduce(operator.mul , self.conv_stride , 1 )
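# Worked example for the property above: it multiplies the conv strides to get
# the feature encoder's total downsampling factor. With the defaults
# (5, 2, 2, 2, 2, 2, 2) that is 5 * 2**6 = 320 input samples per output frame.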
| 72 | 0 |
"""simple docstring"""
import copy
from typing import Dict, List, Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
a : Tuple = {
"""facebook/mask2former-swin-small-coco-instance""": (
"""https://huggingface.co/facebook/mask2former-swin-small-coco-instance/blob/main/config.json"""
)
# See all Mask2Former models at https://huggingface.co/models?filter=mask2former
}
a : Optional[int] = logging.get_logger(__name__)
class __UpperCAmelCase( SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
__lowerCamelCase = "mask2former"
__lowerCamelCase = ["swin"]
__lowerCamelCase = {"hidden_size": "hidden_dim"}
def __init__( self , snake_case__ = None , snake_case__ = 256 , snake_case__ = 256 , snake_case__ = 256 , snake_case__ = 1024 , snake_case__ = "relu" , snake_case__ = 6 , snake_case__ = 10 , snake_case__ = 8 , snake_case__ = 0.0 , snake_case__ = 2048 , snake_case__ = False , snake_case__ = False , snake_case__ = 4 , snake_case__ = 255 , snake_case__ = 100 , snake_case__ = 0.1 , snake_case__ = 2.0 , snake_case__ = 5.0 , snake_case__ = 5.0 , snake_case__ = 12544 , snake_case__ = 3.0 , snake_case__ = 0.75 , snake_case__ = 0.02 , snake_case__ = 1.0 , snake_case__ = True , snake_case__ = [4, 8, 16, 32] , snake_case__ = None , **snake_case__ , ):
'''simple docstring'''
if backbone_config is None:
logger.info("`backbone_config` is `None`. Initializing the config with the default `Swin` backbone." )
lowercase__ : Union[str, Any]= CONFIG_MAPPING["swin"](
image_size=224 , in_channels=3 , patch_size=4 , embed_dim=96 , depths=[2, 2, 18, 2] , num_heads=[3, 6, 12, 24] , window_size=7 , drop_path_rate=0.3 , use_absolute_embeddings=snake_case__ , out_features=["stage1", "stage2", "stage3", "stage4"] , )
if isinstance(snake_case__ , snake_case__ ):
lowercase__ : Dict= backbone_config.pop("model_type" )
lowercase__ : Tuple= CONFIG_MAPPING[backbone_model_type]
lowercase__ : Any= config_class.from_dict(snake_case__ )
# verify that the backbone is supported
if backbone_config.model_type not in self.backbones_supported:
logger.warning_once(
F'''Backbone {backbone_config.model_type} is not a supported model and may not be compatible with Mask2Former. '''
F'''Supported model types: {','.join(self.backbones_supported )}''' )
lowercase__ : Optional[Any]= backbone_config
lowercase__ : int= feature_size
lowercase__ : str= mask_feature_size
lowercase__ : Optional[int]= hidden_dim
lowercase__ : Tuple= encoder_feedforward_dim
lowercase__ : List[str]= activation_function
lowercase__ : List[str]= encoder_layers
lowercase__ : str= decoder_layers
lowercase__ : Any= num_attention_heads
lowercase__ : List[str]= dropout
lowercase__ : Tuple= dim_feedforward
lowercase__ : Optional[int]= pre_norm
lowercase__ : Optional[int]= enforce_input_projection
lowercase__ : List[str]= common_stride
lowercase__ : int= ignore_value
lowercase__ : List[Any]= num_queries
lowercase__ : List[str]= no_object_weight
lowercase__ : Dict= class_weight
lowercase__ : Optional[Any]= mask_weight
lowercase__ : Union[str, Any]= dice_weight
lowercase__ : List[str]= train_num_points
lowercase__ : Optional[int]= oversample_ratio
lowercase__ : List[Any]= importance_sample_ratio
lowercase__ : Union[str, Any]= init_std
lowercase__ : Optional[int]= init_xavier_std
lowercase__ : Optional[Any]= use_auxiliary_loss
lowercase__ : Optional[int]= feature_strides
lowercase__ : List[Any]= output_auxiliary_logits
lowercase__ : Optional[Any]= decoder_layers
super().__init__(**snake_case__ )
@classmethod
def UpperCAmelCase_ ( cls , snake_case__ , **snake_case__ ):
'''simple docstring'''
return cls(
backbone_config=snake_case__ , **snake_case__ , )
def UpperCAmelCase_ ( self ):
'''simple docstring'''
lowercase__ : Tuple= copy.deepcopy(self.__dict__ )
lowercase__ : List[Any]= self.backbone_config.to_dict()
lowercase__ : str= self.__class__.model_type
return output
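# Round-trip sketch (the config class name is presumed from the "mask2former"
# model type above; values illustrative): the serializer inlines the backbone
# config and stamps the model type so the right classes can be rebuilt.
#   config = Mask2FormerConfig()             # defaults to a Swin backbone
#   d = config.to_dict()
#   d["backbone_config"]["model_type"]       -> "swin"
#   d["model_type"]                          -> "mask2former"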
| 352 |
"""simple docstring"""
from pathlib import Path
import fire
from tqdm import tqdm
def lowercase__(A="ro" , A="en" , A="wmt16" , A=None ) ->None:
"""simple docstring"""
try:
import datasets
except (ModuleNotFoundError, ImportError):
raise ImportError("run pip install datasets" )
lowercase__ : int= f'''{src_lang}-{tgt_lang}'''
print(f'''Converting {dataset}-{pair}''' )
lowercase__ : List[Any]= datasets.load_dataset(A , A )
if save_dir is None:
lowercase__ : Union[str, Any]= f'''{dataset}-{pair}'''
lowercase__ : str= Path(A )
save_dir.mkdir(exist_ok=A )
for split in ds.keys():
print(f'''Splitting {split} with {ds[split].num_rows} records''' )
# to save to val.source, val.target like summary datasets
lowercase__ : Any= "val" if split == "validation" else split
lowercase__ : List[Any]= save_dir.joinpath(f'''{fn}.source''' )
lowercase__ : Optional[Any]= save_dir.joinpath(f'''{fn}.target''' )
lowercase__ : Optional[int]= src_path.open("w+" )
lowercase__ : Any= tgt_path.open("w+" )
# reader is the bottleneck so writing one record at a time doesn't slow things down
for x in tqdm(ds[split] ):
lowercase__ : int= x["translation"]
src_fp.write(ex[src_lang] + "\n" )
tgt_fp.write(ex[tgt_lang] + "\n" )
print(f'''Saved {dataset} dataset to {save_dir}''' )
if __name__ == "__main__":
fire.Fire(download_wmt_dataset)
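# Example invocation sketch (script filename and languages illustrative); fire
# exposes the function's keyword arguments as CLI flags:
#   python download_wmt.py --src_lang ro --tgt_lang en --dataset wmt16
# This writes train.source/train.target, val.source/val.target and
# test.source/test.target under ./wmt16-ro-en by default.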
| 150 | 0 |
import unittest
import numpy as np
from transformers import MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING, TF_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING
from transformers.pipelines import AudioClassificationPipeline, pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_torchaudio,
slow,
)
from .test_pipelines_common import ANY
@is_pipeline_test
class lowerCamelCase__ ( unittest.TestCase):
SCREAMING_SNAKE_CASE__ = MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING
SCREAMING_SNAKE_CASE__ = TF_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING
def __A (self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) -> List[str]:
_lowercase =AudioClassificationPipeline(model=lowerCamelCase__ , feature_extractor=lowerCamelCase__ )
# test with a raw waveform
_lowercase =np.zeros((3_4_0_0_0,) )
_lowercase =np.zeros((1_4_0_0_0,) )
return audio_classifier, [audioa, audio]
def __A (self , UpperCAmelCase , UpperCAmelCase ) -> str:
_lowercase =examples
_lowercase =audio_classifier(lowerCamelCase__ )
# by default a model is initialized with num_labels=2
self.assertEqual(
lowerCamelCase__ , [
{'''score''': ANY(lowerCamelCase__ ), '''label''': ANY(lowerCamelCase__ )},
{'''score''': ANY(lowerCamelCase__ ), '''label''': ANY(lowerCamelCase__ )},
] , )
_lowercase =audio_classifier(lowerCamelCase__ , top_k=1 )
self.assertEqual(
lowerCamelCase__ , [
{'''score''': ANY(lowerCamelCase__ ), '''label''': ANY(lowerCamelCase__ )},
] , )
self.run_torchaudio(lowerCamelCase__ )
@require_torchaudio
def __A (self , UpperCAmelCase ) -> Any:
import datasets
        # test with a real audio array loaded via the datasets library
_lowercase =datasets.load_dataset('''hf-internal-testing/librispeech_asr_dummy''' , '''clean''' , split='''validation''' )
_lowercase =dataset[0]["""audio"""]["""array"""]
_lowercase =audio_classifier(lowerCamelCase__ )
self.assertEqual(
lowerCamelCase__ , [
{'''score''': ANY(lowerCamelCase__ ), '''label''': ANY(lowerCamelCase__ )},
{'''score''': ANY(lowerCamelCase__ ), '''label''': ANY(lowerCamelCase__ )},
] , )
@require_torch
def __A (self ) -> List[Any]:
_lowercase ="""anton-l/wav2vec2-random-tiny-classifier"""
_lowercase =pipeline('''audio-classification''' , model=lowerCamelCase__ )
_lowercase =np.ones((8_0_0_0,) )
_lowercase =audio_classifier(lowerCamelCase__ , top_k=4 )
_lowercase =[
{"""score""": 0.0842, """label""": """no"""},
{"""score""": 0.0838, """label""": """up"""},
{"""score""": 0.0837, """label""": """go"""},
{"""score""": 0.0834, """label""": """right"""},
]
_lowercase =[
{"""score""": 0.0845, """label""": """stop"""},
{"""score""": 0.0844, """label""": """on"""},
{"""score""": 0.0841, """label""": """right"""},
{"""score""": 0.0834, """label""": """left"""},
]
self.assertIn(nested_simplify(lowerCamelCase__ , decimals=4 ) , [EXPECTED_OUTPUT, EXPECTED_OUTPUT_PT_2] )
_lowercase ={"""array""": np.ones((8_0_0_0,) ), """sampling_rate""": audio_classifier.feature_extractor.sampling_rate}
_lowercase =audio_classifier(lowerCamelCase__ , top_k=4 )
self.assertIn(nested_simplify(lowerCamelCase__ , decimals=4 ) , [EXPECTED_OUTPUT, EXPECTED_OUTPUT_PT_2] )
@require_torch
@slow
def __A (self ) -> Union[str, Any]:
import datasets
_lowercase ="""superb/wav2vec2-base-superb-ks"""
_lowercase =pipeline('''audio-classification''' , model=lowerCamelCase__ )
_lowercase =datasets.load_dataset('''anton-l/superb_dummy''' , '''ks''' , split='''test''' )
_lowercase =np.array(dataset[3]['''speech'''] , dtype=np.floataa )
_lowercase =audio_classifier(lowerCamelCase__ , top_k=4 )
self.assertEqual(
nested_simplify(lowerCamelCase__ , decimals=3 ) , [
{'''score''': 0.981, '''label''': '''go'''},
{'''score''': 0.007, '''label''': '''up'''},
{'''score''': 0.006, '''label''': '''_unknown_'''},
{'''score''': 0.001, '''label''': '''down'''},
] , )
@require_tf
@unittest.skip('''Audio classification is not implemented for TF''' )
def __A (self ) -> Dict:
pass
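# Out-of-test usage sketch mirroring the small-model test above (the model name
# is taken from that test; the exact scores and labels are illustrative):
#   classifier = pipeline("audio-classification", model="anton-l/wav2vec2-random-tiny-classifier")
#   classifier(np.ones(8_000), top_k=2)
#   -> [{"score": 0.08, "label": "no"}, {"score": 0.08, "label": "up"}]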
| 5 |
from collections.abc import Iterator, MutableMapping
from dataclasses import dataclass
from typing import Generic, TypeVar
SCREAMING_SNAKE_CASE_:List[Any] = TypeVar("""KEY""")
SCREAMING_SNAKE_CASE_:Dict = TypeVar("""VAL""")
@dataclass(frozen=SCREAMING_SNAKE_CASE__ , slots=SCREAMING_SNAKE_CASE__ )
class SCREAMING_SNAKE_CASE__ ( Generic[KEY, VAL] ):
'''simple docstring'''
__lowerCamelCase : KEY
__lowerCamelCase : VAL
class SCREAMING_SNAKE_CASE__ ( _Item ):
'''simple docstring'''
def __init__( self ):
super().__init__(lowerCamelCase__, lowerCamelCase__ )
def __bool__( self ):
return False
SCREAMING_SNAKE_CASE_:Optional[Any] = _DeletedItem()
class SCREAMING_SNAKE_CASE__ ( MutableMapping[KEY, VAL] ):
'''simple docstring'''
def __init__( self, lowerCamelCase__ = 8, lowerCamelCase__ = 0.75 ):
A : List[str] = initial_block_size
A : list[_Item | None] = [None] * initial_block_size
assert 0.0 < capacity_factor < 1.0
A : Dict = capacity_factor
A : Optional[Any] = 0
def _lowerCAmelCase ( self, lowerCamelCase__ ):
return hash(lowerCamelCase__ ) % len(self._buckets )
def _lowerCAmelCase ( self, lowerCamelCase__ ):
return (ind + 1) % len(self._buckets )
def _lowerCAmelCase ( self, lowerCamelCase__, lowerCamelCase__, lowerCamelCase__ ):
A : List[Any] = self._buckets[ind]
if not stored:
A : Optional[int] = _Item(lowerCamelCase__, lowerCamelCase__ )
self._len += 1
return True
elif stored.key == key:
A : int = _Item(lowerCamelCase__, lowerCamelCase__ )
return True
else:
return False
def _lowerCAmelCase ( self ):
A : Union[str, Any] = len(self._buckets ) * self._capacity_factor
return len(self ) >= int(lowerCamelCase__ )
def _lowerCAmelCase ( self ):
if len(self._buckets ) <= self._initial_block_size:
return False
A : Optional[Any] = len(self._buckets ) * self._capacity_factor / 2
return len(self ) < limit
def _lowerCAmelCase ( self, lowerCamelCase__ ):
A : Dict = self._buckets
A : Optional[Any] = [None] * new_size
A : List[str] = 0
for item in old_buckets:
if item:
self._add_item(item.key, item.val )
def _lowerCAmelCase ( self ):
self._resize(len(self._buckets ) * 2 )
def _lowerCAmelCase ( self ):
self._resize(len(self._buckets ) // 2 )
def _lowerCAmelCase ( self, lowerCamelCase__ ):
A : Union[str, Any] = self._get_bucket_index(lowerCamelCase__ )
for _ in range(len(self._buckets ) ):
yield ind
A : Dict = self._get_next_ind(lowerCamelCase__ )
def _lowerCAmelCase ( self, lowerCamelCase__, lowerCamelCase__ ):
for ind in self._iterate_buckets(lowerCamelCase__ ):
if self._try_set(lowerCamelCase__, lowerCamelCase__, lowerCamelCase__ ):
break
def __setitem__( self, lowerCamelCase__, lowerCamelCase__ ):
if self._is_full():
self._size_up()
self._add_item(lowerCamelCase__, lowerCamelCase__ )
def __delitem__( self, lowerCamelCase__ ):
for ind in self._iterate_buckets(lowerCamelCase__ ):
A : Tuple = self._buckets[ind]
if item is None:
raise KeyError(lowerCamelCase__ )
if item is _deleted:
continue
if item.key == key:
A : int = _deleted
self._len -= 1
break
if self._is_sparse():
self._size_down()
def __getitem__( self, lowerCamelCase__ ):
for ind in self._iterate_buckets(lowerCamelCase__ ):
A : int = self._buckets[ind]
if item is None:
break
if item is _deleted:
continue
if item.key == key:
return item.val
raise KeyError(lowerCamelCase__ )
def __len__( self ):
return self._len
def __iter__( self ):
yield from (item.key for item in self._buckets if item)
def __repr__( self ):
A : Union[str, Any] = """ ,""".join(
f'''{item.key}: {item.val}''' for item in self._buckets if item )
return f'''HashMap({val_string})'''
| 116 | 0 |
import inspect
import math
import tempfile
import unittest
import numpy as np
from transformers import ViTMAEConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ViTMAEForPreTraining, ViTMAEModel
from transformers.models.vit.modeling_vit import VIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class __lowercase :
'''simple docstring'''
def __init__( self , _UpperCAmelCase , _UpperCAmelCase=13 , _UpperCAmelCase=30 , _UpperCAmelCase=2 , _UpperCAmelCase=3 , _UpperCAmelCase=True , _UpperCAmelCase=True , _UpperCAmelCase=32 , _UpperCAmelCase=5 , _UpperCAmelCase=4 , _UpperCAmelCase=37 , _UpperCAmelCase="gelu" , _UpperCAmelCase=0.1 , _UpperCAmelCase=0.1 , _UpperCAmelCase=10 , _UpperCAmelCase=0.0_2 , _UpperCAmelCase=3 , _UpperCAmelCase=0.6 , _UpperCAmelCase=None , ):
__a : int = parent
__a : Union[str, Any] = batch_size
__a : List[str] = image_size
__a : Optional[int] = patch_size
__a : str = num_channels
__a : List[Any] = is_training
__a : Any = use_labels
__a : List[Any] = hidden_size
__a : str = num_hidden_layers
__a : List[str] = num_attention_heads
__a : List[str] = intermediate_size
__a : Optional[Any] = hidden_act
__a : str = hidden_dropout_prob
__a : str = attention_probs_dropout_prob
__a : Tuple = type_sequence_label_size
__a : Optional[Any] = initializer_range
__a : int = mask_ratio
__a : List[str] = scope
# in ViTMAE, the expected sequence length = (num_patches + 1) * (1 - config.mask_ratio), rounded above
# (we add 1 for the [CLS] token)
__a : Tuple = (image_size // patch_size) ** 2
__a : Union[str, Any] = int(math.ceil((1 - mask_ratio) * (num_patches + 1) ) )
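    # Worked example of the arithmetic above with this tester's defaults:
    # image_size=30, patch_size=2 -> num_patches = (30 // 2) ** 2 = 225, and
    # with mask_ratio=0.6 the expected unmasked sequence length is
    # ceil((1 - 0.6) * (225 + 1)) = ceil(90.4) = 91 tokens, [CLS] included.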
def _lowerCamelCase ( self ):
__a : Any = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
__a : Optional[int] = None
if self.use_labels:
__a : Optional[Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__a : Union[str, Any] = self.get_config()
return config, pixel_values, labels
def _lowerCamelCase ( self ):
return ViTMAEConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=_lowerCamelCase , initializer_range=self.initializer_range , mask_ratio=self.mask_ratio , )
def _lowerCamelCase ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ):
__a : Optional[int] = ViTMAEModel(config=_lowerCamelCase )
model.to(_lowerCamelCase )
model.eval()
__a : Union[str, Any] = model(_lowerCamelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def _lowerCamelCase ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ):
__a : Optional[Any] = ViTMAEForPreTraining(_lowerCamelCase )
model.to(_lowerCamelCase )
model.eval()
__a : Optional[Any] = model(_lowerCamelCase )
__a : List[str] = (self.image_size // self.patch_size) ** 2
__a : int = self.patch_size**2 * self.num_channels
self.parent.assertEqual(result.logits.shape , (self.batch_size, num_patches, expected_num_channels) )
# test greyscale images
__a : Optional[int] = 1
__a : List[Any] = ViTMAEForPreTraining(_lowerCamelCase )
model.to(_lowerCamelCase )
model.eval()
__a : str = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
__a : List[Any] = model(_lowerCamelCase )
__a : int = self.patch_size**2
self.parent.assertEqual(result.logits.shape , (self.batch_size, num_patches, expected_num_channels) )
def _lowerCamelCase ( self ):
__a : Tuple = self.prepare_config_and_inputs()
__a : Dict = config_and_inputs
__a : Any = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_torch
class __lowercase ( a__ , a__ , unittest.TestCase ):
'''simple docstring'''
__lowerCAmelCase = (ViTMAEModel, ViTMAEForPreTraining) if is_torch_available() else ()
__lowerCAmelCase = {'feature-extraction': ViTMAEModel} if is_torch_available() else {}
__lowerCAmelCase = False
__lowerCAmelCase = False
__lowerCAmelCase = False
__lowerCAmelCase = False
def _lowerCamelCase ( self ):
__a : Tuple = ViTMAEModelTester(self )
__a : List[str] = ConfigTester(self , config_class=_lowerCamelCase , has_text_modality=_lowerCamelCase , hidden_size=37 )
def _lowerCamelCase ( self ):
self.config_tester.run_common_tests()
@unittest.skip(reason='''ViTMAE does not use inputs_embeds''' )
def _lowerCamelCase ( self ):
pass
def _lowerCamelCase ( self ):
__a : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__a : List[Any] = model_class(_lowerCamelCase )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
__a : Union[str, Any] = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(_lowerCamelCase , nn.Linear ) )
def _lowerCamelCase ( self ):
__a : Dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__a : List[Any] = model_class(_lowerCamelCase )
__a : Optional[Any] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__a : str = [*signature.parameters.keys()]
__a : List[Any] = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , _lowerCamelCase )
def _lowerCamelCase ( self ):
__a : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_lowerCamelCase )
def _lowerCamelCase ( self ):
__a : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_pretraining(*_lowerCamelCase )
def _lowerCamelCase ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ):
# make masks reproducible
np.random.seed(2 )
__a : Union[str, Any] = int((pt_model.config.image_size // pt_model.config.patch_size) ** 2 )
__a : List[Any] = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
__a : int = torch.from_numpy(_lowerCamelCase )
# Add `noise` argument.
# PT inputs will be prepared in `super().check_pt_tf_models()` with this added `noise` argument
__a : List[str] = pt_noise
super().check_pt_tf_models(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
def _lowerCamelCase ( self ):
__a : Dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__a : Any = model_class(_lowerCamelCase )
model.to(_lowerCamelCase )
model.eval()
# make random mask reproducible
torch.manual_seed(2 )
with torch.no_grad():
__a : List[Any] = model(**self._prepare_for_class(_lowerCamelCase , _lowerCamelCase ) )
__a : str = outputs[0].cpu().numpy()
__a : str = 0
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(_lowerCamelCase )
__a : Union[str, Any] = model_class.from_pretrained(_lowerCamelCase )
model.to(_lowerCamelCase )
# make random mask reproducible
torch.manual_seed(2 )
with torch.no_grad():
__a : Union[str, Any] = model(**self._prepare_for_class(_lowerCamelCase , _lowerCamelCase ) )
# Make sure we don't have nans
__a : Dict = after_outputs[0].cpu().numpy()
__a : Tuple = 0
__a : Dict = np.amax(np.abs(out_a - out_a ) )
self.assertLessEqual(_lowerCamelCase , 1e-5 )
@unittest.skip(
reason='''ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load
to get deterministic results.''' )
def _lowerCamelCase ( self ):
pass
@unittest.skip(
reason='''ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load
to get deterministic results.''' )
def _lowerCamelCase ( self ):
pass
@unittest.skip(
reason='''ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load
to get deterministic results.''' )
def _lowerCamelCase ( self ):
pass
@unittest.skip(reason='''ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load''' )
def _lowerCamelCase ( self ):
pass
@unittest.skip('''Will be fixed soon by reducing the size of the model used for common tests.''' )
def _lowerCamelCase ( self ):
pass
@slow
def _lowerCamelCase ( self ):
for model_name in VIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__a : Any = ViTMAEModel.from_pretrained(_lowerCamelCase )
self.assertIsNotNone(_lowerCamelCase )
def __A ( ) -> int:
__a : str = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''')
return image
@require_torch
@require_vision
class __lowercase ( unittest.TestCase ):
'''simple docstring'''
@cached_property
def _lowerCamelCase ( self ):
return ViTImageProcessor.from_pretrained('''facebook/vit-mae-base''' ) if is_vision_available() else None
@slow
def _lowerCamelCase ( self ):
# make random mask reproducible across the PT and TF model
np.random.seed(2 )
__a : Union[str, Any] = ViTMAEForPreTraining.from_pretrained('''facebook/vit-mae-base''' ).to(_lowerCamelCase )
__a : List[str] = self.default_image_processor
__a : List[str] = prepare_img()
__a : List[str] = image_processor(images=_lowerCamelCase , return_tensors='''pt''' ).to(_lowerCamelCase )
# prepare a noise vector that will be also used for testing the TF model
# (this way we can ensure that the PT and TF models operate on the same inputs)
__a : List[str] = ViTMAEConfig()
__a : Tuple = int((vit_mae_config.image_size // vit_mae_config.patch_size) ** 2 )
__a : Optional[Any] = np.random.uniform(size=(1, num_patches) )
# forward pass
with torch.no_grad():
__a : Optional[int] = model(**_lowerCamelCase , noise=torch.from_numpy(_lowerCamelCase ).to(device=_lowerCamelCase ) )
# verify the logits
__a : List[Any] = torch.Size((1, 196, 768) )
self.assertEqual(outputs.logits.shape , _lowerCamelCase )
__a : List[Any] = torch.tensor(
[[-0.0_5_4_8, -1.7_0_2_3, -0.9_3_2_5], [0.3_7_2_1, -0.5_6_7_0, -0.2_2_3_3], [0.8_2_3_5, -1.3_8_7_8, -0.3_5_2_4]] )
self.assertTrue(torch.allclose(outputs.logits[0, :3, :3] , expected_slice.to(_lowerCamelCase ) , atol=1e-4 ) ) | 369 |
"""simple docstring"""
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ..models.whisper import WhisperForConditionalGeneration, WhisperProcessor
from .base import PipelineTool
class __lowercase ( _UpperCamelCase ):
'''simple docstring'''
__lowerCAmelCase = '''openai/whisper-base'''
__lowerCAmelCase = (
'''This is a tool that transcribes an audio into text. It takes an input named `audio` and returns the '''
'''transcribed text.'''
)
__lowerCAmelCase = '''transcriber'''
__lowerCAmelCase = WhisperProcessor
__lowerCAmelCase = WhisperForConditionalGeneration
__lowerCAmelCase = ['''audio''']
__lowerCAmelCase = ['''text''']
def _lowerCamelCase ( self , _UpperCAmelCase ):
return self.pre_processor(_UpperCAmelCase , return_tensors='''pt''' ).input_features
def _lowerCamelCase ( self , _UpperCAmelCase ):
return self.model.generate(inputs=_UpperCAmelCase )
def _lowerCamelCase ( self , _UpperCAmelCase ):
return self.pre_processor.batch_decode(_UpperCAmelCase , skip_special_tokens=_UpperCAmelCase )[0] | 188 | 0 |
from pathlib import Path
from typing import List
from transformers import is_torch_available, is_vision_available
from transformers.testing_utils import get_tests_dir, is_tool_test
from transformers.tools.agent_types import AGENT_TYPE_MAPPING, AgentAudio, AgentImage, AgentText
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
A__ = ["""text""", """image""", """audio"""]
def _UpperCAmelCase ( snake_case ):
"""simple docstring"""
_lowerCAmelCase = []
for input_type in input_types:
if input_type == "text":
inputs.append("""Text input""" )
elif input_type == "image":
inputs.append(
Image.open(Path(get_tests_dir("""fixtures/tests_samples/COCO""" ) ) / """000000039769.png""" ).resize((5_12, 5_12) ) )
elif input_type == "audio":
inputs.append(torch.ones(30_00 ) )
elif isinstance(snake_case , snake_case ):
inputs.append(create_inputs(snake_case ) )
else:
raise ValueError(F'Invalid type requested: {input_type}' )
return inputs
def _UpperCAmelCase ( snake_case ):
"""simple docstring"""
_lowerCAmelCase = []
for output in outputs:
if isinstance(snake_case , (str, AgentText) ):
output_types.append("""text""" )
elif isinstance(snake_case , (Image.Image, AgentImage) ):
output_types.append("""image""" )
elif isinstance(snake_case , (torch.Tensor, AgentAudio) ):
output_types.append("""audio""" )
else:
raise ValueError(F'Invalid output: {output}' )
return output_types
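# Behaviour sketch for the two helpers above (objects illustrative):
#   create_inputs(["text", "audio"])           -> ["Text input", torch.ones(3000)]
#   output_types(["hello", torch.ones(3000)])  -> ["text", "audio"]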
@is_tool_test
class __lowerCAmelCase :
def snake_case ( self ):
"""simple docstring"""
self.assertTrue(hasattr(self.tool , """inputs""" ) )
self.assertTrue(hasattr(self.tool , """outputs""" ) )
_lowerCAmelCase = self.tool.inputs
for _input in inputs:
if isinstance(_input , _snake_case ):
for __input in _input:
self.assertTrue(__input in authorized_types )
else:
self.assertTrue(_input in authorized_types )
_lowerCAmelCase = self.tool.outputs
for _output in outputs:
self.assertTrue(_output in authorized_types )
def snake_case ( self ):
"""simple docstring"""
_lowerCAmelCase = create_inputs(self.tool.inputs )
_lowerCAmelCase = self.tool(*_snake_case )
# There is a single output
if len(self.tool.outputs ) == 1:
_lowerCAmelCase = [outputs]
self.assertListEqual(output_types(_snake_case ) , self.tool.outputs )
def snake_case ( self ):
"""simple docstring"""
self.assertTrue(hasattr(self.tool , """description""" ) )
self.assertTrue(hasattr(self.tool , """default_checkpoint""" ) )
self.assertTrue(self.tool.description.startswith("""This is a tool that""" ) )
def snake_case ( self ):
"""simple docstring"""
_lowerCAmelCase = create_inputs(self.tool.inputs )
_lowerCAmelCase = self.tool(*_snake_case )
if not isinstance(_snake_case , _snake_case ):
_lowerCAmelCase = [outputs]
self.assertEqual(len(_snake_case ) , len(self.tool.outputs ) )
for output, output_type in zip(_snake_case , self.tool.outputs ):
_lowerCAmelCase = AGENT_TYPE_MAPPING[output_type]
self.assertTrue(isinstance(_snake_case , _snake_case ) )
def snake_case ( self ):
"""simple docstring"""
_lowerCAmelCase = create_inputs(self.tool.inputs )
_lowerCAmelCase = []
for _input, input_type in zip(_snake_case , self.tool.inputs ):
if isinstance(_snake_case , _snake_case ):
_inputs.append([AGENT_TYPE_MAPPING[_input_type](_input ) for _input_type in input_type] )
else:
_inputs.append(AGENT_TYPE_MAPPING[input_type](_input ) )
# Should not raise an error
_lowerCAmelCase = self.tool(*_snake_case )
if not isinstance(_snake_case , _snake_case ):
_lowerCAmelCase = [outputs]
self.assertEqual(len(_snake_case ) , len(self.tool.outputs ) )
| 82 |
import numpy as np
from transformers import BatchFeature
from transformers.testing_utils import require_tf, require_torch
from .test_feature_extraction_common import FeatureExtractionSavingTestMixin
class __lowerCAmelCase ( lowerCamelCase__ ):
# to overwrite at feature extractactor specific tests
__lowerCamelCase = None
__lowerCamelCase = None
@property
def snake_case ( self ):
"""simple docstring"""
return self.feat_extract_tester.prepare_feat_extract_dict()
def snake_case ( self ):
"""simple docstring"""
_lowerCAmelCase = self.feature_extraction_class(**self.feat_extract_dict )
self.assertTrue(hasattr(_snake_case , """feature_size""" ) )
self.assertTrue(hasattr(_snake_case , """sampling_rate""" ) )
self.assertTrue(hasattr(_snake_case , """padding_value""" ) )
def snake_case ( self ):
"""simple docstring"""
_lowerCAmelCase = self.feat_extract_tester.prepare_inputs_for_common()
_lowerCAmelCase = self.feature_extraction_class(**self.feat_extract_dict )
_lowerCAmelCase = feat_extract.model_input_names[0]
_lowerCAmelCase = BatchFeature({input_name: speech_inputs} )
self.assertTrue(all(len(_snake_case ) == len(_snake_case ) for x, y in zip(_snake_case , processed_features[input_name] ) ) )
_lowerCAmelCase = self.feat_extract_tester.prepare_inputs_for_common(equal_length=_snake_case )
_lowerCAmelCase = BatchFeature({input_name: speech_inputs} , tensor_type="""np""" )
_lowerCAmelCase = processed_features[input_name]
if len(batch_features_input.shape ) < 3:
_lowerCAmelCase = batch_features_input[:, :, None]
self.assertTrue(
batch_features_input.shape
== (self.feat_extract_tester.batch_size, len(speech_inputs[0] ), self.feat_extract_tester.feature_size) )
@require_torch
def snake_case ( self ):
"""simple docstring"""
_lowerCAmelCase = self.feat_extract_tester.prepare_inputs_for_common(equal_length=_snake_case )
_lowerCAmelCase = self.feature_extraction_class(**self.feat_extract_dict )
_lowerCAmelCase = feat_extract.model_input_names[0]
_lowerCAmelCase = BatchFeature({input_name: speech_inputs} , tensor_type="""pt""" )
_lowerCAmelCase = processed_features[input_name]
if len(batch_features_input.shape ) < 3:
_lowerCAmelCase = batch_features_input[:, :, None]
self.assertTrue(
batch_features_input.shape
== (self.feat_extract_tester.batch_size, len(speech_inputs[0] ), self.feat_extract_tester.feature_size) )
@require_tf
def snake_case ( self ):
"""simple docstring"""
_lowerCAmelCase = self.feat_extract_tester.prepare_inputs_for_common(equal_length=_snake_case )
_lowerCAmelCase = self.feature_extraction_class(**self.feat_extract_dict )
_lowerCAmelCase = feat_extract.model_input_names[0]
_lowerCAmelCase = BatchFeature({input_name: speech_inputs} , tensor_type="""tf""" )
_lowerCAmelCase = processed_features[input_name]
if len(batch_features_input.shape ) < 3:
_lowerCAmelCase = batch_features_input[:, :, None]
self.assertTrue(
batch_features_input.shape
== (self.feat_extract_tester.batch_size, len(speech_inputs[0] ), self.feat_extract_tester.feature_size) )
def snake_case ( self , _snake_case=False ):
"""simple docstring"""
def _inputs_have_equal_length(_snake_case ):
_lowerCAmelCase = len(input[0] )
for input_slice in input[1:]:
if len(_snake_case ) != length:
return False
return True
def _inputs_are_equal(_snake_case , _snake_case ):
if len(_snake_case ) != len(_snake_case ):
return False
for input_slice_a, input_slice_a in zip(_snake_case , _snake_case ):
if not np.allclose(np.asarray(_snake_case ) , np.asarray(_snake_case ) , atol=1e-3 ):
return False
return True
_lowerCAmelCase = self.feature_extraction_class(**self.feat_extract_dict )
_lowerCAmelCase = self.feat_extract_tester.prepare_inputs_for_common(numpify=_snake_case )
_lowerCAmelCase = feat_extract.model_input_names[0]
_lowerCAmelCase = BatchFeature({input_name: speech_inputs} )
_lowerCAmelCase = self.feat_extract_tester.seq_length_diff
_lowerCAmelCase = self.feat_extract_tester.max_seq_length + pad_diff
_lowerCAmelCase = self.feat_extract_tester.min_seq_length
_lowerCAmelCase = self.feat_extract_tester.batch_size
_lowerCAmelCase = self.feat_extract_tester.feature_size
# test padding for List[int] + numpy
_lowerCAmelCase = feat_extract.pad(_snake_case , padding=_snake_case )
_lowerCAmelCase = input_a[input_name]
_lowerCAmelCase = feat_extract.pad(_snake_case , padding="""longest""" )
_lowerCAmelCase = input_a[input_name]
_lowerCAmelCase = feat_extract.pad(_snake_case , padding="""max_length""" , max_length=len(speech_inputs[-1] ) )
_lowerCAmelCase = input_a[input_name]
_lowerCAmelCase = feat_extract.pad(_snake_case , padding="""longest""" , return_tensors="""np""" )
_lowerCAmelCase = input_a[input_name]
# max_length parameter has to be provided when setting `padding="max_length"`
with self.assertRaises(_snake_case ):
feat_extract.pad(_snake_case , padding="""max_length""" )[input_name]
_lowerCAmelCase = feat_extract.pad(
_snake_case , padding="""max_length""" , max_length=_snake_case , return_tensors="""np""" )
_lowerCAmelCase = input_a[input_name]
self.assertFalse(_inputs_have_equal_length(_snake_case ) )
self.assertTrue(_inputs_have_equal_length(_snake_case ) )
self.assertTrue(_inputs_have_equal_length(_snake_case ) )
self.assertTrue(_inputs_are_equal(_snake_case , _snake_case ) )
self.assertTrue(len(input_a[0] ) == pad_min_length )
self.assertTrue(len(input_a[1] ) == pad_min_length + pad_diff )
self.assertTrue(input_a.shape[:2] == (batch_size, len(input_a[0] )) )
self.assertTrue(input_a.shape[:2] == (batch_size, pad_max_length) )
if feature_size > 1:
self.assertTrue(input_a.shape[2] == input_a.shape[2] == feature_size )
# test padding for `pad_to_multiple_of` for List[int] + numpy
_lowerCAmelCase = feat_extract.pad(_snake_case , pad_to_multiple_of=10 )
_lowerCAmelCase = input_a[input_name]
_lowerCAmelCase = feat_extract.pad(_snake_case , padding="""longest""" , pad_to_multiple_of=10 )
_lowerCAmelCase = input_a[input_name]
_lowerCAmelCase = feat_extract.pad(
_snake_case , padding="""max_length""" , pad_to_multiple_of=10 , max_length=_snake_case )
_lowerCAmelCase = input_a[input_name]
_lowerCAmelCase = feat_extract.pad(
_snake_case , padding="""max_length""" , pad_to_multiple_of=10 , max_length=_snake_case , return_tensors="""np""" , )
_lowerCAmelCase = input_a[input_name]
self.assertTrue(all(len(_snake_case ) % 10 == 0 for x in input_a ) )
self.assertTrue(_inputs_are_equal(_snake_case , _snake_case ) )
_lowerCAmelCase = pad_max_length if pad_max_length % 10 == 0 else (pad_max_length // 10 + 1) * 10
self.assertTrue(all(len(_snake_case ) == expected_mult_pad_length for x in input_a ) )
self.assertEqual(input_a.shape[:2] , (batch_size, expected_mult_pad_length) )
if feature_size > 1:
self.assertTrue(input_a.shape[2] == feature_size )
# Check padding value is correct
_lowerCAmelCase = (np.ones(self.feat_extract_tester.feature_size ) * feat_extract.padding_value).sum()
self.assertTrue(
abs(np.asarray(input_a[0] )[pad_min_length:].sum() - padding_vector_sum * (pad_max_length - pad_min_length) )
< 1e-3 )
self.assertTrue(
abs(
np.asarray(input_a[1] )[pad_min_length + pad_diff :].sum()
- padding_vector_sum * (pad_max_length - pad_min_length - pad_diff) )
< 1e-3 )
self.assertTrue(
abs(
np.asarray(input_a[2] )[pad_min_length + 2 * pad_diff :].sum()
- padding_vector_sum * (pad_max_length - pad_min_length - 2 * pad_diff) )
< 1e-3 )
self.assertTrue(
abs(input_a[0, pad_min_length:].sum() - padding_vector_sum * (pad_max_length - pad_min_length) ) < 1e-3 )
self.assertTrue(
abs(input_a[0, pad_min_length:].sum() - padding_vector_sum * (expected_mult_pad_length - pad_min_length) )
< 1e-3 )
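    # Worked example of the pad_to_multiple_of arithmetic asserted above
    # (lengths illustrative): with a longest input of 24 samples and
    # pad_to_multiple_of=10, the target length is (24 // 10 + 1) * 10 = 30,
    # while a longest input of 30 is already a multiple and stays at 30.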
def snake_case ( self , _snake_case=False ):
"""simple docstring"""
def _inputs_have_equal_length(_snake_case ):
_lowerCAmelCase = len(input[0] )
for input_slice in input[1:]:
if len(_snake_case ) != length:
return False
return True
def _inputs_are_equal(_snake_case , _snake_case ):
if len(_snake_case ) != len(_snake_case ):
return False
for input_slice_a, input_slice_a in zip(_snake_case , _snake_case ):
if not np.allclose(np.asarray(_snake_case ) , np.asarray(_snake_case ) , atol=1e-3 ):
return False
return True
_lowerCAmelCase = self.feature_extraction_class(**self.feat_extract_dict )
_lowerCAmelCase = self.feat_extract_tester.prepare_inputs_for_common(numpify=_snake_case )
_lowerCAmelCase = feat_extract.model_input_names[0]
_lowerCAmelCase = BatchFeature({input_name: speech_inputs} )
# truncate to smallest
_lowerCAmelCase = feat_extract.pad(
_snake_case , padding="""max_length""" , max_length=len(speech_inputs[0] ) , truncation=_snake_case )
_lowerCAmelCase = input_a[input_name]
_lowerCAmelCase = feat_extract.pad(_snake_case , padding="""max_length""" , max_length=len(speech_inputs[0] ) )
_lowerCAmelCase = input_a[input_name]
self.assertTrue(_inputs_have_equal_length(_snake_case ) )
self.assertFalse(_inputs_have_equal_length(_snake_case ) )
# truncate to smallest with np
_lowerCAmelCase = feat_extract.pad(
_snake_case , padding="""max_length""" , max_length=len(speech_inputs[0] ) , return_tensors="""np""" , truncation=_snake_case , )
_lowerCAmelCase = input_a[input_name]
_lowerCAmelCase = feat_extract.pad(
_snake_case , padding="""max_length""" , max_length=len(speech_inputs[0] ) , return_tensors="""np""" )
_lowerCAmelCase = input_a[input_name]
self.assertTrue(_inputs_have_equal_length(_snake_case ) )
self.assertTrue(input_a.shape[1] == len(speech_inputs[0] ) )
# since truncation forces padding to be smaller than longest input
# function can't return `np.ndarray`, but has to return list
self.assertFalse(_inputs_have_equal_length(_snake_case ) )
# truncate to middle
_lowerCAmelCase = feat_extract.pad(
_snake_case , padding="""max_length""" , max_length=len(speech_inputs[1] ) , truncation=_snake_case , return_tensors="""np""" , )
_lowerCAmelCase = input_a[input_name]
_lowerCAmelCase = feat_extract.pad(
_snake_case , padding="""max_length""" , max_length=len(speech_inputs[1] ) , truncation=_snake_case )
_lowerCAmelCase = input_a[input_name]
_lowerCAmelCase = feat_extract.pad(
_snake_case , padding="""max_length""" , max_length=len(speech_inputs[1] ) , return_tensors="""np""" )
_lowerCAmelCase = input_a[input_name]
self.assertTrue(input_a.shape[1] == len(speech_inputs[1] ) )
self.assertTrue(_inputs_have_equal_length(_snake_case ) )
self.assertTrue(_inputs_have_equal_length(_snake_case ) )
self.assertTrue(_inputs_are_equal(_snake_case , _snake_case ) )
# since truncation forces padding to be smaller than longest input
# function can't return `np.ndarray`, but has to return list
self.assertFalse(_inputs_have_equal_length(_snake_case ) )
self.assertTrue(len(input_a[-1] ) == len(speech_inputs[-1] ) )
# padding has to be max_length when setting `truncation=True`
with self.assertRaises(_snake_case ):
feat_extract.pad(_snake_case , truncation=_snake_case )[input_name]
# padding has to be max_length when setting `truncation=True`
with self.assertRaises(_snake_case ):
feat_extract.pad(_snake_case , padding="""longest""" , truncation=_snake_case )[input_name]
# padding has to be max_length when setting `truncation=True`
with self.assertRaises(_snake_case ):
feat_extract.pad(_snake_case , padding="""longest""" , truncation=_snake_case )[input_name]
# max_length parameter has to be provided when setting `truncation=True` and padding="max_length"
with self.assertRaises(_snake_case ):
feat_extract.pad(_snake_case , padding="""max_length""" , truncation=_snake_case )[input_name]
# test truncation for `pad_to_multiple_of` for List[int] + numpy
_lowerCAmelCase = 12
_lowerCAmelCase = feat_extract.pad(
_snake_case , padding="""max_length""" , max_length=len(speech_inputs[0] ) , pad_to_multiple_of=_snake_case , truncation=_snake_case , )
_lowerCAmelCase = input_a[input_name]
_lowerCAmelCase = feat_extract.pad(
_snake_case , padding="""max_length""" , max_length=len(speech_inputs[0] ) , pad_to_multiple_of=_snake_case , )
_lowerCAmelCase = input_a[input_name]
# retrieve expected_length as multiple of pad_to_multiple_of
_lowerCAmelCase = len(speech_inputs[0] )
if expected_length % pad_to_multiple_of != 0:
_lowerCAmelCase = ((len(speech_inputs[0] ) // pad_to_multiple_of) + 1) * pad_to_multiple_of
self.assertTrue(len(input_a[0] ) == expected_length )
self.assertTrue(_inputs_have_equal_length(_snake_case ) )
self.assertFalse(_inputs_have_equal_length(_snake_case ) )
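    # Editor's hedged note (added): the `assertRaises` blocks above pin down the
    # contract that `truncation=True` is only valid together with
    # padding="max_length" and an explicit `max_length`.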
def snake_case ( self ):
"""simple docstring"""
self._check_padding(numpify=_snake_case )
def snake_case ( self ):
"""simple docstring"""
self._check_padding(numpify=_snake_case )
def snake_case ( self ):
"""simple docstring"""
self._check_truncation(numpify=_snake_case )
def snake_case ( self ):
"""simple docstring"""
self._check_truncation(numpify=_snake_case )
@require_torch
def snake_case ( self ):
"""simple docstring"""
_lowerCAmelCase = self.feature_extraction_class(**self.feat_extract_dict )
_lowerCAmelCase = self.feat_extract_tester.prepare_inputs_for_common()
_lowerCAmelCase = feat_extract.model_input_names[0]
_lowerCAmelCase = BatchFeature({input_name: speech_inputs} )
_lowerCAmelCase = feat_extract.pad(_snake_case , padding="""longest""" , return_tensors="""np""" )[input_name]
_lowerCAmelCase = feat_extract.pad(_snake_case , padding="""longest""" , return_tensors="""pt""" )[input_name]
self.assertTrue(abs(input_np.astype(np.floataa ).sum() - input_pt.numpy().astype(np.floataa ).sum() ) < 1e-2 )
@require_tf
def snake_case ( self ):
"""simple docstring"""
_lowerCAmelCase = self.feature_extraction_class(**self.feat_extract_dict )
_lowerCAmelCase = self.feat_extract_tester.prepare_inputs_for_common()
_lowerCAmelCase = feat_extract.model_input_names[0]
_lowerCAmelCase = BatchFeature({input_name: speech_inputs} )
_lowerCAmelCase = feat_extract.pad(_snake_case , padding="""longest""" , return_tensors="""np""" )[input_name]
_lowerCAmelCase = feat_extract.pad(_snake_case , padding="""longest""" , return_tensors="""tf""" )[input_name]
self.assertTrue(abs(input_np.astype(np.floataa ).sum() - input_tf.numpy().astype(np.floataa ).sum() ) < 1e-2 )
def snake_case ( self ):
"""simple docstring"""
_lowerCAmelCase = self.feat_extract_dict
_lowerCAmelCase = True
_lowerCAmelCase = self.feature_extraction_class(**_snake_case )
_lowerCAmelCase = self.feat_extract_tester.prepare_inputs_for_common()
_lowerCAmelCase = [len(_snake_case ) for x in speech_inputs]
_lowerCAmelCase = feat_extract.model_input_names[0]
_lowerCAmelCase = BatchFeature({input_name: speech_inputs} )
_lowerCAmelCase = feat_extract.pad(_snake_case , padding="""longest""" , return_tensors="""np""" )
self.assertIn("""attention_mask""" , _snake_case )
self.assertListEqual(list(processed.attention_mask.shape ) , list(processed[input_name].shape[:2] ) )
self.assertListEqual(processed.attention_mask.sum(-1 ).tolist() , _snake_case )
def snake_case ( self ):
"""simple docstring"""
_lowerCAmelCase = self.feat_extract_dict
_lowerCAmelCase = True
_lowerCAmelCase = self.feature_extraction_class(**_snake_case )
_lowerCAmelCase = self.feat_extract_tester.prepare_inputs_for_common()
_lowerCAmelCase = [len(_snake_case ) for x in speech_inputs]
_lowerCAmelCase = feat_extract.model_input_names[0]
_lowerCAmelCase = BatchFeature({input_name: speech_inputs} )
_lowerCAmelCase = min(_snake_case )
_lowerCAmelCase = feat_extract.pad(
_snake_case , padding="""max_length""" , max_length=_snake_case , truncation=_snake_case , return_tensors="""np""" )
self.assertIn("""attention_mask""" , _snake_case )
self.assertListEqual(
list(processed_pad.attention_mask.shape ) , [processed_pad[input_name].shape[0], max_length] )
self.assertListEqual(
processed_pad.attention_mask[:, :max_length].sum(-1 ).tolist() , [max_length for x in speech_inputs] )
| 82 | 1 |
"""simple docstring"""
import doctest
from collections import deque
import numpy as np
class __lowerCAmelCase :
'''simple docstring'''
def __init__( self ):
__a = [2, 1, 2, -1]
__a = [1, 2, 3, 4]
def __UpperCAmelCase ( self ):
__a = len(self.first_signal )
__a = len(self.second_signal )
__a = max(_a , _a )
# create a zero matrix of max_length x max_length
__a = [[0] * max_length for i in range(_a )]
# fills the smaller signal with zeros to make both signals of same length
if length_first_signal < length_second_signal:
self.first_signal += [0] * (max_length - length_first_signal)
elif length_first_signal > length_second_signal:
self.second_signal += [0] * (max_length - length_second_signal)
for i in range(_a ):
__a = deque(self.second_signal )
rotated_signal.rotate(_a )
for j, item in enumerate(_a ):
matrix[i][j] += item
# multiply the matrix with the first signal
__a = np.matmul(np.transpose(_a ) , np.transpose(self.first_signal ) )
# rounding-off to two decimal places
return [round(_a , 2 ) for i in final_signal]
if __name__ == "__main__":
doctest.testmod()
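# Editor's hedged cross-check (added, not part of the original sample): the
# circulant-matrix product above equals circular convolution, which can also be
# computed with an FFT. For the default signals both give [10.0, 10.0, 6.0, 14.0].
def _editor_circular_convolution_fft(a, b):
    n = max(len(a), len(b))
    fa = np.fft.fft(np.pad(a, (0, n - len(a))))
    fb = np.fft.fft(np.pad(b, (0, n - len(b))))
    # pointwise product in the frequency domain == circular convolution in time
    return [round(float(x), 2) for x in np.real(np.fft.ifft(fa * fb))]
if __name__ == "__main__":
    assert _editor_circular_convolution_fft([2, 1, 2, -1], [1, 2, 3, 4]) == [10.0, 10.0, 6.0, 14.0]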
| 11 |
"""simple docstring"""
from transformers import DistilBertTokenizer, DistilBertTokenizerFast
from transformers.testing_utils import require_tokenizers, slow
from ..bert.test_tokenization_bert import BertTokenizationTest
@require_tokenizers
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
__UpperCAmelCase : Dict = DistilBertTokenizer
__UpperCAmelCase : Any = DistilBertTokenizerFast
__UpperCAmelCase : int = True
@slow
def __UpperCAmelCase ( self ):
__a = DistilBertTokenizer.from_pretrained('''distilbert-base-uncased''' )
__a = tokenizer.encode('''sequence builders''' , add_special_tokens=_a )
__a = tokenizer.encode('''multi-sequence build''' , add_special_tokens=_a )
__a = tokenizer.build_inputs_with_special_tokens(_a )
__a = tokenizer.build_inputs_with_special_tokens(_a , _a )
assert encoded_sentence == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id]
assert encoded_pair == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_a + [
tokenizer.sep_token_id
]
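# Editor's hedged note (added): the asserts above encode the BERT-style
# special-token template that DistilBERT reuses, i.e.
#   single sequence: [CLS] A [SEP]
#   sequence pair  : [CLS] A [SEP] B [SEP]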
| 11 | 1 |
"""simple docstring"""
import unittest
import numpy as np
from diffusers import LMSDiscreteScheduler, OnnxStableDiffusionInpaintPipeline
from diffusers.utils.testing_utils import (
is_onnx_available,
load_image,
nightly,
require_onnxruntime,
require_torch_gpu,
)
from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin
if is_onnx_available():
import onnxruntime as ort
class _SCREAMING_SNAKE_CASE ( OnnxPipelineTesterMixin , unittest.TestCase ):
# FIXME: add fast tests
pass
@nightly
@require_onnxruntime
@require_torch_gpu
class _SCREAMING_SNAKE_CASE ( unittest.TestCase ):
@property
def __lowerCAmelCase ( self ) -> List[Any]:
return (
"CUDAExecutionProvider",
{
"gpu_mem_limit": "15000000000", # 15GB
"arena_extend_strategy": "kSameAsRequested",
},
)
@property
def __lowerCAmelCase ( self ) -> Any:
        options = ort.SessionOptions()
        options.enable_mem_pattern = False
return options
def __lowerCAmelCase ( self ) -> List[str]:
lowerCAmelCase_ :Any = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/in_paint/overture-creations-5sI6fQgYIuo.png""" )
lowerCAmelCase_ :List[Any] = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/in_paint/overture-creations-5sI6fQgYIuo_mask.png""" )
lowerCAmelCase_ :List[Any] = OnnxStableDiffusionInpaintPipeline.from_pretrained(
"""runwayml/stable-diffusion-inpainting""" , revision="""onnx""" , safety_checker=__A , feature_extractor=__A , provider=self.gpu_provider , sess_options=self.gpu_options , )
pipe.set_progress_bar_config(disable=__A )
lowerCAmelCase_ :Dict = """A red cat sitting on a park bench"""
lowerCAmelCase_ :int = np.random.RandomState(0 )
lowerCAmelCase_ :Dict = pipe(
prompt=__A , image=__A , mask_image=__A , guidance_scale=7.5 , num_inference_steps=10 , generator=__A , output_type="""np""" , )
lowerCAmelCase_ :Dict = output.images
lowerCAmelCase_ :Tuple = images[0, 255:258, 255:258, -1]
assert images.shape == (1, 512, 512, 3)
lowerCAmelCase_ :str = np.array([0.2_5_1_4, 0.3_0_0_7, 0.3_5_1_7, 0.1_7_9_0, 0.2_3_8_2, 0.3_1_6_7, 0.1_9_4_4, 0.2_2_7_3, 0.2_4_6_4] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3
def __lowerCAmelCase ( self ) -> List[str]:
lowerCAmelCase_ :int = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/in_paint/overture-creations-5sI6fQgYIuo.png""" )
lowerCAmelCase_ :int = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/in_paint/overture-creations-5sI6fQgYIuo_mask.png""" )
lowerCAmelCase_ :List[str] = LMSDiscreteScheduler.from_pretrained(
"""runwayml/stable-diffusion-inpainting""" , subfolder="""scheduler""" , revision="""onnx""" )
lowerCAmelCase_ :str = OnnxStableDiffusionInpaintPipeline.from_pretrained(
"""runwayml/stable-diffusion-inpainting""" , revision="""onnx""" , scheduler=__A , safety_checker=__A , feature_extractor=__A , provider=self.gpu_provider , sess_options=self.gpu_options , )
pipe.set_progress_bar_config(disable=__A )
lowerCAmelCase_ :Optional[Any] = """A red cat sitting on a park bench"""
lowerCAmelCase_ :str = np.random.RandomState(0 )
lowerCAmelCase_ :Dict = pipe(
prompt=__A , image=__A , mask_image=__A , guidance_scale=7.5 , num_inference_steps=20 , generator=__A , output_type="""np""" , )
lowerCAmelCase_ :Any = output.images
lowerCAmelCase_ :Any = images[0, 255:258, 255:258, -1]
assert images.shape == (1, 512, 512, 3)
lowerCAmelCase_ :Optional[int] = np.array([0.0_0_8_6, 0.0_0_7_7, 0.0_0_8_3, 0.0_0_9_3, 0.0_1_0_7, 0.0_1_3_9, 0.0_0_9_4, 0.0_0_9_7, 0.0_1_2_5] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3
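# Editor's hedged note (added): the provider tuple in `gpu_provider` above is
# the standard onnxruntime (name, options) form; "gpu_mem_limit" is expressed
# in bytes, so the value caps the CUDA arena at roughly 15 GB as the inline
# comment says.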
| 84 |
"""simple docstring"""
red = 0 # The first color of the flag.
white = 1 # The second color of the flag.
blue = 2 # The third color of the flag.
colors = (red, white, blue)
def lowercase (sequence : list ) -> list:
    if not sequence:
        return []
    if len(sequence ) == 1:
        return list(sequence )
    low = 0
    high = len(sequence ) - 1
    mid = 0
    while mid <= high:
        if sequence[mid] == colors[0]:
            sequence[low] , sequence[mid] = sequence[mid] , sequence[low]
            low += 1
            mid += 1
        elif sequence[mid] == colors[1]:
            mid += 1
        elif sequence[mid] == colors[2]:
            sequence[mid] , sequence[high] = sequence[high] , sequence[mid]
            high -= 1
        else:
            msg = F'The elements inside the sequence must contain only {colors} values'
            raise ValueError(msg )
    return sequence
if __name__ == "__main__":
import doctest
doctest.testmod()
    user_input = input('''Enter numbers separated by commas:\n''').strip()
    unsorted = [int(item.strip()) for item in user_input.split(''',''')]
    print(f'''{lowercase(unsorted)}''')
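    # Editor's hedged check (added): sanity-test the 3-way partition above on a
    # fixed list; names follow the obfuscated definitions in this sample.
    assert lowercase([2, 1, 0, 0, 1, 2]) == [0, 0, 1, 1, 2, 2]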
| 113 | 0 |
from abc import ABC, abstractmethod
from argparse import ArgumentParser
class lowerCAmelCase_ ( ABC ):
    @staticmethod
    @abstractmethod
    def register_subcommand ( parser: ArgumentParser ):
        raise NotImplementedError()
    @abstractmethod
    def run ( self ):
        raise NotImplementedError()
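# Editor's hedged sketch (added): a minimal concrete command built on the
# abstract base above; the subcommand name and wiring are illustrative
# assumptions, not taken from the original file.
# class PingCommand(lowerCAmelCase_):
#     @staticmethod
#     def register_subcommand(parser):
#         sub = parser.add_parser("ping")
#         sub.set_defaults(func=lambda args: PingCommand())
#     def run(self):
#         print("pong")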
| 371 |
import argparse
import shlex
import runhouse as rh
if __name__ == "__main__":
# Refer to https://runhouse-docs.readthedocs-hosted.com/en/latest/api/python/cluster.html#hardware-setup for cloud access
# setup instructions, if using on-demand hardware
# If user passes --user <user> --host <host> --key_path <key_path> <example> <args>, fill them in as BYO cluster
# If user passes --instance <instance> --provider <provider> <example> <args>, fill them in as on-demand cluster
# Throw an error if user passes both BYO and on-demand cluster args
# Otherwise, use default values
__UpperCAmelCase = argparse.ArgumentParser()
parser.add_argument('''--user''', type=str, default='''ubuntu''')
parser.add_argument('''--host''', type=str, default='''localhost''')
parser.add_argument('''--key_path''', type=str, default=None)
parser.add_argument('''--instance''', type=str, default='''V100:1''')
parser.add_argument('''--provider''', type=str, default='''cheapest''')
parser.add_argument('''--use_spot''', type=bool, default=False)
parser.add_argument('''--example''', type=str, default='''pytorch/text-generation/run_generation.py''')
__UpperCAmelCase , __UpperCAmelCase = parser.parse_known_args()
if args.host != "localhost":
if args.instance != "V100:1" or args.provider != "cheapest":
raise ValueError('''Cannot specify both BYO and on-demand cluster args''')
__UpperCAmelCase = rh.cluster(
name='''rh-cluster''', ips=[args.host], ssh_creds={'''ssh_user''': args.user, '''ssh_private_key''': args.key_path}
)
else:
__UpperCAmelCase = rh.cluster(
name='''rh-cluster''', instance_type=args.instance, provider=args.provider, use_spot=args.use_spot
)
__UpperCAmelCase = args.example.rsplit('''/''', 1)[0]
# Set up remote environment
cluster.install_packages(['''pip:./''']) # Installs transformers from local source
# Note transformers is copied into the home directory on the remote machine, so we can install from there
cluster.run([F"""pip install -r transformers/examples/{example_dir}/requirements.txt"""])
cluster.run(['''pip install torch --upgrade --extra-index-url https://download.pytorch.org/whl/cu117'''])
# Run example. You can bypass the CLI wrapper and paste your own code here.
cluster.run([F"""python transformers/examples/{args.example} {" ".join(shlex.quote(arg) for arg in unknown)}"""])
# Alternatively, we can just import and run a training function (especially if there's no wrapper CLI):
# from my_script... import train
# reqs = ['pip:./', 'torch', 'datasets', 'accelerate', 'evaluate', 'tqdm', 'scipy', 'scikit-learn', 'tensorboard']
# launch_train_gpu = rh.function(fn=train,
# system=gpu,
# reqs=reqs,
# name='train_bert_glue')
#
# We can pass in arguments just like we would to a function:
# launch_train_gpu(num_epochs = 3, lr = 2e-5, seed = 42, batch_size = 16
# stream_logs=True)
| 103 | 0 |
"""simple docstring"""
def lowerCamelCase ( n : int = 4_0_0_0_0_0_0 ) -> int:
    '''simple docstring'''
    fib = [0, 1]
    i = 0
    while fib[i] <= n:
        fib.append(fib[i] + fib[i + 1] )
        if fib[i + 2] > n:
            break
        i += 1
    total = 0
    for j in range(len(fib ) - 1 ):
        if fib[j] % 2 == 0:
            total += fib[j]
    return total
if __name__ == "__main__":
print(F"{solution() = }")
| 115 |
"""simple docstring"""
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import (
ImageTextPipelineOutput,
UniDiffuserPipeline,
)
else:
from .modeling_text_decoder import UniDiffuserTextDecoder
from .modeling_uvit import UniDiffuserModel, UTransformeraDModel
from .pipeline_unidiffuser import ImageTextPipelineOutput, UniDiffuserPipeline
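# Editor's hedged usage note (added): downstream code would typically do
#   from diffusers import UniDiffuserPipeline
#   pipe = UniDiffuserPipeline.from_pretrained("thu-ml/unidiffuser-v1")
# where the checkpoint id is an assumption, not something this file pins down.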
| 115 | 1 |
"""simple docstring"""
import gc
import random
import tempfile
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMInverseScheduler,
DDIMScheduler,
DPMSolverMultistepInverseScheduler,
DPMSolverMultistepScheduler,
StableDiffusionDiffEditPipeline,
UNetaDConditionModel,
)
from diffusers.utils import load_image, slow
from diffusers.utils.testing_utils import enable_full_determinism, floats_tensor, require_torch_gpu, torch_device
from ..pipeline_params import TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class _UpperCAmelCase ( PipelineLatentTesterMixin ,PipelineTesterMixin ,unittest.TestCase ):
'''simple docstring'''
a__ =StableDiffusionDiffEditPipeline
a__ =TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {'''height''', '''width''', '''image'''} | {'''image_latents'''}
a__ =TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS - {'''image'''} | {'''image_latents'''}
a__ =frozenset(
[] ) # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
a__ =frozenset([] )
def __lowerCAmelCase ( self ) -> Optional[Any]:
torch.manual_seed(0 )
_UpperCAmelCase : Optional[int] = UNetaDConditionModel(
block_out_channels=(3_2, 6_4) , layers_per_block=2 , sample_size=3_2 , in_channels=4 , out_channels=4 , down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''') , up_block_types=('''CrossAttnUpBlock2D''', '''UpBlock2D''') , cross_attention_dim=3_2 , attention_head_dim=(2, 4) , use_linear_projection=_a , )
_UpperCAmelCase : Any = DDIMScheduler(
beta_start=0.00_085 , beta_end=0.012 , beta_schedule='''scaled_linear''' , clip_sample=_a , set_alpha_to_one=_a , )
_UpperCAmelCase : int = DDIMInverseScheduler(
beta_start=0.00_085 , beta_end=0.012 , beta_schedule='''scaled_linear''' , clip_sample=_a , set_alpha_to_zero=_a , )
torch.manual_seed(0 )
_UpperCAmelCase : Dict = AutoencoderKL(
block_out_channels=[3_2, 6_4] , in_channels=3 , out_channels=3 , down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''] , up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] , latent_channels=4 , sample_size=1_2_8 , )
torch.manual_seed(0 )
_UpperCAmelCase : Tuple = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=3_2 , intermediate_size=3_7 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_0_0_0 , hidden_act='''gelu''' , projection_dim=5_1_2 , )
_UpperCAmelCase : List[Any] = CLIPTextModel(_a )
_UpperCAmelCase : Optional[Any] = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
_UpperCAmelCase : str = {
'''unet''': unet,
'''scheduler''': scheduler,
'''inverse_scheduler''': inverse_scheduler,
'''vae''': vae,
'''text_encoder''': text_encoder,
'''tokenizer''': tokenizer,
'''safety_checker''': None,
'''feature_extractor''': None,
}
return components
def __lowerCAmelCase ( self , A , A=0 ) -> Union[str, Any]:
_UpperCAmelCase : List[Any] = floats_tensor((1, 1_6, 1_6) , rng=random.Random(_a ) ).to(_a )
_UpperCAmelCase : int = floats_tensor((1, 2, 4, 1_6, 1_6) , rng=random.Random(_a ) ).to(_a )
if str(_a ).startswith('''mps''' ):
_UpperCAmelCase : str = torch.manual_seed(_a )
else:
_UpperCAmelCase : Tuple = torch.Generator(device=_a ).manual_seed(_a )
_UpperCAmelCase : str = {
'''prompt''': '''a dog and a newt''',
'''mask_image''': mask,
'''image_latents''': latents,
'''generator''': generator,
'''num_inference_steps''': 2,
'''inpaint_strength''': 1.0,
'''guidance_scale''': 6.0,
'''output_type''': '''numpy''',
}
return inputs
def __lowerCAmelCase ( self , A , A=0 ) -> Any:
_UpperCAmelCase : int = floats_tensor((1, 3, 3_2, 3_2) , rng=random.Random(_a ) ).to(_a )
_UpperCAmelCase : List[str] = image.cpu().permute(0 , 2 , 3 , 1 )[0]
_UpperCAmelCase : List[str] = Image.fromarray(np.uinta(_a ) ).convert('''RGB''' )
if str(_a ).startswith('''mps''' ):
_UpperCAmelCase : str = torch.manual_seed(_a )
else:
_UpperCAmelCase : Tuple = torch.Generator(device=_a ).manual_seed(_a )
_UpperCAmelCase : Any = {
'''image''': image,
'''source_prompt''': '''a cat and a frog''',
'''target_prompt''': '''a dog and a newt''',
'''generator''': generator,
'''num_inference_steps''': 2,
'''num_maps_per_mask''': 2,
'''mask_encode_strength''': 1.0,
'''guidance_scale''': 6.0,
'''output_type''': '''numpy''',
}
return inputs
def __lowerCAmelCase ( self , A , A=0 ) -> Any:
_UpperCAmelCase : Tuple = floats_tensor((1, 3, 3_2, 3_2) , rng=random.Random(_a ) ).to(_a )
_UpperCAmelCase : Any = image.cpu().permute(0 , 2 , 3 , 1 )[0]
_UpperCAmelCase : str = Image.fromarray(np.uinta(_a ) ).convert('''RGB''' )
if str(_a ).startswith('''mps''' ):
_UpperCAmelCase : Tuple = torch.manual_seed(_a )
else:
_UpperCAmelCase : Optional[Any] = torch.Generator(device=_a ).manual_seed(_a )
_UpperCAmelCase : Union[str, Any] = {
'''image''': image,
'''prompt''': '''a cat and a frog''',
'''generator''': generator,
'''num_inference_steps''': 2,
'''inpaint_strength''': 1.0,
'''guidance_scale''': 6.0,
'''decode_latents''': True,
'''output_type''': '''numpy''',
}
return inputs
def __lowerCAmelCase ( self ) -> str:
if not hasattr(self.pipeline_class , '''_optional_components''' ):
return
_UpperCAmelCase : Optional[Any] = self.get_dummy_components()
_UpperCAmelCase : str = self.pipeline_class(**_a )
pipe.to(_a )
pipe.set_progress_bar_config(disable=_a )
# set all optional components to None and update pipeline config accordingly
for optional_component in pipe._optional_components:
setattr(_a , _a , _a )
pipe.register_modules(**{optional_component: None for optional_component in pipe._optional_components} )
_UpperCAmelCase : List[str] = self.get_dummy_inputs(_a )
_UpperCAmelCase : Any = pipe(**_a )[0]
with tempfile.TemporaryDirectory() as tmpdir:
pipe.save_pretrained(_a )
_UpperCAmelCase : Any = self.pipeline_class.from_pretrained(_a )
pipe_loaded.to(_a )
pipe_loaded.set_progress_bar_config(disable=_a )
for optional_component in pipe._optional_components:
self.assertTrue(
getattr(_a , _a ) is None , f'`{optional_component}` did not stay set to None after loading.' , )
_UpperCAmelCase : Any = self.get_dummy_inputs(_a )
_UpperCAmelCase : Dict = pipe_loaded(**_a )[0]
_UpperCAmelCase : Optional[Any] = np.abs(output - output_loaded ).max()
self.assertLess(_a , 1E-4 )
def __lowerCAmelCase ( self ) -> Any:
_UpperCAmelCase : Tuple = '''cpu'''
_UpperCAmelCase : Union[str, Any] = self.get_dummy_components()
_UpperCAmelCase : Tuple = self.pipeline_class(**_a )
pipe.to(_a )
pipe.set_progress_bar_config(disable=_a )
_UpperCAmelCase : List[Any] = self.get_dummy_mask_inputs(_a )
_UpperCAmelCase : Union[str, Any] = pipe.generate_mask(**_a )
_UpperCAmelCase : Optional[int] = mask[0, -3:, -3:]
self.assertEqual(mask.shape , (1, 1_6, 1_6) )
_UpperCAmelCase : str = np.array([0] * 9 )
_UpperCAmelCase : List[str] = np.abs(mask_slice.flatten() - expected_slice ).max()
self.assertLessEqual(_a , 1E-3 )
self.assertEqual(mask[0, -3, -4] , 0 )
def __lowerCAmelCase ( self ) -> Union[str, Any]:
_UpperCAmelCase : Tuple = '''cpu'''
_UpperCAmelCase : List[str] = self.get_dummy_components()
_UpperCAmelCase : List[str] = self.pipeline_class(**_a )
pipe.to(_a )
pipe.set_progress_bar_config(disable=_a )
_UpperCAmelCase : Union[str, Any] = self.get_dummy_inversion_inputs(_a )
_UpperCAmelCase : Any = pipe.invert(**_a ).images
_UpperCAmelCase : Optional[int] = image[0, -1, -3:, -3:]
self.assertEqual(image.shape , (2, 3_2, 3_2, 3) )
_UpperCAmelCase : Tuple = np.array(
[0.5_150, 0.5_134, 0.5_043, 0.5_376, 0.4_694, 0.51_050, 0.5_015, 0.4_407, 0.4_799] , )
_UpperCAmelCase : int = np.abs(image_slice.flatten() - expected_slice ).max()
self.assertLessEqual(_a , 1E-3 )
def __lowerCAmelCase ( self ) -> List[str]:
super().test_inference_batch_single_identical(expected_max_diff=5E-3 )
def __lowerCAmelCase ( self ) -> Optional[int]:
_UpperCAmelCase : Optional[int] = '''cpu'''
_UpperCAmelCase : Union[str, Any] = self.get_dummy_components()
_UpperCAmelCase : Dict = {'''beta_start''': 0.00_085, '''beta_end''': 0.012, '''beta_schedule''': '''scaled_linear'''}
_UpperCAmelCase : Union[str, Any] = DPMSolverMultistepScheduler(**_a )
_UpperCAmelCase : Dict = DPMSolverMultistepInverseScheduler(**_a )
_UpperCAmelCase : Any = self.pipeline_class(**_a )
pipe.to(_a )
pipe.set_progress_bar_config(disable=_a )
_UpperCAmelCase : Optional[Any] = self.get_dummy_inversion_inputs(_a )
_UpperCAmelCase : Optional[int] = pipe.invert(**_a ).images
_UpperCAmelCase : str = image[0, -1, -3:, -3:]
self.assertEqual(image.shape , (2, 3_2, 3_2, 3) )
_UpperCAmelCase : List[Any] = np.array(
[0.5_150, 0.5_134, 0.5_043, 0.5_376, 0.4_694, 0.51_050, 0.5_015, 0.4_407, 0.4_799] , )
_UpperCAmelCase : List[Any] = np.abs(image_slice.flatten() - expected_slice ).max()
self.assertLessEqual(_a , 1E-3 )
@require_torch_gpu
@slow
class _UpperCAmelCase ( unittest.TestCase ):
'''simple docstring'''
def __lowerCAmelCase ( self ) -> Dict:
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@classmethod
def __lowerCAmelCase ( cls ) -> Optional[int]:
_UpperCAmelCase : List[str] = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/diffedit/fruit.png''' )
_UpperCAmelCase : Dict = raw_image.convert('''RGB''' ).resize((7_6_8, 7_6_8) )
_UpperCAmelCase : Optional[Any] = raw_image
def __lowerCAmelCase ( self ) -> int:
_UpperCAmelCase : str = torch.manual_seed(0 )
_UpperCAmelCase : List[Any] = StableDiffusionDiffEditPipeline.from_pretrained(
'''stabilityai/stable-diffusion-2-1''' , safety_checker=_a , torch_dtype=torch.floataa )
_UpperCAmelCase : Tuple = DDIMScheduler.from_config(pipe.scheduler.config )
_UpperCAmelCase : Any = DDIMInverseScheduler.from_config(pipe.scheduler.config )
pipe.enable_model_cpu_offload()
pipe.set_progress_bar_config(disable=_a )
_UpperCAmelCase : Union[str, Any] = '''a bowl of fruit'''
_UpperCAmelCase : Optional[Any] = '''a bowl of pears'''
_UpperCAmelCase : List[str] = pipe.generate_mask(
image=self.raw_image , source_prompt=_a , target_prompt=_a , generator=_a , )
_UpperCAmelCase : List[str] = pipe.invert(
prompt=_a , image=self.raw_image , inpaint_strength=0.7 , generator=_a ).latents
_UpperCAmelCase : Optional[int] = pipe(
prompt=_a , mask_image=_a , image_latents=_a , generator=_a , negative_prompt=_a , inpaint_strength=0.7 , output_type='''numpy''' , ).images[0]
_UpperCAmelCase : List[str] = (
np.array(
load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/diffedit/pears.png''' ).resize((7_6_8, 7_6_8) ) )
/ 2_5_5
)
assert np.abs((expected_image - image).max() ) < 5E-1
def __lowerCAmelCase ( self ) -> Optional[int]:
_UpperCAmelCase : Tuple = torch.manual_seed(0 )
_UpperCAmelCase : int = StableDiffusionDiffEditPipeline.from_pretrained(
'''stabilityai/stable-diffusion-2-1''' , safety_checker=_a , torch_dtype=torch.floataa )
_UpperCAmelCase : str = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config )
_UpperCAmelCase : str = DPMSolverMultistepInverseScheduler.from_config(pipe.scheduler.config )
pipe.enable_model_cpu_offload()
pipe.set_progress_bar_config(disable=_a )
_UpperCAmelCase : List[str] = '''a bowl of fruit'''
_UpperCAmelCase : List[str] = '''a bowl of pears'''
_UpperCAmelCase : int = pipe.generate_mask(
image=self.raw_image , source_prompt=_a , target_prompt=_a , generator=_a , )
_UpperCAmelCase : Tuple = pipe.invert(
prompt=_a , image=self.raw_image , inpaint_strength=0.7 , generator=_a , num_inference_steps=2_5 , ).latents
_UpperCAmelCase : Optional[Any] = pipe(
prompt=_a , mask_image=_a , image_latents=_a , generator=_a , negative_prompt=_a , inpaint_strength=0.7 , num_inference_steps=2_5 , output_type='''numpy''' , ).images[0]
_UpperCAmelCase : Optional[Any] = (
np.array(
load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/diffedit/pears.png''' ).resize((7_6_8, 7_6_8) ) )
/ 2_5_5
)
assert np.abs((expected_image - image).max() ) < 5E-1
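# Editor's hedged summary sketch (added): the three-stage DiffEdit flow the
# tests above exercise, with argument names mirrored from those calls:
#   mask    = pipe.generate_mask(image=img, source_prompt=src, target_prompt=tgt)
#   latents = pipe.invert(prompt=src, image=img, inpaint_strength=0.7).latents
#   edited  = pipe(prompt=tgt, mask_image=mask, image_latents=latents).images[0]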
| 350 |
"""simple docstring"""
import os
import unittest
from transformers.models.transfo_xl.tokenization_transfo_xl import VOCAB_FILES_NAMES, TransfoXLTokenizer
from ...test_tokenization_common import TokenizerTesterMixin
class _UpperCAmelCase ( TokenizerTesterMixin ,unittest.TestCase ):
'''simple docstring'''
a__ =TransfoXLTokenizer
a__ =False
a__ =False
def __lowerCAmelCase ( self ) -> List[str]:
super().setUp()
_UpperCAmelCase : Dict = [
'''<unk>''',
'''[CLS]''',
'''[SEP]''',
'''want''',
'''unwanted''',
'''wa''',
'''un''',
'''running''',
''',''',
'''low''',
'''l''',
]
_UpperCAmelCase : Tuple = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer:
vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) )
def __lowerCAmelCase ( self , **A ) -> Dict:
_UpperCAmelCase : Union[str, Any] = True
return TransfoXLTokenizer.from_pretrained(self.tmpdirname , **A )
def __lowerCAmelCase ( self , A ) -> str:
_UpperCAmelCase : str = '''<unk> UNwanted , running'''
_UpperCAmelCase : Union[str, Any] = '''<unk> unwanted, running'''
return input_text, output_text
def __lowerCAmelCase ( self ) -> List[str]:
_UpperCAmelCase : Any = TransfoXLTokenizer(vocab_file=self.vocab_file , lower_case=A )
_UpperCAmelCase : Union[str, Any] = tokenizer.tokenize('''<unk> UNwanted , running''' )
self.assertListEqual(A , ['''<unk>''', '''unwanted''', ''',''', '''running'''] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(A ) , [0, 4, 8, 7] )
def __lowerCAmelCase ( self ) -> str:
_UpperCAmelCase : str = TransfoXLTokenizer(lower_case=A )
self.assertListEqual(
tokenizer.tokenize(''' \tHeLLo ! how \n Are yoU ? ''' ) , ['''hello''', '''!''', '''how''', '''are''', '''you''', '''?'''] )
def __lowerCAmelCase ( self ) -> Union[str, Any]:
_UpperCAmelCase : Tuple = TransfoXLTokenizer(lower_case=A )
self.assertListEqual(
tokenizer.tokenize(''' \tHeLLo ! how \n Are yoU ? ''' ) , ['''HeLLo''', '''!''', '''how''', '''Are''', '''yoU''', '''?'''] )
def __lowerCAmelCase ( self ) -> int:
_UpperCAmelCase : Tuple = TransfoXLTokenizer(lower_case=A )
_UpperCAmelCase : Optional[Any] = '''Hello (bracket) and side-scrolled [and] Henry\'s $5,000 with 3.34 m. What\'s up!?'''
_UpperCAmelCase : Optional[Any] = [
'''Hello''',
'''(''',
'''bracket''',
''')''',
'''and''',
'''side''',
'''@-@''',
'''scrolled''',
'''[''',
'''and''',
''']''',
'''Henry''',
'''\'s''',
'''$''',
'''5''',
'''@,@''',
'''000''',
'''with''',
'''3''',
'''@.@''',
'''34''',
'''m''',
'''.''',
'''What''',
'''\'s''',
'''up''',
'''!''',
'''?''',
]
self.assertListEqual(tokenizer.tokenize(A ) , A )
self.assertEqual(tokenizer.convert_tokens_to_string(A ) , A )
def __lowerCAmelCase ( self ) -> Optional[int]:
_UpperCAmelCase : str = self.get_tokenizer()
_UpperCAmelCase : List[Any] = len(A )
tokenizer.add_tokens(['''new1''', '''new2'''] )
tokenizer.move_added_token('''new1''' , 1 )
# Check that moved token is not copied (duplicate)
self.assertEqual(len(A ) , original_len + 2 )
# Check that token is moved to specified id
self.assertEqual(tokenizer.encode('''new1''' ) , [1] )
self.assertEqual(tokenizer.decode([1] ) , '''new1''' )
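# Editor's hedged note (added): `move_added_token` re-slots an added token at a
# chosen vocabulary index without duplicating it, which is why the last test
# checks both the new length (original_len + 2) and that "new1" encodes to [1].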
| 68 | 0 |
'''simple docstring'''
import jax.numpy as jnp
from ...utils import logging
from ..ta.modeling_flax_ta import FlaxTaEncoderModel, FlaxTaForConditionalGeneration, FlaxTaModel
from .configuration_mta import MTaConfig
lowerCamelCase_ = logging.get_logger(__name__)
_CONFIG_FOR_DOC = '''T5Config'''
def __lowercase ( input_ids , pad_token_id , decoder_start_token_id ) -> jnp.ndarray:
    '''simple docstring'''
    shifted_input_ids = jnp.zeros_like(input_ids )
    shifted_input_ids = shifted_input_ids.at[:, 1:].set(input_ids[:, :-1] )
    shifted_input_ids = shifted_input_ids.at[:, 0].set(decoder_start_token_id )
    shifted_input_ids = jnp.where(shifted_input_ids == -100 , pad_token_id , shifted_input_ids )
    return shifted_input_ids
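# Editor's hedged worked example (added): shifting [[5, -100, 6]] with
# pad_token_id=0 and decoder_start_token_id=1 gives [[1, 5, 0]] -- the
# sequence moves one slot right, the start token fills position 0, and the
# -100 label-ignore value is replaced by the pad token.
#   __lowercase(jnp.array([[5, -100, 6]]), 0, 1)  # -> [[1, 5, 0]]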
class FlaxMTaModel ( FlaxTaModel ):
    """simple docstring"""
    model_type = '''mt5'''
    config_class = MTaConfig
class FlaxMTaEncoderModel ( FlaxTaEncoderModel ):
    """simple docstring"""
    model_type = '''mt5'''
    config_class = MTaConfig
class FlaxMTaForConditionalGeneration ( FlaxTaForConditionalGeneration ):
    """simple docstring"""
    model_type = '''mt5'''
    config_class = MTaConfig
| 79 |
'''simple docstring'''
import comet # From: unbabel-comet
import torch
import datasets
lowerCamelCase_ = datasets.logging.get_logger(__name__)
_CITATION = '''\
@inproceedings{rei-EtAl:2020:WMT,
author = {Rei, Ricardo and Stewart, Craig and Farinha, Ana C and Lavie, Alon},
title = {Unbabel\'s Participation in the WMT20 Metrics Shared Task},
booktitle = {Proceedings of the Fifth Conference on Machine Translation},
month = {November},
year = {2020},
address = {Online},
publisher = {Association for Computational Linguistics},
pages = {909--918},
}
@inproceedings{rei-etal-2020-comet,
title = "{COMET}: A Neural Framework for {MT} Evaluation",
author = "Rei, Ricardo and
Stewart, Craig and
Farinha, Ana C and
Lavie, Alon",
booktitle = "Proceedings of the 2020 Conference on Empirical Methods in Natural Language Processing (EMNLP)",
month = nov,
year = "2020",
address = "Online",
publisher = "Association for Computational Linguistics",
url = "https://www.aclweb.org/anthology/2020.emnlp-main.213",
pages = "2685--2702",
}
'''
_DESCRIPTION = '''\
Crosslingual Optimized Metric for Evaluation of Translation (COMET) is an open-source framework used to train Machine Translation metrics that achieve high levels of correlation with different types of human judgments (HTER, DA\'s or MQM).
With the release of the framework the authors also released fully trained models that were used to compete in the WMT20 Metrics Shared Task achieving SOTA in that years competition.
See the [README.md] file at https://unbabel.github.io/COMET/html/models.html for more information.
'''
_KWARGS_DESCRIPTION = '''
COMET score.
Args:
`sources` (list of str): Source sentences
`predictions` (list of str): candidate translations
`references` (list of str): reference translations
`cuda` (bool): If set to True, runs COMET using GPU
`show_progress` (bool): Shows progress
`model`: COMET model to be used. Will default to `wmt-large-da-estimator-1719` if None.
Returns:
`samples`: List of dictionaries with `src`, `mt`, `ref` and `score`.
`scores`: List of scores.
Examples:
>>> comet_metric = datasets.load_metric(\'comet\')
>>> # comet_metric = load_metric(\'comet\', \'wmt20-comet-da\') # you can also choose which model to use
>>> source = ["Dem Feuer konnte Einhalt geboten werden", "Schulen und Kindergärten wurden eröffnet."]
>>> hypothesis = ["The fire could be stopped", "Schools and kindergartens were open"]
>>> reference = ["They were able to control the fire.", "Schools and kindergartens opened"]
>>> results = comet_metric.compute(predictions=hypothesis, references=reference, sources=source)
>>> print([round(v, 2) for v in results["scores"]])
[0.19, 0.92]
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class _UpperCAmelCase ( datasets.Metric ):
"""simple docstring"""
    def _info ( self ):
'''simple docstring'''
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , homepage="https://unbabel.github.io/COMET/html/index.html" , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"sources": datasets.Value("string" , id="sequence" ),
"predictions": datasets.Value("string" , id="sequence" ),
"references": datasets.Value("string" , id="sequence" ),
} ) , codebase_urls=["https://github.com/Unbabel/COMET"] , reference_urls=[
"https://github.com/Unbabel/COMET",
"https://www.aclweb.org/anthology/2020.emnlp-main.213/",
"http://www.statmt.org/wmt20/pdf/2020.wmt-1.101.pdf6",
] , )
    def _download_and_prepare ( self , dl_manager ):
        '''simple docstring'''
        if self.config_name == "default":
            self.scorer = comet.load_from_checkpoint(comet.download_model("wmt20-comet-da" ) )
        else:
            self.scorer = comet.load_from_checkpoint(comet.download_model(self.config_name ) )
    def _compute ( self , sources , predictions , references , gpus=None , progress_bar=False ):
        '''simple docstring'''
        if gpus is None:
            gpus = 1 if torch.cuda.is_available() else 0
        data = {"src": sources, "mt": predictions, "ref": references}
        data = [dict(zip(data , t ) ) for t in zip(*data.values() )]
        scores, mean_score = self.scorer.predict(data , gpus=gpus , progress_bar=progress_bar )
        return {"mean_score": mean_score, "scores": scores}
| 79 | 1 |
import os
from typing import List, Optional, Union
from ...image_processing_utils import BatchFeature
from ...image_utils import ImageInput
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
from ..auto import AutoTokenizer
class _lowerCAmelCase( ProcessorMixin ):
    """simple docstring"""
    attributes =['''image_processor''', '''tokenizer''']
    image_processor_class ='''BlipImageProcessor'''
    tokenizer_class ='''AutoTokenizer'''
    def __init__( self , image_processor , tokenizer , qformer_tokenizer ):
        super().__init__(image_processor , tokenizer )
        # add QFormer tokenizer
        self.qformer_tokenizer = qformer_tokenizer
    def __call__( self , images = None , text = None , add_special_tokens = True , padding = False , truncation = None , max_length = None , stride = 0 , pad_to_multiple_of = None , return_attention_mask = None , return_overflowing_tokens = False , return_special_tokens_mask = False , return_offsets_mapping = False , return_token_type_ids = False , return_length = False , verbose = True , return_tensors = None , **kwargs , ):
        if images is None and text is None:
            raise ValueError('You have to specify at least images or text.' )
        encoding = BatchFeature()
        if text is not None:
            text_encoding = self.tokenizer(
                text=text , add_special_tokens=add_special_tokens , padding=padding , truncation=truncation , max_length=max_length , stride=stride , pad_to_multiple_of=pad_to_multiple_of , return_attention_mask=return_attention_mask , return_overflowing_tokens=return_overflowing_tokens , return_special_tokens_mask=return_special_tokens_mask , return_offsets_mapping=return_offsets_mapping , return_token_type_ids=return_token_type_ids , return_length=return_length , verbose=verbose , return_tensors=return_tensors , **kwargs , )
            encoding.update(text_encoding )
            qformer_text_encoding = self.qformer_tokenizer(
                text=text , add_special_tokens=add_special_tokens , padding=padding , truncation=truncation , max_length=max_length , stride=stride , pad_to_multiple_of=pad_to_multiple_of , return_attention_mask=return_attention_mask , return_overflowing_tokens=return_overflowing_tokens , return_special_tokens_mask=return_special_tokens_mask , return_offsets_mapping=return_offsets_mapping , return_token_type_ids=return_token_type_ids , return_length=return_length , verbose=verbose , return_tensors=return_tensors , **kwargs , )
            encoding['qformer_input_ids'] = qformer_text_encoding.pop('input_ids' )
            encoding['qformer_attention_mask'] = qformer_text_encoding.pop('attention_mask' )
        if images is not None:
            image_encoding = self.image_processor(images , return_tensors=return_tensors )
            encoding.update(image_encoding )
        return encoding
    def batch_decode ( self , *args , **kwargs ):
        return self.tokenizer.batch_decode(*args , **kwargs )
    def decode ( self , *args , **kwargs ):
        return self.tokenizer.decode(*args , **kwargs )
    @property
    # Copied from transformers.models.blip.processing_blip.BlipProcessor.model_input_names
    def model_input_names ( self ):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
    def save_pretrained ( self , save_directory , **kwargs ):
        if os.path.isfile(save_directory ):
            raise ValueError(f'''Provided path ({save_directory}) should be a directory, not a file''' )
        os.makedirs(save_directory , exist_ok=True )
        qformer_tokenizer_path = os.path.join(save_directory , 'qformer_tokenizer' )
        self.qformer_tokenizer.save_pretrained(qformer_tokenizer_path )
        return super().save_pretrained(save_directory , **kwargs )
    @classmethod
    def from_pretrained ( cls , pretrained_model_name_or_path , **kwargs ):
        qformer_tokenizer = AutoTokenizer.from_pretrained(pretrained_model_name_or_path , subfolder='qformer_tokenizer' )
        args = cls._get_arguments_from_pretrained(pretrained_model_name_or_path , **kwargs )
        args.append(qformer_tokenizer )
        return cls(*args )
| 362 |
import os
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from huggingface_hub.file_download import http_get
from requests.exceptions import HTTPError
from transformers import (
AlbertTokenizer,
AutoTokenizer,
BertTokenizer,
BertTokenizerFast,
GPTaTokenizerFast,
is_tokenizers_available,
)
from transformers.testing_utils import TOKEN, USER, is_staging_test, require_tokenizers
from transformers.tokenization_utils import Trie
sys.path.append(str(Path(__file__).parent.parent / 'utils'))
from test_module.custom_tokenization import CustomTokenizer # noqa E402
if is_tokenizers_available():
from test_module.custom_tokenization_fast import CustomTokenizerFast
class _lowerCAmelCase( unittest.TestCase ):
"""simple docstring"""
def _a ( self ):
# A mock response for an HTTP head request to emulate server down
UpperCamelCase_: Any = mock.Mock()
UpperCamelCase_: Dict = 5_0_0
UpperCamelCase_: Any = {}
UpperCamelCase_: Tuple = HTTPError
UpperCamelCase_: List[str] = {}
# Download this model to make sure it's in the cache.
UpperCamelCase_: int = BertTokenizer.from_pretrained('hf-internal-testing/tiny-random-bert' )
# Under the mock environment we get a 500 error when trying to reach the tokenizer.
with mock.patch('requests.Session.request' , return_value=_lowerCamelCase ) as mock_head:
UpperCamelCase_: Optional[int] = BertTokenizer.from_pretrained('hf-internal-testing/tiny-random-bert' )
# This check we did call the fake head request
mock_head.assert_called()
@require_tokenizers
def _a ( self ):
# A mock response for an HTTP head request to emulate server down
UpperCamelCase_: Union[str, Any] = mock.Mock()
UpperCamelCase_: Union[str, Any] = 5_0_0
UpperCamelCase_: str = {}
UpperCamelCase_: List[str] = HTTPError
UpperCamelCase_: Optional[int] = {}
# Download this model to make sure it's in the cache.
UpperCamelCase_: List[str] = GPTaTokenizerFast.from_pretrained('gpt2' )
# Under the mock environment we get a 500 error when trying to reach the tokenizer.
with mock.patch('requests.Session.request' , return_value=_lowerCamelCase ) as mock_head:
UpperCamelCase_: str = GPTaTokenizerFast.from_pretrained('gpt2' )
# This check we did call the fake head request
mock_head.assert_called()
def _a ( self ):
# This test is for deprecated behavior and can be removed in v5
try:
UpperCamelCase_: Optional[int] = tempfile.mktemp()
with open(_lowerCamelCase , 'wb' ) as f:
http_get('https://huggingface.co/albert-base-v1/resolve/main/spiece.model' , _lowerCamelCase )
UpperCamelCase_: Tuple = AlbertTokenizer.from_pretrained(_lowerCamelCase )
finally:
os.remove(_lowerCamelCase )
# Supporting this legacy load introduced a weird bug where the tokenizer would load local files if they are in
# the current folder and have the right name.
if os.path.isfile('tokenizer.json' ):
# We skip the test if the user has a `tokenizer.json` in this folder to avoid deleting it.
return
try:
with open('tokenizer.json' , 'wb' ) as f:
http_get('https://huggingface.co/hf-internal-testing/tiny-random-bert/blob/main/tokenizer.json' , _lowerCamelCase )
UpperCamelCase_: List[str] = AutoTokenizer.from_pretrained('hf-internal-testing/tiny-random-gpt2' )
# The tiny random BERT has a vocab size of 1024, tiny gpt2 as a vocab size of 1000
self.assertEqual(tokenizer.vocab_size , 1_0_0_0 )
# Tokenizer should depend on the remote checkpoint, not the local tokenizer.json file.
finally:
os.remove('tokenizer.json' )
def _a ( self ):
# This test is for deprecated behavior and can be removed in v5
UpperCamelCase_: Any = AlbertTokenizer.from_pretrained('https://huggingface.co/albert-base-v1/resolve/main/spiece.model' )
@is_staging_test
class _lowerCAmelCase( unittest.TestCase ):
"""simple docstring"""
a : Dict =['''[UNK]''', '''[CLS]''', '''[SEP]''', '''[PAD]''', '''[MASK]''', '''bla''', '''blou''']
@classmethod
def _a ( cls ):
UpperCamelCase_: Optional[int] = TOKEN
HfFolder.save_token(_lowerCamelCase )
@classmethod
def _a ( cls ):
try:
delete_repo(token=cls._token , repo_id='test-tokenizer' )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id='valid_org/test-tokenizer-org' )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id='test-dynamic-tokenizer' )
except HTTPError:
pass
def _a ( self ):
with tempfile.TemporaryDirectory() as tmp_dir:
UpperCamelCase_: List[Any] = os.path.join(_lowerCamelCase , 'vocab.txt' )
with open(_lowerCamelCase , 'w' , encoding='utf-8' ) as vocab_writer:
vocab_writer.write(''.join([x + '\n' for x in self.vocab_tokens] ) )
UpperCamelCase_: int = BertTokenizer(_lowerCamelCase )
tokenizer.push_to_hub('test-tokenizer' , use_auth_token=self._token )
UpperCamelCase_: Union[str, Any] = BertTokenizer.from_pretrained(f'''{USER}/test-tokenizer''' )
self.assertDictEqual(new_tokenizer.vocab , tokenizer.vocab )
# Reset repo
delete_repo(token=self._token , repo_id='test-tokenizer' )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(_lowerCamelCase , repo_id='test-tokenizer' , push_to_hub=_lowerCamelCase , use_auth_token=self._token )
UpperCamelCase_: List[str] = BertTokenizer.from_pretrained(f'''{USER}/test-tokenizer''' )
self.assertDictEqual(new_tokenizer.vocab , tokenizer.vocab )
def _a ( self ):
with tempfile.TemporaryDirectory() as tmp_dir:
UpperCamelCase_: Optional[Any] = os.path.join(_lowerCamelCase , 'vocab.txt' )
with open(_lowerCamelCase , 'w' , encoding='utf-8' ) as vocab_writer:
vocab_writer.write(''.join([x + '\n' for x in self.vocab_tokens] ) )
UpperCamelCase_: Union[str, Any] = BertTokenizer(_lowerCamelCase )
tokenizer.push_to_hub('valid_org/test-tokenizer-org' , use_auth_token=self._token )
UpperCamelCase_: Dict = BertTokenizer.from_pretrained('valid_org/test-tokenizer-org' )
self.assertDictEqual(new_tokenizer.vocab , tokenizer.vocab )
# Reset repo
delete_repo(token=self._token , repo_id='valid_org/test-tokenizer-org' )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(
_lowerCamelCase , repo_id='valid_org/test-tokenizer-org' , push_to_hub=_lowerCamelCase , use_auth_token=self._token )
UpperCamelCase_: Optional[Any] = BertTokenizer.from_pretrained('valid_org/test-tokenizer-org' )
self.assertDictEqual(new_tokenizer.vocab , tokenizer.vocab )
@require_tokenizers
def _a ( self ):
CustomTokenizer.register_for_auto_class()
with tempfile.TemporaryDirectory() as tmp_dir:
UpperCamelCase_: Optional[Any] = os.path.join(_lowerCamelCase , 'vocab.txt' )
with open(_lowerCamelCase , 'w' , encoding='utf-8' ) as vocab_writer:
vocab_writer.write(''.join([x + '\n' for x in self.vocab_tokens] ) )
UpperCamelCase_: Optional[int] = CustomTokenizer(_lowerCamelCase )
# No fast custom tokenizer
tokenizer.push_to_hub('test-dynamic-tokenizer' , use_auth_token=self._token )
UpperCamelCase_: str = AutoTokenizer.from_pretrained(f'''{USER}/test-dynamic-tokenizer''' , trust_remote_code=_lowerCamelCase )
# Can't make an isinstance check because the new_model.config is from the CustomTokenizer class of a dynamic module
self.assertEqual(tokenizer.__class__.__name__ , 'CustomTokenizer' )
# Fast and slow custom tokenizer
CustomTokenizerFast.register_for_auto_class()
with tempfile.TemporaryDirectory() as tmp_dir:
UpperCamelCase_: Optional[Any] = os.path.join(_lowerCamelCase , 'vocab.txt' )
with open(_lowerCamelCase , 'w' , encoding='utf-8' ) as vocab_writer:
vocab_writer.write(''.join([x + '\n' for x in self.vocab_tokens] ) )
UpperCamelCase_: Dict = BertTokenizerFast.from_pretrained(_lowerCamelCase )
bert_tokenizer.save_pretrained(_lowerCamelCase )
UpperCamelCase_: List[str] = CustomTokenizerFast.from_pretrained(_lowerCamelCase )
tokenizer.push_to_hub('test-dynamic-tokenizer' , use_auth_token=self._token )
UpperCamelCase_: Tuple = AutoTokenizer.from_pretrained(f'''{USER}/test-dynamic-tokenizer''' , trust_remote_code=_lowerCamelCase )
# Can't make an isinstance check because the new_model.config is from the FakeConfig class of a dynamic module
self.assertEqual(tokenizer.__class__.__name__ , 'CustomTokenizerFast' )
UpperCamelCase_: int = AutoTokenizer.from_pretrained(
f'''{USER}/test-dynamic-tokenizer''' , use_fast=_lowerCamelCase , trust_remote_code=_lowerCamelCase )
# Can't make an isinstance check because the new_model.config is from the FakeConfig class of a dynamic module
self.assertEqual(tokenizer.__class__.__name__ , 'CustomTokenizer' )
class _lowerCAmelCase( unittest.TestCase ):
    """simple docstring"""
    def _a ( self ):
        trie = Trie()
        trie.add('Hello 友達' )
        self.assertEqual(trie.data , {'H': {'e': {'l': {'l': {'o': {' ': {'友': {'達': {'': 1}}}}}}}}} )
        trie.add('Hello' )
        trie.data
        self.assertEqual(trie.data , {'H': {'e': {'l': {'l': {'o': {'': 1, ' ': {'友': {'達': {'': 1}}}}}}}}} )
    def _a ( self ):
        trie = Trie()
        self.assertEqual(trie.split('[CLS] This is a extra_id_100' ) , ['[CLS] This is a extra_id_100'] )
        trie.add('[CLS]' )
        trie.add('extra_id_1' )
        trie.add('extra_id_100' )
        self.assertEqual(trie.split('[CLS] This is a extra_id_100' ) , ['[CLS]', ' This is a ', 'extra_id_100'] )
    def _a ( self ):
        trie = Trie()
        trie.add('A' )
        self.assertEqual(trie.split('ABC' ) , ['A', 'BC'] )
        self.assertEqual(trie.split('BCA' ) , ['BC', 'A'] )
    def _a ( self ):
        trie = Trie()
        trie.add('TOKEN]' )
        trie.add('[SPECIAL_TOKEN]' )
        self.assertEqual(trie.split('This is something [SPECIAL_TOKEN]' ) , ['This is something ', '[SPECIAL_TOKEN]'] )
    def _a ( self ):
        trie = Trie()
        trie.add('A' )
        trie.add('P' )
        trie.add('[SPECIAL_TOKEN]' )
        self.assertEqual(trie.split('This is something [SPECIAL_TOKEN]' ) , ['This is something ', '[SPECIAL_TOKEN]'] )
    def _a ( self ):
        trie = Trie()
        trie.add('AB' )
        trie.add('B' )
        trie.add('C' )
        self.assertEqual(trie.split('ABC' ) , ['AB', 'C'] )
    def _a ( self ):
        trie = Trie()
        trie.add('ABC' )
        trie.add('B' )
        trie.add('CD' )
        self.assertEqual(trie.split('ABCD' ) , ['ABC', 'D'] )
    def _a ( self ):
        # Even if the offsets are wrong, we necessarily output correct string
        # parts.
        trie = Trie()
        output = trie.cut_text('ABC' , [0, 0, 2, 1, 2, 3] )
        self.assertEqual(output , ['AB', 'C'] )
| 292 | 0 |
"""simple docstring"""
import pytest
DATASET_LOADING_SCRIPT_NAME = '__dummy_dataset1__'
_snake_case = '\nimport json\nimport os\n\nimport datasets\n\n\nREPO_URL = "https://huggingface.co/datasets/albertvillanova/tests-raw-jsonl/resolve/main/"\nURLS = {"train": REPO_URL + "wikiann-bn-train.jsonl", "validation": REPO_URL + "wikiann-bn-validation.jsonl"}\n\n\nclass __DummyDataset1__(datasets.GeneratorBasedBuilder):\n\n def _info(self):\n features = datasets.Features(\n {\n "tokens": datasets.Sequence(datasets.Value("string")),\n "ner_tags": datasets.Sequence(\n datasets.features.ClassLabel(\n names=[\n "O",\n "B-PER",\n "I-PER",\n "B-ORG",\n "I-ORG",\n "B-LOC",\n "I-LOC",\n ]\n )\n ),\n "langs": datasets.Sequence(datasets.Value("string")),\n "spans": datasets.Sequence(datasets.Value("string")),\n }\n )\n return datasets.DatasetInfo(features=features)\n\n def _split_generators(self, dl_manager):\n dl_path = dl_manager.download(URLS)\n return [\n datasets.SplitGenerator(datasets.Split.TRAIN, gen_kwargs={"filepath": dl_path["train"]}),\n datasets.SplitGenerator(datasets.Split.VALIDATION, gen_kwargs={"filepath": dl_path["validation"]}),\n ]\n\n def _generate_examples(self, filepath):\n with open(filepath, "r", encoding="utf-8") as f:\n for i, line in enumerate(f):\n yield i, json.loads(line)\n'
DATASET_LOADING_SCRIPT_CODE = _snake_case  # re-expose the script string above under the name the fixtures return
@pytest.fixture
def dataset_loading_script_name ( ):
    '''simple docstring'''
    return DATASET_LOADING_SCRIPT_NAME
@pytest.fixture
def dataset_loading_script_code ( ):
    '''simple docstring'''
    return DATASET_LOADING_SCRIPT_CODE
@pytest.fixture
def dataset_loading_script_dir ( dataset_loading_script_name , dataset_loading_script_code , tmp_path ):
    '''simple docstring'''
    script_name = dataset_loading_script_name
    script_dir = tmp_path / """datasets""" / script_name
    script_dir.mkdir(parents=True )
    script_path = script_dir / F"""{script_name}.py"""
    with open(script_path , """w""" ) as f:
        f.write(dataset_loading_script_code )
    return str(script_dir )
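# Editor's hedged usage sketch (added): the `dataset_loading_script_dir`
# fixture above is meant to be passed straight to `datasets.load_dataset`,
# e.g. `load_dataset(dataset_loading_script_dir, split="train")`.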
| 294 |
"""simple docstring"""
import warnings
from ...utils import logging
from .image_processing_clip import CLIPImageProcessor
_snake_case = logging.get_logger(__name__)
class UpperCamelCase ( CLIPImageProcessor ):
def __init__( self : Any , *UpperCAmelCase__ : Optional[Any] , **UpperCAmelCase__ : Tuple ) -> None:
warnings.warn(
"""The class CLIPFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"""
""" use CLIPImageProcessor instead.""" , UpperCAmelCase__ , )
super().__init__(*UpperCAmelCase__ , **UpperCAmelCase__ )
| 294 | 1 |
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import warnings
from typing import List
from unittest.mock import Mock
import torch
from torch.utils.data import DataLoader, IterableDataset, TensorDataset
from accelerate.accelerator import Accelerator
from accelerate.utils.dataclasses import DistributedType
class DummyIterableDataset(IterableDataset):
    def __init__(self, data):
        '''simple docstring'''
        self.data = data

    def __iter__(self):
        '''simple docstring'''
        for element in self.data:
            yield element
def create_accelerator(even_batches=True):
    '''simple docstring'''
    accelerator = Accelerator(even_batches=even_batches)
    assert accelerator.num_processes == 2, "this script expects that two GPUs are available"
    return accelerator
def create_dataloader(accelerator, dataset_size, batch_size, iterable=False):
    '''simple docstring'''
    if iterable:
        dataset = DummyIterableDataset(torch.as_tensor(range(dataset_size)))
    else:
        dataset = TensorDataset(torch.as_tensor(range(dataset_size)))
    dl = DataLoader(dataset, batch_size=batch_size)
    dl = accelerator.prepare(dl)
    return dl
def verify_dataloader_batch_sizes(
    accelerator, dataset_size, batch_size, process_0_expected_batch_sizes, process_1_expected_batch_sizes,
):
    '''simple docstring'''
    dl = create_dataloader(accelerator=accelerator, dataset_size=dataset_size, batch_size=batch_size)
    batch_sizes = [len(batch[0]) for batch in dl]
    if accelerator.process_index == 0:
        assert batch_sizes == process_0_expected_batch_sizes
    elif accelerator.process_index == 1:
        assert batch_sizes == process_1_expected_batch_sizes
def test_default_ensures_even_batch_sizes():
    '''simple docstring'''
    accelerator = create_accelerator()
    # without padding, we would expect a different number of batches
    verify_dataloader_batch_sizes(
        accelerator, dataset_size=3, batch_size=1, process_0_expected_batch_sizes=[1, 1], process_1_expected_batch_sizes=[1, 1],
    )
    # without padding, we would expect the same number of batches, but different sizes
    verify_dataloader_batch_sizes(
        accelerator, dataset_size=7, batch_size=2, process_0_expected_batch_sizes=[2, 2], process_1_expected_batch_sizes=[2, 2],
    )
def test_can_disable_even_batches():
    '''simple docstring'''
    accelerator = create_accelerator(even_batches=False)
    verify_dataloader_batch_sizes(
        accelerator, dataset_size=3, batch_size=1, process_0_expected_batch_sizes=[1, 1], process_1_expected_batch_sizes=[1],
    )
    verify_dataloader_batch_sizes(
        accelerator, dataset_size=7, batch_size=2, process_0_expected_batch_sizes=[2, 2], process_1_expected_batch_sizes=[2, 1],
    )
def test_can_join_uneven_inputs():
    '''simple docstring'''
    accelerator = create_accelerator(even_batches=False)
    model = torch.nn.Linear(1, 1)
    ddp_model = accelerator.prepare(model)
    dl = create_dataloader(accelerator, dataset_size=3, batch_size=1)
    batch_idxs = []
    with accelerator.join_uneven_inputs([ddp_model]):
        for batch_idx, batch in enumerate(dl):
            output = ddp_model(batch[0].float())
            loss = output.sum()
            loss.backward()
            batch_idxs.append(batch_idx)
    accelerator.wait_for_everyone()
    if accelerator.process_index == 0:
        assert batch_idxs == [0, 1]
    elif accelerator.process_index == 1:
        assert batch_idxs == [0]
def test_join_raises_warning_for_non_ddp_distributed(accelerator):
    '''simple docstring'''
    with warnings.catch_warnings(record=True) as w:
        with accelerator.join_uneven_inputs([Mock()]):
            pass
    assert issubclass(w[-1].category, UserWarning)
    assert "only supported for multi-GPU" in str(w[-1].message)
def test_join_can_override_even_batches():
    '''simple docstring'''
    default_even_batches = True
    overridden_even_batches = False
    accelerator = create_accelerator(even_batches=default_even_batches)
    model = torch.nn.Linear(1, 1)
    ddp_model = accelerator.prepare(model)
    train_dl = create_dataloader(accelerator, dataset_size=3, batch_size=1)
    valid_dl = create_dataloader(accelerator, dataset_size=3, batch_size=1)
    with accelerator.join_uneven_inputs([ddp_model], even_batches=overridden_even_batches):
        train_dl_overridden_value = train_dl.batch_sampler.even_batches
        valid_dl_overridden_value = valid_dl.batch_sampler.even_batches
    assert train_dl_overridden_value == overridden_even_batches
    assert valid_dl_overridden_value == overridden_even_batches
    assert train_dl.batch_sampler.even_batches == default_even_batches
    assert valid_dl.batch_sampler.even_batches == default_even_batches
def test_join_can_override_for_mixed_type_dataloaders():
    '''simple docstring'''
    default_even_batches = True
    overridden_even_batches = False
    accelerator = create_accelerator(even_batches=default_even_batches)
    model = torch.nn.Linear(1, 1)
    ddp_model = accelerator.prepare(model)
    create_dataloader(accelerator, dataset_size=3, batch_size=1, iterable=True)
    batch_dl = create_dataloader(accelerator, dataset_size=3, batch_size=1)
    with warnings.catch_warnings():
        warnings.filterwarnings("ignore")
        try:
            with accelerator.join_uneven_inputs([ddp_model], even_batches=overridden_even_batches):
                batch_dl_overridden_value = batch_dl.batch_sampler.even_batches
        except AttributeError:
            # ensure attribute error is not raised when processing iterable dl
            raise AssertionError
    assert batch_dl_overridden_value == overridden_even_batches
    assert batch_dl.batch_sampler.even_batches == default_even_batches
def test_join_raises_warning_for_iterable_when_overriding_even_batches():
    '''simple docstring'''
    accelerator = create_accelerator()
    model = torch.nn.Linear(1, 1)
    ddp_model = accelerator.prepare(model)
    create_dataloader(accelerator, dataset_size=3, batch_size=1, iterable=True)
    with warnings.catch_warnings(record=True) as w:
        with accelerator.join_uneven_inputs([ddp_model], even_batches=False):
            pass
    assert issubclass(w[-1].category, UserWarning)
    assert "only supported for map-style datasets" in str(w[-1].message)
def main():
    '''simple docstring'''
    accelerator = create_accelerator()
    accelerator.print("Test that even_batches variable ensures uniform batches across processes")
    test_default_ensures_even_batch_sizes()
    accelerator.print("Run tests with even_batches disabled")
    test_can_disable_even_batches()
    accelerator.print("Test joining uneven inputs")
    test_can_join_uneven_inputs()
    accelerator.print("Test overriding even_batches when joining uneven inputs")
    test_join_can_override_even_batches()
    accelerator.print("Test overriding even_batches for mixed dataloader types")
    test_join_can_override_for_mixed_type_dataloaders()
    accelerator.print("Test overriding even_batches raises a warning for iterable dataloaders")
    test_join_raises_warning_for_iterable_when_overriding_even_batches()
    accelerator.print("Test join with non DDP distributed raises warning")
    original_state = accelerator.state.distributed_type
    accelerator.state.distributed_type = DistributedType.FSDP
    test_join_raises_warning_for_non_ddp_distributed(accelerator)
    accelerator.state.distributed_type = original_state
if __name__ == "__main__":
main()
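# A plain-Python sketch of the batch arithmetic the assertions above encode.
# This models the behavior the tests observe (it is not accelerate's actual
# sharding implementation); the function name and the padding rule are ours.
import math


def expected_batch_sizes(dataset_size, batch_size, num_processes, even_batches):
    global_size = batch_size * num_processes
    if even_batches:
        # pad the dataset up to a multiple of the global batch size by reusing samples
        dataset_size = math.ceil(dataset_size / global_size) * global_size
    sizes = [[] for _ in range(num_processes)]
    for start in range(0, dataset_size, global_size):
        chunk = min(global_size, dataset_size - start)
        for p in range(num_processes):
            share = min(batch_size, max(0, chunk - p * batch_size))
            if share > 0:
                sizes[p].append(share)
    return sizes


# expected_batch_sizes(7, 2, 2, even_batches=True)  -> [[2, 2], [2, 2]]
# expected_batch_sizes(7, 2, 2, even_batches=False) -> [[2, 2], [2, 1]]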
| 300 |
import warnings
from ...utils import logging
from .image_processing_videomae import VideoMAEImageProcessor
logger = logging.get_logger(__name__)


class VideoMAEFeatureExtractor(VideoMAEImageProcessor):
    def __init__(self, *args, **kwargs):
        '''simple docstring'''
        warnings.warn(
            "The class VideoMAEFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use VideoMAEImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
| 300 | 1 |
'''simple docstring'''
import gc
import random
import tempfile
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMInverseScheduler,
DDIMScheduler,
DPMSolverMultistepInverseScheduler,
DPMSolverMultistepScheduler,
StableDiffusionDiffEditPipeline,
UNetaDConditionModel,
)
from diffusers.utils import load_image, slow
from diffusers.utils.testing_utils import enable_full_determinism, floats_tensor, require_torch_gpu, torch_device
from ..pipeline_params import TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class StableDiffusionDiffEditPipelineFastTests(PipelineLatentTesterMixin, PipelineTesterMixin, unittest.TestCase):
    '''simple docstring'''

    pipeline_class = StableDiffusionDiffEditPipeline
    params = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {'''height''', '''width''', '''image'''} | {'''image_latents'''}
    batch_params = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS - {'''image'''} | {'''image_latents'''}
    image_params = frozenset(
        [] )  # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
    image_latents_params = frozenset([] )
def _lowerCAmelCase( self ) -> Any:
torch.manual_seed(0 )
lowercase__ : Any = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''') , up_block_types=('''CrossAttnUpBlock2D''', '''UpBlock2D''') , cross_attention_dim=32 , attention_head_dim=(2, 4) , use_linear_projection=__a , )
lowercase__ : Any = DDIMScheduler(
beta_start=0.0_0_0_8_5 , beta_end=0.0_1_2 , beta_schedule='''scaled_linear''' , clip_sample=__a , set_alpha_to_one=__a , )
lowercase__ : Optional[Any] = DDIMInverseScheduler(
beta_start=0.0_0_0_8_5 , beta_end=0.0_1_2 , beta_schedule='''scaled_linear''' , clip_sample=__a , set_alpha_to_zero=__a , )
torch.manual_seed(0 )
lowercase__ : Tuple = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''] , up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] , latent_channels=4 , sample_size=128 , )
torch.manual_seed(0 )
lowercase__ : List[str] = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , hidden_act='''gelu''' , projection_dim=512 , )
lowercase__ : Dict = CLIPTextModel(__a )
lowercase__ : Dict = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
lowercase__ : Optional[int] = {
"""unet""": unet,
"""scheduler""": scheduler,
"""inverse_scheduler""": inverse_scheduler,
"""vae""": vae,
"""text_encoder""": text_encoder,
"""tokenizer""": tokenizer,
"""safety_checker""": None,
"""feature_extractor""": None,
}
return components
def _lowerCAmelCase( self , __lowerCAmelCase , __lowerCAmelCase=0 ) -> int:
lowercase__ : List[Any] = floats_tensor((1, 16, 16) , rng=random.Random(__a ) ).to(__a )
lowercase__ : Tuple = floats_tensor((1, 2, 4, 16, 16) , rng=random.Random(__a ) ).to(__a )
if str(__a ).startswith('''mps''' ):
lowercase__ : Union[str, Any] = torch.manual_seed(__a )
else:
lowercase__ : Optional[Any] = torch.Generator(device=__a ).manual_seed(__a )
lowercase__ : Optional[Any] = {
"""prompt""": """a dog and a newt""",
"""mask_image""": mask,
"""image_latents""": latents,
"""generator""": generator,
"""num_inference_steps""": 2,
"""inpaint_strength""": 1.0,
"""guidance_scale""": 6.0,
"""output_type""": """numpy""",
}
return inputs
def _lowerCAmelCase( self , __lowerCAmelCase , __lowerCAmelCase=0 ) -> Optional[Any]:
lowercase__ : Union[str, Any] = floats_tensor((1, 3, 32, 32) , rng=random.Random(__a ) ).to(__a )
lowercase__ : Any = image.cpu().permute(0 , 2 , 3 , 1 )[0]
lowercase__ : Optional[int] = Image.fromarray(np.uinta(__a ) ).convert('''RGB''' )
if str(__a ).startswith('''mps''' ):
lowercase__ : Any = torch.manual_seed(__a )
else:
lowercase__ : List[Any] = torch.Generator(device=__a ).manual_seed(__a )
lowercase__ : List[str] = {
"""image""": image,
"""source_prompt""": """a cat and a frog""",
"""target_prompt""": """a dog and a newt""",
"""generator""": generator,
"""num_inference_steps""": 2,
"""num_maps_per_mask""": 2,
"""mask_encode_strength""": 1.0,
"""guidance_scale""": 6.0,
"""output_type""": """numpy""",
}
return inputs
def _lowerCAmelCase( self , __lowerCAmelCase , __lowerCAmelCase=0 ) -> List[Any]:
lowercase__ : Any = floats_tensor((1, 3, 32, 32) , rng=random.Random(__a ) ).to(__a )
lowercase__ : List[Any] = image.cpu().permute(0 , 2 , 3 , 1 )[0]
lowercase__ : Any = Image.fromarray(np.uinta(__a ) ).convert('''RGB''' )
if str(__a ).startswith('''mps''' ):
lowercase__ : List[str] = torch.manual_seed(__a )
else:
lowercase__ : Optional[Any] = torch.Generator(device=__a ).manual_seed(__a )
lowercase__ : str = {
"""image""": image,
"""prompt""": """a cat and a frog""",
"""generator""": generator,
"""num_inference_steps""": 2,
"""inpaint_strength""": 1.0,
"""guidance_scale""": 6.0,
"""decode_latents""": True,
"""output_type""": """numpy""",
}
return inputs
def _lowerCAmelCase( self ) -> int:
if not hasattr(self.pipeline_class , '''_optional_components''' ):
return
lowercase__ : str = self.get_dummy_components()
lowercase__ : Optional[int] = self.pipeline_class(**__a )
pipe.to(__a )
pipe.set_progress_bar_config(disable=__a )
# set all optional components to None and update pipeline config accordingly
for optional_component in pipe._optional_components:
setattr(__a , __a , __a )
pipe.register_modules(**{optional_component: None for optional_component in pipe._optional_components} )
lowercase__ : Dict = self.get_dummy_inputs(__a )
lowercase__ : Dict = pipe(**__a )[0]
with tempfile.TemporaryDirectory() as tmpdir:
pipe.save_pretrained(__a )
lowercase__ : int = self.pipeline_class.from_pretrained(__a )
pipe_loaded.to(__a )
pipe_loaded.set_progress_bar_config(disable=__a )
for optional_component in pipe._optional_components:
self.assertTrue(
getattr(__a , __a ) is None , F"""`{optional_component}` did not stay set to None after loading.""" , )
lowercase__ : Dict = self.get_dummy_inputs(__a )
lowercase__ : Tuple = pipe_loaded(**__a )[0]
lowercase__ : Optional[Any] = np.abs(output - output_loaded ).max()
self.assertLess(__a , 1E-4 )
def _lowerCAmelCase( self ) -> int:
lowercase__ : List[Any] = """cpu"""
lowercase__ : Dict = self.get_dummy_components()
lowercase__ : Optional[Any] = self.pipeline_class(**__a )
pipe.to(__a )
pipe.set_progress_bar_config(disable=__a )
lowercase__ : int = self.get_dummy_mask_inputs(__a )
lowercase__ : Union[str, Any] = pipe.generate_mask(**__a )
lowercase__ : List[Any] = mask[0, -3:, -3:]
self.assertEqual(mask.shape , (1, 16, 16) )
lowercase__ : Any = np.array([0] * 9 )
lowercase__ : Any = np.abs(mask_slice.flatten() - expected_slice ).max()
self.assertLessEqual(__a , 1E-3 )
self.assertEqual(mask[0, -3, -4] , 0 )
def _lowerCAmelCase( self ) -> Dict:
lowercase__ : Any = """cpu"""
lowercase__ : Any = self.get_dummy_components()
lowercase__ : List[str] = self.pipeline_class(**__a )
pipe.to(__a )
pipe.set_progress_bar_config(disable=__a )
lowercase__ : List[Any] = self.get_dummy_inversion_inputs(__a )
lowercase__ : Any = pipe.invert(**__a ).images
lowercase__ : Tuple = image[0, -1, -3:, -3:]
self.assertEqual(image.shape , (2, 32, 32, 3) )
lowercase__ : List[str] = np.array(
[0.5_1_5_0, 0.5_1_3_4, 0.5_0_4_3, 0.5_3_7_6, 0.4_6_9_4, 0.5_1_0_5_0, 0.5_0_1_5, 0.4_4_0_7, 0.4_7_9_9] , )
lowercase__ : Tuple = np.abs(image_slice.flatten() - expected_slice ).max()
self.assertLessEqual(__a , 1E-3 )
def _lowerCAmelCase( self ) -> List[Any]:
super().test_inference_batch_single_identical(expected_max_diff=5E-3 )
def _lowerCAmelCase( self ) -> Optional[Any]:
lowercase__ : Optional[Any] = """cpu"""
lowercase__ : str = self.get_dummy_components()
lowercase__ : List[str] = {"""beta_start""": 0.0_0_0_8_5, """beta_end""": 0.0_1_2, """beta_schedule""": """scaled_linear"""}
lowercase__ : int = DPMSolverMultistepScheduler(**__a )
lowercase__ : List[str] = DPMSolverMultistepInverseScheduler(**__a )
lowercase__ : str = self.pipeline_class(**__a )
pipe.to(__a )
pipe.set_progress_bar_config(disable=__a )
lowercase__ : Optional[Any] = self.get_dummy_inversion_inputs(__a )
lowercase__ : Union[str, Any] = pipe.invert(**__a ).images
lowercase__ : Optional[int] = image[0, -1, -3:, -3:]
self.assertEqual(image.shape , (2, 32, 32, 3) )
lowercase__ : List[Any] = np.array(
[0.5_1_5_0, 0.5_1_3_4, 0.5_0_4_3, 0.5_3_7_6, 0.4_6_9_4, 0.5_1_0_5_0, 0.5_0_1_5, 0.4_4_0_7, 0.4_7_9_9] , )
lowercase__ : List[Any] = np.abs(image_slice.flatten() - expected_slice ).max()
self.assertLessEqual(__a , 1E-3 )
@require_torch_gpu
@slow
class UpperCAmelCase ( unittest.TestCase ):
'''simple docstring'''
def _lowerCAmelCase( self ) -> List[Any]:
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@classmethod
def _lowerCAmelCase( cls ) -> Tuple:
lowercase__ : Dict = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/diffedit/fruit.png''' )
lowercase__ : Tuple = raw_image.convert('''RGB''' ).resize((768, 768) )
lowercase__ : int = raw_image
def _lowerCAmelCase( self ) -> Tuple:
lowercase__ : int = torch.manual_seed(0 )
lowercase__ : List[str] = StableDiffusionDiffEditPipeline.from_pretrained(
'''stabilityai/stable-diffusion-2-1''' , safety_checker=__a , torch_dtype=torch.floataa )
lowercase__ : List[str] = DDIMScheduler.from_config(pipe.scheduler.config )
lowercase__ : Union[str, Any] = DDIMInverseScheduler.from_config(pipe.scheduler.config )
pipe.enable_model_cpu_offload()
pipe.set_progress_bar_config(disable=__a )
lowercase__ : str = """a bowl of fruit"""
lowercase__ : Optional[Any] = """a bowl of pears"""
lowercase__ : Union[str, Any] = pipe.generate_mask(
image=self.raw_image , source_prompt=__a , target_prompt=__a , generator=__a , )
lowercase__ : int = pipe.invert(
prompt=__a , image=self.raw_image , inpaint_strength=0.7 , generator=__a ).latents
lowercase__ : Optional[int] = pipe(
prompt=__a , mask_image=__a , image_latents=__a , generator=__a , negative_prompt=__a , inpaint_strength=0.7 , output_type='''numpy''' , ).images[0]
lowercase__ : str = (
np.array(
load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/diffedit/pears.png''' ).resize((768, 768) ) )
/ 255
)
assert np.abs((expected_image - image).max() ) < 5E-1
def _lowerCAmelCase( self ) -> List[Any]:
lowercase__ : Optional[Any] = torch.manual_seed(0 )
lowercase__ : List[Any] = StableDiffusionDiffEditPipeline.from_pretrained(
'''stabilityai/stable-diffusion-2-1''' , safety_checker=__a , torch_dtype=torch.floataa )
lowercase__ : int = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config )
lowercase__ : Optional[Any] = DPMSolverMultistepInverseScheduler.from_config(pipe.scheduler.config )
pipe.enable_model_cpu_offload()
pipe.set_progress_bar_config(disable=__a )
lowercase__ : Union[str, Any] = """a bowl of fruit"""
lowercase__ : List[Any] = """a bowl of pears"""
lowercase__ : Tuple = pipe.generate_mask(
image=self.raw_image , source_prompt=__a , target_prompt=__a , generator=__a , )
lowercase__ : str = pipe.invert(
prompt=__a , image=self.raw_image , inpaint_strength=0.7 , generator=__a , num_inference_steps=25 , ).latents
lowercase__ : Union[str, Any] = pipe(
prompt=__a , mask_image=__a , image_latents=__a , generator=__a , negative_prompt=__a , inpaint_strength=0.7 , num_inference_steps=25 , output_type='''numpy''' , ).images[0]
lowercase__ : List[str] = (
np.array(
load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/diffedit/pears.png''' ).resize((768, 768) ) )
/ 255
)
assert np.abs((expected_image - image).max() ) < 5E-1
| 198 |
__version__ = '''0.21.0'''
from .accelerator import Accelerator
from .big_modeling import (
cpu_offload,
cpu_offload_with_hook,
disk_offload,
dispatch_model,
init_empty_weights,
init_on_device,
load_checkpoint_and_dispatch,
)
from .data_loader import skip_first_batches
from .launchers import debug_launcher, notebook_launcher
from .state import PartialState
from .utils import (
DeepSpeedPlugin,
DistributedDataParallelKwargs,
DistributedType,
FullyShardedDataParallelPlugin,
GradScalerKwargs,
InitProcessGroupKwargs,
find_executable_batch_size,
infer_auto_device_map,
is_rich_available,
load_checkpoint_in_model,
synchronize_rng_states,
)
if is_rich_available():
from .utils import rich | 233 | 0 |
'''simple docstring'''
from __future__ import annotations
import unittest
from transformers import DebertaVaConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFDebertaVaForMaskedLM,
TFDebertaVaForQuestionAnswering,
TFDebertaVaForSequenceClassification,
TFDebertaVaForTokenClassification,
TFDebertaVaModel,
)
class TFDebertaVaModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        relative_attention=False,
        position_biased_input=True,
        pos_att_type="None",
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.relative_attention = relative_attention
        self.position_biased_input = position_biased_input
        self.pos_att_type = pos_att_type
        self.scope = scope
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
        config = DebertaVaConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            relative_attention=self.relative_attention,
            position_biased_input=self.position_biased_input,
            initializer_range=self.initializer_range,
            return_dict=True,
        )
        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def create_and_check_model(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = TFDebertaVaModel(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
        inputs = [input_ids, input_mask]
        result = model(inputs)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
    def create_and_check_for_masked_lm(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = TFDebertaVaForMaskedLM(config=config)
        inputs_dict = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs_dict)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
    def create_and_check_for_sequence_classification(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_labels = self.num_labels
        model = TFDebertaVaForSequenceClassification(config=config)
        inputs_dict = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs_dict)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))
    def create_and_check_for_token_classification(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_labels = self.num_labels
        model = TFDebertaVaForTokenClassification(config=config)
        inputs_dict = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs_dict)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))
    def create_and_check_for_question_answering(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = TFDebertaVaForQuestionAnswering(config=config)
        inputs_dict = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs_dict)
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_tf
class TFDebertaVaModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            TFDebertaVaModel,
            TFDebertaVaForMaskedLM,
            TFDebertaVaForQuestionAnswering,
            TFDebertaVaForSequenceClassification,
            TFDebertaVaForTokenClassification,
        )
        if is_tf_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": TFDebertaVaModel,
            "fill-mask": TFDebertaVaForMaskedLM,
            "question-answering": TFDebertaVaForQuestionAnswering,
            "text-classification": TFDebertaVaForSequenceClassification,
            "token-classification": TFDebertaVaForTokenClassification,
            "zero-shot": TFDebertaVaForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )
    test_head_masking = False
    test_onnx = False
    def setUp(self):
        self.model_tester = TFDebertaVaModelTester(self)
        self.config_tester = ConfigTester(self, config_class=DebertaVaConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        model = TFDebertaVaModel.from_pretrained("kamalkraj/deberta-v2-xlarge")
        self.assertIsNotNone(model)
@require_tf
class TFDebertaVaModelIntegrationTest(unittest.TestCase):
    @unittest.skip(reason="Model not available yet")
    def test_inference_masked_lm(self):
        pass

    @slow
    def test_inference_no_head(self):
        model = TFDebertaVaModel.from_pretrained("kamalkraj/deberta-v2-xlarge")
        input_ids = tf.constant([[0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2]])
        attention_mask = tf.constant([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]])
        output = model(input_ids, attention_mask=attention_mask)[0]
        expected_slice = tf.constant(
            [[[0.2356, 0.1948, 0.0369], [-0.1063, 0.3586, -0.5152], [-0.6399, -0.0259, -0.2525]]])
        tf.debugging.assert_near(output[:, 1:4, 1:4], expected_slice, atol=1e-4)
| 107 |
'''simple docstring'''
from __future__ import annotations
import collections
import pprint
from pathlib import Path
def signature(word: str) -> str:
    return "".join(sorted(word))


def anagram(my_word: str) -> list[str]:
    return word_by_signature[signature(my_word)]
data = Path(__file__).parent.joinpath('''words.txt''').read_text(encoding='''utf-8''')
word_list = sorted({word.strip().lower() for word in data.splitlines()})
word_by_signature = collections.defaultdict(list)
for word in word_list:
word_by_signature[signature(word)].append(word)
if __name__ == "__main__":
    all_anagrams = {word: anagram(word) for word in word_list if len(anagram(word)) > 1}
with open('''anagrams.txt''', '''w''') as file:
file.write('''all_anagrams = \n ''')
file.write(pprint.pformat(all_anagrams))
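# Worked example (assuming "words.txt" contains the usual dictionary entries):
# signature("listen") == "eilnst" == signature("silent"), so both words share a
# bucket in word_by_signature, and anagram("listen") returns every dictionary
# word with that signature, e.g. ["enlist", "listen", "silent"].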
| 107 | 1 |
'''simple docstring'''
import unittest
from dataclasses import dataclass
import pytest
from accelerate.commands.config.config_args import SageMakerConfig
from accelerate.utils import ComputeEnvironment
from accelerate.utils.launch import _convert_nargs_to_dict
@dataclass
class MockLaunchConfig(SageMakerConfig):
    '''simple docstring'''

    compute_environment = ComputeEnvironment.AMAZON_SAGEMAKER
    # NOTE: the original file only preserved the values of these fields; the
    # names below follow SageMakerConfig, and the bare boolean's name is a guess.
    debug = True
    ec2_instance_type = """ml.p3.2xlarge"""
    iam_role_name = """accelerate_sagemaker_execution_role"""
    profile = """hf-sm"""
    region = """us-east-1"""
    num_machines = 1
    base_job_name = """accelerate-sagemaker-1"""
    pytorch_version = """1.6"""
    transformers_version = """4.4"""
    training_script = """train.py"""
    success_training_script_args = [
"""--model_name_or_path""",
"""bert""",
"""--do_train""",
"""False""",
"""--epochs""",
"""3""",
"""--learning_rate""",
"""5e-5""",
"""--max_steps""",
"""50.5""",
]
    fail_training_script_args = [
"""--model_name_or_path""",
"""bert""",
"""--do_train""",
"""--do_test""",
"""False""",
"""--do_predict""",
"""--epochs""",
"""3""",
"""--learning_rate""",
"""5e-5""",
"""--max_steps""",
"""50.5""",
]
class MockLaunchConfigTester(unittest.TestCase):
    '''simple docstring'''

    def test_args_convert(self):
        '''simple docstring'''
        converted_args = _convert_nargs_to_dict(MockLaunchConfig.success_training_script_args)
        assert isinstance(converted_args['''model_name_or_path'''], str)
        assert isinstance(converted_args['''do_train'''], bool)
        assert isinstance(converted_args['''epochs'''], int)
        assert isinstance(converted_args['''learning_rate'''], float)
        assert isinstance(converted_args['''max_steps'''], float)
        # bare flags mixed with valued flags cannot be paired into a dict
        with pytest.raises(ValueError):
            _convert_nargs_to_dict(MockLaunchConfig.fail_training_script_args)
| 2 | """simple docstring"""
import os
import numpy
import onnx
def _is_equal_tensor_proto(a, b):
    # compare two initializers while ignoring their names: blank both names,
    # compare the protos, then restore the originals
    name_a = a.name
    name_b = b.name
    a.name = ""
    b.name = ""
    res = a == b
    a.name = name_a
    b.name = name_b
    return res
def _node_replace_input_with(node_proto, name, new_name):
    for i, input_name in enumerate(node_proto.input):
        if input_name == name:
            node_proto.input.insert(i, new_name)
            node_proto.input.pop(i + 1)
    if node_proto.op_type == "If":
        _graph_replace_input_with(node_proto.attribute[0].g, name, new_name)
        _graph_replace_input_with(node_proto.attribute[1].g, name, new_name)
    if node_proto.op_type == "Loop":
        _graph_replace_input_with(node_proto.attribute[0].g, name, new_name)
def _graph_replace_input_with(graph_proto, name, new_name):
    for n in graph_proto.node:
        _node_replace_input_with(n, name, new_name)
def _remove_dup_initializers_from_model(model, model_without_ext, ind_to_replace):
    inits_with_data = list(model.graph.initializer)
    inits = list(model_without_ext.graph.initializer)
    for i, ref_i in ind_to_replace:
        assert inits_with_data[i].name == inits[i].name
        assert inits_with_data[ref_i].name == inits[ref_i].name
        assert i > ref_i
        name_i = inits[i].name
        name_ref = inits[ref_i].name
        model_without_ext.graph.initializer.remove(inits[i])
        # rewrite every node input that referenced the removed initializer
        _graph_replace_input_with(model_without_ext.graph, name_i, name_ref)
def remove_dup_initializers(onnx_file_path):
    model_file_folder = os.path.dirname(onnx_file_path)
    model_file_name = os.path.basename(onnx_file_path)
    model = onnx.load(os.path.join(model_file_folder, model_file_name))
    inits = list(model.graph.initializer)
    dup_set = set()
    dup_map = {}
    ind_to_replace = []
    total_reduced_size = 0
    for i in range(len(inits)):
        if i in dup_set:
            continue
        for j in range(i + 1, len(inits)):
            if j in dup_set:
                continue
            if _is_equal_tensor_proto(inits[i], inits[j]):
                dup_set.add(i)
                dup_set.add(j)
                dtype = inits[j].data_type
                mem_size = numpy.prod(inits[j].dims)
                if dtype == 1:  # FLOAT
                    mem_size *= 4
                elif dtype == 6:  # INT32
                    mem_size *= 4
                elif dtype == 7 or dtype == 11:  # INT64 / DOUBLE
                    mem_size *= 8
                else:
                    print("unexpected data type: ", dtype)
                total_reduced_size += mem_size
                name_i = inits[i].name
                name_j = inits[j].name
                if name_i in dup_map:
                    dup_map[name_i].append(name_j)
                else:
                    dup_map[name_i] = [name_j]
                ind_to_replace.append((j, i))
    print("total reduced size: ", total_reduced_size / 1024 / 1024 / 1024, "GB")
    ind_to_replace = sorted(ind_to_replace)
    _remove_dup_initializers_from_model(model, model, ind_to_replace)
    optimized_model_file_name = "optimized_" + model_file_name
    new_model = os.path.join(model_file_folder, optimized_model_file_name)
    onnx.save(model, new_model)
    return new_model
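# Usage sketch (the path is illustrative): scan an exported model for
# byte-identical initializers, rewrite node inputs to point at the surviving
# copy, and save an "optimized_" sibling file.
# optimized_path = remove_dup_initializers("exported/model.onnx")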
| 98 | 0 |
class OverFlowError(Exception):
    """simple docstring"""
    pass


class UnderFlowError(Exception):
    """simple docstring"""
    pass
class FixedPriorityQueue:
    """simple docstring"""

    def __init__(self):
        self.queues = [
            [],
            [],
            [],
        ]

    def enqueue(self, priority, data):
        try:
            if len(self.queues[priority]) >= 100:
                raise OverFlowError("Maximum queue size is 100")
            self.queues[priority].append(data)
        except IndexError:
            raise ValueError("Valid priorities are 0, 1, and 2")

    def dequeue(self):
        for queue in self.queues:
            if queue:
                return queue.pop(0)
        raise UnderFlowError("All queues are empty")

    def __str__(self):
        return "\n".join(f"Priority {i}: {q}" for i, q in enumerate(self.queues))
class ElementPriorityQueue:
    """simple docstring"""

    def __init__(self):
        self.queue = []

    def enqueue(self, data):
        if len(self.queue) == 100:
            raise OverFlowError("Maximum queue size is 100")
        self.queue.append(data)

    def dequeue(self):
        if not self.queue:
            raise UnderFlowError("The queue is empty")
        else:
            data = min(self.queue)
            self.queue.remove(data)
            return data

    def __str__(self):
        return str(self.queue)
def fixed_priority_queue():
    """simple docstring"""
    fpq = FixedPriorityQueue()
fpq.enqueue(0 ,10 )
fpq.enqueue(1 ,70 )
fpq.enqueue(0 ,1_00 )
fpq.enqueue(2 ,1 )
fpq.enqueue(2 ,5 )
fpq.enqueue(1 ,7 )
fpq.enqueue(2 ,4 )
fpq.enqueue(1 ,64 )
fpq.enqueue(0 ,1_28 )
    print(fpq)
print(fpq.dequeue() )
print(fpq.dequeue() )
print(fpq.dequeue() )
print(fpq.dequeue() )
print(fpq.dequeue() )
    print(fpq)
print(fpq.dequeue() )
print(fpq.dequeue() )
print(fpq.dequeue() )
print(fpq.dequeue() )
print(fpq.dequeue() )
def element_priority_queue():
    """simple docstring"""
    epq = ElementPriorityQueue()
epq.enqueue(10 )
epq.enqueue(70 )
epq.enqueue(1_00 )
epq.enqueue(1 )
epq.enqueue(5 )
epq.enqueue(7 )
epq.enqueue(4 )
epq.enqueue(64 )
epq.enqueue(1_28 )
    print(epq)
print(epq.dequeue() )
print(epq.dequeue() )
print(epq.dequeue() )
print(epq.dequeue() )
print(epq.dequeue() )
    print(epq)
print(epq.dequeue() )
print(epq.dequeue() )
print(epq.dequeue() )
print(epq.dequeue() )
print(epq.dequeue() )
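# Both queues above pay O(n) per dequeue (linear scan, or min() plus remove()).
# A heap-backed variant with the same interface is sketched below; the class is
# ours (not part of the original module) and reuses UnderFlowError from above.
import heapq


class HeapPriorityQueue:
    def __init__(self):
        self._heap = []
        self._count = 0  # tie-breaker keeps FIFO order within one priority

    def enqueue(self, priority, data):
        heapq.heappush(self._heap, (priority, self._count, data))
        self._count += 1

    def dequeue(self):
        if not self._heap:
            raise UnderFlowError("All queues are empty")
        return heapq.heappop(self._heap)[-1]
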
if __name__ == "__main__":
fixed_priority_queue()
element_priority_queue()
| 213 |
import math
class SelfOrganizingMap:
    """simple docstring"""

    def get_winner(self, weights, sample):
        # winner = index of the weight vector with the smaller squared
        # Euclidean distance to the sample
        da = 0.0
        db = 0.0
        for i in range(len(sample)):
            da += math.pow((sample[i] - weights[0][i]), 2)
            db += math.pow((sample[i] - weights[1][i]), 2)
        return 0 if da < db else 1
    def update(self, weights, sample, j, alpha):
        # pull the winning weight vector towards the sample
        for i in range(len(sample)):
            weights[j][i] += alpha * (sample[i] - weights[j][i])
        return weights
def main():
    """simple docstring"""
    training_samples = [[1, 1, 0, 0], [0, 0, 0, 1], [1, 0, 0, 0], [0, 0, 1, 1]]
    # weight initialization ( n, C )
    weights = [[0.2, 0.6, 0.5, 0.9], [0.8, 0.4, 0.7, 0.3]]
    # training
    self_organizing_map = SelfOrganizingMap()
    epochs = 3
    alpha = 0.5
    for _ in range(epochs):
        for j in range(len(training_samples)):
            # training sample
            sample = training_samples[j]
            # Compute the winning vector
            winner = self_organizing_map.get_winner(weights, sample)
            # Update the winning vector
            weights = self_organizing_map.update(weights, sample, winner, alpha)
    # classify test sample
    sample = [0, 0, 0, 1]
    winner = self_organizing_map.get_winner(weights, sample)
    # results
    print(f"Clusters that the test sample belongs to : {winner}")
    print(f"Weights that have been trained : {weights}")
# running the main() function
if __name__ == "__main__":
main()
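# For reference: get_winner is just an argmin over squared Euclidean
# distances, so with numpy (not imported by this module) it collapses to:
# winner = int(np.argmin(((np.asarray(weights) - np.asarray(sample)) ** 2).sum(axis=1)))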
| 213 | 1 |
import argparse
import json
import logging
import os
import shutil
import sys
import tempfile
import unittest
from unittest import mock
import torch
from accelerate.utils import write_basic_config
from transformers.testing_utils import TestCasePlus, get_gpu_count, run_command, slow, torch_device
from transformers.utils import is_apex_available
logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger()


def get_setup_file():
    """simple docstring"""
    parser = argparse.ArgumentParser()
    parser.add_argument("-f")
    args = parser.parse_args()
    return args.f


def get_results(output_dir):
    """simple docstring"""
    results = {}
    path = os.path.join(output_dir, "all_results.json")
    if os.path.exists(path):
        with open(path, "r") as f:
            results = json.load(f)
    else:
        raise ValueError(f"can't find {path}")
    return results


def is_cuda_and_apex_available():
    """simple docstring"""
    is_using_cuda = torch.cuda.is_available() and torch_device == "cuda"
    return is_using_cuda and is_apex_available()
stream_handler = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
class ExamplesTestsNoTrainer(TestCasePlus):
    @classmethod
    def setUpClass(cls):
        # Write Accelerate config, will pick up on CPU, GPU, and multi-GPU
        cls.tmpdir = tempfile.mkdtemp()
        cls.configPath = os.path.join(cls.tmpdir, "default_config.yml")
        write_basic_config(save_location=cls.configPath)
        cls._launch_args = ["accelerate", "launch", "--config_file", cls.configPath]

    @classmethod
    def tearDownClass(cls):
        shutil.rmtree(cls.tmpdir)
@mock.patch.dict(os.environ , {"""WANDB_MODE""": """offline"""} )
def _snake_case ( self )-> Any:
lowerCamelCase_ =self.get_auto_remove_tmp_dir()
lowerCamelCase_ =f'\n {self.examples_dir}/pytorch/text-classification/run_glue_no_trainer.py\n --model_name_or_path distilbert-base-uncased\n --output_dir {tmp_dir}\n --train_file ./tests/fixtures/tests_samples/MRPC/train.csv\n --validation_file ./tests/fixtures/tests_samples/MRPC/dev.csv\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=1\n --learning_rate=1e-4\n --seed=42\n --checkpointing_steps epoch\n --with_tracking\n '.split()
if is_cuda_and_apex_available():
testargs.append("""--fp16""" )
run_command(self._launch_args + testargs )
lowerCamelCase_ =get_results(_SCREAMING_SNAKE_CASE )
self.assertGreaterEqual(result["""eval_accuracy"""] , 0.7_5 )
self.assertTrue(os.path.exists(os.path.join(_SCREAMING_SNAKE_CASE , """epoch_0""" ) ) )
self.assertTrue(os.path.exists(os.path.join(_SCREAMING_SNAKE_CASE , """glue_no_trainer""" ) ) )
@mock.patch.dict(os.environ , {"""WANDB_MODE""": """offline"""} )
def _snake_case ( self )-> Union[str, Any]:
lowerCamelCase_ =self.get_auto_remove_tmp_dir()
lowerCamelCase_ =f'\n {self.examples_dir}/pytorch/language-modeling/run_clm_no_trainer.py\n --model_name_or_path distilgpt2\n --train_file ./tests/fixtures/sample_text.txt\n --validation_file ./tests/fixtures/sample_text.txt\n --block_size 128\n --per_device_train_batch_size 5\n --per_device_eval_batch_size 5\n --num_train_epochs 2\n --output_dir {tmp_dir}\n --checkpointing_steps epoch\n --with_tracking\n '.split()
if torch.cuda.device_count() > 1:
# Skipping because there are not enough batches to train the model + would need a drop_last to work.
return
run_command(self._launch_args + testargs )
lowerCamelCase_ =get_results(_SCREAMING_SNAKE_CASE )
self.assertLess(result["""perplexity"""] , 100 )
self.assertTrue(os.path.exists(os.path.join(_SCREAMING_SNAKE_CASE , """epoch_0""" ) ) )
self.assertTrue(os.path.exists(os.path.join(_SCREAMING_SNAKE_CASE , """clm_no_trainer""" ) ) )
@mock.patch.dict(os.environ , {"""WANDB_MODE""": """offline"""} )
def _snake_case ( self )-> Tuple:
lowerCamelCase_ =self.get_auto_remove_tmp_dir()
lowerCamelCase_ =f'\n {self.examples_dir}/pytorch/language-modeling/run_mlm_no_trainer.py\n --model_name_or_path distilroberta-base\n --train_file ./tests/fixtures/sample_text.txt\n --validation_file ./tests/fixtures/sample_text.txt\n --output_dir {tmp_dir}\n --num_train_epochs=1\n --checkpointing_steps epoch\n --with_tracking\n '.split()
run_command(self._launch_args + testargs )
lowerCamelCase_ =get_results(_SCREAMING_SNAKE_CASE )
self.assertLess(result["""perplexity"""] , 42 )
self.assertTrue(os.path.exists(os.path.join(_SCREAMING_SNAKE_CASE , """epoch_0""" ) ) )
self.assertTrue(os.path.exists(os.path.join(_SCREAMING_SNAKE_CASE , """mlm_no_trainer""" ) ) )
@mock.patch.dict(os.environ , {"""WANDB_MODE""": """offline"""} )
def _snake_case ( self )-> Optional[Any]:
# with so little data distributed training needs more epochs to get the score on par with 0/1 gpu
lowerCamelCase_ =7 if get_gpu_count() > 1 else 2
lowerCamelCase_ =self.get_auto_remove_tmp_dir()
lowerCamelCase_ =f'\n {self.examples_dir}/pytorch/token-classification/run_ner_no_trainer.py\n --model_name_or_path bert-base-uncased\n --train_file tests/fixtures/tests_samples/conll/sample.json\n --validation_file tests/fixtures/tests_samples/conll/sample.json\n --output_dir {tmp_dir}\n --learning_rate=2e-4\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=2\n --num_train_epochs={epochs}\n --seed 7\n --checkpointing_steps epoch\n --with_tracking\n '.split()
run_command(self._launch_args + testargs )
lowerCamelCase_ =get_results(_SCREAMING_SNAKE_CASE )
self.assertGreaterEqual(result["""eval_accuracy"""] , 0.7_5 )
self.assertLess(result["""train_loss"""] , 0.5 )
self.assertTrue(os.path.exists(os.path.join(_SCREAMING_SNAKE_CASE , """epoch_0""" ) ) )
self.assertTrue(os.path.exists(os.path.join(_SCREAMING_SNAKE_CASE , """ner_no_trainer""" ) ) )
@unittest.skip(reason="""Fix me @muellerzr""" )
@mock.patch.dict(os.environ , {"""WANDB_MODE""": """offline"""} )
def _snake_case ( self )-> Optional[int]:
lowerCamelCase_ =self.get_auto_remove_tmp_dir()
lowerCamelCase_ =f'\n {self.examples_dir}/pytorch/question-answering/run_qa_no_trainer.py\n --model_name_or_path bert-base-uncased\n --version_2_with_negative\n --train_file tests/fixtures/tests_samples/SQUAD/sample.json\n --validation_file tests/fixtures/tests_samples/SQUAD/sample.json\n --output_dir {tmp_dir}\n --seed=42\n --max_train_steps=10\n --num_warmup_steps=2\n --learning_rate=2e-4\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=1\n --checkpointing_steps epoch\n --with_tracking\n '.split()
run_command(self._launch_args + testargs )
lowerCamelCase_ =get_results(_SCREAMING_SNAKE_CASE )
# Because we use --version_2_with_negative the testing script uses SQuAD v2 metrics.
self.assertGreaterEqual(result["""eval_f1"""] , 28 )
self.assertGreaterEqual(result["""eval_exact"""] , 28 )
self.assertTrue(os.path.exists(os.path.join(_SCREAMING_SNAKE_CASE , """epoch_0""" ) ) )
self.assertTrue(os.path.exists(os.path.join(_SCREAMING_SNAKE_CASE , """qa_no_trainer""" ) ) )
@mock.patch.dict(os.environ , {"""WANDB_MODE""": """offline"""} )
def _snake_case ( self )-> Optional[Any]:
lowerCamelCase_ =self.get_auto_remove_tmp_dir()
lowerCamelCase_ =f'\n {self.examples_dir}/pytorch/multiple-choice/run_swag_no_trainer.py\n --model_name_or_path bert-base-uncased\n --train_file tests/fixtures/tests_samples/swag/sample.json\n --validation_file tests/fixtures/tests_samples/swag/sample.json\n --output_dir {tmp_dir}\n --max_train_steps=20\n --num_warmup_steps=2\n --learning_rate=2e-4\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=1\n --with_tracking\n '.split()
run_command(self._launch_args + testargs )
lowerCamelCase_ =get_results(_SCREAMING_SNAKE_CASE )
self.assertGreaterEqual(result["""eval_accuracy"""] , 0.8 )
self.assertTrue(os.path.exists(os.path.join(_SCREAMING_SNAKE_CASE , """swag_no_trainer""" ) ) )
@slow
@mock.patch.dict(os.environ , {"""WANDB_MODE""": """offline"""} )
def _snake_case ( self )-> Tuple:
lowerCamelCase_ =self.get_auto_remove_tmp_dir()
lowerCamelCase_ =f'\n {self.examples_dir}/pytorch/summarization/run_summarization_no_trainer.py\n --model_name_or_path t5-small\n --train_file tests/fixtures/tests_samples/xsum/sample.json\n --validation_file tests/fixtures/tests_samples/xsum/sample.json\n --output_dir {tmp_dir}\n --max_train_steps=50\n --num_warmup_steps=8\n --learning_rate=2e-4\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=1\n --checkpointing_steps epoch\n --with_tracking\n '.split()
run_command(self._launch_args + testargs )
lowerCamelCase_ =get_results(_SCREAMING_SNAKE_CASE )
self.assertGreaterEqual(result["""eval_rouge1"""] , 10 )
self.assertGreaterEqual(result["""eval_rouge2"""] , 2 )
self.assertGreaterEqual(result["""eval_rougeL"""] , 7 )
self.assertGreaterEqual(result["""eval_rougeLsum"""] , 7 )
self.assertTrue(os.path.exists(os.path.join(_SCREAMING_SNAKE_CASE , """epoch_0""" ) ) )
self.assertTrue(os.path.exists(os.path.join(_SCREAMING_SNAKE_CASE , """summarization_no_trainer""" ) ) )
@slow
@mock.patch.dict(os.environ , {"""WANDB_MODE""": """offline"""} )
def _snake_case ( self )-> Optional[int]:
lowerCamelCase_ =self.get_auto_remove_tmp_dir()
lowerCamelCase_ =f'\n {self.examples_dir}/pytorch/translation/run_translation_no_trainer.py\n --model_name_or_path sshleifer/student_marian_en_ro_6_1\n --source_lang en\n --target_lang ro\n --train_file tests/fixtures/tests_samples/wmt16/sample.json\n --validation_file tests/fixtures/tests_samples/wmt16/sample.json\n --output_dir {tmp_dir}\n --max_train_steps=50\n --num_warmup_steps=8\n --num_beams=6\n --learning_rate=3e-3\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=1\n --source_lang en_XX\n --target_lang ro_RO\n --checkpointing_steps epoch\n --with_tracking\n '.split()
run_command(self._launch_args + testargs )
lowerCamelCase_ =get_results(_SCREAMING_SNAKE_CASE )
self.assertGreaterEqual(result["""eval_bleu"""] , 30 )
self.assertTrue(os.path.exists(os.path.join(_SCREAMING_SNAKE_CASE , """epoch_0""" ) ) )
self.assertTrue(os.path.exists(os.path.join(_SCREAMING_SNAKE_CASE , """translation_no_trainer""" ) ) )
@slow
def _snake_case ( self )-> Any:
lowerCamelCase_ =logging.StreamHandler(sys.stdout )
logger.addHandler(_SCREAMING_SNAKE_CASE )
lowerCamelCase_ =self.get_auto_remove_tmp_dir()
lowerCamelCase_ =f'\n {self.examples_dir}/pytorch/semantic-segmentation/run_semantic_segmentation_no_trainer.py\n --dataset_name huggingface/semantic-segmentation-test-sample\n --output_dir {tmp_dir}\n --max_train_steps=10\n --num_warmup_steps=2\n --learning_rate=2e-4\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=1\n --checkpointing_steps epoch\n '.split()
run_command(self._launch_args + testargs )
lowerCamelCase_ =get_results(_SCREAMING_SNAKE_CASE )
self.assertGreaterEqual(result["""eval_overall_accuracy"""] , 0.1_0 )
@mock.patch.dict(os.environ , {"""WANDB_MODE""": """offline"""} )
def _snake_case ( self )-> Optional[int]:
lowerCamelCase_ =self.get_auto_remove_tmp_dir()
lowerCamelCase_ =f'\n {self.examples_dir}/pytorch/image-classification/run_image_classification_no_trainer.py\n --model_name_or_path google/vit-base-patch16-224-in21k\n --dataset_name hf-internal-testing/cats_vs_dogs_sample\n --learning_rate 1e-4\n --per_device_train_batch_size 2\n --per_device_eval_batch_size 1\n --max_train_steps 2\n --train_val_split 0.1\n --seed 42\n --output_dir {tmp_dir}\n --with_tracking\n --checkpointing_steps 1\n '.split()
if is_cuda_and_apex_available():
testargs.append("""--fp16""" )
run_command(self._launch_args + testargs )
lowerCamelCase_ =get_results(_SCREAMING_SNAKE_CASE )
# The base model scores a 25%
self.assertGreaterEqual(result["""eval_accuracy"""] , 0.6 )
self.assertTrue(os.path.exists(os.path.join(_SCREAMING_SNAKE_CASE , """step_1""" ) ) )
self.assertTrue(os.path.exists(os.path.join(_SCREAMING_SNAKE_CASE , """image_classification_no_trainer""" ) ) )
| 154 |
def topological_sort(graph):
    """simple docstring"""
    indegree = [0] * len(graph)
    queue = []
    topo = []
    cnt = 0
    for values in graph.values():
        for i in values:
            indegree[i] += 1
    for i in range(len(indegree)):
        if indegree[i] == 0:
            queue.append(i)
    while queue:
        vertex = queue.pop(0)
        cnt += 1
        topo.append(vertex)
        for x in graph[vertex]:
            indegree[x] -= 1
            if indegree[x] == 0:
                queue.append(x)
    if cnt != len(graph):
        print("Cycle exists")
    else:
        print(topo)
# Adjacency List of Graph
graph = {0: [1, 2], 1: [3], 2: [3], 3: [4, 5], 4: [], 5: []}
topological_sort(graph)
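# For the adjacency list above, the algorithm prints one valid ordering:
# [0, 1, 2, 3, 4, 5]. Note that queue.pop(0) on a list is O(n); a
# collections.deque would make each pop O(1):
# from collections import deque
# queue = deque()  # then queue.popleft() instead of queue.pop(0)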
| 154 | 1 |
import datasets
from .evaluate import evaluate
_CITATION = '\\n@inproceedings{Rajpurkar2016SQuAD10,\n title={SQuAD: 100, 000+ Questions for Machine Comprehension of Text},\n author={Pranav Rajpurkar and Jian Zhang and Konstantin Lopyrev and Percy Liang},\n booktitle={EMNLP},\n year={2016}\n}\n'
_DESCRIPTION = '\nThis metric wraps the official scoring script for version 1 of the Stanford Question Answering Dataset (SQuAD).\n\nStanford Question Answering Dataset (SQuAD) is a reading comprehension dataset, consisting of questions posed by\ncrowdworkers on a set of Wikipedia articles, where the answer to every question is a segment of text, or span,\nfrom the corresponding reading passage, or the question might be unanswerable.\n'
_KWARGS_DESCRIPTION = '\nComputes SQuAD scores (F1 and EM).\nArgs:\n predictions: List of question-answers dictionaries with the following key-values:\n - \'id\': id of the question-answer pair as given in the references (see below)\n - \'prediction_text\': the text of the answer\n references: List of question-answers dictionaries with the following key-values:\n - \'id\': id of the question-answer pair (see above),\n - \'answers\': a Dict in the SQuAD dataset format\n {\n \'text\': list of possible texts for the answer, as a list of strings\n \'answer_start\': list of start positions for the answer, as a list of ints\n }\n Note that answer_start values are not taken into account to compute the metric.\nReturns:\n \'exact_match\': Exact match (the normalized answer exactly matches the gold answer)\n \'f1\': The F-score of predicted tokens versus the gold answer\nExamples:\n\n >>> predictions = [{\'prediction_text\': \'1976\', \'id\': \'56e10a3be3433e1400422b22\'}]\n >>> references = [{\'answers\': {\'answer_start\': [97], \'text\': [\'1976\']}, \'id\': \'56e10a3be3433e1400422b22\'}]\n >>> squad_metric = datasets.load_metric("squad")\n >>> results = squad_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'exact_match\': 100.0, \'f1\': 100.0}\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class Squad(datasets.Metric):
    def _info(self):
"""simple docstring"""
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"predictions": {"id": datasets.Value("string" ), "prediction_text": datasets.Value("string" )},
"references": {
"id": datasets.Value("string" ),
"answers": datasets.features.Sequence(
{
"text": datasets.Value("string" ),
"answer_start": datasets.Value("int32" ),
} ),
},
} ) , codebase_urls=["https://rajpurkar.github.io/SQuAD-explorer/"] , reference_urls=["https://rajpurkar.github.io/SQuAD-explorer/"] , )
    def _compute(self, predictions, references):
        """simple docstring"""
        pred_dict = {prediction["id"]: prediction["prediction_text"] for prediction in predictions}
        dataset = [
{
"paragraphs": [
{
"qas": [
{
"answers": [{"text": answer_text} for answer_text in ref["answers"]["text"]],
"id": ref["id"],
}
for ref in references
]
}
]
}
]
        score = evaluate(dataset=dataset, predictions=pred_dict)
return score
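    # Shape note: the official `evaluate` script expects the original SQuAD
    # nesting (dataset -> paragraphs -> qas), so _compute wraps the flat
    # `references` list back into that layout and maps predictions by question
    # id before scoring.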
| 328 |
from typing import List, Optional, Tuple, Union
import PIL
import torch
from torchvision import transforms
from diffusers.pipeline_utils import DiffusionPipeline, ImagePipelineOutput
from diffusers.schedulers import DDIMScheduler
from diffusers.utils import randn_tensor
trans = transforms.Compose(
[
transforms.Resize((2_5_6, 2_5_6)),
transforms.ToTensor(),
transforms.Normalize([0.5], [0.5]),
]
)
def preprocess(image):
    if isinstance(image, torch.Tensor):
        return image
    elif isinstance(image, PIL.Image.Image):
        image = [image]
    image = [trans(img.convert("RGB")) for img in image]
    image = torch.stack(image)
    return image
class DDIMNoiseComparativeAnalysisPipeline(DiffusionPipeline):
    def __init__(self, unet, scheduler):
        """simple docstring"""
        super().__init__()
        # make sure scheduler can always be converted to DDIM
        scheduler = DDIMScheduler.from_config(scheduler.config)
        self.register_modules(unet=unet, scheduler=scheduler)
    def check_inputs(self, strength):
        """simple docstring"""
        if strength < 0 or strength > 1:
            raise ValueError(f"The value of strength should in [0.0, 1.0] but is {strength}")
    def get_timesteps(self, num_inference_steps, strength, device):
        """simple docstring"""
        init_timestep = min(int(num_inference_steps * strength), num_inference_steps)
        t_start = max(num_inference_steps - init_timestep, 0)
        timesteps = self.scheduler.timesteps[t_start:]
        return timesteps, num_inference_steps - t_start
def lowercase ( self: Tuple , _SCREAMING_SNAKE_CASE: Optional[Any] , _SCREAMING_SNAKE_CASE: Optional[Any] , _SCREAMING_SNAKE_CASE: Tuple , _SCREAMING_SNAKE_CASE: Optional[int] , _SCREAMING_SNAKE_CASE: int , _SCREAMING_SNAKE_CASE: Optional[int]=None ) -> List[Any]:
"""simple docstring"""
if not isinstance(_SCREAMING_SNAKE_CASE , (torch.Tensor, PIL.Image.Image, list) ):
raise ValueError(
f'''`image` has to be of type `torch.Tensor`, `PIL.Image.Image` or list but is {type(_SCREAMING_SNAKE_CASE )}''' )
UpperCamelCase_ = image.to(device=_SCREAMING_SNAKE_CASE , dtype=_SCREAMING_SNAKE_CASE )
if isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) and len(_SCREAMING_SNAKE_CASE ) != batch_size:
raise ValueError(
f'''You have passed a list of generators of length {len(_SCREAMING_SNAKE_CASE )}, but requested an effective batch'''
f''' size of {batch_size}. Make sure the batch size matches the length of the generators.''' )
UpperCamelCase_ = init_latents.shape
UpperCamelCase_ = randn_tensor(_SCREAMING_SNAKE_CASE , generator=_SCREAMING_SNAKE_CASE , device=_SCREAMING_SNAKE_CASE , dtype=_SCREAMING_SNAKE_CASE )
# get latents
print("add noise to latents at timestep" , _SCREAMING_SNAKE_CASE )
UpperCamelCase_ = self.scheduler.add_noise(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
UpperCamelCase_ = init_latents
return latents
@torch.no_grad()
    def __call__(
        self,
        image: Union[torch.FloatTensor, PIL.Image.Image] = None,
        strength: float = 0.8,
        batch_size: int = 1,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        eta: float = 0.0,
        num_inference_steps: int = 50,
        use_clipped_model_output: Optional[bool] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
    ) -> Union[ImagePipelineOutput, Tuple]:
        # 1. Check inputs
        self.check_inputs(strength)
        # 2. Preprocess image
        image = preprocess(image)
        # 3. set timesteps
        self.scheduler.set_timesteps(num_inference_steps, device=self.device)
        timesteps, num_inference_steps = self.get_timesteps(num_inference_steps, strength, self.device)
        latent_timestep = timesteps[:1].repeat(batch_size)
        # 4. Prepare latent variables
        latents = self.prepare_latents(image, latent_timestep, batch_size, self.unet.dtype, self.device, generator)
        image = latents
        # 5. Denoising loop
        for t in self.progress_bar(timesteps):
            # 1. predict noise model_output
            model_output = self.unet(image, t).sample
            # 2. predict previous mean of image x_t-1 and add variance depending on eta
            # eta corresponds to η in paper and should be between [0, 1]
            # do x_t -> x_t-1
            image = self.scheduler.step(
                model_output,
                t,
                image,
                eta=eta,
                use_clipped_model_output=use_clipped_model_output,
                generator=generator,
            ).prev_sample
        image = (image / 2 + 0.5).clamp(0, 1)
        image = image.cpu().permute(0, 2, 3, 1).numpy()
        if output_type == "pil":
            image = self.numpy_to_pil(image)
        if not return_dict:
            return (image, latent_timestep.item())
        return ImagePipelineOutput(images=image)
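# Minimal usage sketch (hedged): works with any checkpoint exposing `unet` and
# `scheduler` components; the checkpoint id and file names are illustrative
# assumptions, not part of this file.
#
#   import PIL.Image
#   pipe = _UpperCamelCase.from_pretrained("google/ddpm-ema-church-256")
#   out = pipe(image=PIL.Image.open("input.png"), strength=0.5, num_inference_steps=50)
#   out.images[0].save("renoised.png")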
| 328 | 1 |
import doctest
from collections import deque
import numpy as np
class lowerCAmelCase__ :
'''simple docstring'''
def __init__( self) -> None:
_A : Tuple = [2, 1, 2, -1]
_A : Dict = [1, 2, 3, 4]
def _lowerCamelCase ( self) -> list[float]:
_A : int = len(self.first_signal)
_A : List[str] = len(self.second_signal)
_A : List[str] = max(__lowerCamelCase , __lowerCamelCase)
# create a zero matrix of max_length x max_length
_A : Dict = [[0] * max_length for i in range(__lowerCamelCase)]
# fills the smaller signal with zeros to make both signals of same length
if length_first_signal < length_second_signal:
self.first_signal += [0] * (max_length - length_first_signal)
elif length_first_signal > length_second_signal:
self.second_signal += [0] * (max_length - length_second_signal)
for i in range(__lowerCamelCase):
_A : int = deque(self.second_signal)
rotated_signal.rotate(__lowerCamelCase)
for j, item in enumerate(__lowerCamelCase):
matrix[i][j] += item
# multiply the matrix with the first signal
_A : Dict = np.matmul(np.transpose(__lowerCamelCase) , np.transpose(self.first_signal))
# rounding-off to two decimal places
return [round(__lowerCamelCase , 2) for i in final_signal]
if __name__ == "__main__":
doctest.testmod()
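# Cross-check sketch: circular convolution is pointwise multiplication in the
# DFT domain, so numpy's FFT reproduces the matrix result above:
#
#   a, b = [2, 1, 2, -1], [1, 2, 3, 4]
#   np.real(np.fft.ifft(np.fft.fft(a) * np.fft.fft(b)))  # -> [10., 10., 6., 14.]
#
# matching lowerCAmelCase__().circular_convolution() == [10.0, 10.0, 6.0, 14.0].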
| 11 |
import json
import os
from typing import Dict, List, Optional, Tuple
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {
'vocab_file': 'vocab.json',
'tokenizer_config_file': 'tokenizer_config.json',
'merges_file': 'merges.txt',
}
PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'facebook/s2t-wav2vec2-large-en-de': (
'https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/vocab.json'
),
},
'tokenizer_config_file': {
'facebook/s2t-wav2vec2-large-en-de': (
'https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/tokenizer_config.json'
),
},
'merges_file': {
'facebook/s2t-wav2vec2-large-en-de': (
'https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/merges.txt'
),
},
}
BPE_TOKEN_MERGES = '</w>'
BPE_TOKEN_VOCAB = '@@ '
def get_pairs(word):
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char
    return pairs
# Speech2Text2 has no max input length
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {'facebook/s2t-wav2vec2-large-en-de': 1024}
class lowerCAmelCase__(PreTrainedTokenizer):
    '''simple docstring'''

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    def __init__(
        self,
        vocab_file,
        bos_token="<s>",
        pad_token="<pad>",
        eos_token="</s>",
        unk_token="<unk>",
        do_lower_case=False,
        merges_file=None,
        **kwargs,
    ) -> None:
        super().__init__(
            unk_token=unk_token,
            bos_token=bos_token,
            eos_token=eos_token,
            pad_token=pad_token,
            do_lower_case=do_lower_case,
            **kwargs,
        )
        self.do_lower_case = do_lower_case
        with open(vocab_file, encoding="utf-8") as vocab_handle:
            self.encoder = json.load(vocab_handle)
        self.decoder = {v: k for k, v in self.encoder.items()}
        if merges_file is None:
            logger.info(f"No merges files provided. {self.__class__.__name__} can only be used for decoding.")
            self.bpe_ranks = None
            self.cache = None
        else:
            with open(merges_file, encoding="utf-8") as merges_handle:
                merges = merges_handle.read().split("\n")[:-1]
            merges = [tuple(merge.split()[:2]) for merge in merges]
            self.bpe_ranks = dict(zip(merges, range(len(merges))))
            self.cache = {}
    @property
    def vocab_size(self) -> int:
        return len(self.decoder)

    def get_vocab(self) -> Dict:
        return dict(self.encoder, **self.added_tokens_encoder)

    def bpe(self, token):
        word = tuple(token[:-1]) + (token[-1] + BPE_TOKEN_MERGES,)
        if token in self.cache:
            return self.cache[token]
        pairs = get_pairs(word)
        if not pairs:
            return token
        while True:
            bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf")))
            if bigram not in self.bpe_ranks:
                break
            first, second = bigram
            new_word = []
            i = 0
            while i < len(word):
                try:
                    j = word.index(first, i)
                except ValueError:
                    new_word.extend(word[i:])
                    break
                else:
                    new_word.extend(word[i:j])
                    i = j
                if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
                    new_word.append(first + second)
                    i += 2
                else:
                    new_word.append(word[i])
                    i += 1
            new_word = tuple(new_word)
            word = new_word
            if len(word) == 1:
                break
            else:
                pairs = get_pairs(word)
        word = " ".join(word)
        if word == "\n " + BPE_TOKEN_MERGES:
            word = "\n" + BPE_TOKEN_MERGES
        if word.endswith(BPE_TOKEN_MERGES):
            word = word.replace(BPE_TOKEN_MERGES, "")
        word = word.replace(" ", BPE_TOKEN_VOCAB)
        self.cache[token] = word
        return word
    def _tokenize(self, text):
        if self.bpe_ranks is None:
            raise ValueError(
                "This tokenizer was instantiated without a `merges.txt` file, so"
                " that it can only be used for decoding, not for encoding. "
                "Make sure to provide `merges.txt` file at instantiation to enable "
                "encoding.")
        if self.do_lower_case:
            text = text.lower()
        text = text.split()
        split_tokens = []
        for token in text:
            if token:
                split_tokens.extend(list(self.bpe(token).split(" ")))
        return split_tokens

    def _convert_token_to_id(self, token) -> int:
        return self.encoder.get(token, self.encoder.get(self.unk_token))

    def _convert_id_to_token(self, index) -> str:
        result = self.decoder.get(index, self.unk_token)
        return result

    def convert_tokens_to_string(self, tokens) -> str:
        string = " ".join(tokens)
        # make sure @@ tokens are concatenated
        string = "".join(string.split(BPE_TOKEN_VOCAB))
        return string

    def save_vocabulary(self, save_directory, filename_prefix=None):
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"])
        merges_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"])
        with open(vocab_file, "w", encoding="utf-8") as f:
            f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + "\n")
        index = 0
        if self.bpe_ranks is None:
            return (vocab_file,)
        with open(merges_file, "w", encoding="utf-8") as writer:
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]):
                if index != token_index:
                    logger.warning(
                        f"Saving vocabulary to {merges_file}: BPE merge indices are not consecutive."
                        " Please check that the tokenizer is not corrupted!")
                    index = token_index
                writer.write(" ".join(bpe_tokens) + "\n")
                index += 1
        return (vocab_file, merges_file)
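# Decoding sketch (hedged; `tok` is a hypothetical instance of the tokenizer
# above): joining tokens with spaces and stripping every "@@ " continuation
# marker merges sub-words back into words, e.g.
#
#   tok.convert_tokens_to_string(["hel@@", "lo", "wor@@", "ld"])  # -> "hello world"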
| 11 | 1 |
"""simple docstring"""
from __future__ import annotations
from fractions import Fraction
from math import gcd, sqrt
def is_sq(number: int) -> bool:
    sq = int(number**0.5)
    return number == sq * sq
def add_three(
    x_num: int, x_den: int, y_num: int, y_den: int, z_num: int, z_den: int
) -> tuple[int, int]:
    top = x_num * y_den * z_den + y_num * x_den * z_den + z_num * x_den * y_den
    bottom = x_den * y_den * z_den
    hcf = gcd(top, bottom)
    top //= hcf
    bottom //= hcf
    return top, bottom
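# Worked example: 1/2 + 1/3 + 1/6 = 1, so the reduced (numerator, denominator)
# pair is (1, 1):
#
#   add_three(1, 2, 1, 3, 1, 6)  # -> (1, 1)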
def solution(order: int = 35) -> int:
    unique_s: set = set()
    hcf: int
    total: Fraction = Fraction(0)
    fraction_sum: tuple[int, int]
    for x_num in range(1, order + 1):
        for x_den in range(x_num + 1, order + 1):
            for y_num in range(1, order + 1):
                for y_den in range(y_num + 1, order + 1):
                    # n=1
                    z_num = x_num * y_den + x_den * y_num
                    z_den = x_den * y_den
                    hcf = gcd(z_num, z_den)
                    z_num //= hcf
                    z_den //= hcf
                    if 0 < z_num < z_den <= order:
                        fraction_sum = add_three(
                            x_num, x_den, y_num, y_den, z_num, z_den)
                        unique_s.add(fraction_sum)
                    # n=2
                    z_num = (
                        x_num * x_num * y_den * y_den + x_den * x_den * y_num * y_num
                    )
                    z_den = x_den * x_den * y_den * y_den
                    if is_sq(z_num) and is_sq(z_den):
                        z_num = int(sqrt(z_num))
                        z_den = int(sqrt(z_den))
                        hcf = gcd(z_num, z_den)
                        z_num //= hcf
                        z_den //= hcf
                        if 0 < z_num < z_den <= order:
                            fraction_sum = add_three(
                                x_num, x_den, y_num, y_den, z_num, z_den)
                            unique_s.add(fraction_sum)
                    # n=-1
                    z_num = x_num * y_num
                    z_den = x_den * y_num + x_num * y_den
                    hcf = gcd(z_num, z_den)
                    z_num //= hcf
                    z_den //= hcf
                    if 0 < z_num < z_den <= order:
                        fraction_sum = add_three(
                            x_num, x_den, y_num, y_den, z_num, z_den)
                        unique_s.add(fraction_sum)
                    # n=-2
                    z_num = x_num * x_num * y_num * y_num
                    z_den = (
                        x_den * x_den * y_num * y_num + x_num * x_num * y_den * y_den
                    )
                    if is_sq(z_num) and is_sq(z_den):
                        z_num = int(sqrt(z_num))
                        z_den = int(sqrt(z_den))
                        hcf = gcd(z_num, z_den)
                        z_num //= hcf
                        z_den //= hcf
                        if 0 < z_num < z_den <= order:
                            fraction_sum = add_three(
                                x_num, x_den, y_num, y_den, z_num, z_den)
                            unique_s.add(fraction_sum)
    for num, den in unique_s:
        total += Fraction(num, den)
    return total.denominator + total.numerator
if __name__ == "__main__":
print(f'{solution() = }')
| 371 |
"""simple docstring"""
from collections.abc import Iterable
from typing import Generic, TypeVar
lowercase__ : Any = TypeVar("""_T""")
class UpperCamelCase__ ( Generic[_T] ):
"""simple docstring"""
    def __init__(self, iterable: Iterable[_T] | None = None) -> None:
        self._stack1: list[_T] = list(iterable or [])
        self._stack2: list[_T] = []

    def __len__(self) -> int:
        return len(self._stack1) + len(self._stack2)

    def __repr__(self) -> str:
        return f"Queue({tuple(self._stack2[::-1] + self._stack1)})"

    def put(self, item: _T) -> None:
        self._stack1.append(item)

    def get(self) -> _T:
        stack1_pop = self._stack1.pop
        stack2_append = self._stack2.append
        if not self._stack2:
            while self._stack1:
                stack2_append(stack1_pop())
        if not self._stack2:
            raise IndexError('Queue is empty')
        return self._stack2.pop()
if __name__ == "__main__":
from doctest import testmod
testmod()
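# Usage sketch: FIFO order with amortized O(1) operations via the two stacks.
#
#   q = UpperCamelCase__([1, 2, 3])
#   q.put(4)
#   q.get()  # -> 1
#   len(q)   # -> 3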
| 289 | 0 |
import unittest
from typing import Dict, List, Optional, Union
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import BridgeTowerImageProcessor
class BridgeTowerImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        do_resize: bool = True,
        size: Dict[str, int] = None,
        size_divisor: int = 32,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        do_normalize: bool = True,
        do_center_crop: bool = True,
        image_mean: Optional[Union[float, List[float]]] = [0.48145466, 0.4578275, 0.40821073],
        image_std: Optional[Union[float, List[float]]] = [0.26862954, 0.26130258, 0.27577711],
        do_pad: bool = True,
        batch_size=7,
        min_resolution=30,
        max_resolution=400,
        num_channels=3,
    ) -> None:
        self.parent = parent
        self.do_resize = do_resize
        self.size = size if size is not None else {"shortest_edge": 288}
        self.size_divisor = size_divisor
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.do_center_crop = do_center_crop
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_pad = do_pad
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution

    def prepare_image_processor_dict(self):
        return {
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_normalize": self.do_normalize,
            "do_resize": self.do_resize,
            "size": self.size,
            "size_divisor": self.size_divisor,
        }

    def get_expected_values(self, image_inputs, batched=False):
        if not batched:
            size = self.size["shortest_edge"]
            image = image_inputs[0]
            if isinstance(image, Image.Image):
                w, h = image.size
            else:
                h, w = image.shape[1], image.shape[2]
            scale = size / min(w, h)
            if h < w:
                newh, neww = size, scale * w
            else:
                newh, neww = scale * h, size
            max_size = int((1333 / 800) * size)
            if max(newh, neww) > max_size:
                scale = max_size / max(newh, neww)
                newh = newh * scale
                neww = neww * scale
            newh, neww = int(newh + 0.5), int(neww + 0.5)
            expected_height, expected_width = (
                newh // self.size_divisor * self.size_divisor,
                neww // self.size_divisor * self.size_divisor,
            )
        else:
            expected_values = []
            for image in image_inputs:
                expected_height, expected_width = self.get_expected_values([image])
                expected_values.append((expected_height, expected_width))
            expected_height = max(expected_values, key=lambda item: item[0])[0]
            expected_width = max(expected_values, key=lambda item: item[1])[1]
        return expected_height, expected_width
@require_torch
@require_vision
class BridgeTowerImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = BridgeTowerImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = BridgeTowerImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "size_divisor"))

    def test_batch_feature(self):
        pass

    def test_call_pil(self):
        # Initialize image processor
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)
        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape, (1, self.image_processor_tester.num_channels, expected_height, expected_width))
        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )

    def test_call_numpy(self):
        # Initialize image processor
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)
        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape, (1, self.image_processor_tester.num_channels, expected_height, expected_width))
        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )

    def test_call_pytorch(self):
        # Initialize image processor
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)
        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape, (1, self.image_processor_tester.num_channels, expected_height, expected_width))
        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )
| 259 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
A__ : List[str] = logging.get_logger(__name__)
A__ : int = {
'''weiweishi/roc-bert-base-zh''': '''https://huggingface.co/weiweishi/roc-bert-base-zh/resolve/main/config.json''',
}
class __snake_case(PretrainedConfig):
    model_type = "roc_bert"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        use_cache=True,
        pad_token_id=0,
        position_embedding_type="absolute",
        classifier_dropout=None,
        enable_pronunciation=True,
        enable_shape=True,
        pronunciation_embed_dim=768,
        pronunciation_vocab_size=910,
        shape_embed_dim=512,
        shape_vocab_size=24858,
        concat_input=True,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps
        self.use_cache = use_cache
        self.enable_pronunciation = enable_pronunciation
        self.enable_shape = enable_shape
        self.pronunciation_embed_dim = pronunciation_embed_dim
        self.pronunciation_vocab_size = pronunciation_vocab_size
        self.shape_embed_dim = shape_embed_dim
        self.shape_vocab_size = shape_vocab_size
        self.concat_input = concat_input
        self.position_embedding_type = position_embedding_type
        self.classifier_dropout = classifier_dropout
        super().__init__(pad_token_id=pad_token_id, **kwargs)
| 103 | 0 |
import os
import time
import warnings
from dataclasses import dataclass, field
from enum import Enum
from typing import List, Optional, Union
import torch
from filelock import FileLock
from torch.utils.data import Dataset
from ...tokenization_utils_base import PreTrainedTokenizerBase
from ...utils import logging
from ..processors.glue import glue_convert_examples_to_features, glue_output_modes, glue_processors
from ..processors.utils import InputFeatures
logger = logging.get_logger(__name__)
@dataclass
class GlueDataTrainingArguments:
    task_name: str = field(
        metadata={"help": "The name of the task to train on: " + ", ".join(glue_processors.keys())})
    data_dir: str = field(
        metadata={"help": "The input data dir. Should contain the .tsv files (or other data files) for the task."})
    max_seq_length: int = field(
        default=128,
        metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached training and evaluation sets"})

    def __post_init__(self):
        self.task_name = self.task_name.lower()


class Split(Enum):
    train = "train"
    dev = "dev"
    test = "test"


class GlueDataset(Dataset):
    args: GlueDataTrainingArguments
    output_mode: str
    features: List[InputFeatures]
def __init__( self : Optional[Any] , _UpperCAmelCase : GlueDataTrainingArguments , _UpperCAmelCase : PreTrainedTokenizerBase , _UpperCAmelCase : Optional[int] = None , _UpperCAmelCase : Union[str, Split] = Split.train , _UpperCAmelCase : Optional[str] = None , ) -> str:
'''simple docstring'''
warnings.warn(
"""This dataset will be removed from the library soon, preprocessing should be handled with the 🤗 Datasets """
"""library. You can have a look at this example script for pointers: """
"""https://github.com/huggingface/transformers/blob/main/examples/pytorch/text-classification/run_glue.py""" , _UpperCAmelCase , )
_lowerCAmelCase : Dict = args
_lowerCAmelCase : Dict = glue_processors[args.task_name]()
_lowerCAmelCase : Optional[Any] = glue_output_modes[args.task_name]
if isinstance(_UpperCAmelCase , _UpperCAmelCase ):
try:
_lowerCAmelCase : Union[str, Any] = Split[mode]
except KeyError:
raise KeyError("""mode is not a valid split name""" )
# Load data features from cache or dataset file
_lowerCAmelCase : List[Any] = os.path.join(
cache_dir if cache_dir is not None else args.data_dir , f"cached_{mode.value}_{tokenizer.__class__.__name__}_{args.max_seq_length}_{args.task_name}" , )
_lowerCAmelCase : Optional[Any] = self.processor.get_labels()
if args.task_name in ["mnli", "mnli-mm"] and tokenizer.__class__.__name__ in (
"RobertaTokenizer",
"RobertaTokenizerFast",
"XLMRobertaTokenizer",
"BartTokenizer",
"BartTokenizerFast",
):
# HACK(label indices are swapped in RoBERTa pretrained model)
_lowerCAmelCase : str = label_list[2], label_list[1]
_lowerCAmelCase : List[str] = label_list
# Make sure only the first process in distributed training processes the dataset,
# and the others will use the cache.
_lowerCAmelCase : Optional[int] = cached_features_file + """.lock"""
with FileLock(_UpperCAmelCase ):
if os.path.exists(_UpperCAmelCase ) and not args.overwrite_cache:
_lowerCAmelCase : Dict = time.time()
_lowerCAmelCase : Union[str, Any] = torch.load(_UpperCAmelCase )
logger.info(
f"Loading features from cached file {cached_features_file} [took %.3f s]" , time.time() - start )
else:
logger.info(f"Creating features from dataset file at {args.data_dir}" )
if mode == Split.dev:
_lowerCAmelCase : str = self.processor.get_dev_examples(args.data_dir )
elif mode == Split.test:
_lowerCAmelCase : Tuple = self.processor.get_test_examples(args.data_dir )
else:
_lowerCAmelCase : List[str] = self.processor.get_train_examples(args.data_dir )
if limit_length is not None:
_lowerCAmelCase : Optional[int] = examples[:limit_length]
_lowerCAmelCase : Union[str, Any] = glue_convert_examples_to_features(
_UpperCAmelCase , _UpperCAmelCase , max_length=args.max_seq_length , label_list=_UpperCAmelCase , output_mode=self.output_mode , )
_lowerCAmelCase : str = time.time()
torch.save(self.features , _UpperCAmelCase )
# ^ This seems to take a lot of time so I want to investigate why and how we can improve.
logger.info(
f"Saving features into cached file {cached_features_file} [took {time.time() - start:.3f} s]" )
def __len__( self : Optional[Any] ) -> Union[str, Any]:
'''simple docstring'''
return len(self.features )
def __getitem__( self : Dict , _UpperCAmelCase : Any ) -> InputFeatures:
'''simple docstring'''
return self.features[i]
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> List[str]:
'''simple docstring'''
return self.label_list
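# Minimal usage sketch (hedged): the tokenizer checkpoint and data_dir below
# are illustrative assumptions, not part of this file.
#
#   from transformers import AutoTokenizer
#   tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")
#   data_args = GlueDataTrainingArguments(task_name="mrpc", data_dir="./glue_data/MRPC")
#   train_dataset = GlueDataset(data_args, tokenizer=tokenizer, mode=Split.train)
#   train_dataset[0]  # -> InputFeatures(input_ids=..., attention_mask=..., label=...)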
| 354 |
import unittest
from transformers import load_tool
from .test_tools_common import ToolTesterMixin
TEXT = "\nHugging Face was founded in 2016 by French entrepreneurs Clément Delangue, Julien Chaumond, and Thomas Wolf originally as a company that developed a chatbot app targeted at teenagers.[2] After open-sourcing the model behind the chatbot, the company pivoted to focus on being a platform for machine learning.\n\nIn March 2021, Hugging Face raised $40 million in a Series B funding round.[3]\n\nOn April 28, 2021, the company launched the BigScience Research Workshop in collaboration with several other research groups to release an open large language model.[4] In 2022, the workshop concluded with the announcement of BLOOM, a multilingual large language model with 176 billion parameters.[5]\n"


class TextQuestionAnsweringToolTester(unittest.TestCase, ToolTesterMixin):
    def setUp(self):
        self.tool = load_tool("text-question-answering")
        self.tool.setup()
        self.remote_tool = load_tool("text-question-answering", remote=True)

    def test_exact_match_arg(self):
        result = self.tool(TEXT, "What did Hugging Face do in April 2021?")
        self.assertEqual(result, "launched the BigScience Research Workshop")

    def test_exact_match_arg_remote(self):
        result = self.remote_tool(TEXT, "What did Hugging Face do in April 2021?")
        self.assertEqual(result, "launched the BigScience Research Workshop")

    def test_exact_match_kwarg(self):
        result = self.tool(text=TEXT, question="What did Hugging Face do in April 2021?")
        self.assertEqual(result, "launched the BigScience Research Workshop")

    def test_exact_match_kwarg_remote(self):
        result = self.remote_tool(text=TEXT, question="What did Hugging Face do in April 2021?")
        self.assertEqual(result, "launched the BigScience Research Workshop")
| 159 | 0 |
from __future__ import annotations
def encode(plain: str) -> list[int]:
    return [ord(elem) - 96 for elem in plain]


def decode(encoded: list[int]) -> str:
    return "".join(chr(elem + 96) for elem in encoded)


def main() -> None:
    encoded = encode(input("-> ").strip().lower())
    print("Encoded: ", encoded)
    print("Decoded:", decode(encoded))
if __name__ == "__main__":
main()
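# Example of the a=1 ... z=26 mapping implemented above:
#
#   encode("hello")             # -> [8, 5, 12, 12, 15]
#   decode([8, 5, 12, 12, 15])  # -> "hello"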
| 103 |
import unittest
from transformers import MPNetConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MPNetForMaskedLM,
MPNetForMultipleChoice,
MPNetForQuestionAnswering,
MPNetForSequenceClassification,
MPNetForTokenClassification,
MPNetModel,
)
class MPNetModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=False,
        use_labels=True,
        vocab_size=99,
        hidden_size=64,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=64,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope

    def get_large_model_config(self):
        return MPNetConfig.from_pretrained("microsoft/mpnet-base")

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)
        config = self.get_config()
        return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
return MPNetConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , )
    def create_and_check_mpnet_model(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = MPNetModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, input_mask)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))

    def create_and_check_mpnet_for_question_answering(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = MPNetForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_mpnet_for_sequence_classification(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = MPNetForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_mpnet_for_multiple_choice(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_choices = self.num_choices
        model = MPNetForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids,
            attention_mask=multiple_choice_input_mask,
            labels=choice_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

    def create_and_check_mpnet_for_token_classification(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = MPNetForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (config, input_ids, input_mask, sequence_labels, token_labels, choice_labels) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class MPNetModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
(
MPNetForMaskedLM,
MPNetForMultipleChoice,
MPNetForQuestionAnswering,
MPNetForSequenceClassification,
MPNetForTokenClassification,
MPNetModel,
)
if is_torch_available()
else ()
)
    pipeline_model_mapping = (
{
'''feature-extraction''': MPNetModel,
'''fill-mask''': MPNetForMaskedLM,
'''question-answering''': MPNetForQuestionAnswering,
'''text-classification''': MPNetForSequenceClassification,
'''token-classification''': MPNetForTokenClassification,
'''zero-shot''': MPNetForSequenceClassification,
}
if is_torch_available()
else {}
)
    test_pruning = False
    test_resize_embeddings = True
    def setUp(self):
        self.model_tester = MPNetModelTester(self)
        self.config_tester = ConfigTester(self, config_class=MPNetConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_mpnet_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mpnet_model(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mpnet_for_sequence_classification(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mpnet_for_multiple_choice(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mpnet_for_token_classification(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mpnet_for_question_answering(*config_and_inputs)
@require_torch
class MPNetModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_no_head(self):
        model = MPNetModel.from_pretrained("microsoft/mpnet-base")
        input_ids = torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]])
        output = model(input_ids)[0]
        expected_shape = torch.Size((1, 11, 768))
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [[[-0.0550, 0.1943, -0.0740], [-0.0562, 0.2211, -0.0579], [-0.0437, 0.3337, -0.0641]]])
        # compare the actual values for a slice.
        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))
| 103 | 1 |
import csv
from collections import defaultdict
from dataclasses import dataclass, field
from typing import List, Optional
import matplotlib.pyplot as plt
import numpy as np
from matplotlib.ticker import ScalarFormatter
from transformers import HfArgumentParser
def list_field(default=None, metadata=None):
    return field(default_factory=lambda: default, metadata=metadata)
@dataclass
class PlotArguments:
    csv_file: str = field(
        metadata={"help": "The csv file to plot."},
    )
    plot_along_batch: bool = field(
        default=False,
        metadata={"help": "Whether to plot along batch size or sequence length. Defaults to sequence length."},
    )
    is_time: bool = field(
        default=False,
        metadata={"help": "Whether the csv file has time results or memory results. Defaults to memory results."},
    )
    no_log_scale: bool = field(
        default=False,
        metadata={"help": "Disable logarithmic scale when plotting"},
    )
    is_train: bool = field(
        default=False,
        metadata={
            "help": "Whether the csv file has training results or inference results. Defaults to inference results."
        },
    )
    figure_png_file: Optional[str] = field(
        default=None,
        metadata={"help": "Filename under which the plot will be saved. If unused no plot is saved."},
    )
    short_model_names: Optional[List[str]] = list_field(
        default=None, metadata={"help": "List of model names that are used instead of the ones in the csv file."})
def can_convert_to_int(string):
    try:
        int(string)
        return True
    except ValueError:
        return False


def can_convert_to_float(string):
    try:
        float(string)
        return True
    except ValueError:
        return False
class Plot:
    def __init__(self, args):
        self.args = args
        self.result_dict = defaultdict(lambda: {"bsz": [], "seq_len": [], "result": {}})
        with open(self.args.csv_file, newline="") as csv_file:
            reader = csv.DictReader(csv_file)
            for row in reader:
                model_name = row["model"]
                self.result_dict[model_name]["bsz"].append(int(row["batch_size"]))
                self.result_dict[model_name]["seq_len"].append(int(row["sequence_length"]))
                if can_convert_to_int(row["result"]):
                    # value is not None
                    self.result_dict[model_name]["result"][
                        (int(row["batch_size"]), int(row["sequence_length"]))
                    ] = int(row["result"])
                elif can_convert_to_float(row["result"]):
                    # value is not None
                    self.result_dict[model_name]["result"][
                        (int(row["batch_size"]), int(row["sequence_length"]))
                    ] = float(row["result"])
    def plot(self):
        fig, ax = plt.subplots()
        title_str = "Time usage" if self.args.is_time else "Memory usage"
        title_str = title_str + " for training" if self.args.is_train else title_str + " for inference"
        if not self.args.no_log_scale:
            # set logarithm scales
            ax.set_xscale("log")
            ax.set_yscale("log")
        for axis in [ax.xaxis, ax.yaxis]:
            axis.set_major_formatter(ScalarFormatter())
        for model_name_idx, model_name in enumerate(self.result_dict.keys()):
            batch_sizes = sorted(set(self.result_dict[model_name]["bsz"]))
            sequence_lengths = sorted(set(self.result_dict[model_name]["seq_len"]))
            results = self.result_dict[model_name]["result"]
            (x_axis_array, inner_loop_array) = (
                (batch_sizes, sequence_lengths) if self.args.plot_along_batch else (sequence_lengths, batch_sizes)
            )
            label_model_name = (
                model_name if self.args.short_model_names is None else self.args.short_model_names[model_name_idx]
            )
            for inner_loop_value in inner_loop_array:
                if self.args.plot_along_batch:
                    y_axis_array = np.asarray(
                        [results[(x, inner_loop_value)] for x in x_axis_array if (x, inner_loop_value) in results],
                        dtype=int,
                    )
                else:
                    y_axis_array = np.asarray(
                        [results[(inner_loop_value, x)] for x in x_axis_array if (inner_loop_value, x) in results],
                        dtype=np.float32,
                    )
                (x_axis_label, inner_loop_label) = (
                    ("batch_size", "len") if self.args.plot_along_batch else ("in #tokens", "bsz")
                )
                x_axis_array = np.asarray(x_axis_array, int)[: len(y_axis_array)]
                plt.scatter(
                    x_axis_array, y_axis_array, label=f"{label_model_name} - {inner_loop_label}: {inner_loop_value}")
                plt.plot(x_axis_array, y_axis_array, "--")
                title_str += f" {label_model_name} vs."
        title_str = title_str[:-4]
        y_axis_label = "Time in s" if self.args.is_time else "Memory in MB"
        # plot
        plt.title(title_str)
        plt.xlabel(x_axis_label)
        plt.ylabel(y_axis_label)
        plt.legend()
        if self.args.figure_png_file is not None:
            plt.savefig(self.args.figure_png_file)
        else:
            plt.show()
def main():
    parser = HfArgumentParser(PlotArguments)
    plot_args = parser.parse_args_into_dataclasses()[0]
    plot = Plot(args=plot_args)
    plot.plot()
if __name__ == "__main__":
main()
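# CLI sketch (hedged): assuming this script is saved as plot_csv_file.py and
# results.csv was produced by the benchmark scripts, a typical invocation is
#
#   python plot_csv_file.py --csv_file results.csv --figure_png_file plot.png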
| 358 |
import warnings
from typing import List
import numpy as np
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
from ...utils import is_flax_available, is_tf_available, is_torch_available
class OwlViTProcessor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "OwlViTImageProcessor"
    tokenizer_class = ("CLIPTokenizer", "CLIPTokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")
        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")
        super().__init__(image_processor, tokenizer)
    def __call__(self, text=None, query_images=None, images=None, padding="max_length", return_tensors="np", **kwargs):
        if text is None and query_images is None and images is None:
            raise ValueError(
                "You have to specify at least one text or query image or image. All three cannot be none.")
        if text is not None:
            if isinstance(text, str) or (isinstance(text, List) and not isinstance(text[0], List)):
                encodings = [self.tokenizer(text, padding=padding, return_tensors=return_tensors, **kwargs)]
            elif isinstance(text, List) and isinstance(text[0], List):
                encodings = []
                # Maximum number of queries across batch
                max_num_queries = max([len(t) for t in text])
                # Pad all batch samples to max number of text queries
                for t in text:
                    if len(t) != max_num_queries:
                        t = t + [" "] * (max_num_queries - len(t))
                    encoding = self.tokenizer(t, padding=padding, return_tensors=return_tensors, **kwargs)
                    encodings.append(encoding)
            else:
                raise TypeError("Input text should be a string, a list of strings or a nested list of strings")
            if return_tensors == "np":
                input_ids = np.concatenate([encoding["input_ids"] for encoding in encodings], axis=0)
                attention_mask = np.concatenate([encoding["attention_mask"] for encoding in encodings], axis=0)
            elif return_tensors == "jax" and is_flax_available():
                import jax.numpy as jnp

                input_ids = jnp.concatenate([encoding["input_ids"] for encoding in encodings], axis=0)
                attention_mask = jnp.concatenate([encoding["attention_mask"] for encoding in encodings], axis=0)
            elif return_tensors == "pt" and is_torch_available():
                import torch

                input_ids = torch.cat([encoding["input_ids"] for encoding in encodings], dim=0)
                attention_mask = torch.cat([encoding["attention_mask"] for encoding in encodings], dim=0)
            elif return_tensors == "tf" and is_tf_available():
                import tensorflow as tf

                input_ids = tf.stack([encoding["input_ids"] for encoding in encodings], axis=0)
                attention_mask = tf.stack([encoding["attention_mask"] for encoding in encodings], axis=0)
            else:
                raise ValueError("Target return tensor type could not be returned")
            encoding = BatchEncoding()
            encoding["input_ids"] = input_ids
            encoding["attention_mask"] = attention_mask
        if query_images is not None:
            encoding = BatchEncoding()
            query_pixel_values = self.image_processor(
                query_images, return_tensors=return_tensors, **kwargs
            ).pixel_values
            encoding["query_pixel_values"] = query_pixel_values
        if images is not None:
            image_features = self.image_processor(images, return_tensors=return_tensors, **kwargs)
        if text is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif query_images is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif text is not None or query_images is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)
    def post_process(self, *args, **kwargs):
        return self.image_processor.post_process(*args, **kwargs)

    def post_process_object_detection(self, *args, **kwargs):
        return self.image_processor.post_process_object_detection(*args, **kwargs)

    def post_process_image_guided_detection(self, *args, **kwargs):
        return self.image_processor.post_process_image_guided_detection(*args, **kwargs)

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)
    @property
    def feature_extractor_class(self):
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
            FutureWarning,
        )
        return self.image_processor_class

    @property
    def feature_extractor(self):
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.",
            FutureWarning,
        )
        return self.image_processor
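# Usage sketch (hedged): the checkpoint id and image file are illustrative
# assumptions, not part of this file.
#
#   from PIL import Image
#   processor = OwlViTProcessor.from_pretrained("google/owlvit-base-patch32")
#   inputs = processor(text=[["a photo of a cat", "a photo of a dog"]],
#                      images=Image.open("cats.png"), return_tensors="pt")
#   inputs.keys()  # -> dict_keys(['input_ids', 'attention_mask', 'pixel_values'])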
| 189 | 0 |
from __future__ import annotations
def extended_euclid(a: int, b: int) -> tuple[int, int]:
    if b == 0:
        return (1, 0)
    (x, y) = extended_euclid(b, a % b)
    k = a // b
    return (y, x - k * y)


def chinese_remainder_theorem(n1: int, r1: int, n2: int, r2: int) -> int:
    (x, y) = extended_euclid(n1, n2)
    m = n1 * n2
    n = r2 * x * n1 + r1 * y * n2
    return (n % m + m) % m


def invert_modulo(a: int, n: int) -> int:
    (b, x) = extended_euclid(a, n)
    if b < 0:
        b = (b % n + n) % n
    return b


def chinese_remainder_theorem2(n1: int, r1: int, n2: int, r2: int) -> int:
    x, y = invert_modulo(n1, n2), invert_modulo(n2, n1)
    m = n1 * n2
    n = r2 * x * n1 + r1 * y * n2
    return (n % m + m) % m
if __name__ == "__main__":
from doctest import testmod
testmod(name="""chinese_remainder_theorem""", verbose=True)
testmod(name="""chinese_remainder_theorem2""", verbose=True)
testmod(name="""invert_modulo""", verbose=True)
testmod(name="""extended_euclid""", verbose=True)
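# Worked example: find x with x = 1 (mod 5) and x = 3 (mod 7).
#
#   chinese_remainder_theorem(5, 1, 7, 3)   # -> 31  (31 % 5 == 1, 31 % 7 == 3)
#   chinese_remainder_theorem2(5, 1, 7, 3)  # -> 31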
| 82 |
from typing import Optional, Tuple, Union
import flax
import flax.linen as nn
import jax
import jax.numpy as jnp
from flax.core.frozen_dict import FrozenDict
from ..configuration_utils import ConfigMixin, flax_register_to_config
from ..utils import BaseOutput
from .embeddings_flax import FlaxTimestepEmbedding, FlaxTimesteps
from .modeling_flax_utils import FlaxModelMixin
from .unet_ad_blocks_flax import (
FlaxCrossAttnDownBlockaD,
FlaxDownBlockaD,
FlaxUNetMidBlockaDCrossAttn,
)
@flax.struct.dataclass
class FlaxControlNetOutput(BaseOutput):
    down_block_res_samples: jnp.ndarray
    mid_block_res_sample: jnp.ndarray
class FlaxControlNetConditioningEmbedding(nn.Module):
    conditioning_embedding_channels: int
    block_out_channels: Tuple[int] = (16, 32, 96, 256)
    dtype: jnp.dtype = jnp.float32

    def setup(self) -> None:
        self.conv_in = nn.Conv(
            self.block_out_channels[0],
            kernel_size=(3, 3),
            padding=((1, 1), (1, 1)),
            dtype=self.dtype,
        )
        blocks = []
        for i in range(len(self.block_out_channels) - 1):
            channel_in = self.block_out_channels[i]
            channel_out = self.block_out_channels[i + 1]
            conv1 = nn.Conv(
                channel_in,
                kernel_size=(3, 3),
                padding=((1, 1), (1, 1)),
                dtype=self.dtype,
            )
            blocks.append(conv1)
            conv2 = nn.Conv(
                channel_out,
                kernel_size=(3, 3),
                strides=(2, 2),
                padding=((1, 1), (1, 1)),
                dtype=self.dtype,
            )
            blocks.append(conv2)
        self.blocks = blocks
        self.conv_out = nn.Conv(
            self.conditioning_embedding_channels,
            kernel_size=(3, 3),
            padding=((1, 1), (1, 1)),
            kernel_init=nn.initializers.zeros_init(),
            bias_init=nn.initializers.zeros_init(),
            dtype=self.dtype,
        )

    def __call__(self, conditioning):
        embedding = self.conv_in(conditioning)
        embedding = nn.silu(embedding)
        for block in self.blocks:
            embedding = block(embedding)
            embedding = nn.silu(embedding)
        embedding = self.conv_out(embedding)
        return embedding
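# Shape sketch (hedged): flax `nn.Conv` is channels-last, and the three
# stride-2 convs in the default block_out_channels downsample by 8x, so:
#
#   module = FlaxControlNetConditioningEmbedding(conditioning_embedding_channels=320)
#   params = module.init(jax.random.PRNGKey(0), jnp.zeros((1, 256, 256, 3)))
#   out = module.apply(params, jnp.zeros((1, 256, 256, 3)))  # shape (1, 32, 32, 320)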
@flax_register_to_config
class FlaxControlNetModel(nn.Module, FlaxModelMixin, ConfigMixin):
    sample_size: int = 32
    in_channels: int = 4
    down_block_types: Tuple[str] = (
        "CrossAttnDownBlock2D",
        "CrossAttnDownBlock2D",
        "CrossAttnDownBlock2D",
        "DownBlock2D",
    )
    only_cross_attention: Union[bool, Tuple[bool]] = False
    block_out_channels: Tuple[int] = (320, 640, 1280, 1280)
    layers_per_block: int = 2
    attention_head_dim: Union[int, Tuple[int]] = 8
    num_attention_heads: Optional[Union[int, Tuple[int]]] = None
    cross_attention_dim: int = 1280
    dropout: float = 0.0
    use_linear_projection: bool = False
    dtype: jnp.dtype = jnp.float32
    flip_sin_to_cos: bool = True
    freq_shift: int = 0
    controlnet_conditioning_channel_order: str = "rgb"
    conditioning_embedding_out_channels: Tuple[int] = (16, 32, 96, 256)
    def init_weights(self, rng) -> FrozenDict:
        # init input tensors
        sample_shape = (1, self.in_channels, self.sample_size, self.sample_size)
        sample = jnp.zeros(sample_shape, dtype=jnp.float32)
        timesteps = jnp.ones((1,), dtype=jnp.int32)
        encoder_hidden_states = jnp.zeros((1, 1, self.cross_attention_dim), dtype=jnp.float32)
        controlnet_cond_shape = (1, 3, self.sample_size * 8, self.sample_size * 8)
        controlnet_cond = jnp.zeros(controlnet_cond_shape, dtype=jnp.float32)
        params_rng, dropout_rng = jax.random.split(rng)
        rngs = {"params": params_rng, "dropout": dropout_rng}
        return self.init(rngs, sample, timesteps, encoder_hidden_states, controlnet_cond)["params"]
    def setup(self) -> None:
        block_out_channels = self.block_out_channels
        time_embed_dim = block_out_channels[0] * 4
        # If `num_attention_heads` is not defined (which is the case for most models)
        # it will default to `attention_head_dim`. This looks weird upon first reading it and it is.
        # The reason for this behavior is to correct for incorrectly named variables that were introduced
        # when this library was created. The incorrect naming was only discovered much later in https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131
        # Changing `attention_head_dim` to `num_attention_heads` for 40,000+ configurations is too backwards breaking
        # which is why we correct for the naming here.
        num_attention_heads = self.num_attention_heads or self.attention_head_dim

        # input
        self.conv_in = nn.Conv(
            block_out_channels[0],
            kernel_size=(3, 3),
            strides=(1, 1),
            padding=((1, 1), (1, 1)),
            dtype=self.dtype,
        )

        # time
        self.time_proj = FlaxTimesteps(
            block_out_channels[0], flip_sin_to_cos=self.flip_sin_to_cos, freq_shift=self.config.freq_shift)
        self.time_embedding = FlaxTimestepEmbedding(time_embed_dim, dtype=self.dtype)

        self.controlnet_cond_embedding = FlaxControlNetConditioningEmbedding(
            conditioning_embedding_channels=block_out_channels[0],
            block_out_channels=self.conditioning_embedding_out_channels,
        )

        only_cross_attention = self.only_cross_attention
        if isinstance(only_cross_attention, bool):
            only_cross_attention = (only_cross_attention,) * len(self.down_block_types)
        if isinstance(num_attention_heads, int):
            num_attention_heads = (num_attention_heads,) * len(self.down_block_types)

        # down
        down_blocks = []
        controlnet_down_blocks = []
        output_channel = block_out_channels[0]
        controlnet_block = nn.Conv(
            output_channel,
            kernel_size=(1, 1),
            padding="VALID",
            kernel_init=nn.initializers.zeros_init(),
            bias_init=nn.initializers.zeros_init(),
            dtype=self.dtype,
        )
        controlnet_down_blocks.append(controlnet_block)
        for i, down_block_type in enumerate(self.down_block_types):
            input_channel = output_channel
            output_channel = block_out_channels[i]
            is_final_block = i == len(block_out_channels) - 1
            if down_block_type == "CrossAttnDownBlock2D":
                down_block = FlaxCrossAttnDownBlockaD(
                    in_channels=input_channel,
                    out_channels=output_channel,
                    dropout=self.dropout,
                    num_layers=self.layers_per_block,
                    num_attention_heads=num_attention_heads[i],
                    add_downsample=not is_final_block,
                    use_linear_projection=self.use_linear_projection,
                    only_cross_attention=only_cross_attention[i],
                    dtype=self.dtype,
                )
            else:
                down_block = FlaxDownBlockaD(
                    in_channels=input_channel,
                    out_channels=output_channel,
                    dropout=self.dropout,
                    num_layers=self.layers_per_block,
                    add_downsample=not is_final_block,
                    dtype=self.dtype,
                )
            down_blocks.append(down_block)
            for _ in range(self.layers_per_block):
                controlnet_block = nn.Conv(
                    output_channel,
                    kernel_size=(1, 1),
                    padding="VALID",
                    kernel_init=nn.initializers.zeros_init(),
                    bias_init=nn.initializers.zeros_init(),
                    dtype=self.dtype,
                )
                controlnet_down_blocks.append(controlnet_block)
            if not is_final_block:
                controlnet_block = nn.Conv(
                    output_channel,
                    kernel_size=(1, 1),
                    padding="VALID",
                    kernel_init=nn.initializers.zeros_init(),
                    bias_init=nn.initializers.zeros_init(),
                    dtype=self.dtype,
                )
                controlnet_down_blocks.append(controlnet_block)
        self.down_blocks = down_blocks
        self.controlnet_down_blocks = controlnet_down_blocks

        # mid
        mid_block_channel = block_out_channels[-1]
        self.mid_block = FlaxUNetMidBlockaDCrossAttn(
            in_channels=mid_block_channel,
            dropout=self.dropout,
            num_attention_heads=num_attention_heads[-1],
            use_linear_projection=self.use_linear_projection,
            dtype=self.dtype,
        )
        self.controlnet_mid_block = nn.Conv(
            mid_block_channel,
            kernel_size=(1, 1),
            padding="VALID",
            kernel_init=nn.initializers.zeros_init(),
            bias_init=nn.initializers.zeros_init(),
            dtype=self.dtype,
        )
def __call__( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = 1.0 , __lowerCAmelCase = True , __lowerCAmelCase = False , ):
'''simple docstring'''
lowerCamelCase__ = self.controlnet_conditioning_channel_order
if channel_order == "bgr":
lowerCamelCase__ = jnp.flip(__lowerCAmelCase , axis=1 )
# 1. time
if not isinstance(__lowerCAmelCase , jnp.ndarray ):
lowerCamelCase__ = jnp.array([timesteps] , dtype=jnp.intaa )
elif isinstance(__lowerCAmelCase , jnp.ndarray ) and len(timesteps.shape ) == 0:
lowerCamelCase__ = timesteps.astype(dtype=jnp.floataa )
lowerCamelCase__ = jnp.expand_dims(__lowerCAmelCase , 0 )
lowerCamelCase__ = self.time_proj(__lowerCAmelCase )
lowerCamelCase__ = self.time_embedding(__lowerCAmelCase )
# 2. pre-process
lowerCamelCase__ = jnp.transpose(__lowerCAmelCase , (0, 2, 3, 1) )
lowerCamelCase__ = self.conv_in(__lowerCAmelCase )
lowerCamelCase__ = jnp.transpose(__lowerCAmelCase , (0, 2, 3, 1) )
lowerCamelCase__ = self.controlnet_cond_embedding(__lowerCAmelCase )
sample += controlnet_cond
# 3. down
lowerCamelCase__ = (sample,)
for down_block in self.down_blocks:
if isinstance(__lowerCAmelCase , __lowerCAmelCase ):
lowerCamelCase__ , lowerCamelCase__ = down_block(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , deterministic=not train )
else:
lowerCamelCase__ , lowerCamelCase__ = down_block(__lowerCAmelCase , __lowerCAmelCase , deterministic=not train )
down_block_res_samples += res_samples
# 4. mid
lowerCamelCase__ = self.mid_block(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , deterministic=not train )
        # 5. controlnet blocks
lowerCamelCase__ = ()
for down_block_res_sample, controlnet_block in zip(__lowerCAmelCase , self.controlnet_down_blocks ):
lowerCamelCase__ = controlnet_block(__lowerCAmelCase )
controlnet_down_block_res_samples += (down_block_res_sample,)
lowerCamelCase__ = controlnet_down_block_res_samples
lowerCamelCase__ = self.controlnet_mid_block(__lowerCAmelCase )
# 6. scaling
lowerCamelCase__ = [sample * conditioning_scale for sample in down_block_res_samples]
mid_block_res_sample *= conditioning_scale
if not return_dict:
return (down_block_res_samples, mid_block_res_sample)
return FlaxControlNetOutput(
down_block_res_samples=__lowerCAmelCase , mid_block_res_sample=__lowerCAmelCase )
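# A minimal, self-contained sketch of the zero-initialized 1x1 projections used for
# the controlnet blocks above (illustrative only; `ZeroConvBlock` is a hypothetical
# name and the shapes below are arbitrary). Zero kernels and biases make every
# ControlNet residual exactly zero at initialization, so training starts from the
# frozen UNet's unmodified behaviour.
import jax
import jax.numpy as jnp
import flax.linen as nn


class ZeroConvBlock(nn.Module):
    features: int

    @nn.compact
    def __call__(self, x):
        # 1x1 conv whose kernel and bias both start at zero -> output is all zeros at init
        return nn.Conv(
            self.features,
            kernel_size=(1, 1),
            padding="VALID",
            kernel_init=nn.initializers.zeros_init(),
            bias_init=nn.initializers.zeros_init(),
        )(x)


_x = jnp.ones((1, 8, 8, 4))  # NHWC sample
_params = ZeroConvBlock(features=4).init(jax.random.PRNGKey(0), _x)
assert jnp.allclose(ZeroConvBlock(features=4).apply(_params, _x), 0.0)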
| 209 | 0 |
'''simple docstring'''
def molarity_to_normality(nfactor, moles, volume):
    return round(float(moles / volume) * nfactor)


def moles_to_pressure(volume, moles, temperature):
    return round(float((moles * 0.0_821 * temperature) / (volume)))


def moles_to_volume(pressure, moles, temperature):
    return round(float((moles * 0.0_821 * temperature) / (pressure)))


def pressure_and_volume_to_temperature(pressure, moles, volume):
    return round(float((pressure * volume) / (0.0_821 * moles)))
if __name__ == "__main__":
import doctest
doctest.testmod()
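    # A hand-worked check (illustrative values, not from the source) of the
    # PV = nRT arithmetic the helpers above implement, with R = 0.0821 L*atm/(mol*K).
    R = 0.0_821
    moles, temperature, volume = 2.0, 300.0, 5.0
    pressure = (moles * R * temperature) / volume  # ~9.852 atm
    assert round(pressure) == 10
    assert round((pressure * volume) / (R * moles)) == 300  # recovers the temperature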
| 37 |
'''simple docstring'''
import argparse
import re
from flax.traverse_util import flatten_dict, unflatten_dict
from t5x import checkpoints
from transformers import SwitchTransformersConfig, SwitchTransformersForConditionalGeneration
from transformers.modeling_flax_pytorch_utils import load_flax_weights_in_pytorch_model
from transformers.utils import logging
logging.set_verbosity_info()
# should not include what is already done by the `from_pt` argument
MOE_LAYER_NAME_MAPPING = {
"/attention/": "/0/SelfAttention/",
"/self_attention/": "/0/SelfAttention/",
"/encoder_decoder_attention/": "/1/EncDecAttention/",
"value": "v",
"query": "q",
"key": "k",
"out": "o",
"pre_self_attention_layer_norm": "0/layer_norm",
"pre_cross_attention_layer_norm": "1/layer_norm",
"pre_attention_layer_norm": "0/layer_norm", # previously 1, but seems wrong
"token_embedder": "shared",
"encoder_norm": "final_layer_norm",
"decoder_norm": "final_layer_norm",
"relpos_bias/rel_embedding": "block/0/layer/0/SelfAttention/relative_attention_bias/weight",
"router/router_weights/w/": "router/classifier/",
"roer/roer_weights/w/": "router/classifier/",
"logits_dense": "lm_head",
}
def rename_keys(s_dict):
    # 1. in HF T5, we have block.{x}.layer.{y}. which corresponds to layer.{x} in
    # the original model
    keys = list(s_dict.keys())
    for key in keys:
        layer_to_block_of_layer = r".*/layers_(\d+)"
        new_key = key
        if re.match(layer_to_block_of_layer, key):
            new_key = re.sub(r"layers_(\d+)", r"block/\1/layer", new_key)

        layer_to_block_of_layer = r"(encoder|decoder)\/"
        if re.match(layer_to_block_of_layer, key):
            groups = re.match(layer_to_block_of_layer, new_key).groups()
            if groups[0] == "encoder":
                new_key = re.sub(r"/mlp/", r"/1/mlp/", new_key)
                new_key = re.sub(r"/pre_mlp_layer_norm/", r"/1/layer_norm/", new_key)
            elif groups[0] == "decoder":
                new_key = re.sub(r"/mlp/", r"/2/mlp/", new_key)
                new_key = re.sub(r"/pre_mlp_layer_norm/", r"/2/layer_norm/", new_key)

        # 2. Convert other classic mappings
        for old_key, temp_key in MOE_LAYER_NAME_MAPPING.items():
            if old_key in new_key:
                new_key = new_key.replace(old_key, temp_key)

        print(f"{key} -> {new_key}")
        s_dict[new_key] = s_dict.pop(key)

    if "encoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight" in s_dict:
        s_dict["encoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight"] = s_dict[
            "encoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight"
        ].T
    if "decoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight" in s_dict:
        s_dict["decoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight"] = s_dict[
            "decoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight"
        ].T

    # 3. Take extra care of the EXPERTS layer
    for key in list(s_dict.keys()):
        if "expert" in key:
            num_experts = s_dict[key].shape[0]
            expert_weights = s_dict[key]
            for idx in range(num_experts):
                s_dict[key.replace("expert/", f"experts/expert_{idx}/")] = expert_weights[idx]
            print(f"{key} -> {key.replace('expert/', f'experts/expert_{idx}/')}")
            s_dict.pop(key)

    return s_dict
GIN_TO_CONFIG_MAPPING = {
"NUM_ENCODER_LAYERS": "num_layers",
"NUM_DECODER_LAYERS": "num_decoder_layers",
"NUM_HEADS": "num_heads",
"HEAD_DIM": "d_kv",
"EMBED_DIM": "d_model",
"MLP_DIM": "d_ff",
"NUM_SELECTED_EXPERTS": "num_selected_experts",
"NUM_ENCODER_SPARSE_LAYERS": "num_sparse_encoder_layers",
"NUM_DECODER_SPARSE_LAYERS": "num_sparse_decoder_layers",
"dense.MlpBlock.activations": "feed_forward_proj",
}
def convert_gin_to_config(gin_file, num_experts):
    # Convert a google style config to the hugging face format
    import regex as re

    with open(gin_file, "r") as f:
        raw_gin = f.read()

    regex_match = re.findall(r"(.*) = ([0-9.]*)", raw_gin)
    args = {}
    for param, value in regex_match:
        if param in GIN_TO_CONFIG_MAPPING and value != "":
            args[GIN_TO_CONFIG_MAPPING[param]] = float(value) if "." in value else int(value)

    activation = re.findall(r"(.*activations) = \(\'(.*)\',\)", raw_gin)[0]
    args[GIN_TO_CONFIG_MAPPING[activation[0]]] = str(activation[1])

    args["num_experts"] = num_experts
    config = SwitchTransformersConfig(**args)
    return config


def convert_flax_checkpoint_to_pytorch(
    flax_checkpoint_path, config_file, gin_file=None, pytorch_dump_path="./", num_experts=8
):
    # Initialise PyTorch model
    print(f"Loading flax weights from : {flax_checkpoint_path}")
    flax_params = checkpoints.load_t5x_checkpoint(flax_checkpoint_path)

    if gin_file is not None:
        config = convert_gin_to_config(gin_file, num_experts)
    else:
        config = SwitchTransformersConfig.from_pretrained(config_file)

    pt_model = SwitchTransformersForConditionalGeneration(config)

    flax_params = flax_params["target"]
    flax_params = flatten_dict(flax_params, sep="/")
    flax_params = rename_keys(flax_params)
    flax_params = unflatten_dict(flax_params, sep="/")

    # Load the flax params in the PT model
    load_flax_weights_in_pytorch_model(pt_model, flax_params)

    print(f"Save PyTorch model to {pytorch_dump_path}")
    pt_model.save_pretrained(pytorch_dump_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--switch_t5x_checkpoint_path",
default=None,
type=str,
required=True,
help=(
"The config json file corresponding to the pre-trained SwitchTransformers model. \nThis specifies the"
" model architecture. If not provided, a `gin_file` has to be provided."
),
)
parser.add_argument(
"--gin_file",
default=None,
type=str,
required=False,
help="Path to the gin config file. If not provided, a `config_file` has to be passed ",
)
parser.add_argument(
"--config_name", default=None, type=str, required=False, help="Config name of SwitchTransformers model."
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output pytorch model."
)
parser.add_argument("--num_experts", default=8, type=int, required=False, help="Number of experts")
    args = parser.parse_args()
convert_flax_checkpoint_to_pytorch(
        args.switch_t5x_checkpoint_path,
args.config_name,
args.gin_file,
args.pytorch_dump_folder_path,
args.num_experts,
)
| 37 | 1 |
'''simple docstring'''
def mf_knapsack(i: int, wt: list, val: list, j: int):
    global f  # a global dp table for knapsack
    if f[i][j] < 0:
        if j < wt[i - 1]:
            val = mf_knapsack(i - 1, wt, val, j)
        else:
            val = max(
                mf_knapsack(i - 1, wt, val, j),
                mf_knapsack(i - 1, wt, val, j - wt[i - 1]) + val[i - 1],
            )
        f[i][j] = val
    return f[i][j]
def knapsack(w: int, wt: list, val: list, n: int):
    dp = [[0] * (w + 1) for _ in range(n + 1)]
    for i in range(1, n + 1):
        for w_ in range(1, w + 1):
            if wt[i - 1] <= w_:
                dp[i][w_] = max(val[i - 1] + dp[i - 1][w_ - wt[i - 1]], dp[i - 1][w_])
            else:
                dp[i][w_] = dp[i - 1][w_]
    # w_ equals w once the loop finishes, so this is the optimum at full capacity
    return dp[n][w_], dp
def knapsack_with_example_solution(w: int, wt: list, val: list):
    if not (isinstance(wt, (list, tuple)) and isinstance(val, (list, tuple))):
        raise ValueError(
            "Both the weights and values vectors must be either lists or tuples"
        )
    num_items = len(wt)
    if num_items != len(val):
        msg = (
            "The number of weights must be the same as the number of values.\n"
            f"But got {num_items} weights and {len(val)} values"
        )
        raise ValueError(msg)
    for i in range(num_items):
        if not isinstance(wt[i], int):
            msg = (
                "All weights must be integers but got weight of "
                f"type {type(wt[i])} at index {i}"
            )
            raise TypeError(msg)
    optimal_val, dp_table = knapsack(w, wt, val, num_items)
    example_optional_set: set = set()
    _construct_solution(dp_table, wt, num_items, w, example_optional_set)
    return optimal_val, example_optional_set
def _construct_solution(dp: list, wt: list, i: int, j: int, optimal_set: set):
    # for the current item i at a maximum weight j to be part of an optimal subset,
    # the optimal value at (i, j) must be greater than the optimal value at (i-1, j).
    # where i - 1 means considering only the previous items at the given maximum weight
    if i > 0 and j > 0:
        if dp[i - 1][j] == dp[i][j]:
            _construct_solution(dp, wt, i - 1, j, optimal_set)
        else:
            optimal_set.add(i)
            _construct_solution(dp, wt, i - 1, j - wt[i - 1], optimal_set)
if __name__ == "__main__":
    val = [3, 2, 4, 4]
    wt = [4, 3, 2, 3]
    n = 4
    w = 6
    f = [[0] * (w + 1)] + [[0] + [-1] * (w + 1) for _ in range(n + 1)]
    optimal_solution, _ = knapsack(w, wt, val, n)
print(optimal_solution)
print(mf_knapsack(n, wt, val, w)) # switched the n and w
# testing the dynamic programming problem with example
# the optimal subset for the above example are items 3 and 4
    optimal_solution, optimal_subset = knapsack_with_example_solution(w, wt, val)
assert optimal_solution == 8
assert optimal_subset == {3, 4}
print("optimal_value = ", optimal_solution)
print("An optimal subset corresponding to the optimal value", optimal_subset)
| 23 |
from __future__ import annotations
from typing import Any
def generate_all_subsequences(sequence: list[Any]) -> None:
    create_state_space_tree(sequence, [], 0)


def create_state_space_tree(
    sequence: list[Any], current_subsequence: list[Any], index: int
) -> None:
    if index == len(sequence):
        print(current_subsequence)
        return
    create_state_space_tree(sequence, current_subsequence, index + 1)
    current_subsequence.append(sequence[index])
    create_state_space_tree(sequence, current_subsequence, index + 1)
    current_subsequence.pop()
if __name__ == "__main__":
    seq: list[Any] = [3, 1, 2, 4]
generate_all_subsequences(seq)
seq.clear()
seq.extend(["A", "B", "C"])
generate_all_subsequences(seq)
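    # An illustrative cross-check (not part of the original demo): the backtracking
    # above enumerates the power set, so itertools must yield the same count, 2**n.
    from itertools import combinations

    demo_seq = ["A", "B", "C"]
    all_subsequences = [list(c) for r in range(len(demo_seq) + 1) for c in combinations(demo_seq, r)]
    assert len(all_subsequences) == 2 ** len(demo_seq)  # 8, including the empty subsequence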
| 184 | 0 |
"""simple docstring"""
from dataclasses import dataclass
from typing import List, Optional, Union
import numpy as np
import PIL
import torch
from transformers import CLIPImageProcessor, CLIPVisionModel
from ...models import PriorTransformer
from ...pipelines import DiffusionPipeline
from ...schedulers import HeunDiscreteScheduler
from ...utils import (
BaseOutput,
is_accelerate_available,
logging,
randn_tensor,
replace_example_docstring,
)
from .renderer import ShapERenderer
_a = logging.get_logger(__name__) # pylint: disable=invalid-name
_a = """
Examples:
```py
>>> from PIL import Image
>>> import torch
>>> from diffusers import DiffusionPipeline
>>> from diffusers.utils import export_to_gif, load_image
>>> device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")
>>> repo = \"openai/shap-e-img2img\"
>>> pipe = DiffusionPipeline.from_pretrained(repo, torch_dtype=torch.float16)
>>> pipe = pipe.to(device)
>>> guidance_scale = 3.0
>>> image_url = \"https://hf.co/datasets/diffusers/docs-images/resolve/main/shap-e/corgi.png\"
>>> image = load_image(image_url).convert(\"RGB\")
>>> images = pipe(
... image,
... guidance_scale=guidance_scale,
... num_inference_steps=64,
... frame_size=256,
... ).images
>>> gif_path = export_to_gif(images[0], \"corgi_3d.gif\")
```
"""
@dataclass
class _UpperCAmelCase( lowerCamelCase ):
lowercase__ = 42
class _UpperCAmelCase( lowerCamelCase ):
def __init__( self , __a , __a , __a , __a , __a , ) -> Union[str, Any]:
'''simple docstring'''
super().__init__()
self.register_modules(
prior=__a , image_encoder=__a , image_processor=__a , scheduler=__a , renderer=__a , )
def UpperCAmelCase ( self , __a , __a , __a , __a , __a , __a) -> Union[str, Any]:
'''simple docstring'''
if latents is None:
_UpperCamelCase = randn_tensor(__a , generator=__a , device=__a , dtype=__a)
else:
if latents.shape != shape:
raise ValueError(F'''Unexpected latents shape, got {latents.shape}, expected {shape}''')
_UpperCamelCase = latents.to(__a)
_UpperCamelCase = latents * scheduler.init_noise_sigma
return latents
def UpperCAmelCase ( self , __a=0) -> Optional[Any]:
'''simple docstring'''
if is_accelerate_available():
from accelerate import cpu_offload
else:
raise ImportError('''Please install accelerate via `pip install accelerate`''')
_UpperCamelCase = torch.device(F'''cuda:{gpu_id}''')
_UpperCamelCase = [self.image_encoder, self.prior]
for cpu_offloaded_model in models:
if cpu_offloaded_model is not None:
cpu_offload(__a , __a)
@property
def UpperCAmelCase ( self) -> List[str]:
'''simple docstring'''
if self.device != torch.device('''meta''') or not hasattr(self.image_encoder , '''_hf_hook'''):
return self.device
for module in self.image_encoder.modules():
if (
hasattr(__a , '''_hf_hook''')
and hasattr(module._hf_hook , '''execution_device''')
and module._hf_hook.execution_device is not None
):
return torch.device(module._hf_hook.execution_device)
return self.device
def UpperCAmelCase ( self , __a , __a , __a , __a , ) -> Optional[int]:
'''simple docstring'''
if isinstance(__a , __a) and isinstance(image[0] , torch.Tensor):
_UpperCamelCase = torch.cat(__a , axis=0) if image[0].ndim == 4 else torch.stack(__a , axis=0)
if not isinstance(__a , torch.Tensor):
_UpperCamelCase = self.image_processor(__a , return_tensors='''pt''').pixel_values[0].unsqueeze(0)
_UpperCamelCase = image.to(dtype=self.image_encoder.dtype , device=__a)
_UpperCamelCase = self.image_encoder(__a)['''last_hidden_state''']
_UpperCamelCase = image_embeds[:, 1:, :].contiguous() # batch_size, dim, 256
_UpperCamelCase = image_embeds.repeat_interleave(__a , dim=0)
if do_classifier_free_guidance:
_UpperCamelCase = torch.zeros_like(__a)
# For classifier free guidance, we need to do two forward passes.
# Here we concatenate the unconditional and text embeddings into a single batch
# to avoid doing two forward passes
_UpperCamelCase = torch.cat([negative_image_embeds, image_embeds])
return image_embeds
@torch.no_grad()
@replace_example_docstring(__a)
def __call__( self , __a , __a = 1 , __a = 25 , __a = None , __a = None , __a = 4.0 , __a = 64 , __a = "pil" , __a = True , ) -> Optional[int]:
'''simple docstring'''
if isinstance(__a , PIL.Image.Image):
_UpperCamelCase = 1
elif isinstance(__a , torch.Tensor):
_UpperCamelCase = image.shape[0]
elif isinstance(__a , __a) and isinstance(image[0] , (torch.Tensor, PIL.Image.Image)):
_UpperCamelCase = len(__a)
else:
raise ValueError(
F'''`image` has to be of type `PIL.Image.Image`, `torch.Tensor`, `List[PIL.Image.Image]` or `List[torch.Tensor]` but is {type(__a)}''')
_UpperCamelCase = self._execution_device
_UpperCamelCase = batch_size * num_images_per_prompt
_UpperCamelCase = guidance_scale > 1.0
_UpperCamelCase = self._encode_image(__a , __a , __a , __a)
# prior
self.scheduler.set_timesteps(__a , device=__a)
_UpperCamelCase = self.scheduler.timesteps
_UpperCamelCase = self.prior.config.num_embeddings
_UpperCamelCase = self.prior.config.embedding_dim
_UpperCamelCase = self.prepare_latents(
(batch_size, num_embeddings * embedding_dim) , image_embeds.dtype , __a , __a , __a , self.scheduler , )
# YiYi notes: for testing only to match ldm, we can directly create a latents with desired shape: batch_size, num_embeddings, embedding_dim
_UpperCamelCase = latents.reshape(latents.shape[0] , __a , __a)
for i, t in enumerate(self.progress_bar(__a)):
# expand the latents if we are doing classifier free guidance
_UpperCamelCase = torch.cat([latents] * 2) if do_classifier_free_guidance else latents
_UpperCamelCase = self.scheduler.scale_model_input(__a , __a)
_UpperCamelCase = self.prior(
__a , timestep=__a , proj_embedding=__a , ).predicted_image_embedding
# remove the variance
_UpperCamelCase , _UpperCamelCase = noise_pred.split(
scaled_model_input.shape[2] , dim=2) # batch_size, num_embeddings, embedding_dim
            if do_classifier_free_guidance:
_UpperCamelCase , _UpperCamelCase = noise_pred.chunk(2)
_UpperCamelCase = noise_pred_uncond + guidance_scale * (noise_pred - noise_pred_uncond)
_UpperCamelCase = self.scheduler.step(
__a , timestep=__a , sample=__a , ).prev_sample
if output_type == "latent":
return ShapEPipelineOutput(images=__a)
_UpperCamelCase = []
for i, latent in enumerate(__a):
_UpperCamelCase = self.renderer.decode(
latent[None, :] , __a , size=__a , ray_batch_size=40_96 , n_coarse_samples=64 , n_fine_samples=1_28 , )
images.append(__a)
_UpperCamelCase = torch.stack(__a)
if output_type not in ["np", "pil"]:
raise ValueError(F'''Only the output types `pil` and `np` are supported not output_type={output_type}''')
_UpperCamelCase = images.cpu().numpy()
if output_type == "pil":
_UpperCamelCase = [self.numpy_to_pil(__a) for image in images]
# Offload last model to CPU
if hasattr(self , '''final_offload_hook''') and self.final_offload_hook is not None:
self.final_offload_hook.offload()
if not return_dict:
return (images,)
return ShapEPipelineOutput(images=__a)
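# An isolated sketch of the classifier-free guidance step used in the denoising loop
# above (`apply_cfg` is a hypothetical helper written for illustration, not a
# diffusers API; the tensor shapes are arbitrary).
import torch


def apply_cfg(noise_pred: torch.Tensor, guidance_scale: float) -> torch.Tensor:
    # the batch is laid out as [unconditional, conditional] along dim 0
    uncond, cond = noise_pred.chunk(2)
    return uncond + guidance_scale * (cond - uncond)


_sample = torch.randn(2, 4, 8)
assert torch.allclose(apply_cfg(_sample, 1.0), _sample[1:2])  # scale 1.0 -> conditional branch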
| 365 |
"""simple docstring"""
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
import torch
from ..models.auto import AutoModelForVisualQuestionAnswering, AutoProcessor
from ..utils import requires_backends
from .base import PipelineTool
if TYPE_CHECKING:
from PIL import Image
class _UpperCAmelCase( lowerCamelCase ):
lowercase__ = 'dandelin/vilt-b32-finetuned-vqa'
lowercase__ = (
'This is a tool that answers a question about an image. It takes an input named `image` which should be the '
'image containing the information, as well as a `question` which should be the question in English. It '
'returns a text that is the answer to the question.'
)
lowercase__ = 'image_qa'
lowercase__ = AutoProcessor
lowercase__ = AutoModelForVisualQuestionAnswering
lowercase__ = ['image', 'text']
lowercase__ = ['text']
def __init__( self , *__a , **__a) -> int:
'''simple docstring'''
requires_backends(self , ['''vision'''])
super().__init__(*__a , **__a)
def UpperCAmelCase ( self , __a , __a) -> Dict:
'''simple docstring'''
return self.pre_processor(__a , __a , return_tensors='''pt''')
def UpperCAmelCase ( self , __a) -> Tuple:
'''simple docstring'''
with torch.no_grad():
return self.model(**__a).logits
def UpperCAmelCase ( self , __a) -> Optional[Any]:
'''simple docstring'''
_UpperCamelCase = outputs.argmax(-1).item()
return self.model.config.idalabel[idx]
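# An isolated sketch (toy logits and labels, not real model outputs) of the decode
# step above: the answer is the argmax over the logits mapped through the config's
# id-to-label table.
import torch

_logits = torch.tensor([[0.1, 2.3, -0.5]])
_idalabel = {0: "no", 1: "yes", 2: "maybe"}  # stands in for the model config's table
assert _idalabel[_logits.argmax(-1).item()] == "yes"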
| 100 | 0 |
"""simple docstring"""
from collections.abc import Callable
import numpy as np
def _snake_case(ode_func: Callable, ya: float, xa: float, step_size: float, x_end: float):
    n = int(np.ceil((x_end - xa) / step_size))
    y = np.zeros((n + 1,))
    y[0] = ya
    x = xa
    for k in range(n):
        # predictor step (explicit Euler), then trapezoidal corrector
        y_pred = y[k] + step_size * ode_func(x, y[k])
        y[k + 1] = y[k] + (
            (step_size / 2) * (ode_func(x, y[k]) + ode_func(x + step_size, y_pred))
        )
        x += step_size
    return y
if __name__ == "__main__":
import doctest
doctest.testmod()
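    # An illustrative convergence check: integrating y' = y from x = 0 to x = 1 with
    # the second-order scheme above should approximate e to about step_size**2.
    y_heun = _snake_case(lambda x, y: y, 1.0, 0.0, 0.001, 1.0)
    assert abs(y_heun[-1] - np.e) < 1e-3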
| 109 |
"""simple docstring"""
import inspect
import warnings
from typing import Any, Dict, Optional, Union
from packaging import version
def _snake_case ( *UpperCamelCase : str , UpperCamelCase : Optional[Union[Dict, Any]] = None , UpperCamelCase : Tuple=True , UpperCamelCase : Optional[int]=2 ):
from .. import __version__
UpperCAmelCase : Tuple = take_from
UpperCAmelCase : Optional[Any] = ()
if not isinstance(args[0] , UpperCamelCase ):
UpperCAmelCase : List[str] = (args,)
for attribute, version_name, message in args:
if version.parse(version.parse(UpperCamelCase ).base_version ) >= version.parse(UpperCamelCase ):
raise ValueError(
F"The deprecation tuple {(attribute, version_name, message)} should be removed since diffusers'"
F" version {__version__} is >= {version_name}" )
UpperCAmelCase : Optional[int] = None
if isinstance(UpperCamelCase , UpperCamelCase ) and attribute in deprecated_kwargs:
values += (deprecated_kwargs.pop(UpperCamelCase ),)
UpperCAmelCase : List[str] = F"The `{attribute}` argument is deprecated and will be removed in version {version_name}."
elif hasattr(UpperCamelCase , UpperCamelCase ):
values += (getattr(UpperCamelCase , UpperCamelCase ),)
UpperCAmelCase : List[Any] = F"The `{attribute}` attribute is deprecated and will be removed in version {version_name}."
elif deprecated_kwargs is None:
UpperCAmelCase : int = F"`{attribute}` is deprecated and will be removed in version {version_name}."
if warning is not None:
UpperCAmelCase : Optional[Any] = warning + """ """ if standard_warn else """"""
warnings.warn(warning + message , UpperCamelCase , stacklevel=UpperCamelCase )
if isinstance(UpperCamelCase , UpperCamelCase ) and len(UpperCamelCase ) > 0:
UpperCAmelCase : Optional[int] = inspect.getouterframes(inspect.currentframe() )[1]
UpperCAmelCase : Union[str, Any] = call_frame.filename
UpperCAmelCase : List[Any] = call_frame.lineno
UpperCAmelCase : List[str] = call_frame.function
UpperCAmelCase , UpperCAmelCase : Optional[int] = next(iter(deprecated_kwargs.items() ) )
raise TypeError(F"{function} in {filename} line {line_number-1} got an unexpected keyword argument `{key}`" )
if len(UpperCamelCase ) == 0:
return
elif len(UpperCamelCase ) == 1:
return values[0]
return values
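# An isolated sketch (illustrative version strings) of the version gate at the top
# of the loop above: a deprecation tuple must be deleted once the library version
# reaches the announced removal version.
from packaging import version as _version

assert _version.parse(_version.parse("0.21.0").base_version) >= _version.parse("0.20.0")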
| 109 | 1 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
__UpperCAmelCase : Optional[int] = logging.get_logger(__name__)
__UpperCAmelCase : List[str] = {
"shi-labs/nat-mini-in1k-224": "https://huggingface.co/shi-labs/nat-mini-in1k-224/resolve/main/config.json",
# See all Nat models at https://huggingface.co/models?filter=nat
}
class UpperCAmelCase_ ( __lowercase, __lowercase):
'''simple docstring'''
__UpperCamelCase : str = '''nat'''
__UpperCamelCase : int = {
'''num_attention_heads''': '''num_heads''',
'''num_hidden_layers''': '''num_layers''',
}
def __init__( self , __SCREAMING_SNAKE_CASE=4 , __SCREAMING_SNAKE_CASE=3 , __SCREAMING_SNAKE_CASE=64 , __SCREAMING_SNAKE_CASE=[3, 4, 6, 5] , __SCREAMING_SNAKE_CASE=[2, 4, 8, 16] , __SCREAMING_SNAKE_CASE=7 , __SCREAMING_SNAKE_CASE=3.0 , __SCREAMING_SNAKE_CASE=True , __SCREAMING_SNAKE_CASE=0.0 , __SCREAMING_SNAKE_CASE=0.0 , __SCREAMING_SNAKE_CASE=0.1 , __SCREAMING_SNAKE_CASE="gelu" , __SCREAMING_SNAKE_CASE=0.02 , __SCREAMING_SNAKE_CASE=1e-5 , __SCREAMING_SNAKE_CASE=0.0 , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE=None , **__SCREAMING_SNAKE_CASE , ):
"""simple docstring"""
super().__init__(**UpperCAmelCase__ )
UpperCamelCase : Dict = patch_size
UpperCamelCase : List[Any] = num_channels
UpperCamelCase : List[Any] = embed_dim
UpperCamelCase : List[Any] = depths
UpperCamelCase : str = len(UpperCAmelCase__ )
UpperCamelCase : List[str] = num_heads
UpperCamelCase : Optional[Any] = kernel_size
UpperCamelCase : Dict = mlp_ratio
UpperCamelCase : str = qkv_bias
UpperCamelCase : str = hidden_dropout_prob
UpperCamelCase : Tuple = attention_probs_dropout_prob
UpperCamelCase : str = drop_path_rate
UpperCamelCase : Optional[int] = hidden_act
UpperCamelCase : Optional[int] = layer_norm_eps
UpperCamelCase : Union[str, Any] = initializer_range
# we set the hidden_size attribute in order to make Nat work with VisionEncoderDecoderModel
# this indicates the channel dimension after the last stage of the model
UpperCamelCase : Dict = int(embed_dim * 2 ** (len(UpperCAmelCase__ ) - 1) )
UpperCamelCase : int = layer_scale_init_value
UpperCamelCase : Optional[int] = ['''stem'''] + [f"""stage{idx}""" for idx in range(1 , len(UpperCAmelCase__ ) + 1 )]
UpperCamelCase , UpperCamelCase : Optional[int] = get_aligned_output_features_output_indices(
out_features=UpperCAmelCase__ , out_indices=UpperCAmelCase__ , stage_names=self.stage_names )
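# A standalone sketch (values mirror the defaults above) of the derived config
# fields: hidden_size doubles once per stage and stage names are 1-indexed.
_embed_dim, _depths = 64, [3, 4, 6, 5]
assert int(_embed_dim * 2 ** (len(_depths) - 1)) == 512
assert ["stem"] + [f"stage{idx}" for idx in range(1, len(_depths) + 1)] == [
    "stem", "stage1", "stage2", "stage3", "stage4"
]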
| 361 |
import numpy as np
from sklearn.datasets import fetch_california_housing
from sklearn.metrics import mean_absolute_error, mean_squared_error
from sklearn.model_selection import train_test_split
from xgboost import XGBRegressor
def data_handling(data: dict) -> tuple:
    # Split the fetched dataset bunch into features and target
    return (data["data"], data["target"])


def xgboost(features: np.ndarray, target: np.ndarray, test_features: np.ndarray) -> np.ndarray:
    xgb = XGBRegressor(verbosity=0, random_state=42)
    xgb.fit(features, target)
    # Predict target for test data
    predictions = xgb.predict(test_features)
    predictions = predictions.reshape(len(predictions), 1)
    return predictions


def main() -> None:
    # Load California house price dataset
    california = fetch_california_housing()
    data, target = data_handling(california)
    x_train, x_test, y_train, y_test = train_test_split(
        data, target, test_size=0.25, random_state=1
    )
    predictions = xgboost(x_train, y_train, x_test)
    # Error printing
    print(f"Mean Absolute Error : {mean_absolute_error(y_test, predictions)}")
    print(f"Mean Square Error : {mean_squared_error(y_test, predictions)}")
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
main()
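    # A hand-computed sketch of the two error metrics printed above (toy values,
    # not California-housing results): MAE averages |error|, MSE averages error**2.
    toy_true, toy_pred = np.array([3.0, 5.0]), np.array([2.5, 5.5])
    assert mean_absolute_error(toy_true, toy_pred) == 0.5
    assert mean_squared_error(toy_true, toy_pred) == 0.25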
| 315 | 0 |
import math
from typing import Optional
import numpy as np
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__UpperCAmelCase = logging.get_logger(__name__)
__UpperCAmelCase = {
"facebook/encodec_24khz": "https://huggingface.co/facebook/encodec_24khz/resolve/main/config.json",
"facebook/encodec_48khz": "https://huggingface.co/facebook/encodec_48khz/resolve/main/config.json",
}
class UpperCamelCase__ ( snake_case__ ):
"""simple docstring"""
UpperCAmelCase_ ='encodec'
def __init__( self , _A=[1.5, 3.0, 6.0, 12.0, 24.0] , _A=24000 , _A=1 , _A=False , _A=None , _A=None , _A=128 , _A=32 , _A=1 , _A=[8, 5, 4, 2] , _A="weight_norm" , _A=7 , _A=7 , _A=3 , _A=2 , _A=True , _A="reflect" , _A=2 , _A=2 , _A=1.0 , _A=1024 , _A=None , _A=True , **_A , ) -> Optional[Any]:
SCREAMING_SNAKE_CASE_ = target_bandwidths
SCREAMING_SNAKE_CASE_ = sampling_rate
SCREAMING_SNAKE_CASE_ = audio_channels
SCREAMING_SNAKE_CASE_ = normalize
SCREAMING_SNAKE_CASE_ = chunk_length_s
SCREAMING_SNAKE_CASE_ = overlap
SCREAMING_SNAKE_CASE_ = hidden_size
SCREAMING_SNAKE_CASE_ = num_filters
SCREAMING_SNAKE_CASE_ = num_residual_layers
SCREAMING_SNAKE_CASE_ = upsampling_ratios
SCREAMING_SNAKE_CASE_ = norm_type
SCREAMING_SNAKE_CASE_ = kernel_size
SCREAMING_SNAKE_CASE_ = last_kernel_size
SCREAMING_SNAKE_CASE_ = residual_kernel_size
SCREAMING_SNAKE_CASE_ = dilation_growth_rate
SCREAMING_SNAKE_CASE_ = use_causal_conv
SCREAMING_SNAKE_CASE_ = pad_mode
SCREAMING_SNAKE_CASE_ = compress
SCREAMING_SNAKE_CASE_ = num_lstm_layers
SCREAMING_SNAKE_CASE_ = trim_right_ratio
SCREAMING_SNAKE_CASE_ = codebook_size
SCREAMING_SNAKE_CASE_ = codebook_dim if codebook_dim is not None else hidden_size
SCREAMING_SNAKE_CASE_ = use_conv_shortcut
if self.norm_type not in ["weight_norm", "time_group_norm"]:
raise ValueError(
F'''self.norm_type must be one of `"weight_norm"`, `"time_group_norm"`), got {self.norm_type}''' )
super().__init__(**_A )
@property
def _UpperCamelCase ( self ) -> Optional[int]:
if self.chunk_length_s is None:
return None
else:
return int(self.chunk_length_s * self.sampling_rate )
@property
def _UpperCamelCase ( self ) -> Optional[int]:
if self.chunk_length_s is None or self.overlap is None:
return None
else:
return max(1 , int((1.0 - self.overlap) * self.chunk_length ) )
@property
def _UpperCamelCase ( self ) -> int:
SCREAMING_SNAKE_CASE_ = np.prod(self.upsampling_ratios )
return math.ceil(self.sampling_rate / hop_length )
@property
def _UpperCamelCase ( self ) -> int:
return int(1000 * self.target_bandwidths[-1] // (self.frame_rate * 10) )
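# A standalone sketch of the chunk/stride/frame-rate arithmetic implemented by the
# properties above (the concrete numbers are illustrative, not a real checkpoint's).
_sampling_rate, _chunk_length_s, _overlap = 48_000, 1.0, 0.5
_chunk_length = int(_chunk_length_s * _sampling_rate)            # samples per chunk
_chunk_stride = max(1, int((1.0 - _overlap) * _chunk_length))    # hop between chunks
_frame_rate = math.ceil(_sampling_rate / np.prod([8, 5, 4, 2]))  # codec frames per second
assert (_chunk_length, _chunk_stride, _frame_rate) == (48_000, 24_000, 150)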
| 299 |
from __future__ import annotations
import copy
import inspect
import unittest
import numpy as np
from transformers import is_tf_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_tf, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST,
TF_MODEL_FOR_MULTIPLE_CHOICE_MAPPING,
TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING,
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
LayoutLMvaConfig,
TFLayoutLMvaForQuestionAnswering,
TFLayoutLMvaForSequenceClassification,
TFLayoutLMvaForTokenClassification,
TFLayoutLMvaModel,
)
if is_vision_available():
from PIL import Image
from transformers import LayoutLMvaImageProcessor
class A_ :
def __init__( self : Dict , UpperCAmelCase : Optional[Any] , UpperCAmelCase : Optional[int]=2 , UpperCAmelCase : int=3 , UpperCAmelCase : int=4 , UpperCAmelCase : str=2 , UpperCAmelCase : Union[str, Any]=7 , UpperCAmelCase : List[str]=True , UpperCAmelCase : Dict=True , UpperCAmelCase : Tuple=True , UpperCAmelCase : Optional[Any]=True , UpperCAmelCase : Optional[Any]=9_9 , UpperCAmelCase : Tuple=3_6 , UpperCAmelCase : Tuple=2 , UpperCAmelCase : Optional[int]=4 , UpperCAmelCase : Union[str, Any]=3_7 , UpperCAmelCase : Any="gelu" , UpperCAmelCase : Optional[int]=0.1 , UpperCAmelCase : Optional[Any]=0.1 , UpperCAmelCase : List[str]=5_1_2 , UpperCAmelCase : int=1_6 , UpperCAmelCase : Optional[Any]=2 , UpperCAmelCase : Optional[Any]=0.02 , UpperCAmelCase : Optional[Any]=6 , UpperCAmelCase : int=6 , UpperCAmelCase : str=3 , UpperCAmelCase : Any=4 , UpperCAmelCase : Optional[int]=None , UpperCAmelCase : List[str]=1_0_0_0 , ) -> int:
__lowerCAmelCase: List[str] = parent
__lowerCAmelCase: List[str] = batch_size
__lowerCAmelCase: Optional[Any] = num_channels
__lowerCAmelCase: Tuple = image_size
__lowerCAmelCase: str = patch_size
__lowerCAmelCase: List[str] = is_training
__lowerCAmelCase: Union[str, Any] = use_input_mask
__lowerCAmelCase: Union[str, Any] = use_token_type_ids
__lowerCAmelCase: Tuple = use_labels
__lowerCAmelCase: Optional[int] = vocab_size
__lowerCAmelCase: Any = hidden_size
__lowerCAmelCase: Tuple = num_hidden_layers
__lowerCAmelCase: Optional[int] = num_attention_heads
__lowerCAmelCase: Dict = intermediate_size
__lowerCAmelCase: Union[str, Any] = hidden_act
__lowerCAmelCase: str = hidden_dropout_prob
__lowerCAmelCase: str = attention_probs_dropout_prob
__lowerCAmelCase: str = max_position_embeddings
__lowerCAmelCase: str = type_vocab_size
__lowerCAmelCase: Optional[Any] = type_sequence_label_size
__lowerCAmelCase: Union[str, Any] = initializer_range
__lowerCAmelCase: List[str] = coordinate_size
__lowerCAmelCase: Tuple = shape_size
__lowerCAmelCase: List[Any] = num_labels
__lowerCAmelCase: Any = num_choices
__lowerCAmelCase: List[str] = scope
__lowerCAmelCase: Dict = range_bbox
# LayoutLMv3's sequence length equals the number of text tokens + number of patches + 1 (we add 1 for the CLS token)
__lowerCAmelCase: Optional[Any] = text_seq_length
__lowerCAmelCase: List[Any] = (image_size // patch_size) ** 2 + 1
__lowerCAmelCase: int = self.text_seq_length + self.image_seq_length
def UpperCAmelCase ( self : Any ) -> Any:
__lowerCAmelCase: Dict = ids_tensor([self.batch_size, self.text_seq_length] , self.vocab_size )
__lowerCAmelCase: Any = ids_tensor([self.batch_size, self.text_seq_length, 4] , self.range_bbox )
__lowerCAmelCase: str = bbox.numpy()
# Ensure that bbox is legal
for i in range(bbox.shape[0] ):
for j in range(bbox.shape[1] ):
if bbox[i, j, 3] < bbox[i, j, 1]:
__lowerCAmelCase: Optional[Any] = bbox[i, j, 3]
__lowerCAmelCase: Tuple = bbox[i, j, 1]
__lowerCAmelCase: Dict = tmp_coordinate
if bbox[i, j, 2] < bbox[i, j, 0]:
__lowerCAmelCase: Any = bbox[i, j, 2]
__lowerCAmelCase: int = bbox[i, j, 0]
__lowerCAmelCase: int = tmp_coordinate
__lowerCAmelCase: List[Any] = tf.constant(UpperCAmelCase )
__lowerCAmelCase: Dict = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
__lowerCAmelCase: Union[str, Any] = None
if self.use_input_mask:
__lowerCAmelCase: List[Any] = random_attention_mask([self.batch_size, self.text_seq_length] )
__lowerCAmelCase: int = None
if self.use_token_type_ids:
__lowerCAmelCase: List[Any] = ids_tensor([self.batch_size, self.text_seq_length] , self.type_vocab_size )
__lowerCAmelCase: str = None
__lowerCAmelCase: Dict = None
if self.use_labels:
__lowerCAmelCase: Dict = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__lowerCAmelCase: List[str] = ids_tensor([self.batch_size, self.text_seq_length] , self.num_labels )
__lowerCAmelCase: Dict = LayoutLMvaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , coordinate_size=self.coordinate_size , shape_size=self.shape_size , input_size=self.image_size , patch_size=self.patch_size , )
return config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels
def UpperCAmelCase ( self : Tuple , UpperCAmelCase : str , UpperCAmelCase : Optional[int] , UpperCAmelCase : Optional[int] , UpperCAmelCase : int , UpperCAmelCase : List[Any] , UpperCAmelCase : Tuple ) -> int:
__lowerCAmelCase: Tuple = TFLayoutLMvaModel(config=UpperCAmelCase )
# text + image
__lowerCAmelCase: Dict = model(UpperCAmelCase , pixel_values=UpperCAmelCase , training=UpperCAmelCase )
__lowerCAmelCase: List[str] = model(
UpperCAmelCase , bbox=UpperCAmelCase , pixel_values=UpperCAmelCase , attention_mask=UpperCAmelCase , token_type_ids=UpperCAmelCase , training=UpperCAmelCase , )
__lowerCAmelCase: Optional[Any] = model(UpperCAmelCase , bbox=UpperCAmelCase , pixel_values=UpperCAmelCase , training=UpperCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
# text only
__lowerCAmelCase: str = model(UpperCAmelCase , training=UpperCAmelCase )
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.text_seq_length, self.hidden_size) )
# image only
__lowerCAmelCase: List[str] = model({'pixel_values': pixel_values} , training=UpperCAmelCase )
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.image_seq_length, self.hidden_size) )
def UpperCAmelCase ( self : Union[str, Any] , UpperCAmelCase : Optional[int] , UpperCAmelCase : Any , UpperCAmelCase : Optional[int] , UpperCAmelCase : List[str] , UpperCAmelCase : int , UpperCAmelCase : Tuple , UpperCAmelCase : List[Any] ) -> int:
__lowerCAmelCase: List[str] = self.num_labels
__lowerCAmelCase: Tuple = TFLayoutLMvaForSequenceClassification(config=UpperCAmelCase )
__lowerCAmelCase: int = model(
UpperCAmelCase , bbox=UpperCAmelCase , pixel_values=UpperCAmelCase , attention_mask=UpperCAmelCase , token_type_ids=UpperCAmelCase , labels=UpperCAmelCase , training=UpperCAmelCase , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def UpperCAmelCase ( self : Any , UpperCAmelCase : List[Any] , UpperCAmelCase : Optional[Any] , UpperCAmelCase : int , UpperCAmelCase : int , UpperCAmelCase : List[Any] , UpperCAmelCase : str , UpperCAmelCase : int ) -> Any:
__lowerCAmelCase: Union[str, Any] = self.num_labels
__lowerCAmelCase: List[str] = TFLayoutLMvaForTokenClassification(config=UpperCAmelCase )
__lowerCAmelCase: Any = model(
UpperCAmelCase , bbox=UpperCAmelCase , pixel_values=UpperCAmelCase , attention_mask=UpperCAmelCase , token_type_ids=UpperCAmelCase , labels=UpperCAmelCase , training=UpperCAmelCase , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.text_seq_length, self.num_labels) )
def UpperCAmelCase ( self : List[Any] , UpperCAmelCase : Any , UpperCAmelCase : str , UpperCAmelCase : Union[str, Any] , UpperCAmelCase : Any , UpperCAmelCase : str , UpperCAmelCase : List[str] , UpperCAmelCase : List[Any] ) -> Any:
__lowerCAmelCase: str = 2
__lowerCAmelCase: Dict = TFLayoutLMvaForQuestionAnswering(config=UpperCAmelCase )
__lowerCAmelCase: int = model(
UpperCAmelCase , bbox=UpperCAmelCase , pixel_values=UpperCAmelCase , attention_mask=UpperCAmelCase , token_type_ids=UpperCAmelCase , start_positions=UpperCAmelCase , end_positions=UpperCAmelCase , training=UpperCAmelCase , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def UpperCAmelCase ( self : List[str] ) -> Union[str, Any]:
__lowerCAmelCase: Union[str, Any] = self.prepare_config_and_inputs()
((__lowerCAmelCase) , (__lowerCAmelCase) , (__lowerCAmelCase) , (__lowerCAmelCase) , (__lowerCAmelCase) , (__lowerCAmelCase) , (__lowerCAmelCase) , (__lowerCAmelCase)): List[str] = config_and_inputs
__lowerCAmelCase: List[str] = {
'input_ids': input_ids,
'bbox': bbox,
'pixel_values': pixel_values,
'token_type_ids': token_type_ids,
'attention_mask': input_mask,
}
return config, inputs_dict
@require_tf
class A_ ( snake_case__ , snake_case__ , unittest.TestCase ):
_lowercase : List[Any] = (
(
TFLayoutLMvaModel,
TFLayoutLMvaForQuestionAnswering,
TFLayoutLMvaForSequenceClassification,
TFLayoutLMvaForTokenClassification,
)
if is_tf_available()
else ()
)
_lowercase : Tuple = (
{'document-question-answering': TFLayoutLMvaForQuestionAnswering, 'feature-extraction': TFLayoutLMvaModel}
if is_tf_available()
else {}
)
_lowercase : Union[str, Any] = False
_lowercase : Dict = False
_lowercase : Tuple = False
def UpperCAmelCase ( self : Optional[Any] , UpperCAmelCase : Union[str, Any] , UpperCAmelCase : int , UpperCAmelCase : Tuple , UpperCAmelCase : Optional[Any] , UpperCAmelCase : Optional[Any] ) -> List[str]:
return True
def UpperCAmelCase ( self : Tuple , UpperCAmelCase : int , UpperCAmelCase : Tuple , UpperCAmelCase : Dict=False ) -> dict:
__lowerCAmelCase: Optional[Any] = copy.deepcopy(UpperCAmelCase )
if model_class in get_values(UpperCAmelCase ):
__lowerCAmelCase: int = {
k: tf.tile(tf.expand_dims(UpperCAmelCase , 1 ) , (1, self.model_tester.num_choices) + (1,) * (v.ndim - 1) )
if isinstance(UpperCAmelCase , tf.Tensor ) and v.ndim > 0
else v
for k, v in inputs_dict.items()
}
if return_labels:
if model_class in get_values(UpperCAmelCase ):
__lowerCAmelCase: Tuple = tf.ones(self.model_tester.batch_size , dtype=tf.intaa )
elif model_class in get_values(UpperCAmelCase ):
__lowerCAmelCase: Optional[Any] = tf.zeros(self.model_tester.batch_size , dtype=tf.intaa )
__lowerCAmelCase: Union[str, Any] = tf.zeros(self.model_tester.batch_size , dtype=tf.intaa )
elif model_class in get_values(UpperCAmelCase ):
__lowerCAmelCase: Union[str, Any] = tf.zeros(self.model_tester.batch_size , dtype=tf.intaa )
elif model_class in get_values(UpperCAmelCase ):
__lowerCAmelCase: str = tf.zeros(
(self.model_tester.batch_size, self.model_tester.text_seq_length) , dtype=tf.intaa )
return inputs_dict
def UpperCAmelCase ( self : Optional[int] ) -> Optional[Any]:
__lowerCAmelCase: Tuple = TFLayoutLMvaModelTester(self )
__lowerCAmelCase: str = ConfigTester(self , config_class=UpperCAmelCase , hidden_size=3_7 )
def UpperCAmelCase ( self : Tuple ) -> Dict:
self.config_tester.run_common_tests()
def UpperCAmelCase ( self : List[Any] ) -> Tuple:
__lowerCAmelCase , __lowerCAmelCase: Dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__lowerCAmelCase: List[Any] = model_class(UpperCAmelCase )
if getattr(UpperCAmelCase , 'hf_compute_loss' , UpperCAmelCase ):
# The number of elements in the loss should be the same as the number of elements in the label
__lowerCAmelCase: Optional[int] = self._prepare_for_class(inputs_dict.copy() , UpperCAmelCase , return_labels=UpperCAmelCase )
__lowerCAmelCase: List[Any] = prepared_for_class[
sorted(prepared_for_class.keys() - inputs_dict.keys() , reverse=UpperCAmelCase )[0]
]
__lowerCAmelCase: Tuple = added_label.shape.as_list()[:1]
# Test that model correctly compute the loss with kwargs
__lowerCAmelCase: Optional[Any] = self._prepare_for_class(inputs_dict.copy() , UpperCAmelCase , return_labels=UpperCAmelCase )
__lowerCAmelCase: Tuple = prepared_for_class.pop('input_ids' )
__lowerCAmelCase: Union[str, Any] = model(UpperCAmelCase , **UpperCAmelCase )[0]
self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1] )
# Test that model correctly compute the loss when we mask some positions
__lowerCAmelCase: Union[str, Any] = self._prepare_for_class(inputs_dict.copy() , UpperCAmelCase , return_labels=UpperCAmelCase )
__lowerCAmelCase: Optional[int] = prepared_for_class.pop('input_ids' )
if "labels" in prepared_for_class:
__lowerCAmelCase: str = prepared_for_class['labels'].numpy()
if len(labels.shape ) > 1 and labels.shape[1] != 1:
__lowerCAmelCase: Tuple = -1_0_0
__lowerCAmelCase: Union[str, Any] = tf.convert_to_tensor(UpperCAmelCase )
__lowerCAmelCase: Dict = model(UpperCAmelCase , **UpperCAmelCase )[0]
self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1] )
self.assertTrue(not np.any(np.isnan(loss.numpy() ) ) )
# Test that model correctly compute the loss with a dict
__lowerCAmelCase: str = self._prepare_for_class(inputs_dict.copy() , UpperCAmelCase , return_labels=UpperCAmelCase )
__lowerCAmelCase: Optional[Any] = model(UpperCAmelCase )[0]
self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1] )
# Test that model correctly compute the loss with a tuple
__lowerCAmelCase: Any = self._prepare_for_class(inputs_dict.copy() , UpperCAmelCase , return_labels=UpperCAmelCase )
# Get keys that were added with the _prepare_for_class function
__lowerCAmelCase: Tuple = prepared_for_class.keys() - inputs_dict.keys()
__lowerCAmelCase: Dict = inspect.signature(model.call ).parameters
__lowerCAmelCase: Dict = list(signature.keys() )
# Create a dictionary holding the location of the tensors in the tuple
__lowerCAmelCase: str = {0: 'input_ids'}
for label_key in label_keys:
__lowerCAmelCase: Optional[Any] = signature_names.index(UpperCAmelCase )
__lowerCAmelCase: Tuple = label_key
__lowerCAmelCase: Tuple = sorted(tuple_index_mapping.items() )
# Initialize a list with their default values, update the values and convert to a tuple
__lowerCAmelCase: List[Any] = []
for name in signature_names:
if name != "kwargs":
list_input.append(signature[name].default )
for index, value in sorted_tuple_index_mapping:
__lowerCAmelCase: Optional[Any] = prepared_for_class[value]
__lowerCAmelCase: Union[str, Any] = tuple(UpperCAmelCase )
# Send to model
__lowerCAmelCase: Any = model(tuple_input[:-1] )[0]
self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1] )
def UpperCAmelCase ( self : Dict ) -> Tuple:
        __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase )
def UpperCAmelCase ( self : Dict ) -> int:
        __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
__lowerCAmelCase: Tuple = type
self.model_tester.create_and_check_model(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase )
def UpperCAmelCase ( self : str ) -> List[str]:
        __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase )
def UpperCAmelCase ( self : int ) -> List[str]:
        __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase )
def UpperCAmelCase ( self : Tuple ) -> str:
        __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase )
@slow
def UpperCAmelCase ( self : Union[str, Any] ) -> Tuple:
for model_name in TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__lowerCAmelCase: Optional[int] = TFLayoutLMvaModel.from_pretrained(UpperCAmelCase )
self.assertIsNotNone(UpperCAmelCase )
def _a ( ) -> Any:
"""simple docstring"""
__lowerCAmelCase: Dict = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
return image
@require_tf
class A_ ( unittest.TestCase ):
@cached_property
def UpperCAmelCase ( self : int ) -> Dict:
return LayoutLMvaImageProcessor(apply_ocr=UpperCAmelCase ) if is_vision_available() else None
@slow
def UpperCAmelCase ( self : Any ) -> List[str]:
__lowerCAmelCase: Any = TFLayoutLMvaModel.from_pretrained('microsoft/layoutlmv3-base' )
__lowerCAmelCase: Tuple = self.default_image_processor
__lowerCAmelCase: str = prepare_img()
__lowerCAmelCase: Optional[int] = image_processor(images=UpperCAmelCase , return_tensors='tf' ).pixel_values
__lowerCAmelCase: Dict = tf.constant([[1, 2]] )
__lowerCAmelCase: str = tf.expand_dims(tf.constant([[1, 2, 3, 4], [5, 6, 7, 8]] ) , axis=0 )
# forward pass
__lowerCAmelCase: List[str] = model(input_ids=UpperCAmelCase , bbox=UpperCAmelCase , pixel_values=UpperCAmelCase , training=UpperCAmelCase )
# verify the logits
__lowerCAmelCase: Tuple = (1, 1_9_9, 7_6_8)
self.assertEqual(outputs.last_hidden_state.shape , UpperCAmelCase )
__lowerCAmelCase: str = tf.constant(
[[-0.0529, 0.3618, 0.1632], [-0.1587, -0.1667, -0.0400], [-0.1557, -0.1671, -0.0505]] )
self.assertTrue(np.allclose(outputs.last_hidden_state[0, :3, :3] , UpperCAmelCase , atol=1E-4 ) )
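# A standalone sketch (toy sizes matching the tester defaults) of the sequence-length
# bookkeeping above: total length = text tokens + image patches + one CLS-like patch.
_image_size, _patch_size, _text_seq_length = 4, 2, 7
_image_seq_length = (_image_size // _patch_size) ** 2 + 1
assert _image_seq_length == 5
assert _text_seq_length + _image_seq_length == 12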
| 322 | 0 |
from __future__ import annotations
import json
import requests
from bsa import BeautifulSoup
from fake_useragent import UserAgent
headers = {"UserAgent": UserAgent().random}
def extract_user_profile(script) -> dict:
    data = script.contents[0]
    info = json.loads(data[data.find('{"config"') : -1])
    return info["entry_data"]["ProfilePage"][0]["graphql"]["user"]
class InstagramUser:
'''simple docstring'''
    def __init__(self, username) -> None:
        self.url = f"https://www.instagram.com/{username}/"
        self.user_data = self.get_json()

    def get_json(self) -> dict:
        html_page = requests.get(self.url, headers=headers).text
        scripts = BeautifulSoup(html_page, "html.parser").find_all("script")
        try:
            return extract_user_profile(scripts[4])
        except (json.decoder.JSONDecodeError, KeyError):
            return extract_user_profile(scripts[3])
def __repr__( self ) -> str:
return F'{self.__class__.__name__}(\'{self.username}\')'
def __str__( self ) -> str:
return F'{self.fullname} ({self.username}) is {self.biography}'
    @property
    def username(self) -> str:
        return self.user_data["username"]

    @property
    def fullname(self) -> str:
        return self.user_data["full_name"]

    @property
    def biography(self) -> str:
        return self.user_data["biography"]

    @property
    def email(self) -> str:
        return self.user_data["business_email"]

    @property
    def website(self) -> str:
        return self.user_data["external_url"]

    @property
    def number_of_followers(self) -> int:
        return self.user_data["edge_followed_by"]["count"]

    @property
    def number_of_followings(self) -> int:
        return self.user_data["edge_follow"]["count"]

    @property
    def number_of_posts(self) -> int:
        return self.user_data["edge_owner_to_timeline_media"]["count"]

    @property
    def profile_picture_url(self) -> str:
        return self.user_data["profile_pic_url_hd"]

    @property
    def is_verified(self) -> bool:
        return self.user_data["is_verified"]

    @property
    def is_private(self) -> bool:
        return self.user_data["is_private"]
def test_instagram_user(username: str = "github") -> None:
    import os

    if os.environ.get("CI"):
        return  # test failing on GitHub Actions
    instagram_user = InstagramUser(username)
    assert instagram_user.user_data
    assert isinstance(instagram_user.user_data, dict)
assert instagram_user.username == username
if username != "github":
return
assert instagram_user.fullname == "GitHub"
assert instagram_user.biography == "Built for developers."
assert instagram_user.number_of_posts > 1_50
assert instagram_user.number_of_followers > 12_00_00
assert instagram_user.number_of_followings > 15
assert instagram_user.email == "[email protected]"
assert instagram_user.website == "https://github.com/readme"
assert instagram_user.profile_picture_url.startswith('https://instagram.' )
assert instagram_user.is_verified is True
assert instagram_user.is_private is False
if __name__ == "__main__":
import doctest
doctest.testmod()
__UpperCamelCase = InstagramUser('''github''')
print(instagram_user)
print(f'''{instagram_user.number_of_posts = }''')
print(f'''{instagram_user.number_of_followers = }''')
print(f'''{instagram_user.number_of_followings = }''')
print(f'''{instagram_user.email = }''')
print(f'''{instagram_user.website = }''')
print(f'''{instagram_user.profile_picture_url = }''')
print(f'''{instagram_user.is_verified = }''')
print(f'''{instagram_user.is_private = }''')
| 365 |
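The scraper above hinges on one trick: the profile page embeds its data as JSON inside a <script> tag, so BeautifulSoup plus json.loads recovers it without any API. A network-free sketch of that extraction (the HTML string is a toy stand-in; requires beautifulsoup4):

import json
from bs4 import BeautifulSoup

# Toy page that embeds its payload the way the real profile page does.
html = '<html><script>window._sharedData = {"config": {"user": "demo"}};</script></html>'

soup = BeautifulSoup(html, 'html.parser')
script = soup.find_all('script')[0]
data = script.contents[0]
# Slice from the first '{' of the payload to the trailing ';'.
info = json.loads(data[data.find('{"config"') : -1])
print(info['config']['user'])  # demo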
"""simple docstring"""
from __future__ import annotations
from fractions import Fraction
from math import gcd, sqrt
def lowercase (SCREAMING_SNAKE_CASE_ : int ) -> bool:
SCREAMING_SNAKE_CASE = int(number**0.5 )
return number == sq * sq
def lowercase (SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : int ) -> tuple[int, int]:
SCREAMING_SNAKE_CASE = x_num * y_den * z_den + y_num * x_den * z_den + z_num * x_den * y_den
SCREAMING_SNAKE_CASE = x_den * y_den * z_den
SCREAMING_SNAKE_CASE = gcd(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
top //= hcf
bottom //= hcf
return top, bottom
def lowercase (SCREAMING_SNAKE_CASE_ : int = 35 ) -> int:
SCREAMING_SNAKE_CASE = set()
SCREAMING_SNAKE_CASE = 42
SCREAMING_SNAKE_CASE = Fraction(0 )
SCREAMING_SNAKE_CASE = 42
for x_num in range(1 , order + 1 ):
for x_den in range(x_num + 1 , order + 1 ):
for y_num in range(1 , order + 1 ):
for y_den in range(y_num + 1 , order + 1 ):
# n=1
SCREAMING_SNAKE_CASE = x_num * y_den + x_den * y_num
SCREAMING_SNAKE_CASE = x_den * y_den
SCREAMING_SNAKE_CASE = gcd(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
z_num //= hcf
z_den //= hcf
if 0 < z_num < z_den <= order:
SCREAMING_SNAKE_CASE = add_three(
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
unique_s.add(SCREAMING_SNAKE_CASE_ )
# n=2
SCREAMING_SNAKE_CASE = (
x_num * x_num * y_den * y_den + x_den * x_den * y_num * y_num
)
SCREAMING_SNAKE_CASE = x_den * x_den * y_den * y_den
if is_sq(SCREAMING_SNAKE_CASE_ ) and is_sq(SCREAMING_SNAKE_CASE_ ):
SCREAMING_SNAKE_CASE = int(sqrt(SCREAMING_SNAKE_CASE_ ) )
SCREAMING_SNAKE_CASE = int(sqrt(SCREAMING_SNAKE_CASE_ ) )
SCREAMING_SNAKE_CASE = gcd(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
z_num //= hcf
z_den //= hcf
if 0 < z_num < z_den <= order:
SCREAMING_SNAKE_CASE = add_three(
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
unique_s.add(SCREAMING_SNAKE_CASE_ )
# n=-1
SCREAMING_SNAKE_CASE = x_num * y_num
SCREAMING_SNAKE_CASE = x_den * y_num + x_num * y_den
SCREAMING_SNAKE_CASE = gcd(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
z_num //= hcf
z_den //= hcf
if 0 < z_num < z_den <= order:
SCREAMING_SNAKE_CASE = add_three(
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
unique_s.add(SCREAMING_SNAKE_CASE_ )
# n=2
SCREAMING_SNAKE_CASE = x_num * x_num * y_num * y_num
SCREAMING_SNAKE_CASE = (
x_den * x_den * y_num * y_num + x_num * x_num * y_den * y_den
)
if is_sq(SCREAMING_SNAKE_CASE_ ) and is_sq(SCREAMING_SNAKE_CASE_ ):
SCREAMING_SNAKE_CASE = int(sqrt(SCREAMING_SNAKE_CASE_ ) )
SCREAMING_SNAKE_CASE = int(sqrt(SCREAMING_SNAKE_CASE_ ) )
SCREAMING_SNAKE_CASE = gcd(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
z_num //= hcf
z_den //= hcf
if 0 < z_num < z_den <= order:
SCREAMING_SNAKE_CASE = add_three(
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
unique_s.add(SCREAMING_SNAKE_CASE_ )
for num, den in unique_s:
total += Fraction(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
return total.denominator + total.numerator
if __name__ == "__main__":
print(f'''{solution() = }''')
| 38 | 0 |
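The solution above leans on two stdlib tools: math.gcd to keep each numerator/denominator pair reduced, and fractions.Fraction to accumulate an exact rational total whose numerator and denominator stay meaningful. A small illustration of both (values are arbitrary):

from fractions import Fraction
from math import gcd

# Reduce a numerator/denominator pair, as the inner loops do.
num, den = 12, 30
h = gcd(num, den)
num, den = num // h, den // h
print(num, den)  # 2 5

# Fraction keeps the running sum exact, with no floating-point drift.
total = Fraction(0)
for n, d in [(1, 2), (1, 3), (1, 6)]:
    total += Fraction(n, d)
print(total, total.numerator + total.denominator)  # 1 2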
"""simple docstring"""
import os
import tempfile
import unittest
from transformers import FlaubertConfig, is_torch_available
from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
FlaubertForMultipleChoice,
FlaubertForQuestionAnswering,
FlaubertForQuestionAnsweringSimple,
FlaubertForSequenceClassification,
FlaubertForTokenClassification,
FlaubertModel,
FlaubertWithLMHeadModel,
)
from transformers.models.flaubert.modeling_flaubert import FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST
class lowercase ( _UpperCAmelCase ):
def __init__( self , lowercase , lowercase=13 , lowercase=7 , lowercase=True , lowercase=True , lowercase=True , lowercase=True , lowercase=True , lowercase=False , lowercase=False , lowercase=False , lowercase=2 , lowercase=99 , lowercase=0 , lowercase=32 , lowercase=5 , lowercase=4 , lowercase=0.1 , lowercase=0.1 , lowercase=512 , lowercase=12 , lowercase=2 , lowercase=0.02 , lowercase=3 , lowercase=4 , lowercase="last" , lowercase=None , lowercase=None , ) -> int:
lowerCAmelCase = parent
lowerCAmelCase = batch_size
lowerCAmelCase = seq_length
lowerCAmelCase = is_training
lowerCAmelCase = use_input_lengths
lowerCAmelCase = use_token_type_ids
lowerCAmelCase = use_labels
lowerCAmelCase = gelu_activation
lowerCAmelCase = sinusoidal_embeddings
lowerCAmelCase = causal
lowerCAmelCase = asm
lowerCAmelCase = n_langs
lowerCAmelCase = vocab_size
lowerCAmelCase = n_special
lowerCAmelCase = hidden_size
lowerCAmelCase = num_hidden_layers
lowerCAmelCase = num_attention_heads
lowerCAmelCase = hidden_dropout_prob
lowerCAmelCase = attention_probs_dropout_prob
lowerCAmelCase = max_position_embeddings
lowerCAmelCase = type_vocab_size
lowerCAmelCase = type_sequence_label_size
lowerCAmelCase = initializer_range
lowerCAmelCase = num_labels
lowerCAmelCase = num_choices
lowerCAmelCase = summary_type
lowerCAmelCase = use_proj
lowerCAmelCase = scope
def _snake_case ( self ) -> int:
lowerCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowerCAmelCase = random_attention_mask([self.batch_size, self.seq_length] )
lowerCAmelCase = None
if self.use_input_lengths:
lowerCAmelCase = (
ids_tensor([self.batch_size] , vocab_size=2 ) + self.seq_length - 2
) # small variation of seq_length
lowerCAmelCase = None
if self.use_token_type_ids:
lowerCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.n_langs )
lowerCAmelCase = None
lowerCAmelCase = None
lowerCAmelCase = None
if self.use_labels:
lowerCAmelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowerCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
lowerCAmelCase = ids_tensor([self.batch_size] , 2 ).float()
lowerCAmelCase = ids_tensor([self.batch_size] , self.num_choices )
lowerCAmelCase = self.get_config()
return (
config,
input_ids,
token_type_ids,
input_lengths,
sequence_labels,
token_labels,
is_impossible_labels,
choice_labels,
input_mask,
)
def _snake_case ( self ) -> List[Any]:
return FlaubertConfig(
vocab_size=self.vocab_size , n_special=self.n_special , emb_dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , gelu_activation=self.gelu_activation , sinusoidal_embeddings=self.sinusoidal_embeddings , asm=self.asm , causal=self.causal , n_langs=self.n_langs , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , summary_type=self.summary_type , use_proj=self.use_proj , )
def _snake_case ( self , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , ) -> Any:
lowerCAmelCase = FlaubertModel(config=lowercase )
model.to(lowercase )
model.eval()
lowerCAmelCase = model(lowercase , lengths=lowercase , langs=lowercase )
lowerCAmelCase = model(lowercase , langs=lowercase )
lowerCAmelCase = model(lowercase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def _snake_case ( self , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , ) -> Tuple:
lowerCAmelCase = FlaubertWithLMHeadModel(lowercase )
model.to(lowercase )
model.eval()
lowerCAmelCase = model(lowercase , token_type_ids=lowercase , labels=lowercase )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def _snake_case ( self , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , ) -> str:
lowerCAmelCase = FlaubertForQuestionAnsweringSimple(lowercase )
model.to(lowercase )
model.eval()
lowerCAmelCase = model(lowercase )
lowerCAmelCase = model(lowercase , start_positions=lowercase , end_positions=lowercase )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def _snake_case ( self , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , ) -> Dict:
lowerCAmelCase = FlaubertForQuestionAnswering(lowercase )
model.to(lowercase )
model.eval()
lowerCAmelCase = model(lowercase )
lowerCAmelCase = model(
lowercase , start_positions=lowercase , end_positions=lowercase , cls_index=lowercase , is_impossible=lowercase , p_mask=lowercase , )
lowerCAmelCase = model(
lowercase , start_positions=lowercase , end_positions=lowercase , cls_index=lowercase , is_impossible=lowercase , )
((lowerCAmelCase) , ) = result_with_labels.to_tuple()
lowerCAmelCase = model(lowercase , start_positions=lowercase , end_positions=lowercase )
((lowerCAmelCase) , ) = result_with_labels.to_tuple()
self.parent.assertEqual(result_with_labels.loss.shape , () )
self.parent.assertEqual(result.start_top_log_probs.shape , (self.batch_size, model.config.start_n_top) )
self.parent.assertEqual(result.start_top_index.shape , (self.batch_size, model.config.start_n_top) )
self.parent.assertEqual(
result.end_top_log_probs.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top) )
self.parent.assertEqual(
result.end_top_index.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top) )
self.parent.assertEqual(result.cls_logits.shape , (self.batch_size,) )
def _snake_case ( self , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , ) -> int:
lowerCAmelCase = FlaubertForSequenceClassification(lowercase )
model.to(lowercase )
model.eval()
lowerCAmelCase = model(lowercase )
lowerCAmelCase = model(lowercase , labels=lowercase )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def _snake_case ( self , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , ) -> int:
lowerCAmelCase = self.num_labels
lowerCAmelCase = FlaubertForTokenClassification(lowercase )
model.to(lowercase )
model.eval()
lowerCAmelCase = model(lowercase , attention_mask=lowercase , labels=lowercase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def _snake_case ( self , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , ) -> Tuple:
lowerCAmelCase = self.num_choices
lowerCAmelCase = FlaubertForMultipleChoice(config=lowercase )
model.to(lowercase )
model.eval()
lowerCAmelCase = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
lowerCAmelCase = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
lowerCAmelCase = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
lowerCAmelCase = model(
lowercase , attention_mask=lowercase , token_type_ids=lowercase , labels=lowercase , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def _snake_case ( self ) -> List[Any]:
lowerCAmelCase = self.prepare_config_and_inputs()
(
(
lowerCAmelCase
) , (
lowerCAmelCase
) , (
lowerCAmelCase
) , (
lowerCAmelCase
) , (
lowerCAmelCase
) , (
lowerCAmelCase
) , (
lowerCAmelCase
) , (
lowerCAmelCase
) , (
lowerCAmelCase
) ,
) = config_and_inputs
lowerCAmelCase = {
"""input_ids""": input_ids,
"""token_type_ids""": token_type_ids,
"""lengths""": input_lengths,
"""attention_mask""": input_mask,
}
return config, inputs_dict
@require_torch
class lowercase ( _UpperCAmelCase , _UpperCAmelCase , unittest.TestCase ):
_SCREAMING_SNAKE_CASE = (
(
FlaubertModel,
FlaubertWithLMHeadModel,
FlaubertForQuestionAnswering,
FlaubertForQuestionAnsweringSimple,
FlaubertForSequenceClassification,
FlaubertForTokenClassification,
FlaubertForMultipleChoice,
)
if is_torch_available()
else ()
)
_SCREAMING_SNAKE_CASE = (
{
'feature-extraction': FlaubertModel,
'fill-mask': FlaubertWithLMHeadModel,
'question-answering': FlaubertForQuestionAnsweringSimple,
'text-classification': FlaubertForSequenceClassification,
'token-classification': FlaubertForTokenClassification,
'zero-shot': FlaubertForSequenceClassification,
}
if is_torch_available()
else {}
)
def _snake_case ( self , lowercase , lowercase , lowercase , lowercase , lowercase ) -> Optional[int]:
if (
pipeline_test_casse_name == "QAPipelineTests"
and tokenizer_name is not None
and not tokenizer_name.endswith("""Fast""" )
):
# `QAPipelineTests` fails for a few models when the slower tokenizers are used.
# (The slower tokenizers were never used for pipeline tests before the pipeline testing rework)
# TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer
return True
return False
def _snake_case ( self , lowercase , lowercase , lowercase=False ) -> Optional[Any]:
lowerCAmelCase = super()._prepare_for_class(lowercase , lowercase , return_labels=lowercase )
if return_labels:
if model_class.__name__ == "FlaubertForQuestionAnswering":
lowerCAmelCase = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=lowercase )
lowerCAmelCase = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=lowercase )
return inputs_dict
def _snake_case ( self ) -> List[str]:
lowerCAmelCase = FlaubertModelTester(self )
lowerCAmelCase = ConfigTester(self , config_class=lowercase , emb_dim=37 )
def _snake_case ( self ) -> Tuple:
self.config_tester.run_common_tests()
def _snake_case ( self ) -> Tuple:
lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_model(*lowercase )
def _snake_case ( self ) -> Tuple:
lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_lm_head(*lowercase )
def _snake_case ( self ) -> Optional[int]:
lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_simple_qa(*lowercase )
def _snake_case ( self ) -> Union[str, Any]:
lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_qa(*lowercase )
def _snake_case ( self ) -> Any:
lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_sequence_classif(*lowercase )
def _snake_case ( self ) -> Any:
lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_token_classif(*lowercase )
def _snake_case ( self ) -> Any:
lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_multiple_choice(*lowercase )
@slow
def _snake_case ( self ) -> Tuple:
for model_name in FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCAmelCase = FlaubertModel.from_pretrained(lowercase )
self.assertIsNotNone(lowercase )
@slow
@require_torch_gpu
def _snake_case ( self ) -> List[Any]:
lowerCAmelCase , lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
# FlauBertForMultipleChoice behaves incorrectly in JIT environments.
if model_class == FlaubertForMultipleChoice:
return
lowerCAmelCase = True
lowerCAmelCase = model_class(config=lowercase )
lowerCAmelCase = self._prepare_for_class(lowercase , lowercase )
lowerCAmelCase = torch.jit.trace(
lowercase , (inputs_dict["""input_ids"""].to("""cpu""" ), inputs_dict["""attention_mask"""].to("""cpu""" )) )
with tempfile.TemporaryDirectory() as tmp:
torch.jit.save(lowercase , os.path.join(lowercase , """traced_model.pt""" ) )
lowerCAmelCase = torch.jit.load(os.path.join(lowercase , """traced_model.pt""" ) , map_location=lowercase )
loaded(inputs_dict["""input_ids"""].to(lowercase ) , inputs_dict["""attention_mask"""].to(lowercase ) )
@require_torch
class lowercase ( unittest.TestCase ):
@slow
def _snake_case ( self ) -> Optional[int]:
lowerCAmelCase = FlaubertModel.from_pretrained("""flaubert/flaubert_base_cased""" )
lowerCAmelCase = torch.tensor([[0, 345, 232, 328, 740, 140, 1_695, 69, 6_078, 1_588, 2]] )
with torch.no_grad():
lowerCAmelCase = model(lowercase )[0]
lowerCAmelCase = torch.Size((1, 11, 768) )
self.assertEqual(output.shape , lowercase )
lowerCAmelCase = torch.tensor(
[[[-2.6_251, -1.4_298, -0.0_227], [-2.8_510, -1.6_387, 0.2_258], [-2.8_114, -1.1_832, -0.3_066]]] )
self.assertTrue(torch.allclose(output[:, :3, :3] , lowercase , atol=1e-4 ) )
| 46 |
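The GPU test above traces the model with torch.jit.trace, round-trips it through torch.jit.save/torch.jit.load, and then calls the reloaded artifact. The same workflow on a tiny stand-in module (requires torch; Doubler is made up for illustration):

import os
import tempfile
import torch

class Doubler(torch.nn.Module):
    def forward(self, x):
        return x * 2

model = Doubler().eval()
example = torch.ones(2, 3)

# Trace with example inputs, save, reload, and run the reloaded module.
traced = torch.jit.trace(model, (example,))
with tempfile.TemporaryDirectory() as tmp:
    path = os.path.join(tmp, 'traced_model.pt')
    torch.jit.save(traced, path)
    loaded = torch.jit.load(path, map_location='cpu')

assert torch.equal(loaded(example), example * 2)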
"""simple docstring"""
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
if TYPE_CHECKING:
from ... import FeatureExtractionMixin, TensorType
SCREAMING_SNAKE_CASE__ = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE__ = {
"openai/imagegpt-small": "",
"openai/imagegpt-medium": "",
"openai/imagegpt-large": "",
}
class lowercase ( _UpperCAmelCase ):
_SCREAMING_SNAKE_CASE = 'imagegpt'
_SCREAMING_SNAKE_CASE = ['past_key_values']
_SCREAMING_SNAKE_CASE = {
'hidden_size': 'n_embd',
'max_position_embeddings': 'n_positions',
'num_attention_heads': 'n_head',
'num_hidden_layers': 'n_layer',
}
def __init__( self , lowercase=512 + 1 , lowercase=32 * 32 , lowercase=512 , lowercase=24 , lowercase=8 , lowercase=None , lowercase="quick_gelu" , lowercase=0.1 , lowercase=0.1 , lowercase=0.1 , lowercase=1e-5 , lowercase=0.02 , lowercase=True , lowercase=True , lowercase=False , lowercase=False , lowercase=False , **lowercase , ) -> Any:
lowerCAmelCase = vocab_size
lowerCAmelCase = n_positions
lowerCAmelCase = n_embd
lowerCAmelCase = n_layer
lowerCAmelCase = n_head
lowerCAmelCase = n_inner
lowerCAmelCase = activation_function
lowerCAmelCase = resid_pdrop
lowerCAmelCase = embd_pdrop
lowerCAmelCase = attn_pdrop
lowerCAmelCase = layer_norm_epsilon
lowerCAmelCase = initializer_range
lowerCAmelCase = scale_attn_weights
lowerCAmelCase = use_cache
lowerCAmelCase = scale_attn_by_inverse_layer_idx
lowerCAmelCase = reorder_and_upcast_attn
lowerCAmelCase = tie_word_embeddings
super().__init__(tie_word_embeddings=lowercase , **lowercase )
class lowercase ( _UpperCAmelCase ):
@property
def _snake_case ( self ) -> Mapping[str, Mapping[int, str]]:
return OrderedDict(
[
("""input_ids""", {0: """batch""", 1: """sequence"""}),
] )
def _snake_case ( self , lowercase , lowercase = 1 , lowercase = -1 , lowercase = False , lowercase = None , lowercase = 3 , lowercase = 32 , lowercase = 32 , ) -> Mapping[str, Any]:
lowerCAmelCase = self._generate_dummy_images(lowercase , lowercase , lowercase , lowercase )
lowerCAmelCase = dict(preprocessor(images=lowercase , return_tensors=lowercase ) )
return inputs
| 46 | 1 |
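The attribute_map above lets generic code read config.hidden_size even though the stored field is n_embd. A minimal sketch of that read-path aliasing, independent of transformers (TinyConfig is a hypothetical class; the real PretrainedConfig also aliases writes):

class TinyConfig:
    # Public name -> stored field, mirroring the attribute_map idea.
    attribute_map = {'hidden_size': 'n_embd', 'num_hidden_layers': 'n_layer'}

    def __init__(self, n_embd=512, n_layer=24):
        self.n_embd = n_embd
        self.n_layer = n_layer

    def __getattr__(self, name):
        # Only runs when normal lookup fails, so stored fields take precedence.
        mapped = type(self).attribute_map.get(name)
        if mapped is not None:
            return getattr(self, mapped)
        raise AttributeError(name)

cfg = TinyConfig()
print(cfg.hidden_size, cfg.num_hidden_layers)  # 512 24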
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_snake_case = {
'configuration_conditional_detr': [
'CONDITIONAL_DETR_PRETRAINED_CONFIG_ARCHIVE_MAP',
'ConditionalDetrConfig',
'ConditionalDetrOnnxConfig',
]
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_snake_case = ['ConditionalDetrFeatureExtractor']
_snake_case = ['ConditionalDetrImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_snake_case = [
'CONDITIONAL_DETR_PRETRAINED_MODEL_ARCHIVE_LIST',
'ConditionalDetrForObjectDetection',
'ConditionalDetrForSegmentation',
'ConditionalDetrModel',
'ConditionalDetrPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_conditional_detr import (
CONDITIONAL_DETR_PRETRAINED_CONFIG_ARCHIVE_MAP,
ConditionalDetrConfig,
ConditionalDetrOnnxConfig,
)
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_conditional_detr import ConditionalDetrFeatureExtractor
from .image_processing_conditional_detr import ConditionalDetrImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_conditional_detr import (
CONDITIONAL_DETR_PRETRAINED_MODEL_ARCHIVE_LIST,
ConditionalDetrForObjectDetection,
ConditionalDetrForSegmentation,
ConditionalDetrModel,
ConditionalDetrPreTrainedModel,
)
else:
import sys
_snake_case = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 324 |
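The _import_structure / _LazyModule arrangement above defers heavy imports until a symbol is first touched. The same effect can be sketched with a module-level __getattr__ (PEP 562); this is a simplified stand-in, not the actual transformers machinery, and lazy_pkg.py is a hypothetical file name:

# lazy_pkg.py
import importlib

_import_structure = {
    'json': ['loads', 'dumps'],  # backing module -> exported attributes
}
_attr_to_module = {attr: mod for mod, attrs in _import_structure.items() for attr in attrs}

def __getattr__(name):
    # Import the backing module only when the attribute is first requested.
    module_name = _attr_to_module.get(name)
    if module_name is None:
        raise AttributeError(f'module {__name__!r} has no attribute {name!r}')
    module = importlib.import_module(module_name)
    return getattr(module, name)

`from lazy_pkg import loads` then triggers the json import at access time rather than at package import time.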
"""simple docstring"""
from __future__ import annotations
import time
_snake_case = list[tuple[int, int]]
_snake_case = [
[0, 0, 0, 0, 0, 0, 0],
[0, 1, 0, 0, 0, 0, 0], # 0 are free path whereas 1's are obstacles
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0, 0],
[1, 0, 1, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 1, 0, 0],
]
_snake_case = [[-1, 0], [0, -1], [1, 0], [0, 1]] # up, left, down, right
class UpperCamelCase :
def __init__( self : Optional[int] , UpperCAmelCase__ : int , UpperCAmelCase__ : int , UpperCAmelCase__ : int , UpperCAmelCase__ : int , UpperCAmelCase__ : Node | None ) -> List[str]:
_a : int = pos_x
_a : Union[str, Any] = pos_y
_a : Tuple = (pos_y, pos_x)
_a : Tuple = goal_x
_a : int = goal_y
_a : str = parent
class UpperCamelCase :
def __init__( self : List[Any] , UpperCAmelCase__ : tuple[int, int] , UpperCAmelCase__ : tuple[int, int] ) -> List[str]:
_a : List[Any] = Node(start[1] , start[0] , goal[1] , goal[0] , UpperCAmelCase__ )
_a : List[str] = Node(goal[1] , goal[0] , goal[1] , goal[0] , UpperCAmelCase__ )
_a : Optional[int] = [self.start]
_a : Tuple = False
def _lowercase ( self : str ) -> Path | None:
while self.node_queue:
_a : Tuple = self.node_queue.pop(0 )
if current_node.pos == self.target.pos:
_a : Dict = True
return self.retrace_path(UpperCAmelCase__ )
_a : Tuple = self.get_successors(UpperCAmelCase__ )
for node in successors:
self.node_queue.append(UpperCAmelCase__ )
if not self.reached:
return [self.start.pos]
return None
def _lowercase ( self : Optional[int] , UpperCAmelCase__ : Node ) -> list[Node]:
_a : Optional[Any] = []
for action in delta:
_a : str = parent.pos_x + action[1]
_a : List[Any] = parent.pos_y + action[0]
if not (0 <= pos_x <= len(grid[0] ) - 1 and 0 <= pos_y <= len(UpperCAmelCase__ ) - 1):
continue
if grid[pos_y][pos_x] != 0:
continue
successors.append(
Node(UpperCAmelCase__ , UpperCAmelCase__ , self.target.pos_y , self.target.pos_x , UpperCAmelCase__ ) )
return successors
def _lowercase ( self : List[Any] , UpperCAmelCase__ : Node | None ) -> Path:
_a : Dict = node
_a : List[str] = []
while current_node is not None:
path.append((current_node.pos_y, current_node.pos_x) )
_a : Any = current_node.parent
path.reverse()
return path
class UpperCamelCase :
def __init__( self : List[str] , UpperCAmelCase__ : int , UpperCAmelCase__ : List[Any] ) -> Any:
_a : Dict = BreadthFirstSearch(UpperCAmelCase__ , UpperCAmelCase__ )
_a : Optional[int] = BreadthFirstSearch(UpperCAmelCase__ , UpperCAmelCase__ )
_a : Dict = False
def _lowercase ( self : Any ) -> Path | None:
while self.fwd_bfs.node_queue or self.bwd_bfs.node_queue:
_a : List[Any] = self.fwd_bfs.node_queue.pop(0 )
_a : Union[str, Any] = self.bwd_bfs.node_queue.pop(0 )
if current_bwd_node.pos == current_fwd_node.pos:
_a : Optional[int] = True
return self.retrace_bidirectional_path(
UpperCAmelCase__ , UpperCAmelCase__ )
_a : List[str] = current_bwd_node
_a : int = current_fwd_node
_a : Optional[Any] = {
self.fwd_bfs: self.fwd_bfs.get_successors(UpperCAmelCase__ ),
self.bwd_bfs: self.bwd_bfs.get_successors(UpperCAmelCase__ ),
}
for bfs in [self.fwd_bfs, self.bwd_bfs]:
for node in successors[bfs]:
bfs.node_queue.append(UpperCAmelCase__ )
if not self.reached:
return [self.fwd_bfs.start.pos]
return None
def _lowercase ( self : Optional[int] , UpperCAmelCase__ : Node , UpperCAmelCase__ : Node ) -> Path:
_a : str = self.fwd_bfs.retrace_path(UpperCAmelCase__ )
_a : List[Any] = self.bwd_bfs.retrace_path(UpperCAmelCase__ )
bwd_path.pop()
bwd_path.reverse()
_a : Tuple = fwd_path + bwd_path
return path
if __name__ == "__main__":
# all coordinates are given in format [y,x]
import doctest
doctest.testmod()
_snake_case = (0, 0)
_snake_case = (len(grid) - 1, len(grid[0]) - 1)
for elem in grid:
print(elem)
_snake_case = time.time()
_snake_case = BreadthFirstSearch(init, goal)
_snake_case = bfs.search()
_snake_case = time.time() - start_bfs_time
print('Unidirectional BFS computation time : ', bfs_time)
_snake_case = time.time()
_snake_case = BidirectionalBreadthFirstSearch(init, goal)
_snake_case = bd_bfs.search()
_snake_case = time.time() - start_bd_bfs_time
print('Bidirectional BFS computation time : ', bd_bfs_time)
| 324 | 1 |
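For contrast with the class-based search above, a single-direction BFS over the same kind of 0/1 grid fits in one function: a deque for the frontier and a parent map for path recovery (a sketch, not the code above):

from collections import deque

def bfs_path(grid, start, goal):
    # start/goal are (row, col); 0 = free cell, 1 = obstacle.
    parent = {start: None}
    queue = deque([start])
    while queue:
        y, x = queue.popleft()
        if (y, x) == goal:
            path = []
            node = goal
            while node is not None:
                path.append(node)
                node = parent[node]
            return path[::-1]
        for dy, dx in ((-1, 0), (0, -1), (1, 0), (0, 1)):
            ny, nx = y + dy, x + dx
            if (0 <= ny < len(grid) and 0 <= nx < len(grid[0])
                    and grid[ny][nx] == 0 and (ny, nx) not in parent):
                parent[(ny, nx)] = (y, x)
                queue.append((ny, nx))
    return None

print(bfs_path([[0, 0], [1, 0]], (0, 0), (1, 1)))  # [(0, 0), (0, 1), (1, 1)]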
from math import isclose, sqrt
def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) -> tuple[float, float, float]:
__lowerCamelCase : Tuple = point_y / 4 / point_x
__lowerCamelCase : Tuple = 2 * normal_gradient / (1 + normal_gradient * normal_gradient)
__lowerCamelCase : List[Any] = (1 - normal_gradient * normal_gradient) / (
1 + normal_gradient * normal_gradient
)
__lowerCamelCase : int = (sa - ca * incoming_gradient) / (ca + sa * incoming_gradient)
# to find the next point, solve the simultaneous equations:
# y^2 + 4x^2 = 100
# y - b = m * (x - a)
# ==> A x^2 + B x + C = 0
__lowerCamelCase : Any = outgoing_gradient**2 + 4
__lowerCamelCase : Optional[int] = 2 * outgoing_gradient * (point_y - outgoing_gradient * point_x)
__lowerCamelCase : str = (point_y - outgoing_gradient * point_x) ** 2 - 1_0_0
__lowerCamelCase : str = (
-linear_term - sqrt(linear_term**2 - 4 * quadratic_term * constant_term )
) / (2 * quadratic_term)
__lowerCamelCase : Optional[Any] = (
-linear_term + sqrt(linear_term**2 - 4 * quadratic_term * constant_term )
) / (2 * quadratic_term)
# two solutions, one of which is our input point
__lowerCamelCase : Optional[Any] = x_minus if isclose(lowerCamelCase__ , lowerCamelCase__ ) else x_plus
__lowerCamelCase : Tuple = point_y + outgoing_gradient * (next_x - point_x)
return next_x, next_y, outgoing_gradient
def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ = 1.4 , lowerCamelCase__ = -9.6 ) -> int:
__lowerCamelCase : int = 0
__lowerCamelCase : float = first_x_coord
__lowerCamelCase : float = first_y_coord
__lowerCamelCase : float = (10.1 - point_y) / (0.0 - point_x)
while not (-0.01 <= point_x <= 0.01 and point_y > 0):
__lowerCamelCase , __lowerCamelCase , __lowerCamelCase : Any = next_point(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
num_reflections += 1
return num_reflections
if __name__ == "__main__":
print(F"""{solution() = }""")
| 73 |
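As a sanity check on the geometry above: one bounce computed with the same closed-form steps (reflect the gradient across the surface normal, then intersect the reflected line with the ellipse) should land back on 4x^2 + y^2 = 100. The root selection below keeps whichever quadratic root is not the current point:

from math import isclose, sqrt

px, py = 1.4, -9.6                       # first impact point from the problem
m_in = (10.1 - py) / (0.0 - px)          # gradient of the incoming beam
n = py / 4 / px                          # gradient related to the surface normal
s2 = 2 * n / (1 + n * n)                 # sin(2*theta) via the half-angle forms
c2 = (1 - n * n) / (1 + n * n)           # cos(2*theta)
m_out = (s2 - c2 * m_in) / (c2 + s2 * m_in)

a = m_out ** 2 + 4
b = 2 * m_out * (py - m_out * px)
c = (py - m_out * px) ** 2 - 100
x1 = (-b - sqrt(b * b - 4 * a * c)) / (2 * a)
x2 = (-b + sqrt(b * b - 4 * a * c)) / (2 * a)
nx = x1 if not isclose(x1, px) else x2   # discard the root we started from
ny = py + m_out * (nx - px)

# The next impact point must still lie on the ellipse.
assert isclose(4 * nx * nx + ny * ny, 100.0, rel_tol=1e-6)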
'''simple docstring'''
import subprocess
import sys
from transformers import BertConfig, BertModel, BertTokenizer, pipeline
from transformers.testing_utils import TestCasePlus, require_torch
class __magic_name__ ( _UpperCamelCase ):
@require_torch
def __lowercase ( self : Tuple ):
# this test is a bit tricky since TRANSFORMERS_OFFLINE can only be changed before
# `transformers` is loaded, and it's too late for inside pytest - so we are changing it
# while running an external program
# python one-liner segments
# this must be loaded before socket.socket is monkey-patched
_a : Optional[int] = '\nfrom transformers import BertConfig, BertModel, BertTokenizer, pipeline\n '
_a : List[str] = '\nmname = "hf-internal-testing/tiny-random-bert"\nBertConfig.from_pretrained(mname)\nBertModel.from_pretrained(mname)\nBertTokenizer.from_pretrained(mname)\npipe = pipeline(task="fill-mask", model=mname)\nprint("success")\n '
_a : Tuple = '\nimport socket\ndef offline_socket(*args, **kwargs): raise RuntimeError("Offline mode is enabled, we shouldn\'t access internet")\nsocket.socket = offline_socket\n '
# Force fetching the files so that we can use the cache
_a : List[Any] = 'hf-internal-testing/tiny-random-bert'
BertConfig.from_pretrained(_UpperCAmelCase )
BertModel.from_pretrained(_UpperCAmelCase )
BertTokenizer.from_pretrained(_UpperCAmelCase )
pipeline(task='fill-mask' ,model=_UpperCAmelCase )
# baseline - just load from_pretrained with normal network
_a : Optional[int] = [sys.executable, '-c', '\n'.join([load, run, mock] )]
# should succeed
_a : Tuple = self.get_env()
# should succeed as TRANSFORMERS_OFFLINE=1 tells it to use local files
_a : int = '1'
_a : List[Any] = subprocess.run(_UpperCAmelCase ,env=_UpperCAmelCase ,check=_UpperCAmelCase ,capture_output=_UpperCAmelCase )
self.assertEqual(result.returncode ,0 ,result.stderr )
self.assertIn('success' ,result.stdout.decode() )
@require_torch
def __lowercase ( self : Any ):
# python one-liner segments
# this must be loaded before socket.socket is monkey-patched
_a : Dict = '\nfrom transformers import BertConfig, BertModel, BertTokenizer, pipeline\n '
_a : Optional[int] = '\nmname = "hf-internal-testing/tiny-random-bert"\nBertConfig.from_pretrained(mname)\nBertModel.from_pretrained(mname)\nBertTokenizer.from_pretrained(mname)\npipe = pipeline(task="fill-mask", model=mname)\nprint("success")\n '
_a : Optional[Any] = '\nimport socket\ndef offline_socket(*args, **kwargs): raise socket.error("Faking flaky internet")\nsocket.socket = offline_socket\n '
# Force fetching the files so that we can use the cache
_a : int = 'hf-internal-testing/tiny-random-bert'
BertConfig.from_pretrained(_UpperCAmelCase )
BertModel.from_pretrained(_UpperCAmelCase )
BertTokenizer.from_pretrained(_UpperCAmelCase )
pipeline(task='fill-mask' ,model=_UpperCAmelCase )
# baseline - just load from_pretrained with normal network
_a : Optional[int] = [sys.executable, '-c', '\n'.join([load, run, mock] )]
# should succeed
_a : str = self.get_env()
_a : Optional[Any] = subprocess.run(_UpperCAmelCase ,env=_UpperCAmelCase ,check=_UpperCAmelCase ,capture_output=_UpperCAmelCase )
self.assertEqual(result.returncode ,0 ,result.stderr )
self.assertIn('success' ,result.stdout.decode() )
@require_torch
def __lowercase ( self : List[str] ):
# this test is a bit tricky since TRANSFORMERS_OFFLINE can only be changed before
# `transformers` is loaded, and it's too late for inside pytest - so we are changing it
# while running an external program
# python one-liner segments
# this must be loaded before socket.socket is monkey-patched
_a : Union[str, Any] = '\nfrom transformers import BertConfig, BertModel, BertTokenizer\n '
_a : Optional[Any] = '\nmname = "hf-internal-testing/tiny-random-bert-sharded"\nBertConfig.from_pretrained(mname)\nBertModel.from_pretrained(mname)\nprint("success")\n '
_a : str = '\nimport socket\ndef offline_socket(*args, **kwargs): raise ValueError("Offline mode is enabled")\nsocket.socket = offline_socket\n '
# baseline - just load from_pretrained with normal network
_a : Optional[Any] = [sys.executable, '-c', '\n'.join([load, run] )]
# should succeed
_a : Dict = self.get_env()
_a : int = subprocess.run(_UpperCAmelCase ,env=_UpperCAmelCase ,check=_UpperCAmelCase ,capture_output=_UpperCAmelCase )
self.assertEqual(result.returncode ,0 ,result.stderr )
self.assertIn('success' ,result.stdout.decode() )
# next emulate no network
_a : List[Any] = [sys.executable, '-c', '\n'.join([load, mock, run] )]
# Doesn't fail anymore since the model is in the cache due to other tests, so commenting this.
# env["TRANSFORMERS_OFFLINE"] = "0"
# result = subprocess.run(cmd, env=env, check=False, capture_output=True)
# self.assertEqual(result.returncode, 1, result.stderr)
# should succeed as TRANSFORMERS_OFFLINE=1 tells it to use local files
_a : int = '1'
_a : Any = subprocess.run(_UpperCAmelCase ,env=_UpperCAmelCase ,check=_UpperCAmelCase ,capture_output=_UpperCAmelCase )
self.assertEqual(result.returncode ,0 ,result.stderr )
self.assertIn('success' ,result.stdout.decode() )
@require_torch
def __lowercase ( self : int ):
_a : Optional[Any] = '\nfrom transformers import pipeline\n '
_a : str = '\nmname = "hf-internal-testing/tiny-random-bert"\npipe = pipeline(model=mname)\n '
_a : List[str] = '\nimport socket\ndef offline_socket(*args, **kwargs): raise socket.error("Offline mode is enabled")\nsocket.socket = offline_socket\n '
_a : List[Any] = self.get_env()
_a : Dict = '1'
_a : Dict = [sys.executable, '-c', '\n'.join([load, mock, run] )]
_a : str = subprocess.run(_UpperCAmelCase ,env=_UpperCAmelCase ,check=_UpperCAmelCase ,capture_output=_UpperCAmelCase )
self.assertEqual(result.returncode ,1 ,result.stderr )
self.assertIn(
'You cannot infer task automatically within `pipeline` when using offline mode' ,result.stderr.decode().replace('\n' ,'' ) ,)
@require_torch
def __lowercase ( self : int ):
_a : Optional[int] = '\nfrom transformers import AutoModel\n '
_a : List[Any] = '\nmname = "hf-internal-testing/test_dynamic_model"\nAutoModel.from_pretrained(mname, trust_remote_code=True)\nprint("success")\n '
# baseline - just load from_pretrained with normal network
_a : Union[str, Any] = [sys.executable, '-c', '\n'.join([load, run] )]
# should succeed
_a : Tuple = self.get_env()
_a : List[str] = subprocess.run(_UpperCAmelCase ,env=_UpperCAmelCase ,check=_UpperCAmelCase ,capture_output=_UpperCAmelCase )
self.assertEqual(result.returncode ,0 ,result.stderr )
self.assertIn('success' ,result.stdout.decode() )
# should succeed as TRANSFORMERS_OFFLINE=1 tells it to use local files
_a : Optional[Any] = '1'
_a : Any = subprocess.run(_UpperCAmelCase ,env=_UpperCAmelCase ,check=_UpperCAmelCase ,capture_output=_UpperCAmelCase )
self.assertEqual(result.returncode ,0 ,result.stderr )
self.assertIn('success' ,result.stdout.decode() )
| 89 | 0 |
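All four tests above share one mechanism: spawn `sys.executable -c <one-liner>` with a modified environment and assert on the child's exit code and captured output. Stripped to its essentials (stdlib only; MY_FLAG is an arbitrary example variable):

import os
import subprocess
import sys

env = {**os.environ, 'MY_FLAG': '1'}
cmd = [sys.executable, '-c', "import os; print(os.environ['MY_FLAG'])"]
result = subprocess.run(cmd, env=env, check=False, capture_output=True)
assert result.returncode == 0, result.stderr
assert '1' in result.stdout.decode()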
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__lowerCamelCase = logging.get_logger(__name__)
__lowerCamelCase = {
"google/fnet-base": "https://huggingface.co/google/fnet-base/resolve/main/config.json",
"google/fnet-large": "https://huggingface.co/google/fnet-large/resolve/main/config.json"
# See all FNet models at https://huggingface.co/models?filter=fnet
}
class UpperCamelCase__( __A ):
lowerCAmelCase__ : Dict = 'fnet'
def __init__( self ,__UpperCAmelCase=3_20_00 ,__UpperCAmelCase=7_68 ,__UpperCAmelCase=12 ,__UpperCAmelCase=30_72 ,__UpperCAmelCase="gelu_new" ,__UpperCAmelCase=0.1 ,__UpperCAmelCase=5_12 ,__UpperCAmelCase=4 ,__UpperCAmelCase=0.0_2 ,__UpperCAmelCase=1e-12 ,__UpperCAmelCase=False ,__UpperCAmelCase=5_12 ,__UpperCAmelCase=3 ,__UpperCAmelCase=1 ,__UpperCAmelCase=2 ,**__UpperCAmelCase ,) -> Any:
super().__init__(pad_token_id=__UpperCAmelCase ,bos_token_id=__UpperCAmelCase ,eos_token_id=__UpperCAmelCase ,**__UpperCAmelCase )
A__ = vocab_size
A__ = max_position_embeddings
A__ = hidden_size
A__ = num_hidden_layers
A__ = intermediate_size
A__ = hidden_act
A__ = hidden_dropout_prob
A__ = initializer_range
A__ = type_vocab_size
A__ = layer_norm_eps
A__ = use_tpu_fourier_optimizations
A__ = tpu_short_seq_length
| 154 |
"""simple docstring"""
from collections import namedtuple
__lowerCamelCase = namedtuple("from_to", "from_ to")
__lowerCamelCase = {
"cubicmeter": from_to(1, 1),
"litre": from_to(0.0_0_1, 10_00),
"kilolitre": from_to(1, 1),
"gallon": from_to(0.0_0_4_5_4, 2_6_4.1_7_2),
"cubicyard": from_to(0.7_6_4_5_5, 1.3_0_7_9_5),
"cubicfoot": from_to(0.0_2_8, 3_5.3_1_4_7),
"cup": from_to(0.0_0_0_2_3_6_5_8_8, 4_2_2_6.7_5),
}
def UpperCAmelCase ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ):
"""simple docstring"""
if from_type not in METRIC_CONVERSION:
raise ValueError(
F'''Invalid \'from_type\' value: {from_type!r} Supported values are:\n'''
+ ', '.join(UpperCamelCase__ ) )
if to_type not in METRIC_CONVERSION:
raise ValueError(
F'''Invalid \'to_type\' value: {to_type!r}. Supported values are:\n'''
+ ', '.join(UpperCamelCase__ ) )
return value * METRIC_CONVERSION[from_type].from_ * METRIC_CONVERSION[to_type].to
if __name__ == "__main__":
import doctest
doctest.testmod()
| 154 | 1 |
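Every conversion above routes through cubic metres: multiply by the source unit's from_ factor to reach the base unit, then by the target unit's to factor. A quick worked check with two of the table's own factors:

# litre -> cubic metre -> US gallon, using the same two-step scheme.
litre_from, gallon_to = 0.001, 264.172

value_litres = 10.0
cubic_metres = value_litres * litre_from      # 0.01 m^3
gallons = cubic_metres * gallon_to            # ~2.642 gallons
print(round(gallons, 3))                      # 2.642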
'''simple docstring'''
import os
def _lowerCAmelCase ( _UpperCamelCase : Union[str, Any] ) -> Optional[Any]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =len(grid[0] )
_SCREAMING_SNAKE_CASE =len(_UpperCamelCase )
_SCREAMING_SNAKE_CASE =0
_SCREAMING_SNAKE_CASE =0
_SCREAMING_SNAKE_CASE =0
# Check vertically, horizontally, diagonally at the same time (only works
# for nxn grid)
for i in range(_UpperCamelCase ):
for j in range(n_rows - 3 ):
_SCREAMING_SNAKE_CASE =grid[j][i] * grid[j + 1][i] * grid[j + 2][i] * grid[j + 3][i]
_SCREAMING_SNAKE_CASE =grid[i][j] * grid[i][j + 1] * grid[i][j + 2] * grid[i][j + 3]
# Left-to-right diagonal (\) product
if i < n_columns - 3:
_SCREAMING_SNAKE_CASE =(
grid[i][j]
* grid[i + 1][j + 1]
* grid[i + 2][j + 2]
* grid[i + 3][j + 3]
)
# Right-to-left diagonal(/) product
if i > 2:
_SCREAMING_SNAKE_CASE =(
grid[i][j]
* grid[i - 1][j + 1]
* grid[i - 2][j + 2]
* grid[i - 3][j + 3]
)
_SCREAMING_SNAKE_CASE =max(
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
if max_product > largest:
_SCREAMING_SNAKE_CASE =max_product
return largest
def _lowerCAmelCase ( ) -> Any:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =[]
with open(os.path.dirname(__file__ ) + '/grid.txt' ) as file:
for line in file:
grid.append(line.strip('\n' ).split(' ' ) )
_SCREAMING_SNAKE_CASE =[[int(_UpperCamelCase ) for i in grid[j]] for j in range(len(_UpperCamelCase ) )]
return largest_product(_UpperCamelCase )
if __name__ == "__main__":
print(solution())
| 47 |
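The scan above checks rows, columns, and both diagonals in one pass. The row/column part of the idea, verified on a small inline grid (diagonals omitted to keep the sketch short):

grid = [
    [1, 2, 3, 4],
    [5, 6, 7, 8],
    [9, 1, 2, 3],
    [4, 5, 6, 7],
]
best = 0
n = len(grid)
for i in range(n):
    for j in range(n - 3):
        best = max(
            best,
            grid[i][j] * grid[i][j + 1] * grid[i][j + 2] * grid[i][j + 3],  # row run
            grid[j][i] * grid[j + 1][i] * grid[j + 2][i] * grid[j + 3][i],  # column run
        )
print(best)  # 1680, from the row 5*6*7*8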
'''simple docstring'''
from __future__ import annotations
def lowerCAmelCase_ ( _lowerCamelCase: list[int] , _lowerCamelCase: list[int] , _lowerCamelCase: int ):
__SCREAMING_SNAKE_CASE : List[Any] = list(range(len(_lowerCamelCase ) ) )
__SCREAMING_SNAKE_CASE : Optional[Any] = [v / w for v, w in zip(_lowerCamelCase , _lowerCamelCase )]
index.sort(key=lambda _lowerCamelCase : ratio[i] , reverse=_lowerCamelCase )
__SCREAMING_SNAKE_CASE : float = 0
__SCREAMING_SNAKE_CASE : list[float] = [0] * len(_lowerCamelCase )
for i in index:
if weight[i] <= capacity:
__SCREAMING_SNAKE_CASE : str = 1
max_value += value[i]
capacity -= weight[i]
else:
__SCREAMING_SNAKE_CASE : int = capacity / weight[i]
max_value += value[i] * capacity / weight[i]
break
return max_value, fractions
if __name__ == "__main__":
import doctest
doctest.testmod()
| 112 | 0 |
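The intent of the routine above is the classic fractional knapsack: sort items by value/weight ratio, take whole items while they fit, then a fraction of the next one. A self-contained sketch of that algorithm (names chosen for readability, not taken from the code above):

def fractional_knapsack(values, weights, capacity):
    # Visit item indices in order of value density, best first.
    order = sorted(range(len(values)), key=lambda i: values[i] / weights[i], reverse=True)
    total = 0.0
    fractions = [0.0] * len(values)
    for i in order:
        if weights[i] <= capacity:
            fractions[i] = 1.0
            total += values[i]
            capacity -= weights[i]
        else:
            fractions[i] = capacity / weights[i]
            total += values[i] * fractions[i]
            break
    return total, fractions

print(fractional_knapsack([60, 100, 120], [10, 20, 30], 50))
# (240.0, [1.0, 1.0, 0.6666666666666666])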
def lowerCAmelCase_ ( snake_case_ = 1000 ):
_A : List[Any] = 3
_A : Tuple = 0
while a < n:
if a % 3 == 0 or a % 5 == 0:
result += a
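# NOTE: the elif branch below can never run, since any multiple of 15 already satisfies a % 3 == 0.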
elif a % 15 == 0:
result -= a
a += 1
return result
if __name__ == "__main__":
print(f"""{solution() = }""")
| 343 |
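For reference, the same total can be cross-checked with a one-line comprehension; 233168 is the well-known answer for n = 1000:

n = 1000
assert sum(a for a in range(3, n) if a % 3 == 0 or a % 5 == 0) == 233168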
# DISCLAIMER: This file is strongly influenced by https://github.com/ermongroup/ddim
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import flax
import jax
import jax.numpy as jnp
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils_flax import (
CommonSchedulerState,
FlaxKarrasDiffusionSchedulers,
FlaxSchedulerMixin,
FlaxSchedulerOutput,
add_noise_common,
get_velocity_common,
)
@flax.struct.dataclass
class lowercase :
_a = 42
# setable values
_a = 42
_a = 42
_a = None
@classmethod
def a__ ( cls , _a , _a , _a ) -> Tuple:
return cls(common=_a , init_noise_sigma=_a , timesteps=_a )
@dataclass
class lowercase ( UpperCamelCase__ ):
_a = 42
class lowercase ( UpperCamelCase__,UpperCamelCase__ ):
_a = [e.name for e in FlaxKarrasDiffusionSchedulers]
_a = 42
@property
def a__ ( self ) -> Dict:
return True
@register_to_config
def __init__( self , _a = 1000 , _a = 0.0001 , _a = 0.02 , _a = "linear" , _a = None , _a = "fixed_small" , _a = True , _a = "epsilon" , _a = jnp.floataa , ) -> Tuple:
_A : Tuple = dtype
def a__ ( self , _a = None ) -> DDPMSchedulerState:
if common is None:
_A : Dict = CommonSchedulerState.create(self )
# standard deviation of the initial noise distribution
_A : Union[str, Any] = jnp.array(1.0 , dtype=self.dtype )
_A : Tuple = jnp.arange(0 , self.config.num_train_timesteps ).round()[::-1]
return DDPMSchedulerState.create(
common=_a , init_noise_sigma=_a , timesteps=_a , )
def a__ ( self , _a , _a , _a = None ) -> jnp.ndarray:
return sample
def a__ ( self , _a , _a , _a = () ) -> DDPMSchedulerState:
_A : Any = self.config.num_train_timesteps // num_inference_steps
# creates integer timesteps by multiplying by ratio
# rounding to avoid issues when num_inference_step is power of 3
_A : Dict = (jnp.arange(0 , _a ) * step_ratio).round()[::-1]
return state.replace(
num_inference_steps=_a , timesteps=_a , )
def a__ ( self , _a , _a , _a=None , _a=None ) -> Optional[int]:
_A : Optional[Any] = state.common.alphas_cumprod[t]
_A : int = jnp.where(t > 0 , state.common.alphas_cumprod[t - 1] , jnp.array(1.0 , dtype=self.dtype ) )
# For t > 0, compute predicted variance βt (see formulas (6) and (7) from https://arxiv.org/pdf/2006.11239.pdf)
# and sample from it to get previous sample
# x_{t-1} ~ N(pred_prev_sample, variance) == add variance to pred_sample
_A : List[str] = (1 - alpha_prod_t_prev) / (1 - alpha_prod_t) * state.common.betas[t]
if variance_type is None:
_A : Optional[Any] = self.config.variance_type
# hacks - were probably added for training stability
if variance_type == "fixed_small":
_A : Optional[Any] = jnp.clip(_a , a_min=1e-20 )
# for rl-diffuser https://arxiv.org/abs/2205.09991
elif variance_type == "fixed_small_log":
_A : Any = jnp.log(jnp.clip(_a , a_min=1e-20 ) )
elif variance_type == "fixed_large":
_A : Optional[Any] = state.common.betas[t]
elif variance_type == "fixed_large_log":
# Glide max_log
_A : Tuple = jnp.log(state.common.betas[t] )
elif variance_type == "learned":
return predicted_variance
elif variance_type == "learned_range":
_A : str = variance
_A : Union[str, Any] = state.common.betas[t]
_A : Tuple = (predicted_variance + 1) / 2
_A : List[str] = frac * max_log + (1 - frac) * min_log
return variance
def a__ ( self , _a , _a , _a , _a , _a = None , _a = True , ) -> Union[FlaxDDPMSchedulerOutput, Tuple]:
_A : Dict = timestep
if key is None:
_A : int = jax.random.PRNGKey(0 )
if model_output.shape[1] == sample.shape[1] * 2 and self.config.variance_type in ["learned", "learned_range"]:
_A , _A : List[str] = jnp.split(_a , sample.shape[1] , axis=1 )
else:
_A : int = None
# 1. compute alphas, betas
_A : int = state.common.alphas_cumprod[t]
_A : List[str] = jnp.where(t > 0 , state.common.alphas_cumprod[t - 1] , jnp.array(1.0 , dtype=self.dtype ) )
_A : Union[str, Any] = 1 - alpha_prod_t
_A : Optional[int] = 1 - alpha_prod_t_prev
# 2. compute predicted original sample from predicted noise also called
# "predicted x_0" of formula (15) from https://arxiv.org/pdf/2006.11239.pdf
if self.config.prediction_type == "epsilon":
_A : Dict = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5
elif self.config.prediction_type == "sample":
_A : Optional[int] = model_output
elif self.config.prediction_type == "v_prediction":
_A : Any = (alpha_prod_t**0.5) * sample - (beta_prod_t**0.5) * model_output
else:
raise ValueError(
F'''prediction_type given as {self.config.prediction_type} must be one of `epsilon`, `sample` '''
""" for the FlaxDDPMScheduler.""" )
# 3. Clip "predicted x_0"
if self.config.clip_sample:
_A : Union[str, Any] = jnp.clip(_a , -1 , 1 )
# 4. Compute coefficients for pred_original_sample x_0 and current sample x_t
# See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
_A : List[Any] = (alpha_prod_t_prev ** 0.5 * state.common.betas[t]) / beta_prod_t
_A : Dict = state.common.alphas[t] ** 0.5 * beta_prod_t_prev / beta_prod_t
# 5. Compute predicted previous sample µ_t
# See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
_A : int = pred_original_sample_coeff * pred_original_sample + current_sample_coeff * sample
# 6. Add noise
def random_variance():
_A : Tuple = jax.random.split(_a , num=1 )
_A : Dict = jax.random.normal(_a , shape=model_output.shape , dtype=self.dtype )
return (self._get_variance(_a , _a , predicted_variance=_a ) ** 0.5) * noise
_A : int = jnp.where(t > 0 , random_variance() , jnp.zeros(model_output.shape , dtype=self.dtype ) )
_A : Union[str, Any] = pred_prev_sample + variance
if not return_dict:
return (pred_prev_sample, state)
return FlaxDDPMSchedulerOutput(prev_sample=_a , state=_a )
def a__ ( self , _a , _a , _a , _a , ) -> jnp.ndarray:
return add_noise_common(state.common , _a , _a , _a )
def a__ ( self , _a , _a , _a , _a , ) -> jnp.ndarray:
return get_velocity_common(state.common , _a , _a , _a )
def __len__( self ) -> List[Any]:
return self.config.num_train_timesteps
| 343 | 1 |
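The step method above mixes the model's x0 estimate with the current sample using the DDPM posterior-mean coefficients (Eq. (7) in Ho et al., 2020). Just that mixing step in NumPy, with a toy schedule rather than the scheduler's real defaults:

import numpy as np

betas = np.linspace(1e-4, 0.02, 10)      # toy linear beta schedule
alphas = 1.0 - betas
alphas_cumprod = np.cumprod(alphas)

t = 5
alpha_prod_t = alphas_cumprod[t]
alpha_prod_t_prev = alphas_cumprod[t - 1] if t > 0 else 1.0
beta_prod_t = 1.0 - alpha_prod_t
beta_prod_t_prev = 1.0 - alpha_prod_t_prev

# Coefficients on the predicted x0 and on the current sample x_t.
coef_x0 = (alpha_prod_t_prev ** 0.5 * betas[t]) / beta_prod_t
coef_xt = (alphas[t] ** 0.5) * beta_prod_t_prev / beta_prod_t

pred_original_sample = np.zeros(4)       # stand-in for the model's x0 estimate
sample = np.ones(4)                      # stand-in for x_t
pred_prev_sample = coef_x0 * pred_original_sample + coef_xt * sample
print(pred_prev_sample)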
'''simple docstring'''
from __future__ import annotations
class a__ :
def __init__( self : List[Any] , a : Tuple ):
"""simple docstring"""
__lowerCamelCase = order
# a_{0} ... a_{k}
__lowerCamelCase = [1.0] + [0.0] * order
# b_{0} ... b_{k}
__lowerCamelCase = [1.0] + [0.0] * order
# x[n-1] ... x[n-k]
__lowerCamelCase = [0.0] * self.order
# y[n-1] ... y[n-k]
__lowerCamelCase = [0.0] * self.order
def SCREAMING_SNAKE_CASE__ ( self : List[Any] , a : Dict , a : Optional[int] ):
"""simple docstring"""
if len(snake_case_ ) < self.order:
__lowerCamelCase = [1.0, *a_coeffs]
if len(snake_case_ ) != self.order + 1:
__lowerCamelCase = (
f"""Expected a_coeffs to have {self.order + 1} elements """
f"""for {self.order}-order filter, got {len(snake_case_ )}"""
)
raise ValueError(snake_case_ )
if len(snake_case_ ) != self.order + 1:
__lowerCamelCase = (
f"""Expected b_coeffs to have {self.order + 1} elements """
f"""for {self.order}-order filter, got {len(snake_case_ )}"""
)
raise ValueError(snake_case_ )
__lowerCamelCase = a_coeffs
__lowerCamelCase = b_coeffs
def SCREAMING_SNAKE_CASE__ ( self : List[str] , a : int ):
"""simple docstring"""
__lowerCamelCase = 0.0
# Start at index 1 and do index 0 at the end.
for i in range(1 , self.order + 1 ):
result += (
self.b_coeffs[i] * self.input_history[i - 1]
- self.a_coeffs[i] * self.output_history[i - 1]
)
__lowerCamelCase = (result + self.b_coeffs[0] * sample) / self.a_coeffs[0]
__lowerCamelCase = self.input_history[:-1]
__lowerCamelCase = self.output_history[:-1]
__lowerCamelCase = sample
__lowerCamelCase = result
return result
| 67 |
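To see the difference equation above in action: with b = [0.5, 0.5] and a = [1.0, 0.0], a first-order filter is a two-tap moving average, so a unit step settles at 1.0. A hand-rolled check of the same recurrence (kept independent of the class's internal names):

# y[n] = (b0*x[n] + b1*x[n-1] - a1*y[n-1]) / a0
b0, b1 = 0.5, 0.5
a0, a1 = 1.0, 0.0
x_prev = y_prev = 0.0
for x in [1.0, 1.0, 1.0, 1.0]:           # unit step input
    y = (b0 * x + b1 * x_prev - a1 * y_prev) / a0
    x_prev, y_prev = x, y
print(y)  # 1.0 once the transient has passed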
"""simple docstring"""
import json
import unittest
import numpy as np
from huggingface_hub import hf_hub_download
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from transformers import OneFormerImageProcessor
from transformers.models.oneformer.image_processing_oneformer import binary_mask_to_rle
from transformers.models.oneformer.modeling_oneformer import OneFormerForUniversalSegmentationOutput
if is_vision_available():
from PIL import Image
def lowercase (_lowerCAmelCase , _lowerCAmelCase="shi-labs/oneformer_demo" ):
with open(hf_hub_download(_lowerCAmelCase , _lowerCAmelCase , repo_type="""dataset""" ) , """r""" ) as f:
__lowerCAmelCase = json.load(_lowerCAmelCase )
__lowerCAmelCase = {}
__lowerCAmelCase = []
__lowerCAmelCase = []
for key, info in class_info.items():
__lowerCAmelCase = info["""name"""]
class_names.append(info["""name"""] )
if info["isthing"]:
thing_ids.append(int(_lowerCAmelCase ) )
__lowerCAmelCase = thing_ids
__lowerCAmelCase = class_names
return metadata
class lowerCAmelCase_ ( unittest.TestCase ):
'''simple docstring'''
def __init__( self , snake_case_ , snake_case_=7 , snake_case_=3 , snake_case_=30 , snake_case_=400 , snake_case_=None , snake_case_=True , snake_case_=True , snake_case_=[0.5, 0.5, 0.5] , snake_case_=[0.5, 0.5, 0.5] , snake_case_=10 , snake_case_=False , snake_case_=255 , snake_case_="shi-labs/oneformer_demo" , snake_case_="ade20k_panoptic.json" , snake_case_=10 , ) -> Union[str, Any]:
__lowerCAmelCase = parent
__lowerCAmelCase = batch_size
__lowerCAmelCase = num_channels
__lowerCAmelCase = min_resolution
__lowerCAmelCase = max_resolution
__lowerCAmelCase = do_resize
__lowerCAmelCase = {"""shortest_edge""": 32, """longest_edge""": 1_333} if size is None else size
__lowerCAmelCase = do_normalize
__lowerCAmelCase = image_mean
__lowerCAmelCase = image_std
__lowerCAmelCase = class_info_file
__lowerCAmelCase = prepare_metadata(snake_case_ , snake_case_ )
__lowerCAmelCase = num_text
__lowerCAmelCase = repo_path
# for the post_process_functions
__lowerCAmelCase = 2
__lowerCAmelCase = 10
__lowerCAmelCase = 10
__lowerCAmelCase = 3
__lowerCAmelCase = 4
__lowerCAmelCase = num_labels
__lowerCAmelCase = do_reduce_labels
__lowerCAmelCase = ignore_index
def A__ ( self ) -> Any:
return {
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"num_labels": self.num_labels,
"do_reduce_labels": self.do_reduce_labels,
"ignore_index": self.ignore_index,
"class_info_file": self.class_info_file,
"metadata": self.metadata,
"num_text": self.num_text,
}
def A__ ( self , snake_case_ , snake_case_=False ) -> Dict:
if not batched:
__lowerCAmelCase = image_inputs[0]
if isinstance(snake_case_ , Image.Image ):
__lowerCAmelCase , __lowerCAmelCase = image.size
else:
__lowerCAmelCase , __lowerCAmelCase = image.shape[1], image.shape[2]
if w < h:
__lowerCAmelCase = int(self.size["""shortest_edge"""] * h / w )
__lowerCAmelCase = self.size["""shortest_edge"""]
elif w > h:
__lowerCAmelCase = self.size["""shortest_edge"""]
__lowerCAmelCase = int(self.size["""shortest_edge"""] * w / h )
else:
__lowerCAmelCase = self.size["""shortest_edge"""]
__lowerCAmelCase = self.size["""shortest_edge"""]
else:
__lowerCAmelCase = []
for image in image_inputs:
__lowerCAmelCase , __lowerCAmelCase = self.get_expected_values([image] )
expected_values.append((expected_height, expected_width) )
__lowerCAmelCase = max(snake_case_ , key=lambda snake_case_ : item[0] )[0]
__lowerCAmelCase = max(snake_case_ , key=lambda snake_case_ : item[1] )[1]
return expected_height, expected_width
def A__ ( self ) -> Tuple:
return OneFormerForUniversalSegmentationOutput(
# +1 for null class
class_queries_logits=torch.randn((self.batch_size, self.num_queries, self.num_classes + 1) ) , masks_queries_logits=torch.randn((self.batch_size, self.num_queries, self.height, self.width) ) , )
@require_torch
@require_vision
class lowerCAmelCase_ ( A__ , unittest.TestCase ):
'''simple docstring'''
_snake_case = OneFormerImageProcessor if (is_vision_available() and is_torch_available()) else None
# only for test_image_processing_common.test_image_proc_to_json_string
_snake_case = image_processing_class
def A__ ( self ) -> str:
__lowerCAmelCase = OneFormerImageProcessorTester(self )
@property
def A__ ( self ) -> Dict:
return self.image_processing_tester.prepare_image_processor_dict()
def A__ ( self ) -> Union[str, Any]:
__lowerCAmelCase = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(snake_case_ , """image_mean""" ) )
self.assertTrue(hasattr(snake_case_ , """image_std""" ) )
self.assertTrue(hasattr(snake_case_ , """do_normalize""" ) )
self.assertTrue(hasattr(snake_case_ , """do_resize""" ) )
self.assertTrue(hasattr(snake_case_ , """size""" ) )
self.assertTrue(hasattr(snake_case_ , """ignore_index""" ) )
self.assertTrue(hasattr(snake_case_ , """class_info_file""" ) )
self.assertTrue(hasattr(snake_case_ , """num_text""" ) )
self.assertTrue(hasattr(snake_case_ , """repo_path""" ) )
self.assertTrue(hasattr(snake_case_ , """metadata""" ) )
self.assertTrue(hasattr(snake_case_ , """do_reduce_labels""" ) )
def A__ ( self ) -> List[str]:
pass
def A__ ( self ) -> Union[str, Any]:
# Initialize image_processor
__lowerCAmelCase = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
__lowerCAmelCase = prepare_image_inputs(self.image_processing_tester , equal_resolution=snake_case_ )
for image in image_inputs:
self.assertIsInstance(snake_case_ , Image.Image )
# Test not batched input
__lowerCAmelCase = image_processor(image_inputs[0] , ["""semantic"""] , return_tensors="""pt""" ).pixel_values
__lowerCAmelCase , __lowerCAmelCase = self.image_processing_tester.get_expected_values(snake_case_ )
self.assertEqual(
encoded_images.shape , (1, self.image_processing_tester.num_channels, expected_height, expected_width) , )
# Test batched
__lowerCAmelCase , __lowerCAmelCase = self.image_processing_tester.get_expected_values(snake_case_ , batched=snake_case_ )
__lowerCAmelCase = image_processor(
snake_case_ , ["""semantic"""] * len(snake_case_ ) , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processing_tester.batch_size,
self.image_processing_tester.num_channels,
expected_height,
expected_width,
) , )
def A__ ( self ) -> List[str]:
# Initialize image_processor
__lowerCAmelCase = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
__lowerCAmelCase = prepare_image_inputs(self.image_processing_tester , equal_resolution=snake_case_ , numpify=snake_case_ )
for image in image_inputs:
self.assertIsInstance(snake_case_ , np.ndarray )
# Test not batched input
__lowerCAmelCase = image_processor(image_inputs[0] , ["""semantic"""] , return_tensors="""pt""" ).pixel_values
__lowerCAmelCase , __lowerCAmelCase = self.image_processing_tester.get_expected_values(snake_case_ )
self.assertEqual(
encoded_images.shape , (1, self.image_processing_tester.num_channels, expected_height, expected_width) , )
# Test batched
__lowerCAmelCase , __lowerCAmelCase = self.image_processing_tester.get_expected_values(snake_case_ , batched=snake_case_ )
__lowerCAmelCase = image_processor(
snake_case_ , ["""semantic"""] * len(snake_case_ ) , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processing_tester.batch_size,
self.image_processing_tester.num_channels,
expected_height,
expected_width,
) , )
def A__ ( self ) -> Tuple:
# Initialize image_processor
__lowerCAmelCase = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
__lowerCAmelCase = prepare_image_inputs(self.image_processing_tester , equal_resolution=snake_case_ , torchify=snake_case_ )
for image in image_inputs:
self.assertIsInstance(snake_case_ , torch.Tensor )
# Test not batched input
__lowerCAmelCase = image_processor(image_inputs[0] , ["""semantic"""] , return_tensors="""pt""" ).pixel_values
__lowerCAmelCase , __lowerCAmelCase = self.image_processing_tester.get_expected_values(snake_case_ )
self.assertEqual(
encoded_images.shape , (1, self.image_processing_tester.num_channels, expected_height, expected_width) , )
# Test batched
__lowerCAmelCase , __lowerCAmelCase = self.image_processing_tester.get_expected_values(snake_case_ , batched=snake_case_ )
__lowerCAmelCase = image_processor(
snake_case_ , ["""semantic"""] * len(snake_case_ ) , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processing_tester.batch_size,
self.image_processing_tester.num_channels,
expected_height,
expected_width,
) , )
def A__ ( self , snake_case_=False , snake_case_=False , snake_case_="np" ) -> Optional[Any]:
__lowerCAmelCase = self.image_processing_class(**self.image_processor_dict )
# prepare image and target
__lowerCAmelCase = self.image_processing_tester.num_labels
__lowerCAmelCase = None
__lowerCAmelCase = None
__lowerCAmelCase = prepare_image_inputs(self.image_processing_tester , equal_resolution=snake_case_ )
if with_segmentation_maps:
__lowerCAmelCase = num_labels
if is_instance_map:
__lowerCAmelCase = list(range(snake_case_ ) ) * 2
__lowerCAmelCase = dict(enumerate(snake_case_ ) )
__lowerCAmelCase = [
np.random.randint(0 , high * 2 , (img.size[1], img.size[0]) ).astype(np.uinta ) for img in image_inputs
]
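            # Labels are drawn from [0, 2 * num_labels) so that, when an instance map
            # is supplied, ids above num_labels exercise the instance-to-semantic remapping.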
if segmentation_type == "pil":
__lowerCAmelCase = [Image.fromarray(snake_case_ ) for annotation in annotations]
__lowerCAmelCase = image_processor(
snake_case_ , ["""semantic"""] * len(snake_case_ ) , snake_case_ , return_tensors="""pt""" , instance_id_to_semantic_id=snake_case_ , pad_and_return_pixel_mask=snake_case_ , )
return inputs
def A__ ( self ) -> List[str]:
pass
def A__ ( self ) -> Optional[Any]:
def common(snake_case_=False , snake_case_=None ):
__lowerCAmelCase = self.comm_get_image_processor_inputs(
with_segmentation_maps=snake_case_ , is_instance_map=snake_case_ , segmentation_type=snake_case_ )
__lowerCAmelCase = inputs["""mask_labels"""]
__lowerCAmelCase = inputs["""class_labels"""]
__lowerCAmelCase = inputs["""pixel_values"""]
__lowerCAmelCase = inputs["""text_inputs"""]
# check the batch_size
for mask_label, class_label, text_input in zip(snake_case_ , snake_case_ , snake_case_ ):
self.assertEqual(mask_label.shape[0] , class_label.shape[0] )
# this ensure padding has happened
self.assertEqual(mask_label.shape[1:] , pixel_values.shape[2:] )
self.assertEqual(len(snake_case_ ) , self.image_processing_tester.num_text )
common()
common(is_instance_map=snake_case_ )
common(is_instance_map=snake_case_ , segmentation_type="""pil""" )
common(is_instance_map=snake_case_ , segmentation_type="""pil""" )
def A__ ( self ) -> Optional[int]:
__lowerCAmelCase = np.zeros((20, 50) )
__lowerCAmelCase = 1
__lowerCAmelCase = 1
__lowerCAmelCase = 1
__lowerCAmelCase = binary_mask_to_rle(snake_case_ )
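        # binary_mask_to_rle returns alternating run lengths, beginning with the
        # count of background (zero) pixels.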
self.assertEqual(len(snake_case_ ) , 4 )
self.assertEqual(rle[0] , 21 )
self.assertEqual(rle[1] , 45 )
def A__ ( self ) -> Optional[Any]:
__lowerCAmelCase = self.image_processing_class(
num_labels=self.image_processing_tester.num_classes , max_seq_length=77 , task_seq_length=77 , class_info_file="""ade20k_panoptic.json""" , num_text=self.image_processing_tester.num_text , repo_path="""shi-labs/oneformer_demo""" , )
__lowerCAmelCase = self.image_processing_tester.get_fake_oneformer_outputs()
        __lowerCAmelCase = image_processor.post_process_semantic_segmentation(snake_case_ )
self.assertEqual(len(snake_case_ ) , self.image_processing_tester.batch_size )
self.assertEqual(
segmentation[0].shape , (
self.image_processing_tester.height,
self.image_processing_tester.width,
) , )
__lowerCAmelCase = [(1, 4) for i in range(self.image_processing_tester.batch_size )]
        __lowerCAmelCase = image_processor.post_process_semantic_segmentation(snake_case_ , target_sizes=snake_case_ )
self.assertEqual(segmentation[0].shape , target_sizes[0] )
def A__ ( self ) -> Union[str, Any]:
__lowerCAmelCase = self.image_processing_class(
num_labels=self.image_processing_tester.num_classes , max_seq_length=77 , task_seq_length=77 , class_info_file="""ade20k_panoptic.json""" , num_text=self.image_processing_tester.num_text , repo_path="""shi-labs/oneformer_demo""" , )
__lowerCAmelCase = self.image_processing_tester.get_fake_oneformer_outputs()
__lowerCAmelCase = image_processor.post_process_instance_segmentation(snake_case_ , threshold=0 )
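        # threshold=0 keeps every query prediction, so no segment is filtered out
        # by its score.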
self.assertTrue(len(snake_case_ ) == self.image_processing_tester.batch_size )
for el in segmentation:
self.assertTrue("""segmentation""" in el )
self.assertTrue("""segments_info""" in el )
self.assertEqual(type(el["""segments_info"""] ) , snake_case_ )
self.assertEqual(
el["""segmentation"""].shape , (self.image_processing_tester.height, self.image_processing_tester.width) )
def A__ ( self ) -> Union[str, Any]:
__lowerCAmelCase = self.image_processing_class(
num_labels=self.image_processing_tester.num_classes , max_seq_length=77 , task_seq_length=77 , class_info_file="""ade20k_panoptic.json""" , num_text=self.image_processing_tester.num_text , repo_path="""shi-labs/oneformer_demo""" , )
__lowerCAmelCase = self.image_processing_tester.get_fake_oneformer_outputs()
__lowerCAmelCase = image_processor.post_process_panoptic_segmentation(snake_case_ , threshold=0 )
self.assertTrue(len(snake_case_ ) == self.image_processing_tester.batch_size )
for el in segmentation:
self.assertTrue("""segmentation""" in el )
self.assertTrue("""segments_info""" in el )
self.assertEqual(type(el["""segments_info"""] ) , snake_case_ )
self.assertEqual(
el["""segmentation"""].shape , (self.image_processing_tester.height, self.image_processing_tester.width) )
| 301 | 0 |
"""simple docstring"""
import itertools
import os
from collections import Counter, defaultdict
from concurrent.futures import ThreadPoolExecutor, as_completed
import numpy as np
import datasets
from .execute import check_correctness
lowercase__ = '\\n@misc{chen2021evaluating,\n title={Evaluating Large Language Models Trained on Code},\n author={Mark Chen and Jerry Tworek and Heewoo Jun and Qiming Yuan \\nand Henrique Ponde de Oliveira Pinto and Jared Kaplan and Harri Edwards \\nand Yuri Burda and Nicholas Joseph and Greg Brockman and Alex Ray \\nand Raul Puri and Gretchen Krueger and Michael Petrov and Heidy Khlaaf \\nand Girish Sastry and Pamela Mishkin and Brooke Chan and Scott Gray \\nand Nick Ryder and Mikhail Pavlov and Alethea Power and Lukasz Kaiser \\nand Mohammad Bavarian and Clemens Winter and Philippe Tillet \\nand Felipe Petroski Such and Dave Cummings and Matthias Plappert \\nand Fotios Chantzis and Elizabeth Barnes and Ariel Herbert-Voss \\nand William Hebgen Guss and Alex Nichol and Alex Paino and Nikolas Tezak \\nand Jie Tang and Igor Babuschkin and Suchir Balaji and Shantanu Jain \\nand William Saunders and Christopher Hesse and Andrew N. Carr \\nand Jan Leike and Josh Achiam and Vedant Misra and Evan Morikawa \\nand Alec Radford and Matthew Knight and Miles Brundage and Mira Murati \\nand Katie Mayer and Peter Welinder and Bob McGrew and Dario Amodei \\nand Sam McCandlish and Ilya Sutskever and Wojciech Zaremba},\n year={2021},\n eprint={2107.03374},\n archivePrefix={arXiv},\n primaryClass={cs.LG}\n}\n'
lowercase__ = '\\nThis metric implements the evaluation harness for the HumanEval problem solving dataset\ndescribed in the paper "Evaluating Large Language Models Trained on Code"\n(https://arxiv.org/abs/2107.03374).\n'
lowercase__ = '\nCalculates how good predictions are given some references, using certain scores\nArgs:\n    predictions: list of candidates to evaluate. Each candidate should be a list\n        of strings with several code candidates to solve the problem.\n    references: a list with a test for each prediction. Each test should evaluate the\n        correctness of a code candidate.\n    k: number of code candidates to consider in the evaluation (Default: [1, 10, 100])\n    num_workers: number of workers used to evaluate the candidate programs (Default: 4).\n    timeout: how long (in seconds) each candidate program may run before it is\n        considered failing (Default: 3.0).\nReturns:\n    pass_at_k: dict with pass rates for each k\n    results: dict with granular results of each unittest\nExamples:\n    >>> code_eval = datasets.load_metric("code_eval")\n    >>> test_cases = ["assert add(2,3)==5"]\n    >>> candidates = [["def add(a,b): return a*b", "def add(a, b): return a+b"]]\n    >>> pass_at_k, results = code_eval.compute(references=test_cases, predictions=candidates, k=[1, 2])\n    >>> print(pass_at_k)\n    {\'pass@1\': 0.5, \'pass@2\': 1.0}\n'
lowercase__ = '\n################################################################################\n !!!WARNING!!!\n################################################################################\nThe "code_eval" metric executes untrusted model-generated code in Python.\nAlthough it is highly unlikely that model-generated code will do something\novertly malicious in response to this test suite, model-generated code may act\ndestructively due to a lack of model capability or alignment.\nUsers are strongly encouraged to sandbox this evaluation suite so that it\ndoes not perform destructive actions on their host or network. For more\ninformation on how OpenAI sandboxes its code, see the paper "Evaluating Large\nLanguage Models Trained on Code" (https://arxiv.org/abs/2107.03374).\n\nOnce you have read this disclaimer and taken appropriate precautions,\nset the environment variable HF_ALLOW_CODE_EVAL="1". Within Python you can do this\nwith:\n\n>>> import os\n>>> os.environ["HF_ALLOW_CODE_EVAL"] = "1"\n\n################################################################################\\n'
lowercase__ = 'The MIT License\n\nCopyright (c) OpenAI (https://openai.com)\n\nPermission is hereby granted, free of charge, to any person obtaining a copy\nof this software and associated documentation files (the "Software"), to deal\nin the Software without restriction, including without limitation the rights\nto use, copy, modify, merge, publish, distribute, sublicense, and/or sell\ncopies of the Software, and to permit persons to whom the Software is\nfurnished to do so, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in\nall copies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\nLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\nOUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN\nTHE SOFTWARE.'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class __lowerCamelCase ( datasets.Metric ):
'''simple docstring'''
def lowerCamelCase ( self : int ):
return datasets.MetricInfo(
# This is the description that will appear on the metrics page.
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"predictions": datasets.Sequence(datasets.Value("string" ) ),
"references": datasets.Value("string" ),
} ) , homepage="https://github.com/openai/human-eval" , codebase_urls=["https://github.com/openai/human-eval"] , reference_urls=["https://github.com/openai/human-eval"] , license=_LICENSE , )
def lowerCamelCase ( self : Optional[Any] , a_ : Tuple , a_ : Tuple , a_ : str=[1, 10, 1_00] , a_ : int=4 , a_ : List[Any]=3.0 ):
if os.getenv("HF_ALLOW_CODE_EVAL" , 0 ) != "1":
raise ValueError(_WARNING )
if os.name == "nt":
raise NotImplementedError("This metric is currently not supported on Windows." )
with ThreadPoolExecutor(max_workers=a_ ) as executor:
lowerCAmelCase_ : List[Any] = []
lowerCAmelCase_ : List[str] = Counter()
lowerCAmelCase_ : Optional[Any] = 0
lowerCAmelCase_ : str = defaultdict(a_ )
for task_id, (candidates, test_case) in enumerate(zip(a_ , a_ ) ):
for candidate in candidates:
lowerCAmelCase_ : Optional[Any] = candidate + '\n' + test_case
lowerCAmelCase_ : Any = (test_program, timeout, task_id, completion_id[task_id])
lowerCAmelCase_ : Tuple = executor.submit(a_ , *a_ )
futures.append(a_ )
completion_id[task_id] += 1
n_samples += 1
for future in as_completed(a_ ):
lowerCAmelCase_ : List[str] = future.result()
results[result["task_id"]].append((result["completion_id"], result) )
        lowerCAmelCase_ , lowerCAmelCase_ : List[Any] = [], []
for result in results.values():
result.sort()
lowerCAmelCase_ : str = [r[1]['passed'] for r in result]
total.append(len(a_ ) )
correct.append(sum(a_ ) )
lowerCAmelCase_ : List[Any] = np.array(a_ )
lowerCAmelCase_ : int = np.array(a_ )
lowerCAmelCase_ : Dict = k
lowerCAmelCase_ : List[str] = {f'''pass@{k}''': estimate_pass_at_k(a_ , a_ , a_ ).mean() for k in ks if (total >= k).all()}
return pass_at_k, results
def __lowerCamelCase ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ) -> Union[str, Any]:
"""simple docstring"""
def estimator(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase ) -> float:
if n - c < k:
return 1.0
return 1.0 - np.prod(1.0 - k / np.arange(n - c + 1 , n + 1 ) )
if isinstance(_UpperCAmelCase , _UpperCAmelCase ):
lowerCAmelCase_ : List[Any] = itertools.repeat(_UpperCAmelCase , len(_UpperCAmelCase ) )
else:
assert len(_UpperCAmelCase ) == len(_UpperCAmelCase )
lowerCAmelCase_ : int = iter(_UpperCAmelCase )
return np.array([estimator(int(_UpperCAmelCase ) , int(_UpperCAmelCase ) , _UpperCAmelCase ) for n, c in zip(_UpperCAmelCase , _UpperCAmelCase )] )
| 359 |
"""simple docstring"""
import argparse
from torch import nn
# transformers_old should correspond to branch `save_old_prophetnet_model_structure` here
# original prophetnet_checkpoints are saved under `patrickvonplaten/..._old` respectively
from transformers_old.modeling_prophetnet import (
ProphetNetForConditionalGeneration as ProphetNetForConditionalGenerationOld,
)
from transformers_old.modeling_xlm_prophetnet import (
XLMProphetNetForConditionalGeneration as XLMProphetNetForConditionalGenerationOld,
)
from transformers import ProphetNetForConditionalGeneration, XLMProphetNetForConditionalGeneration, logging
lowercase__ = logging.get_logger(__name__)
logging.set_verbosity_info()
def __lowerCamelCase ( __UpperCamelCase , __UpperCamelCase ) -> Tuple:
"""simple docstring"""
if "xprophetnet" in prophetnet_checkpoint_path:
lowerCAmelCase_ : List[str] = XLMProphetNetForConditionalGenerationOld.from_pretrained(__UpperCamelCase )
lowerCAmelCase_ , lowerCAmelCase_ : Tuple = XLMProphetNetForConditionalGeneration.from_pretrained(
__UpperCamelCase , output_loading_info=__UpperCamelCase )
else:
lowerCAmelCase_ : List[str] = ProphetNetForConditionalGenerationOld.from_pretrained(__UpperCamelCase )
lowerCAmelCase_ , lowerCAmelCase_ : Optional[Any] = ProphetNetForConditionalGeneration.from_pretrained(
__UpperCamelCase , output_loading_info=__UpperCamelCase )
lowerCAmelCase_ : List[str] = ["key_proj", "value_proj", "query_proj"]
lowerCAmelCase_ : Tuple = {
"self_attn": "ngram_self_attn",
"cross_attn": "encoder_attn",
"cross_attn_layer_norm": "encoder_attn_layer_norm",
"feed_forward_layer_norm": "final_layer_norm",
"feed_forward": "",
"intermediate": "fc1",
"output": "fc2",
"key_proj": "k_proj",
"query_proj": "q_proj",
"value_proj": "v_proj",
"word_embeddings": "embed_tokens",
"embeddings_layer_norm": "emb_layer_norm",
"relative_pos_embeddings": "relative_linear",
"ngram_embeddings": "ngram_input_embed",
"position_embeddings": "embed_positions",
}
for key in loading_info["missing_keys"]:
lowerCAmelCase_ : Dict = key.split("." )
if attributes[0] == "lm_head":
lowerCAmelCase_ : int = prophet
lowerCAmelCase_ : int = prophet_old
else:
lowerCAmelCase_ : str = prophet.prophetnet
lowerCAmelCase_ : int = prophet_old.model
lowerCAmelCase_ : Optional[int] = False
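        # Walk the dotted attribute path on both models in parallel, copying the
        # weights across once the leaf parameter is reached.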
for attribute in attributes:
if attribute in mapping:
lowerCAmelCase_ : Tuple = mapping[attribute]
if not hasattr(__UpperCamelCase , __UpperCamelCase ) and len(__UpperCamelCase ) > 0:
lowerCAmelCase_ : Optional[Any] = attribute
elif hasattr(__UpperCamelCase , __UpperCamelCase ):
lowerCAmelCase_ : Optional[Any] = attribute
if attribute == "weight":
assert old_model.weight.shape == model.weight.shape, "Shapes have to match!"
lowerCAmelCase_ : str = old_model.weight
logger.info(f'''{attribute} is initialized.''' )
lowerCAmelCase_ : Union[str, Any] = True
break
elif attribute == "bias":
assert old_model.bias.shape == model.bias.shape, "Shapes have to match!"
lowerCAmelCase_ : Tuple = old_model.bias
logger.info(f'''{attribute} is initialized''' )
lowerCAmelCase_ : Optional[int] = True
break
elif attribute in special_keys and hasattr(__UpperCamelCase , "in_proj_weight" ):
lowerCAmelCase_ : List[Any] = old_model.in_proj_weight.shape[0] // 3
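                # The old fused attention stacks q/k/v along dim 0, so each
                # projection occupies one third of in_proj_weight.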
lowerCAmelCase_ : List[str] = getattr(__UpperCamelCase , __UpperCamelCase )
                assert param.weight.shape == old_model.in_proj_weight[:embed_dim, :].shape, "Shapes have to match"
                assert param.bias.shape == old_model.in_proj_bias[:embed_dim].shape, "Shapes have to match"
if attribute == "query_proj":
lowerCAmelCase_ : List[str] = nn.Parameter(old_model.in_proj_weight[:embed_dim, :] )
lowerCAmelCase_ : List[Any] = nn.Parameter(old_model.in_proj_bias[:embed_dim] )
elif attribute == "key_proj":
lowerCAmelCase_ : Union[str, Any] = nn.Parameter(old_model.in_proj_weight[embed_dim : 2 * embed_dim, :] )
lowerCAmelCase_ : List[Any] = nn.Parameter(old_model.in_proj_bias[embed_dim : 2 * embed_dim] )
elif attribute == "value_proj":
lowerCAmelCase_ : Tuple = nn.Parameter(old_model.in_proj_weight[2 * embed_dim :, :] )
lowerCAmelCase_ : str = nn.Parameter(old_model.in_proj_bias[2 * embed_dim :] )
lowerCAmelCase_ : List[str] = True
break
elif attribute == "position_embeddings":
assert (
model.position_embeddings.weight.shape[-1] == old_model.embed_positions.weight.shape[-1]
), "Hidden size has to match"
assert model.position_embeddings.weight.shape[0] == 512, "We want 512 position_embeddings."
lowerCAmelCase_ : Any = nn.Parameter(old_model.embed_positions.weight[:512, :] )
lowerCAmelCase_ : int = True
break
if attribute.isdigit():
lowerCAmelCase_ : Tuple = model[int(__UpperCamelCase )]
lowerCAmelCase_ : Tuple = old_model[int(__UpperCamelCase )]
else:
lowerCAmelCase_ : Optional[int] = getattr(__UpperCamelCase , __UpperCamelCase )
if old_attribute == "":
lowerCAmelCase_ : Tuple = old_model
else:
if not hasattr(__UpperCamelCase , __UpperCamelCase ):
raise ValueError(f'''{old_model} does not have {old_attribute}''' )
lowerCAmelCase_ : List[Any] = getattr(__UpperCamelCase , __UpperCamelCase )
if not is_key_init:
raise ValueError(f'''{key} was not correctly initialized!''' )
print(f'''Saving model to {pytorch_dump_folder_path}''' )
prophet.save_pretrained(__UpperCamelCase )
if __name__ == "__main__":
lowercase__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--prophetnet_checkpoint_path""", default=None, type=str, required=True, help="""Path the official PyTorch dump."""
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
lowercase__ = parser.parse_args()
convert_prophetnet_checkpoint_to_pytorch(args.prophetnet_checkpoint_path, args.pytorch_dump_folder_path)
| 161 | 0 |
"""simple docstring"""
import math
import torch
from torch import nn
from ..configuration_utils import ConfigMixin, register_to_config
from .attention_processor import Attention
from .embeddings import get_timestep_embedding
from .modeling_utils import ModelMixin
class UpperCamelCase__( __A , __A ):
@register_to_config
def __init__( self ,__UpperCAmelCase = 1_28 ,__UpperCAmelCase = 2_56 ,__UpperCAmelCase = 2_0_0_0.0 ,__UpperCAmelCase = 7_68 ,__UpperCAmelCase = 12 ,__UpperCAmelCase = 12 ,__UpperCAmelCase = 64 ,__UpperCAmelCase = 20_48 ,__UpperCAmelCase = 0.1 ,) -> Optional[Any]:
super().__init__()
A__ = nn.Sequential(
nn.Linear(__UpperCAmelCase ,d_model * 4 ,bias=__UpperCAmelCase ) ,nn.SiLU() ,nn.Linear(d_model * 4 ,d_model * 4 ,bias=__UpperCAmelCase ) ,nn.SiLU() ,)
A__ = nn.Embedding(__UpperCAmelCase ,__UpperCAmelCase )
A__ = False
A__ = nn.Linear(__UpperCAmelCase ,__UpperCAmelCase ,bias=__UpperCAmelCase )
A__ = nn.Dropout(p=__UpperCAmelCase )
A__ = nn.ModuleList()
for lyr_num in range(__UpperCAmelCase ):
# FiLM conditional T5 decoder
A__ = DecoderLayer(d_model=__UpperCAmelCase ,d_kv=__UpperCAmelCase ,num_heads=__UpperCAmelCase ,d_ff=__UpperCAmelCase ,dropout_rate=__UpperCAmelCase )
self.decoders.append(__UpperCAmelCase )
A__ = TaLayerNorm(__UpperCAmelCase )
A__ = nn.Dropout(p=__UpperCAmelCase )
A__ = nn.Linear(__UpperCAmelCase ,__UpperCAmelCase ,bias=__UpperCAmelCase )
def snake_case__ ( self ,__UpperCAmelCase ,__UpperCAmelCase ) -> Any:
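        # Outer product of the query and key padding masks yields a
        # (batch, query_len, key_len) attention mask; unsqueeze(-3) adds a head axis.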
A__ = torch.mul(query_input.unsqueeze(-1 ) ,key_input.unsqueeze(-2 ) )
return mask.unsqueeze(-3 )
def snake_case__ ( self ,__UpperCAmelCase ,__UpperCAmelCase ,__UpperCAmelCase ) -> List[Any]:
A__ , A__ , A__ = decoder_input_tokens.shape
assert decoder_noise_time.shape == (batch,)
# decoder_noise_time is in [0, 1), so rescale to expected timing range.
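        # A sinusoidal timestep embedding is then projected by an MLP to the FiLM
        # conditioning width of 4 * d_model.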
A__ = get_timestep_embedding(
decoder_noise_time * self.config.max_decoder_noise_time ,embedding_dim=self.config.d_model ,max_period=self.config.max_decoder_noise_time ,).to(dtype=self.dtype )
A__ = self.conditioning_emb(__UpperCAmelCase ).unsqueeze(1 )
assert conditioning_emb.shape == (batch, 1, self.config.d_model * 4)
A__ = decoder_input_tokens.shape[1]
# If we want to use relative positions for audio context, we can just offset
# this sequence by the length of encodings_and_masks.
A__ = torch.broadcast_to(
torch.arange(__UpperCAmelCase ,device=decoder_input_tokens.device ) ,(batch, seq_length) ,)
A__ = self.position_encoding(__UpperCAmelCase )
A__ = self.continuous_inputs_projection(__UpperCAmelCase )
inputs += position_encodings
A__ = self.dropout(__UpperCAmelCase )
# decoder: No padding present.
A__ = torch.ones(
decoder_input_tokens.shape[:2] ,device=decoder_input_tokens.device ,dtype=inputs.dtype )
# Translate encoding masks to encoder-decoder masks.
A__ = [(x, self.encoder_decoder_mask(__UpperCAmelCase ,__UpperCAmelCase )) for x, y in encodings_and_masks]
# cross attend style: concat encodings
A__ = torch.cat([x[0] for x in encodings_and_encdec_masks] ,dim=1 )
A__ = torch.cat([x[1] for x in encodings_and_encdec_masks] ,dim=-1 )
for lyr in self.decoders:
A__ = lyr(
__UpperCAmelCase ,conditioning_emb=__UpperCAmelCase ,encoder_hidden_states=__UpperCAmelCase ,encoder_attention_mask=__UpperCAmelCase ,)[0]
A__ = self.decoder_norm(__UpperCAmelCase )
A__ = self.post_dropout(__UpperCAmelCase )
A__ = self.spec_out(__UpperCAmelCase )
return spec_out
class UpperCamelCase__( nn.Module ):
def __init__( self ,__UpperCAmelCase ,__UpperCAmelCase ,__UpperCAmelCase ,__UpperCAmelCase ,__UpperCAmelCase ,__UpperCAmelCase=1e-6 ) -> Optional[Any]:
super().__init__()
A__ = nn.ModuleList()
# cond self attention: layer 0
self.layer.append(
TaLayerSelfAttentionCond(d_model=__UpperCAmelCase ,d_kv=__UpperCAmelCase ,num_heads=__UpperCAmelCase ,dropout_rate=__UpperCAmelCase ) )
# cross attention: layer 1
self.layer.append(
TaLayerCrossAttention(
d_model=__UpperCAmelCase ,d_kv=__UpperCAmelCase ,num_heads=__UpperCAmelCase ,dropout_rate=__UpperCAmelCase ,layer_norm_epsilon=__UpperCAmelCase ,) )
# Film Cond MLP + dropout: last layer
self.layer.append(
TaLayerFFCond(d_model=__UpperCAmelCase ,d_ff=__UpperCAmelCase ,dropout_rate=__UpperCAmelCase ,layer_norm_epsilon=__UpperCAmelCase ) )
def snake_case__ ( self ,__UpperCAmelCase ,__UpperCAmelCase=None ,__UpperCAmelCase=None ,__UpperCAmelCase=None ,__UpperCAmelCase=None ,__UpperCAmelCase=None ,) -> Any:
A__ = self.layer[0](
__UpperCAmelCase ,conditioning_emb=__UpperCAmelCase ,attention_mask=__UpperCAmelCase ,)
if encoder_hidden_states is not None:
A__ = torch.where(encoder_attention_mask > 0 ,0 ,-1e10 ).to(
encoder_hidden_states.dtype )
A__ = self.layer[1](
__UpperCAmelCase ,key_value_states=__UpperCAmelCase ,attention_mask=__UpperCAmelCase ,)
# Apply Film Conditional Feed Forward layer
A__ = self.layer[-1](__UpperCAmelCase ,__UpperCAmelCase )
return (hidden_states,)
class UpperCamelCase__( nn.Module ):
def __init__( self ,__UpperCAmelCase ,__UpperCAmelCase ,__UpperCAmelCase ,__UpperCAmelCase ) -> Union[str, Any]:
super().__init__()
A__ = TaLayerNorm(__UpperCAmelCase )
A__ = TaFiLMLayer(in_features=d_model * 4 ,out_features=__UpperCAmelCase )
A__ = Attention(query_dim=__UpperCAmelCase ,heads=__UpperCAmelCase ,dim_head=__UpperCAmelCase ,out_bias=__UpperCAmelCase ,scale_qk=__UpperCAmelCase )
A__ = nn.Dropout(__UpperCAmelCase )
def snake_case__ ( self ,__UpperCAmelCase ,__UpperCAmelCase=None ,__UpperCAmelCase=None ,) -> Optional[int]:
# pre_self_attention_layer_norm
A__ = self.layer_norm(__UpperCAmelCase )
if conditioning_emb is not None:
A__ = self.FiLMLayer(__UpperCAmelCase ,__UpperCAmelCase )
# Self-attention block
A__ = self.attention(__UpperCAmelCase )
A__ = hidden_states + self.dropout(__UpperCAmelCase )
return hidden_states
class UpperCamelCase__( nn.Module ):
def __init__( self ,__UpperCAmelCase ,__UpperCAmelCase ,__UpperCAmelCase ,__UpperCAmelCase ,__UpperCAmelCase ) -> str:
super().__init__()
A__ = Attention(query_dim=__UpperCAmelCase ,heads=__UpperCAmelCase ,dim_head=__UpperCAmelCase ,out_bias=__UpperCAmelCase ,scale_qk=__UpperCAmelCase )
A__ = TaLayerNorm(__UpperCAmelCase ,eps=__UpperCAmelCase )
A__ = nn.Dropout(__UpperCAmelCase )
def snake_case__ ( self ,__UpperCAmelCase ,__UpperCAmelCase=None ,__UpperCAmelCase=None ,) -> int:
A__ = self.layer_norm(__UpperCAmelCase )
A__ = self.attention(
__UpperCAmelCase ,encoder_hidden_states=__UpperCAmelCase ,attention_mask=attention_mask.squeeze(1 ) ,)
A__ = hidden_states + self.dropout(__UpperCAmelCase )
return layer_output
class UpperCamelCase__( nn.Module ):
def __init__( self ,__UpperCAmelCase ,__UpperCAmelCase ,__UpperCAmelCase ,__UpperCAmelCase ) -> List[str]:
super().__init__()
A__ = TaDenseGatedActDense(d_model=__UpperCAmelCase ,d_ff=__UpperCAmelCase ,dropout_rate=__UpperCAmelCase )
A__ = TaFiLMLayer(in_features=d_model * 4 ,out_features=__UpperCAmelCase )
A__ = TaLayerNorm(__UpperCAmelCase ,eps=__UpperCAmelCase )
A__ = nn.Dropout(__UpperCAmelCase )
def snake_case__ ( self ,__UpperCAmelCase ,__UpperCAmelCase=None ) -> Any:
A__ = self.layer_norm(__UpperCAmelCase )
if conditioning_emb is not None:
A__ = self.film(__UpperCAmelCase ,__UpperCAmelCase )
A__ = self.DenseReluDense(__UpperCAmelCase )
A__ = hidden_states + self.dropout(__UpperCAmelCase )
return hidden_states
class UpperCamelCase__( nn.Module ):
def __init__( self ,__UpperCAmelCase ,__UpperCAmelCase ,__UpperCAmelCase ) -> str:
super().__init__()
A__ = nn.Linear(__UpperCAmelCase ,__UpperCAmelCase ,bias=__UpperCAmelCase )
A__ = nn.Linear(__UpperCAmelCase ,__UpperCAmelCase ,bias=__UpperCAmelCase )
A__ = nn.Linear(__UpperCAmelCase ,__UpperCAmelCase ,bias=__UpperCAmelCase )
A__ = nn.Dropout(__UpperCAmelCase )
A__ = NewGELUActivation()
def snake_case__ ( self ,__UpperCAmelCase ) -> Dict:
A__ = self.act(self.wi_a(__UpperCAmelCase ) )
A__ = self.wi_a(__UpperCAmelCase )
A__ = hidden_gelu * hidden_linear
A__ = self.dropout(__UpperCAmelCase )
A__ = self.wo(__UpperCAmelCase )
return hidden_states
class UpperCamelCase__( nn.Module ):
def __init__( self ,__UpperCAmelCase ,__UpperCAmelCase=1e-6 ) -> Any:
super().__init__()
A__ = nn.Parameter(torch.ones(__UpperCAmelCase ) )
A__ = eps
def snake_case__ ( self ,__UpperCAmelCase ) -> Optional[Any]:
# T5 uses a layer_norm which only scales and doesn't shift, which is also known as Root Mean
# Square Layer Normalization https://arxiv.org/abs/1910.07467 thus variance is calculated
# w/o mean and there is no bias. Additionally we want to make sure that the accumulation for
# half-precision inputs is done in fp32
A__ = hidden_states.to(torch.floataa ).pow(2 ).mean(-1 ,keepdim=__UpperCAmelCase )
A__ = hidden_states * torch.rsqrt(variance + self.variance_epsilon )
# convert into half-precision if necessary
if self.weight.dtype in [torch.floataa, torch.bfloataa]:
A__ = hidden_states.to(self.weight.dtype )
return self.weight * hidden_states
class UpperCamelCase__( nn.Module ):
def snake_case__ ( self ,__UpperCAmelCase ) -> torch.Tensor:
return 0.5 * input * (1.0 + torch.tanh(math.sqrt(2.0 / math.pi ) * (input + 0.0_4_4_7_1_5 * torch.pow(__UpperCAmelCase ,3.0 )) ))
class UpperCamelCase__( nn.Module ):
def __init__( self ,__UpperCAmelCase ,__UpperCAmelCase ) -> Optional[int]:
super().__init__()
A__ = nn.Linear(__UpperCAmelCase ,out_features * 2 ,bias=__UpperCAmelCase )
def snake_case__ ( self ,__UpperCAmelCase ,__UpperCAmelCase ) -> Optional[Any]:
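        # FiLM: predict per-channel (scale, shift) from the conditioning embedding
        # and apply them as an affine modulation of x.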
A__ = self.scale_bias(__UpperCAmelCase )
A__ , A__ = torch.chunk(__UpperCAmelCase ,2 ,-1 )
A__ = x * (1 + scale) + shift
return x
| 221 | """simple docstring"""
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import rescale, resize, to_channel_dimension_format
from ...image_utils import (
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
__lowerCamelCase = logging.get_logger(__name__)
def UpperCAmelCase ( UpperCamelCase__ , UpperCamelCase__ ):
"""simple docstring"""
A__ = b.T
A__ = np.sum(np.square(UpperCamelCase__ ) , axis=1 )
A__ = np.sum(np.square(UpperCamelCase__ ) , axis=0 )
A__ = np.matmul(UpperCamelCase__ , UpperCamelCase__ )
A__ = aa[:, None] - 2 * ab + ba[None, :]
return d
def UpperCAmelCase ( UpperCamelCase__ , UpperCamelCase__ ):
"""simple docstring"""
A__ = x.reshape(-1 , 3 )
A__ = squared_euclidean_distance(UpperCamelCase__ , UpperCamelCase__ )
return np.argmin(UpperCamelCase__ , axis=1 )
class UpperCamelCase__( __A ):
lowerCAmelCase__ : Tuple = ['pixel_values']
def __init__( self ,__UpperCAmelCase = None ,__UpperCAmelCase = True ,__UpperCAmelCase = None ,__UpperCAmelCase = PILImageResampling.BILINEAR ,__UpperCAmelCase = True ,__UpperCAmelCase = True ,**__UpperCAmelCase ,) -> None:
super().__init__(**__UpperCAmelCase )
A__ = size if size is not None else {'height': 2_56, 'width': 2_56}
A__ = get_size_dict(__UpperCAmelCase )
A__ = np.array(__UpperCAmelCase ) if clusters is not None else None
A__ = do_resize
A__ = size
A__ = resample
A__ = do_normalize
A__ = do_color_quantize
def snake_case__ ( self ,__UpperCAmelCase ,__UpperCAmelCase ,__UpperCAmelCase = PILImageResampling.BILINEAR ,__UpperCAmelCase = None ,**__UpperCAmelCase ,) -> np.ndarray:
A__ = get_size_dict(__UpperCAmelCase )
if "height" not in size or "width" not in size:
raise ValueError(f'''Size dictionary must contain both height and width keys. Got {size.keys()}''' )
return resize(
__UpperCAmelCase ,size=(size['height'], size['width']) ,resample=__UpperCAmelCase ,data_format=__UpperCAmelCase ,**__UpperCAmelCase )
def snake_case__ ( self ,__UpperCAmelCase ,__UpperCAmelCase = None ,) -> np.ndarray:
A__ = rescale(image=__UpperCAmelCase ,scale=1 / 1_2_7.5 ,data_format=__UpperCAmelCase )
A__ = image - 1
return image
def snake_case__ ( self ,__UpperCAmelCase ,__UpperCAmelCase = None ,__UpperCAmelCase = None ,__UpperCAmelCase = None ,__UpperCAmelCase = None ,__UpperCAmelCase = None ,__UpperCAmelCase = None ,__UpperCAmelCase = None ,__UpperCAmelCase = ChannelDimension.FIRST ,**__UpperCAmelCase ,) -> PIL.Image.Image:
A__ = do_resize if do_resize is not None else self.do_resize
A__ = size if size is not None else self.size
A__ = get_size_dict(__UpperCAmelCase )
A__ = resample if resample is not None else self.resample
A__ = do_normalize if do_normalize is not None else self.do_normalize
A__ = do_color_quantize if do_color_quantize is not None else self.do_color_quantize
A__ = clusters if clusters is not None else self.clusters
A__ = np.array(__UpperCAmelCase )
A__ = make_list_of_images(__UpperCAmelCase )
if not valid_images(__UpperCAmelCase ):
raise ValueError(
'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '
'torch.Tensor, tf.Tensor or jax.ndarray.' )
        if do_resize and (size is None or resample is None):
raise ValueError('Size and resample must be specified if do_resize is True.' )
if do_color_quantize and clusters is None:
raise ValueError('Clusters must be specified if do_color_quantize is True.' )
# All transformations expect numpy arrays.
A__ = [to_numpy_array(__UpperCAmelCase ) for image in images]
if do_resize:
A__ = [self.resize(image=__UpperCAmelCase ,size=__UpperCAmelCase ,resample=__UpperCAmelCase ) for image in images]
if do_normalize:
A__ = [self.normalize(image=__UpperCAmelCase ) for image in images]
if do_color_quantize:
A__ = [to_channel_dimension_format(__UpperCAmelCase ,ChannelDimension.LAST ) for image in images]
# color quantize from (batch_size, height, width, 3) to (batch_size, height, width)
A__ = np.array(__UpperCAmelCase )
A__ = color_quantize(__UpperCAmelCase ,__UpperCAmelCase ).reshape(images.shape[:-1] )
# flatten to (batch_size, height*width)
A__ = images.shape[0]
A__ = images.reshape(__UpperCAmelCase ,-1 )
# We need to convert back to a list of images to keep consistent behaviour across processors.
A__ = list(__UpperCAmelCase )
else:
A__ = [to_channel_dimension_format(__UpperCAmelCase ,__UpperCAmelCase ) for image in images]
A__ = {'input_ids': images}
return BatchFeature(data=__UpperCAmelCase ,tensor_type=__UpperCAmelCase )
| 221 | 1 |
"""simple docstring"""
from manim import *
class A_ (lowercase__ ):
'''simple docstring'''
def UpperCamelCase__ ( self ):
"""simple docstring"""
UpperCAmelCase_ : List[str] = Rectangle(height=0.5 , width=0.5 )
UpperCAmelCase_ : List[Any] = Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0 )
UpperCAmelCase_ : Tuple = [mem.copy() for i in range(6 )]
UpperCAmelCase_ : Optional[int] = [mem.copy() for i in range(6 )]
UpperCAmelCase_ : List[Any] = VGroup(*lowercase_ ).arrange(lowercase_ , buff=0 )
UpperCAmelCase_ : Optional[Any] = VGroup(*lowercase_ ).arrange(lowercase_ , buff=0 )
UpperCAmelCase_ : List[str] = VGroup(lowercase_ , lowercase_ ).arrange(lowercase_ , buff=0 )
UpperCAmelCase_ : Optional[int] = Text("CPU" , font_size=24 )
UpperCAmelCase_ : str = Group(lowercase_ , lowercase_ ).arrange(lowercase_ , buff=0.5 , aligned_edge=lowercase_ )
cpu.move_to([-2.5, -0.5, 0] )
self.add(lowercase_ )
UpperCAmelCase_ : Optional[int] = [mem.copy() for i in range(4 )]
UpperCAmelCase_ : Tuple = VGroup(*lowercase_ ).arrange(lowercase_ , buff=0 )
UpperCAmelCase_ : Any = Text("GPU" , font_size=24 )
UpperCAmelCase_ : Dict = Group(lowercase_ , lowercase_ ).arrange(lowercase_ , buff=0.5 , aligned_edge=lowercase_ )
gpu.move_to([-1, -1, 0] )
self.add(lowercase_ )
UpperCAmelCase_ : int = [mem.copy() for i in range(6 )]
UpperCAmelCase_ : Tuple = VGroup(*lowercase_ ).arrange(lowercase_ , buff=0 )
UpperCAmelCase_ : Optional[int] = Text("Model" , font_size=24 )
UpperCAmelCase_ : Optional[int] = Group(lowercase_ , lowercase_ ).arrange(lowercase_ , buff=0.5 , aligned_edge=lowercase_ )
model.move_to([3, -1.0, 0] )
self.add(lowercase_ )
UpperCAmelCase_ : Dict = []
for i, rect in enumerate(lowercase_ ):
rect.set_stroke(lowercase_ )
# target = fill.copy().set_fill(YELLOW, opacity=0.7)
# target.move_to(rect)
# self.add(target)
UpperCAmelCase_ : Optional[int] = Rectangle(height=0.46 / 4 , width=0.46 / 3 ).set_stroke(width=0.0 ).set_fill(lowercase_ , opacity=0.7 )
if i == 0:
cpu_target.next_to(cpu_left_col_base[0].get_corner(DOWN + LEFT ) , buff=0.02 , direction=lowercase_ )
cpu_target.set_x(cpu_target.get_x() + 0.1 )
elif i == 3:
cpu_target.next_to(cpu_targs[0] , direction=lowercase_ , buff=0.0 )
else:
cpu_target.next_to(cpu_targs[i - 1] , direction=lowercase_ , buff=0.0 )
self.add(lowercase_ )
cpu_targs.append(lowercase_ )
UpperCAmelCase_ : Dict = [mem.copy() for i in range(6 )]
UpperCAmelCase_ : Union[str, Any] = VGroup(*lowercase_ ).arrange(lowercase_ , buff=0 )
UpperCAmelCase_ : Tuple = Text("Loaded Checkpoint" , font_size=24 )
UpperCAmelCase_ : Union[str, Any] = Group(lowercase_ , lowercase_ ).arrange(lowercase_ , aligned_edge=lowercase_ , buff=0.4 )
checkpoint.move_to([3, 0.5, 0] )
UpperCAmelCase_ : int = Square(side_length=2.2 )
key.move_to([-5, 2, 0] )
UpperCAmelCase_ : Dict = MarkupText(
F"""<b>Key:</b>\n\n<span fgcolor='{YELLOW}'>●</span> Empty Model""" , font_size=18 , )
key_text.move_to([-5, 2.4, 0] )
self.add(lowercase_ , lowercase_ )
UpperCAmelCase_ : List[Any] = MarkupText(
F"""<span fgcolor='{BLUE}'>●</span> Checkpoint""" , font_size=18 , )
blue_text.next_to(lowercase_ , DOWN * 2.4 , aligned_edge=key_text.get_left() )
UpperCAmelCase_ : Union[str, Any] = MarkupText(
F"""Next, a <i><span fgcolor=\"{BLUE}\">second</span></i> model is loaded into memory,\nwith the weights of a <span fgcolor=\"{BLUE}\">single shard</span>.""" , font_size=24 , )
step_a.move_to([2, 2, 0] )
self.play(Write(lowercase_ ) , Write(lowercase_ ) )
self.play(Write(lowercase_ , run_time=1 ) , Create(lowercase_ , run_time=1 ) )
UpperCAmelCase_ : Tuple = []
UpperCAmelCase_ : str = []
for i, rect in enumerate(lowercase_ ):
UpperCAmelCase_ : int = fill.copy().set_fill(lowercase_ , opacity=0.7 )
target.move_to(lowercase_ )
first_animations.append(GrowFromCenter(lowercase_ , run_time=1 ) )
UpperCAmelCase_ : str = target.copy()
cpu_target.generate_target()
if i < 5:
cpu_target.target.move_to(cpu_left_col_base[i + 1] )
else:
cpu_target.target.move_to(cpu_right_col_base[i - 5] )
second_animations.append(MoveToTarget(lowercase_ , run_time=1.5 ) )
self.play(*lowercase_ )
self.play(*lowercase_ )
self.wait()
| 352 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_a = {'configuration_unispeech': ['UNISPEECH_PRETRAINED_CONFIG_ARCHIVE_MAP', 'UniSpeechConfig']}
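# Modeling classes are only added to the import structure when torch is
# available; otherwise only the configuration objects are exported.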
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_a = [
'UNISPEECH_PRETRAINED_MODEL_ARCHIVE_LIST',
'UniSpeechForCTC',
'UniSpeechForPreTraining',
'UniSpeechForSequenceClassification',
'UniSpeechModel',
'UniSpeechPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_unispeech import UNISPEECH_PRETRAINED_CONFIG_ARCHIVE_MAP, UniSpeechConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_unispeech import (
UNISPEECH_PRETRAINED_MODEL_ARCHIVE_LIST,
UniSpeechForCTC,
UniSpeechForPreTraining,
UniSpeechForSequenceClassification,
UniSpeechModel,
UniSpeechPreTrainedModel,
)
else:
import sys
_a = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 23 | 0 |
"""simple docstring"""
import inspect
import os
import unittest
import torch
import accelerate
from accelerate import Accelerator
from accelerate.test_utils import execute_subprocess_async, require_multi_gpu
from accelerate.utils import patch_environment
class a ( unittest.TestCase ):
"""simple docstring"""
def UpperCamelCase ( self: str ):
"""simple docstring"""
A__ = inspect.getfile(accelerate.test_utils )
A__ = os.path.sep.join(mod_file.split(os.path.sep )[:-1] + ["""scripts""", """test_script.py"""] )
A__ = os.path.sep.join(
mod_file.split(os.path.sep )[:-1] + ["""scripts""", """test_distributed_data_loop.py"""] )
A__ = os.path.sep.join(mod_file.split(os.path.sep )[:-1] + ["""scripts""", """test_ops.py"""] )
@require_multi_gpu
def UpperCamelCase ( self: List[Any] ):
"""simple docstring"""
print(f"""Found {torch.cuda.device_count()} devices.""" )
A__ = ["""torchrun""", f"""--nproc_per_node={torch.cuda.device_count()}""", self.test_file_path]
with patch_environment(omp_num_threads=1 ):
execute_subprocess_async(UpperCamelCase , env=os.environ.copy() )
@require_multi_gpu
def UpperCamelCase ( self: Union[str, Any] ):
"""simple docstring"""
print(f"""Found {torch.cuda.device_count()} devices.""" )
A__ = ["""torchrun""", f"""--nproc_per_node={torch.cuda.device_count()}""", self.operation_file_path]
print(f"""Command: {cmd}""" )
with patch_environment(omp_num_threads=1 ):
execute_subprocess_async(UpperCamelCase , env=os.environ.copy() )
@require_multi_gpu
def UpperCamelCase ( self: int ):
"""simple docstring"""
A__ = ["""torchrun""", f"""--nproc_per_node={torch.cuda.device_count()}""", inspect.getfile(self.__class__ )]
with patch_environment(omp_num_threads=1 ):
execute_subprocess_async(UpperCamelCase , env=os.environ.copy() )
@require_multi_gpu
def UpperCamelCase ( self: str ):
"""simple docstring"""
print(f"""Found {torch.cuda.device_count()} devices, using 2 devices only""" )
A__ = ["""torchrun""", f"""--nproc_per_node={torch.cuda.device_count()}""", self.data_loop_file_path]
with patch_environment(omp_num_threads=1 , cuda_visible_devices="""0,1""" ):
execute_subprocess_async(UpperCamelCase , env=os.environ.copy() )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE_ : Union[str, Any] = Accelerator()
SCREAMING_SNAKE_CASE_ : Optional[Any] = (accelerator.state.process_index + 2, 1_0)
SCREAMING_SNAKE_CASE_ : Dict = torch.randint(0, 1_0, shape).to(accelerator.device)
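    # Each process builds a tensor whose first dimension differs (process_index + 2),
    # so pad_across_processes must grow every tensor to the largest size,
    # num_processes + 1.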
SCREAMING_SNAKE_CASE_ : int = ''
SCREAMING_SNAKE_CASE_ : Any = accelerator.pad_across_processes(tensor)
if tensora.shape[0] != accelerator.state.num_processes + 1:
error_msg += f"Found shape {tensora.shape} but should have {accelerator.state.num_processes + 1} at dim 0."
if not torch.equal(tensora[: accelerator.state.process_index + 2], tensor):
error_msg += "Tensors have different values."
if not torch.all(tensora[accelerator.state.process_index + 2 :] == 0):
error_msg += "Padding was not done with the right value (0)."
SCREAMING_SNAKE_CASE_ : Any = accelerator.pad_across_processes(tensor, pad_first=True)
if tensora.shape[0] != accelerator.state.num_processes + 1:
error_msg += f"Found shape {tensora.shape} but should have {accelerator.state.num_processes + 1} at dim 0."
SCREAMING_SNAKE_CASE_ : int = accelerator.state.num_processes - accelerator.state.process_index - 1
if not torch.equal(tensora[index:], tensor):
error_msg += "Tensors have different values."
if not torch.all(tensora[:index] == 0):
error_msg += "Padding was not done with the right value (0)."
# Raise error at the end to make sure we don't stop at the first failure.
if len(error_msg) > 0:
raise ValueError(error_msg)
| 335 |
"""simple docstring"""
import math
class a :
"""simple docstring"""
def __init__( self: List[Any] , UpperCamelCase: List[str]=0 ): # a graph with Node 0,1,...,N-1
"""simple docstring"""
A__ = n
A__ = [
[math.inf for j in range(0 , UpperCamelCase )] for i in range(0 , UpperCamelCase )
] # adjacency matrix for weight
A__ = [
[math.inf for j in range(0 , UpperCamelCase )] for i in range(0 , UpperCamelCase )
] # dp[i][j] stores minimum distance from i to j
def UpperCamelCase ( self: Union[str, Any] , UpperCamelCase: Optional[int] , UpperCamelCase: Union[str, Any] , UpperCamelCase: Tuple ):
"""simple docstring"""
A__ = w
def UpperCamelCase ( self: int ):
"""simple docstring"""
for k in range(0 , self.n ):
for i in range(0 , self.n ):
for j in range(0 , self.n ):
A__ = min(self.dp[i][j] , self.dp[i][k] + self.dp[k][j] )
def UpperCamelCase ( self: int , UpperCamelCase: List[str] , UpperCamelCase: Dict ):
"""simple docstring"""
return self.dp[u][v]
if __name__ == "__main__":
SCREAMING_SNAKE_CASE_ : List[Any] = Graph(5)
graph.add_edge(0, 2, 9)
graph.add_edge(0, 4, 1_0)
graph.add_edge(1, 3, 5)
graph.add_edge(2, 3, 7)
graph.add_edge(3, 0, 1_0)
graph.add_edge(3, 1, 2)
graph.add_edge(3, 2, 1)
graph.add_edge(3, 4, 6)
graph.add_edge(4, 1, 3)
graph.add_edge(4, 2, 4)
graph.add_edge(4, 3, 9)
graph.floyd_warshall()
graph.show_min(1, 4)
graph.show_min(0, 3)
| 335 | 1 |
import argparse
import glob
import importlib.util
import os
import re
import black
from doc_builder.style_doc import style_docstrings_in_code
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_copies.py
lowerCamelCase__ = '''src/diffusers'''
lowerCamelCase__ = '''.'''
# This is to make sure the diffusers module imported is the one in the repo.
lowerCamelCase__ = importlib.util.spec_from_file_location(
'''diffusers''',
os.path.join(DIFFUSERS_PATH, '''__init__.py'''),
submodule_search_locations=[DIFFUSERS_PATH],
)
lowerCamelCase__ = spec.loader.load_module()
def lowerCAmelCase__ ( a__ , a__ ) ->int:
'''simple docstring'''
return line.startswith(lowercase_ ) or len(lowercase_ ) <= 1 or re.search(r"^\s*\)(\s*->.*:|:)\s*$" , lowercase_ ) is not None
def lowerCAmelCase__ ( a__ ) ->Tuple:
'''simple docstring'''
_UpperCamelCase = object_name.split("." )
_UpperCamelCase = 0
# First let's find the module where our object lives.
_UpperCamelCase = parts[i]
while i < len(lowercase_ ) and not os.path.isfile(os.path.join(lowercase_ , f'{module}.py' ) ):
i += 1
if i < len(lowercase_ ):
_UpperCamelCase = os.path.join(lowercase_ , parts[i] )
if i >= len(lowercase_ ):
raise ValueError(f'`object_name` should begin with the name of a module of diffusers but got {object_name}.' )
with open(os.path.join(lowercase_ , f'{module}.py' ) , "r" , encoding="utf-8" , newline="\n" ) as f:
_UpperCamelCase = f.readlines()
# Now let's find the class / func in the code!
_UpperCamelCase = ""
_UpperCamelCase = 0
for name in parts[i + 1 :]:
while (
line_index < len(lowercase_ ) and re.search(rf'^{indent}(class|def)\s+{name}(\(|\:)' , lines[line_index] ) is None
):
line_index += 1
indent += " "
line_index += 1
if line_index >= len(lowercase_ ):
raise ValueError(f' {object_name} does not match any function or class in {module}.' )
# We found the beginning of the class / func, now let's find the end (when the indent diminishes).
_UpperCamelCase = line_index
while line_index < len(lowercase_ ) and _should_continue(lines[line_index] , lowercase_ ):
line_index += 1
# Clean up empty lines at the end (if any).
while len(lines[line_index - 1] ) <= 1:
line_index -= 1
_UpperCamelCase = lines[start_index:line_index]
return "".join(lowercase_ )
lowerCamelCase__ = re.compile(R'''^(\s*)#\s*Copied from\s+diffusers\.(\S+\.\S+)\s*($|\S.*$)''')
lowerCamelCase__ = re.compile(R'''^\s*(\S+)->(\S+)(\s+.*|$)''')
lowerCamelCase__ = re.compile(R'''<FILL\s+[^>]*>''')
def lowerCAmelCase__ ( a__ ) ->str:
'''simple docstring'''
_UpperCamelCase = code.split("\n" )
_UpperCamelCase = 0
while idx < len(lowercase_ ) and len(lines[idx] ) == 0:
idx += 1
if idx < len(lowercase_ ):
return re.search(r"^(\s*)\S" , lines[idx] ).groups()[0]
return ""
def lowerCAmelCase__ ( a__ ) ->Union[str, Any]:
'''simple docstring'''
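    # Indented code (e.g. a method body) is wrapped in a dummy class so black can
    # parse it, formatted, then unwrapped.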
_UpperCamelCase = len(get_indent(lowercase_ ) ) > 0
if has_indent:
_UpperCamelCase = f'class Bla:\n{code}'
_UpperCamelCase = black.Mode(target_versions={black.TargetVersion.PYaa} , line_length=119 , preview=lowercase_ )
_UpperCamelCase = black.format_str(lowercase_ , mode=lowercase_ )
_UpperCamelCase , _UpperCamelCase = style_docstrings_in_code(lowercase_ )
return result[len("class Bla:\n" ) :] if has_indent else result
def lowerCAmelCase__ ( a__ , a__=False ) ->str:
'''simple docstring'''
with open(lowercase_ , "r" , encoding="utf-8" , newline="\n" ) as f:
_UpperCamelCase = f.readlines()
_UpperCamelCase = []
_UpperCamelCase = 0
# Not a for loop cause `lines` is going to change (if `overwrite=True`).
while line_index < len(lowercase_ ):
_UpperCamelCase = _re_copy_warning.search(lines[line_index] )
if search is None:
line_index += 1
continue
# There is some copied code here, let's retrieve the original.
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase = search.groups()
_UpperCamelCase = find_code_in_diffusers(lowercase_ )
_UpperCamelCase = get_indent(lowercase_ )
_UpperCamelCase = line_index + 1 if indent == theoretical_indent else line_index + 2
_UpperCamelCase = theoretical_indent
_UpperCamelCase = start_index
# Loop to check the observed code, stop when indentation diminishes or if we see a End copy comment.
_UpperCamelCase = True
while line_index < len(lowercase_ ) and should_continue:
line_index += 1
if line_index >= len(lowercase_ ):
break
_UpperCamelCase = lines[line_index]
_UpperCamelCase = _should_continue(lowercase_ , lowercase_ ) and re.search(f'^{indent}# End copy' , lowercase_ ) is None
# Clean up empty lines at the end (if any).
while len(lines[line_index - 1] ) <= 1:
line_index -= 1
_UpperCamelCase = lines[start_index:line_index]
_UpperCamelCase = "".join(lowercase_ )
# Remove any nested `Copied from` comments to avoid circular copies
_UpperCamelCase = [line for line in theoretical_code.split("\n" ) if _re_copy_warning.search(lowercase_ ) is None]
_UpperCamelCase = "\n".join(lowercase_ )
# Before comparing, use the `replace_pattern` on the original code.
if len(lowercase_ ) > 0:
_UpperCamelCase = replace_pattern.replace("with" , "" ).split("," )
_UpperCamelCase = [_re_replace_pattern.search(lowercase_ ) for p in patterns]
for pattern in patterns:
if pattern is None:
continue
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase = pattern.groups()
_UpperCamelCase = re.sub(lowercase_ , lowercase_ , lowercase_ )
if option.strip() == "all-casing":
_UpperCamelCase = re.sub(obja.lower() , obja.lower() , lowercase_ )
_UpperCamelCase = re.sub(obja.upper() , obja.upper() , lowercase_ )
# Blackify after replacement. To be able to do that, we need the header (class or function definition)
# from the previous line
_UpperCamelCase = blackify(lines[start_index - 1] + theoretical_code )
_UpperCamelCase = theoretical_code[len(lines[start_index - 1] ) :]
# Test for a diff and act accordingly.
if observed_code != theoretical_code:
diffs.append([object_name, start_index] )
if overwrite:
_UpperCamelCase = lines[:start_index] + [theoretical_code] + lines[line_index:]
_UpperCamelCase = start_index + 1
if overwrite and len(lowercase_ ) > 0:
# Warn the user a file has been modified.
print(f'Detected changes, rewriting {filename}.' )
with open(lowercase_ , "w" , encoding="utf-8" , newline="\n" ) as f:
f.writelines(lowercase_ )
return diffs
def lowerCAmelCase__ ( a__ = False ) ->Union[str, Any]:
'''simple docstring'''
_UpperCamelCase = glob.glob(os.path.join(lowercase_ , "**/*.py" ) , recursive=lowercase_ )
_UpperCamelCase = []
for filename in all_files:
_UpperCamelCase = is_copy_consistent(lowercase_ , lowercase_ )
diffs += [f'- {filename}: copy does not match {d[0]} at line {d[1]}' for d in new_diffs]
if not overwrite and len(lowercase_ ) > 0:
_UpperCamelCase = "\n".join(lowercase_ )
raise Exception(
"Found the following copy inconsistencies:\n"
+ diff
+ "\nRun `make fix-copies` or `python utils/check_copies.py --fix_and_overwrite` to fix them." )
if __name__ == "__main__":
lowerCamelCase__ = argparse.ArgumentParser()
parser.add_argument('''--fix_and_overwrite''', action='''store_true''', help='''Whether to fix inconsistencies.''')
lowerCamelCase__ = parser.parse_args()
check_copies(args.fix_and_overwrite)
| 356 |
import unittest
from transformers import is_flax_available
from transformers.testing_utils import require_flax, require_sentencepiece, require_tokenizers, require_torch, slow
if is_flax_available():
import optax
from flax.training.common_utils import onehot
from transformers import AutoTokenizer, FlaxMTaForConditionalGeneration
from transformers.models.ta.modeling_flax_ta import shift_tokens_right
@require_torch
@require_sentencepiece
@require_tokenizers
@require_flax
class _UpperCAmelCase ( unittest.TestCase ):
'''simple docstring'''
@slow
def __UpperCAmelCase ( self : List[str]) -> List[str]:
"""simple docstring"""
_UpperCamelCase = FlaxMTaForConditionalGeneration.from_pretrained("google/mt5-small")
_UpperCamelCase = AutoTokenizer.from_pretrained("google/mt5-small")
_UpperCamelCase = tokenizer("Hello there" , return_tensors="np").input_ids
_UpperCamelCase = tokenizer("Hi I am" , return_tensors="np").input_ids
_UpperCamelCase = shift_tokens_right(lowercase_ , model.config.pad_token_id , model.config.decoder_start_token_id)
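        # Teacher forcing: decoder inputs are the labels shifted one position right,
        # with the decoder start token prepended.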
_UpperCamelCase = model(lowercase_ , decoder_input_ids=lowercase_).logits
_UpperCamelCase = optax.softmax_cross_entropy(lowercase_ , onehot(lowercase_ , logits.shape[-1])).mean()
_UpperCamelCase = -(labels.shape[-1] * loss.item())
_UpperCamelCase = -84.91_27
self.assertTrue(abs(mtf_score - EXPECTED_SCORE) < 1e-4)
| 63 | 0 |
from typing import Any
class SCREAMING_SNAKE_CASE_ :
def __init__( self : Any , lowerCamelCase_ : Any ):
"""simple docstring"""
UpperCamelCase = data
UpperCamelCase = None
class SCREAMING_SNAKE_CASE_ :
def __init__( self : Optional[int] ):
"""simple docstring"""
UpperCamelCase = None
def lowerCamelCase_ ( self : Any ):
"""simple docstring"""
UpperCamelCase = self.head
while temp is not None:
print(temp.data , end=""" """ )
UpperCamelCase = temp.next
print()
def lowerCamelCase_ ( self : Any , lowerCamelCase_ : Any ):
"""simple docstring"""
UpperCamelCase = Node(SCREAMING_SNAKE_CASE_ )
UpperCamelCase = self.head
UpperCamelCase = new_node
def lowerCamelCase_ ( self : Tuple , lowerCamelCase_ : List[str] , lowerCamelCase_ : List[str] ):
"""simple docstring"""
if node_data_a == node_data_a:
return
else:
UpperCamelCase = self.head
while node_a is not None and node_a.data != node_data_a:
UpperCamelCase = node_a.next
UpperCamelCase = self.head
while node_a is not None and node_a.data != node_data_a:
UpperCamelCase = node_a.next
if node_a is None or node_a is None:
return
UpperCamelCase = node_a.data, node_a.data
if __name__ == "__main__":
_SCREAMING_SNAKE_CASE = LinkedList()
for i in range(5, 0, -1):
ll.push(i)
ll.print_list()
ll.swap_nodes(1, 4)
print("""After swapping""")
ll.print_list()
| 343 |
"""simple docstring"""
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_tf
if is_tf_available():
import tensorflow as tf
from tensorflow.python.eager import context
from tensorflow.python.framework import ops
from transformers import GradientAccumulator, create_optimizer
@require_tf
class UpperCamelCase__ ( unittest.TestCase ):
"""simple docstring"""
def SCREAMING_SNAKE_CASE__ ( self : List[str] , SCREAMING_SNAKE_CASE_ : str , SCREAMING_SNAKE_CASE_ : List[Any] , SCREAMING_SNAKE_CASE_ : List[str] ):
self.assertEqual(len(SCREAMING_SNAKE_CASE_ ) , len(SCREAMING_SNAKE_CASE_ ) )
for a, b in zip(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
self.assertAlmostEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , delta=SCREAMING_SNAKE_CASE_ )
def SCREAMING_SNAKE_CASE__ ( self : int ):
lowerCAmelCase_ : int = GradientAccumulator()
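        # Gradients are summed elementwise across calls; a call with a different
        # number of gradients than before must raise.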
accumulator([tf.constant([1.0, 2.0] )] )
accumulator([tf.constant([-2.0, 1.0] )] )
accumulator([tf.constant([-1.0, 2.0] )] )
with self.assertRaises(SCREAMING_SNAKE_CASE_ ):
accumulator([tf.constant([1.0, 1.0] ), tf.constant([2.0, 2.0] )] )
self.assertEqual(accumulator.step , 3 )
self.assertEqual(len(accumulator.gradients ) , 1 )
self.assertListAlmostEqual(accumulator.gradients[0].numpy().tolist() , [-2.0, 5.0] , tol=1E-2 )
accumulator.reset()
self.assertEqual(accumulator.step , 0 )
self.assertListAlmostEqual(accumulator.gradients[0].numpy().tolist() , [0.0, 0.0] , tol=1E-2 )
def SCREAMING_SNAKE_CASE__ ( self : str ):
lowerCAmelCase_ : Optional[int] = None
ops.enable_eager_execution_internal()
lowerCAmelCase_ : str = tf.config.list_physical_devices('CPU' )
if len(SCREAMING_SNAKE_CASE_ ) == 1:
tf.config.set_logical_device_configuration(
physical_devices[0] , [tf.config.LogicalDeviceConfiguration(), tf.config.LogicalDeviceConfiguration()] )
lowerCAmelCase_ : Dict = tf.config.list_logical_devices(device_type='CPU' )
lowerCAmelCase_ : Optional[int] = tf.distribute.MirroredStrategy(devices=devices[:2] )
with strategy.scope():
lowerCAmelCase_ : Union[str, Any] = GradientAccumulator()
lowerCAmelCase_ : int = tf.Variable([4.0, 3.0] )
lowerCAmelCase_ ,lowerCAmelCase_ : Optional[int] = create_optimizer(5E-5 , 1_0 , 5 )
lowerCAmelCase_ : Union[str, Any] = tf.Variable([0.0, 0.0] , trainable=SCREAMING_SNAKE_CASE_ )
def accumulate_on_replica(SCREAMING_SNAKE_CASE_ : Optional[int] ):
accumulator([gradient] )
def apply_on_replica():
optimizer.apply_gradients(list(zip(accumulator.gradients , [variable] ) ) )
@tf.function
def accumulate(SCREAMING_SNAKE_CASE_ : Optional[int] , SCREAMING_SNAKE_CASE_ : Union[str, Any] ):
with strategy.scope():
lowerCAmelCase_ : Tuple = strategy.experimental_local_results(SCREAMING_SNAKE_CASE_ )
local_variables[0].assign(SCREAMING_SNAKE_CASE_ )
local_variables[1].assign(SCREAMING_SNAKE_CASE_ )
strategy.run(SCREAMING_SNAKE_CASE_ , args=(gradient_placeholder,) )
@tf.function
def apply_grad():
with strategy.scope():
strategy.run(SCREAMING_SNAKE_CASE_ )
def _check_local_values(SCREAMING_SNAKE_CASE_ : Union[str, Any] , SCREAMING_SNAKE_CASE_ : Optional[int] ):
lowerCAmelCase_ : List[Any] = strategy.experimental_local_results(accumulator._gradients[0] )
self.assertListAlmostEqual(values[0].value() , SCREAMING_SNAKE_CASE_ , tol=1E-2 )
self.assertListAlmostEqual(values[1].value() , SCREAMING_SNAKE_CASE_ , tol=1E-2 )
accumulate([1.0, 2.0] , [-1.0, 1.0] )
accumulate([3.0, -1.0] , [-1.0, -1.0] )
accumulate([-2.0, 2.0] , [3.0, -2.0] )
self.assertEqual(accumulator.step , 3 )
_check_local_values([2.0, 3.0] , [1.0, -2.0] )
apply_grad()
self.assertListAlmostEqual(variable.value() , [4.0, 3.0] , tol=1E-2 )
accumulator.reset()
self.assertEqual(accumulator.step , 0 )
_check_local_values([0.0, 0.0] , [0.0, 0.0] )
| 224 | 0 |
from __future__ import annotations
_UpperCAmelCase = [
[-1, 0], # left
[0, -1], # down
[1, 0], # right
[0, 1], # up
]
def _SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE :list[list[int]] , SCREAMING_SNAKE_CASE :list[int] , SCREAMING_SNAKE_CASE :list[int] , SCREAMING_SNAKE_CASE :int , SCREAMING_SNAKE_CASE :list[list[int]] , ) -> tuple[list[list[int]], list[list[int]]]:
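    # A* over a 4-connected grid: `closed` marks expanded cells and `action`
    # records the move used to reach each cell so the path can be rebuilt.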
__lowerCAmelCase : Any = [
[0 for col in range(len(grid[0] ) )] for row in range(len(SCREAMING_SNAKE_CASE ) )
] # the reference grid
__lowerCAmelCase : Optional[Any] = 1
__lowerCAmelCase : Any = [
[0 for col in range(len(grid[0] ) )] for row in range(len(SCREAMING_SNAKE_CASE ) )
] # the action grid
__lowerCAmelCase : Dict = init[0]
__lowerCAmelCase : Dict = init[1]
__lowerCAmelCase : List[Any] = 0
    __lowerCAmelCase : int = g + heuristic[x][y]  # f = g + h: estimated total cost of a path through this cell
__lowerCAmelCase : int = [[f, g, x, y]]
__lowerCAmelCase : List[str] = False # flag that is set when search is complete
    __lowerCAmelCase : Tuple = False  # flag set when no cell is left to expand
while not found and not resign:
if len(SCREAMING_SNAKE_CASE ) == 0:
raise ValueError("""Algorithm is unable to find solution""" )
        else:  # expand the least costly cell so as to move closer to the goal
cell.sort()
cell.reverse()
__lowerCAmelCase : Optional[Any] = cell.pop()
__lowerCAmelCase : str = next_cell[2]
__lowerCAmelCase : List[Any] = next_cell[3]
__lowerCAmelCase : Any = next_cell[1]
if x == goal[0] and y == goal[1]:
__lowerCAmelCase : Dict = True
else:
for i in range(len(SCREAMING_SNAKE_CASE ) ): # to try out different valid actions
__lowerCAmelCase : Union[str, Any] = x + DIRECTIONS[i][0]
__lowerCAmelCase : Tuple = y + DIRECTIONS[i][1]
if xa >= 0 and xa < len(SCREAMING_SNAKE_CASE ) and ya >= 0 and ya < len(grid[0] ):
if closed[xa][ya] == 0 and grid[xa][ya] == 0:
__lowerCAmelCase : Optional[int] = g + cost
__lowerCAmelCase : int = ga + heuristic[xa][ya]
cell.append([fa, ga, xa, ya] )
__lowerCAmelCase : Dict = 1
__lowerCAmelCase : int = i
__lowerCAmelCase : Union[str, Any] = []
__lowerCAmelCase : List[str] = goal[0]
__lowerCAmelCase : Dict = goal[1]
invpath.append([x, y] ) # we get the reverse path from here
while x != init[0] or y != init[1]:
__lowerCAmelCase : List[str] = x - DIRECTIONS[action[x][y]][0]
__lowerCAmelCase : Tuple = y - DIRECTIONS[action[x][y]][1]
__lowerCAmelCase : Optional[int] = xa
__lowerCAmelCase : List[Any] = ya
invpath.append([x, y] )
__lowerCAmelCase : str = []
for i in range(len(SCREAMING_SNAKE_CASE ) ):
path.append(invpath[len(SCREAMING_SNAKE_CASE ) - 1 - i] )
return path, action
if __name__ == "__main__":
_UpperCAmelCase = [
[0, 1, 0, 0, 0, 0],
    [0, 1, 0, 0, 0, 0],  # 0s are free cells whereas 1s are obstacles
[0, 1, 0, 0, 0, 0],
[0, 1, 0, 0, 1, 0],
[0, 0, 0, 0, 1, 0],
]
_UpperCAmelCase = [0, 0]
    # all coordinates are given in the format [y, x]
_UpperCAmelCase = [len(grid) - 1, len(grid[0]) - 1]
_UpperCAmelCase = 1
# the cost map which pushes the path closer to the goal
_UpperCAmelCase = [[0 for row in range(len(grid[0]))] for col in range(len(grid))]
for i in range(len(grid)):
for j in range(len(grid[0])):
_UpperCAmelCase = abs(i - goal[0]) + abs(j - goal[1])
if grid[i][j] == 1:
# added extra penalty in the heuristic map
_UpperCAmelCase = 99
_UpperCAmelCase , _UpperCAmelCase = search(grid, init, goal, cost, heuristic)
print('ACTION MAP')
for i in range(len(action)):
print(action[i])
for i in range(len(path)):
print(path[i]) | 232 |
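# A readable, runnable sketch of the heuristic grid search above. Names are
# mine; the logic mirrors the obfuscated version: expand the cheapest cell by
# f = g + heuristic, record the action taken, then walk back from the goal.
DIRECTIONS = [[-1, 0], [0, -1], [1, 0], [0, 1]]  # left, down, right, up


def grid_search(grid, init, goal, cost, heuristic):
    rows, cols = len(grid), len(grid[0])
    closed = [[0] * cols for _ in range(rows)]  # cells already expanded
    action = [[0] * cols for _ in range(rows)]  # direction used to reach each cell
    x, y = init
    closed[x][y] = 1
    g = 0
    open_cells = [[g + heuristic[x][y], g, x, y]]
    while True:
        if not open_cells:
            raise ValueError("Algorithm is unable to find solution")
        open_cells.sort(reverse=True)
        _, g, x, y = open_cells.pop()  # least costly cell
        if [x, y] == list(goal):
            break
        for i, (dx, dy) in enumerate(DIRECTIONS):
            x2, y2 = x + dx, y + dy
            if 0 <= x2 < rows and 0 <= y2 < cols and not closed[x2][y2] and grid[x2][y2] == 0:
                g2 = g + cost
                open_cells.append([g2 + heuristic[x2][y2], g2, x2, y2])
                closed[x2][y2] = 1
                action[x2][y2] = i
    # walk back from the goal using the recorded actions
    path = [[x, y]]
    while [x, y] != list(init):
        x, y = x - DIRECTIONS[action[x][y]][0], y - DIRECTIONS[action[x][y]][1]
        path.append([x, y])
    path.reverse()
    return path, action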
from math import isqrt
def _SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE :int ) -> list[int]:
__lowerCAmelCase : Tuple = [True] * max_number
for i in range(2 , isqrt(max_number - 1 ) + 1 ):
if is_prime[i]:
for j in range(i**2 , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
__lowerCAmelCase : Tuple = False
return [i for i in range(2 , SCREAMING_SNAKE_CASE ) if is_prime[i]]
def _SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE :int = 10**8 ) -> int:
__lowerCAmelCase : int = calculate_prime_numbers(max_number // 2 )
__lowerCAmelCase : List[Any] = 0
__lowerCAmelCase : List[str] = 0
__lowerCAmelCase : List[Any] = len(SCREAMING_SNAKE_CASE ) - 1
while left <= right:
while prime_numbers[left] * prime_numbers[right] >= max_number:
right -= 1
semiprimes_count += right - left + 1
left += 1
return semiprimes_count
if __name__ == "__main__":
print(f'''{solution() = }''') | 232 | 1 |
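# De-obfuscated sketch of the two-pointer semiprime count above: sieve the
# primes below max_number // 2, then for each smaller factor primes[left],
# count the primes[right] whose product with it stays below max_number.
from math import isqrt


def primes_below(n: int) -> list:
    is_prime = [True] * n
    for i in range(2, isqrt(n - 1) + 1):
        if is_prime[i]:
            for j in range(i * i, n, i):
                is_prime[j] = False
    return [i for i in range(2, n) if is_prime[i]]


def count_semiprimes(max_number: int) -> int:
    primes = primes_below(max_number // 2)
    count, left, right = 0, 0, len(primes) - 1
    while left <= right:
        while primes[left] * primes[right] >= max_number:
            right -= 1
        count += right - left + 1
        left += 1
    return count


assert count_semiprimes(30) == 10  # 4, 6, 9, 10, 14, 15, 21, 22, 25, 26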
"""simple docstring"""
import unittest
import numpy as np
import torch
from diffusers import VersatileDiffusionImageVariationPipeline
from diffusers.utils.testing_utils import load_image, require_torch_gpu, slow, torch_device
_UpperCamelCase : Optional[int] = False
class a ( unittest.TestCase ):
pass
@slow
@require_torch_gpu
class a ( unittest.TestCase ):
def UpperCamelCase_ ( self ):
lowercase = VersatileDiffusionImageVariationPipeline.from_pretrained('shi-labs/versatile-diffusion' )
pipe.to(_lowerCamelCase )
pipe.set_progress_bar_config(disable=_lowerCamelCase )
lowercase = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg' )
lowercase = torch.manual_seed(0 )
lowercase = pipe(
image=_lowerCamelCase , generator=_lowerCamelCase , guidance_scale=7.5 , num_inference_steps=5_0 , output_type='numpy' , ).images
lowercase = image[0, 2_5_3:2_5_6, 2_5_3:2_5_6, -1]
assert image.shape == (1, 5_1_2, 5_1_2, 3)
lowercase = np.array([0.0_4_4_1, 0.0_4_6_9, 0.0_5_0_7, 0.0_5_7_5, 0.0_6_3_2, 0.0_6_5_0, 0.0_8_6_5, 0.0_9_0_9, 0.0_9_4_5] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
| 220 |
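# The test above compares a 3x3 corner of the generated image against
# hard-coded reference values. A minimal sketch of that assertion pattern;
# the arrays here are placeholders, not real pipeline output:
import numpy as np

image = np.zeros((1, 512, 512, 3), dtype=np.float32)  # stand-in for pipe(...).images
image_slice = image[0, 253:256, 253:256, -1]
expected_slice = np.zeros(9, dtype=np.float32)  # the nine reference values go here
assert image.shape == (1, 512, 512, 3)
assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2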
"""simple docstring"""
def _SCREAMING_SNAKE_CASE ( __snake_case : int ):
'''simple docstring'''
return sum(i for i in range(1 , number // 2 + 1 ) if number % i == 0 ) == number
if __name__ == "__main__":
print('Program to check whether a number is a Perfect number or not...')
_UpperCamelCase : Tuple = int(input('Enter number: ').strip())
print(F'''{number} is {'' if perfect(number) else 'not '}a Perfect Number.''')
| 220 | 1 |
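# The check above sums every candidate divisor up to number // 2, which is
# O(n). A sketch of the O(sqrt(n)) variant that collects divisors in pairs:
from math import isqrt


def perfect_fast(number: int) -> bool:
    if number < 2:
        return False
    total = 1  # 1 divides every number > 1
    for d in range(2, isqrt(number) + 1):
        if number % d == 0:
            total += d
            if d != number // d:
                total += number // d  # the paired divisor
    return total == number


assert perfect_fast(28) and perfect_fast(496) and not perfect_fast(27)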
"""simple docstring"""
# tests directory-specific settings - this file is run automatically
# by pytest before any tests are run
import sys
import warnings
from os.path import abspath, dirname, join
# allow having multiple repository checkouts and not needing to remember to rerun
# 'pip install -e .[dev]' when switching between checkouts and running tests.
UpperCAmelCase_ : Union[str, Any] = abspath(join(dirname(dirname(dirname(__file__))), """src"""))
sys.path.insert(1, git_repo_path)
# silence FutureWarning warnings in tests since often we can't act on them until
# they become normal warnings - i.e. the tests still need to test the current functionality
warnings.simplefilter(action="""ignore""", category=FutureWarning)
def _A (__a ) -> Union[str, Any]:
"""simple docstring"""
from transformers.testing_utils import pytest_addoption_shared
pytest_addoption_shared(__a )
def _A (__a ) -> Any:
"""simple docstring"""
from transformers.testing_utils import pytest_terminal_summary_main
SCREAMING_SNAKE_CASE_ : Optional[Any] = terminalreporter.config.getoption('''--make-reports''' )
if make_reports:
pytest_terminal_summary_main(__a , id=__a )
| 318 |
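# Minimal sketch of the two pytest hooks the conftest above delegates to
# transformers.testing_utils. The option name mirrors the one read above; the
# bodies are illustrative, not the real helpers.
def pytest_addoption(parser):
    # register `--make-reports <id>` so CI can request per-run report files
    parser.addoption("--make-reports", action="store", default=None, help="generate report files")


def pytest_terminal_summary(terminalreporter):
    make_reports = terminalreporter.config.getoption("--make-reports")
    if make_reports:
        print(f"would write report files for run id {make_reports!r}")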
"""simple docstring"""
import json
import multiprocessing
import os
import re
from collections import defaultdict
import torch
from accelerate import Accelerator
from accelerate.utils import set_seed
from arguments import HumanEvalArguments
from datasets import load_dataset, load_metric
from torch.utils.data import IterableDataset
from torch.utils.data.dataloader import DataLoader
from tqdm import tqdm
import transformers
from transformers import AutoModelForCausalLM, AutoTokenizer, HfArgumentParser, StoppingCriteria, StoppingCriteriaList
UpperCAmelCase_ : Union[str, Any] = ["""\nclass""", """\ndef""", """\n#""", """\n@""", """\nprint""", """\nif"""]
class lowerCAmelCase__ ( UpperCAmelCase__ ):
'''simple docstring'''
def __init__( self : List[Any] , lowercase_ : Tuple , lowercase_ : Optional[int] , lowercase_ : int=None , lowercase_ : Dict=1):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : List[Any] = tokenizer
SCREAMING_SNAKE_CASE_ : Optional[int] = dataset
SCREAMING_SNAKE_CASE_ : Optional[Any] = len(lowercase_) if n_tasks is None else n_tasks
SCREAMING_SNAKE_CASE_ : Optional[int] = n_copies
def __iter__( self : Dict):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : Dict = []
for task in range(self.n_tasks):
            # without strip(), the model generates commented code ...
prompts.append(self.tokenizer.eos_token + self.dataset[task]['''prompt'''].strip())
SCREAMING_SNAKE_CASE_ : Optional[Any] = self.tokenizer(lowercase_ , padding=lowercase_ , return_tensors='''pt''')
for task in range(self.n_tasks):
for _ in range(self.n_copies):
yield {
"ids": outputs.input_ids[task],
"task_id": task,
"input_len": outputs.attention_mask[task].sum(),
}
class lowerCAmelCase__ ( UpperCAmelCase__ ):
'''simple docstring'''
def __init__( self : int , lowercase_ : Dict , lowercase_ : Optional[Any] , lowercase_ : Dict):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : Any = start_length
SCREAMING_SNAKE_CASE_ : List[Any] = eof_strings
SCREAMING_SNAKE_CASE_ : List[Any] = tokenizer
def __call__( self : Optional[int] , lowercase_ : Any , lowercase_ : int , **lowercase_ : Dict):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : str = self.tokenizer.batch_decode(input_ids[:, self.start_length :])
SCREAMING_SNAKE_CASE_ : Tuple = []
for decoded_generation in decoded_generations:
done.append(any(stop_string in decoded_generation for stop_string in self.eof_strings))
return all(lowercase_)
def _A (__a ) -> Union[str, Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Dict = re.split('''(%s)''' % '''|'''.join(__a ) , __a )
# last string should be ""
return "".join(string_list[:-2] )
def _A (__a , __a , __a , __a , __a , __a=20 , **__a ) -> List[str]:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Tuple = defaultdict(__a ) # dict of list of generated tokens
for step, batch in tqdm(enumerate(__a ) ):
with torch.no_grad():
SCREAMING_SNAKE_CASE_ : Optional[int] = batch['''ids'''].shape[-1]
SCREAMING_SNAKE_CASE_ : Tuple = accelerator.unwrap_model(__a ).generate(
input_ids=batch['''ids'''][:, : batch['''input_len''']] , num_return_sequences=__a , **__a )
# each task is generated batch_size times
SCREAMING_SNAKE_CASE_ : List[Any] = batch['''task_id'''].repeat(__a )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = accelerator.pad_across_processes(
__a , dim=1 , pad_index=tokenizer.pad_token_id )
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Union[str, Any] = accelerator.gather((generated_tokens, generated_tasks) )
SCREAMING_SNAKE_CASE_ : int = generated_tokens.cpu().numpy()
SCREAMING_SNAKE_CASE_ : Optional[Any] = generated_tasks.cpu().numpy()
for task, generated_tokens in zip(__a , __a ):
gen_token_dict[task].append(__a )
SCREAMING_SNAKE_CASE_ : int = [[] for _ in range(__a )]
for task, generated_tokens in gen_token_dict.items():
for s in generated_tokens:
SCREAMING_SNAKE_CASE_ : Optional[int] = tokenizer.decode(__a , skip_special_tokens=__a , clean_up_tokenization_spaces=__a )
code_gens[task].append(remove_last_block(__a ) )
return code_gens
def _A () -> Optional[int]:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Union[str, Any] = HfArgumentParser(__a )
SCREAMING_SNAKE_CASE_ : List[Any] = parser.parse_args()
transformers.logging.set_verbosity_error()
# enables code execution in code_eval metric
SCREAMING_SNAKE_CASE_ : Any = args.HF_ALLOW_CODE_EVAL
# make sure tokenizer plays nice with multiprocessing
SCREAMING_SNAKE_CASE_ : str = '''false'''
if args.num_workers is None:
SCREAMING_SNAKE_CASE_ : Optional[Any] = multiprocessing.cpu_count()
# Use dataset load to feed to accelerate
SCREAMING_SNAKE_CASE_ : Tuple = Accelerator()
set_seed(args.seed , device_specific=__a )
# Load model and tokenizer
SCREAMING_SNAKE_CASE_ : Dict = AutoTokenizer.from_pretrained(args.model_ckpt )
SCREAMING_SNAKE_CASE_ : Dict = tokenizer.eos_token
SCREAMING_SNAKE_CASE_ : Optional[int] = AutoModelForCausalLM.from_pretrained(args.model_ckpt )
# Generation settings
SCREAMING_SNAKE_CASE_ : List[str] = {
'''do_sample''': args.do_sample,
'''temperature''': args.temperature,
'''max_new_tokens''': args.max_new_tokens,
'''top_p''': args.top_p,
'''top_k''': args.top_k,
'''stopping_criteria''': StoppingCriteriaList([EndOfFunctionCriteria(0 , __a , __a )] ),
}
# Load evaluation dataset and metric
SCREAMING_SNAKE_CASE_ : Optional[int] = load_dataset('''openai_humaneval''' )
SCREAMING_SNAKE_CASE_ : str = load_metric('''code_eval''' )
SCREAMING_SNAKE_CASE_ : int = args.num_tasks if args.num_tasks is not None else len(human_eval['''test'''] )
SCREAMING_SNAKE_CASE_ : List[str] = args.n_samples // args.batch_size
SCREAMING_SNAKE_CASE_ : Union[str, Any] = TokenizedDataset(__a , human_eval['''test'''] , n_copies=__a , n_tasks=__a )
# do not confuse args.batch_size, which is actually the num_return_sequences
SCREAMING_SNAKE_CASE_ : Optional[int] = DataLoader(__a , batch_size=1 )
# Run a quick test to see if code evaluation is enabled
try:
SCREAMING_SNAKE_CASE_ : Any = code_eval_metric.compute(references=[''''''] , predictions=[['''''']] )
except ValueError as exception:
print(
'''Code evaluation not enabled. Read the warning below carefully and then use `--HF_ALLOW_CODE_EVAL="1"`'''
''' flag to enable code evaluation.''' )
raise exception
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : int = accelerator.prepare(__a , __a )
SCREAMING_SNAKE_CASE_ : List[Any] = complete_code(
__a , __a , __a , __a , n_tasks=__a , batch_size=args.batch_size , **__a , )
if accelerator.is_main_process:
SCREAMING_SNAKE_CASE_ : int = []
for task in tqdm(range(__a ) ):
SCREAMING_SNAKE_CASE_ : Tuple = human_eval['''test'''][task]['''test''']
SCREAMING_SNAKE_CASE_ : Tuple = f'check({human_eval["test"][task]["entry_point"]})'
references.append('''\n''' + test_func + '''\n''' + entry_point )
# Evaluate completions with "code_eval" metric
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Dict = code_eval_metric.compute(
references=__a , predictions=__a , num_workers=args.num_workers )
print(f'Results: {pass_at_k}' )
# Save results to json file
with open(args.output_file , '''w''' ) as fp:
json.dump(__a , __a )
    # For some reason the following seems to be necessary sometimes for code_eval to work nicely with multiprocessing
# https://stackoverflow.com/questions/60804599/python-multiprocessing-keeps-spawning-the-whole-script
if __name__ == "__main__":
main()
| 318 | 1 |
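# De-obfuscated sketch of the truncation helper above: generation is stopped
# by EndOfFunctionCriteria at one of the EOF strings, and `remove_last_block`
# then drops everything from the last stop sequence onward.
import re

EOF_STRINGS = ["\nclass", "\ndef", "\n#", "\n@", "\nprint", "\nif"]


def remove_last_block(string: str) -> str:
    pieces = re.split("(%s)" % "|".join(EOF_STRINGS), string)
    # the capturing group keeps the delimiters; drop the final delimiter
    # and whatever trailed it
    return "".join(pieces[:-2])


completion = "    return x + 1\n\nprint(add_one(3))"
assert remove_last_block(completion) == "    return x + 1\n"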
'''simple docstring'''
import unittest
from typing import Tuple
import torch
from diffusers.utils import floats_tensor, randn_tensor, torch_all_close, torch_device
from diffusers.utils.testing_utils import require_torch
@require_torch
class A_ :
@property
def lowercase ( self : Optional[int] ):
return self.get_dummy_input()
@property
def lowercase ( self : List[str] ):
if self.block_type == "down":
return (4, 3_2, 1_6, 1_6)
elif self.block_type == "mid":
return (4, 3_2, 3_2, 3_2)
elif self.block_type == "up":
return (4, 3_2, 6_4, 6_4)
raise ValueError(f'\'{self.block_type}\' is not a supported block_type. Set it to \'up\', \'mid\', or \'down\'.' )
def lowercase ( self : str , snake_case_ : Dict=True , snake_case_ : int=False , snake_case_ : Any=False , snake_case_ : Union[str, Any]=False , ):
_UpperCAmelCase = 4
_UpperCAmelCase = 3_2
_UpperCAmelCase = (3_2, 3_2)
_UpperCAmelCase = torch.manual_seed(0 )
_UpperCAmelCase = torch.device(snake_case_ )
_UpperCAmelCase = (batch_size, num_channels) + sizes
_UpperCAmelCase = randn_tensor(snake_case_ , generator=snake_case_ , device=snake_case_ )
_UpperCAmelCase = {"hidden_states": hidden_states}
if include_temb:
_UpperCAmelCase = 1_2_8
_UpperCAmelCase = randn_tensor((batch_size, temb_channels) , generator=snake_case_ , device=snake_case_ )
if include_res_hidden_states_tuple:
_UpperCAmelCase = torch.manual_seed(1 )
_UpperCAmelCase = (randn_tensor(snake_case_ , generator=snake_case_ , device=snake_case_ ),)
if include_encoder_hidden_states:
_UpperCAmelCase = floats_tensor((batch_size, 3_2, 3_2) ).to(snake_case_ )
if include_skip_sample:
_UpperCAmelCase = randn_tensor(((batch_size, 3) + sizes) , generator=snake_case_ , device=snake_case_ )
return dummy_input
def lowercase ( self : List[str] ):
_UpperCAmelCase = {
"in_channels": 3_2,
"out_channels": 3_2,
"temb_channels": 1_2_8,
}
if self.block_type == "up":
_UpperCAmelCase = 3_2
if self.block_type == "mid":
init_dict.pop("out_channels" )
_UpperCAmelCase = self.dummy_input
return init_dict, inputs_dict
def lowercase ( self : int , snake_case_ : List[str] ):
_UpperCAmelCase , _UpperCAmelCase = self.prepare_init_args_and_inputs_for_common()
_UpperCAmelCase = self.block_class(**snake_case_ )
unet_block.to(snake_case_ )
unet_block.eval()
with torch.no_grad():
_UpperCAmelCase = unet_block(**snake_case_ )
if isinstance(snake_case_ , snake_case_ ):
_UpperCAmelCase = output[0]
self.assertEqual(output.shape , self.output_shape )
_UpperCAmelCase = output[0, -1, -3:, -3:]
_UpperCAmelCase = torch.tensor(snake_case_ ).to(snake_case_ )
assert torch_all_close(output_slice.flatten() , snake_case_ , atol=5e-3 )
@unittest.skipIf(torch_device == "mps" , "Training is not supported in mps" )
def lowercase ( self : str ):
_UpperCAmelCase , _UpperCAmelCase = self.prepare_init_args_and_inputs_for_common()
_UpperCAmelCase = self.block_class(**snake_case_ )
model.to(snake_case_ )
model.train()
_UpperCAmelCase = model(**snake_case_ )
if isinstance(snake_case_ , snake_case_ ):
_UpperCAmelCase = output[0]
_UpperCAmelCase = torch.device(snake_case_ )
_UpperCAmelCase = randn_tensor(output.shape , device=snake_case_ )
_UpperCAmelCase = torch.nn.functional.mse_loss(snake_case_ , snake_case_ )
loss.backward()
| 22 |
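# Self-contained sketch of the training smoke test above: forward pass,
# MSE against random targets, backward pass, then check that gradients were
# populated. The tiny Conv2d stands in for a UNet block.
import torch

model = torch.nn.Conv2d(32, 32, kernel_size=3, padding=1)
hidden_states = torch.randn(4, 32, 32, 32)  # (batch, channels, height, width)
output = model(hidden_states)
loss = torch.nn.functional.mse_loss(output, torch.randn_like(output))
loss.backward()
assert all(p.grad is not None for p in model.parameters())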
'''simple docstring'''
from typing import Dict, List, Optional, Tuple, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_torch_available, is_torch_tensor, logging
if is_torch_available():
import torch
lowercase__ : str = logging.get_logger(__name__)
class __lowerCAmelCase ( __magic_name__ ):
"""simple docstring"""
_snake_case : Union[str, Any] = ['pixel_values']
def __init__( self : Optional[Any] , lowerCAmelCase__ : bool = True , lowerCAmelCase__ : Optional[Dict[str, int]] = None , lowerCAmelCase__ : PILImageResampling = PILImageResampling.BILINEAR , lowerCAmelCase__ : bool = True , lowerCAmelCase__ : Dict[str, int] = None , lowerCAmelCase__ : bool = True , lowerCAmelCase__ : Union[int, float] = 1 / 255 , lowerCAmelCase__ : bool = True , lowerCAmelCase__ : Optional[Union[float, List[float]]] = None , lowerCAmelCase__ : Optional[Union[float, List[float]]] = None , **lowerCAmelCase__ : Optional[Any] , ) -> None:
'''simple docstring'''
super().__init__(**lowerCAmelCase__ )
_UpperCamelCase = size if size is not None else {'''shortest_edge''': 256}
_UpperCamelCase = get_size_dict(lowerCAmelCase__ , default_to_square=lowerCAmelCase__ )
_UpperCamelCase = crop_size if crop_size is not None else {'''height''': 224, '''width''': 224}
_UpperCamelCase = get_size_dict(lowerCAmelCase__ , param_name='''crop_size''' )
_UpperCamelCase = do_resize
_UpperCamelCase = size
_UpperCamelCase = resample
_UpperCamelCase = do_center_crop
_UpperCamelCase = crop_size
_UpperCamelCase = do_rescale
_UpperCamelCase = rescale_factor
_UpperCamelCase = do_normalize
_UpperCamelCase = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
_UpperCamelCase = image_std if image_std is not None else IMAGENET_STANDARD_STD
def snake_case__ ( self : Tuple , lowerCAmelCase__ : np.ndarray , lowerCAmelCase__ : Dict[str, int] , lowerCAmelCase__ : PILImageResampling = PILImageResampling.BICUBIC , lowerCAmelCase__ : Optional[Union[str, ChannelDimension]] = None , **lowerCAmelCase__ : Optional[Any] , ) -> np.ndarray:
'''simple docstring'''
_UpperCamelCase = get_size_dict(lowerCAmelCase__ , default_to_square=lowerCAmelCase__ )
if "shortest_edge" not in size:
raise ValueError(f"""The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}""" )
_UpperCamelCase = get_resize_output_image_size(lowerCAmelCase__ , size=size['''shortest_edge'''] , default_to_square=lowerCAmelCase__ )
return resize(lowerCAmelCase__ , size=lowerCAmelCase__ , resample=lowerCAmelCase__ , data_format=lowerCAmelCase__ , **lowerCAmelCase__ )
def snake_case__ ( self : List[Any] , lowerCAmelCase__ : np.ndarray , lowerCAmelCase__ : Dict[str, int] , lowerCAmelCase__ : Optional[Union[str, ChannelDimension]] = None , **lowerCAmelCase__ : Optional[Any] , ) -> np.ndarray:
'''simple docstring'''
_UpperCamelCase = get_size_dict(lowerCAmelCase__ )
if "height" not in size or "width" not in size:
raise ValueError(f"""The `size` parameter must contain the keys `height` and `width`. Got {size.keys()}""" )
return center_crop(lowerCAmelCase__ , size=(size['''height'''], size['''width''']) , data_format=lowerCAmelCase__ , **lowerCAmelCase__ )
def snake_case__ ( self : Dict , lowerCAmelCase__ : np.ndarray , lowerCAmelCase__ : float , lowerCAmelCase__ : Optional[Union[str, ChannelDimension]] = None , **lowerCAmelCase__ : Tuple ) -> np.ndarray:
'''simple docstring'''
return rescale(lowerCAmelCase__ , scale=lowerCAmelCase__ , data_format=lowerCAmelCase__ , **lowerCAmelCase__ )
def snake_case__ ( self : str , lowerCAmelCase__ : np.ndarray , lowerCAmelCase__ : Union[float, List[float]] , lowerCAmelCase__ : Union[float, List[float]] , lowerCAmelCase__ : Optional[Union[str, ChannelDimension]] = None , **lowerCAmelCase__ : Any , ) -> np.ndarray:
'''simple docstring'''
return normalize(lowerCAmelCase__ , mean=lowerCAmelCase__ , std=lowerCAmelCase__ , data_format=lowerCAmelCase__ , **lowerCAmelCase__ )
def snake_case__ ( self : Optional[Any] , lowerCAmelCase__ : ImageInput , lowerCAmelCase__ : Optional[bool] = None , lowerCAmelCase__ : Dict[str, int] = None , lowerCAmelCase__ : PILImageResampling = None , lowerCAmelCase__ : bool = None , lowerCAmelCase__ : Dict[str, int] = None , lowerCAmelCase__ : Optional[bool] = None , lowerCAmelCase__ : Optional[float] = None , lowerCAmelCase__ : Optional[bool] = None , lowerCAmelCase__ : Optional[Union[float, List[float]]] = None , lowerCAmelCase__ : Optional[Union[float, List[float]]] = None , lowerCAmelCase__ : Optional[Union[str, TensorType]] = None , lowerCAmelCase__ : Union[str, ChannelDimension] = ChannelDimension.FIRST , **lowerCAmelCase__ : Optional[Any] , ) -> Any:
'''simple docstring'''
_UpperCamelCase = do_resize if do_resize is not None else self.do_resize
_UpperCamelCase = size if size is not None else self.size
_UpperCamelCase = get_size_dict(lowerCAmelCase__ , default_to_square=lowerCAmelCase__ )
_UpperCamelCase = resample if resample is not None else self.resample
_UpperCamelCase = do_center_crop if do_center_crop is not None else self.do_center_crop
_UpperCamelCase = crop_size if crop_size is not None else self.crop_size
_UpperCamelCase = get_size_dict(lowerCAmelCase__ , param_name='''crop_size''' )
_UpperCamelCase = do_rescale if do_rescale is not None else self.do_rescale
_UpperCamelCase = rescale_factor if rescale_factor is not None else self.rescale_factor
_UpperCamelCase = do_normalize if do_normalize is not None else self.do_normalize
_UpperCamelCase = image_mean if image_mean is not None else self.image_mean
_UpperCamelCase = image_std if image_std is not None else self.image_std
_UpperCamelCase = make_list_of_images(lowerCAmelCase__ )
if not valid_images(lowerCAmelCase__ ):
raise ValueError(
'''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '''
'''torch.Tensor, tf.Tensor or jax.ndarray.''' )
if do_resize and size is None:
raise ValueError('''Size must be specified if do_resize is True.''' )
if do_center_crop and crop_size is None:
raise ValueError('''Crop size must be specified if do_center_crop is True.''' )
if do_rescale and rescale_factor is None:
raise ValueError('''Rescale factor must be specified if do_rescale is True.''' )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError('''Image mean and std must be specified if do_normalize is True.''' )
# All transformations expect numpy arrays.
_UpperCamelCase = [to_numpy_array(lowerCAmelCase__ ) for image in images]
if do_resize:
_UpperCamelCase = [self.resize(image=lowerCAmelCase__ , size=lowerCAmelCase__ , resample=lowerCAmelCase__ ) for image in images]
if do_center_crop:
_UpperCamelCase = [self.center_crop(image=lowerCAmelCase__ , size=lowerCAmelCase__ ) for image in images]
if do_rescale:
_UpperCamelCase = [self.rescale(image=lowerCAmelCase__ , scale=lowerCAmelCase__ ) for image in images]
if do_normalize:
_UpperCamelCase = [self.normalize(image=lowerCAmelCase__ , mean=lowerCAmelCase__ , std=lowerCAmelCase__ ) for image in images]
_UpperCamelCase = [to_channel_dimension_format(lowerCAmelCase__ , lowerCAmelCase__ ) for image in images]
_UpperCamelCase = {'''pixel_values''': images}
return BatchFeature(data=lowerCAmelCase__ , tensor_type=lowerCAmelCase__ )
def snake_case__ ( self : List[str] , lowerCAmelCase__ : List[str] , lowerCAmelCase__ : List[Tuple] = None ) -> List[str]:
'''simple docstring'''
_UpperCamelCase = outputs.logits
# Resize logits and compute semantic segmentation maps
if target_sizes is not None:
if len(lowerCAmelCase__ ) != len(lowerCAmelCase__ ):
raise ValueError(
'''Make sure that you pass in as many target sizes as the batch dimension of the logits''' )
if is_torch_tensor(lowerCAmelCase__ ):
_UpperCamelCase = target_sizes.numpy()
_UpperCamelCase = []
for idx in range(len(lowerCAmelCase__ ) ):
_UpperCamelCase = torch.nn.functional.interpolate(
logits[idx].unsqueeze(dim=0 ) , size=target_sizes[idx] , mode='''bilinear''' , align_corners=lowerCAmelCase__ )
_UpperCamelCase = resized_logits[0].argmax(dim=0 )
semantic_segmentation.append(lowerCAmelCase__ )
else:
_UpperCamelCase = logits.argmax(dim=1 )
_UpperCamelCase = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0] )]
return semantic_segmentation
| 324 | 0 |
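# Sketch of the "shortest_edge" resize rule used above: scale so the shorter
# side equals `size` while preserving the aspect ratio. This approximates what
# get_resize_output_image_size computes when default_to_square=False.
def shortest_edge_size(height: int, width: int, size: int) -> tuple:
    short, long = (height, width) if height <= width else (width, height)
    new_short, new_long = size, int(size * long / short)
    return (new_short, new_long) if height <= width else (new_long, new_short)


assert shortest_edge_size(480, 640, 256) == (256, 341)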
import importlib
import math
import os
from dataclasses import dataclass
from enum import Enum
from typing import Any, Dict, Optional, Tuple, Union
import flax
import jax.numpy as jnp
from ..utils import BaseOutput
UpperCamelCase__ : int = """scheduler_config.json"""
class lowerCamelCase_ ( a_ ):
SCREAMING_SNAKE_CASE_ = 1
SCREAMING_SNAKE_CASE_ = 2
SCREAMING_SNAKE_CASE_ = 3
SCREAMING_SNAKE_CASE_ = 4
SCREAMING_SNAKE_CASE_ = 5
@dataclass
class lowerCamelCase_ ( a_ ):
SCREAMING_SNAKE_CASE_ = 42
class lowerCamelCase_ :
SCREAMING_SNAKE_CASE_ = SCHEDULER_CONFIG_NAME
SCREAMING_SNAKE_CASE_ = ['dtype']
SCREAMING_SNAKE_CASE_ = []
SCREAMING_SNAKE_CASE_ = True
@classmethod
def SCREAMING_SNAKE_CASE_ ( cls : Union[str, Any] ,__lowerCamelCase : Dict[str, Any] = None ,__lowerCamelCase : Optional[str] = None ,__lowerCamelCase : str=False ,**__lowerCamelCase : Union[str, Any] ,):
'''simple docstring'''
a , a = cls.load_config(
pretrained_model_name_or_path=__lowerCamelCase ,subfolder=__lowerCamelCase ,return_unused_kwargs=__lowerCamelCase ,**__lowerCamelCase ,)
a , a = cls.from_config(__lowerCamelCase ,return_unused_kwargs=__lowerCamelCase ,**__lowerCamelCase )
if hasattr(__lowerCamelCase ,'''create_state''' ) and getattr(__lowerCamelCase ,'''has_state''' ,__lowerCamelCase ):
a = scheduler.create_state()
if return_unused_kwargs:
return scheduler, state, unused_kwargs
return scheduler, state
def SCREAMING_SNAKE_CASE_ ( self : Dict ,__lowerCamelCase : Union[str, os.PathLike] ,__lowerCamelCase : bool = False ,**__lowerCamelCase : List[Any] ):
'''simple docstring'''
self.save_config(save_directory=__lowerCamelCase ,push_to_hub=__lowerCamelCase ,**__lowerCamelCase )
@property
def SCREAMING_SNAKE_CASE_ ( self : List[Any] ):
'''simple docstring'''
return self._get_compatibles()
@classmethod
def SCREAMING_SNAKE_CASE_ ( cls : Optional[Any] ):
'''simple docstring'''
a = list(set([cls.__name__] + cls._compatibles ) )
a = importlib.import_module(__name__.split('''.''' )[0] )
a = [
getattr(__lowerCamelCase ,__lowerCamelCase ) for c in compatible_classes_str if hasattr(__lowerCamelCase ,__lowerCamelCase )
]
return compatible_classes
def SCREAMING_SNAKE_CASE__ ( snake_case_, snake_case_ ) -> jnp.ndarray:
"""simple docstring"""
assert len(snake_case_ ) >= x.ndim
return jnp.broadcast_to(x.reshape(x.shape + (1,) * (len(snake_case_ ) - x.ndim) ), snake_case_ )
def SCREAMING_SNAKE_CASE__ ( snake_case_, snake_case_=0.999, snake_case_=jnp.floataa ) -> jnp.ndarray:
"""simple docstring"""
def alpha_bar(snake_case_ ):
return math.cos((time_step + 0.008) / 1.008 * math.pi / 2 ) ** 2
a = []
for i in range(snake_case_ ):
a = i / num_diffusion_timesteps
a = (i + 1) / num_diffusion_timesteps
betas.append(min(1 - alpha_bar(snake_case_ ) / alpha_bar(snake_case_ ), snake_case_ ) )
return jnp.array(snake_case_, dtype=snake_case_ )
@flax.struct.dataclass
class lowerCamelCase_ :
SCREAMING_SNAKE_CASE_ = 42
SCREAMING_SNAKE_CASE_ = 42
SCREAMING_SNAKE_CASE_ = 42
@classmethod
def SCREAMING_SNAKE_CASE_ ( cls : Optional[Any] ,__lowerCamelCase : str ):
'''simple docstring'''
a = scheduler.config
if config.trained_betas is not None:
a = jnp.asarray(config.trained_betas ,dtype=scheduler.dtype )
elif config.beta_schedule == "linear":
a = jnp.linspace(config.beta_start ,config.beta_end ,config.num_train_timesteps ,dtype=scheduler.dtype )
elif config.beta_schedule == "scaled_linear":
# this schedule is very specific to the latent diffusion model.
a = (
jnp.linspace(
config.beta_start**0.5 ,config.beta_end**0.5 ,config.num_train_timesteps ,dtype=scheduler.dtype )
** 2
)
elif config.beta_schedule == "squaredcos_cap_v2":
# Glide cosine schedule
a = betas_for_alpha_bar(config.num_train_timesteps ,dtype=scheduler.dtype )
else:
raise NotImplementedError(
F"""beta_schedule {config.beta_schedule} is not implemented for scheduler {scheduler.__class__.__name__}""" )
a = 1.0 - betas
a = jnp.cumprod(__lowerCamelCase ,axis=0 )
return cls(
alphas=__lowerCamelCase ,betas=__lowerCamelCase ,alphas_cumprod=__lowerCamelCase ,)
def SCREAMING_SNAKE_CASE__ ( snake_case_, snake_case_, snake_case_, snake_case_ ) -> Optional[Any]:
"""simple docstring"""
a = state.alphas_cumprod
a = alphas_cumprod[timesteps] ** 0.5
a = sqrt_alpha_prod.flatten()
a = broadcast_to_shape_from_left(snake_case_, original_samples.shape )
a = (1 - alphas_cumprod[timesteps]) ** 0.5
a = sqrt_one_minus_alpha_prod.flatten()
a = broadcast_to_shape_from_left(snake_case_, original_samples.shape )
return sqrt_alpha_prod, sqrt_one_minus_alpha_prod
def SCREAMING_SNAKE_CASE__ ( snake_case_, snake_case_, snake_case_, snake_case_ ) -> Tuple:
"""simple docstring"""
a , a = get_sqrt_alpha_prod(snake_case_, snake_case_, snake_case_, snake_case_ )
a = sqrt_alpha_prod * original_samples + sqrt_one_minus_alpha_prod * noise
return noisy_samples
def SCREAMING_SNAKE_CASE__ ( snake_case_, snake_case_, snake_case_, snake_case_ ) -> Dict:
"""simple docstring"""
a , a = get_sqrt_alpha_prod(snake_case_, snake_case_, snake_case_, snake_case_ )
a = sqrt_alpha_prod * noise - sqrt_one_minus_alpha_prod * sample
return velocity
| 330 |
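# NumPy demo of the left-broadcasting trick in broadcast_to_shape_from_left
# above: per-sample scalars gathered by timestep get trailing singleton axes
# so they multiply a whole batch of samples at once.
import numpy as np

samples = np.ones((2, 3, 8, 8))  # (batch, channels, height, width)
coeff = np.array([0.5, 0.25])    # one scalar per batch element
coeff = coeff.reshape(coeff.shape + (1,) * (samples.ndim - coeff.ndim))
assert coeff.shape == (2, 1, 1, 1)
scaled = coeff * samples  # broadcasts over channels, height, width
assert scaled[0].max() == 0.5 and scaled[1].max() == 0.25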
def SCREAMING_SNAKE_CASE__ ( snake_case_ ) -> Union[str, Any]:
"""simple docstring"""
stooge(snake_case_, 0, len(snake_case_ ) - 1 )
return arr
def SCREAMING_SNAKE_CASE__ ( snake_case_, snake_case_, snake_case_ ) -> Optional[Any]:
"""simple docstring"""
if i >= h:
return
# If first element is smaller than the last then swap them
if arr[i] > arr[h]:
a , a = arr[h], arr[i]
# If there are more than 2 elements in the array
if h - i + 1 > 2:
        a = (h - i + 1) // 3
# Recursively sort first 2/3 elements
stooge(snake_case_, snake_case_, (h - t) )
# Recursively sort last 2/3 elements
stooge(snake_case_, i + t, (snake_case_) )
# Recursively sort first 2/3 elements
stooge(snake_case_, snake_case_, (h - t) )
if __name__ == "__main__":
UpperCamelCase__ : Dict = input("""Enter numbers separated by a comma:\n""").strip()
UpperCamelCase__ : Optional[int] = [int(item) for item in user_input.split(""",""")]
print(stooge_sort(unsorted))
| 330 | 1 |
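# De-obfuscated, runnable version of the stooge sort above (the mangled
# recursive calls lose their real argument names; this restores them):
def stooge_sort(arr: list) -> list:
    _stooge(arr, 0, len(arr) - 1)
    return arr


def _stooge(arr: list, i: int, h: int) -> None:
    if i >= h:
        return
    # if the first element is larger than the last, swap them
    if arr[i] > arr[h]:
        arr[i], arr[h] = arr[h], arr[i]
    if h - i + 1 > 2:
        t = (h - i + 1) // 3
        _stooge(arr, i, h - t)  # first 2/3
        _stooge(arr, i + t, h)  # last 2/3
        _stooge(arr, i, h - t)  # first 2/3 again


assert stooge_sort([2, 4, 5, 3, 1]) == [1, 2, 3, 4, 5]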
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
lowerCamelCase_ = {
'''configuration_longformer''': [
'''LONGFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''LongformerConfig''',
'''LongformerOnnxConfig''',
],
'''tokenization_longformer''': ['''LongformerTokenizer'''],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase_ = ['''LongformerTokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase_ = [
'''LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''LongformerForMaskedLM''',
'''LongformerForMultipleChoice''',
'''LongformerForQuestionAnswering''',
'''LongformerForSequenceClassification''',
'''LongformerForTokenClassification''',
'''LongformerModel''',
'''LongformerPreTrainedModel''',
'''LongformerSelfAttention''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase_ = [
'''TF_LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFLongformerForMaskedLM''',
'''TFLongformerForMultipleChoice''',
'''TFLongformerForQuestionAnswering''',
'''TFLongformerForSequenceClassification''',
'''TFLongformerForTokenClassification''',
'''TFLongformerModel''',
'''TFLongformerPreTrainedModel''',
'''TFLongformerSelfAttention''',
]
if TYPE_CHECKING:
from .configuration_longformer import (
LONGFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
LongformerConfig,
LongformerOnnxConfig,
)
from .tokenization_longformer import LongformerTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_longformer_fast import LongformerTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_longformer import (
LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
LongformerForMaskedLM,
LongformerForMultipleChoice,
LongformerForQuestionAnswering,
LongformerForSequenceClassification,
LongformerForTokenClassification,
LongformerModel,
LongformerPreTrainedModel,
LongformerSelfAttention,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_longformer import (
TF_LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TFLongformerForMaskedLM,
TFLongformerForMultipleChoice,
TFLongformerForQuestionAnswering,
TFLongformerForSequenceClassification,
TFLongformerForTokenClassification,
TFLongformerModel,
TFLongformerPreTrainedModel,
TFLongformerSelfAttention,
)
else:
import sys
lowerCamelCase_ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 79 |
"""simple docstring"""
def lowercase ( _SCREAMING_SNAKE_CASE : list ):
'''simple docstring'''
if len(_SCREAMING_SNAKE_CASE ) <= 1:
return lst
_UpperCAmelCase = 1
while i < len(_SCREAMING_SNAKE_CASE ):
if lst[i - 1] <= lst[i]:
i += 1
else:
_UpperCAmelCase , _UpperCAmelCase = lst[i], lst[i - 1]
i -= 1
if i == 0:
_UpperCAmelCase = 1
return lst
if __name__ == "__main__":
__A : Dict = input("Enter numbers separated by a comma:\n").strip()
__A : List[Any] = [int(item) for item in user_input.split(",")]
print(gnome_sort(unsorted))
| 260 | 0 |
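# The definition above lost its real name (`gnome_sort`) to the obfuscation
# even though the call site still uses it; a runnable version for reference:
def gnome_sort(lst: list) -> list:
    if len(lst) <= 1:
        return lst
    i = 1
    while i < len(lst):
        if lst[i - 1] <= lst[i]:
            i += 1  # in order: step forward
        else:
            lst[i - 1], lst[i] = lst[i], lst[i - 1]  # swap and step back
            i -= 1
            if i == 0:
                i = 1
    return lst


assert gnome_sort([3, 1, 2, 5, 4]) == [1, 2, 3, 4, 5]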
'''simple docstring'''
# tests directory-specific settings - this file is run automatically
# by pytest before any tests are run
import doctest
import sys
import warnings
from os.path import abspath, dirname, join
import _pytest
from transformers.testing_utils import HfDoctestModule, HfDocTestParser
# allow having multiple repository checkouts and not needing to remember to rerun
# 'pip install -e .[dev]' when switching between checkouts and running tests.
a_ = abspath(join(dirname(__file__), 'src'))
sys.path.insert(1, git_repo_path)
# silence FutureWarning warnings in tests since often we can't act on them until
# they become normal warnings - i.e. the tests still need to test the current functionality
warnings.simplefilter(action='ignore', category=FutureWarning)
def _a( UpperCamelCase__ : Dict ):
'''simple docstring'''
config.addinivalue_line(
'''markers''', '''is_pt_tf_cross_test: mark test to run only when PT and TF interactions are tested''' )
config.addinivalue_line(
'''markers''', '''is_pt_flax_cross_test: mark test to run only when PT and FLAX interactions are tested''' )
config.addinivalue_line('''markers''', '''is_pipeline_test: mark test to run only when pipelines are tested''' )
config.addinivalue_line('''markers''', '''is_staging_test: mark test to run only in the staging environment''' )
config.addinivalue_line('''markers''', '''accelerate_tests: mark test that require accelerate''' )
config.addinivalue_line('''markers''', '''tool_tests: mark the tool tests that are run on their specific schedule''' )
def _a( UpperCamelCase__ : Optional[Any] ):
'''simple docstring'''
from transformers.testing_utils import pytest_addoption_shared
pytest_addoption_shared(UpperCamelCase__ )
def _a( UpperCamelCase__ : Optional[int] ):
'''simple docstring'''
from transformers.testing_utils import pytest_terminal_summary_main
SCREAMING_SNAKE_CASE__ : Union[str, Any] =terminalreporter.config.getoption('''--make-reports''' )
if make_reports:
pytest_terminal_summary_main(UpperCamelCase__, id=UpperCamelCase__ )
def _a( UpperCamelCase__ : Dict, UpperCamelCase__ : Tuple ):
'''simple docstring'''
if exitstatus == 5:
SCREAMING_SNAKE_CASE__ : Optional[int] =0
# Doctest custom flag to ignore output.
a_ = doctest.register_optionflag('IGNORE_RESULT')
a_ = doctest.OutputChecker
class __SCREAMING_SNAKE_CASE ( lowerCamelCase ):
def __magic_name__ ( self : str , __lowercase : Tuple , __lowercase : List[Any] , __lowercase : Dict ) -> List[str]:
if IGNORE_RESULT & optionflags:
return True
return OutputChecker.check_output(self , __lowercase , __lowercase , __lowercase )
a_ = CustomOutputChecker
a_ = HfDoctestModule
a_ = HfDocTestParser | 222 |
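# Minimal, self-contained version of the IGNORE_RESULT doctest pattern set up
# above: register a custom option flag, then accept any output when a doctest
# carries that flag.
import doctest

IGNORE_RESULT = doctest.register_optionflag("IGNORE_RESULT")


class IgnoreResultChecker(doctest.OutputChecker):
    def check_output(self, want, got, optionflags):
        if IGNORE_RESULT & optionflags:
            return True  # the doctest opted out of output comparison
        return super().check_output(want, got, optionflags)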
'''simple docstring'''
import json
import os
from typing import Dict, List, Optional, Tuple
import regex as re
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
a_ = logging.get_logger(__name__)
a_ = {
'vocab_file': 'vocab.json',
'merges_file': 'merges.txt',
'tokenizer_config_file': 'tokenizer_config.json',
}
a_ = {
'vocab_file': {
'facebook/blenderbot_small-90M': 'https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/vocab.json'
},
'merges_file': {
'facebook/blenderbot_small-90M': 'https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/merges.txt'
},
'tokenizer_config_file': {
'facebook/blenderbot_small-90M': (
'https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/tokenizer_config.json'
)
},
}
a_ = {'facebook/blenderbot_small-90M': 5_1_2}
def _a( UpperCamelCase__ : Dict ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Union[str, Any] =set()
SCREAMING_SNAKE_CASE__ : Optional[Any] =word[0]
for char in word[1:]:
pairs.add((prev_char, char) )
SCREAMING_SNAKE_CASE__ : Optional[Any] =char
SCREAMING_SNAKE_CASE__ : Any =set(UpperCamelCase__ )
return pairs
class __SCREAMING_SNAKE_CASE ( lowerCamelCase ):
snake_case_ = VOCAB_FILES_NAMES
snake_case_ = PRETRAINED_VOCAB_FILES_MAP
snake_case_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
snake_case_ = ["""input_ids""", """attention_mask"""]
def __init__( self : Tuple , __lowercase : int , __lowercase : Optional[int] , __lowercase : List[str]="__start__" , __lowercase : Union[str, Any]="__end__" , __lowercase : str="__unk__" , __lowercase : Union[str, Any]="__null__" , **__lowercase : List[str] , ) -> Optional[int]:
super().__init__(unk_token=__lowercase , bos_token=__lowercase , eos_token=__lowercase , pad_token=__lowercase , **__lowercase )
with open(__lowercase , encoding='''utf-8''' ) as vocab_handle:
SCREAMING_SNAKE_CASE__ : Any =json.load(__lowercase )
SCREAMING_SNAKE_CASE__ : Dict ={v: k for k, v in self.encoder.items()}
with open(__lowercase , encoding='''utf-8''' ) as merges_handle:
SCREAMING_SNAKE_CASE__ : int =merges_handle.read().split('''\n''' )[1:-1]
SCREAMING_SNAKE_CASE__ : List[Any] =[tuple(merge.split() ) for merge in merges]
SCREAMING_SNAKE_CASE__ : int =dict(zip(__lowercase , range(len(__lowercase ) ) ) )
SCREAMING_SNAKE_CASE__ : Dict ={}
@property
def __magic_name__ ( self : Any ) -> int:
return len(self.encoder )
def __magic_name__ ( self : Tuple ) -> Dict:
return dict(self.encoder , **self.added_tokens_encoder )
def __magic_name__ ( self : Optional[Any] , __lowercase : str ) -> str:
if token in self.cache:
return self.cache[token]
SCREAMING_SNAKE_CASE__ : Union[str, Any] =re.sub('''([.,!?()])''' , r''' \1''' , __lowercase )
SCREAMING_SNAKE_CASE__ : int =re.sub('''(\')''' , r''' \1 ''' , __lowercase )
SCREAMING_SNAKE_CASE__ : Dict =re.sub(r'''\s{2,}''' , ''' ''' , __lowercase )
if "\n" in token:
SCREAMING_SNAKE_CASE__ : List[str] =token.replace('''\n''' , ''' __newln__''' )
SCREAMING_SNAKE_CASE__ : Dict =token.split(''' ''' )
SCREAMING_SNAKE_CASE__ : Dict =[]
for token in tokens:
if not len(__lowercase ):
continue
SCREAMING_SNAKE_CASE__ : Union[str, Any] =token.lower()
SCREAMING_SNAKE_CASE__ : Optional[Any] =tuple(__lowercase )
SCREAMING_SNAKE_CASE__ : Optional[int] =tuple(list(word[:-1] ) + [word[-1] + '''</w>'''] )
SCREAMING_SNAKE_CASE__ : Optional[Any] =get_pairs(__lowercase )
if not pairs:
words.append(__lowercase )
continue
while True:
SCREAMING_SNAKE_CASE__ : int =min(__lowercase , key=lambda __lowercase : self.bpe_ranks.get(__lowercase , float('''inf''' ) ) )
if bigram not in self.bpe_ranks:
break
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Any =bigram
SCREAMING_SNAKE_CASE__ : List[Any] =[]
SCREAMING_SNAKE_CASE__ : Tuple =0
while i < len(__lowercase ):
try:
SCREAMING_SNAKE_CASE__ : Optional[Any] =word.index(__lowercase , __lowercase )
new_word.extend(word[i:j] )
SCREAMING_SNAKE_CASE__ : Optional[int] =j
except ValueError:
new_word.extend(word[i:] )
break
if word[i] == first and i < len(__lowercase ) - 1 and word[i + 1] == second:
new_word.append(first + second )
i += 2
else:
new_word.append(word[i] )
i += 1
SCREAMING_SNAKE_CASE__ : int =tuple(__lowercase )
SCREAMING_SNAKE_CASE__ : int =new_word
if len(__lowercase ) == 1:
break
else:
SCREAMING_SNAKE_CASE__ : Any =get_pairs(__lowercase )
SCREAMING_SNAKE_CASE__ : Optional[int] ='''@@ '''.join(__lowercase )
SCREAMING_SNAKE_CASE__ : List[Any] =word[:-4]
SCREAMING_SNAKE_CASE__ : str =word
words.append(__lowercase )
return " ".join(__lowercase )
def __magic_name__ ( self : str , __lowercase : str ) -> List[str]:
SCREAMING_SNAKE_CASE__ : Dict =[]
SCREAMING_SNAKE_CASE__ : List[Any] =re.findall(r'''\S+\n?''' , __lowercase )
for token in words:
split_tokens.extend(list(self.bpe(__lowercase ).split(''' ''' ) ) )
return split_tokens
def __magic_name__ ( self : List[Any] , __lowercase : str ) -> int:
SCREAMING_SNAKE_CASE__ : int =token.lower()
return self.encoder.get(__lowercase , self.encoder.get(self.unk_token ) )
def __magic_name__ ( self : int , __lowercase : int ) -> str:
return self.decoder.get(__lowercase , self.unk_token )
def __magic_name__ ( self : List[Any] , __lowercase : List[str] ) -> str:
SCREAMING_SNAKE_CASE__ : Optional[int] =''' '''.join(__lowercase ).replace('''@@ ''' , '''''' ).strip()
return out_string
def __magic_name__ ( self : Tuple , __lowercase : str , __lowercase : Optional[str] = None ) -> Tuple[str]:
if not os.path.isdir(__lowercase ):
logger.error(F"Vocabulary path ({save_directory}) should be a directory" )
return
SCREAMING_SNAKE_CASE__ : str =os.path.join(
__lowercase , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
SCREAMING_SNAKE_CASE__ : int =os.path.join(
__lowercase , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''merges_file'''] )
with open(__lowercase , '''w''' , encoding='''utf-8''' ) as f:
f.write(json.dumps(self.encoder , indent=2 , sort_keys=__lowercase , ensure_ascii=__lowercase ) + '''\n''' )
SCREAMING_SNAKE_CASE__ : List[str] =0
with open(__lowercase , '''w''' , encoding='''utf-8''' ) as writer:
writer.write('''#version: 0.2\n''' )
for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda __lowercase : kv[1] ):
if index != token_index:
logger.warning(
F"Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."
''' Please check that the tokenizer is not corrupted!''' )
SCREAMING_SNAKE_CASE__ : Any =token_index
writer.write(''' '''.join(__lowercase ) + '''\n''' )
index += 1
return vocab_file, merge_file | 222 | 1 |
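# A compact, runnable sketch of the BPE loop implemented above: append the
# end-of-word marker, then repeatedly merge the highest-ranked adjacent pair
# until no ranked pair remains. Names are mine; the control flow mirrors the
# tokenizer's `bpe` method.
def get_pairs(word: tuple) -> set:
    return {(a, b) for a, b in zip(word, word[1:])}


def bpe(token: str, ranks: dict) -> str:
    word = tuple(token[:-1]) + (token[-1] + "</w>",)
    while True:
        ranked = [p for p in get_pairs(word) if p in ranks]
        if not ranked:
            break
        first, second = min(ranked, key=ranks.get)
        merged, i = [], 0
        while i < len(word):
            if i < len(word) - 1 and word[i] == first and word[i + 1] == second:
                merged.append(first + second)
                i += 2
            else:
                merged.append(word[i])
                i += 1
        word = tuple(merged)
    return "@@ ".join(word)[:-4]  # strip the trailing "</w>"


assert bpe("low", {("l", "o"): 0, ("lo", "w</w>"): 1}) == "low"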
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Audio, ClassLabel, Features
from .base import TaskTemplate
@dataclass(frozen=lowerCAmelCase )
class _UpperCamelCase ( lowerCAmelCase ):
UpperCAmelCase_ = field(default="""audio-classification""" , metadata={"""include_in_asdict_even_if_is_default""": True} )
UpperCAmelCase_ = Features({"""audio""": Audio()} )
UpperCAmelCase_ = Features({"""labels""": ClassLabel} )
UpperCAmelCase_ = "audio"
UpperCAmelCase_ = "labels"
def UpperCAmelCase_ ( self :List[str] , lowerCamelCase :Optional[Any] ) -> Union[str, Any]:
if self.label_column not in features:
raise ValueError(f'''Column {self.label_column} is not present in features.''' )
if not isinstance(features[self.label_column] , lowerCamelCase ):
raise ValueError(f'''Column {self.label_column} is not a ClassLabel.''' )
UpperCAmelCase__ = copy.deepcopy(self )
UpperCAmelCase__ = self.label_schema.copy()
UpperCAmelCase__ = features[self.label_column]
UpperCAmelCase__ = label_schema
return task_template
@property
def UpperCAmelCase_ ( self :Tuple ) -> Dict[str, str]:
return {
self.audio_column: "audio",
self.label_column: "labels",
}
| 169 |
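# Sketch of the pattern the task template above uses: a frozen dataclass whose
# alignment step returns a modified copy. The original copies with deepcopy;
# `dataclasses.replace` is an equivalent idiom for frozen instances.
from dataclasses import dataclass, replace


@dataclass(frozen=True)
class AudioTask:
    task: str = "audio-classification"
    audio_column: str = "audio"
    label_column: str = "labels"

    def align_with_features(self, label_column: str) -> "AudioTask":
        return replace(self, label_column=label_column)


template = AudioTask()
assert template.align_with_features("genre").label_column == "genre"
assert template.label_column == "labels"  # the original instance is untouched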
from collections import defaultdict
from typing import Optional
from ..image_utils import load_image
from ..utils import (
add_end_docstrings,
is_torch_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, ChunkPipeline
if is_torch_available():
import torch
from ..models.auto.modeling_auto import MODEL_FOR_MASK_GENERATION_MAPPING
_lowerCAmelCase : Union[str, Any] = logging.get_logger(__name__)
@add_end_docstrings(lowerCAmelCase )
class _UpperCamelCase ( lowerCAmelCase ):
def __init__( self :Optional[int] , **lowerCamelCase :Dict ) -> int:
super().__init__(**lowerCamelCase )
requires_backends(self , "vision" )
requires_backends(self , "torch" )
if self.framework != "pt":
raise ValueError(f'''The {self.__class__} is only available in PyTorch.''' )
self.check_model_type(lowerCamelCase )
def UpperCAmelCase_ ( self :Any , **lowerCamelCase :int ) -> int:
UpperCAmelCase__ = {}
UpperCAmelCase__ = {}
UpperCAmelCase__ = {}
# preprocess args
if "points_per_batch" in kwargs:
UpperCAmelCase__ = kwargs["points_per_batch"]
if "points_per_crop" in kwargs:
UpperCAmelCase__ = kwargs["points_per_crop"]
if "crops_n_layers" in kwargs:
UpperCAmelCase__ = kwargs["crops_n_layers"]
if "crop_overlap_ratio" in kwargs:
UpperCAmelCase__ = kwargs["crop_overlap_ratio"]
if "crop_n_points_downscale_factor" in kwargs:
UpperCAmelCase__ = kwargs["crop_n_points_downscale_factor"]
# postprocess args
if "pred_iou_thresh" in kwargs:
UpperCAmelCase__ = kwargs["pred_iou_thresh"]
if "stability_score_offset" in kwargs:
UpperCAmelCase__ = kwargs["stability_score_offset"]
if "mask_threshold" in kwargs:
UpperCAmelCase__ = kwargs["mask_threshold"]
if "stability_score_thresh" in kwargs:
UpperCAmelCase__ = kwargs["stability_score_thresh"]
if "crops_nms_thresh" in kwargs:
UpperCAmelCase__ = kwargs["crops_nms_thresh"]
if "output_rle_mask" in kwargs:
UpperCAmelCase__ = kwargs["output_rle_mask"]
if "output_bboxes_mask" in kwargs:
UpperCAmelCase__ = kwargs["output_bboxes_mask"]
return preprocess_kwargs, forward_params, postprocess_kwargs
def __call__( self :Union[str, Any] , lowerCamelCase :Union[str, Any] , *lowerCamelCase :str , lowerCamelCase :Optional[Any]=None , lowerCamelCase :int=None , **lowerCamelCase :Optional[Any] ) -> str:
return super().__call__(lowerCamelCase , *lowerCamelCase , num_workers=lowerCamelCase , batch_size=lowerCamelCase , **lowerCamelCase )
def UpperCAmelCase_ ( self :Any , lowerCamelCase :str , lowerCamelCase :Optional[Any]=64 , lowerCamelCase :int = 0 , lowerCamelCase :float = 512 / 1500 , lowerCamelCase :Optional[int] = 32 , lowerCamelCase :Optional[int] = 1 , ) -> Any:
UpperCAmelCase__ = load_image(lowerCamelCase )
UpperCAmelCase__ = self.image_processor.size["longest_edge"]
UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ = self.image_processor.generate_crop_boxes(
lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase )
UpperCAmelCase__ = self.image_processor(images=lowerCamelCase , return_tensors="pt" )
with self.device_placement():
if self.framework == "pt":
UpperCAmelCase__ = self.get_inference_context()
with inference_context():
UpperCAmelCase__ = self._ensure_tensor_on_device(lowerCamelCase , device=self.device )
UpperCAmelCase__ = self.model.get_image_embeddings(model_inputs.pop("pixel_values" ) )
UpperCAmelCase__ = image_embeddings
UpperCAmelCase__ = grid_points.shape[1]
UpperCAmelCase__ = points_per_batch if points_per_batch is not None else n_points
if points_per_batch <= 0:
raise ValueError(
"Cannot have points_per_batch<=0. Must be >=1 to returned batched outputs. "
"To return all points at once, set points_per_batch to None" )
for i in range(0 , lowerCamelCase , lowerCamelCase ):
UpperCAmelCase__ = grid_points[:, i : i + points_per_batch, :, :]
UpperCAmelCase__ = input_labels[:, i : i + points_per_batch]
UpperCAmelCase__ = i == n_points - points_per_batch
yield {
"input_points": batched_points,
"input_labels": labels,
"input_boxes": crop_boxes,
"is_last": is_last,
**model_inputs,
}
def UpperCAmelCase_ ( self :Tuple , lowerCamelCase :List[str] , lowerCamelCase :Union[str, Any]=0.88 , lowerCamelCase :Optional[Any]=0.95 , lowerCamelCase :Tuple=0 , lowerCamelCase :Union[str, Any]=1 , ) -> Dict:
UpperCAmelCase__ = model_inputs.pop("input_boxes" )
UpperCAmelCase__ = model_inputs.pop("is_last" )
UpperCAmelCase__ = model_inputs.pop("original_sizes" ).tolist()
UpperCAmelCase__ = model_inputs.pop("reshaped_input_sizes" ).tolist()
UpperCAmelCase__ = self.model(**lowerCamelCase )
        # post-processing happens here in order to avoid CPU/GPU copies of ALL the masks
UpperCAmelCase__ = model_outputs["pred_masks"]
UpperCAmelCase__ = self.image_processor.post_process_masks(
lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , binarize=lowerCamelCase )
UpperCAmelCase__ = model_outputs["iou_scores"]
UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ = self.image_processor.filter_masks(
masks[0] , iou_scores[0] , original_sizes[0] , input_boxes[0] , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , )
return {
"masks": masks,
"is_last": is_last,
"boxes": boxes,
"iou_scores": iou_scores,
}
def UpperCAmelCase_ ( self :int , lowerCamelCase :str , lowerCamelCase :Union[str, Any]=False , lowerCamelCase :Union[str, Any]=False , lowerCamelCase :int=0.7 , ) -> Union[str, Any]:
UpperCAmelCase__ = []
UpperCAmelCase__ = []
UpperCAmelCase__ = []
for model_output in model_outputs:
all_scores.append(model_output.pop("iou_scores" ) )
all_masks.extend(model_output.pop("masks" ) )
all_boxes.append(model_output.pop("boxes" ) )
UpperCAmelCase__ = torch.cat(lowerCamelCase )
UpperCAmelCase__ = torch.cat(lowerCamelCase )
UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ = self.image_processor.post_process_for_mask_generation(
lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase )
UpperCAmelCase__ = defaultdict(lowerCamelCase )
for output in model_outputs:
for k, v in output.items():
extra[k].append(lowerCamelCase )
UpperCAmelCase__ = {}
if output_rle_mask:
UpperCAmelCase__ = rle_mask
if output_bboxes_mask:
UpperCAmelCase__ = bounding_boxes
return {"masks": output_masks, "scores": iou_scores, **optional, **extra}
| 169 | 1 |
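# Sketch of the point-batching loop in `preprocess` above: grid points are
# sliced into chunks of `points_per_batch`, and the final chunk is flagged so
# postprocessing knows when to aggregate results.
def chunk_points(n_points: int, points_per_batch: int):
    if points_per_batch <= 0:
        raise ValueError("points_per_batch must be >= 1")
    for i in range(0, n_points, points_per_batch):
        is_last = i + points_per_batch >= n_points
        yield i, min(i + points_per_batch, n_points), is_last


assert list(chunk_points(10, 4)) == [(0, 4, False), (4, 8, False), (8, 10, True)]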
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_torch_available,
)
a_ = {
'configuration_speecht5': [
'SPEECHT5_PRETRAINED_CONFIG_ARCHIVE_MAP',
'SPEECHT5_PRETRAINED_HIFIGAN_CONFIG_ARCHIVE_MAP',
'SpeechT5Config',
'SpeechT5HifiGanConfig',
],
'feature_extraction_speecht5': ['SpeechT5FeatureExtractor'],
'processing_speecht5': ['SpeechT5Processor'],
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ = ['SpeechT5Tokenizer']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ = [
'SPEECHT5_PRETRAINED_MODEL_ARCHIVE_LIST',
'SpeechT5ForSpeechToText',
'SpeechT5ForSpeechToSpeech',
'SpeechT5ForTextToSpeech',
'SpeechT5Model',
'SpeechT5PreTrainedModel',
'SpeechT5HifiGan',
]
if TYPE_CHECKING:
from .configuration_speechta import (
SPEECHT5_PRETRAINED_CONFIG_ARCHIVE_MAP,
SPEECHT5_PRETRAINED_HIFIGAN_CONFIG_ARCHIVE_MAP,
SpeechTaConfig,
SpeechTaHifiGanConfig,
)
from .feature_extraction_speechta import SpeechTaFeatureExtractor
from .processing_speechta import SpeechTaProcessor
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_speechta import SpeechTaTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_speechta import (
SPEECHT5_PRETRAINED_MODEL_ARCHIVE_LIST,
SpeechTaForSpeechToSpeech,
SpeechTaForSpeechToText,
SpeechTaForTextToSpeech,
SpeechTaHifiGan,
SpeechTaModel,
SpeechTaPreTrainedModel,
)
else:
import sys
a_ = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
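# Minimal sketch of the lazy-import pattern _LazyModule implements above,
# using PEP 562 module-level __getattr__; the import map is a toy example,
# not the transformers machinery.
import importlib

_import_structure = {"math": ["sqrt"], "json": ["dumps"]}
_attr_to_module = {attr: mod for mod, attrs in _import_structure.items() for attr in attrs}


def __getattr__(name):
    # only import the backing module the first time its attribute is touched
    if name in _attr_to_module:
        module = importlib.import_module(_attr_to_module[name])
        return getattr(module, name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")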
| 365 | import unittest
from accelerate import debug_launcher
from accelerate.test_utils import require_cpu, test_ops, test_script
@require_cpu
class _lowercase ( unittest.TestCase ):
def SCREAMING_SNAKE_CASE__ ( self : List[str] ) -> Optional[Any]:
"""simple docstring"""
debug_launcher(test_script.main )
def SCREAMING_SNAKE_CASE__ ( self : Any ) -> List[Any]:
"""simple docstring"""
debug_launcher(test_ops.main )
| 50 | 0 |
import gc
import math
import unittest

import torch

from diffusers import UNet2DModel
from diffusers.utils import floats_tensor, logging, slow, torch_all_close, torch_device
from diffusers.utils.testing_utils import enable_full_determinism

from .test_modeling_common import ModelTesterMixin, UNetTesterMixin


logger = logging.get_logger(__name__)

enable_full_determinism()


class Unet2DModelTests(ModelTesterMixin, UNetTesterMixin, unittest.TestCase):
    model_class = UNet2DModel
    main_input_name = "sample"

    @property
    def dummy_input(self):
        batch_size = 4
        num_channels = 3
        sizes = (32, 32)

        noise = floats_tensor((batch_size, num_channels) + sizes).to(torch_device)
        time_step = torch.tensor([10]).to(torch_device)

        return {"sample": noise, "timestep": time_step}

    @property
    def input_shape(self):
        return (3, 32, 32)

    @property
    def output_shape(self):
        return (3, 32, 32)

    def prepare_init_args_and_inputs_for_common(self):
        init_dict = {
            "block_out_channels": (32, 64),
            "down_block_types": ("DownBlock2D", "AttnDownBlock2D"),
            "up_block_types": ("AttnUpBlock2D", "UpBlock2D"),
            "attention_head_dim": 3,
            "out_channels": 3,
            "in_channels": 3,
            "layers_per_block": 2,
            "sample_size": 32,
        }
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict


class UNetLDMModelTests(ModelTesterMixin, UNetTesterMixin, unittest.TestCase):
    model_class = UNet2DModel
    main_input_name = "sample"

    @property
    def dummy_input(self):
        batch_size = 4
        num_channels = 4
        sizes = (32, 32)

        noise = floats_tensor((batch_size, num_channels) + sizes).to(torch_device)
        time_step = torch.tensor([10]).to(torch_device)

        return {"sample": noise, "timestep": time_step}

    @property
    def input_shape(self):
        return (4, 32, 32)

    @property
    def output_shape(self):
        return (4, 32, 32)

    def prepare_init_args_and_inputs_for_common(self):
        init_dict = {
            "sample_size": 32,
            "in_channels": 4,
            "out_channels": 4,
            "layers_per_block": 2,
            "block_out_channels": (32, 64),
            "attention_head_dim": 32,
            "down_block_types": ("DownBlock2D", "DownBlock2D"),
            "up_block_types": ("UpBlock2D", "UpBlock2D"),
        }
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict

    def test_from_pretrained_hub(self):
        model, loading_info = UNet2DModel.from_pretrained("fusing/unet-ldm-dummy-update", output_loading_info=True)

        self.assertIsNotNone(model)
        self.assertEqual(len(loading_info["missing_keys"]), 0)

        model.to(torch_device)
        image = model(**self.dummy_input).sample

        assert image is not None, "Make sure output is not None"

    @unittest.skipIf(torch_device != "cuda", "This test is supposed to run on GPU")
    def test_from_pretrained_accelerate(self):
        model, _ = UNet2DModel.from_pretrained("fusing/unet-ldm-dummy-update", output_loading_info=True)
        model.to(torch_device)
        image = model(**self.dummy_input).sample

        assert image is not None, "Make sure output is not None"

    @unittest.skipIf(torch_device != "cuda", "This test is supposed to run on GPU")
    def test_from_pretrained_accelerate_wont_change_results(self):
        # by default model loading will use accelerate as `low_cpu_mem_usage=True`
        model_accelerate, _ = UNet2DModel.from_pretrained("fusing/unet-ldm-dummy-update", output_loading_info=True)
        model_accelerate.to(torch_device)
        model_accelerate.eval()

        noise = torch.randn(
            1,
            model_accelerate.config.in_channels,
            model_accelerate.config.sample_size,
            model_accelerate.config.sample_size,
            generator=torch.manual_seed(0),
        )
        noise = noise.to(torch_device)
        time_step = torch.tensor([10] * noise.shape[0]).to(torch_device)

        arr_accelerate = model_accelerate(noise, time_step)["sample"]

        # two models don't need to stay in the device at the same time
        del model_accelerate
        torch.cuda.empty_cache()
        gc.collect()

        model_normal_load, _ = UNet2DModel.from_pretrained(
            "fusing/unet-ldm-dummy-update", output_loading_info=True, low_cpu_mem_usage=False
        )
        model_normal_load.to(torch_device)
        model_normal_load.eval()
        arr_normal_load = model_normal_load(noise, time_step)["sample"]

        assert torch_all_close(arr_accelerate, arr_normal_load, rtol=1e-3)

    def test_output_pretrained(self):
        model = UNet2DModel.from_pretrained("fusing/unet-ldm-dummy-update")
        model.eval()
        model.to(torch_device)

        noise = torch.randn(
            1,
            model.config.in_channels,
            model.config.sample_size,
            model.config.sample_size,
            generator=torch.manual_seed(0),
        )
        noise = noise.to(torch_device)
        time_step = torch.tensor([10] * noise.shape[0]).to(torch_device)

        with torch.no_grad():
            output = model(noise, time_step).sample

        output_slice = output[0, -1, -3:, -3:].flatten().cpu()
        # fmt: off
        expected_output_slice = torch.tensor([-13.3258, -20.1100, -15.9873, -17.6617, -23.0596, -17.9419, -13.3675, -16.1889, -12.3800])
        # fmt: on

        self.assertTrue(torch_all_close(output_slice, expected_output_slice, rtol=1e-3))


class NCSNppModelTests(ModelTesterMixin, UNetTesterMixin, unittest.TestCase):
    model_class = UNet2DModel
    main_input_name = "sample"

    @property
    def dummy_input(self, sizes=(32, 32)):
        batch_size = 4
        num_channels = 3

        noise = floats_tensor((batch_size, num_channels) + sizes).to(torch_device)
        time_step = torch.tensor(batch_size * [10]).to(dtype=torch.int32, device=torch_device)

        return {"sample": noise, "timestep": time_step}

    @property
    def input_shape(self):
        return (3, 32, 32)

    @property
    def output_shape(self):
        return (3, 32, 32)

    def prepare_init_args_and_inputs_for_common(self):
        init_dict = {
            "block_out_channels": [32, 64, 64, 64],
            "in_channels": 3,
            "layers_per_block": 1,
            "out_channels": 3,
            "time_embedding_type": "fourier",
            "norm_eps": 1e-6,
            "mid_block_scale_factor": math.sqrt(2.0),
            "norm_num_groups": None,
            "down_block_types": [
                "SkipDownBlock2D",
                "AttnSkipDownBlock2D",
                "SkipDownBlock2D",
                "SkipDownBlock2D",
            ],
            "up_block_types": [
                "SkipUpBlock2D",
                "SkipUpBlock2D",
                "AttnSkipUpBlock2D",
                "SkipUpBlock2D",
            ],
        }
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict

    @slow
    def test_from_pretrained_hub(self):
        model, loading_info = UNet2DModel.from_pretrained("google/ncsnpp-celebahq-256", output_loading_info=True)
        self.assertIsNotNone(model)
        self.assertEqual(len(loading_info["missing_keys"]), 0)

        model.to(torch_device)
        inputs = self.dummy_input
        noise = floats_tensor((4, 3) + (256, 256)).to(torch_device)
        inputs["sample"] = noise
        image = model(**inputs)

        assert image is not None, "Make sure output is not None"

    @slow
    def test_output_pretrained_ve_mid(self):
        model = UNet2DModel.from_pretrained("google/ncsnpp-celebahq-256")
        model.to(torch_device)

        batch_size = 4
        num_channels = 3
        sizes = (256, 256)

        noise = torch.ones((batch_size, num_channels) + sizes).to(torch_device)
        time_step = torch.tensor(batch_size * [1e-4]).to(torch_device)

        with torch.no_grad():
            output = model(noise, time_step).sample

        output_slice = output[0, -3:, -3:, -1].flatten().cpu()
        # fmt: off
        expected_output_slice = torch.tensor([-4842.8691, -6499.6631, -3800.1953, -7978.2686, -10980.7129, -20028.8535, 8148.2822, 2342.2905, 567.7608])
        # fmt: on

        self.assertTrue(torch_all_close(output_slice, expected_output_slice, rtol=1e-2))

    def test_output_pretrained_ve_large(self):
        model = UNet2DModel.from_pretrained("fusing/ncsnpp-ffhq-ve-dummy-update")
        model.to(torch_device)

        batch_size = 4
        num_channels = 3
        sizes = (32, 32)

        noise = torch.ones((batch_size, num_channels) + sizes).to(torch_device)
        time_step = torch.tensor(batch_size * [1e-4]).to(torch_device)

        with torch.no_grad():
            output = model(noise, time_step).sample

        output_slice = output[0, -3:, -3:, -1].flatten().cpu()
        # fmt: off
        expected_output_slice = torch.tensor([-0.0325, -0.0900, -0.0869, -0.0332, -0.0725, -0.0270, -0.0101, 0.0227, 0.0256])
        # fmt: on

        self.assertTrue(torch_all_close(output_slice, expected_output_slice, rtol=1e-2))

    def test_forward_with_norm_groups(self):
        # not required for this model
        pass
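# Note: the @slow-marked tests above download pretrained checkpoints and, following
# the usual diffusers/transformers convention, only run when RUN_SLOW=1 is set.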
| 345 |
def molarity_to_normality(nfactor: int, moles: float, volume: float) -> float:
    # normality = molarity (moles / volume) scaled by the solute's n-factor
    return round(float(moles / volume) * nfactor)


def moles_to_pressure(volume: float, moles: float, temperature: float) -> float:
    # ideal gas law: P = nRT / V, with R = 0.0821 L*atm/(mol*K)
    return round(float((moles * 0.0821 * temperature) / (volume)))


def moles_to_volume(pressure: float, moles: float, temperature: float) -> float:
    # ideal gas law: V = nRT / P
    return round(float((moles * 0.0821 * temperature) / (pressure)))


def pressure_and_volume_to_temperature(pressure: float, moles: float, volume: float) -> float:
    # ideal gas law: T = PV / (nR)
    return round(float((pressure * volume) / (0.0821 * moles)))
if __name__ == "__main__":
import doctest
doctest.testmod()
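# Usage sketch (the function names above are restored from the formulas;
# R = 0.0821 L*atm/(mol*K)):
#   moles_to_pressure(volume=0.82, moles=3, temperature=300)  # -> 90
#   molarity_to_normality(nfactor=2, moles=4, volume=2)       # -> 4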
| 240 | 0 |
from __future__ import annotations

from collections import deque


class Automaton:
    def __init__(self, keywords: list[str]):
        self.adlist: list[dict] = []
        self.adlist.append(
            {"value": "", "next_states": [], "fail_state": 0, "output": []}
        )

        for keyword in keywords:
            self.add_keyword(keyword)
        self.set_fail_transitions()

    def find_next_state(self, current_state: int, char: str) -> int | None:
        for state in self.adlist[current_state]["next_states"]:
            if char == self.adlist[state]["value"]:
                return state
        return None

    def add_keyword(self, keyword: str) -> None:
        current_state = 0
        for character in keyword:
            next_state = self.find_next_state(current_state, character)
            if next_state is None:
                self.adlist.append(
                    {
                        "value": character,
                        "next_states": [],
                        "fail_state": 0,
                        "output": [],
                    }
                )
                self.adlist[current_state]["next_states"].append(len(self.adlist) - 1)
                current_state = len(self.adlist) - 1
            else:
                current_state = next_state
        self.adlist[current_state]["output"].append(keyword)

    def set_fail_transitions(self) -> None:
        q: deque = deque()
        for node in self.adlist[0]["next_states"]:
            q.append(node)
            self.adlist[node]["fail_state"] = 0
        while q:
            r = q.popleft()
            for child in self.adlist[r]["next_states"]:
                q.append(child)
                state = self.adlist[r]["fail_state"]
                while (
                    self.find_next_state(state, self.adlist[child]["value"]) is None
                    and state != 0
                ):
                    state = self.adlist[state]["fail_state"]
                self.adlist[child]["fail_state"] = self.find_next_state(
                    state, self.adlist[child]["value"]
                )
                if self.adlist[child]["fail_state"] is None:
                    self.adlist[child]["fail_state"] = 0
                self.adlist[child]["output"] = (
                    self.adlist[child]["output"]
                    + self.adlist[self.adlist[child]["fail_state"]]["output"]
                )

    def search_in(self, string: str) -> dict[str, list[int]]:
        result: dict = {}  # returns a dict with keywords and a list of their occurrences
        current_state = 0
        for i in range(len(string)):
            while (
                self.find_next_state(current_state, string[i]) is None
                and current_state != 0
            ):
                current_state = self.adlist[current_state]["fail_state"]
            next_state = self.find_next_state(current_state, string[i])
            if next_state is None:
                current_state = 0
            else:
                current_state = next_state
            for key in self.adlist[current_state]["output"]:
                if key not in result:
                    result[key] = []
                result[key].append(i - len(key) + 1)
        return result
if __name__ == "__main__":
import doctest
doctest.testmod()
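# Example (match positions are 0-indexed start offsets of each keyword):
#   auto = Automaton(["what", "hat", "ver", "er"])
#   auto.search_in("whatever, err ... , wherever")
#   # -> {'what': [0], 'hat': [1], 'ver': [5, 25], 'er': [6, 10, 22, 26]}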
| 353 |
"""Convert UniSpeechSat checkpoint."""

import argparse

import fairseq
import torch

from transformers import UniSpeechSatConfig, UniSpeechSatForCTC, UniSpeechSatForPreTraining, logging


logging.set_verbosity_info()
logger = logging.get_logger(__name__)

MAPPING = {
    "post_extract_proj": "feature_projection.projection",
    "encoder.pos_conv.0": "encoder.pos_conv_embed.conv",
    "self_attn.k_proj": "encoder.layers.*.attention.k_proj",
    "self_attn.v_proj": "encoder.layers.*.attention.v_proj",
    "self_attn.q_proj": "encoder.layers.*.attention.q_proj",
    "self_attn.out_proj": "encoder.layers.*.attention.out_proj",
    "self_attn_layer_norm": "encoder.layers.*.layer_norm",
    "fc1": "encoder.layers.*.feed_forward.intermediate_dense",
    "fc2": "encoder.layers.*.feed_forward.output_dense",
    "final_layer_norm": "encoder.layers.*.final_layer_norm",
    "encoder.layer_norm": "encoder.layer_norm",
    "encoder.layer_norm_for_extract": "layer_norm_for_extract",
    "w2v_model.layer_norm": "feature_projection.layer_norm",
    "quantizer.weight_proj": "quantizer.weight_proj",
    "quantizer.vars": "quantizer.codevectors",
    "project_q": "project_q",
    "final_proj": "project_hid",
    "w2v_encoder.proj": "lm_head",
    "label_embs_concat": "label_embeddings_concat",
    "mask_emb": "masked_spec_embed",
    "spk_proj": "speaker_proj",
}
TOP_LEVEL_KEYS = [
    "lm_head",
    "quantizer.weight_proj",
    "quantizer.codevectors",
    "project_q",
    "project_hid",
    "label_embeddings_concat",
    "speaker_proj",
    "layer_norm_for_extract",
]


def set_recursively(hf_pointer, key, value, full_name, weight_type):
    for attribute in key.split("."):
        hf_pointer = getattr(hf_pointer, attribute)

    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape

    if hf_shape != value.shape:
        raise ValueError(
            f"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should"
            f" be {value.shape} for {full_name}"
        )

    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    else:
        hf_pointer.data = value

    logger.info(f"{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.")


def recursively_load_weights(fairseq_model, hf_model):
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()

    feature_extractor = hf_model.unispeech_sat.feature_extractor

    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name,
                value,
                feature_extractor,
                unused_weights,
                hf_model.config.feat_extract_norm == "group",
            )
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                mapped_key = "unispeech_sat." + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
                if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
                    if "layer_norm_for_extract" in name and (".".join(name.split(".")[:-1]) != key):
                        # special case since naming is very similar
                        continue
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key)[0].split(".")[-2]
                        mapped_key = mapped_key.replace("*", layer_index)
                    if "weight_g" in name:
                        weight_type = "weight_g"
                    elif "weight_v" in name:
                        weight_type = "weight_v"
                    elif "bias" in name:
                        weight_type = "bias"
                    elif "weight" in name:
                        # TODO: don't match quantizer.weight_proj
                        weight_type = "weight"
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type)
                continue
        if not is_used:
            unused_weights.append(name)

    logger.warning(f"Unused weights: {unused_weights}")


def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    name = full_name.split("conv_layers.")[-1]
    items = name.split(".")
    layer_id = int(items[0])
    type_id = int(items[1])

    if type_id == 0:
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"Feat extract layer norm bias of layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
    else:
        unused_weights.append(full_name)


@torch.no_grad()
def convert_unispeech_sat_checkpoint(
    checkpoint_path, pytorch_dump_folder_path, config_path=None, dict_path=None, is_finetuned=True
):
    """
    Copy/paste/tweak a fairseq UniSpeechSat checkpoint into the transformers design.
    """
    if config_path is not None:
        config = UniSpeechSatConfig.from_pretrained(config_path)
    else:
        config = UniSpeechSatConfig()

    dict_path = ""

    if is_finetuned:
        hf_wav2vec = UniSpeechSatForCTC(config)
    else:
        hf_wav2vec = UniSpeechSatForPreTraining(config)

    model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
        [checkpoint_path], arg_overrides={"data": "/".join(dict_path.split("/")[:-1])}
    )
    model = model[0].eval()

    recursively_load_weights(model, hf_wav2vec)

    hf_wav2vec.save_pretrained(pytorch_dump_folder_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint")
    parser.add_argument("--dict_path", default=None, type=str, help="Path to dict of fine-tuned model")
    parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
    parser.add_argument(
        "--not_finetuned", action="store_true", help="Whether the model to convert is a fine-tuned model or not"
    )
    args = parser.parse_args()
    convert_unispeech_sat_checkpoint(
        args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
    )
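# Example invocation (paths are placeholders for an actual fairseq checkpoint):
#   python convert_unispeech_sat_checkpoint.py \
#       --checkpoint_path ./unispeech_sat.pt --pytorch_dump_folder_path ./unispeech-sat-hf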
| 330 | 0 |
def mf_knapsack(i, wt, val, j):
    """
    Memoization-based 0/1 knapsack: best value achievable with the first i
    items at capacity j, caching results in the global table `f`.
    """
    global f  # a global dp table for knapsack
    if f[i][j] < 0:
        if j < wt[i - 1]:
            val = mf_knapsack(i - 1, wt, val, j)
        else:
            val = max(
                mf_knapsack(i - 1, wt, val, j),
                mf_knapsack(i - 1, wt, val, j - wt[i - 1]) + val[i - 1],
            )
        f[i][j] = val
    return f[i][j]


def knapsack(w, wt, val, n):
    """Bottom-up 0/1 knapsack; returns the optimal value and the full dp table."""
    dp = [[0] * (w + 1) for _ in range(n + 1)]

    for i in range(1, n + 1):
        for w_ in range(1, w + 1):
            if wt[i - 1] <= w_:
                dp[i][w_] = max(val[i - 1] + dp[i - 1][w_ - wt[i - 1]], dp[i - 1][w_])
            else:
                dp[i][w_] = dp[i - 1][w_]

    return dp[n][w_], dp


def knapsack_with_example_solution(w: int, wt: list, val: list):
    """Solve the knapsack and also reconstruct one optimal subset of item indices (1-based)."""
    if not (isinstance(wt, (list, tuple)) and isinstance(val, (list, tuple))):
        raise ValueError(
            "Both the weights and values vectors must be either lists or tuples"
        )

    num_items = len(wt)
    if num_items != len(val):
        msg = (
            "The number of weights must be the same as the number of values.\n"
            f"But got {num_items} weights and {len(val)} values"
        )
        raise ValueError(msg)
    for i in range(num_items):
        if not isinstance(wt[i], int):
            msg = (
                "All weights must be integers but got weight of "
                f"type {type(wt[i])} at index {i}"
            )
            raise TypeError(msg)

    optimal_val, dp_table = knapsack(w, wt, val, num_items)
    example_optional_set: set = set()
    _construct_solution(dp_table, wt, num_items, w, example_optional_set)

    return optimal_val, example_optional_set


def _construct_solution(dp: list, wt: list, i: int, j: int, optimal_set: set):
    # item i belongs to an optimal subset at capacity j exactly when the
    # optimal value at (i, j) differs from the optimal value at (i - 1, j)
    if i > 0 and j > 0:
        if dp[i - 1][j] == dp[i][j]:
            _construct_solution(dp, wt, i - 1, j, optimal_set)
        else:
            optimal_set.add(i)
            _construct_solution(dp, wt, i - 1, j - wt[i - 1], optimal_set)
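# Both solvers implement the standard 0/1 knapsack recurrence:
#   dp[i][w] = dp[i - 1][w]                                             if wt[i - 1] > w
#   dp[i][w] = max(dp[i - 1][w], val[i - 1] + dp[i - 1][w - wt[i - 1]]) otherwise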
if __name__ == "__main__":
_lowerCamelCase =[3, 2, 4, 4]
_lowerCamelCase =[4, 3, 2, 3]
_lowerCamelCase =4
_lowerCamelCase =6
_lowerCamelCase =[[0] * (w + 1)] + [[0] + [-1] * (w + 1) for _ in range(n + 1)]
_lowerCamelCase =knapsack(w, wt, val, n)
print(optimal_solution)
print(mf_knapsack(n, wt, val, w)) # switched the n and w
# testing the dynamic programming problem with example
# the optimal subset for the above example are items 3 and 4
_lowerCamelCase =knapsack_with_example_solution(w, wt, val)
assert optimal_solution == 8
assert optimal_subset == {3, 4}
print("optimal_value = ", optimal_solution)
print("An optimal subset corresponding to the optimal value", optimal_subset)
| 334 |
"""Speech processor class for M-CTC-T."""
import warnings
from contextlib import contextmanager

from ....processing_utils import ProcessorMixin


class MCTCTProcessor(ProcessorMixin):
    """Wraps an M-CTC-T feature extractor and a tokenizer into a single processor."""

    feature_extractor_class = "MCTCTFeatureExtractor"
    tokenizer_class = "AutoTokenizer"

    def __init__(self, feature_extractor, tokenizer):
        super().__init__(feature_extractor, tokenizer)
        self.current_processor = self.feature_extractor
        self._in_target_context_manager = False

    def __call__(self, *args, **kwargs):
        # For backward compatibility
        if self._in_target_context_manager:
            return self.current_processor(*args, **kwargs)

        if "raw_speech" in kwargs:
            warnings.warn("Using `raw_speech` as a keyword argument is deprecated. Use `audio` instead.")
            audio = kwargs.pop("raw_speech")
        else:
            audio = kwargs.pop("audio", None)
        sampling_rate = kwargs.pop("sampling_rate", None)
        text = kwargs.pop("text", None)
        if len(args) > 0:
            audio = args[0]
            args = args[1:]

        if audio is None and text is None:
            raise ValueError("You need to specify either an `audio` or `text` input to process.")

        if audio is not None:
            inputs = self.feature_extractor(audio, *args, sampling_rate=sampling_rate, **kwargs)
        if text is not None:
            encodings = self.tokenizer(text, **kwargs)

        if text is None:
            return inputs
        elif audio is None:
            return encodings
        else:
            inputs["labels"] = encodings["input_ids"]
            return inputs

    def batch_decode(self, *args, **kwargs):
        # forwards everything to the tokenizer's batch_decode
        return self.tokenizer.batch_decode(*args, **kwargs)

    def pad(self, *args, **kwargs):
        # For backward compatibility
        if self._in_target_context_manager:
            return self.current_processor.pad(*args, **kwargs)

        input_features = kwargs.pop("input_features", None)
        labels = kwargs.pop("labels", None)
        if len(args) > 0:
            input_features = args[0]
            args = args[1:]

        if input_features is not None:
            input_features = self.feature_extractor.pad(input_features, *args, **kwargs)
        if labels is not None:
            labels = self.tokenizer.pad(labels, **kwargs)

        if labels is None:
            return input_features
        elif input_features is None:
            return labels
        else:
            input_features["labels"] = labels["input_ids"]
            return input_features

    def decode(self, *args, **kwargs):
        # forwards everything to the tokenizer's decode
        return self.tokenizer.decode(*args, **kwargs)

    @contextmanager
    def as_target_processor(self):
        warnings.warn(
            "`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your "
            "labels by using the argument `text` of the regular `__call__` method (either in the same call as "
            "your audio inputs, or in a separate call."
        )
        self._in_target_context_manager = True
        self.current_processor = self.tokenizer
        yield
        self.current_processor = self.feature_extractor
        self._in_target_context_manager = False
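# Usage sketch (checkpoint name is illustrative):
#   processor = MCTCTProcessor.from_pretrained("speechbrain/m-ctc-t-large")
#   inputs = processor(audio=waveform, sampling_rate=16000, return_tensors="pt")
#   labels = processor(text=transcription).input_ids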
| 23 | 0 |
from itertools import product


def total_frequency_distribution(sides_number: int, dice_number: int) -> list[int]:
    """Count, for every possible total, how many rolls of the dice produce it."""
    max_face_number = sides_number
    max_total = max_face_number * dice_number
    totals_frequencies = [0] * (max_total + 1)

    min_face_number = 1
    face_numbers = range(min_face_number, max_face_number + 1)
    for dice_numbers in product(face_numbers, repeat=dice_number):
        total = sum(dice_numbers)
        totals_frequencies[total] += 1

    return totals_frequencies


def solution() -> float:
    """
    Project Euler 205: probability that Peter (nine 4-sided dice) rolls a
    strictly higher total than Colin (six 6-sided dice), rounded to 7 places.
    """
    peter_totals_frequencies = total_frequency_distribution(sides_number=4, dice_number=9)
    colin_totals_frequencies = total_frequency_distribution(sides_number=6, dice_number=6)

    peter_wins_count = 0
    min_peter_total = 9
    max_peter_total = 4 * 9
    min_colin_total = 6
    for peter_total in range(min_peter_total, max_peter_total + 1):
        peter_wins_count += peter_totals_frequencies[peter_total] * sum(
            colin_totals_frequencies[min_colin_total:peter_total]
        )

    total_games_number = (4**9) * (6**6)
    peter_win_probability = peter_wins_count / total_games_number

    rounded_peter_win_probability = round(peter_win_probability, ndigits=7)

    return rounded_peter_win_probability
if __name__ == "__main__":
print(F"""{solution() = }""")
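# Expected result for Project Euler 205: solution() == 0.5731441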
| 359 |
from manim import *


class Stage1(Scene):
    def construct(self):
        mem = Rectangle(height=0.5, width=0.5)
        fill = Rectangle(height=0.46, width=0.46).set_stroke(width=0)

        cpu_left_col_base = [mem.copy() for i in range(6)]
        cpu_right_col_base = [mem.copy() for i in range(6)]
        cpu_left_col = VGroup(*cpu_left_col_base).arrange(UP, buff=0)
        cpu_right_col = VGroup(*cpu_right_col_base).arrange(UP, buff=0)
        cpu_rects = VGroup(cpu_left_col, cpu_right_col).arrange(RIGHT, buff=0)
        cpu_text = Text("CPU", font_size=24)
        cpu = Group(cpu_rects, cpu_text).arrange(DOWN, buff=0.5, aligned_edge=DOWN)
        cpu.move_to([-2.5, -0.5, 0])
        self.add(cpu)

        gpu_base = [mem.copy() for i in range(1)]
        gpu_rect = VGroup(*gpu_base).arrange(UP, buff=0)
        gpu_text = Text("GPU", font_size=24)
        gpu = Group(gpu_rect, gpu_text).arrange(DOWN, buff=0.5, aligned_edge=DOWN)
        gpu.align_to(cpu, DOWN)
        gpu.set_x(gpu.get_x() - 1)
        self.add(gpu)

        model_base = [mem.copy() for i in range(6)]
        model_rect = VGroup(*model_base).arrange(RIGHT, buff=0)
        model_text = Text("Model", font_size=24)
        model = Group(model_rect, model_text).arrange(DOWN, buff=0.5, aligned_edge=DOWN)
        model.move_to([3, -1.0, 0])

        self.play(
            Create(cpu_left_col, run_time=1),
            Create(cpu_right_col, run_time=1),
            Create(gpu_rect, run_time=1),
        )

        step_1 = MarkupText(
            f"First, an empty model skeleton is loaded\ninto <span fgcolor='{YELLOW}'>memory</span> without using much RAM.",
            font_size=24,
        )
        key = Square(side_length=2.2)
        key.move_to([-5, 2, 0])
        key_text = MarkupText(
            f"<b>Key:</b>\n\n<span fgcolor='{YELLOW}'>●</span> Empty Model",
            font_size=18,
        )
        key_text.move_to([-5, 2.4, 0])

        step_1.move_to([2, 2, 0])
        self.play(Write(step_1, run_time=2.5), Write(key_text), Write(key))
        self.add(model)

        cpu_targs = []
        first_animations = []
        second_animations = []
        for i, rect in enumerate(model_base):
            cpu_target = Rectangle(height=0.46, width=0.46).set_stroke(width=0.0).set_fill(YELLOW, opacity=0.7)
            cpu_target.move_to(rect)
            cpu_target.generate_target()
            cpu_target.target.height = 0.46 / 4
            cpu_target.target.width = 0.46 / 3
            if i == 0:
                cpu_target.target.next_to(cpu_left_col_base[0].get_corner(DOWN + LEFT), buff=0.02, direction=UP)
                cpu_target.target.set_x(cpu_target.target.get_x() + 0.1)
            elif i == 3:
                cpu_target.target.next_to(cpu_targs[0].target, direction=UP, buff=0.0)
            else:
                cpu_target.target.next_to(cpu_targs[i - 1].target, direction=RIGHT, buff=0.0)
            cpu_targs.append(cpu_target)

            first_animations.append(rect.animate(run_time=0.5).set_stroke(YELLOW))
            second_animations.append(MoveToTarget(cpu_target, run_time=1.5))

        self.play(*first_animations)
        self.play(*second_animations)
        self.wait()
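# To render this scene (assuming the file is saved as stage_1.py):
#   manim -pql stage_1.py Stage1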
| 50 | 0 |
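# The ConfigTester helper below is driven from each model's test case, e.g.:
#   ConfigTester(self, config_class=SomeModelConfig, hidden_size=37).run_common_tests()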
import copy
import json
import os
import tempfile

from transformers import is_torch_available

from .test_configuration_utils import config_common_kwargs


class ConfigTester(object):
    def __init__(self, parent, config_class=None, has_text_modality=True, common_properties=None, **kwargs):
        self.parent = parent
        self.config_class = config_class
        self.has_text_modality = has_text_modality
        self.inputs_dict = kwargs
        self.common_properties = common_properties

    def create_and_test_config_common_properties(self):
        config = self.config_class(**self.inputs_dict)
        common_properties = (
            ["hidden_size", "num_attention_heads", "num_hidden_layers"]
            if self.common_properties is None
            else self.common_properties
        )

        # Add common fields for text models
        if self.has_text_modality:
            common_properties.extend(["vocab_size"])

        # Test that config has the common properties as getters
        for prop in common_properties:
            self.parent.assertTrue(hasattr(config, prop), msg=f"`{prop}` does not exist")

        # Test that config has the common properties as setter
        for idx, name in enumerate(common_properties):
            try:
                setattr(config, name, idx)
                self.parent.assertEqual(
                    getattr(config, name), idx, msg=f"`{name} value {idx} expected, but was {getattr(config, name)}"
                )
            except NotImplementedError:
                # Some models might not be able to implement setters for common_properties
                # In that case, a NotImplementedError is raised
                pass

        # Test if config class can be called with Config(prop_name=..)
        for idx, name in enumerate(common_properties):
            try:
                config = self.config_class(**{name: idx})
                self.parent.assertEqual(
                    getattr(config, name), idx, msg=f"`{name} value {idx} expected, but was {getattr(config, name)}"
                )
            except NotImplementedError:
                # Some models might not be able to implement setters for common_properties
                # In that case, a NotImplementedError is raised
                pass

    def create_and_test_config_to_json_string(self):
        config = self.config_class(**self.inputs_dict)
        obj = json.loads(config.to_json_string())
        for key, value in self.inputs_dict.items():
            self.parent.assertEqual(obj[key], value)

    def create_and_test_config_to_json_file(self):
        config_first = self.config_class(**self.inputs_dict)

        with tempfile.TemporaryDirectory() as tmpdirname:
            json_file_path = os.path.join(tmpdirname, "config.json")
            config_first.to_json_file(json_file_path)
            config_second = self.config_class.from_json_file(json_file_path)

        self.parent.assertEqual(config_second.to_dict(), config_first.to_dict())

    def create_and_test_config_from_and_save_pretrained(self):
        config_first = self.config_class(**self.inputs_dict)

        with tempfile.TemporaryDirectory() as tmpdirname:
            config_first.save_pretrained(tmpdirname)
            config_second = self.config_class.from_pretrained(tmpdirname)

        self.parent.assertEqual(config_second.to_dict(), config_first.to_dict())

    def create_and_test_config_from_and_save_pretrained_subfolder(self):
        config_first = self.config_class(**self.inputs_dict)
        subfolder = "test"
        with tempfile.TemporaryDirectory() as tmpdirname:
            sub_tmpdirname = os.path.join(tmpdirname, subfolder)
            config_first.save_pretrained(sub_tmpdirname)
            config_second = self.config_class.from_pretrained(tmpdirname, subfolder=subfolder)

        self.parent.assertEqual(config_second.to_dict(), config_first.to_dict())

    def create_and_test_config_with_num_labels(self):
        config = self.config_class(**self.inputs_dict, num_labels=5)
        self.parent.assertEqual(len(config.id2label), 5)
        self.parent.assertEqual(len(config.label2id), 5)

        config.num_labels = 3
        self.parent.assertEqual(len(config.id2label), 3)
        self.parent.assertEqual(len(config.label2id), 3)

    def check_config_can_be_init_without_params(self):
        if self.config_class.is_composition:
            return
        config = self.config_class()
        self.parent.assertIsNotNone(config)

    def check_config_arguments_init(self):
        kwargs = copy.deepcopy(config_common_kwargs)
        config = self.config_class(**kwargs)
        wrong_values = []
        for key, value in config_common_kwargs.items():
            if key == "torch_dtype":
                if not is_torch_available():
                    continue
                else:
                    import torch

                    if config.torch_dtype != torch.float16:
                        wrong_values.append(("torch_dtype", config.torch_dtype, torch.float16))
            elif getattr(config, key) != value:
                wrong_values.append((key, getattr(config, key), value))

        if len(wrong_values) > 0:
            errors = "\n".join([f"- {v[0]}: got {v[1]} instead of {v[2]}" for v in wrong_values])
            raise ValueError(f"The following keys were not properly set in the config:\n{errors}")

    def run_common_tests(self):
        self.create_and_test_config_common_properties()
        self.create_and_test_config_to_json_string()
        self.create_and_test_config_to_json_file()
        self.create_and_test_config_from_and_save_pretrained()
        self.create_and_test_config_from_and_save_pretrained_subfolder()
        self.create_and_test_config_with_num_labels()
        self.check_config_can_be_init_without_params()
        self.check_config_arguments_init()
| 282 |
from collections.abc import Sequence


def evaluate_poly(poly: Sequence[float], x: float) -> float:
    """Evaluate a polynomial, given as coefficients from lowest to highest degree, at x."""
    return sum(c * (x**i) for i, c in enumerate(poly))


def horner(poly: Sequence[float], x: float) -> float:
    """Evaluate the same polynomial at x using Horner's method."""
    result = 0.0
    for coeff in reversed(poly):
        result = result * x + coeff
    return result
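# Horner's rule needs only n multiplications by nesting the polynomial:
#   5x^2 + 9.3x^3 + 7x^4 == x*(x*(x*(x*7 + 9.3) + 5.0) + 0.0) + 0.0
# so for poly = (0.0, 0.0, 5.0, 9.3, 7.0) and x = 10.0 both functions return 79800.0.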
if __name__ == "__main__":
_lowerCamelCase : Optional[Any] = (0.0, 0.0, 5.0, 9.3, 7.0)
_lowerCamelCase : Optional[int] = 1_0.0
print(evaluate_poly(poly, x))
print(horner(poly, x)) | 282 | 1 |
"""simple docstring"""
import unittest
from transformers import MraConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_torch_available():
import torch
from transformers import (
MraForMaskedLM,
MraForMultipleChoice,
MraForQuestionAnswering,
MraForSequenceClassification,
MraForTokenClassification,
MraModel,
)
from transformers.models.mra.modeling_mra import MRA_PRETRAINED_MODEL_ARCHIVE_LIST
class UpperCamelCase_ :
"""simple docstring"""
def __init__( self : Any , UpperCAmelCase__ : List[str] , UpperCAmelCase__ : Optional[int]=2 , UpperCAmelCase__ : Optional[int]=8 , UpperCAmelCase__ : Optional[Any]=True , UpperCAmelCase__ : List[str]=True , UpperCAmelCase__ : List[Any]=True , UpperCAmelCase__ : List[str]=True , UpperCAmelCase__ : str=9_9 , UpperCAmelCase__ : List[str]=1_6 , UpperCAmelCase__ : int=5 , UpperCAmelCase__ : List[Any]=2 , UpperCAmelCase__ : Union[str, Any]=3_6 , UpperCAmelCase__ : Dict="gelu" , UpperCAmelCase__ : List[str]=0.0 , UpperCAmelCase__ : List[str]=0.0 , UpperCAmelCase__ : List[Any]=5_1_2 , UpperCAmelCase__ : Dict=1_6 , UpperCAmelCase__ : Tuple=2 , UpperCAmelCase__ : int=0.02 , UpperCAmelCase__ : str=3 , UpperCAmelCase__ : Union[str, Any]=4 , UpperCAmelCase__ : Any=None , ) -> Tuple:
__SCREAMING_SNAKE_CASE = parent
__SCREAMING_SNAKE_CASE = batch_size
__SCREAMING_SNAKE_CASE = seq_length
__SCREAMING_SNAKE_CASE = is_training
__SCREAMING_SNAKE_CASE = use_input_mask
__SCREAMING_SNAKE_CASE = use_token_type_ids
__SCREAMING_SNAKE_CASE = use_labels
__SCREAMING_SNAKE_CASE = vocab_size
__SCREAMING_SNAKE_CASE = hidden_size
__SCREAMING_SNAKE_CASE = num_hidden_layers
__SCREAMING_SNAKE_CASE = num_attention_heads
__SCREAMING_SNAKE_CASE = intermediate_size
__SCREAMING_SNAKE_CASE = hidden_act
__SCREAMING_SNAKE_CASE = hidden_dropout_prob
__SCREAMING_SNAKE_CASE = attention_probs_dropout_prob
__SCREAMING_SNAKE_CASE = max_position_embeddings
__SCREAMING_SNAKE_CASE = type_vocab_size
__SCREAMING_SNAKE_CASE = type_sequence_label_size
__SCREAMING_SNAKE_CASE = initializer_range
__SCREAMING_SNAKE_CASE = num_labels
__SCREAMING_SNAKE_CASE = num_choices
__SCREAMING_SNAKE_CASE = scope
def UpperCAmelCase_ ( self : List[str] ) -> Optional[int]:
__SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__SCREAMING_SNAKE_CASE = None
if self.use_input_mask:
__SCREAMING_SNAKE_CASE = random_attention_mask([self.batch_size, self.seq_length] )
__SCREAMING_SNAKE_CASE = None
if self.use_token_type_ids:
__SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
__SCREAMING_SNAKE_CASE = None
__SCREAMING_SNAKE_CASE = None
__SCREAMING_SNAKE_CASE = None
if self.use_labels:
__SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
__SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size] , self.num_choices )
__SCREAMING_SNAKE_CASE = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def UpperCAmelCase_ ( self : str ) -> Optional[Any]:
return MraConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=UpperCAmelCase__ , initializer_range=self.initializer_range , )
def UpperCAmelCase_ ( self : List[Any] ) -> str:
__SCREAMING_SNAKE_CASE = self.get_config()
__SCREAMING_SNAKE_CASE = 3_0_0
return config
def UpperCAmelCase_ ( self : Tuple ) -> Optional[Any]:
(
__SCREAMING_SNAKE_CASE
) = self.prepare_config_and_inputs()
__SCREAMING_SNAKE_CASE = True
__SCREAMING_SNAKE_CASE = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] )
__SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
return (
config,
input_ids,
token_type_ids,
input_mask,
sequence_labels,
token_labels,
choice_labels,
encoder_hidden_states,
encoder_attention_mask,
)
def UpperCAmelCase_ ( self : str , UpperCAmelCase__ : Optional[int] , UpperCAmelCase__ : Union[str, Any] , UpperCAmelCase__ : Any , UpperCAmelCase__ : List[str] , UpperCAmelCase__ : Optional[int] , UpperCAmelCase__ : Optional[int] , UpperCAmelCase__ : Union[str, Any] ) -> Dict:
__SCREAMING_SNAKE_CASE = MraModel(config=UpperCAmelCase__ )
model.to(UpperCAmelCase__ )
model.eval()
__SCREAMING_SNAKE_CASE = model(UpperCAmelCase__ , attention_mask=UpperCAmelCase__ , token_type_ids=UpperCAmelCase__ )
__SCREAMING_SNAKE_CASE = model(UpperCAmelCase__ , token_type_ids=UpperCAmelCase__ )
__SCREAMING_SNAKE_CASE = model(UpperCAmelCase__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def UpperCAmelCase_ ( self : List[Any] , UpperCAmelCase__ : int , UpperCAmelCase__ : Optional[Any] , UpperCAmelCase__ : List[str] , UpperCAmelCase__ : int , UpperCAmelCase__ : Dict , UpperCAmelCase__ : Any , UpperCAmelCase__ : List[str] , UpperCAmelCase__ : str , UpperCAmelCase__ : Tuple , ) -> Tuple:
__SCREAMING_SNAKE_CASE = True
__SCREAMING_SNAKE_CASE = MraModel(UpperCAmelCase__ )
model.to(UpperCAmelCase__ )
model.eval()
__SCREAMING_SNAKE_CASE = model(
UpperCAmelCase__ , attention_mask=UpperCAmelCase__ , token_type_ids=UpperCAmelCase__ , encoder_hidden_states=UpperCAmelCase__ , encoder_attention_mask=UpperCAmelCase__ , )
__SCREAMING_SNAKE_CASE = model(
UpperCAmelCase__ , attention_mask=UpperCAmelCase__ , token_type_ids=UpperCAmelCase__ , encoder_hidden_states=UpperCAmelCase__ , )
__SCREAMING_SNAKE_CASE = model(UpperCAmelCase__ , attention_mask=UpperCAmelCase__ , token_type_ids=UpperCAmelCase__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def UpperCAmelCase_ ( self : Union[str, Any] , UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : Tuple , UpperCAmelCase__ : Optional[int] , UpperCAmelCase__ : List[str] , UpperCAmelCase__ : Optional[Any] , UpperCAmelCase__ : int , UpperCAmelCase__ : Tuple ) -> int:
__SCREAMING_SNAKE_CASE = MraForMaskedLM(config=UpperCAmelCase__ )
model.to(UpperCAmelCase__ )
model.eval()
__SCREAMING_SNAKE_CASE = model(UpperCAmelCase__ , attention_mask=UpperCAmelCase__ , token_type_ids=UpperCAmelCase__ , labels=UpperCAmelCase__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def UpperCAmelCase_ ( self : Union[str, Any] , UpperCAmelCase__ : Any , UpperCAmelCase__ : Tuple , UpperCAmelCase__ : Dict , UpperCAmelCase__ : Union[str, Any] , UpperCAmelCase__ : List[str] , UpperCAmelCase__ : List[str] , UpperCAmelCase__ : Tuple ) -> str:
__SCREAMING_SNAKE_CASE = MraForQuestionAnswering(config=UpperCAmelCase__ )
model.to(UpperCAmelCase__ )
model.eval()
__SCREAMING_SNAKE_CASE = model(
UpperCAmelCase__ , attention_mask=UpperCAmelCase__ , token_type_ids=UpperCAmelCase__ , start_positions=UpperCAmelCase__ , end_positions=UpperCAmelCase__ , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def UpperCAmelCase_ ( self : Tuple , UpperCAmelCase__ : List[str] , UpperCAmelCase__ : int , UpperCAmelCase__ : Optional[int] , UpperCAmelCase__ : Any , UpperCAmelCase__ : str , UpperCAmelCase__ : Any , UpperCAmelCase__ : Tuple ) -> Dict:
__SCREAMING_SNAKE_CASE = self.num_labels
__SCREAMING_SNAKE_CASE = MraForSequenceClassification(UpperCAmelCase__ )
model.to(UpperCAmelCase__ )
model.eval()
__SCREAMING_SNAKE_CASE = model(UpperCAmelCase__ , attention_mask=UpperCAmelCase__ , token_type_ids=UpperCAmelCase__ , labels=UpperCAmelCase__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def UpperCAmelCase_ ( self : Any , UpperCAmelCase__ : int , UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : str , UpperCAmelCase__ : Optional[int] , UpperCAmelCase__ : Optional[Any] , UpperCAmelCase__ : Optional[int] ) -> List[str]:
__SCREAMING_SNAKE_CASE = self.num_labels
__SCREAMING_SNAKE_CASE = MraForTokenClassification(config=UpperCAmelCase__ )
model.to(UpperCAmelCase__ )
model.eval()
__SCREAMING_SNAKE_CASE = model(UpperCAmelCase__ , attention_mask=UpperCAmelCase__ , token_type_ids=UpperCAmelCase__ , labels=UpperCAmelCase__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def UpperCAmelCase_ ( self : Union[str, Any] , UpperCAmelCase__ : Optional[int] , UpperCAmelCase__ : Optional[Any] , UpperCAmelCase__ : Dict , UpperCAmelCase__ : Tuple , UpperCAmelCase__ : str , UpperCAmelCase__ : Tuple , UpperCAmelCase__ : List[str] ) -> int:
__SCREAMING_SNAKE_CASE = self.num_choices
__SCREAMING_SNAKE_CASE = MraForMultipleChoice(config=UpperCAmelCase__ )
model.to(UpperCAmelCase__ )
model.eval()
__SCREAMING_SNAKE_CASE = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
__SCREAMING_SNAKE_CASE = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
__SCREAMING_SNAKE_CASE = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
__SCREAMING_SNAKE_CASE = model(
UpperCAmelCase__ , attention_mask=UpperCAmelCase__ , token_type_ids=UpperCAmelCase__ , labels=UpperCAmelCase__ , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def UpperCAmelCase_ ( self : str ) -> Dict:
__SCREAMING_SNAKE_CASE = self.prepare_config_and_inputs()
(
__SCREAMING_SNAKE_CASE
) = config_and_inputs
__SCREAMING_SNAKE_CASE = {"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """attention_mask""": input_mask}
return config, inputs_dict
@require_torch
class UpperCamelCase_ ( __UpperCamelCase , unittest.TestCase):
"""simple docstring"""
snake_case__ : Optional[Any] = (
(
MraModel,
MraForMaskedLM,
MraForMultipleChoice,
MraForQuestionAnswering,
MraForSequenceClassification,
MraForTokenClassification,
)
if is_torch_available()
else ()
)
snake_case__ : List[Any] = False
snake_case__ : List[Any] = False
snake_case__ : Optional[Any] = False
snake_case__ : Union[str, Any] = False
snake_case__ : Optional[Any] = ()
def UpperCAmelCase_ ( self : Tuple ) -> Dict:
__SCREAMING_SNAKE_CASE = MraModelTester(self )
__SCREAMING_SNAKE_CASE = ConfigTester(self , config_class=UpperCAmelCase__ , hidden_size=3_7 )
def UpperCAmelCase_ ( self : int ) -> Union[str, Any]:
self.config_tester.run_common_tests()
def UpperCAmelCase_ ( self : Tuple ) -> Optional[Any]:
__SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*UpperCAmelCase__ )
def UpperCAmelCase_ ( self : Optional[int] ) -> Dict:
__SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
__SCREAMING_SNAKE_CASE = type
self.model_tester.create_and_check_model(*UpperCAmelCase__ )
def UpperCAmelCase_ ( self : Optional[Any] ) -> Any:
__SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*UpperCAmelCase__ )
def UpperCAmelCase_ ( self : List[str] ) -> str:
__SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_multiple_choice(*UpperCAmelCase__ )
def UpperCAmelCase_ ( self : Dict ) -> Any:
__SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*UpperCAmelCase__ )
def UpperCAmelCase_ ( self : Optional[Any] ) -> Union[str, Any]:
__SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*UpperCAmelCase__ )
def UpperCAmelCase_ ( self : Tuple ) -> List[Any]:
__SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*UpperCAmelCase__ )
@slow
def UpperCAmelCase_ ( self : List[str] ) -> Tuple:
for model_name in MRA_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__SCREAMING_SNAKE_CASE = MraModel.from_pretrained(UpperCAmelCase__ )
self.assertIsNotNone(UpperCAmelCase__ )
@unittest.skip(reason="MRA does not output attentions" )
def UpperCAmelCase_ ( self : Any ) -> Dict:
return
@require_torch
class UpperCamelCase_ ( unittest.TestCase):
"""simple docstring"""
@slow
def UpperCAmelCase_ ( self : Any ) -> Optional[Any]:
__SCREAMING_SNAKE_CASE = MraModel.from_pretrained("uw-madison/mra-base-512-4" )
__SCREAMING_SNAKE_CASE = torch.arange(2_5_6 ).unsqueeze(0 )
with torch.no_grad():
__SCREAMING_SNAKE_CASE = model(UpperCAmelCase__ )[0]
__SCREAMING_SNAKE_CASE = torch.Size((1, 2_5_6, 7_6_8) )
self.assertEqual(output.shape , UpperCAmelCase__ )
__SCREAMING_SNAKE_CASE = torch.tensor(
[[[-0.0_140, 0.0_830, -0.0_381], [0.1_546, 0.1_402, 0.0_220], [0.1_162, 0.0_851, 0.0_165]]] )
self.assertTrue(torch.allclose(output[:, :3, :3] , UpperCAmelCase__ , atol=1E-4 ) )
@slow
def UpperCAmelCase_ ( self : Optional[Any] ) -> Tuple:
__SCREAMING_SNAKE_CASE = MraForMaskedLM.from_pretrained("uw-madison/mra-base-512-4" )
__SCREAMING_SNAKE_CASE = torch.arange(2_5_6 ).unsqueeze(0 )
with torch.no_grad():
__SCREAMING_SNAKE_CASE = model(UpperCAmelCase__ )[0]
__SCREAMING_SNAKE_CASE = 5_0_2_6_5
__SCREAMING_SNAKE_CASE = torch.Size((1, 2_5_6, vocab_size) )
self.assertEqual(output.shape , UpperCAmelCase__ )
__SCREAMING_SNAKE_CASE = torch.tensor(
[[[9.2_595, -3.6_038, 11.8_819], [9.3_869, -3.2_693, 11.0_956], [11.8_524, -3.4_938, 13.1_210]]] )
self.assertTrue(torch.allclose(output[:, :3, :3] , UpperCAmelCase__ , atol=1E-4 ) )
@slow
def UpperCAmelCase_ ( self : Optional[int] ) -> Dict:
__SCREAMING_SNAKE_CASE = MraForMaskedLM.from_pretrained("uw-madison/mra-base-4096-8-d3" )
__SCREAMING_SNAKE_CASE = torch.arange(4_0_9_6 ).unsqueeze(0 )
with torch.no_grad():
__SCREAMING_SNAKE_CASE = model(UpperCAmelCase__ )[0]
__SCREAMING_SNAKE_CASE = 5_0_2_6_5
__SCREAMING_SNAKE_CASE = torch.Size((1, 4_0_9_6, vocab_size) )
self.assertEqual(output.shape , UpperCAmelCase__ )
__SCREAMING_SNAKE_CASE = torch.tensor(
[[[5.4_789, -2.3_564, 7.5_064], [7.9_067, -1.3_369, 9.9_668], [9.0_712, -1.8_106, 7.0_380]]] )
self.assertTrue(torch.allclose(output[:, :3, :3] , UpperCAmelCase__ , atol=1E-4 ) )
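# The integration tests above compare a 3x3 slice of the model output against
# reference values recorded from the original uw-madison checkpoints; being
# @slow, they only run when slow tests are enabled.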
| 356 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
    "configuration_biogpt": ["BIOGPT_PRETRAINED_CONFIG_ARCHIVE_MAP", "BioGptConfig"],
    "tokenization_biogpt": ["BioGptTokenizer"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_biogpt"] = [
        "BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "BioGptForCausalLM",
        "BioGptForTokenClassification",
        "BioGptForSequenceClassification",
        "BioGptModel",
        "BioGptPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_biogpt import BIOGPT_PRETRAINED_CONFIG_ARCHIVE_MAP, BioGptConfig
from .tokenization_biogpt import BioGptTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_biogpt import (
BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST,
BioGptForCausalLM,
BioGptForSequenceClassification,
BioGptForTokenClassification,
BioGptModel,
BioGptPreTrainedModel,
)
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
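# With the lazy pattern above, `import transformers.models.biogpt` stays cheap:
# the heavy submodules are only imported when one of the names registered in
# `_import_structure` is actually accessed on the module.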
| 195 | 0 |