| code | code_codestyle | style_context | style_context_codestyle | label |
|---|---|---|---|---|
| string (82 to 54.1k chars) | int64 (0 to 699) | string (111 to 35.6k chars) | int64 (0 to 699) | int64 (0 to 1) |

Each row below is flattened: the code cell, then its code_codestyle id in brackets, then the style_context cell, then the context's codestyle id and the row label.
from __future__ import annotations
def slowsort(sequence: list, start: int | None = None, end: int | None = None) -> None:
    """Sort sequence[start..end] (both inclusive) in place using the deliberately inefficient slowsort algorithm."""
    if start is None:
        start = 0
    if end is None:
        end = len(sequence) - 1
    if start >= end:
        return
    mid = (start + end) // 2
    slowsort(sequence, start, mid)
    slowsort(sequence, mid + 1, end)
    if sequence[end] < sequence[mid]:
        sequence[end], sequence[mid] = sequence[mid], sequence[end]
    slowsort(sequence, start, end - 1)
if __name__ == "__main__":
from doctest import testmod
testmod()
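A minimal usage sketch (illustrative values, assuming the deobfuscated names above):

# slowsort mutates the list in place and returns None.
data = [5, 3, 1, 4, 2]
slowsort(data)
print(data)  # [1, 2, 3, 4, 5]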
[code_codestyle: 0]
"""simple docstring"""
import os
import re
import sys
import traceback
import warnings
from pathlib import Path
from typing import Dict, Optional, Union
from uuid import uuida
from huggingface_hub import HfFolder, ModelCard, ModelCardData, hf_hub_download, whoami
from huggingface_hub.file_download import REGEX_COMMIT_HASH
from huggingface_hub.utils import (
EntryNotFoundError,
RepositoryNotFoundError,
RevisionNotFoundError,
is_jinja_available,
)
from packaging import version
from requests import HTTPError
from .. import __version__
from .constants import (
DEPRECATED_REVISION_ARGS,
DIFFUSERS_CACHE,
HUGGINGFACE_CO_RESOLVE_ENDPOINT,
SAFETENSORS_WEIGHTS_NAME,
WEIGHTS_NAME,
)
from .import_utils import (
ENV_VARS_TRUE_VALUES,
_flax_version,
_jax_version,
_onnxruntime_version,
_torch_version,
is_flax_available,
is_onnx_available,
is_torch_available,
)
from .logging import get_logger
A = get_logger(__name__)
A = Path(__file__).parent / '''model_card_template.md'''
A = uuida().hex
A = os.getenv('''HF_HUB_OFFLINE''', '''''').upper() in ENV_VARS_TRUE_VALUES
A = os.getenv('''DISABLE_TELEMETRY''', '''''').upper() in ENV_VARS_TRUE_VALUES
A = HUGGINGFACE_CO_RESOLVE_ENDPOINT + '''/api/telemetry/'''
def __A ( a_ :Union[Dict, str, None] = None) -> str:
__a : Union[str, Any] = F"""diffusers/{__version__}; python/{sys.version.split()[0]}; session_id/{SESSION_ID}"""
if DISABLE_TELEMETRY or HF_HUB_OFFLINE:
return ua + "; telemetry/off"
if is_torch_available():
ua += F"""; torch/{_torch_version}"""
if is_flax_available():
ua += F"""; jax/{_jax_version}"""
ua += F"""; flax/{_flax_version}"""
if is_onnx_available():
ua += F"""; onnxruntime/{_onnxruntime_version}"""
# CI will set this value to True
if os.environ.get('''DIFFUSERS_IS_CI''' , '''''').upper() in ENV_VARS_TRUE_VALUES:
ua += "; is_ci/true"
if isinstance(a_ , a_):
ua += "; " + "; ".join(F"""{k}/{v}""" for k, v in user_agent.items())
elif isinstance(a_ , a_):
ua += "; " + user_agent
return ua
def __A ( a_ :str , a_ :Optional[str] = None , a_ :Optional[str] = None) -> Optional[int]:
if token is None:
__a : Any = HfFolder.get_token()
if organization is None:
__a : List[Any] = whoami(a_)['''name''']
return F"""{username}/{model_id}"""
else:
return F"""{organization}/{model_id}"""
def __A ( a_ :Union[str, Any] , a_ :List[str]) -> Optional[Any]:
if not is_jinja_available():
raise ValueError(
'''Modelcard rendering is based on Jinja templates.'''
''' Please make sure to have `jinja` installed before using `create_model_card`.'''
''' To install it, please run `pip install Jinja2`.''')
if hasattr(a_ , '''local_rank''') and args.local_rank not in [-1, 0]:
return
__a : int = args.hub_token if hasattr(a_ , '''hub_token''') else None
__a : Any = get_full_repo_name(a_ , token=a_)
__a : Tuple = ModelCard.from_template(
card_data=ModelCardData( # Card metadata object that will be converted to YAML block
language='''en''' , license='''apache-2.0''' , library_name='''diffusers''' , tags=[] , datasets=args.dataset_name , metrics=[] , ) , template_path=a_ , model_name=a_ , repo_name=a_ , dataset_name=args.dataset_name if hasattr(a_ , '''dataset_name''') else None , learning_rate=args.learning_rate , train_batch_size=args.train_batch_size , eval_batch_size=args.eval_batch_size , gradient_accumulation_steps=(
args.gradient_accumulation_steps if hasattr(a_ , '''gradient_accumulation_steps''') else None
) , adam_betaa=args.adam_betaa if hasattr(a_ , '''adam_beta1''') else None , adam_betaa=args.adam_betaa if hasattr(a_ , '''adam_beta2''') else None , adam_weight_decay=args.adam_weight_decay if hasattr(a_ , '''adam_weight_decay''') else None , adam_epsilon=args.adam_epsilon if hasattr(a_ , '''adam_epsilon''') else None , lr_scheduler=args.lr_scheduler if hasattr(a_ , '''lr_scheduler''') else None , lr_warmup_steps=args.lr_warmup_steps if hasattr(a_ , '''lr_warmup_steps''') else None , ema_inv_gamma=args.ema_inv_gamma if hasattr(a_ , '''ema_inv_gamma''') else None , ema_power=args.ema_power if hasattr(a_ , '''ema_power''') else None , ema_max_decay=args.ema_max_decay if hasattr(a_ , '''ema_max_decay''') else None , mixed_precision=args.mixed_precision , )
__a : List[Any] = os.path.join(args.output_dir , '''README.md''')
model_card.save(a_)
def __A ( a_ :Optional[str] , a_ :Optional[str] = None) -> Union[str, Any]:
if resolved_file is None or commit_hash is not None:
return commit_hash
__a : Any = str(Path(a_).as_posix())
__a : Optional[int] = re.search(R'''snapshots/([^/]+)/''' , a_)
if search is None:
return None
__a : Dict = search.groups()[0]
return commit_hash if REGEX_COMMIT_HASH.match(a_) else None
# Old default cache path, potentially to be migrated.
# This logic was more or less taken from `transformers`, with the following differences:
# - Diffusers doesn't use custom environment variables to specify the cache path.
# - There is no need to migrate the cache format, just move the files to the new location.
A = os.path.expanduser(
os.getenv('''HF_HOME''', os.path.join(os.getenv('''XDG_CACHE_HOME''', '''~/.cache'''), '''huggingface'''))
)
A = os.path.join(hf_cache_home, '''diffusers''')
def __A ( a_ :Optional[str] = None , a_ :Optional[str] = None) -> None:
if new_cache_dir is None:
__a : Dict = DIFFUSERS_CACHE
if old_cache_dir is None:
__a : List[Any] = old_diffusers_cache
__a : Union[str, Any] = Path(a_).expanduser()
__a : Dict = Path(a_).expanduser()
for old_blob_path in old_cache_dir.glob('''**/blobs/*'''):
if old_blob_path.is_file() and not old_blob_path.is_symlink():
__a : List[Any] = new_cache_dir / old_blob_path.relative_to(a_)
new_blob_path.parent.mkdir(parents=a_ , exist_ok=a_)
os.replace(a_ , a_)
try:
os.symlink(a_ , a_)
except OSError:
logger.warning(
'''Could not create symlink between old cache and new cache. If you use an older version of diffusers again, files will be re-downloaded.''')
# At this point, old_cache_dir contains symlinks to the new cache (it can still be used).
A = os.path.join(DIFFUSERS_CACHE, '''version_diffusers_cache.txt''')
if not os.path.isfile(cache_version_file):
A = 0
else:
with open(cache_version_file) as f:
try:
A = int(f.read())
except ValueError:
A = 0
if cache_version < 1:
A = os.path.isdir(old_diffusers_cache) and len(os.listdir(old_diffusers_cache)) > 0
if old_cache_is_not_empty:
logger.warning(
'''The cache for model files in Diffusers v0.14.0 has moved to a new location. Moving your '''
'''existing cached models. This is a one-time operation, you can interrupt it or run it '''
'''later by calling `diffusers.utils.hub_utils.move_cache()`.'''
)
try:
move_cache()
except Exception as e:
A = '''\n'''.join(traceback.format_tb(e.__traceback__))
logger.error(
F'There was a problem when trying to move your cache:\n\n{trace}\n{e.__class__.__name__}: {e}\n\nPlease '
'''file an issue at https://github.com/huggingface/diffusers/issues/new/choose, copy paste this whole '''
'''message and we will do our best to help.'''
)
if cache_version < 1:
try:
os.makedirs(DIFFUSERS_CACHE, exist_ok=True)
with open(cache_version_file, '''w''') as f:
f.write('''1''')
except Exception:
logger.warning(
F'There was a problem when trying to write in your cache folder ({DIFFUSERS_CACHE}). Please, ensure '
'''the directory exists and can be written to.'''
)
def __A ( a_ :str , a_ :Optional[str] = None) -> str:
if variant is not None:
__a : Dict = weights_name.split('''.''')
__a : List[Any] = splits[:-1] + [variant] + splits[-1:]
__a : Tuple = '''.'''.join(a_)
return weights_name
def __A ( a_ :List[Any] , *,
a_ :Union[str, Any] , a_ :Dict , a_ :Union[str, Any] , a_ :Optional[int] , a_ :str , a_ :Any , a_ :str , a_ :Optional[int] , a_ :str , a_ :Tuple , a_ :List[str]=None , ) -> Dict:
__a : int = str(a_)
if os.path.isfile(a_):
return pretrained_model_name_or_path
elif os.path.isdir(a_):
if os.path.isfile(os.path.join(a_ , a_)):
# Load from a PyTorch checkpoint
__a : Union[str, Any] = os.path.join(a_ , a_)
return model_file
elif subfolder is not None and os.path.isfile(
os.path.join(a_ , a_ , a_)):
__a : Optional[Any] = os.path.join(a_ , a_ , a_)
return model_file
else:
raise EnvironmentError(
F"""Error no file named {weights_name} found in directory {pretrained_model_name_or_path}.""")
else:
# 1. First check if deprecated way of loading from branches is used
if (
revision in DEPRECATED_REVISION_ARGS
and (weights_name == WEIGHTS_NAME or weights_name == SAFETENSORS_WEIGHTS_NAME)
and version.parse(version.parse(a_).base_version) >= version.parse('''0.20.0''')
):
try:
__a : Any = hf_hub_download(
a_ , filename=_add_variant(a_ , a_) , cache_dir=a_ , force_download=a_ , proxies=a_ , resume_download=a_ , local_files_only=a_ , use_auth_token=a_ , user_agent=a_ , subfolder=a_ , revision=revision or commit_hash , )
warnings.warn(
F"""Loading the variant {revision} from {pretrained_model_name_or_path} via `revision='{revision}'` is deprecated. Loading instead from `revision='main'` with `variant={revision}`. Loading model variants via `revision='{revision}'` will be removed in diffusers v1. Please use `variant='{revision}'` instead.""" , a_ , )
return model_file
except: # noqa: E722
warnings.warn(
F"""You are loading the variant {revision} from {pretrained_model_name_or_path} via `revision='{revision}'`. This behavior is deprecated and will be removed in diffusers v1. One should use `variant='{revision}'` instead. However, it appears that {pretrained_model_name_or_path} currently does not have a {_add_variant(a_ , a_)} file in the 'main' branch of {pretrained_model_name_or_path}. \n The Diffusers team and community would be very grateful if you could open an issue: https://github.com/huggingface/diffusers/issues/new with the title '{pretrained_model_name_or_path} is missing {_add_variant(a_ , a_)}' so that the correct variant file can be added.""" , a_ , )
try:
# 2. Load model file as usual
__a : Optional[Any] = hf_hub_download(
a_ , filename=a_ , cache_dir=a_ , force_download=a_ , proxies=a_ , resume_download=a_ , local_files_only=a_ , use_auth_token=a_ , user_agent=a_ , subfolder=a_ , revision=revision or commit_hash , )
return model_file
except RepositoryNotFoundError:
raise EnvironmentError(
F"""{pretrained_model_name_or_path} is not a local folder and is not a valid model identifier """
'''listed on \'https://huggingface.co/models\'\nIf this is a private repository, make sure to pass a '''
'''token having permission to this repo with `use_auth_token` or log in with `huggingface-cli '''
'''login`.''')
except RevisionNotFoundError:
raise EnvironmentError(
F"""{revision} is not a valid git identifier (branch name, tag name or commit id) that exists for """
'''this model name. Check the model page at '''
F"""'https://huggingface.co/{pretrained_model_name_or_path}' for available revisions.""")
except EntryNotFoundError:
raise EnvironmentError(
F"""{pretrained_model_name_or_path} does not appear to have a file named {weights_name}.""")
except HTTPError as err:
raise EnvironmentError(
F"""There was a specific connection error when trying to load {pretrained_model_name_or_path}:\n{err}""")
except ValueError:
raise EnvironmentError(
F"""We couldn't connect to '{HUGGINGFACE_CO_RESOLVE_ENDPOINT}' to load this model, couldn't find it"""
F""" in the cached files and it looks like {pretrained_model_name_or_path} is not the path to a"""
F""" directory containing a file named {weights_name} or"""
''' \nCheckout your internet connection or see how to run the library in'''
''' offline mode at \'https://huggingface.co/docs/diffusers/installation#offline-mode\'.''')
except EnvironmentError:
raise EnvironmentError(
F"""Can't load the model for '{pretrained_model_name_or_path}'. If you were trying to load it from """
'''\'https://huggingface.co/models\', make sure you don\'t have a local directory with the same name. '''
F"""Otherwise, make sure '{pretrained_model_name_or_path}' is the correct path to a directory """
F"""containing a file named {weights_name}""")
[style_context_codestyle: 52 | label: 0]
import random
import unittest

import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer

from diffusers import (
    AutoencoderKL,
    DDIMScheduler,
    UNet3DConditionModel,
    VideoToVideoSDPipeline,
)
from diffusers.utils import floats_tensor, is_xformers_available, skip_mps
from diffusers.utils.testing_utils import enable_full_determinism, slow, torch_device

from ..pipeline_params import (
    TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
    TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin


enable_full_determinism()


@skip_mps
class VideoToVideoSDPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = VideoToVideoSDPipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS.union({"video"}) - {"image", "width", "height"}
    batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS.union({"video"}) - {"image"}
    required_optional_params = PipelineTesterMixin.required_optional_params - {"latents"}
    test_attention_slicing = False

    # No `output_type`.
    required_optional_params = frozenset(
        [
            "num_inference_steps",
            "generator",
            "latents",
            "return_dict",
            "callback",
            "callback_steps",
        ]
    )

    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet3DConditionModel(
            block_out_channels=(32, 64, 64, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=4,
            out_channels=4,
            down_block_types=("CrossAttnDownBlock3D", "CrossAttnDownBlock3D", "CrossAttnDownBlock3D", "DownBlock3D"),
            up_block_types=("UpBlock3D", "CrossAttnUpBlock3D", "CrossAttnUpBlock3D", "CrossAttnUpBlock3D"),
            cross_attention_dim=32,
            attention_head_dim=4,
        )
        scheduler = DDIMScheduler(
            beta_start=0.00085,
            beta_end=0.012,
            beta_schedule="scaled_linear",
            clip_sample=False,
            set_alpha_to_one=False,
        )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64],
            in_channels=3,
            out_channels=3,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=4,
            sample_size=128,
        )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
            hidden_act="gelu",
            projection_dim=512,
        )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        # 3 frames
        video = floats_tensor((1, 3, 3, 32, 32), rng=random.Random(seed)).to(device)

        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)

        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "video": video,
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 6.0,
            "output_type": "pt",
        }
        return inputs

    def test_text_to_video_default_case(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = VideoToVideoSDPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        inputs["output_type"] = "np"
        frames = sd_pipe(**inputs).frames
        image_slice = frames[0][-3:, -3:, -1]

        assert frames[0].shape == (32, 32, 3)
        expected_slice = np.array([106, 117, 113, 174, 137, 112, 148, 151, 131])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available(),
        reason="XFormers attention is only available with CUDA and `xformers` installed",
    )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(test_mean_pixel_difference=False, expected_max_diff=5e-3)

    @unittest.skip(reason="Batching needs to be properly figured out first for this pipeline.")
    def test_inference_batch_consistent(self):
        pass

    @unittest.skip(reason="Batching needs to be properly figured out first for this pipeline.")
    def test_inference_batch_single_identical(self):
        pass

    @unittest.skip(reason="`num_images_per_prompt` argument is not supported for this pipeline.")
    def test_num_images_per_prompt(self):
        pass

    def test_progress_bar(self):
        return super().test_progress_bar()


@slow
@skip_mps
class VideoToVideoSDPipelineSlowTests(unittest.TestCase):
    def test_two_step_model(self):
        pipe = VideoToVideoSDPipeline.from_pretrained("cerspense/zeroscope_v2_XL", torch_dtype=torch.float16)
        pipe.enable_model_cpu_offload()

        # 10 frames
        generator = torch.Generator(device="cpu").manual_seed(0)
        video = torch.randn((1, 10, 3, 1024, 576), generator=generator)
        video = video.to("cuda")

        prompt = "Spiderman is surfing"

        video_frames = pipe(prompt, video=video, generator=generator, num_inference_steps=3, output_type="pt").frames

        expected_array = np.array([-1.0458984, -1.1279297, -0.9663086, -0.91503906, -0.75097656])
        assert np.abs(video_frames.cpu().numpy()[0, 0, 0, 0, -5:] - expected_array).sum() < 1e-2
[code_codestyle: 1]
"""simple docstring"""
import copy
import os
from typing import TYPE_CHECKING, List, Union
if TYPE_CHECKING:
pass
from ...configuration_utils import PretrainedConfig
from ...utils import logging
A = logging.get_logger(__name__)
A = {
'''kakaobrain/align-base''': '''https://huggingface.co/kakaobrain/align-base/resolve/main/config.json''',
}
class __lowercase ( _UpperCamelCase ):
'''simple docstring'''
__lowerCAmelCase = '''align_text_model'''
def __init__( self , _UpperCAmelCase=30522 , _UpperCAmelCase=768 , _UpperCAmelCase=12 , _UpperCAmelCase=12 , _UpperCAmelCase=3072 , _UpperCAmelCase="gelu" , _UpperCAmelCase=0.1 , _UpperCAmelCase=0.1 , _UpperCAmelCase=512 , _UpperCAmelCase=2 , _UpperCAmelCase=0.0_2 , _UpperCAmelCase=1e-1_2 , _UpperCAmelCase=0 , _UpperCAmelCase="absolute" , _UpperCAmelCase=True , **_UpperCAmelCase , ):
super().__init__(**_UpperCAmelCase )
__a : int = vocab_size
__a : Optional[int] = hidden_size
__a : Dict = num_hidden_layers
__a : List[Any] = num_attention_heads
__a : Optional[int] = hidden_act
__a : List[Any] = intermediate_size
__a : List[Any] = hidden_dropout_prob
__a : List[str] = attention_probs_dropout_prob
__a : Optional[int] = max_position_embeddings
__a : List[str] = type_vocab_size
__a : Tuple = initializer_range
__a : Dict = layer_norm_eps
__a : Any = position_embedding_type
__a : Dict = use_cache
__a : Dict = pad_token_id
@classmethod
def _lowerCamelCase ( cls , _UpperCAmelCase , **_UpperCAmelCase ):
cls._set_token_in_kwargs(_UpperCAmelCase )
__a , __a : List[str] = cls.get_config_dict(_UpperCAmelCase , **_UpperCAmelCase )
# get the text config dict if we are loading from AlignConfig
if config_dict.get('''model_type''' ) == "align":
__a : Dict = config_dict['''text_config''']
if "model_type" in config_dict and hasattr(cls , '''model_type''' ) and config_dict["model_type"] != cls.model_type:
logger.warning(
f"""You are using a model of type {config_dict["model_type"]} to instantiate a model of type """
f"""{cls.model_type}. This is not supported for all configurations of models and can yield errors.""" )
return cls.from_dict(_UpperCAmelCase , **_UpperCAmelCase )
class __lowercase ( _UpperCamelCase ):
'''simple docstring'''
__lowerCAmelCase = '''align_vision_model'''
def __init__( self , _UpperCAmelCase = 3 , _UpperCAmelCase = 600 , _UpperCAmelCase = 2.0 , _UpperCAmelCase = 3.1 , _UpperCAmelCase = 8 , _UpperCAmelCase = [3, 3, 5, 3, 5, 5, 3] , _UpperCAmelCase = [32, 16, 24, 40, 80, 112, 192] , _UpperCAmelCase = [16, 24, 40, 80, 112, 192, 320] , _UpperCAmelCase = [] , _UpperCAmelCase = [1, 2, 2, 2, 1, 2, 1] , _UpperCAmelCase = [1, 2, 2, 3, 3, 4, 1] , _UpperCAmelCase = [1, 6, 6, 6, 6, 6, 6] , _UpperCAmelCase = 0.2_5 , _UpperCAmelCase = "swish" , _UpperCAmelCase = 2560 , _UpperCAmelCase = "mean" , _UpperCAmelCase = 0.0_2 , _UpperCAmelCase = 0.0_0_1 , _UpperCAmelCase = 0.9_9 , _UpperCAmelCase = 0.2 , **_UpperCAmelCase , ):
super().__init__(**_UpperCAmelCase )
__a : Tuple = num_channels
__a : str = image_size
__a : List[Any] = width_coefficient
__a : Optional[int] = depth_coefficient
__a : Union[str, Any] = depth_divisor
__a : int = kernel_sizes
__a : Dict = in_channels
__a : List[str] = out_channels
__a : Any = depthwise_padding
__a : str = strides
__a : Optional[Any] = num_block_repeats
__a : Optional[Any] = expand_ratios
__a : Any = squeeze_expansion_ratio
__a : int = hidden_act
__a : Union[str, Any] = hidden_dim
__a : Union[str, Any] = pooling_type
__a : Tuple = initializer_range
__a : List[str] = batch_norm_eps
__a : List[Any] = batch_norm_momentum
__a : Union[str, Any] = drop_connect_rate
__a : List[Any] = sum(_UpperCAmelCase ) * 4
@classmethod
def _lowerCamelCase ( cls , _UpperCAmelCase , **_UpperCAmelCase ):
cls._set_token_in_kwargs(_UpperCAmelCase )
__a , __a : Optional[Any] = cls.get_config_dict(_UpperCAmelCase , **_UpperCAmelCase )
# get the vision config dict if we are loading from AlignConfig
if config_dict.get('''model_type''' ) == "align":
__a : Optional[Any] = config_dict['''vision_config''']
if "model_type" in config_dict and hasattr(cls , '''model_type''' ) and config_dict["model_type"] != cls.model_type:
logger.warning(
f"""You are using a model of type {config_dict["model_type"]} to instantiate a model of type """
f"""{cls.model_type}. This is not supported for all configurations of models and can yield errors.""" )
return cls.from_dict(_UpperCAmelCase , **_UpperCAmelCase )
class __lowercase ( _UpperCamelCase ):
'''simple docstring'''
__lowerCAmelCase = '''align'''
__lowerCAmelCase = True
def __init__( self , _UpperCAmelCase=None , _UpperCAmelCase=None , _UpperCAmelCase=640 , _UpperCAmelCase=1.0 , _UpperCAmelCase=0.0_2 , **_UpperCAmelCase , ):
super().__init__(**_UpperCAmelCase )
if text_config is None:
__a : Dict = {}
logger.info('''text_config is None. Initializing the AlignTextConfig with default values.''' )
if vision_config is None:
__a : Any = {}
logger.info('''vision_config is None. Initializing the AlignVisionConfig with default values.''' )
__a : Any = AlignTextConfig(**_UpperCAmelCase )
__a : Any = AlignVisionConfig(**_UpperCAmelCase )
__a : Optional[int] = projection_dim
__a : Union[str, Any] = temperature_init_value
__a : int = initializer_range
@classmethod
def _lowerCamelCase ( cls , _UpperCAmelCase , _UpperCAmelCase , **_UpperCAmelCase ):
return cls(text_config=text_config.to_dict() , vision_config=vision_config.to_dict() , **_UpperCAmelCase )
def _lowerCamelCase ( self ):
__a : Union[str, Any] = copy.deepcopy(self.__dict__ )
__a : Tuple = self.text_config.to_dict()
__a : Union[str, Any] = self.vision_config.to_dict()
__a : int = self.__class__.model_type
return output
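A minimal usage sketch (illustrative, using the default values defined above):

text_config = AlignTextConfig()
vision_config = AlignVisionConfig()
config = AlignConfig.from_text_vision_configs(text_config, vision_config)
print(config.projection_dim)  # 640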
[style_context_codestyle: 52 | label: 0]
import os


def largest_product(grid):
    n_columns = len(grid[0])
    n_rows = len(grid)
    largest = 0
    lr_diag_product = 0
    rl_diag_product = 0

    # Check vertically, horizontally, diagonally at the same time (only works
    # for nxn grid)
    for i in range(n_columns):
        for j in range(n_rows - 3):
            vert_product = grid[j][i] * grid[j + 1][i] * grid[j + 2][i] * grid[j + 3][i]
            horz_product = grid[i][j] * grid[i][j + 1] * grid[i][j + 2] * grid[i][j + 3]

            # Left-to-right diagonal (\) product
            if i < n_columns - 3:
                lr_diag_product = (
                    grid[i][j]
                    * grid[i + 1][j + 1]
                    * grid[i + 2][j + 2]
                    * grid[i + 3][j + 3]
                )

            # Right-to-left diagonal(/) product
            if i > 2:
                rl_diag_product = (
                    grid[i][j]
                    * grid[i - 1][j + 1]
                    * grid[i - 2][j + 2]
                    * grid[i - 3][j + 3]
                )

            max_product = max(vert_product, horz_product, lr_diag_product, rl_diag_product)
            if max_product > largest:
                largest = max_product

    return largest


def solution():
    grid = []
    with open(os.path.dirname(__file__) + "/grid.txt") as file:
        for line in file:
            grid.append(line.strip("\n").split(" "))

    grid = [[int(i) for i in grid[j]] for j in range(len(grid))]

    return largest_product(grid)
if __name__ == "__main__":
print(solution())
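A worked example on a hypothetical 4x4 grid (not the Project Euler data): the best run of four here is the bottom row.

example_grid = [
    [1, 2, 3, 4],
    [5, 6, 7, 8],
    [9, 10, 11, 12],
    [13, 14, 15, 16],
]
print(largest_product(example_grid))  # 43680 == 13 * 14 * 15 * 16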
[code_codestyle: 2]
"""simple docstring"""
from __future__ import annotations
from random import choice
def __A ( a_ :Tuple) -> List[str]:
return choice(a_)
def __A ( a_ :list[int] , a_ :int) -> int:
__a : Optional[int] = random_pivot(a_)
# partition based on pivot
# linear time
__a : Union[str, Any] = [e for e in lst if e < pivot]
__a : Any = [e for e in lst if e > pivot]
# if we get lucky, pivot might be the element we want.
# we can easily see this:
# small (elements smaller than k)
# + pivot (kth element)
# + big (elements larger than k)
if len(a_) == k - 1:
return pivot
# pivot is in elements bigger than k
elif len(a_) < k - 1:
return kth_number(a_ , k - len(a_) - 1)
# pivot is in elements smaller than k
else:
return kth_number(a_ , a_)
if __name__ == "__main__":
import doctest
doctest.testmod()
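A minimal usage sketch (illustrative, assumes distinct elements):

print(kth_number([2, 1, 3, 4, 5], 3))  # 3, the 3rd smallest
print(kth_number([10, 2, 7], 1))  # 2, the minimum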
[style_context_codestyle: 52 | label: 0]
from __future__ import annotations

from math import pow, sqrt


def electrical_impedance(resistance: float, reactance: float, impedance: float) -> dict[str, float]:
    """
    Apply the electrical impedance formula Z = sqrt(R**2 + X**2): exactly one of the
    three arguments must be 0, and the function solves for that quantity.
    """
    if (resistance, reactance, impedance).count(0) != 1:
        raise ValueError("One and only one argument must be 0")
    if resistance == 0:
        return {"resistance": sqrt(pow(impedance, 2) - pow(reactance, 2))}
    elif reactance == 0:
        return {"reactance": sqrt(pow(impedance, 2) - pow(resistance, 2))}
    elif impedance == 0:
        return {"impedance": sqrt(pow(resistance, 2) + pow(reactance, 2))}
    else:
        raise ValueError("Exactly one argument must be 0")
if __name__ == "__main__":
import doctest
doctest.testmod()
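A worked example (illustrative 3-4-5 triangle of resistance and reactance):

print(electrical_impedance(3, 4, 0))  # {'impedance': 5.0}, since sqrt(3**2 + 4**2) == 5
print(electrical_impedance(0, 4, 5))  # {'resistance': 3.0}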
[code_codestyle: 3]
"""simple docstring"""
import argparse
import csv
import logging
import os
import random
import numpy as np
import torch
from torch.utils.data import DataLoader, RandomSampler, SequentialSampler, TensorDataset
from tqdm import tqdm, trange
from transformers import (
CONFIG_NAME,
WEIGHTS_NAME,
AdamW,
OpenAIGPTDoubleHeadsModel,
OpenAIGPTTokenizer,
get_linear_schedule_with_warmup,
)
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''', datefmt='''%m/%d/%Y %H:%M:%S''', level=logging.INFO
)
A = logging.getLogger(__name__)
def __A ( a_ :Union[str, Any] , a_ :Dict) -> Union[str, Any]:
__a : Optional[int] = np.argmax(a_ , axis=1)
return np.sum(outputs == labels)
def __A ( a_ :Any) -> str:
with open(a_ , encoding='''utf_8''') as f:
__a : List[Any] = csv.reader(a_)
__a : List[str] = []
next(a_) # skip the first line
for line in tqdm(a_):
output.append((''' '''.join(line[1:5]), line[5], line[6], int(line[-1]) - 1))
return output
def __A ( a_ :Dict , a_ :str , a_ :str , a_ :List[Any] , a_ :Tuple , a_ :List[Any]) -> Any:
__a : List[str] = []
for dataset in encoded_datasets:
__a : List[str] = len(a_)
__a : List[str] = np.zeros((n_batch, 2, input_len) , dtype=np.intaa)
__a : Tuple = np.zeros((n_batch, 2) , dtype=np.intaa)
__a : Tuple = np.full((n_batch, 2, input_len) , fill_value=-1_00 , dtype=np.intaa)
__a : Optional[Any] = np.zeros((n_batch,) , dtype=np.intaa)
for (
i,
(story, conta, conta, mc_label),
) in enumerate(a_):
__a : str = [start_token] + story[:cap_length] + [delimiter_token] + conta[:cap_length] + [clf_token]
__a : Tuple = [start_token] + story[:cap_length] + [delimiter_token] + conta[:cap_length] + [clf_token]
__a : Tuple = with_conta
__a : int = with_conta
__a : List[str] = len(a_) - 1
__a : int = len(a_) - 1
__a : Optional[int] = with_conta
__a : Tuple = with_conta
__a : List[Any] = mc_label
__a : Any = (input_ids, mc_token_ids, lm_labels, mc_labels)
tensor_datasets.append(tuple(torch.tensor(a_) for t in all_inputs))
return tensor_datasets
def __A ( ) -> Union[str, Any]:
__a : List[str] = argparse.ArgumentParser()
parser.add_argument('''--model_name''' , type=a_ , default='''openai-gpt''' , help='''pretrained model name''')
parser.add_argument('''--do_train''' , action='''store_true''' , help='''Whether to run training.''')
parser.add_argument('''--do_eval''' , action='''store_true''' , help='''Whether to run eval on the dev set.''')
parser.add_argument(
'''--output_dir''' , default=a_ , type=a_ , required=a_ , help='''The output directory where the model predictions and checkpoints will be written.''' , )
parser.add_argument('''--train_dataset''' , type=a_ , default='''''')
parser.add_argument('''--eval_dataset''' , type=a_ , default='''''')
parser.add_argument('''--seed''' , type=a_ , default=42)
parser.add_argument('''--num_train_epochs''' , type=a_ , default=3)
parser.add_argument('''--train_batch_size''' , type=a_ , default=8)
parser.add_argument('''--eval_batch_size''' , type=a_ , default=16)
parser.add_argument('''--adam_epsilon''' , default=1e-8 , type=a_ , help='''Epsilon for Adam optimizer.''')
parser.add_argument('''--max_grad_norm''' , type=a_ , default=1)
parser.add_argument(
'''--max_steps''' , default=-1 , type=a_ , help=(
'''If > 0: set total number of training steps to perform. Override num_train_epochs.'''
) , )
parser.add_argument(
'''--gradient_accumulation_steps''' , type=a_ , default=1 , help='''Number of updates steps to accumulate before performing a backward/update pass.''' , )
parser.add_argument('''--learning_rate''' , type=a_ , default=6.25e-5)
parser.add_argument('''--warmup_steps''' , default=0 , type=a_ , help='''Linear warmup over warmup_steps.''')
parser.add_argument('''--lr_schedule''' , type=a_ , default='''warmup_linear''')
parser.add_argument('''--weight_decay''' , type=a_ , default=0.0_1)
parser.add_argument('''--lm_coef''' , type=a_ , default=0.9)
parser.add_argument('''--n_valid''' , type=a_ , default=3_74)
parser.add_argument('''--server_ip''' , type=a_ , default='''''' , help='''Can be used for distant debugging.''')
parser.add_argument('''--server_port''' , type=a_ , default='''''' , help='''Can be used for distant debugging.''')
__a : str = parser.parse_args()
print(a_)
if args.server_ip and args.server_port:
# Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script
import ptvsd
print('''Waiting for debugger attach''')
ptvsd.enable_attach(address=(args.server_ip, args.server_port) , redirect_output=a_)
ptvsd.wait_for_attach()
random.seed(args.seed)
np.random.seed(args.seed)
torch.manual_seed(args.seed)
torch.cuda.manual_seed_all(args.seed)
__a : Tuple = torch.device('''cuda''' if torch.cuda.is_available() else '''cpu''')
__a : str = torch.cuda.device_count()
logger.info('''device: {}, n_gpu {}'''.format(a_ , a_))
if not args.do_train and not args.do_eval:
raise ValueError('''At least one of `do_train` or `do_eval` must be True.''')
if not os.path.exists(args.output_dir):
os.makedirs(args.output_dir)
# Load tokenizer and model
# This loading functions also add new tokens and embeddings called `special tokens`
# These new embeddings will be fine-tuned on the RocStories dataset
__a : List[str] = ['''_start_''', '''_delimiter_''', '''_classify_''']
__a : Union[str, Any] = OpenAIGPTTokenizer.from_pretrained(args.model_name)
tokenizer.add_tokens(a_)
__a : Union[str, Any] = tokenizer.convert_tokens_to_ids(a_)
__a : Optional[Any] = OpenAIGPTDoubleHeadsModel.from_pretrained(args.model_name)
model.resize_token_embeddings(len(a_))
model.to(a_)
# Load and encode the datasets
def tokenize_and_encode(a_ :List[Any]):
if isinstance(a_ , a_):
return tokenizer.convert_tokens_to_ids(tokenizer.tokenize(a_))
elif isinstance(a_ , a_):
return obj
return [tokenize_and_encode(a_) for o in obj]
logger.info('''Encoding dataset...''')
__a : Dict = load_rocstories_dataset(args.train_dataset)
__a : int = load_rocstories_dataset(args.eval_dataset)
__a : Optional[int] = (train_dataset, eval_dataset)
__a : List[Any] = tokenize_and_encode(a_)
# Compute the max input length for the Transformer
__a : List[Any] = model.config.n_positions // 2 - 2
__a : int = max(
len(story[:max_length]) + max(len(conta[:max_length]) , len(conta[:max_length])) + 3
for dataset in encoded_datasets
for story, conta, conta, _ in dataset)
__a : Union[str, Any] = min(a_ , model.config.n_positions) # Max size of input for the pre-trained model
# Prepare inputs tensors and dataloaders
__a : Tuple = pre_process_datasets(a_ , a_ , a_ , *a_)
__a , __a : Tuple = tensor_datasets[0], tensor_datasets[1]
__a : List[str] = TensorDataset(*a_)
__a : Optional[Any] = RandomSampler(a_)
__a : str = DataLoader(a_ , sampler=a_ , batch_size=args.train_batch_size)
__a : List[str] = TensorDataset(*a_)
__a : Optional[int] = SequentialSampler(a_)
__a : Optional[Any] = DataLoader(a_ , sampler=a_ , batch_size=args.eval_batch_size)
# Prepare optimizer
if args.do_train:
if args.max_steps > 0:
__a : int = args.max_steps
__a : Optional[int] = args.max_steps // (len(a_) // args.gradient_accumulation_steps) + 1
else:
__a : str = len(a_) // args.gradient_accumulation_steps * args.num_train_epochs
__a : List[Any] = list(model.named_parameters())
__a : Optional[int] = ['''bias''', '''LayerNorm.bias''', '''LayerNorm.weight''']
__a : List[str] = [
{
'''params''': [p for n, p in param_optimizer if not any(nd in n for nd in no_decay)],
'''weight_decay''': args.weight_decay,
},
{'''params''': [p for n, p in param_optimizer if any(nd in n for nd in no_decay)], '''weight_decay''': 0.0},
]
__a : int = AdamW(a_ , lr=args.learning_rate , eps=args.adam_epsilon)
__a : Union[str, Any] = get_linear_schedule_with_warmup(
a_ , num_warmup_steps=args.warmup_steps , num_training_steps=a_)
if args.do_train:
__a , __a , __a : Dict = 0, 0, None
model.train()
for _ in trange(int(args.num_train_epochs) , desc='''Epoch'''):
__a : Dict = 0
__a : Dict = 0
__a : List[str] = tqdm(a_ , desc='''Training''')
for step, batch in enumerate(a_):
__a : Dict = tuple(t.to(a_) for t in batch)
__a , __a , __a , __a : str = batch
__a : List[Any] = model(a_ , mc_token_ids=a_ , lm_labels=a_ , mc_labels=a_)
__a : Optional[Any] = args.lm_coef * losses[0] + losses[1]
loss.backward()
optimizer.step()
scheduler.step()
optimizer.zero_grad()
tr_loss += loss.item()
__a : int = (
loss.item() if exp_average_loss is None else 0.7 * exp_average_loss + 0.3 * loss.item()
)
nb_tr_steps += 1
__a : Tuple = '''Training loss: {:.2e} lr: {:.2e}'''.format(a_ , scheduler.get_lr()[0])
# Save a trained model
if args.do_train:
# Save a trained model, configuration and tokenizer
__a : Dict = model.module if hasattr(a_ , '''module''') else model # Only save the model itself
# If we save using the predefined names, we can load using `from_pretrained`
__a : int = os.path.join(args.output_dir , a_)
__a : str = os.path.join(args.output_dir , a_)
torch.save(model_to_save.state_dict() , a_)
model_to_save.config.to_json_file(a_)
tokenizer.save_vocabulary(args.output_dir)
# Load a trained model and vocabulary that you have fine-tuned
__a : str = OpenAIGPTDoubleHeadsModel.from_pretrained(args.output_dir)
__a : Union[str, Any] = OpenAIGPTTokenizer.from_pretrained(args.output_dir)
model.to(a_)
if args.do_eval:
model.eval()
__a , __a : List[Any] = 0, 0
__a , __a : Union[str, Any] = 0, 0
for batch in tqdm(a_ , desc='''Evaluating'''):
__a : str = tuple(t.to(a_) for t in batch)
__a , __a , __a , __a : List[Any] = batch
with torch.no_grad():
__a , __a , __a , __a : str = model(
a_ , mc_token_ids=a_ , lm_labels=a_ , mc_labels=a_)
__a : List[str] = mc_logits.detach().cpu().numpy()
__a : Optional[Any] = mc_labels.to('''cpu''').numpy()
__a : str = accuracy(a_ , a_)
eval_loss += mc_loss.mean().item()
eval_accuracy += tmp_eval_accuracy
nb_eval_examples += input_ids.size(0)
nb_eval_steps += 1
__a : Tuple = eval_loss / nb_eval_steps
__a : List[str] = eval_accuracy / nb_eval_examples
__a : List[Any] = tr_loss / nb_tr_steps if args.do_train else None
__a : List[str] = {'''eval_loss''': eval_loss, '''eval_accuracy''': eval_accuracy, '''train_loss''': train_loss}
__a : Dict = os.path.join(args.output_dir , '''eval_results.txt''')
with open(a_ , '''w''') as writer:
logger.info('''***** Eval results *****''')
for key in sorted(result.keys()):
logger.info(''' %s = %s''' , a_ , str(result[key]))
writer.write('''%s = %s\n''' % (key, str(result[key])))
if __name__ == "__main__":
main()
[style_context_codestyle: 52 | label: 0]
"""simple docstring"""
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__UpperCamelCase : Dict = logging.get_logger(__name__)
__UpperCamelCase : Union[str, Any] = {
'''microsoft/unispeech-sat-base-100h-libri-ft''': (
'''https://huggingface.co/microsoft/unispeech-sat-base-100h-libri-ft/resolve/main/config.json'''
),
# See all UniSpeechSat models at https://huggingface.co/models?filter=unispeech_sat
}
class a ( a__ ):
snake_case__ = '''unispeech-sat'''
def __init__( self , _snake_case=32 , _snake_case=7_68 , _snake_case=12 , _snake_case=12 , _snake_case=30_72 , _snake_case="gelu" , _snake_case=0.1 , _snake_case=0.1 , _snake_case=0.1 , _snake_case=0.0 , _snake_case=0.0 , _snake_case=0.1 , _snake_case=0.1 , _snake_case=0.02 , _snake_case=1E-5 , _snake_case="group" , _snake_case="gelu" , _snake_case=(5_12, 5_12, 5_12, 5_12, 5_12, 5_12, 5_12) , _snake_case=(5, 2, 2, 2, 2, 2, 2) , _snake_case=(10, 3, 3, 3, 3, 2, 2) , _snake_case=False , _snake_case=1_28 , _snake_case=16 , _snake_case=False , _snake_case=True , _snake_case=0.05 , _snake_case=10 , _snake_case=2 , _snake_case=0.0 , _snake_case=10 , _snake_case=0 , _snake_case=3_20 , _snake_case=2 , _snake_case=0.1 , _snake_case=1_00 , _snake_case=2_56 , _snake_case=2_56 , _snake_case=0.1 , _snake_case="mean" , _snake_case=False , _snake_case=False , _snake_case=2_56 , _snake_case=(5_12, 5_12, 5_12, 5_12, 15_00) , _snake_case=(5, 3, 3, 1, 1) , _snake_case=(1, 2, 3, 1, 1) , _snake_case=5_12 , _snake_case=0 , _snake_case=1 , _snake_case=2 , _snake_case=5_04 , **_snake_case , ):
"""simple docstring"""
super().__init__(**_snake_case , pad_token_id=_snake_case , bos_token_id=_snake_case , eos_token_id=_snake_case )
lowerCAmelCase = hidden_size
lowerCAmelCase = feat_extract_norm
lowerCAmelCase = feat_extract_activation
lowerCAmelCase = list(_snake_case )
lowerCAmelCase = list(_snake_case )
lowerCAmelCase = list(_snake_case )
lowerCAmelCase = conv_bias
lowerCAmelCase = num_conv_pos_embeddings
lowerCAmelCase = num_conv_pos_embedding_groups
lowerCAmelCase = len(self.conv_dim )
lowerCAmelCase = num_hidden_layers
lowerCAmelCase = intermediate_size
lowerCAmelCase = hidden_act
lowerCAmelCase = num_attention_heads
lowerCAmelCase = hidden_dropout
lowerCAmelCase = attention_dropout
lowerCAmelCase = activation_dropout
lowerCAmelCase = feat_proj_dropout
lowerCAmelCase = final_dropout
lowerCAmelCase = layerdrop
lowerCAmelCase = layer_norm_eps
lowerCAmelCase = initializer_range
lowerCAmelCase = vocab_size
lowerCAmelCase = num_clusters
lowerCAmelCase = do_stable_layer_norm
lowerCAmelCase = use_weighted_layer_sum
if (
(len(self.conv_stride ) != self.num_feat_extract_layers)
or (len(self.conv_kernel ) != self.num_feat_extract_layers)
or (len(self.conv_dim ) != self.num_feat_extract_layers)
):
raise ValueError(
'Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =='
' `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ='
F' {len(self.conv_dim )}`, `len(config.conv_stride) = {len(self.conv_stride )}`,'
F' `len(config.conv_kernel) = {len(self.conv_kernel )}`.' )
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
lowerCAmelCase = apply_spec_augment
lowerCAmelCase = mask_time_prob
lowerCAmelCase = mask_time_length
lowerCAmelCase = mask_time_min_masks
lowerCAmelCase = mask_feature_prob
lowerCAmelCase = mask_feature_length
lowerCAmelCase = mask_feature_min_masks
# parameters for pretraining with codevector quantized representations
lowerCAmelCase = num_codevectors_per_group
lowerCAmelCase = num_codevector_groups
lowerCAmelCase = contrastive_logits_temperature
lowerCAmelCase = feat_quantizer_dropout
lowerCAmelCase = num_negatives
lowerCAmelCase = codevector_dim
lowerCAmelCase = proj_codevector_dim
lowerCAmelCase = diversity_loss_weight
# ctc loss
lowerCAmelCase = ctc_loss_reduction
lowerCAmelCase = ctc_zero_infinity
# SequenceClassification-specific parameter. Feel free to ignore for other classes.
lowerCAmelCase = classifier_proj_size
# XVector-specific parameters. Feel free to ignore for other classes.
lowerCAmelCase = list(_snake_case )
lowerCAmelCase = list(_snake_case )
lowerCAmelCase = list(_snake_case )
lowerCAmelCase = xvector_output_dim
@property
def UpperCamelCase__ ( self ):
"""simple docstring"""
return functools.reduce(operator.mul , self.conv_stride , 1 )
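A worked example of the property above: with the default conv_stride of (5, 2, 2, 2, 2, 2, 2), the strides multiply out to 5 * 2**6.

config = UniSpeechSatConfig()
print(config.inputs_to_logits_ratio)  # 320 input samples per output frame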
[code_codestyle: 4]
"""simple docstring"""
import unittest
import numpy as np
from transformers import RobertaConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_flax_available():
from transformers.models.roberta.modeling_flax_roberta import (
FlaxRobertaForCausalLM,
FlaxRobertaForMaskedLM,
FlaxRobertaForMultipleChoice,
FlaxRobertaForQuestionAnswering,
FlaxRobertaForSequenceClassification,
FlaxRobertaForTokenClassification,
FlaxRobertaModel,
)
class __lowercase ( unittest.TestCase ):
'''simple docstring'''
def __init__( self , _UpperCAmelCase , _UpperCAmelCase=13 , _UpperCAmelCase=7 , _UpperCAmelCase=True , _UpperCAmelCase=True , _UpperCAmelCase=True , _UpperCAmelCase=True , _UpperCAmelCase=99 , _UpperCAmelCase=32 , _UpperCAmelCase=5 , _UpperCAmelCase=4 , _UpperCAmelCase=37 , _UpperCAmelCase="gelu" , _UpperCAmelCase=0.1 , _UpperCAmelCase=0.1 , _UpperCAmelCase=512 , _UpperCAmelCase=16 , _UpperCAmelCase=2 , _UpperCAmelCase=0.0_2 , _UpperCAmelCase=4 , ):
__a : Any = parent
__a : Optional[int] = batch_size
__a : str = seq_length
__a : List[str] = is_training
__a : Optional[Any] = use_attention_mask
__a : Optional[Any] = use_token_type_ids
__a : List[str] = use_labels
__a : Union[str, Any] = vocab_size
__a : int = hidden_size
__a : Union[str, Any] = num_hidden_layers
__a : Union[str, Any] = num_attention_heads
__a : Dict = intermediate_size
__a : List[str] = hidden_act
__a : Dict = hidden_dropout_prob
__a : Union[str, Any] = attention_probs_dropout_prob
__a : int = max_position_embeddings
__a : Tuple = type_vocab_size
__a : Optional[int] = type_sequence_label_size
__a : Optional[Any] = initializer_range
__a : Optional[int] = num_choices
def _lowerCamelCase ( self ):
__a : int = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__a : Union[str, Any] = None
if self.use_attention_mask:
__a : Any = random_attention_mask([self.batch_size, self.seq_length] )
__a : Optional[int] = None
if self.use_token_type_ids:
__a : int = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
__a : Any = RobertaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=_UpperCAmelCase , initializer_range=self.initializer_range , )
return config, input_ids, token_type_ids, attention_mask
def _lowerCamelCase ( self ):
__a : Dict = self.prepare_config_and_inputs()
__a , __a , __a , __a : str = config_and_inputs
__a : str = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''attention_mask''': attention_mask}
return config, inputs_dict
def _lowerCamelCase ( self ):
__a : Any = self.prepare_config_and_inputs()
__a , __a , __a , __a : Union[str, Any] = config_and_inputs
__a : Optional[int] = True
__a : str = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] )
__a : List[Any] = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
return (
config,
input_ids,
token_type_ids,
encoder_hidden_states,
encoder_attention_mask,
)
@require_flax
class __lowercase ( _UpperCamelCase , unittest.TestCase ):
'''simple docstring'''
__lowerCAmelCase = True
__lowerCAmelCase = (
(
FlaxRobertaModel,
FlaxRobertaForCausalLM,
FlaxRobertaForMaskedLM,
FlaxRobertaForSequenceClassification,
FlaxRobertaForTokenClassification,
FlaxRobertaForMultipleChoice,
FlaxRobertaForQuestionAnswering,
)
if is_flax_available()
else ()
)
def _lowerCamelCase ( self ):
__a : Dict = FlaxRobertaModelTester(self )
@slow
def _lowerCamelCase ( self ):
for model_class_name in self.all_model_classes:
__a : int = model_class_name.from_pretrained('''roberta-base''' , from_pt=_UpperCAmelCase )
__a : List[str] = model(np.ones((1, 1) ) )
self.assertIsNotNone(_UpperCAmelCase )
[style_context_codestyle: 52 | label: 0]
from math import ceil
from typing import List, Optional, Union

import numpy as np

from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import BatchFeature, SequenceFeatureExtractor
from ...utils import TensorType, logging


logger = logging.get_logger(__name__)


class TvltFeatureExtractor(SequenceFeatureExtractor):
    model_input_names = ["audio_values", "audio_mask"]

    def __init__(
        self,
        spectrogram_length=2048,
        num_channels=1,
        patch_size=[16, 16],
        feature_size=128,
        sampling_rate=44100,
        hop_length_to_sampling_rate=86,
        n_fft=2048,
        padding_value=0.0,
        **kwargs,
    ):
        super().__init__(
            feature_size=feature_size,
            sampling_rate=sampling_rate,
            padding_value=padding_value,
            **kwargs,
        )

        self.spectrogram_length = spectrogram_length
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.freq_len = feature_size // self.patch_size[1]
        self.n_fft = n_fft
        self.hop_length = sampling_rate // hop_length_to_sampling_rate
        self.sampling_rate = sampling_rate
        self.padding_value = padding_value
        self.mel_filters = mel_filter_bank(
            num_frequency_bins=1 + n_fft // 2,
            num_mel_filters=feature_size,
            min_frequency=0.0,
            max_frequency=22050.0,
            sampling_rate=sampling_rate,
            norm="slaney",
            mel_scale="slaney",
        ).T

    def _np_extract_fbank_features(self, waveform):
        log_spec = spectrogram(
            waveform,
            window_function(self.n_fft, "hann"),
            frame_length=self.n_fft,
            hop_length=self.hop_length,
            power=2.0,
            mel_filters=self.mel_filters.T,
            log_mel="dB",
            db_range=80.0,
        )
        log_spec = log_spec[:, :-1]
        log_spec = log_spec - 20.0
        log_spec = np.clip(log_spec / 40.0, -2.0, 0.0) + 1.0
        return log_spec

    def __call__(
        self,
        raw_speech: Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]],
        return_tensors: Optional[Union[str, TensorType]] = None,
        return_attention_mask: Optional[bool] = True,
        sampling_rate: Optional[int] = None,
        resample: bool = False,
        mask_audio: bool = False,
        **kwargs,
    ) -> BatchFeature:
        if sampling_rate is not None:
            if sampling_rate != self.sampling_rate:
                raise ValueError(
                    "This feature extractor is set to support sampling rate"
                    f" of {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled"
                    f" with {self.sampling_rate} and not {sampling_rate}."
                )
        else:
            logger.warning(
                "It is strongly recommended to pass the `sampling_rate` argument to this function. "
                "Failing to do so can result in silent errors that might be hard to debug."
            )

        is_batched_numpy = isinstance(raw_speech, np.ndarray) and len(raw_speech.shape) > 1
        if is_batched_numpy and len(raw_speech.shape) > 2:
            raise ValueError(f"Only mono-channel audio is supported for input to {self}")
        is_batched = is_batched_numpy or (
            isinstance(raw_speech, (list, tuple)) and (isinstance(raw_speech[0], (np.ndarray, tuple, list)))
        )
        if is_batched:
            raw_speech = [np.asarray([speech], dtype=np.float32).T for speech in raw_speech]
        elif not is_batched and not isinstance(raw_speech, np.ndarray):
            raw_speech = np.asarray(raw_speech, dtype=np.float32)
        elif isinstance(raw_speech, np.ndarray) and raw_speech.dtype is np.dtype(np.float64):
            raw_speech = raw_speech.astype(np.float32)

        # always return batch
        if not is_batched:
            raw_speech = [np.asarray([raw_speech]).T]

        # Convert audio signals to log mel spectrograms, truncate by time axis
        audio_features = [
            self._np_extract_fbank_features(waveform.squeeze()).T[: self.spectrogram_length] for waveform in raw_speech
        ]
        if isinstance(audio_features[0], List):
            audio_features = [np.asarray(feature, dtype=np.float32) for feature in audio_features]

        # Create audio attention mask
        max_patch_len = max(
            [ceil(feature.shape[0] / self.patch_size[0]) * self.freq_len for feature in audio_features]
        )  # The maximum number of audio patches in a batch
        if return_attention_mask:
            audio_mask = [
                (ceil(feature.shape[0] / self.patch_size[0]) * self.freq_len) * [1]
                + (max_patch_len - ceil(feature.shape[0] / self.patch_size[0]) * self.freq_len) * [0]
                for feature in audio_features
            ]
            audio_mask = np.array(audio_mask).astype(np.float32)

        # convert into correct format for padding
        max_time_len = max_patch_len // self.freq_len * self.patch_size[0]  # The maximum audio size in a batch
        padded_audio_features = np.ones([len(audio_features), 1, max_time_len, self.feature_size]).astype(np.float32)
        padded_audio_features = padded_audio_features * self.padding_value
        for i in range(len(audio_features)):
            feature = audio_features[i]
            padded_audio_features[i, :, : feature.shape[0], :] = feature

        # return as BatchFeature
        if return_attention_mask:
            data = {"audio_values": padded_audio_features, "audio_mask": audio_mask}
        else:
            data = {"audio_values": padded_audio_features}

        encoded_inputs = BatchFeature(data=data, tensor_type=return_tensors)
        return encoded_inputs
[code_codestyle: 5]
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
A = logging.get_logger(__name__)
A = {
'''facebook/levit-128S''': '''https://huggingface.co/facebook/levit-128S/resolve/main/config.json''',
# See all LeViT models at https://huggingface.co/models?filter=levit
}
class __lowercase ( _UpperCamelCase ):
'''simple docstring'''
__lowerCAmelCase = '''levit'''
def __init__( self , _UpperCAmelCase=224 , _UpperCAmelCase=3 , _UpperCAmelCase=3 , _UpperCAmelCase=2 , _UpperCAmelCase=1 , _UpperCAmelCase=16 , _UpperCAmelCase=[128, 256, 384] , _UpperCAmelCase=[4, 8, 12] , _UpperCAmelCase=[4, 4, 4] , _UpperCAmelCase=[16, 16, 16] , _UpperCAmelCase=0 , _UpperCAmelCase=[2, 2, 2] , _UpperCAmelCase=[2, 2, 2] , _UpperCAmelCase=0.0_2 , **_UpperCAmelCase , ):
super().__init__(**_UpperCAmelCase )
__a : int = image_size
__a : List[Any] = num_channels
__a : Dict = kernel_size
__a : Optional[int] = stride
__a : Optional[int] = padding
__a : Dict = hidden_sizes
__a : int = num_attention_heads
__a : Optional[int] = depths
__a : str = key_dim
__a : Union[str, Any] = drop_path_rate
__a : Optional[Any] = patch_size
__a : Tuple = attention_ratio
__a : int = mlp_ratio
__a : int = initializer_range
__a : int = [
['''Subsample''', key_dim[0], hidden_sizes[0] // key_dim[0], 4, 2, 2],
['''Subsample''', key_dim[0], hidden_sizes[1] // key_dim[0], 4, 2, 2],
]
class __lowercase ( _UpperCamelCase ):
'''simple docstring'''
__lowerCAmelCase = version.parse('''1.11''' )
@property
def _lowerCamelCase ( self ):
return OrderedDict(
[
('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}),
] )
@property
def _lowerCamelCase ( self ):
return 1e-4
[style_context_codestyle: 52 | label: 0]
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding


class GitProcessor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "AutoImageProcessor"
    tokenizer_class = "AutoTokenizer"

    def __init__(self, image_processor, tokenizer):
        super().__init__(image_processor, tokenizer)
        self.current_processor = self.image_processor

    def __call__(self, text=None, images=None, return_tensors=None, **kwargs):
        if text is None and images is None:
            raise ValueError("You have to specify either text or images. Both cannot be none.")

        if text is not None:
            encoding = self.tokenizer(text, return_tensors=return_tensors, **kwargs)

        if images is not None:
            image_features = self.image_processor(images, return_tensors=return_tensors, **kwargs)

        if text is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        return ["input_ids", "attention_mask", "pixel_values"]
[code_codestyle: 6]
"""simple docstring"""
def __A ( a_ :Tuple , a_ :Union[str, Any] , a_ :int=False) -> List[str]:
if isinstance(a_ , a_) and isinstance(a_ , a_):
__a : List[str] = len(set_a.intersection(a_))
if alternative_union:
__a : List[str] = len(a_) + len(a_)
else:
__a : int = len(set_a.union(a_))
return intersection / union
if isinstance(a_ , (list, tuple)) and isinstance(a_ , (list, tuple)):
__a : Union[str, Any] = [element for element in set_a if element in set_b]
if alternative_union:
__a : Union[str, Any] = len(a_) + len(a_)
return len(a_) / union
else:
__a : List[Any] = set_a + [element for element in set_b if element not in set_a]
return len(a_) / len(a_)
return len(a_) / len(a_)
return None
if __name__ == "__main__":
    set_a = {"a", "b", "c", "d", "e"}
    set_b = {"c", "d", "e", "f", "h", "i"}
    print(jaccard_similarity(set_a, set_b))
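For the sets above, the intersection {'c', 'd', 'e'} has 3 elements and the union has 8, so the printed value is 0.375. A minimal sketch of the list branch (illustrative values):

print(jaccard_similarity([1, 2, 3], [2, 3, 4]))  # 0.5 (intersection size 2, union size 4)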
[style_context_codestyle: 52 | label: 0]
"""simple docstring"""
from collections.abc import Callable
def _snake_case ( _snake_case : Callable[[float], float] , _snake_case : float , _snake_case : float ) -> float:
'''simple docstring'''
_A = a
_A = b
if function(_snake_case ) == 0: # one of the a or b is a root for the function
return a
elif function(_snake_case ) == 0:
return b
elif (
function(_snake_case ) * function(_snake_case ) > 0
): # if none of these are root and they are both positive or negative,
# then this algorithm can't find the root
raise ValueError('could not find root in given interval.' )
else:
_A = start + (end - start) / 2.0
while abs(start - mid ) > 10**-7: # until precisely equals to 10^-7
if function(_snake_case ) == 0:
return mid
elif function(_snake_case ) * function(_snake_case ) < 0:
_A = mid
else:
_A = mid
_A = start + (end - start) / 2.0
return mid
def _snake_case ( _snake_case : float ) -> float:
'''simple docstring'''
return x**3 - 2 * x - 5
if __name__ == "__main__":
print(bisection(f, 1, 1_000))
import doctest
doctest.testmod()
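Note that f above has a single real root near 2.0945515, which is the value the call prints; bisection requires f(a) and f(b) to have opposite signs. A minimal extra sketch (illustrative function):

print(bisection(lambda x: x**2 - 4, 0, 3))  # ~2.0, the root of x**2 - 4 in [0, 3]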
[code_codestyle: 7]
"""simple docstring"""
from __future__ import annotations
import os
from collections.abc import Mapping
A = tuple[int, int]
class __lowercase :
'''simple docstring'''
def __init__( self , _UpperCAmelCase , _UpperCAmelCase ):
__a : set[int] = vertices
__a : dict[EdgeT, int] = {
(min(_UpperCAmelCase ), max(_UpperCAmelCase )): weight for edge, weight in edges.items()
}
def _lowerCamelCase ( self , _UpperCAmelCase , _UpperCAmelCase ):
self.vertices.add(edge[0] )
self.vertices.add(edge[1] )
__a : Dict = weight
def _lowerCamelCase ( self ):
__a : Graph = Graph({min(self.vertices )} , {} )
__a : EdgeT
__a : int
__a : EdgeT
__a : int
while len(subgraph.vertices ) < len(self.vertices ):
__a : Any = max(self.edges.values() ) + 1
for edge, weight in self.edges.items():
if (edge[0] in subgraph.vertices) ^ (edge[1] in subgraph.vertices):
if weight < min_weight:
__a : List[str] = edge
__a : Optional[int] = weight
subgraph.add_edge(_UpperCAmelCase , _UpperCAmelCase )
return subgraph
def __A ( a_ :str = "p107_network.txt") -> int:
__a : str = os.path.abspath(os.path.dirname(a_))
__a : str = os.path.join(a_ , a_)
__a : dict[EdgeT, int] = {}
__a : list[str]
__a : int
__a : int
with open(a_) as f:
__a : Optional[int] = f.read().strip().split('''\n''')
__a : Dict = [line.split(''',''') for line in data]
for edgea in range(1 , len(a_)):
for edgea in range(a_):
if adjaceny_matrix[edgea][edgea] != "-":
__a : Tuple = int(adjaceny_matrix[edgea][edgea])
__a : Graph = Graph(set(range(len(a_))) , a_)
__a : Graph = graph.prims_algorithm()
__a : int = sum(graph.edges.values())
__a : int = sum(subgraph.edges.values())
return initial_total - optimal_total
if __name__ == "__main__":
print(F'{solution() = }')
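    # Minimal sanity check (illustrative, not part of the Project Euler input):
    # on a triangle with weights 1, 2, 3 the MST keeps the two cheapest edges,
    # so the saving is 6 - 3 = 3.
    # g = Graph({0, 1, 2}, {(0, 1): 1, (1, 2): 2, (0, 2): 3})
    # assert sum(g.edges.values()) - sum(g.prims_algorithm().edges.values()) == 3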
| 52 | 0 |
from __future__ import annotations

import os
import tempfile
import unittest

from transformers import ConvBertConfig, is_tf_available
from transformers.testing_utils import require_tf, slow

from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin


if is_tf_available():
    import tensorflow as tf

    from transformers import (
        TFConvBertForMaskedLM,
        TFConvBertForMultipleChoice,
        TFConvBertForQuestionAnswering,
        TFConvBertForSequenceClassification,
        TFConvBertForTokenClassification,
        TFConvBertModel,
    )


class TFConvBertModelTester:
    def __init__(self, parent, batch_size=13, seq_length=7, is_training=True, use_input_mask=True, use_token_type_ids=True, use_labels=True, vocab_size=99, hidden_size=32, num_hidden_layers=2, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_labels=3, num_choices=4, scope=None):
        # As in the upstream test, the tester pins its own values and ignores
        # the constructor arguments beyond `parent`.
        self.parent = parent
        self.batch_size = 13
        self.seq_length = 7
        self.is_training = True
        self.use_input_mask = True
        self.use_token_type_ids = True
        self.use_labels = True
        self.vocab_size = 99
        self.hidden_size = 384
        self.num_hidden_layers = 2
        self.num_attention_heads = 4
        self.intermediate_size = 37
        self.hidden_act = "gelu"
        self.hidden_dropout_prob = 0.1
        self.attention_probs_dropout_prob = 0.1
        self.max_position_embeddings = 512
        self.type_vocab_size = 16
        self.type_sequence_label_size = 2
        self.initializer_range = 0.02
        self.num_labels = 3
        self.num_choices = 4
        self.embedding_size = 128
        self.head_ratio = 2
        self.conv_kernel_size = 9
        self.num_groups = 1
        self.scope = None

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = ConvBertConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            initializer_range=self.initializer_range,
            return_dict=True,
        )

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def create_and_check_model(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = TFConvBertModel(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
        inputs = [input_ids, input_mask]
        result = model(inputs)

        result = model(input_ids)

        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_masked_lm(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = TFConvBertForMaskedLM(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_for_sequence_classification(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_labels = self.num_labels
        model = TFConvBertForSequenceClassification(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_multiple_choice(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_choices = self.num_choices
        model = TFConvBertForMultipleChoice(config=config)
        multiple_choice_inputs_ids = tf.tile(tf.expand_dims(input_ids, 1), (1, self.num_choices, 1))
        multiple_choice_input_mask = tf.tile(tf.expand_dims(input_mask, 1), (1, self.num_choices, 1))
        multiple_choice_token_type_ids = tf.tile(tf.expand_dims(token_type_ids, 1), (1, self.num_choices, 1))
        inputs = {
            "input_ids": multiple_choice_inputs_ids,
            "attention_mask": multiple_choice_input_mask,
            "token_type_ids": multiple_choice_token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

    def create_and_check_for_token_classification(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_labels = self.num_labels
        model = TFConvBertForTokenClassification(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_for_question_answering(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = TFConvBertForQuestionAnswering(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict


@require_tf
class TFConvBertModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            TFConvBertModel,
            TFConvBertForMaskedLM,
            TFConvBertForQuestionAnswering,
            TFConvBertForSequenceClassification,
            TFConvBertForTokenClassification,
            TFConvBertForMultipleChoice,
        )
        if is_tf_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": TFConvBertModel,
            "fill-mask": TFConvBertForMaskedLM,
            "question-answering": TFConvBertForQuestionAnswering,
            "text-classification": TFConvBertForSequenceClassification,
            "token-classification": TFConvBertForTokenClassification,
            "zero-shot": TFConvBertForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )
    test_pruning = False
    test_headmasking = False
    test_onnx = False

    def setUp(self):
        self.model_tester = TFConvBertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ConvBertConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    @slow
    def test_saved_model_creation_extended(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.output_hidden_states = True
        config.output_attentions = True

        if hasattr(config, "use_cache"):
            config.use_cache = True

        encoder_seq_length = getattr(self.model_tester, "encoder_seq_length", self.model_tester.seq_length)
        encoder_key_length = getattr(self.model_tester, "key_length", encoder_seq_length)

        for model_class in self.all_model_classes:
            class_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
            model = model_class(config)
            num_out = len(model(class_inputs_dict))

            with tempfile.TemporaryDirectory() as tmpdirname:
                model.save_pretrained(tmpdirname, saved_model=True)
                saved_model_dir = os.path.join(tmpdirname, "saved_model", "1")
                model = tf.keras.models.load_model(saved_model_dir)
                outputs = model(class_inputs_dict)

                if self.is_encoder_decoder:
                    output_hidden_states = outputs["encoder_hidden_states"]
                    output_attentions = outputs["encoder_attentions"]
                else:
                    output_hidden_states = outputs["hidden_states"]
                    output_attentions = outputs["attentions"]

                self.assertEqual(len(outputs), num_out)

                expected_num_layers = getattr(
                    self.model_tester, "expected_num_hidden_layers", self.model_tester.num_hidden_layers + 1
                )

                self.assertEqual(len(output_hidden_states), expected_num_layers)
                self.assertListEqual(
                    list(output_hidden_states[0].shape[-2:]),
                    [self.model_tester.seq_length, self.model_tester.hidden_size],
                )

                self.assertEqual(len(output_attentions), self.model_tester.num_hidden_layers)
                self.assertListEqual(
                    list(output_attentions[0].shape[-3:]),
                    [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length],
                )

    @slow
    def test_model_from_pretrained(self):
        model = TFConvBertModel.from_pretrained("YituTech/conv-bert-base")
        self.assertIsNotNone(model)

    def test_attention_outputs(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True
        decoder_seq_length = getattr(self.model_tester, "decoder_seq_length", self.model_tester.seq_length)
        encoder_seq_length = getattr(self.model_tester, "encoder_seq_length", self.model_tester.seq_length)
        decoder_key_length = getattr(self.model_tester, "key_length", decoder_seq_length)
        encoder_key_length = getattr(self.model_tester, "key_length", encoder_seq_length)

        def check_decoder_attentions_output(outputs):
            out_len = len(outputs)
            self.assertEqual(out_len % 2, 0)
            decoder_attentions = outputs.decoder_attentions
            self.assertEqual(len(decoder_attentions), self.model_tester.num_hidden_layers)
            self.assertListEqual(
                list(decoder_attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads / 2, decoder_seq_length, decoder_key_length],
            )

        def check_encoder_attentions_output(outputs):
            attentions = [
                t.numpy() for t in (outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions)
            ]
            self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)
            self.assertListEqual(
                list(attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length],
            )

        for model_class in self.all_model_classes:
            inputs_dict["output_attentions"] = True
            config.output_hidden_states = False
            model = model_class(config)
            outputs = model(self._prepare_for_class(inputs_dict, model_class))
            out_len = len(outputs)
            self.assertEqual(config.output_hidden_states, False)
            check_encoder_attentions_output(outputs)

            if self.is_encoder_decoder:
                model = model_class(config)
                outputs = model(self._prepare_for_class(inputs_dict, model_class))
                self.assertEqual(config.output_hidden_states, False)
                check_decoder_attentions_output(outputs)

            # Check that output attentions can also be changed via the config
            del inputs_dict["output_attentions"]
            config.output_attentions = True
            model = model_class(config)
            outputs = model(self._prepare_for_class(inputs_dict, model_class))
            self.assertEqual(config.output_hidden_states, False)
            check_encoder_attentions_output(outputs)

            # Check attention is always last and order is fine
            inputs_dict["output_attentions"] = True
            config.output_hidden_states = True
            model = model_class(config)
            outputs = model(self._prepare_for_class(inputs_dict, model_class))

            self.assertEqual(out_len + (2 if self.is_encoder_decoder else 1), len(outputs))
            self.assertEqual(model.config.output_hidden_states, True)
            check_encoder_attentions_output(outputs)


@require_tf
class TFConvBertModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_masked_lm(self):
        model = TFConvBertModel.from_pretrained("YituTech/conv-bert-base")
        input_ids = tf.constant([[0, 1, 2, 3, 4, 5]])
        output = model(input_ids)[0]

        expected_shape = [1, 6, 768]
        self.assertEqual(output.shape, expected_shape)

        expected_slice = tf.constant(
            [
                [
                    [-0.03475493, -0.4686034, -0.30638832],
                    [0.22637248, -0.26988646, -0.7423424],
                    [0.10324868, -0.45013508, -0.58280784],
                ]
            ]
        )
        tf.debugging.assert_near(output[:, :3, :3], expected_slice, atol=1e-4)
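# To run the fast tests in this module (sketch; the path assumes a
# transformers source checkout):
#   python -m pytest tests/models/convbert/test_modeling_tf_convbert.py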
| 8 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
A = logging.get_logger(__name__)
A = {
'''microsoft/trocr-base-handwritten''': (
'''https://huggingface.co/microsoft/trocr-base-handwritten/resolve/main/config.json'''
),
# See all TrOCR models at https://huggingface.co/models?filter=trocr
}
class __lowercase ( _UpperCamelCase ):
'''simple docstring'''
__lowerCAmelCase = '''trocr'''
__lowerCAmelCase = ['''past_key_values''']
__lowerCAmelCase = {
'''num_attention_heads''': '''decoder_attention_heads''',
'''hidden_size''': '''d_model''',
'''num_hidden_layers''': '''decoder_layers''',
}
def __init__( self , _UpperCAmelCase=50265 , _UpperCAmelCase=1024 , _UpperCAmelCase=12 , _UpperCAmelCase=16 , _UpperCAmelCase=4096 , _UpperCAmelCase="gelu" , _UpperCAmelCase=512 , _UpperCAmelCase=0.1 , _UpperCAmelCase=0.0 , _UpperCAmelCase=0.0 , _UpperCAmelCase=2 , _UpperCAmelCase=0.0_2 , _UpperCAmelCase=0.0 , _UpperCAmelCase=True , _UpperCAmelCase=False , _UpperCAmelCase=True , _UpperCAmelCase=True , _UpperCAmelCase=1 , _UpperCAmelCase=0 , _UpperCAmelCase=2 , **_UpperCAmelCase , ):
__a : List[str] = vocab_size
__a : Optional[Any] = d_model
__a : Optional[Any] = decoder_layers
__a : Union[str, Any] = decoder_attention_heads
__a : int = decoder_ffn_dim
__a : List[Any] = activation_function
__a : Any = max_position_embeddings
__a : Dict = dropout
__a : List[Any] = attention_dropout
__a : Optional[Any] = activation_dropout
__a : str = init_std
__a : List[str] = decoder_layerdrop
__a : Union[str, Any] = use_cache
__a : Optional[Any] = scale_embedding
__a : List[Any] = use_learned_position_embeddings
__a : Optional[int] = layernorm_embedding
super().__init__(
pad_token_id=_UpperCAmelCase , bos_token_id=_UpperCAmelCase , eos_token_id=_UpperCAmelCase , decoder_start_token_id=_UpperCAmelCase , **_UpperCAmelCase , )
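# Quick usage sketch (not in the original file): `attribute_map` routes the
# generic config names onto the decoder-specific ones.
# config = TrOCRConfig()
# assert config.hidden_size == config.d_model == 1024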
| 52 | 0 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'''configuration_luke''': ['''LUKE_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''LukeConfig'''],
'''tokenization_luke''': ['''LukeTokenizer'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_luke"] = [
'''LUKE_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''LukeForEntityClassification''',
'''LukeForEntityPairClassification''',
'''LukeForEntitySpanClassification''',
'''LukeForMultipleChoice''',
'''LukeForQuestionAnswering''',
'''LukeForSequenceClassification''',
'''LukeForTokenClassification''',
'''LukeForMaskedLM''',
'''LukeModel''',
'''LukePreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_luke import LUKE_PRETRAINED_CONFIG_ARCHIVE_MAP, LukeConfig
from .tokenization_luke import LukeTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_luke import (
LUKE_PRETRAINED_MODEL_ARCHIVE_LIST,
LukeForEntityClassification,
LukeForEntityPairClassification,
LukeForEntitySpanClassification,
LukeForMaskedLM,
LukeForMultipleChoice,
LukeForQuestionAnswering,
LukeForSequenceClassification,
LukeForTokenClassification,
LukeModel,
LukePreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
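# Effect of the lazy module (illustrative): `import transformers` stays cheap,
# and e.g. `from transformers import LukeModel` only pulls in the torch-backed
# modeling code on first attribute access.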
| 9 |
"""simple docstring"""
import torch
from transformers import CamembertForMaskedLM, CamembertTokenizer
def __A ( a_ :Union[str, Any] , a_ :Union[str, Any] , a_ :Optional[Any] , a_ :Optional[int]=5) -> List[Any]:
# Adapted from https://github.com/pytorch/fairseq/blob/master/fairseq/models/roberta/hub_interface.py
assert masked_input.count('''<mask>''') == 1
__a : Optional[Any] = torch.tensor(tokenizer.encode(a_ , add_special_tokens=a_)).unsqueeze(0) # Batch size 1
__a : Dict = model(a_)[0] # The last hidden-state is the first element of the output tuple
__a : Tuple = (input_ids.squeeze() == tokenizer.mask_token_id).nonzero().item()
__a : Any = logits[0, masked_index, :]
__a : Any = logits.softmax(dim=0)
__a , __a : Optional[Any] = prob.topk(k=a_ , dim=0)
__a : Optional[int] = ''' '''.join(
[tokenizer.convert_ids_to_tokens(indices[i].item()) for i in range(len(a_))])
__a : List[str] = tokenizer.mask_token
__a : Optional[int] = []
for index, predicted_token_bpe in enumerate(topk_predicted_token_bpe.split(''' ''')):
__a : Optional[Any] = predicted_token_bpe.replace('''\u2581''' , ''' ''')
if " {0}".format(a_) in masked_input:
topk_filled_outputs.append(
(
masked_input.replace(''' {0}'''.format(a_) , a_),
values[index].item(),
predicted_token,
))
else:
topk_filled_outputs.append(
(
masked_input.replace(a_ , a_),
values[index].item(),
predicted_token,
))
return topk_filled_outputs
A = CamembertTokenizer.from_pretrained('''camembert-base''')
A = CamembertForMaskedLM.from_pretrained('''camembert-base''')
model.eval()
A = '''Le camembert est <mask> :)'''
print(fill_mask(masked_input, model, tokenizer, topk=3))
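# Expected output format (illustrative; the exact tokens depend on the model):
# a list of (filled_sentence, probability, predicted_token) triples, e.g.
# [("Le camembert est délicieux :)", 0.49, "délicieux"), ...]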
| 52 | 0 |
from __future__ import annotations

solution = []


def is_safe(board, row, column):
    """Return True if no queen on `board` attacks square (row, column)."""
    for i in range(len(board)):
        if board[row][i] == 1:
            return False
    for i in range(len(board)):
        if board[i][column] == 1:
            return False
    # upper-left diagonal
    for i, j in zip(range(row, -1, -1), range(column, -1, -1)):
        if board[i][j] == 1:
            return False
    # upper-right diagonal
    for i, j in zip(range(row, -1, -1), range(column, len(board))):
        if board[i][j] == 1:
            return False
    return True


def solve(board, row):
    """Place queens row by row, backtracking on conflicts."""
    if row >= len(board):
        solution.append(board)
        printboard(board)
        print()
        return True
    for i in range(len(board)):
        if is_safe(board, row, i):
            board[row][i] = 1
            solve(board, row + 1)
            board[row][i] = 0
    return False


def printboard(board):
    for i in range(len(board)):
        for j in range(len(board)):
            if board[i][j] == 1:
                print("Q", end=" ")
            else:
                print(".", end=" ")
        print()


# n = int(input("The no. of queens"))
n = 8
board = [[0 for i in range(n)] for j in range(n)]
solve(board, 0)
print("The total no. of solutions are :", len(solution))
| 10 |
"""simple docstring"""
import unittest
from knapsack import greedy_knapsack as kp
class __lowercase ( unittest.TestCase ):
'''simple docstring'''
def _lowerCamelCase ( self ):
__a : Optional[int] = [10, 20, 30, 40, 50, 60]
__a : Union[str, Any] = [2, 4, 6, 8, 10, 12]
__a : List[str] = 100
self.assertEqual(kp.calc_profit(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) , 210 )
def _lowerCamelCase ( self ):
self.assertRaisesRegex(_UpperCAmelCase , '''max_weight must greater than zero.''' )
def _lowerCamelCase ( self ):
self.assertRaisesRegex(_UpperCAmelCase , '''Weight can not be negative.''' )
def _lowerCamelCase ( self ):
self.assertRaisesRegex(_UpperCAmelCase , '''Profit can not be negative.''' )
def _lowerCamelCase ( self ):
self.assertRaisesRegex(_UpperCAmelCase , '''max_weight must greater than zero.''' )
def _lowerCamelCase ( self ):
self.assertRaisesRegex(
_UpperCAmelCase , '''The length of profit and weight must be same.''' )
if __name__ == "__main__":
unittest.main()
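# Why test_sorted expects 210 (illustrative reasoning, not in the original):
# every item has the same profit/weight ratio (5) and the combined weight
# 2+4+6+8+10+12 = 42 is under the 100 limit, so the greedy keeps all items
# and the profit is 10+20+30+40+50+60 = 210.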
| 52 | 0 |
from __future__ import annotations

import collections
import tempfile
import unittest

import numpy as np

from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import is_tf_available, is_vision_available

from ...test_modeling_tf_common import floats_tensor, ids_tensor, random_attention_mask
from ..bert.test_modeling_tf_bert import TFBertModelTester
from ..clip.test_modeling_tf_clip import TFCLIPVisionModelTester
from ..deit.test_modeling_tf_deit import TFDeiTModelTester
from ..roberta.test_modeling_tf_roberta import TFRobertaModelTester
from ..vit.test_modeling_tf_vit import TFViTModelTester


if is_tf_available():
    from transformers import (
        TFBertModel,
        TFCLIPVisionModel,
        TFDeiTModel,
        TFRobertaModel,
        TFVisionTextDualEncoderModel,
        TFViTModel,
        VisionTextDualEncoderConfig,
    )

if is_vision_available():
    from PIL import Image

    from transformers import VisionTextDualEncoderProcessor


def to_2tuple(x):
    if isinstance(x, collections.abc.Iterable):
        return x
    return (x, x)


@require_tf
class TFVisionTextDualEncoderMixin:
    def get_vision_text_model(self, config, text_config):
        pass

    def prepare_config_and_inputs(self):
        pass

    def get_pretrained_model_and_inputs(self):
        pass

    def check_model_from_pretrained_configs(self, text_config, input_ids, attention_mask, vision_config, pixel_values=None, **kwargs):
        config = VisionTextDualEncoderConfig.from_vision_text_configs(vision_config, text_config)

        model = TFVisionTextDualEncoderModel(config)

        output = model(input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask)

        self.assertEqual(output["text_embeds"].shape, (input_ids.shape[0], config.projection_dim))
        self.assertEqual(output["image_embeds"].shape, (pixel_values.shape[0], config.projection_dim))

    def check_vision_text_dual_encoder_model(self, text_config, input_ids, attention_mask, vision_config, pixel_values=None, **kwargs):
        vision_model, text_model = self.get_vision_text_model(vision_config, text_config)
        model = TFVisionTextDualEncoderModel(vision_model=vision_model, text_model=text_model)

        output = model(input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask)

        self.assertEqual(output["text_embeds"].shape, (input_ids.shape[0], model.config.projection_dim))
        self.assertEqual(output["image_embeds"].shape, (pixel_values.shape[0], model.config.projection_dim))

    def check_vision_text_dual_encoder_from_pretrained(self, text_config, input_ids, attention_mask, vision_config, pixel_values=None, **kwargs):
        vision_model, text_model = self.get_vision_text_model(vision_config, text_config)
        kwargs = {"vision_model": vision_model, "text_model": text_model}
        model = TFVisionTextDualEncoderModel.from_vision_text_pretrained(**kwargs)

        output = model(input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask)

        self.assertEqual(output["text_embeds"].shape, (input_ids.shape[0], model.config.projection_dim))
        self.assertEqual(output["image_embeds"].shape, (pixel_values.shape[0], model.config.projection_dim))

    def check_save_load(self, text_config, input_ids, attention_mask, vision_config, pixel_values=None, **kwargs):
        vision_model, text_model = self.get_vision_text_model(vision_config, text_config)
        model = TFVisionTextDualEncoderModel(vision_model=vision_model, text_model=text_model)

        output = model(input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask)
        out_1 = output[0].numpy()

        with tempfile.TemporaryDirectory() as tmpdirname:
            model.save_pretrained(tmpdirname)
            model = TFVisionTextDualEncoderModel.from_pretrained(tmpdirname)

            after_output = model(input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask)
            out_2 = after_output[0].numpy()
            max_diff = np.amax(np.abs(out_2 - out_1))
            self.assertLessEqual(max_diff, 1e-5)

    def check_vision_text_output_attention(self, text_config, input_ids, attention_mask, vision_config, pixel_values=None, **kwargs):
        vision_model, text_model = self.get_vision_text_model(vision_config, text_config)
        model = TFVisionTextDualEncoderModel(vision_model=vision_model, text_model=text_model)

        output = model(input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask, output_attentions=True)

        vision_attentions = output.vision_model_output.attentions
        self.assertEqual(len(vision_attentions), vision_config.num_hidden_layers)

        # in ViT, the seq_len equals the number of patches + 1 (we add 1 for the [CLS] token)
        image_size = to_2tuple(vision_model.config.image_size)
        patch_size = to_2tuple(vision_model.config.patch_size)
        num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
        seq_len = num_patches + 1
        self.assertEqual(vision_attentions[0].shape[-3:], (vision_config.num_attention_heads, seq_len, seq_len))

        text_attentions = output.text_model_output.attentions
        self.assertEqual(len(text_attentions), text_config.num_hidden_layers)

        self.assertEqual(
            text_attentions[0].shape[-3:],
            (text_config.num_attention_heads, input_ids.shape[-1], input_ids.shape[-1]),
        )

    def assert_almost_equals(self, a, b, tol):
        diff = np.abs(a - b).max()
        self.assertLessEqual(diff, tol, f"Difference between torch and flax is {diff} (>= {tol}).")

    def test_vision_text_dual_encoder_model(self):
        inputs_dict = self.prepare_config_and_inputs()
        self.check_vision_text_dual_encoder_model(**inputs_dict)

    def test_model_from_pretrained_configs(self):
        inputs_dict = self.prepare_config_and_inputs()
        self.check_model_from_pretrained_configs(**inputs_dict)

    def test_vision_text_dual_encoder_from_pretrained(self):
        inputs_dict = self.prepare_config_and_inputs()
        self.check_vision_text_dual_encoder_from_pretrained(**inputs_dict)

    def test_save_load(self):
        inputs_dict = self.prepare_config_and_inputs()
        self.check_save_load(**inputs_dict)

    def test_vision_text_output_attention(self):
        inputs_dict = self.prepare_config_and_inputs()
        self.check_vision_text_output_attention(**inputs_dict)

    @slow
    def test_pretrained_model_and_inputs(self):
        model_2, inputs = self.get_pretrained_model_and_inputs()

        outputs = model_2(**inputs)
        out_2 = outputs[0].numpy()

        with tempfile.TemporaryDirectory() as tmp_dirname:
            model_2.save_pretrained(tmp_dirname)
            model_1 = TFVisionTextDualEncoderModel.from_pretrained(tmp_dirname)

            after_outputs = model_1(**inputs)
            out_1 = after_outputs[0].numpy()
            max_diff = np.amax(np.abs(out_1 - out_2))
            self.assertLessEqual(max_diff, 1e-5)


@require_tf
class TFViTBertModelTest(TFVisionTextDualEncoderMixin, unittest.TestCase):
    def get_pretrained_model_and_inputs(self):
        model = TFVisionTextDualEncoderModel.from_vision_text_pretrained(
            "hf-internal-testing/tiny-random-vit", "hf-internal-testing/tiny-random-bert"
        )
        batch_size = 13
        pixel_values = floats_tensor(
            [
                batch_size,
                model.vision_model.config.num_channels,
                model.vision_model.config.image_size,
                model.vision_model.config.image_size,
            ]
        )
        input_ids = ids_tensor([batch_size, 4], model.text_model.config.vocab_size)
        attention_mask = random_attention_mask([batch_size, 4])
        inputs = {"pixel_values": pixel_values, "input_ids": input_ids, "attention_mask": attention_mask}

        return model, inputs

    def get_vision_text_model(self, vision_config, text_config):
        vision_model = TFViTModel(vision_config, name="vision_model")
        text_model = TFBertModel(text_config, name="text_model")
        return vision_model, text_model

    def prepare_config_and_inputs(self):
        vit_model_tester = TFViTModelTester(self)
        bert_model_tester = TFBertModelTester(self)
        vision_config_and_inputs = vit_model_tester.prepare_config_and_inputs()
        text_config_and_inputs = bert_model_tester.prepare_config_and_inputs()

        vision_config, pixel_values, _ = vision_config_and_inputs

        (
            text_config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = text_config_and_inputs

        return {
            "text_config": text_config,
            "vision_config": vision_config,
            "pixel_values": pixel_values,
            "attention_mask": input_mask,
            "input_ids": input_ids,
            "text_token_type_ids": token_type_ids,
            "text_sequence_labels": sequence_labels,
            "text_token_labels": token_labels,
            "text_choice_labels": choice_labels,
        }


@require_tf
class TFDeiTRobertaModelTest(TFVisionTextDualEncoderMixin, unittest.TestCase):
    def get_pretrained_model_and_inputs(self):
        model = TFVisionTextDualEncoderModel.from_vision_text_pretrained(
            "Rocketknight1/tiny-random-deit-tf", "hf-internal-testing/tiny-random-roberta"
        )
        batch_size = 13
        pixel_values = floats_tensor(
            [
                batch_size,
                model.vision_model.config.num_channels,
                model.vision_model.config.image_size,
                model.vision_model.config.image_size,
            ]
        )
        input_ids = ids_tensor([batch_size, 4], model.text_model.config.vocab_size)
        attention_mask = random_attention_mask([batch_size, 4])
        inputs = {"pixel_values": pixel_values, "input_ids": input_ids, "attention_mask": attention_mask}

        return model, inputs

    def check_vision_text_output_attention(self, text_config, input_ids, attention_mask, vision_config, pixel_values=None, **kwargs):
        vision_model, text_model = self.get_vision_text_model(vision_config, text_config)
        model = TFVisionTextDualEncoderModel(vision_model=vision_model, text_model=text_model)

        output = model(input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask, output_attentions=True)

        vision_attentions = output.vision_model_output.attentions
        self.assertEqual(len(vision_attentions), vision_config.num_hidden_layers)

        # in DEiT, the seq_len equals the number of patches + 2 (we add 2 for the [CLS] and distillation tokens)
        image_size = to_2tuple(vision_model.config.image_size)
        patch_size = to_2tuple(vision_model.config.patch_size)
        num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
        seq_len = num_patches + 2
        self.assertEqual(vision_attentions[0].shape[-3:], (vision_config.num_attention_heads, seq_len, seq_len))

        text_attentions = output.text_model_output.attentions
        self.assertEqual(len(text_attentions), text_config.num_hidden_layers)

        self.assertEqual(
            text_attentions[0].shape[-3:],
            (text_config.num_attention_heads, input_ids.shape[-1], input_ids.shape[-1]),
        )

    def get_vision_text_model(self, vision_config, text_config):
        vision_model = TFDeiTModel(vision_config, name="vision_model")
        text_model = TFRobertaModel(text_config, name="text_model")
        return vision_model, text_model

    def prepare_config_and_inputs(self):
        vit_model_tester = TFDeiTModelTester(self)
        bert_model_tester = TFRobertaModelTester(self)
        vision_config_and_inputs = vit_model_tester.prepare_config_and_inputs()
        text_config_and_inputs = bert_model_tester.prepare_config_and_inputs()

        vision_config, pixel_values, _ = vision_config_and_inputs

        (
            text_config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = text_config_and_inputs

        return {
            "text_config": text_config,
            "vision_config": vision_config,
            "pixel_values": pixel_values,
            "attention_mask": input_mask,
            "input_ids": input_ids,
            "text_token_type_ids": token_type_ids,
            "text_sequence_labels": sequence_labels,
            "text_token_labels": token_labels,
            "text_choice_labels": choice_labels,
        }


@require_tf
class TFCLIPVisionBertModelTest(TFVisionTextDualEncoderMixin, unittest.TestCase):
    def get_pretrained_model_and_inputs(self):
        model = TFVisionTextDualEncoderModel.from_vision_text_pretrained(
            "Rocketknight1/tiny-random-clip-tf", "hf-internal-testing/tiny-random-bert"
        )
        batch_size = 13
        pixel_values = floats_tensor(
            [
                batch_size,
                model.vision_model.config.num_channels,
                model.vision_model.config.image_size,
                model.vision_model.config.image_size,
            ]
        )
        input_ids = ids_tensor([batch_size, 4], model.text_model.config.vocab_size)
        attention_mask = random_attention_mask([batch_size, 4])
        inputs = {"pixel_values": pixel_values, "input_ids": input_ids, "attention_mask": attention_mask}

        return model, inputs

    def get_vision_text_model(self, vision_config, text_config):
        vision_model = TFCLIPVisionModel(vision_config, name="vision_model")
        text_model = TFBertModel(text_config, name="text_model")
        return vision_model, text_model

    def prepare_config_and_inputs(self):
        clip_model_tester = TFCLIPVisionModelTester(self)
        bert_model_tester = TFBertModelTester(self)
        vision_config_and_inputs = clip_model_tester.prepare_config_and_inputs()
        text_config_and_inputs = bert_model_tester.prepare_config_and_inputs()

        vision_config, pixel_values = vision_config_and_inputs

        (
            text_config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = text_config_and_inputs

        return {
            "text_config": text_config,
            "vision_config": vision_config,
            "pixel_values": pixel_values,
            "attention_mask": input_mask,
            "input_ids": input_ids,
            "text_token_type_ids": token_type_ids,
            "text_sequence_labels": sequence_labels,
            "text_token_labels": token_labels,
            "text_choice_labels": choice_labels,
        }


@require_vision
@require_tf
class TFVisionTextDualEncoderIntegrationTest(unittest.TestCase):
    @slow
    def test_inference(self):
        model = TFVisionTextDualEncoderModel.from_pretrained(
            "clip-italian/clip-italian", logit_scale_init_value=1.0, from_pt=True
        )
        processor = VisionTextDualEncoderProcessor.from_pretrained("clip-italian/clip-italian")

        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        inputs = processor(
            text=["una foto di un gatto", "una foto di un cane"], images=image, padding=True, return_tensors="np"
        )

        outputs = model(**inputs)

        # verify the logits
        self.assertEqual(outputs.logits_per_image.shape, (inputs.pixel_values.shape[0], inputs.input_ids.shape[0]))
        self.assertEqual(
            outputs.logits_per_text.shape,
            (inputs.input_ids.shape[0], inputs.pixel_values.shape[0]),
        )

        expected_logits = np.array([[1.2284727, 0.3104122]])

        self.assertTrue(np.allclose(outputs.logits_per_image.numpy(), expected_logits, atol=1e-3))
| 11 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
A = logging.get_logger(__name__)
A = {}
class __lowercase ( _UpperCamelCase ):
'''simple docstring'''
__lowerCAmelCase = '''llama'''
__lowerCAmelCase = ['''past_key_values''']
def __init__( self , _UpperCAmelCase=32000 , _UpperCAmelCase=4096 , _UpperCAmelCase=11008 , _UpperCAmelCase=32 , _UpperCAmelCase=32 , _UpperCAmelCase=None , _UpperCAmelCase="silu" , _UpperCAmelCase=2048 , _UpperCAmelCase=0.0_2 , _UpperCAmelCase=1e-6 , _UpperCAmelCase=True , _UpperCAmelCase=0 , _UpperCAmelCase=1 , _UpperCAmelCase=2 , _UpperCAmelCase=1 , _UpperCAmelCase=False , _UpperCAmelCase=None , **_UpperCAmelCase , ):
__a : Dict = vocab_size
__a : Union[str, Any] = max_position_embeddings
__a : str = hidden_size
__a : List[str] = intermediate_size
__a : Any = num_hidden_layers
__a : int = num_attention_heads
# for backward compatibility
if num_key_value_heads is None:
__a : Union[str, Any] = num_attention_heads
__a : Optional[int] = num_key_value_heads
__a : Dict = hidden_act
__a : Union[str, Any] = initializer_range
__a : int = rms_norm_eps
__a : Optional[int] = pretraining_tp
__a : Optional[Any] = use_cache
__a : Optional[Any] = rope_scaling
self._rope_scaling_validation()
super().__init__(
pad_token_id=_UpperCAmelCase , bos_token_id=_UpperCAmelCase , eos_token_id=_UpperCAmelCase , tie_word_embeddings=_UpperCAmelCase , **_UpperCAmelCase , )
def _lowerCamelCase ( self ):
if self.rope_scaling is None:
return
if not isinstance(self.rope_scaling , _UpperCAmelCase ) or len(self.rope_scaling ) != 2:
raise ValueError(
'''`rope_scaling` must be a dictionary with with two fields, `name` and `factor`, '''
f"""got {self.rope_scaling}""" )
__a : Tuple = self.rope_scaling.get('''type''' , _UpperCAmelCase )
__a : Optional[int] = self.rope_scaling.get('''factor''' , _UpperCAmelCase )
if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]:
raise ValueError(
f"""`rope_scaling`'s name field must be one of ['linear', 'dynamic'], got {rope_scaling_type}""" )
if rope_scaling_factor is None or not isinstance(_UpperCAmelCase , _UpperCAmelCase ) or rope_scaling_factor <= 1.0:
raise ValueError(f"""`rope_scaling`'s factor field must be an float > 1, got {rope_scaling_factor}""" )
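# Illustrative validation behaviour (not in the original file):
# LlamaConfig(rope_scaling={"type": "linear", "factor": 2.0})  # passes
# LlamaConfig(rope_scaling={"type": "linear", "factor": 0.5})  # raises ValueError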
| 52 | 0 |
from manim import *


# Variable and class names below are reconstructed from the accelerate docs
# animations ("big model inference" stages); the scene logic matches the
# original frame for frame.
class Stage1(Scene):
    def construct(self):
        mem = Rectangle(height=0.5, width=0.5)
        fill = Rectangle(height=0.46, width=0.46).set_stroke(width=0)

        cpu_left_col_base = [mem.copy() for i in range(6)]
        cpu_right_col_base = [mem.copy() for i in range(6)]
        cpu_left_col = VGroup(*cpu_left_col_base).arrange(UP, buff=0)
        cpu_right_col = VGroup(*cpu_right_col_base).arrange(UP, buff=0)
        cpu_rects = VGroup(cpu_left_col, cpu_right_col).arrange(RIGHT, buff=0)
        cpu_text = Text("CPU", font_size=24)
        cpu = Group(cpu_rects, cpu_text).arrange(DOWN, buff=0.5, aligned_edge=DOWN)
        cpu.move_to([-2.5, -0.5, 0])
        self.add(cpu)

        gpu_base = [mem.copy() for i in range(1)]
        gpu_rect = VGroup(*gpu_base).arrange(UP, buff=0)
        gpu_text = Text("GPU", font_size=24)
        gpu = Group(gpu_rect, gpu_text).arrange(DOWN, buff=0.5, aligned_edge=DOWN)
        gpu.align_to(cpu, DOWN)
        gpu.set_x(gpu.get_x() - 1)
        self.add(gpu)

        model_base = [mem.copy() for i in range(6)]
        model_rect = VGroup(*model_base).arrange(RIGHT, buff=0)
        model_text = Text("Model", font_size=24)
        model = Group(model_rect, model_text).arrange(DOWN, buff=0.5, aligned_edge=DOWN)
        model.move_to([3, -1.0, 0])

        self.play(
            Create(cpu_left_col, run_time=1), Create(cpu_right_col, run_time=1), Create(gpu_rect, run_time=1),
        )

        step_1 = MarkupText(
            f"First, an empty model skeleton is loaded\ninto <span fgcolor='{YELLOW}'>memory</span> without using much RAM.",
            font_size=24,
        )
        key = Square(side_length=2.2)
        key.move_to([-5, 2, 0])

        key_text = MarkupText(
            f"<b>Key:</b>\n\n<span fgcolor='{YELLOW}'>●</span> Empty Model",
            font_size=18,
        )
        key_text.move_to([-5, 2.4, 0])

        step_1.move_to([2, 2, 0])
        self.play(Write(step_1, run_time=2.5), Write(key_text), Write(key))

        self.add(model)

        cpu_targs = []
        first_animations = []
        second_animations = []
        for i, rect in enumerate(model_base):
            cpu_target = Rectangle(height=0.46, width=0.46).set_stroke(width=0.0).set_fill(YELLOW, opacity=0.7)
            cpu_target.move_to(rect)
            cpu_target.generate_target()
            cpu_target.target.height = 0.46 / 4
            cpu_target.target.width = 0.46 / 3

            if i == 0:
                cpu_target.target.next_to(cpu_left_col_base[0].get_corner(DOWN + LEFT), buff=0.02, direction=UP)
                cpu_target.target.set_x(cpu_target.target.get_x() + 0.1)
            elif i == 3:
                cpu_target.target.next_to(cpu_targs[0].target, direction=UP, buff=0.0)
            else:
                cpu_target.target.next_to(cpu_targs[i - 1].target, direction=RIGHT, buff=0.0)
            cpu_targs.append(cpu_target)

            first_animations.append(rect.animate(run_time=0.5).set_stroke(YELLOW))
            second_animations.append(MoveToTarget(cpu_target, run_time=1.5))

        self.play(*first_animations)
        self.play(*second_animations)
        self.wait()
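# To render the scene (sketch; the module file name is an assumption):
#   manim -pql stage_1.py Stage1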
| 12 |
"""simple docstring"""
import unittest
import numpy as np
from transformers.testing_utils import is_flaky, require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DonutImageProcessor
class __lowercase ( unittest.TestCase ):
'''simple docstring'''
def __init__( self , _UpperCAmelCase , _UpperCAmelCase=7 , _UpperCAmelCase=3 , _UpperCAmelCase=18 , _UpperCAmelCase=30 , _UpperCAmelCase=400 , _UpperCAmelCase=True , _UpperCAmelCase=None , _UpperCAmelCase=True , _UpperCAmelCase=False , _UpperCAmelCase=True , _UpperCAmelCase=True , _UpperCAmelCase=[0.5, 0.5, 0.5] , _UpperCAmelCase=[0.5, 0.5, 0.5] , ):
__a : int = parent
__a : str = batch_size
__a : List[Any] = num_channels
__a : Union[str, Any] = image_size
__a : List[Any] = min_resolution
__a : str = max_resolution
__a : List[str] = do_resize
__a : Optional[int] = size if size is not None else {'''height''': 18, '''width''': 20}
__a : str = do_thumbnail
__a : str = do_align_axis
__a : Dict = do_pad
__a : Union[str, Any] = do_normalize
__a : List[str] = image_mean
__a : Optional[int] = image_std
def _lowerCamelCase ( self ):
return {
"do_resize": self.do_resize,
"size": self.size,
"do_thumbnail": self.do_thumbnail,
"do_align_long_axis": self.do_align_axis,
"do_pad": self.do_pad,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
}
@require_torch
@require_vision
class __lowercase ( _UpperCamelCase , unittest.TestCase ):
'''simple docstring'''
__lowerCAmelCase = DonutImageProcessor if is_vision_available() else None
def _lowerCamelCase ( self ):
__a : Tuple = DonutImageProcessingTester(self )
@property
def _lowerCamelCase ( self ):
return self.image_processor_tester.prepare_image_processor_dict()
def _lowerCamelCase ( self ):
__a : List[str] = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(_UpperCAmelCase , '''do_resize''' ) )
self.assertTrue(hasattr(_UpperCAmelCase , '''size''' ) )
self.assertTrue(hasattr(_UpperCAmelCase , '''do_thumbnail''' ) )
self.assertTrue(hasattr(_UpperCAmelCase , '''do_align_long_axis''' ) )
self.assertTrue(hasattr(_UpperCAmelCase , '''do_pad''' ) )
self.assertTrue(hasattr(_UpperCAmelCase , '''do_normalize''' ) )
self.assertTrue(hasattr(_UpperCAmelCase , '''image_mean''' ) )
self.assertTrue(hasattr(_UpperCAmelCase , '''image_std''' ) )
def _lowerCamelCase ( self ):
__a : List[str] = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {'''height''': 18, '''width''': 20} )
__a : Tuple = self.image_processing_class.from_dict(self.image_processor_dict , size=42 )
self.assertEqual(image_processor.size , {'''height''': 42, '''width''': 42} )
# Previous config had dimensions in (width, height) order
__a : int = self.image_processing_class.from_dict(self.image_processor_dict , size=(42, 84) )
self.assertEqual(image_processor.size , {'''height''': 84, '''width''': 42} )
def _lowerCamelCase ( self ):
pass
@is_flaky()
def _lowerCamelCase ( self ):
# Initialize image_processing
__a : Any = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
__a : Optional[Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=_UpperCAmelCase )
for image in image_inputs:
self.assertIsInstance(_UpperCAmelCase , Image.Image )
# Test not batched input
__a : Optional[Any] = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['''height'''],
self.image_processor_tester.size['''width'''],
) , )
# Test batched
__a : int = image_processing(_UpperCAmelCase , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['''height'''],
self.image_processor_tester.size['''width'''],
) , )
@is_flaky()
def _lowerCamelCase ( self ):
# Initialize image_processing
__a : List[str] = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
__a : List[str] = prepare_image_inputs(self.image_processor_tester , equal_resolution=_UpperCAmelCase , numpify=_UpperCAmelCase )
for image in image_inputs:
self.assertIsInstance(_UpperCAmelCase , np.ndarray )
# Test not batched input
__a : Optional[Any] = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['''height'''],
self.image_processor_tester.size['''width'''],
) , )
# Test batched
__a : str = image_processing(_UpperCAmelCase , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['''height'''],
self.image_processor_tester.size['''width'''],
) , )
@is_flaky()
def _lowerCamelCase ( self ):
# Initialize image_processing
__a : Any = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
__a : str = prepare_image_inputs(self.image_processor_tester , equal_resolution=_UpperCAmelCase , torchify=_UpperCAmelCase )
for image in image_inputs:
self.assertIsInstance(_UpperCAmelCase , torch.Tensor )
# Test not batched input
__a : Optional[int] = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['''height'''],
self.image_processor_tester.size['''width'''],
) , )
# Test batched
__a : List[str] = image_processing(_UpperCAmelCase , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['''height'''],
self.image_processor_tester.size['''width'''],
) , )
| 52 | 0 |
import functools
import operator

from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

SEW_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "asapp/sew-tiny-100k": "https://huggingface.co/asapp/sew-tiny-100k/resolve/main/config.json",
    # See all SEW models at https://huggingface.co/models?filter=sew
}


class SEWConfig(PretrainedConfig):
    model_type = "sew"

    def __init__(self, vocab_size=32, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, squeeze_factor=2, hidden_act="gelu", hidden_dropout=0.1, activation_dropout=0.1, attention_dropout=0.1, feat_proj_dropout=0.0, final_dropout=0.1, layerdrop=0.1, initializer_range=0.02, layer_norm_eps=1e-5, feat_extract_norm="group", feat_extract_activation="gelu", conv_dim=(64, 128, 128, 128, 128, 256, 256, 256, 256, 512, 512, 512, 512), conv_stride=(5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1), conv_kernel=(10, 3, 1, 3, 1, 3, 1, 3, 1, 2, 1, 2, 1), conv_bias=False, num_conv_pos_embeddings=128, num_conv_pos_embedding_groups=16, apply_spec_augment=True, mask_time_prob=0.05, mask_time_length=10, mask_time_min_masks=2, mask_feature_prob=0.0, mask_feature_length=10, mask_feature_min_masks=0, ctc_loss_reduction="mean", ctc_zero_infinity=False, use_weighted_layer_sum=False, classifier_proj_size=256, pad_token_id=0, bos_token_id=1, eos_token_id=2, **kwargs):
        super().__init__(**kwargs, pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id)
        self.hidden_size = hidden_size
        self.feat_extract_norm = feat_extract_norm
        self.feat_extract_activation = feat_extract_activation
        self.conv_dim = list(conv_dim)
        self.conv_stride = list(conv_stride)
        self.conv_kernel = list(conv_kernel)
        self.conv_bias = conv_bias
        self.num_conv_pos_embeddings = num_conv_pos_embeddings
        self.num_conv_pos_embedding_groups = num_conv_pos_embedding_groups
        self.num_feat_extract_layers = len(self.conv_dim)
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.squeeze_factor = squeeze_factor
        self.hidden_act = hidden_act
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.feat_proj_dropout = feat_proj_dropout
        self.final_dropout = final_dropout
        self.layerdrop = layerdrop
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.vocab_size = vocab_size

        if (
            (len(self.conv_stride) != self.num_feat_extract_layers)
            or (len(self.conv_kernel) != self.num_feat_extract_layers)
            or (len(self.conv_dim) != self.num_feat_extract_layers)
        ):
            raise ValueError(
                "Configuration for convolutional layers is incorrect. "
                "It is required that `len(config.conv_dim)` == `len(config.conv_stride)` == `len(config.conv_kernel)`, "
                f"but is `len(config.conv_dim) = {len(self.conv_dim)}`, `len(config.conv_stride) "
                f"= {len(self.conv_stride)}`, `len(config.conv_kernel) = {len(self.conv_kernel)}`."
            )

        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.apply_spec_augment = apply_spec_augment
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length
        self.mask_feature_min_masks = mask_feature_min_masks

        # ctc loss
        self.ctc_loss_reduction = ctc_loss_reduction
        self.ctc_zero_infinity = ctc_zero_infinity

        # sequence classification
        self.use_weighted_layer_sum = use_weighted_layer_sum
        self.classifier_proj_size = classifier_proj_size

    @property
    def inputs_to_logits_ratio(self):
        return functools.reduce(operator.mul, self.conv_stride, 1)
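# Illustrative check (not in the original): the default conv strides
# (5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1) multiply to 5 * 2**6 == 320,
# i.e. one output frame per 320 input samples.
# assert SEWConfig().inputs_to_logits_ratio == 320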
| 13 |
"""simple docstring"""
from __future__ import annotations
def __A ( a_ :list[int]) -> int:
if not nums:
return 0
__a : Any = nums[0]
__a : Optional[Any] = 0
for num in nums[1:]:
__a , __a : Optional[Any] = (
max_excluding + num,
max(a_ , a_),
)
return max(a_ , a_)
if __name__ == "__main__":
import doctest
doctest.testmod()
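    # Worked example (not in the original): for [2, 7, 9, 3, 1] the best
    # non-adjacent picks are 2 + 9 + 1 = 12 (beating 7 + 3 = 10).
    # assert maximum_non_adjacent_sum([2, 7, 9, 3, 1]) == 12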
| 52 | 0 |
import importlib
import inspect
import json
import os
import re
import shutil
import sys
from pathlib import Path
from typing import Dict, Optional, Union
from urllib import request
from huggingface_hub import HfFolder, cached_download, hf_hub_download, model_info
from packaging import version
from .. import __version__
from . import DIFFUSERS_DYNAMIC_MODULE_NAME, HF_MODULES_CACHE, logging
a__ = (
'''https://raw.githubusercontent.com/huggingface/diffusers/{revision}/examples/community/{pipeline}.py'''
)
a__ = logging.get_logger(__name__) # pylint: disable=invalid-name
def __UpperCAmelCase ( ) -> Union[str, Any]:
"""simple docstring"""
_a : Any = '''https://pypi.org/pypi/diffusers/json'''
_a : Optional[int] = json.loads(request.urlopen(__a ).read() )['''releases'''].keys()
return sorted(__a ,key=lambda __a : version.Version(__a ) )
def __UpperCAmelCase ( ) -> List[str]:
"""simple docstring"""
if HF_MODULES_CACHE in sys.path:
return
sys.path.append(__a )
os.makedirs(__a ,exist_ok=__a )
_a : int = Path(__a ) / '''__init__.py'''
if not init_path.exists():
init_path.touch()
def __UpperCAmelCase ( __a : Union[str, os.PathLike] ) -> Optional[Any]:
"""simple docstring"""
init_hf_modules()
_a : List[Any] = Path(__a ) / name
# If the parent module does not exist yet, recursively create it.
if not dynamic_module_path.parent.exists():
create_dynamic_module(dynamic_module_path.parent )
os.makedirs(__a ,exist_ok=__a )
_a : Any = dynamic_module_path / '''__init__.py'''
if not init_path.exists():
init_path.touch()
def __UpperCAmelCase ( __a : Union[str, Any] ) -> Union[str, Any]:
"""simple docstring"""
with open(__a ,'''r''' ,encoding='''utf-8''' ) as f:
_a : Optional[Any] = f.read()
# Imports of the form `import .xxx`
_a : List[str] = re.findall('''^\s*import\s+\.(\S+)\s*$''' ,__a ,flags=re.MULTILINE )
# Imports of the form `from .xxx import yyy`
relative_imports += re.findall('''^\s*from\s+\.(\S+)\s+import''' ,__a ,flags=re.MULTILINE )
# Unique-ify
return list(set(__a ) )
def __UpperCAmelCase ( __a : Union[str, Any] ) -> Union[str, Any]:
"""simple docstring"""
_a : Optional[int] = False
_a : str = [module_file]
_a : Dict = []
# Let's recurse through all relative imports
while not no_change:
_a : Tuple = []
for f in files_to_check:
new_imports.extend(get_relative_imports(__a ) )
_a : Any = Path(__a ).parent
_a : int = [str(module_path / m ) for m in new_imports]
_a : str = [f for f in new_import_files if f not in all_relative_imports]
_a : int = [F"""{f}.py""" for f in new_import_files]
_a : int = len(__a ) == 0
all_relative_imports.extend(__a )
return all_relative_imports
def __UpperCAmelCase ( __a : str ) -> Any:
"""simple docstring"""
with open(__a ,'''r''' ,encoding='''utf-8''' ) as f:
_a : List[str] = f.read()
# Imports of the form `import xxx`
_a : List[str] = re.findall('''^\s*import\s+(\S+)\s*$''' ,__a ,flags=re.MULTILINE )
# Imports of the form `from xxx import yyy`
imports += re.findall('''^\s*from\s+(\S+)\s+import''' ,__a ,flags=re.MULTILINE )
# Only keep the top-level module
_a : int = [imp.split('''.''' )[0] for imp in imports if not imp.startswith('''.''' )]
# Unique-ify and test we got them all
_a : List[str] = list(set(__a ) )
_a : Tuple = []
for imp in imports:
try:
importlib.import_module(__a )
except ImportError:
missing_packages.append(__a )
if len(__a ) > 0:
raise ImportError(
'''This modeling file requires the following packages that were not found in your environment: '''
F"""{', '.join(__a )}. Run `pip install {' '.join(__a )}`""" )
return get_relative_imports(__a )
def __UpperCAmelCase ( __a : Optional[int] ,__a : Optional[Any] ) -> Optional[int]:
"""simple docstring"""
_a : Any = module_path.replace(os.path.sep ,'''.''' )
_a : Any = importlib.import_module(__a )
if class_name is None:
return find_pipeline_class(__a )
return getattr(__a ,__a )
def __UpperCAmelCase ( __a : Any ) -> List[Any]:
"""simple docstring"""
from ..pipelines import DiffusionPipeline
_a : Tuple = dict(inspect.getmembers(__a ,inspect.isclass ) )
_a : Optional[int] = None
for cls_name, cls in cls_members.items():
if (
cls_name != DiffusionPipeline.__name__
and issubclass(cls ,__a )
and cls.__module__.split('''.''' )[0] != "diffusers"
):
if pipeline_class is not None:
raise ValueError(
F"""Multiple classes that inherit from {DiffusionPipeline.__name__} have been found:"""
F""" {pipeline_class.__name__}, and {cls_name}. Please make sure to define only one in"""
F""" {loaded_module}.""" )
_a : Optional[int] = cls
return pipeline_class
def get_cached_module_file(
    pretrained_model_name_or_path: Union[str, os.PathLike],
    module_file: str,
    cache_dir: Optional[Union[str, os.PathLike]] = None,
    force_download: bool = False,
    resume_download: bool = False,
    proxies: Optional[Dict[str, str]] = None,
    use_auth_token: Optional[Union[bool, str]] = None,
    revision: Optional[str] = None,
    local_files_only: bool = False,
) -> Optional[Any]:
    """Download `module_file` from a local folder, a community pipeline on GitHub, or a Hub repo, and cache it."""
    pretrained_model_name_or_path = str(pretrained_model_name_or_path)
    module_file_or_url = os.path.join(pretrained_model_name_or_path, module_file)

    if os.path.isfile(module_file_or_url):
        resolved_module_file = module_file_or_url
        submodule = "local"
    elif pretrained_model_name_or_path.count("/") == 0:
        available_versions = get_diffusers_versions()
        # cut ".dev0"
        latest_version = "v" + ".".join(__version__.split(".")[:3])

        # retrieve the github version that matches
        if revision is None:
            revision = latest_version if latest_version[1:] in available_versions else "main"
            logger.info(f"Defaulting to latest_version: {revision}.")
        elif revision in available_versions:
            revision = f"v{revision}"
        elif revision == "main":
            pass
        else:
            raise ValueError(
                f"`custom_revision`: {revision} does not exist. Please make sure to choose one of"
                f" {', '.join(available_versions + ['main'])}."
            )

        # community pipeline on GitHub
        github_url = COMMUNITY_PIPELINES_URL.format(revision=revision, pipeline=pretrained_model_name_or_path)
        try:
            resolved_module_file = cached_download(
                github_url,
                cache_dir=cache_dir,
                force_download=force_download,
                proxies=proxies,
                resume_download=resume_download,
                local_files_only=local_files_only,
                use_auth_token=False,
            )
            submodule = "git"
            module_file = pretrained_model_name_or_path + ".py"
        except EnvironmentError:
            logger.error(f"Could not locate the {module_file} inside {pretrained_model_name_or_path}.")
            raise
    else:
        try:
            # Load from URL or cache if already cached
            resolved_module_file = hf_hub_download(
                pretrained_model_name_or_path,
                module_file,
                cache_dir=cache_dir,
                force_download=force_download,
                proxies=proxies,
                resume_download=resume_download,
                local_files_only=local_files_only,
                use_auth_token=use_auth_token,
            )
            submodule = os.path.join("local", "--".join(pretrained_model_name_or_path.split("/")))
        except EnvironmentError:
            logger.error(f"Could not locate the {module_file} inside {pretrained_model_name_or_path}.")
            raise

    # Check we have all the requirements in our environment
    modules_needed = check_imports(resolved_module_file)

    # Now we move the module inside our cached dynamic modules.
    full_submodule = DIFFUSERS_DYNAMIC_MODULE_NAME + os.path.sep + submodule
    create_dynamic_module(full_submodule)
    # `HF_MODULES_CACHE` is assumed to come from the module-level imports above this excerpt.
    submodule_path = Path(HF_MODULES_CACHE) / full_submodule
    if submodule == "local" or submodule == "git":
        # We always copy local files (we could hash the file to see if there was a change, and give them the name of
        # that hash, to only copy when there is a modification but it seems overkill for now).
        # The only reason we do the copy is to avoid putting too many folders in sys.path.
        shutil.copy(resolved_module_file, submodule_path / module_file)
        for module_needed in modules_needed:
            module_needed = f"{module_needed}.py"
            shutil.copy(os.path.join(pretrained_model_name_or_path, module_needed), submodule_path / module_needed)
    else:
        # Get the commit hash
        # TODO: we will get this info in the etag soon, so retrieve it from there and not here.
        if isinstance(use_auth_token, str):
            token = use_auth_token
        elif use_auth_token is True:
            token = HfFolder.get_token()
        else:
            token = None

        commit_hash = model_info(pretrained_model_name_or_path, revision=revision, token=token).sha

        # The module file will end up being placed in a subfolder with the git hash of the repo. This way we get the
        # benefit of versioning.
        submodule_path = submodule_path / commit_hash
        full_submodule = full_submodule + os.path.sep + commit_hash
        create_dynamic_module(full_submodule)

        if not (submodule_path / module_file).exists():
            shutil.copy(resolved_module_file, submodule_path / module_file)
        # Make sure we also have every file with relative imports
        for module_needed in modules_needed:
            if not (submodule_path / module_needed).exists():
                get_cached_module_file(
                    pretrained_model_name_or_path,
                    f"{module_needed}.py",
                    cache_dir=cache_dir,
                    force_download=force_download,
                    resume_download=resume_download,
                    proxies=proxies,
                    use_auth_token=use_auth_token,
                    revision=revision,
                    local_files_only=local_files_only,
                )
    return os.path.join(full_submodule, module_file)
def get_class_from_dynamic_module(
    pretrained_model_name_or_path: Union[str, os.PathLike],
    module_file: str,
    class_name: Optional[str] = None,
    cache_dir: Optional[Union[str, os.PathLike]] = None,
    force_download: bool = False,
    resume_download: bool = False,
    proxies: Optional[Dict[str, str]] = None,
    use_auth_token: Optional[Union[bool, str]] = None,
    revision: Optional[str] = None,
    local_files_only: bool = False,
    **kwargs,
) -> int:
    """Extract a class from a module file, present in a local folder or in a model repository."""
    final_module = get_cached_module_file(
        pretrained_model_name_or_path,
        module_file,
        cache_dir=cache_dir,
        force_download=force_download,
        resume_download=resume_download,
        proxies=proxies,
        use_auth_token=use_auth_token,
        revision=revision,
        local_files_only=local_files_only,
    )
    return get_class_in_module(class_name, final_module.replace(".py", ""))
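# Hedged usage sketch: resolving a community pipeline class by name. The repo id and
# file name below are hypothetical placeholders for illustration.
#
#   pipeline_cls = get_class_from_dynamic_module(
#       "some-user/some-community-pipeline",  # hypothetical Hub repo id
#       "pipeline.py",
#       class_name=None,  # None -> auto-detect the single DiffusionPipeline subclass
#   )
#   pipe = pipeline_cls.from_pretrained("some-user/some-community-pipeline")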
| 14 |
"""simple docstring"""
import unittest
from transformers import BigBirdTokenizer, BigBirdTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, require_torch, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
A = '''▁'''
A = get_tests_dir('''fixtures/test_sentencepiece.model''')
@require_sentencepiece
@require_tokenizers
class __lowercase ( _UpperCamelCase , unittest.TestCase ):
'''simple docstring'''
__lowerCAmelCase = BigBirdTokenizer
__lowerCAmelCase = BigBirdTokenizerFast
__lowerCAmelCase = True
__lowerCAmelCase = True
def _lowerCamelCase ( self ):
super().setUp()
__a : Dict = self.tokenizer_class(_UpperCAmelCase , keep_accents=_UpperCAmelCase )
tokenizer.save_pretrained(self.tmpdirname )
def _lowerCamelCase ( self ):
__a : List[str] = '''<s>'''
__a : Tuple = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(_UpperCAmelCase ) , _UpperCAmelCase )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(_UpperCAmelCase ) , _UpperCAmelCase )
def _lowerCamelCase ( self ):
__a : Union[str, Any] = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , '''<unk>''' )
self.assertEqual(vocab_keys[1] , '''<s>''' )
self.assertEqual(vocab_keys[-1] , '''[MASK]''' )
self.assertEqual(len(_UpperCAmelCase ) , 1004 )
def _lowerCamelCase ( self ):
self.assertEqual(self.get_tokenizer().vocab_size , 1000 )
def _lowerCamelCase ( self ):
if not self.test_rust_tokenizer:
return
__a : Dict = self.get_tokenizer()
__a : Any = self.get_rust_tokenizer()
__a : int = '''I was born in 92000, and this is falsé.'''
__a : Optional[Any] = tokenizer.tokenize(_UpperCAmelCase )
__a : List[str] = rust_tokenizer.tokenize(_UpperCAmelCase )
self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase )
__a : Dict = tokenizer.encode(_UpperCAmelCase , add_special_tokens=_UpperCAmelCase )
__a : Any = rust_tokenizer.encode(_UpperCAmelCase , add_special_tokens=_UpperCAmelCase )
self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase )
__a : Tuple = self.get_rust_tokenizer()
__a : Tuple = tokenizer.encode(_UpperCAmelCase )
__a : List[Any] = rust_tokenizer.encode(_UpperCAmelCase )
self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase )
def _lowerCamelCase ( self ):
__a : Optional[int] = BigBirdTokenizer(_UpperCAmelCase , keep_accents=_UpperCAmelCase )
__a : Optional[int] = tokenizer.tokenize('''This is a test''' )
self.assertListEqual(_UpperCAmelCase , ['''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est'''] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(_UpperCAmelCase ) , [285, 46, 10, 170, 382] , )
__a : Optional[int] = tokenizer.tokenize('''I was born in 92000, and this is falsé.''' )
self.assertListEqual(
_UpperCAmelCase , [
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''9''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''é''',
'''.''',
] , )
__a : Optional[Any] = tokenizer.convert_tokens_to_ids(_UpperCAmelCase )
self.assertListEqual(
_UpperCAmelCase , [8, 21, 84, 55, 24, 19, 7, 0, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 0, 4] , )
__a : Optional[int] = tokenizer.convert_ids_to_tokens(_UpperCAmelCase )
self.assertListEqual(
_UpperCAmelCase , [
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''<unk>''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''<unk>''',
'''.''',
] , )
@cached_property
def _lowerCamelCase ( self ):
return BigBirdTokenizer.from_pretrained('''google/bigbird-roberta-base''' )
@slow
def _lowerCamelCase ( self ):
__a : str = '''Hello World!'''
__a : str = [65, 18536, 2260, 101, 66]
self.assertListEqual(_UpperCAmelCase , self.big_tokenizer.encode(_UpperCAmelCase ) )
@slow
def _lowerCamelCase ( self ):
__a : Any = (
'''This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) " [ ] ! : - . Also we will'''
''' add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth'''
)
# fmt: off
__a : Optional[Any] = [65, 871, 419, 358, 946, 991, 2521, 452, 358, 1357, 387, 7751, 3536, 112, 985, 456, 126, 865, 938, 5400, 5734, 458, 1368, 467, 786, 2462, 5246, 1159, 633, 865, 4519, 457, 582, 852, 2557, 427, 916, 508, 405, 34324, 497, 391, 408, 11342, 1244, 385, 100, 938, 985, 456, 574, 362, 12597, 3200, 3129, 1172, 66] # noqa: E231
# fmt: on
self.assertListEqual(_UpperCAmelCase , self.big_tokenizer.encode(_UpperCAmelCase ) )
@require_torch
@slow
def _lowerCamelCase ( self ):
import torch
from transformers import BigBirdConfig, BigBirdModel
# Build sequence
__a : List[Any] = list(self.big_tokenizer.get_vocab().keys() )[:10]
__a : List[str] = ''' '''.join(_UpperCAmelCase )
__a : Tuple = self.big_tokenizer.encode_plus(_UpperCAmelCase , return_tensors='''pt''' , return_token_type_ids=_UpperCAmelCase )
__a : Any = self.big_tokenizer.batch_encode_plus(
[sequence + ''' ''' + sequence] , return_tensors='''pt''' , return_token_type_ids=_UpperCAmelCase )
__a : Optional[Any] = BigBirdConfig(attention_type='''original_full''' )
__a : Tuple = BigBirdModel(_UpperCAmelCase )
assert model.get_input_embeddings().weight.shape[0] >= self.big_tokenizer.vocab_size
with torch.no_grad():
model(**_UpperCAmelCase )
model(**_UpperCAmelCase )
@slow
def _lowerCamelCase ( self ):
__a : Union[str, Any] = BigBirdTokenizer.from_pretrained('''google/bigbird-roberta-base''' )
__a : List[Any] = tokenizer.decode(tokenizer('''Paris is the [MASK].''' ).input_ids )
self.assertTrue(decoded_text == '''[CLS] Paris is the[MASK].[SEP]''' )
@slow
def _lowerCamelCase ( self ):
# fmt: off
__a : Optional[Any] = {'''input_ids''': [[65, 39286, 458, 36335, 2001, 456, 13073, 13266, 455, 113, 7746, 1741, 11157, 391, 13073, 13266, 455, 113, 3967, 35412, 113, 4936, 109, 3870, 2377, 113, 30084, 45720, 458, 134, 17496, 112, 503, 11672, 113, 118, 112, 5665, 13347, 38687, 112, 1496, 31389, 112, 3268, 47264, 134, 962, 112, 16377, 8035, 23130, 430, 12169, 15518, 28592, 458, 146, 41697, 109, 391, 12169, 15518, 16689, 458, 146, 41358, 109, 452, 726, 4034, 111, 763, 35412, 5082, 388, 1903, 111, 9051, 391, 2870, 48918, 1900, 1123, 550, 998, 112, 9586, 15985, 455, 391, 410, 22955, 37636, 114, 66], [65, 448, 17496, 419, 3663, 385, 763, 113, 27533, 2870, 3283, 13043, 1639, 24713, 523, 656, 24013, 18550, 2521, 517, 27014, 21244, 420, 1212, 1465, 391, 927, 4833, 388, 578, 11786, 114, 66, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [65, 484, 2169, 7687, 21932, 18146, 726, 363, 17032, 3391, 114, 66, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=_UpperCAmelCase , model_name='''google/bigbird-roberta-base''' , revision='''215c99f1600e06f83acce68422f2035b2b5c3510''' , )
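# Hedged illustration of the "▁" (SPIECE_UNDERLINE) convention in the expected
# tokenizations above: detokenization concatenates the pieces and maps "▁" to space.
#
#   pieces = ["▁This", "▁is", "▁a", "▁t", "est"]
#   assert "".join(pieces).replace("▁", " ").strip() == "This is a test"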
| 52 | 0 |
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
MobileViTConfig,
MobileViTForImageClassification,
MobileViTForSemanticSegmentation,
MobileViTImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
A : List[Any] = logging.get_logger(__name__)
def get_mobilevit_config(mobilevit_name):
    """Build a `MobileViTConfig` for the requested checkpoint variant."""
    config = MobileViTConfig()

    # size of the architecture
    if "mobilevit_s" in mobilevit_name:
        config.hidden_sizes = [144, 192, 240]
        config.neck_hidden_sizes = [16, 32, 64, 96, 128, 160, 640]
    elif "mobilevit_xs" in mobilevit_name:
        config.hidden_sizes = [96, 120, 144]
        config.neck_hidden_sizes = [16, 32, 48, 64, 80, 96, 384]
    elif "mobilevit_xxs" in mobilevit_name:
        config.hidden_sizes = [64, 80, 96]
        config.neck_hidden_sizes = [16, 16, 24, 48, 64, 80, 320]
        config.hidden_dropout_prob = 0.05
        config.expand_ratio = 2.0

    if mobilevit_name.startswith("deeplabv3_"):
        config.image_size = 512
        config.output_stride = 16
        config.num_labels = 21
        filename = "pascal-voc-id2label.json"
    else:
        config.num_labels = 1000
        filename = "imagenet-1k-id2label.json"

    repo_id = "huggingface/label-files"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}

    return config
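# Hedged usage sketch: inspecting the config built for one variant (attribute names
# follow the assignments above).
#
#   cfg = get_mobilevit_config("deeplabv3_mobilevit_xs")
#   cfg.neck_hidden_sizes  # [16, 32, 48, 64, 80, 96, 384]
#   cfg.num_labels         # 21 (PASCAL VOC)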
def rename_key(name, base_model=False):
    """Rename an original MLCVNets checkpoint key to its Hugging Face MobileViT equivalent."""
    for i in range(1, 6):
        if f"layer_{i}." in name:
            name = name.replace(f"layer_{i}.", f"encoder.layer.{i - 1}.")

    if "conv_1." in name:
        name = name.replace("conv_1.", "conv_stem.")
    if ".block." in name:
        name = name.replace(".block.", ".")
    if "exp_1x1" in name:
        name = name.replace("exp_1x1", "expand_1x1")
    if "red_1x1" in name:
        name = name.replace("red_1x1", "reduce_1x1")
    if ".local_rep.conv_3x3." in name:
        name = name.replace(".local_rep.conv_3x3.", ".conv_kxk.")
    if ".local_rep.conv_1x1." in name:
        name = name.replace(".local_rep.conv_1x1.", ".conv_1x1.")
    if ".norm." in name:
        name = name.replace(".norm.", ".normalization.")
    if ".conv." in name:
        name = name.replace(".conv.", ".convolution.")
    if ".conv_proj." in name:
        name = name.replace(".conv_proj.", ".conv_projection.")

    for i in range(0, 2):
        for j in range(0, 4):
            if f".{i}.{j}." in name:
                name = name.replace(f".{i}.{j}.", f".{i}.layer.{j}.")

    for i in range(2, 6):
        for j in range(0, 4):
            if f".{i}.{j}." in name:
                name = name.replace(f".{i}.{j}.", f".{i}.")
                if "expand_1x1" in name:
                    name = name.replace("expand_1x1", "downsampling_layer.expand_1x1")
                if "conv_3x3" in name:
                    name = name.replace("conv_3x3", "downsampling_layer.conv_3x3")
                if "reduce_1x1" in name:
                    name = name.replace("reduce_1x1", "downsampling_layer.reduce_1x1")

    for i in range(2, 5):
        if f".global_rep.{i}.weight" in name:
            name = name.replace(f".global_rep.{i}.weight", ".layernorm.weight")
        if f".global_rep.{i}.bias" in name:
            name = name.replace(f".global_rep.{i}.bias", ".layernorm.bias")

    if ".global_rep." in name:
        name = name.replace(".global_rep.", ".transformer.")
    if ".pre_norm_mha.0." in name:
        name = name.replace(".pre_norm_mha.0.", ".layernorm_before.")
    if ".pre_norm_mha.1.out_proj." in name:
        name = name.replace(".pre_norm_mha.1.out_proj.", ".attention.output.dense.")
    if ".pre_norm_ffn.0." in name:
        name = name.replace(".pre_norm_ffn.0.", ".layernorm_after.")
    if ".pre_norm_ffn.1." in name:
        name = name.replace(".pre_norm_ffn.1.", ".intermediate.dense.")
    if ".pre_norm_ffn.4." in name:
        name = name.replace(".pre_norm_ffn.4.", ".output.dense.")
    if ".transformer." in name:
        name = name.replace(".transformer.", ".transformer.layer.")

    if ".aspp_layer." in name:
        name = name.replace(".aspp_layer.", ".")
    if ".aspp_pool." in name:
        name = name.replace(".aspp_pool.", ".")
    if "seg_head." in name:
        name = name.replace("seg_head.", "segmentation_head.")
    if "segmentation_head.classifier.classifier." in name:
        name = name.replace("segmentation_head.classifier.classifier.", "segmentation_head.classifier.")

    if "classifier.fc." in name:
        name = name.replace("classifier.fc.", "classifier.")
    elif (not base_model) and ("segmentation_head." not in name):
        name = "mobilevit." + name

    return name
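# Hedged worked example of the renaming above, traced by hand:
#
#   rename_key("conv_1.conv.weight")
#   # "conv_1." -> "conv_stem.", ".conv." -> ".convolution.", then the "mobilevit."
#   # prefix is added -> "mobilevit.conv_stem.convolution.weight"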
def convert_state_dict(orig_state_dict, model, base_model=False):
    """Map every original checkpoint key/value onto the Hugging Face model, splitting fused QKV weights."""
    if base_model:
        model_prefix = ""
    else:
        model_prefix = "mobilevit."

    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)
        if key[:8] == "encoder.":
            key = key[8:]

        if "qkv" in key:
            key_split = key.split(".")
            layer_num = int(key_split[0][6:]) - 1
            transformer_num = int(key_split[3])
            layer = model.get_submodule(f"{model_prefix}encoder.layer.{layer_num}")
            dim = layer.transformer.layer[transformer_num].attention.attention.all_head_size
            prefix = (
                f"{model_prefix}encoder.layer.{layer_num}.transformer.layer.{transformer_num}.attention.attention."
            )
            if "weight" in key:
                orig_state_dict[prefix + "query.weight"] = val[:dim, :]
                orig_state_dict[prefix + "key.weight"] = val[dim : dim * 2, :]
                orig_state_dict[prefix + "value.weight"] = val[-dim:, :]
            else:
                orig_state_dict[prefix + "query.bias"] = val[:dim]
                orig_state_dict[prefix + "key.bias"] = val[dim : dim * 2]
                orig_state_dict[prefix + "value.bias"] = val[-dim:]
        else:
            orig_state_dict[rename_key(key, base_model)] = val

    return orig_state_dict
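# Hedged sketch of the fused-QKV split performed above: a (3 * dim, dim) weight is cut
# into equal query/key/value blocks along dim 0. Standalone toy example:
#
#   dim = 2
#   val = torch.arange(3 * dim * dim).reshape(3 * dim, dim)
#   q, k, v = val[:dim, :], val[dim : dim * 2, :], val[-dim:, :]
#   assert q.shape == k.shape == v.shape == (dim, dim)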
def prepare_img():
    """Fetch the standard COCO cats test image used to verify the converted model."""
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_movilevit_checkpoint(mobilevit_name, checkpoint_path, pytorch_dump_folder_path, push_to_hub=False):
    """Copy/paste/tweak the original checkpoint weights into our MobileViT structure and verify the outputs."""
    config = get_mobilevit_config(mobilevit_name)

    # load original state_dict
    state_dict = torch.load(checkpoint_path, map_location="cpu")

    # load 🤗 model
    if mobilevit_name.startswith("deeplabv3_"):
        model = MobileViTForSemanticSegmentation(config).eval()
    else:
        model = MobileViTForImageClassification(config).eval()

    new_state_dict = convert_state_dict(state_dict, model)
    model.load_state_dict(new_state_dict)

    # Check outputs on an image, prepared by MobileViTImageProcessor
    image_processor = MobileViTImageProcessor(crop_size=config.image_size, size=config.image_size + 32)
    encoding = image_processor(images=prepare_img(), return_tensors="pt")
    outputs = model(**encoding)
    logits = outputs.logits

    if mobilevit_name.startswith("deeplabv3_"):
        assert logits.shape == (1, 21, 32, 32)

        if mobilevit_name == "deeplabv3_mobilevit_s":
            expected_logits = torch.tensor(
                [
                    [[6.2065, 6.1292, 6.2070], [6.1079, 6.1254, 6.1747], [6.0042, 6.1071, 6.1034]],
                    [[-6.9253, -6.8653, -7.0398], [-7.3218, -7.3983, -7.3670], [-7.1961, -7.2482, -7.1569]],
                    [[-4.4723, -4.4348, -4.3769], [-5.3629, -5.4632, -5.4598], [-5.1587, -5.3402, -5.5059]],
                ]
            )
        elif mobilevit_name == "deeplabv3_mobilevit_xs":
            expected_logits = torch.tensor(
                [
                    [[5.4449, 5.5733, 5.6314], [5.1815, 5.3930, 5.5963], [5.1656, 5.4333, 5.4853]],
                    [[-9.4423, -9.7766, -9.6714], [-9.1581, -9.5720, -9.5519], [-9.1006, -9.6458, -9.5703]],
                    [[-7.7721, -7.3716, -7.1583], [-8.4599, -8.0624, -7.7944], [-8.4172, -7.8366, -7.5025]],
                ]
            )
        elif mobilevit_name == "deeplabv3_mobilevit_xxs":
            expected_logits = torch.tensor(
                [
                    [[6.9811, 6.9743, 7.3123], [7.1777, 7.1931, 7.3938], [7.5633, 7.8050, 7.8901]],
                    [[-10.5536, -10.2332, -10.2924], [-10.2336, -9.8624, -9.5964], [-10.8840, -10.8158, -10.6659]],
                    [[-3.4938, -3.0631, -2.8620], [-3.4205, -2.8135, -2.6875], [-3.4179, -2.7945, -2.8750]],
                ]
            )
        else:
            raise ValueError(f"Unknown mobilevit_name: {mobilevit_name}")

        assert torch.allclose(logits[0, :3, :3, :3], expected_logits, atol=1e-4)
    else:
        assert logits.shape == (1, 1000)

        if mobilevit_name == "mobilevit_s":
            expected_logits = torch.tensor([-0.9866, 0.2392, -1.1241])
        elif mobilevit_name == "mobilevit_xs":
            expected_logits = torch.tensor([-2.4761, -0.9399, -1.9587])
        elif mobilevit_name == "mobilevit_xxs":
            expected_logits = torch.tensor([-1.9364, -1.2327, -0.4653])
        else:
            raise ValueError(f"Unknown mobilevit_name: {mobilevit_name}")

        assert torch.allclose(logits[0, :3], expected_logits, atol=1e-4)

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model {mobilevit_name} to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        model_mapping = {
            "mobilevit_s": "mobilevit-small",
            "mobilevit_xs": "mobilevit-x-small",
            "mobilevit_xxs": "mobilevit-xx-small",
            "deeplabv3_mobilevit_s": "deeplabv3-mobilevit-small",
            "deeplabv3_mobilevit_xs": "deeplabv3-mobilevit-x-small",
            "deeplabv3_mobilevit_xxs": "deeplabv3-mobilevit-xx-small",
        }

        print("Pushing to the hub...")
        model_name = model_mapping[mobilevit_name]
        image_processor.push_to_hub(model_name, organization="apple")
        model.push_to_hub(model_name, organization="apple")
if __name__ == "__main__":
A : List[str] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--mobilevit_name',
default='mobilevit_s',
type=str,
help=(
'Name of the MobileViT model you\'d like to convert. Should be one of \'mobilevit_s\', \'mobilevit_xs\','
' \'mobilevit_xxs\', \'deeplabv3_mobilevit_s\', \'deeplabv3_mobilevit_xs\', \'deeplabv3_mobilevit_xxs\'.'
),
)
parser.add_argument(
'--checkpoint_path', required=True, type=str, help='Path to the original state dict (.pt file).'
)
parser.add_argument(
'--pytorch_dump_folder_path', required=True, type=str, help='Path to the output PyTorch model directory.'
)
parser.add_argument(
'--push_to_hub', action='store_true', help='Whether or not to push the converted model to the 🤗 hub.'
)
A : Optional[int] = parser.parse_args()
convert_movilevit_checkpoint(
args.mobilevit_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub
)
| 15 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
A = logging.get_logger(__name__)
A = {
'''facebook/convnextv2-tiny-1k-224''': '''https://huggingface.co/facebook/convnextv2-tiny-1k-224/resolve/main/config.json''',
}
class ConvNextV2Config(BackboneConfigMixin, PretrainedConfig):
    """Configuration class for a ConvNeXt V2 model."""

    model_type = "convnextv2"

    def __init__(
        self,
        num_channels=3,
        patch_size=4,
        num_stages=4,
        hidden_sizes=None,
        depths=None,
        hidden_act="gelu",
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        drop_path_rate=0.0,
        image_size=224,
        out_features=None,
        out_indices=None,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.num_channels = num_channels
        self.patch_size = patch_size
        self.num_stages = num_stages
        self.hidden_sizes = [96, 192, 384, 768] if hidden_sizes is None else hidden_sizes
        self.depths = [3, 3, 9, 3] if depths is None else depths
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.drop_path_rate = drop_path_rate
        self.image_size = image_size
        self.stage_names = ["stem"] + [f"stage{idx}" for idx in range(1, len(self.depths) + 1)]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names
        )
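# Hedged usage sketch: constructing the config with explicit backbone outputs. The
# stage names are derived from `depths` above, so "stage1".."stage4" exist by default.
#
#   cfg = ConvNextV2Config(out_features=["stage2", "stage4"])
#   cfg.stage_names   # ['stem', 'stage1', 'stage2', 'stage3', 'stage4']
#   cfg.out_features  # ['stage2', 'stage4']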
| 52 | 0 |
def ugly_numbers(n: int) -> int:
    """
    Return the nth ugly number (a positive integer whose only prime factors are 2, 3 and 5).

    >>> ugly_numbers(1)
    1
    >>> ugly_numbers(10)
    12
    """
    ugly_nums = [1]
    i2, i3, i5 = 0, 0, 0
    next_2 = ugly_nums[i2] * 2
    next_3 = ugly_nums[i3] * 3
    next_5 = ugly_nums[i5] * 5

    for _ in range(1, n):
        next_num = min(next_2, next_3, next_5)
        ugly_nums.append(next_num)
        if next_num == next_2:
            i2 += 1
            next_2 = ugly_nums[i2] * 2
        if next_num == next_3:
            i3 += 1
            next_3 = ugly_nums[i3] * 3
        if next_num == next_5:
            i5 += 1
            next_5 = ugly_nums[i5] * 5
    return ugly_nums[-1]
if __name__ == "__main__":
from doctest import testmod
testmod(verbose=True)
print(f'{ugly_numbers(2_0_0) = }')
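# Hedged alternative (illustration): the same sequence via a min-heap, a common way to
# cross-check the three-pointer version above.
#
#   import heapq
#   def ugly_numbers_heap(n: int) -> int:
#       heap, seen = [1], {1}
#       for _ in range(n):
#           num = heapq.heappop(heap)
#           for factor in (2, 3, 5):
#               if num * factor not in seen:
#                   seen.add(num * factor)
#                   heapq.heappush(heap, num * factor)
#       return num
#   assert ugly_numbers_heap(10) == ugly_numbers(10) == 12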
| 16 |
"""simple docstring"""
import torch
from diffusers import DDPMScheduler
from .test_schedulers import SchedulerCommonTest
class __lowercase ( _UpperCamelCase ):
'''simple docstring'''
__lowerCAmelCase = (DDPMScheduler,)
def _lowerCamelCase ( self , **_UpperCAmelCase ):
__a : int = {
'''num_train_timesteps''': 1000,
'''beta_start''': 0.0_0_0_1,
'''beta_end''': 0.0_2,
'''beta_schedule''': '''linear''',
'''variance_type''': '''fixed_small''',
'''clip_sample''': True,
}
config.update(**_UpperCAmelCase )
return config
def _lowerCamelCase ( self ):
for timesteps in [1, 5, 100, 1000]:
self.check_over_configs(num_train_timesteps=_UpperCAmelCase )
def _lowerCamelCase ( self ):
for beta_start, beta_end in zip([0.0_0_0_1, 0.0_0_1, 0.0_1, 0.1] , [0.0_0_2, 0.0_2, 0.2, 2] ):
self.check_over_configs(beta_start=_UpperCAmelCase , beta_end=_UpperCAmelCase )
def _lowerCamelCase ( self ):
for schedule in ["linear", "squaredcos_cap_v2"]:
self.check_over_configs(beta_schedule=_UpperCAmelCase )
def _lowerCamelCase ( self ):
for variance in ["fixed_small", "fixed_large", "other"]:
self.check_over_configs(variance_type=_UpperCAmelCase )
def _lowerCamelCase ( self ):
for clip_sample in [True, False]:
self.check_over_configs(clip_sample=_UpperCAmelCase )
def _lowerCamelCase ( self ):
self.check_over_configs(thresholding=_UpperCAmelCase )
for threshold in [0.5, 1.0, 2.0]:
for prediction_type in ["epsilon", "sample", "v_prediction"]:
self.check_over_configs(
thresholding=_UpperCAmelCase , prediction_type=_UpperCAmelCase , sample_max_value=_UpperCAmelCase , )
def _lowerCamelCase ( self ):
for prediction_type in ["epsilon", "sample", "v_prediction"]:
self.check_over_configs(prediction_type=_UpperCAmelCase )
def _lowerCamelCase ( self ):
for t in [0, 500, 999]:
self.check_over_forward(time_step=_UpperCAmelCase )
def _lowerCamelCase ( self ):
__a : List[Any] = self.scheduler_classes[0]
__a : Dict = self.get_scheduler_config()
__a : Dict = scheduler_class(**_UpperCAmelCase )
assert torch.sum(torch.abs(scheduler._get_variance(0 ) - 0.0 ) ) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(487 ) - 0.0_0_9_7_9 ) ) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(999 ) - 0.0_2 ) ) < 1e-5
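# Hedged sketch of the "fixed_small" variance these assertions exercise:
# beta_t * (1 - alpha_bar_{t-1}) / (1 - alpha_bar_t) on the linear beta schedule.
#
#   betas = torch.linspace(0.0001, 0.02, 1000)
#   alphas_cumprod = torch.cumprod(1.0 - betas, dim=0)
#   t = 487
#   variance = betas[t] * (1 - alphas_cumprod[t - 1]) / (1 - alphas_cumprod[t])
#   # variance ≈ 0.00979, matching the middle assertion above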
def _lowerCamelCase ( self ):
__a : int = self.scheduler_classes[0]
__a : int = self.get_scheduler_config()
__a : Optional[Any] = scheduler_class(**_UpperCAmelCase )
__a : int = len(_UpperCAmelCase )
__a : List[str] = self.dummy_model()
__a : List[Any] = self.dummy_sample_deter
__a : Union[str, Any] = torch.manual_seed(0 )
for t in reversed(range(_UpperCAmelCase ) ):
# 1. predict noise residual
__a : Optional[int] = model(_UpperCAmelCase , _UpperCAmelCase )
# 2. predict previous mean of sample x_t-1
__a : Dict = scheduler.step(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , generator=_UpperCAmelCase ).prev_sample
# if t > 0:
# noise = self.dummy_sample_deter
# variance = scheduler.get_variance(t) ** (0.5) * noise
#
# sample = pred_prev_sample + variance
__a : List[Any] = pred_prev_sample
__a : int = torch.sum(torch.abs(_UpperCAmelCase ) )
__a : Union[str, Any] = torch.mean(torch.abs(_UpperCAmelCase ) )
assert abs(result_sum.item() - 2_5_8.9_6_0_6 ) < 1e-2
assert abs(result_mean.item() - 0.3_3_7_2 ) < 1e-3
def _lowerCamelCase ( self ):
__a : Dict = self.scheduler_classes[0]
__a : int = self.get_scheduler_config(prediction_type='''v_prediction''' )
__a : int = scheduler_class(**_UpperCAmelCase )
__a : Union[str, Any] = len(_UpperCAmelCase )
__a : List[str] = self.dummy_model()
__a : List[str] = self.dummy_sample_deter
__a : str = torch.manual_seed(0 )
for t in reversed(range(_UpperCAmelCase ) ):
# 1. predict noise residual
__a : Dict = model(_UpperCAmelCase , _UpperCAmelCase )
# 2. predict previous mean of sample x_t-1
__a : Dict = scheduler.step(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , generator=_UpperCAmelCase ).prev_sample
# if t > 0:
# noise = self.dummy_sample_deter
# variance = scheduler.get_variance(t) ** (0.5) * noise
#
# sample = pred_prev_sample + variance
__a : Optional[int] = pred_prev_sample
__a : Optional[int] = torch.sum(torch.abs(_UpperCAmelCase ) )
__a : int = torch.mean(torch.abs(_UpperCAmelCase ) )
assert abs(result_sum.item() - 2_0_2.0_2_9_6 ) < 1e-2
assert abs(result_mean.item() - 0.2_6_3_1 ) < 1e-3
def _lowerCamelCase ( self ):
__a : List[Any] = self.scheduler_classes[0]
__a : Any = self.get_scheduler_config()
__a : str = scheduler_class(**_UpperCAmelCase )
__a : Union[str, Any] = [100, 87, 50, 1, 0]
scheduler.set_timesteps(timesteps=_UpperCAmelCase )
__a : List[Any] = scheduler.timesteps
for i, timestep in enumerate(_UpperCAmelCase ):
if i == len(_UpperCAmelCase ) - 1:
__a : Union[str, Any] = -1
else:
__a : str = timesteps[i + 1]
__a : Dict = scheduler.previous_timestep(_UpperCAmelCase )
__a : str = prev_t.item()
self.assertEqual(_UpperCAmelCase , _UpperCAmelCase )
def _lowerCamelCase ( self ):
__a : Tuple = self.scheduler_classes[0]
__a : Dict = self.get_scheduler_config()
__a : Any = scheduler_class(**_UpperCAmelCase )
__a : Optional[Any] = [100, 87, 50, 51, 0]
with self.assertRaises(_UpperCAmelCase , msg='''`custom_timesteps` must be in descending order.''' ):
scheduler.set_timesteps(timesteps=_UpperCAmelCase )
def _lowerCamelCase ( self ):
__a : List[Any] = self.scheduler_classes[0]
__a : Optional[Any] = self.get_scheduler_config()
__a : Any = scheduler_class(**_UpperCAmelCase )
__a : Union[str, Any] = [100, 87, 50, 1, 0]
__a : Optional[int] = len(_UpperCAmelCase )
with self.assertRaises(_UpperCAmelCase , msg='''Can only pass one of `num_inference_steps` or `custom_timesteps`.''' ):
scheduler.set_timesteps(num_inference_steps=_UpperCAmelCase , timesteps=_UpperCAmelCase )
def _lowerCamelCase ( self ):
__a : Optional[int] = self.scheduler_classes[0]
__a : Optional[Any] = self.get_scheduler_config()
__a : List[str] = scheduler_class(**_UpperCAmelCase )
__a : List[Any] = [scheduler.config.num_train_timesteps]
with self.assertRaises(
            _UpperCAmelCase , msg=f'''`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}''' , ):
scheduler.set_timesteps(timesteps=_UpperCAmelCase )
| 52 | 0 |
import unittest
from transformers import JukeboxTokenizer
from transformers.testing_utils import require_torch
class lowerCamelCase_ ( unittest.TestCase ):
_lowercase : List[str] = JukeboxTokenizer
_lowercase : Any = {
'''artist''': '''Zac Brown Band''',
'''genres''': '''Country''',
'''lyrics''': '''I met a traveller from an antique land,
Who said "Two vast and trunkless legs of stone
Stand in the desert. . . . Near them, on the sand,
Half sunk a shattered visage lies, whose frown,
And wrinkled lip, and sneer of cold command,
Tell that its sculptor well those passions read
Which yet survive, stamped on these lifeless things,
The hand that mocked them, and the heart that fed;
And on the pedestal, these words appear:
My name is Ozymandias, King of Kings;
Look on my Works, ye Mighty, and despair!
Nothing beside remains. Round the decay
Of that colossal Wreck, boundless and bare
The lone and level sands stretch far away
''',
}
@require_torch
def lowerCAmelCase_ ( self : List[Any] ):
import torch
__A : Union[str, Any] = JukeboxTokenizer.from_pretrained("""openai/jukebox-1b-lyrics""" )
__A : Tuple = tokenizer(**self.metas )["""input_ids"""]
# fmt: off
__A : Any = [
torch.tensor([[
0, 0, 0, 7169, 507, 9, 76, 39, 31, 46, 76, 27,
76, 46, 44, 27, 48, 31, 38, 38, 31, 44, 76, 32,
44, 41, 39, 76, 27, 40, 76, 27, 40, 46, 35, 43,
47, 31, 76, 38, 27, 40, 30, 64, 78, 76, 76, 76,
76, 76, 76, 76, 76, 23, 34, 41, 76, 45, 27, 35,
30, 76, 71, 20, 49, 41, 76, 48, 27, 45, 46, 76,
27, 40, 30, 76, 46, 44, 47, 40, 37, 38, 31, 45,
45, 76, 38, 31, 33, 45, 76, 41, 32, 76, 45, 46,
41, 40, 31, 78, 76, 76, 76, 76, 76, 76, 76, 76,
19, 46, 27, 40, 30, 76, 35, 40, 76, 46, 34, 31,
76, 30, 31, 45, 31, 44, 46, 63, 76, 63, 76, 63,
76, 63, 76, 14, 31, 27, 44, 76, 46, 34, 31, 39,
64, 76, 41, 40, 76, 46, 34, 31, 76, 45, 27, 40,
30, 64, 78, 76, 76, 76, 76, 76, 76, 76, 76, 8,
27, 38, 32, 76, 45, 47, 40, 37, 76, 27, 76, 45,
34, 27, 46, 46, 31, 44, 31, 30, 76, 48, 35, 45,
27, 33, 31, 76, 38, 35, 31, 45, 64, 76, 49, 34,
41, 45, 31, 76, 32, 44, 41, 49, 40, 64, 78, 76,
76, 76, 76, 76, 76, 76, 76, 1, 40, 30, 76, 49,
44, 35, 40, 37, 38, 31, 30, 76, 38, 35, 42, 64,
76, 27, 40, 30, 76, 45, 40, 31, 31, 44, 76, 41,
32, 76, 29, 41, 38, 30, 76, 29, 41, 39, 39, 27,
40, 30, 64, 78, 76, 76, 76, 76, 76, 76, 76, 76,
20, 31, 38, 38, 76, 46, 34, 27, 46, 76, 35, 46,
45, 76, 45, 29, 47, 38, 42, 46, 41, 44, 76, 49,
31, 38, 38, 76, 46, 34, 41, 45, 31, 76, 42, 27,
45, 45, 35, 41, 40, 45, 76, 44, 31, 27, 30, 78,
76, 76, 76, 76, 76, 76, 76, 76, 23, 34, 35, 29,
34, 76, 51, 31, 46, 76, 45, 47, 44, 48, 35, 48,
31, 64, 76, 45, 46, 27, 39, 42, 31, 30, 76, 41,
40, 76, 46, 34, 31, 45, 31, 76, 38, 35, 32, 31,
38, 31, 45, 45, 76, 46, 34, 35, 40, 33, 45, 64,
78, 76, 76, 76, 76, 76, 76, 76, 76, 20, 34, 31,
76, 34, 27, 40, 30, 76, 46, 34, 27, 46, 76, 39,
41, 29, 37, 31, 30, 76, 46, 34, 31, 39, 64, 76,
27, 40, 30, 76, 46, 34, 31, 76, 34, 31, 27, 44,
46, 76, 46, 34, 27, 46, 76, 32, 31, 30, 66, 78,
76, 76, 76, 76, 76, 76, 76, 76, 1, 40, 30, 76,
41, 40, 76, 46, 34, 31, 76, 42, 31, 30, 31, 45,
46, 27, 38, 64, 76, 46, 34, 31, 45, 31, 76, 49,
41, 44, 30, 45, 76, 27, 42, 42, 31, 27, 44, 65,
78, 76, 76, 76, 76, 76, 76, 76, 76, 13, 51, 76,
40, 27, 39, 31, 76, 35, 45, 76, 15, 52, 51, 39,
27, 40, 30, 35, 27, 45, 64, 76, 11, 35, 40, 33,
76, 41, 32, 76, 11, 35, 40, 33, 45, 66, 78, 76,
76, 76, 76, 76, 76, 76, 76, 12, 41, 41, 37, 76,
41, 40, 76, 39, 51, 76, 23, 41, 44, 37, 45, 64,
76, 51, 31, 76, 13, 35, 33, 34, 46, 51, 64, 76,
27, 40, 30, 76, 30, 31, 45, 42, 27, 35, 44, 67,
78, 76, 76, 76, 76, 76, 76, 76, 76, 14, 41, 46,
34, 35, 40, 33, 76, 28, 31, 45, 35, 30, 31, 76,
44, 31, 39, 27, 35, 40, 45, 63, 76, 18, 41, 47,
40, 30, 76, 46, 34, 31, 76, 30, 31, 29, 27, 51,
78, 76, 76, 76, 76, 76, 76, 76, 76, 15, 32, 76,
46, 34, 27, 46, 76, 29, 41, 38, 41, 45, 45, 27,
38, 76, 23, 44, 31, 29, 37, 64, 76, 28, 41, 47,
40, 30, 38, 31, 45, 45, 76, 27, 40, 30, 76, 28,
27, 44, 31, 78, 76, 76, 76, 76, 76, 76, 76, 76,
20, 34, 31, 76, 38, 41, 40, 31, 76, 27, 40, 30,
76, 38, 31, 48, 31, 38, 76, 45, 27, 40, 30, 45,
76, 45, 46, 44, 31, 46, 29, 34, 76, 32, 27, 44,
76, 27, 49, 27, 51, 78, 76, 76, 76, 76, 76, 76,
76, 76]] ),
torch.tensor([[0, 0, 0, 1069, 11]] ),
torch.tensor([[0, 0, 0, 1069, 11]] ),
]
# fmt: on
self.assertTrue(torch.allclose(tokens[0] , EXPECTED_OUTPUT[0] ) )
self.assertTrue(torch.allclose(tokens[1] , EXPECTED_OUTPUT[1] ) )
self.assertTrue(torch.allclose(tokens[2] , EXPECTED_OUTPUT[2] ) )
@require_torch
def lowerCAmelCase_ ( self : Optional[Any] ):
import torch
__A : int = JukeboxTokenizer.from_pretrained("""openai/jukebox-5b-lyrics""" )
__A : List[str] = tokenizer(**self.metas )["""input_ids"""]
# fmt: off
__A : Dict = [
torch.tensor([[
0, 0, 0, 1069, 11, -1, -1, -1, -1, 9, 77, 39,
31, 46, 77, 27, 77, 46, 44, 27, 48, 31, 38, 38,
31, 44, 77, 32, 44, 41, 39, 77, 27, 40, 77, 27,
40, 46, 35, 43, 47, 31, 77, 38, 27, 40, 30, 64,
79, 77, 77, 77, 77, 77, 77, 77, 77, 23, 34, 41,
77, 45, 27, 35, 30, 77, 72, 20, 49, 41, 77, 48,
27, 45, 46, 77, 27, 40, 30, 77, 46, 44, 47, 40,
37, 38, 31, 45, 45, 77, 38, 31, 33, 45, 77, 41,
32, 77, 45, 46, 41, 40, 31, 79, 77, 77, 77, 77,
77, 77, 77, 77, 19, 46, 27, 40, 30, 77, 35, 40,
77, 46, 34, 31, 77, 30, 31, 45, 31, 44, 46, 63,
77, 63, 77, 63, 77, 63, 77, 14, 31, 27, 44, 77,
46, 34, 31, 39, 64, 77, 41, 40, 77, 46, 34, 31,
77, 45, 27, 40, 30, 64, 79, 77, 77, 77, 77, 77,
77, 77, 77, 8, 27, 38, 32, 77, 45, 47, 40, 37,
77, 27, 77, 45, 34, 27, 46, 46, 31, 44, 31, 30,
77, 48, 35, 45, 27, 33, 31, 77, 38, 35, 31, 45,
64, 77, 49, 34, 41, 45, 31, 77, 32, 44, 41, 49,
40, 64, 79, 77, 77, 77, 77, 77, 77, 77, 77, 1,
40, 30, 77, 49, 44, 35, 40, 37, 38, 31, 30, 77,
38, 35, 42, 64, 77, 27, 40, 30, 77, 45, 40, 31,
31, 44, 77, 41, 32, 77, 29, 41, 38, 30, 77, 29,
41, 39, 39, 27, 40, 30, 64, 79, 77, 77, 77, 77,
77, 77, 77, 77, 20, 31, 38, 38, 77, 46, 34, 27,
46, 77, 35, 46, 45, 77, 45, 29, 47, 38, 42, 46,
41, 44, 77, 49, 31, 38, 38, 77, 46, 34, 41, 45,
31, 77, 42, 27, 45, 45, 35, 41, 40, 45, 77, 44,
31, 27, 30, 79, 77, 77, 77, 77, 77, 77, 77, 77,
23, 34, 35, 29, 34, 77, 51, 31, 46, 77, 45, 47,
44, 48, 35, 48, 31, 64, 77, 45, 46, 27, 39, 42,
31, 30, 77, 41, 40, 77, 46, 34, 31, 45, 31, 77,
38, 35, 32, 31, 38, 31, 45, 45, 77, 46, 34, 35,
40, 33, 45, 64, 79, 77, 77, 77, 77, 77, 77, 77,
77, 20, 34, 31, 77, 34, 27, 40, 30, 77, 46, 34,
27, 46, 77, 39, 41, 29, 37, 31, 30, 77, 46, 34,
31, 39, 64, 77, 27, 40, 30, 77, 46, 34, 31, 77,
34, 31, 27, 44, 46, 77, 46, 34, 27, 46, 77, 32,
31, 30, 66, 79, 77, 77, 77, 77, 77, 77, 77, 77,
1, 40, 30, 77, 41, 40, 77, 46, 34, 31, 77, 42,
31, 30, 31, 45, 46, 27, 38, 64, 77, 46, 34, 31,
45, 31, 77, 49, 41, 44, 30, 45, 77, 27, 42, 42,
31, 27, 44, 65, 79, 77, 77, 77, 77, 77, 77, 77,
77, 13, 51, 77, 40, 27, 39, 31, 77, 35, 45, 77,
15, 52, 51, 39, 27, 40, 30, 35, 27, 45, 64, 77,
11, 35, 40, 33, 77, 41, 32, 77, 11, 35, 40, 33,
45, 66, 79, 77, 77, 77, 77, 77, 77, 77, 77, 12,
41, 41, 37, 77, 41, 40, 77, 39, 51, 77, 23, 41,
44, 37, 45, 64, 77, 51, 31, 77, 13, 35, 33, 34,
46, 51, 64, 77, 27, 40, 30, 77, 30, 31, 45, 42,
27, 35, 44, 67, 79, 77, 77, 77, 77, 77, 77, 77,
77, 14, 41, 46, 34, 35, 40, 33, 77, 28, 31, 45,
35, 30, 31, 77, 44, 31, 39, 27, 35, 40, 45, 63,
77, 18, 41, 47, 40, 30, 77, 46, 34, 31, 77, 30,
31, 29, 27, 51, 79, 77, 77, 77, 77, 77, 77, 77,
77, 15, 32, 77, 46, 34, 27, 46, 77, 29, 41, 38,
41, 45, 45, 27, 38, 77, 23, 44, 31, 29, 37, 64,
77, 28, 41, 47, 40, 30, 38, 31, 45, 45, 77, 27,
40, 30, 77, 28, 27, 44, 31, 79, 77, 77, 77, 77,
77, 77, 77, 77, 20, 34, 31, 77, 38, 41, 40, 31,
77, 27, 40, 30, 77, 38, 31, 48, 31, 38, 77, 45,
27, 40, 30, 45, 77, 45, 46, 44, 31, 46, 29, 34,
77, 32, 27, 44, 77, 27, 49, 27, 51, 79, 77, 77,
77, 77, 77, 77, 77, 77]] ),
torch.tensor([[0, 0, 0, 1069, 11, -1, -1, -1, -1]] ),
torch.tensor([[0, 0, 0, 1069, 11, -1, -1, -1, -1]] ),
]
# fmt: on
self.assertTrue(torch.allclose(tokens[0] , EXPECTED_OUTPUT[0] ) )
self.assertTrue(torch.allclose(tokens[1] , EXPECTED_OUTPUT[1] ) )
self.assertTrue(torch.allclose(tokens[2] , EXPECTED_OUTPUT[2] ) )
| 17 |
"""simple docstring"""
import itertools
import os
import random
import tempfile
import unittest
import numpy as np
from transformers import TvltFeatureExtractor, is_datasets_available
from transformers.testing_utils import check_json_file_has_correct_format, require_torch, require_torchaudio
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
if is_torch_available():
import torch
if is_datasets_available():
from datasets import load_dataset
A = random.Random()
def __A ( a_ :Tuple , a_ :Dict=1.0 , a_ :str=None , a_ :List[Any]=None) -> Dict:
if rng is None:
__a : Any = global_rng
__a : Tuple = []
for batch_idx in range(shape[0]):
values.append([])
for _ in range(shape[1]):
values[-1].append(rng.random() * scale)
return values
class __lowercase ( unittest.TestCase ):
'''simple docstring'''
def __init__( self , _UpperCAmelCase , _UpperCAmelCase=7 , _UpperCAmelCase=400 , _UpperCAmelCase=2000 , _UpperCAmelCase=2048 , _UpperCAmelCase=128 , _UpperCAmelCase=1 , _UpperCAmelCase=512 , _UpperCAmelCase=30 , _UpperCAmelCase=44100 , ):
__a : Any = parent
__a : Tuple = batch_size
__a : Tuple = min_seq_length
__a : List[str] = max_seq_length
__a : List[Any] = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
__a : Tuple = spectrogram_length
__a : int = feature_size
__a : int = num_audio_channels
__a : Tuple = hop_length
__a : List[Any] = chunk_length
__a : Any = sampling_rate
def _lowerCamelCase ( self ):
return {
"spectrogram_length": self.spectrogram_length,
"feature_size": self.feature_size,
"num_audio_channels": self.num_audio_channels,
"hop_length": self.hop_length,
"chunk_length": self.chunk_length,
"sampling_rate": self.sampling_rate,
}
def _lowerCamelCase ( self , _UpperCAmelCase=False , _UpperCAmelCase=False ):
def _flatten(_UpperCAmelCase ):
return list(itertools.chain(*_UpperCAmelCase ) )
if equal_length:
__a : Tuple = [floats_list((self.max_seq_length, self.feature_size) ) for _ in range(self.batch_size )]
else:
# make sure that inputs increase in size
__a : Tuple = [
floats_list((x, self.feature_size) )
for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff )
]
if numpify:
__a : Optional[Any] = [np.asarray(_UpperCAmelCase ) for x in speech_inputs]
return speech_inputs
@require_torch
@require_torchaudio
class __lowercase ( _UpperCamelCase , unittest.TestCase ):
'''simple docstring'''
__lowerCAmelCase = TvltFeatureExtractor
def _lowerCamelCase ( self ):
__a : Optional[Any] = TvltFeatureExtractionTester(self )
def _lowerCamelCase ( self ):
__a : int = self.feature_extraction_class(**self.feat_extract_dict )
self.assertTrue(hasattr(_UpperCAmelCase , '''spectrogram_length''' ) )
self.assertTrue(hasattr(_UpperCAmelCase , '''feature_size''' ) )
self.assertTrue(hasattr(_UpperCAmelCase , '''num_audio_channels''' ) )
self.assertTrue(hasattr(_UpperCAmelCase , '''hop_length''' ) )
self.assertTrue(hasattr(_UpperCAmelCase , '''chunk_length''' ) )
self.assertTrue(hasattr(_UpperCAmelCase , '''sampling_rate''' ) )
def _lowerCamelCase ( self ):
__a : Union[str, Any] = self.feature_extraction_class(**self.feat_extract_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
__a : List[str] = feat_extract_first.save_pretrained(_UpperCAmelCase )[0]
check_json_file_has_correct_format(_UpperCAmelCase )
__a : Union[str, Any] = self.feature_extraction_class.from_pretrained(_UpperCAmelCase )
__a : Tuple = feat_extract_first.to_dict()
__a : List[Any] = feat_extract_second.to_dict()
__a : int = dict_first.pop('''mel_filters''' )
__a : List[Any] = dict_second.pop('''mel_filters''' )
self.assertTrue(np.allclose(_UpperCAmelCase , _UpperCAmelCase ) )
self.assertEqual(_UpperCAmelCase , _UpperCAmelCase )
def _lowerCamelCase ( self ):
__a : List[str] = self.feature_extraction_class(**self.feat_extract_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
__a : int = os.path.join(_UpperCAmelCase , '''feat_extract.json''' )
feat_extract_first.to_json_file(_UpperCAmelCase )
__a : Optional[Any] = self.feature_extraction_class.from_json_file(_UpperCAmelCase )
__a : Optional[Any] = feat_extract_first.to_dict()
__a : Any = feat_extract_second.to_dict()
__a : Optional[Any] = dict_first.pop('''mel_filters''' )
__a : Dict = dict_second.pop('''mel_filters''' )
self.assertTrue(np.allclose(_UpperCAmelCase , _UpperCAmelCase ) )
self.assertEqual(_UpperCAmelCase , _UpperCAmelCase )
def _lowerCamelCase ( self ):
# Initialize feature_extractor
__a : str = self.feature_extraction_class(**self.feat_extract_dict )
# create three inputs of length 800, 1000, and 1200
__a : Any = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )]
__a : Union[str, Any] = [np.asarray(_UpperCAmelCase ) for speech_input in speech_inputs]
# Test not batched input
__a : List[str] = feature_extractor(np_speech_inputs[0] , return_tensors='''np''' , sampling_rate=44100 ).audio_values
self.assertTrue(encoded_audios.ndim == 4 )
self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size )
self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length )
self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels )
# Test batched
__a : int = feature_extractor(_UpperCAmelCase , return_tensors='''np''' , sampling_rate=44100 ).audio_values
self.assertTrue(encoded_audios.ndim == 4 )
self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size )
self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length )
self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels )
# Test audio masking
__a : List[Any] = feature_extractor(
_UpperCAmelCase , return_tensors='''np''' , sampling_rate=44100 , mask_audio=_UpperCAmelCase ).audio_values
self.assertTrue(encoded_audios.ndim == 4 )
self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size )
self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length )
self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels )
# Test 2-D numpy arrays are batched.
__a : str = [floats_list((1, x) )[0] for x in (800, 800, 800)]
__a : Any = np.asarray(_UpperCAmelCase )
__a : Optional[Any] = feature_extractor(_UpperCAmelCase , return_tensors='''np''' , sampling_rate=44100 ).audio_values
self.assertTrue(encoded_audios.ndim == 4 )
self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size )
self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length )
self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels )
def _lowerCamelCase ( self , _UpperCAmelCase ):
__a : int = load_dataset('''hf-internal-testing/librispeech_asr_dummy''' , '''clean''' , split='''validation''' )
# automatic decoding with librispeech
__a : int = ds.sort('''id''' ).select(range(_UpperCAmelCase ) )[:num_samples]['''audio''']
return [x["array"] for x in speech_samples]
def _lowerCamelCase ( self ):
__a : List[str] = self._load_datasamples(1 )
__a : Tuple = TvltFeatureExtractor()
__a : Optional[Any] = feature_extractor(_UpperCAmelCase , return_tensors='''pt''' ).audio_values
        self.assertEqual(audio_values.shape , (1, 1, 192, 128) )
__a : Dict = torch.tensor([[-0.3_0_3_2, -0.2_7_0_8], [-0.4_4_3_4, -0.4_0_0_7]] )
self.assertTrue(torch.allclose(audio_values[0, 0, :2, :2] , _UpperCAmelCase , atol=1e-4 ) )
| 52 | 0 |
'''simple docstring'''
import itertools
import random
import unittest
import numpy as np
from transformers import is_speech_available
from transformers.testing_utils import require_torch, require_torchaudio
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
if is_speech_available():
from transformers import SpeechaTextFeatureExtractor
_SCREAMING_SNAKE_CASE = random.Random()
def __a(SCREAMING_SNAKE_CASE_ : List[str] , SCREAMING_SNAKE_CASE_ : List[Any]=1.0 , SCREAMING_SNAKE_CASE_ : Optional[Any]=None , SCREAMING_SNAKE_CASE_ : List[Any]=None ):
'''simple docstring'''
if rng is None:
_lowerCAmelCase = global_rng
_lowerCAmelCase = []
for batch_idx in range(shape[0] ):
values.append([] )
for _ in range(shape[1] ):
values[-1].append(rng.random() * scale )
return values
@require_torch
@require_torchaudio
class lowerCAmelCase_ ( unittest.TestCase ):
def __init__( self , _lowerCAmelCase , _lowerCAmelCase=7 , _lowerCAmelCase=400 , _lowerCAmelCase=2000 , _lowerCAmelCase=24 , _lowerCAmelCase=24 , _lowerCAmelCase=0.0 , _lowerCAmelCase=16000 , _lowerCAmelCase=True , _lowerCAmelCase=True , ) -> Optional[int]:
_lowerCAmelCase = parent
_lowerCAmelCase = batch_size
_lowerCAmelCase = min_seq_length
_lowerCAmelCase = max_seq_length
_lowerCAmelCase = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
_lowerCAmelCase = feature_size
_lowerCAmelCase = num_mel_bins
_lowerCAmelCase = padding_value
_lowerCAmelCase = sampling_rate
_lowerCAmelCase = return_attention_mask
_lowerCAmelCase = do_normalize
def _snake_case ( self ) -> str:
return {
"feature_size": self.feature_size,
"num_mel_bins": self.num_mel_bins,
"padding_value": self.padding_value,
"sampling_rate": self.sampling_rate,
"return_attention_mask": self.return_attention_mask,
"do_normalize": self.do_normalize,
}
def _snake_case ( self , _lowerCAmelCase=False , _lowerCAmelCase=False ) -> List[Any]:
def _flatten(_lowerCAmelCase ):
return list(itertools.chain(*_lowerCAmelCase ) )
if equal_length:
_lowerCAmelCase = [floats_list((self.max_seq_length, self.feature_size) ) for _ in range(self.batch_size )]
else:
# make sure that inputs increase in size
_lowerCAmelCase = [
floats_list((x, self.feature_size) )
for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff )
]
if numpify:
_lowerCAmelCase = [np.asarray(_lowerCAmelCase ) for x in speech_inputs]
return speech_inputs
@require_torch
@require_torchaudio
class lowerCAmelCase_ ( __magic_name__ ,unittest.TestCase ):
__lowerCamelCase : Union[str, Any] = SpeechaTextFeatureExtractor if is_speech_available() else None
def _snake_case ( self ) -> Optional[int]:
_lowerCAmelCase = SpeechaTextFeatureExtractionTester(self )
def _snake_case ( self , _lowerCAmelCase ) -> Optional[int]:
self.assertTrue(np.all(np.mean(_lowerCAmelCase , axis=0 ) < 1E-3 ) )
self.assertTrue(np.all(np.abs(np.var(_lowerCAmelCase , axis=0 ) - 1 ) < 1E-3 ) )
def _snake_case ( self ) -> List[str]:
# Tests that all call wrap to encode_plus and batch_encode_plus
_lowerCAmelCase = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
# create three inputs of length 800, 1000, and 1200
_lowerCAmelCase = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )]
_lowerCAmelCase = [np.asarray(_lowerCAmelCase ) for speech_input in speech_inputs]
# Test feature size
_lowerCAmelCase = feature_extractor(_lowerCAmelCase , padding=_lowerCAmelCase , return_tensors="np" ).input_features
self.assertTrue(input_features.ndim == 3 )
self.assertTrue(input_features.shape[-1] == feature_extractor.feature_size )
# Test not batched input
_lowerCAmelCase = feature_extractor(speech_inputs[0] , return_tensors="np" ).input_features
_lowerCAmelCase = feature_extractor(np_speech_inputs[0] , return_tensors="np" ).input_features
self.assertTrue(np.allclose(_lowerCAmelCase , _lowerCAmelCase , atol=1E-3 ) )
# Test batched
_lowerCAmelCase = feature_extractor(_lowerCAmelCase , return_tensors="np" ).input_features
_lowerCAmelCase = feature_extractor(_lowerCAmelCase , return_tensors="np" ).input_features
for enc_seq_a, enc_seq_a in zip(_lowerCAmelCase , _lowerCAmelCase ):
self.assertTrue(np.allclose(_lowerCAmelCase , _lowerCAmelCase , atol=1E-3 ) )
# Test 2-D numpy arrays are batched.
_lowerCAmelCase = [floats_list((1, x) )[0] for x in (800, 800, 800)]
_lowerCAmelCase = np.asarray(_lowerCAmelCase )
_lowerCAmelCase = feature_extractor(_lowerCAmelCase , return_tensors="np" ).input_features
_lowerCAmelCase = feature_extractor(_lowerCAmelCase , return_tensors="np" ).input_features
for enc_seq_a, enc_seq_a in zip(_lowerCAmelCase , _lowerCAmelCase ):
self.assertTrue(np.allclose(_lowerCAmelCase , _lowerCAmelCase , atol=1E-3 ) )
def _snake_case ( self ) -> str:
_lowerCAmelCase = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
_lowerCAmelCase = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )]
_lowerCAmelCase = ["longest", "max_length", "do_not_pad"]
_lowerCAmelCase = [None, 16, None]
for max_length, padding in zip(_lowerCAmelCase , _lowerCAmelCase ):
_lowerCAmelCase = feature_extractor(
_lowerCAmelCase , padding=_lowerCAmelCase , max_length=_lowerCAmelCase , return_attention_mask=_lowerCAmelCase )
_lowerCAmelCase = inputs.input_features
_lowerCAmelCase = inputs.attention_mask
_lowerCAmelCase = [np.sum(_lowerCAmelCase ) for x in attention_mask]
self._check_zero_mean_unit_variance(input_features[0][: fbank_feat_lengths[0]] )
self._check_zero_mean_unit_variance(input_features[1][: fbank_feat_lengths[1]] )
self._check_zero_mean_unit_variance(input_features[2][: fbank_feat_lengths[2]] )
def _snake_case ( self ) -> List[str]:
_lowerCAmelCase = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
_lowerCAmelCase = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )]
_lowerCAmelCase = ["longest", "max_length", "do_not_pad"]
_lowerCAmelCase = [None, 16, None]
for max_length, padding in zip(_lowerCAmelCase , _lowerCAmelCase ):
_lowerCAmelCase = feature_extractor(
_lowerCAmelCase , max_length=_lowerCAmelCase , padding=_lowerCAmelCase , return_tensors="np" , return_attention_mask=_lowerCAmelCase )
_lowerCAmelCase = inputs.input_features
_lowerCAmelCase = inputs.attention_mask
_lowerCAmelCase = [np.sum(_lowerCAmelCase ) for x in attention_mask]
self._check_zero_mean_unit_variance(input_features[0][: fbank_feat_lengths[0]] )
self.assertTrue(input_features[0][fbank_feat_lengths[0] :].sum() < 1E-6 )
self._check_zero_mean_unit_variance(input_features[1][: fbank_feat_lengths[1]] )
self.assertTrue(input_features[0][fbank_feat_lengths[1] :].sum() < 1E-6 )
self._check_zero_mean_unit_variance(input_features[2][: fbank_feat_lengths[2]] )
def _snake_case ( self ) -> Optional[Any]:
_lowerCAmelCase = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
_lowerCAmelCase = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )]
_lowerCAmelCase = feature_extractor(
_lowerCAmelCase , padding="max_length" , max_length=4 , truncation=_lowerCAmelCase , return_tensors="np" , return_attention_mask=_lowerCAmelCase , )
_lowerCAmelCase = inputs.input_features
_lowerCAmelCase = inputs.attention_mask
_lowerCAmelCase = np.sum(attention_mask == 1 , axis=1 )
self._check_zero_mean_unit_variance(input_features[0, : fbank_feat_lengths[0]] )
self._check_zero_mean_unit_variance(input_features[1] )
self._check_zero_mean_unit_variance(input_features[2] )
def _snake_case ( self ) -> List[str]:
_lowerCAmelCase = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
_lowerCAmelCase = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )]
_lowerCAmelCase = feature_extractor(
_lowerCAmelCase , padding="longest" , max_length=4 , truncation=_lowerCAmelCase , return_tensors="np" , return_attention_mask=_lowerCAmelCase , )
_lowerCAmelCase = inputs.input_features
_lowerCAmelCase = inputs.attention_mask
_lowerCAmelCase = np.sum(attention_mask == 1 , axis=1 )
self._check_zero_mean_unit_variance(input_features[0, : fbank_feat_lengths[0]] )
self._check_zero_mean_unit_variance(input_features[1, : fbank_feat_lengths[1]] )
self._check_zero_mean_unit_variance(input_features[2] )
# make sure that if max_length < longest -> then pad to max_length
self.assertEqual(input_features.shape , (3, 4, 24) )
_lowerCAmelCase = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )]
_lowerCAmelCase = feature_extractor(
_lowerCAmelCase , padding="longest" , max_length=16 , truncation=_lowerCAmelCase , return_tensors="np" , return_attention_mask=_lowerCAmelCase , )
_lowerCAmelCase = inputs.input_features
_lowerCAmelCase = inputs.attention_mask
_lowerCAmelCase = np.sum(attention_mask == 1 , axis=1 )
self._check_zero_mean_unit_variance(input_features[0, : fbank_feat_lengths[0]] )
self._check_zero_mean_unit_variance(input_features[1, : fbank_feat_lengths[1]] )
self._check_zero_mean_unit_variance(input_features[2] )
# make sure that if max_length < longest -> then pad to max_length
self.assertEqual(input_features.shape , (3, 6, 24) )
def _snake_case ( self ) -> List[str]:
import torch
_lowerCAmelCase = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
_lowerCAmelCase = np.random.rand(100 , 32 ).astype(np.floataa )
_lowerCAmelCase = np_speech_inputs.tolist()
for inputs in [py_speech_inputs, np_speech_inputs]:
_lowerCAmelCase = feature_extractor.pad([{"input_features": inputs}] , return_tensors="np" )
self.assertTrue(np_processed.input_features.dtype == np.floataa )
_lowerCAmelCase = feature_extractor.pad([{"input_features": inputs}] , return_tensors="pt" )
self.assertTrue(pt_processed.input_features.dtype == torch.floataa )
def _snake_case ( self , _lowerCAmelCase ) -> Optional[Any]:
from datasets import load_dataset
_lowerCAmelCase = load_dataset("hf-internal-testing/librispeech_asr_dummy" , "clean" , split="validation" )
# automatic decoding with librispeech
_lowerCAmelCase = ds.sort("id" ).select(range(_lowerCAmelCase ) )[:num_samples]["audio"]
return [x["array"] for x in speech_samples]
def _snake_case ( self ) -> Any:
# fmt: off
_lowerCAmelCase = np.array([
-1.5745, -1.7713, -1.7020, -1.6069, -1.2250, -1.1105, -0.9072, -0.8241,
-1.2310, -0.8098, -0.3320, -0.4101, -0.7985, -0.4996, -0.8213, -0.9128,
-1.0420, -1.1286, -1.0440, -0.7999, -0.8405, -1.2275, -1.5443, -1.4625,
] )
# fmt: on
_lowerCAmelCase = self._load_datasamples(1 )
_lowerCAmelCase = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
_lowerCAmelCase = feature_extractor(_lowerCAmelCase , return_tensors="pt" ).input_features
        self.assertEqual(input_features.shape , (1, 584, 24) )
self.assertTrue(np.allclose(input_features[0, 0, :30] , _lowerCAmelCase , atol=1E-4 ) )
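# Hedged sketch of the property `_check_zero_mean_unit_variance` verifies: each
# feature dimension is standardized over the time axis (per-utterance CMVN).
#
#   feats = np.random.rand(584, 24).astype(np.float32)
#   normed = (feats - feats.mean(axis=0)) / np.sqrt(feats.var(axis=0) + 1e-5)
#   # normed.mean(axis=0) ≈ 0 and normed.var(axis=0) ≈ 1 within 1e-3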
| 18 |
"""simple docstring"""
from __future__ import annotations
class BoyerMooreSearch:
    """String search using the bad character heuristic of the Boyer-Moore algorithm."""

    def __init__(self, text: str, pattern: str):
        self.text, self.pattern = text, pattern
        self.textLen, self.patLen = len(text), len(pattern)

    def match_in_pattern(self, char: str) -> int:
        """Return the rightmost index of `char` in the pattern, or -1 if absent."""
        for i in range(self.patLen - 1, -1, -1):
            if char == self.pattern[i]:
                return i
        return -1

    def mismatch_in_text(self, current_pos: int) -> int:
        """Return the text index of the rightmost mismatch for the window at `current_pos`, or -1 on a full match."""
        for i in range(self.patLen - 1, -1, -1):
            if self.pattern[i] != self.text[current_pos + i]:
                return current_pos + i
        return -1

    def bad_character_heuristic(self) -> list[int]:
        # searches pattern in text and returns index positions
        positions = []
        for i in range(self.textLen - self.patLen + 1):
            mismatch_index = self.mismatch_in_text(i)
            if mismatch_index == -1:
                positions.append(i)
            else:
                match_index = self.match_in_pattern(self.text[mismatch_index])
                i = (
                    mismatch_index - match_index
                )  # shifting index lgtm [py/multiple-definition]
        return positions
text = '''ABAABA'''
pattern = '''AB'''
bms = BoyerMooreSearch(text, pattern)
positions = bms.bad_character_heuristic()
if len(positions) == 0:
print('''No match found''')
else:
print('''Pattern found in following positions: ''')
print(positions)
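# Worked example for the hard-coded inputs above: "AB" occurs at indices 0 and 3
# of "ABAABA", so the heuristic reports both occurrences.
assert BoyerMooreSearch("ABAABA", "AB").bad_character_heuristic() == [0, 3]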
| 52 | 0 |
"""simple docstring"""
from __future__ import annotations
import random
# Maximum size of the population. Bigger could be faster but is more memory expensive.
_a = 200
# Number of elements selected in every generation of evolution. The selection takes
# place from best to worst of that generation and must be smaller than N_POPULATION.
_a = 50
# Probability that an element of a generation can mutate, changing one of its genes.
# This will guarantee that all genes will be used during evolution.
_a = 0.4
# Just a seed to improve randomness required by the algorithm.
random.seed(random.randint(0, 1000))
def evaluate(item: str, main_target: str) -> tuple[str, float]:
    """simple docstring"""
    score = len([g for position, g in enumerate(item) if g == main_target[position]])
    return (item, float(score))


def crossover(parent_1: str, parent_2: str) -> tuple[str, str]:
    """simple docstring"""
    random_slice = random.randint(0, len(parent_1) - 1)
    child_1 = parent_1[:random_slice] + parent_2[random_slice:]
    child_2 = parent_2[:random_slice] + parent_1[random_slice:]
    return (child_1, child_2)


def mutate(child: str, genes: list[str]) -> str:
    """simple docstring"""
    child_list = list(child)
    if random.uniform(0, 1) < MUTATION_PROBABILITY:
        child_list[random.randint(0, len(child) - 1)] = random.choice(genes)
    return "".join(child_list)


def select(parent_1: tuple[str, float], population_score: list[tuple[str, float]], genes: list[str], ) -> list[str]:
    """simple docstring"""
    pop = []
    # Generate more children proportionally to the fitness score.
    child_n = int(parent_1[1] * 1_00) + 1
    child_n = 10 if child_n >= 10 else child_n
    for _ in range(child_n):
        parent_2 = population_score[random.randint(0, N_SELECTED)][0]
        child_1, child_2 = crossover(parent_1[0], parent_2)
        # Append new string to the population list.
        pop.append(mutate(child_1, genes))
        pop.append(mutate(child_2, genes))
    return pop
def basic(target: str, genes: list[str], debug: bool = True) -> tuple[int, int, str]:
    """simple docstring"""
    if N_POPULATION < N_SELECTED:
        msg = f'''{N_POPULATION} must be bigger than {N_SELECTED}'''
        raise ValueError(msg)
    # Verify that the target contains no genes besides the ones inside genes variable.
    not_in_genes_list = sorted({c for c in target if c not in genes})
    if not_in_genes_list:
        msg = f'''{not_in_genes_list} is not in genes list, evolution cannot converge'''
        raise ValueError(msg)

    # Generate random starting population.
    population = []
    for _ in range(N_POPULATION):
        population.append("".join([random.choice(genes) for i in range(len(target))]))

    # Just some logs to know what the algorithm is doing.
    generation, total_population = 0, 0
# This loop will end when we find a perfect match for our target.
while True:
generation += 1
        total_population += len(population)
# Random population created. Now it's time to evaluate.
# Adding a bit of concurrency can make everything faster,
#
# import concurrent.futures
# population_score: list[tuple[str, float]] = []
# with concurrent.futures.ThreadPoolExecutor(
# max_workers=NUM_WORKERS) as executor:
# futures = {executor.submit(evaluate, item) for item in population}
# concurrent.futures.wait(futures)
# population_score = [item.result() for item in futures]
#
# but with a simple algorithm like this, it will probably be slower.
# We just need to call evaluate for every item inside the population.
        population_score = [evaluate(item, target) for item in population]
# Check if there is a matching evolution.
        population_score = sorted(population_score, key=lambda x: x[1], reverse=True)
if population_score[0][0] == target:
return (generation, total_population, population_score[0][0])
# Print the best result every 10 generation.
# Just to know that the algorithm is working.
if debug and generation % 10 == 0:
print(
F'''\nGeneration: {generation}'''
F'''\nTotal Population:{total_population}'''
F'''\nBest score: {population_score[0][1]}'''
F'''\nBest string: {population_score[0][0]}''' )
# Flush the old population, keeping some of the best evolutions.
# Keeping this avoid regression of evolution.
        population_best = population[: int(N_POPULATION / 3)]
        population.clear()
        population.extend(population_best)
        # Normalize population score to be between 0 and 1.
        population_score = [
            (item, score / len(target)) for item, score in population_score
        ]

        # This is selection
        for i in range(N_SELECTED):
            population.extend(select(population_score[int(i)], population_score, genes))
# Check if the population has already reached the maximum value and if so,
# break the cycle. If this check is disabled, the algorithm will take
# forever to compute large strings, but will also calculate small strings in
# a far fewer generations.
            if len(population) > N_POPULATION:
break
if __name__ == "__main__":
_a = (
"""This is a genetic algorithm to evaluate, combine, evolve, and mutate a string!"""
)
_a = list(
""" ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklm"""
"""nopqrstuvwxyz.,;!?+-*#@^'èéòà€ù=)(&%$£/\\"""
)
    generation, population, target = basic(target_str, genes_list)
print(
F"""\nGeneration: {generation}\nTotal Population: {population}\nTarget: {target}"""
)
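# A quick sanity check of the helpers above: evaluate() counts position-wise
# matches against the target, and crossover() always preserves string length.
assert evaluate("BANANA", "BANANA") == ("BANANA", 6.0)
demo_child_1, demo_child_2 = crossover("AAAA", "BBBB")
assert len(demo_child_1) == len(demo_child_2) == 4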
| 19 |
"""simple docstring"""
import webbrowser
from sys import argv
from urllib.parse import parse_qs, quote
import requests
from bs4 import BeautifulSoup
from fake_useragent import UserAgent
if __name__ == "__main__":
    query = '''%20'''.join(argv[1:]) if len(argv) > 1 else quote(str(input('''Search: ''')))
print('''Googling.....''')
A = F'https://www.google.com/search?q={query}&num=100'
    res = requests.get(
url,
headers={'''User-Agent''': str(UserAgent().random)},
)
try:
        link = (
BeautifulSoup(res.text, '''html.parser''')
.find('''div''', attrs={'''class''': '''yuRUbf'''})
.find('''a''')
.get('''href''')
)
except AttributeError:
        link = parse_qs(
BeautifulSoup(res.text, '''html.parser''')
.find('''div''', attrs={'''class''': '''kCrYT'''})
.find('''a''')
.get('''href''')
)['''url'''][0]
webbrowser.open(link)
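# How the AttributeError fallback above recovers a destination URL: the lite
# results page wraps links as "/url?q=<target>&sa=U&...", and parse_qs pulls a
# parameter out of that query string. Hedged illustration on a made-up query
# string (Google's real markup and parameter names change over time):
assert parse_qs("url=https://example.com&sa=U")["url"][0] == "https://example.com"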
| 52 | 0 |
import inspect
import unittest
import numpy as np
from transformers import BeitConfig
from transformers.testing_utils import require_flax, require_vision, slow
from transformers.utils import cached_property, is_flax_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor
if is_flax_available():
import jax
from transformers import FlaxBeitForImageClassification, FlaxBeitForMaskedImageModeling, FlaxBeitModel
if is_vision_available():
from PIL import Image
from transformers import BeitImageProcessor
class FlaxBeitModelTester(unittest.TestCase):
    def __init__(self, parent, vocab_size=100, batch_size=13, image_size=30, patch_size=2, num_channels=3, is_training=True, use_labels=True, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, type_sequence_label_size=10, initializer_range=0.02, num_labels=3, ):
        self.parent = parent
        self.vocab_size = vocab_size
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        # in BeiT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = num_patches + 1
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)

        config = BeitConfig(
            vocab_size=self.vocab_size, image_size=self.image_size, patch_size=self.patch_size, num_channels=self.num_channels, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, is_decoder=False, initializer_range=self.initializer_range, )

        return config, pixel_values, labels
    def create_and_check_model(self, config, pixel_values, labels):
        model = FlaxBeitModel(config=config)
        result = model(pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_masked_lm(self, config, pixel_values, labels):
        model = FlaxBeitForMaskedImageModeling(config=config)
        result = model(pixel_values)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length - 1, self.vocab_size))

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.type_sequence_label_size
        model = FlaxBeitForImageClassification(config=config)
        result = model(pixel_values)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

        # test greyscale images
        config.num_channels = 1
        model = FlaxBeitForImageClassification(config)
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values)
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            pixel_values,
            labels,
        ) = config_and_inputs
        inputs_dict = {'pixel_values': pixel_values}
        return config, inputs_dict
@require_flax
class FlaxBeitModelTest(FlaxModelTesterMixin, unittest.TestCase):
    all_model_classes = (
        (FlaxBeitModel, FlaxBeitForImageClassification, FlaxBeitForMaskedImageModeling) if is_flax_available() else ()
    )

    def setUp(self) -> None:
        self.model_tester = FlaxBeitModelTester(self)
        self.config_tester = ConfigTester(self, config_class=BeitConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()
    def test_forward_signature(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.__call__)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ['pixel_values']
            self.assertListEqual(arg_names[:1], expected_arg_names)
    def test_jit_compilation(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                prepared_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
                model = model_class(config)

                @jax.jit
                def model_jitted(pixel_values, **kwargs):
                    return model(pixel_values=pixel_values, **kwargs)

                with self.subTest('JIT Enabled'):
                    jitted_outputs = model_jitted(**prepared_inputs_dict).to_tuple()

                with self.subTest('JIT Disabled'):
                    with jax.disable_jit():
                        outputs = model_jitted(**prepared_inputs_dict).to_tuple()

                self.assertEqual(len(outputs), len(jitted_outputs))
                for jitted_output, output in zip(jitted_outputs, outputs):
                    self.assertEqual(jitted_output.shape, output.shape)
    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)
@slow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained('microsoft/beit-base-patch16-224')
            outputs = model(np.ones((1, 3, 224, 224)))
            self.assertIsNotNone(outputs)
def prepare_img():
    image = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png')
    return image
@require_vision
@require_flax
class FlaxBeitModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return BeitImageProcessor.from_pretrained('microsoft/beit-base-patch16-224') if is_vision_available() else None
@slow
    def test_inference_masked_image_modeling_head(self):
        model = FlaxBeitForMaskedImageModeling.from_pretrained('microsoft/beit-base-patch16-224-pt22k')

        image_processor = self.default_image_processor
        image = prepare_img()
        pixel_values = image_processor(images=image, return_tensors='np').pixel_values

        # prepare bool_masked_pos
        bool_masked_pos = np.ones((1, 196), dtype=bool)

        # forward pass
        outputs = model(pixel_values=pixel_values, bool_masked_pos=bool_masked_pos)
        logits = outputs.logits

        # verify the logits
        expected_shape = (1, 196, 8192)
        self.assertEqual(logits.shape, expected_shape)

        expected_slice = np.array(
            [[-3.2437, 0.5072, -13.9174], [-3.2456, 0.4948, -13.9401], [-3.2033, 0.5121, -13.8550]])
        self.assertTrue(np.allclose(logits[bool_masked_pos][:3, :3], expected_slice, atol=1e-2))
@slow
    def test_inference_image_classification_head_imagenet_1k(self):
        model = FlaxBeitForImageClassification.from_pretrained('microsoft/beit-base-patch16-224')

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors='np')

        # forward pass
        outputs = model(**inputs)
        logits = outputs.logits

        # verify the logits
        expected_shape = (1, 1000)
        self.assertEqual(logits.shape, expected_shape)

        expected_slice = np.array([-1.2385, -1.0987, -1.0108])
        self.assertTrue(np.allclose(logits[0, :3], expected_slice, atol=1e-4))

        expected_class_idx = 281
        self.assertEqual(logits.argmax(-1).item(), expected_class_idx)
@slow
    def test_inference_image_classification_head_imagenet_22k(self):
        model = FlaxBeitForImageClassification.from_pretrained('microsoft/beit-large-patch16-224-pt22k-ft22k')

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors='np')

        # forward pass
        outputs = model(**inputs)
        logits = outputs.logits

        # verify the logits
        expected_shape = (1, 21841)
        self.assertEqual(logits.shape, expected_shape)

        expected_slice = np.array([1.6881, -0.2787, 0.5901])
        self.assertTrue(np.allclose(logits[0, :3], expected_slice, atol=1e-4))

        expected_class_idx = 2396
        self.assertEqual(logits.argmax(-1).item(), expected_class_idx)
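# The "(image_size // patch_size) ** 2 + 1" sequence length used throughout this
# file, checked for the two geometries that appear above:
assert (30 // 2) ** 2 + 1 == 226    # tester default: 15 x 15 patches + [CLS]
assert (224 // 16) ** 2 + 1 == 197  # beit-base-patch16-224: 196 patches + [CLS]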
| 20 |
"""simple docstring"""
import inspect
import os
import unittest
from dataclasses import dataclass
import torch
from accelerate import Accelerator, DistributedDataParallelKwargs, GradScalerKwargs
from accelerate.state import AcceleratorState
from accelerate.test_utils import execute_subprocess_async, require_cuda, require_multi_gpu
from accelerate.utils import KwargsHandler
@dataclass
class MockClass(KwargsHandler):
    '''simple docstring'''

    a: int = 0
    b: bool = False
    c: float = 3.0
class KwargsHandlerTester(unittest.TestCase):
    '''simple docstring'''

    def test_kwargs_handler(self):
# If no defaults are changed, `to_kwargs` returns an empty dict.
self.assertDictEqual(MockClass().to_kwargs() , {} )
self.assertDictEqual(MockClass(a=2 ).to_kwargs() , {'''a''': 2} )
        self.assertDictEqual(MockClass(a=2, b=True).to_kwargs(), {'''a''': 2, '''b''': True})
self.assertDictEqual(MockClass(a=2 , c=2.2_5 ).to_kwargs() , {'''a''': 2, '''c''': 2.2_5} )
@require_cuda
    def test_grad_scaler_kwargs(self):
        # If no defaults are changed, `to_kwargs` returns an empty dict.
        scaler_handler = GradScalerKwargs(init_scale=1024, growth_factor=2)
        AcceleratorState._reset_state()
        accelerator = Accelerator(mixed_precision='''fp16''', kwargs_handlers=[scaler_handler])
        print(accelerator.use_fp16)
        scaler = accelerator.scaler
# Check the kwargs have been applied
self.assertEqual(scaler._init_scale , 1_0_2_4.0 )
self.assertEqual(scaler._growth_factor , 2.0 )
# Check the other values are at the default
self.assertEqual(scaler._backoff_factor , 0.5 )
self.assertEqual(scaler._growth_interval , 2000 )
        self.assertEqual(scaler._enabled, True)
@require_multi_gpu
    def test_ddp_kwargs(self):
        cmd = ['''torchrun''', f"""--nproc_per_node={torch.cuda.device_count()}""", inspect.getfile(self.__class__)]
        execute_subprocess_async(cmd, env=os.environ.copy())
if __name__ == "__main__":
    ddp_scaler = DistributedDataParallelKwargs(bucket_cap_mb=15, find_unused_parameters=True)
    accelerator = Accelerator(kwargs_handlers=[ddp_scaler])
    model = torch.nn.Linear(100, 200)
    model = accelerator.prepare(model)

    # Check the values changed in kwargs
    error_msg = ''''''
    observed_bucket_cap_map = model.bucket_bytes_cap // (1_024 * 1_024)
if observed_bucket_cap_map != 15:
error_msg += F"Kwargs badly passed, should have `15` but found {observed_bucket_cap_map}.\n"
if model.find_unused_parameters is not True:
error_msg += F"Kwargs badly passed, should have `True` but found {model.find_unused_parameters}.\n"
# Check the values of the defaults
if model.dim != 0:
error_msg += F"Default value not respected, should have `0` but found {model.dim}.\n"
if model.broadcast_buffers is not True:
error_msg += F"Default value not respected, should have `True` but found {model.broadcast_buffers}.\n"
if model.gradient_as_bucket_view is not False:
error_msg += F"Default value not respected, should have `False` but found {model.gradient_as_bucket_view}.\n"
# Raise error at the end to make sure we don't stop at the first failure.
if len(error_msg) > 0:
raise ValueError(error_msg)
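# The to_kwargs() behavior exercised above (only fields that differ from their
# defaults survive) can be sketched as a dataclass diff against a freshly
# constructed instance. An illustrative re-implementation, not accelerate's
# actual code; it assumes every field has a default value:
from dataclasses import fields


class KwargsHandlerSketch:
    def to_kwargs(self):
        default_instance = self.__class__()
        return {
            f.name: getattr(self, f.name)
            for f in fields(self)
            if getattr(self, f.name) != getattr(default_instance, f.name)
        }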
| 52 | 0 |
import json
import os
import tempfile
import transformers
import datasets
from utils import generate_example_dataset, get_duration
SPEED_TEST_N_EXAMPLES = 500000
RESULTS_BASEPATH, RESULTS_FILENAME = os.path.split(__file__)
RESULTS_FILE_PATH = os.path.join(RESULTS_BASEPATH, "results", RESULTS_FILENAME.replace(".py", ".json"))
@get_duration
def map(dataset: datasets.Dataset, **kwargs):
    _ = dataset.map(**kwargs)


@get_duration
def filter(dataset: datasets.Dataset, **kwargs):
    _ = dataset.filter(**kwargs)
def benchmark_map_filter():
    times = {"num examples": SPEED_TEST_N_EXAMPLES}
    with tempfile.TemporaryDirectory() as tmp_dir:
        features = datasets.Features({"text": datasets.Value("string"), "numbers": datasets.Value("float32")})

        dataset = generate_example_dataset(
            os.path.join(tmp_dir, "dataset.arrow"), features, num_examples=SPEED_TEST_N_EXAMPLES)

        tokenizer = transformers.AutoTokenizer.from_pretrained("bert-base-cased", use_fast=True)

        def tokenize(examples):
            return tokenizer(examples["text"])

        times["map identity"] = map(dataset)

        times["map identity batched"] = map(dataset, batched=True)

        times["map no-op batched"] = map(dataset, function=lambda x: None, batched=True)

        with dataset.formatted_as(type="numpy"):
            times["map no-op batched numpy"] = map(dataset, function=lambda x: None, batched=True)

        with dataset.formatted_as(type="pandas"):
            times["map no-op batched pandas"] = map(dataset, function=lambda x: None, batched=True)

        with dataset.formatted_as(type="torch", columns="numbers"):
            times["map no-op batched pytorch"] = map(dataset, function=lambda x: None, batched=True)

        with dataset.formatted_as(type="tensorflow", columns="numbers"):
            times["map no-op batched tensorflow"] = map(dataset, function=lambda x: None, batched=True)

        times["map fast-tokenizer batched"] = map(dataset, function=tokenize, batched=True)

        times["filter"] = filter(dataset)

        # Activate later when tokenizer support batched inputs
        # with dataset.formatted_as(type='numpy'):
        #     times[func.__name__ + " fast-tokenizer batched numpy"] = func(dataset, function=tokenize, batched=True)

    with open(RESULTS_FILE_PATH, "wb") as f:
        f.write(json.dumps(times).encode("utf-8"))
if __name__ == "__main__": # useful to run the profiler
benchmark_map_filter()
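# `get_duration` comes from a local utils module that is not part of this file.
# A minimal sketch of such a timing decorator, assuming it simply returns the
# elapsed wall-clock seconds of the wrapped call:
import functools
import time


def get_duration_sketch(func):
    @functools.wraps(func)
    def wrapper(*args, **kwargs):
        start = time.time()
        func(*args, **kwargs)
        return time.time() - start  # elapsed seconds become the benchmark value
    return wrapper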
| 21 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
A = {
'''configuration_tapas''': ['''TAPAS_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''TapasConfig'''],
'''tokenization_tapas''': ['''TapasTokenizer'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A = [
'''TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TapasForMaskedLM''',
'''TapasForQuestionAnswering''',
'''TapasForSequenceClassification''',
'''TapasModel''',
'''TapasPreTrainedModel''',
'''load_tf_weights_in_tapas''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A = [
'''TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFTapasForMaskedLM''',
'''TFTapasForQuestionAnswering''',
'''TFTapasForSequenceClassification''',
'''TFTapasModel''',
'''TFTapasPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_tapas import TAPAS_PRETRAINED_CONFIG_ARCHIVE_MAP, TapasConfig
from .tokenization_tapas import TapasTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tapas import (
TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST,
TapasForMaskedLM,
TapasForQuestionAnswering,
TapasForSequenceClassification,
TapasModel,
TapasPreTrainedModel,
load_tf_weights_in_tapas,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_tapas import (
TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST,
TFTapasForMaskedLM,
TFTapasForQuestionAnswering,
TFTapasForSequenceClassification,
TFTapasModel,
TFTapasPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
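# Usage note: once sys.modules[__name__] points at the _LazyModule, importing
# names from this package stays cheap until first attribute access, e.g.
#   >>> from transformers import TapasConfig  # does not pull in torch yet
#   >>> TapasConfig()                         # materializes configuration_tapas only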
| 52 | 0 |
'''simple docstring'''
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from argparse import ArgumentParser
from accelerate.commands.config import get_config_parser
from accelerate.commands.env import env_command_parser
from accelerate.commands.launch import launch_command_parser
from accelerate.commands.test import test_command_parser
from accelerate.commands.tpu import tpu_command_parser
def main():
    '''simple docstring'''
    parser = ArgumentParser('''accelerate CLI tool''', usage='''accelerate <command> [<args>]''', allow_abbrev=False)
    subparsers = parser.add_subparsers(help='''accelerate command helpers''')

    # Register commands
    get_config_parser(subparsers=subparsers)
    env_command_parser(subparsers=subparsers)
    launch_command_parser(subparsers=subparsers)
    tpu_command_parser(subparsers=subparsers)
    test_command_parser(subparsers=subparsers)

    # Let's go
    args = parser.parse_args()

    if not hasattr(args, '''func'''):
        parser.print_help()
        exit(1)

    # Run
    args.func(args)
if __name__ == "__main__":
main()
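# Each *_command_parser helper follows the standard argparse subcommand pattern:
# register a sub-parser and bind a callable via set_defaults(func=...), which is
# what the hasattr(args, 'func') check above relies on. A hypothetical helper in
# the same style (illustrative only, not accelerate's real code):
def example_command_parser(subparsers):
    parser = subparsers.add_parser('example', help='Run the example command')
    parser.add_argument('--dry-run', action='store_true')
    parser.set_defaults(func=lambda args: print('dry run:', args.dry_run))
    return parser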
| 22 |
"""simple docstring"""
import os
import re
import sys
import traceback
import warnings
from pathlib import Path
from typing import Dict, Optional, Union
from uuid import uuid4
from huggingface_hub import HfFolder, ModelCard, ModelCardData, hf_hub_download, whoami
from huggingface_hub.file_download import REGEX_COMMIT_HASH
from huggingface_hub.utils import (
EntryNotFoundError,
RepositoryNotFoundError,
RevisionNotFoundError,
is_jinja_available,
)
from packaging import version
from requests import HTTPError
from .. import __version__
from .constants import (
DEPRECATED_REVISION_ARGS,
DIFFUSERS_CACHE,
HUGGINGFACE_CO_RESOLVE_ENDPOINT,
SAFETENSORS_WEIGHTS_NAME,
WEIGHTS_NAME,
)
from .import_utils import (
ENV_VARS_TRUE_VALUES,
_flax_version,
_jax_version,
_onnxruntime_version,
_torch_version,
is_flax_available,
is_onnx_available,
is_torch_available,
)
from .logging import get_logger
logger = get_logger(__name__)

MODEL_CARD_TEMPLATE_PATH = Path(__file__).parent / '''model_card_template.md'''
SESSION_ID = uuid4().hex
HF_HUB_OFFLINE = os.getenv('''HF_HUB_OFFLINE''', '''''').upper() in ENV_VARS_TRUE_VALUES
DISABLE_TELEMETRY = os.getenv('''DISABLE_TELEMETRY''', '''''').upper() in ENV_VARS_TRUE_VALUES
HUGGINGFACE_CO_TELEMETRY = HUGGINGFACE_CO_RESOLVE_ENDPOINT + '''/api/telemetry/'''
def http_user_agent(user_agent: Union[Dict, str, None] = None) -> str:
    ua = f"""diffusers/{__version__}; python/{sys.version.split()[0]}; session_id/{SESSION_ID}"""
if DISABLE_TELEMETRY or HF_HUB_OFFLINE:
return ua + "; telemetry/off"
if is_torch_available():
ua += F"""; torch/{_torch_version}"""
if is_flax_available():
ua += F"""; jax/{_jax_version}"""
ua += F"""; flax/{_flax_version}"""
if is_onnx_available():
ua += F"""; onnxruntime/{_onnxruntime_version}"""
# CI will set this value to True
if os.environ.get('''DIFFUSERS_IS_CI''' , '''''').upper() in ENV_VARS_TRUE_VALUES:
ua += "; is_ci/true"
    if isinstance(user_agent, dict):
        ua += "; " + "; ".join(f"""{k}/{v}""" for k, v in user_agent.items())
    elif isinstance(user_agent, str):
        ua += "; " + user_agent
return ua
def get_full_repo_name(model_id: str, organization: Optional[str] = None, token: Optional[str] = None):
    if token is None:
        token = HfFolder.get_token()
    if organization is None:
        username = whoami(token)['''name''']
        return f"""{username}/{model_id}"""
    else:
        return f"""{organization}/{model_id}"""
def create_model_card(args, model_name):
    if not is_jinja_available():
        raise ValueError(
            '''Modelcard rendering is based on Jinja templates.'''
            ''' Please make sure to have `jinja` installed before using `create_model_card`.'''
            ''' To install it, please run `pip install Jinja2`.''')

    if hasattr(args, '''local_rank''') and args.local_rank not in [-1, 0]:
        return

    hub_token = args.hub_token if hasattr(args, '''hub_token''') else None
    repo_name = get_full_repo_name(model_name, token=hub_token)

    model_card = ModelCard.from_template(
        card_data=ModelCardData(  # Card metadata object that will be converted to YAML block
            language='''en''', license='''apache-2.0''', library_name='''diffusers''', tags=[], datasets=args.dataset_name, metrics=[], ), template_path=MODEL_CARD_TEMPLATE_PATH, model_name=model_name, repo_name=repo_name, dataset_name=args.dataset_name if hasattr(args, '''dataset_name''') else None, learning_rate=args.learning_rate, train_batch_size=args.train_batch_size, eval_batch_size=args.eval_batch_size, gradient_accumulation_steps=(
            args.gradient_accumulation_steps if hasattr(args, '''gradient_accumulation_steps''') else None
        ), adam_beta1=args.adam_beta1 if hasattr(args, '''adam_beta1''') else None, adam_beta2=args.adam_beta2 if hasattr(args, '''adam_beta2''') else None, adam_weight_decay=args.adam_weight_decay if hasattr(args, '''adam_weight_decay''') else None, adam_epsilon=args.adam_epsilon if hasattr(args, '''adam_epsilon''') else None, lr_scheduler=args.lr_scheduler if hasattr(args, '''lr_scheduler''') else None, lr_warmup_steps=args.lr_warmup_steps if hasattr(args, '''lr_warmup_steps''') else None, ema_inv_gamma=args.ema_inv_gamma if hasattr(args, '''ema_inv_gamma''') else None, ema_power=args.ema_power if hasattr(args, '''ema_power''') else None, ema_max_decay=args.ema_max_decay if hasattr(args, '''ema_max_decay''') else None, mixed_precision=args.mixed_precision, )
    card_path = os.path.join(args.output_dir, '''README.md''')
    model_card.save(card_path)
def extract_commit_hash(resolved_file: Optional[str], commit_hash: Optional[str] = None):
    if resolved_file is None or commit_hash is not None:
        return commit_hash
    resolved_file = str(Path(resolved_file).as_posix())
    search = re.search(R'''snapshots/([^/]+)/''', resolved_file)
    if search is None:
        return None
    commit_hash = search.groups()[0]
    return commit_hash if REGEX_COMMIT_HASH.match(commit_hash) else None
# Old default cache path, potentially to be migrated.
# This logic was more or less taken from `transformers`, with the following differences:
# - Diffusers doesn't use custom environment variables to specify the cache path.
# - There is no need to migrate the cache format, just move the files to the new location.
hf_cache_home = os.path.expanduser(
    os.getenv('''HF_HOME''', os.path.join(os.getenv('''XDG_CACHE_HOME''', '''~/.cache'''), '''huggingface'''))
)
old_diffusers_cache = os.path.join(hf_cache_home, '''diffusers''')
def move_cache(old_cache_dir: Optional[str] = None, new_cache_dir: Optional[str] = None) -> None:
    if new_cache_dir is None:
        new_cache_dir = DIFFUSERS_CACHE
    if old_cache_dir is None:
        old_cache_dir = old_diffusers_cache

    old_cache_dir = Path(old_cache_dir).expanduser()
    new_cache_dir = Path(new_cache_dir).expanduser()
    for old_blob_path in old_cache_dir.glob('''**/blobs/*'''):
        if old_blob_path.is_file() and not old_blob_path.is_symlink():
            new_blob_path = new_cache_dir / old_blob_path.relative_to(old_cache_dir)
            new_blob_path.parent.mkdir(parents=True, exist_ok=True)
            os.replace(old_blob_path, new_blob_path)
            try:
                os.symlink(new_blob_path, old_blob_path)
            except OSError:
                logger.warning(
                    '''Could not create symlink between old cache and new cache. If you use an older version of diffusers again, files will be re-downloaded.''')
# At this point, old_cache_dir contains symlinks to the new cache (it can still be used).
cache_version_file = os.path.join(DIFFUSERS_CACHE, '''version_diffusers_cache.txt''')
if not os.path.isfile(cache_version_file):
    cache_version = 0
else:
    with open(cache_version_file) as f:
        try:
            cache_version = int(f.read())
        except ValueError:
            cache_version = 0
if cache_version < 1:
    old_cache_is_not_empty = os.path.isdir(old_diffusers_cache) and len(os.listdir(old_diffusers_cache)) > 0
if old_cache_is_not_empty:
logger.warning(
'''The cache for model files in Diffusers v0.14.0 has moved to a new location. Moving your '''
'''existing cached models. This is a one-time operation, you can interrupt it or run it '''
'''later by calling `diffusers.utils.hub_utils.move_cache()`.'''
)
try:
move_cache()
except Exception as e:
            trace = '''\n'''.join(traceback.format_tb(e.__traceback__))
logger.error(
F'There was a problem when trying to move your cache:\n\n{trace}\n{e.__class__.__name__}: {e}\n\nPlease '
'''file an issue at https://github.com/huggingface/diffusers/issues/new/choose, copy paste this whole '''
'''message and we will do our best to help.'''
)
if cache_version < 1:
try:
os.makedirs(DIFFUSERS_CACHE, exist_ok=True)
with open(cache_version_file, '''w''') as f:
f.write('''1''')
except Exception:
logger.warning(
F'There was a problem when trying to write in your cache folder ({DIFFUSERS_CACHE}). Please, ensure '
'''the directory exists and can be written to.'''
)
def _add_variant(weights_name: str, variant: Optional[str] = None) -> str:
    if variant is not None:
        splits = weights_name.split('''.''')
        splits = splits[:-1] + [variant] + splits[-1:]
        weights_name = '''.'''.join(splits)
return weights_name
def _get_model_file(
    pretrained_model_name_or_path, *,
    weights_name, subfolder, cache_dir, force_download, proxies, resume_download, local_files_only, use_auth_token, user_agent, revision, commit_hash=None, ):
    pretrained_model_name_or_path = str(pretrained_model_name_or_path)
    if os.path.isfile(pretrained_model_name_or_path):
        return pretrained_model_name_or_path
    elif os.path.isdir(pretrained_model_name_or_path):
        if os.path.isfile(os.path.join(pretrained_model_name_or_path, weights_name)):
            # Load from a PyTorch checkpoint
            model_file = os.path.join(pretrained_model_name_or_path, weights_name)
            return model_file
        elif subfolder is not None and os.path.isfile(
            os.path.join(pretrained_model_name_or_path, subfolder, weights_name)):
            model_file = os.path.join(pretrained_model_name_or_path, subfolder, weights_name)
            return model_file
else:
raise EnvironmentError(
F"""Error no file named {weights_name} found in directory {pretrained_model_name_or_path}.""")
else:
# 1. First check if deprecated way of loading from branches is used
if (
revision in DEPRECATED_REVISION_ARGS
and (weights_name == WEIGHTS_NAME or weights_name == SAFETENSORS_WEIGHTS_NAME)
            and version.parse(version.parse(__version__).base_version) >= version.parse('''0.20.0''')
):
try:
                model_file = hf_hub_download(
                    pretrained_model_name_or_path, filename=_add_variant(weights_name, revision), cache_dir=cache_dir, force_download=force_download, proxies=proxies, resume_download=resume_download, local_files_only=local_files_only, use_auth_token=use_auth_token, user_agent=user_agent, subfolder=subfolder, revision=revision or commit_hash, )
                warnings.warn(
                    f"""Loading the variant {revision} from {pretrained_model_name_or_path} via `revision='{revision}'` is deprecated. Loading instead from `revision='main'` with `variant={revision}`. Loading model variants via `revision='{revision}'` will be removed in diffusers v1. Please use `variant='{revision}'` instead.""", FutureWarning, )
return model_file
except: # noqa: E722
                warnings.warn(
                    f"""You are loading the variant {revision} from {pretrained_model_name_or_path} via `revision='{revision}'`. This behavior is deprecated and will be removed in diffusers v1. One should use `variant='{revision}'` instead. However, it appears that {pretrained_model_name_or_path} currently does not have a {_add_variant(weights_name, revision)} file in the 'main' branch of {pretrained_model_name_or_path}. \n The Diffusers team and community would be very grateful if you could open an issue: https://github.com/huggingface/diffusers/issues/new with the title '{pretrained_model_name_or_path} is missing {_add_variant(weights_name, revision)}' so that the correct variant file can be added.""", FutureWarning, )
try:
# 2. Load model file as usual
            model_file = hf_hub_download(
                pretrained_model_name_or_path, filename=weights_name, cache_dir=cache_dir, force_download=force_download, proxies=proxies, resume_download=resume_download, local_files_only=local_files_only, use_auth_token=use_auth_token, user_agent=user_agent, subfolder=subfolder, revision=revision or commit_hash, )
return model_file
except RepositoryNotFoundError:
raise EnvironmentError(
F"""{pretrained_model_name_or_path} is not a local folder and is not a valid model identifier """
'''listed on \'https://huggingface.co/models\'\nIf this is a private repository, make sure to pass a '''
'''token having permission to this repo with `use_auth_token` or log in with `huggingface-cli '''
'''login`.''')
except RevisionNotFoundError:
raise EnvironmentError(
F"""{revision} is not a valid git identifier (branch name, tag name or commit id) that exists for """
'''this model name. Check the model page at '''
F"""'https://huggingface.co/{pretrained_model_name_or_path}' for available revisions.""")
except EntryNotFoundError:
raise EnvironmentError(
F"""{pretrained_model_name_or_path} does not appear to have a file named {weights_name}.""")
except HTTPError as err:
raise EnvironmentError(
F"""There was a specific connection error when trying to load {pretrained_model_name_or_path}:\n{err}""")
except ValueError:
raise EnvironmentError(
F"""We couldn't connect to '{HUGGINGFACE_CO_RESOLVE_ENDPOINT}' to load this model, couldn't find it"""
F""" in the cached files and it looks like {pretrained_model_name_or_path} is not the path to a"""
F""" directory containing a file named {weights_name} or"""
''' \nCheckout your internet connection or see how to run the library in'''
''' offline mode at \'https://huggingface.co/docs/diffusers/installation#offline-mode\'.''')
except EnvironmentError:
raise EnvironmentError(
F"""Can't load the model for '{pretrained_model_name_or_path}'. If you were trying to load it from """
'''\'https://huggingface.co/models\', make sure you don\'t have a local directory with the same name. '''
F"""Otherwise, make sure '{pretrained_model_name_or_path}' is the correct path to a directory """
F"""containing a file named {weights_name}""")
| 52 | 0 |
import argparse
import json
import os
from pathlib import Path
import requests
import torch
from transformers import JukeboxConfig, JukeboxModel
from transformers.utils import logging
logging.set_verbosity_info()
snake_case__ : List[str] = logging.get_logger(__name__)
snake_case__ : int = """https://openaipublic.azureedge.net/jukebox/models/"""
snake_case__ : List[Any] = {
"""jukebox-1b-lyrics""": [
"""5b/vqvae.pth.tar""",
"""5b/prior_level_0.pth.tar""",
"""5b/prior_level_1.pth.tar""",
"""1b_lyrics/prior_level_2.pth.tar""",
],
"""jukebox-5b-lyrics""": [
"""5b/vqvae.pth.tar""",
"""5b/prior_level_0.pth.tar""",
"""5b/prior_level_1.pth.tar""",
"""5b_lyrics/prior_level_2.pth.tar""",
],
}
def replace_key(key):
    if key.endswith('.model.1.bias') and len(key.split('.')) > 10:
        key = key.replace('.model.1.bias', '.conv1d_1.bias')
    elif key.endswith('.model.1.weight') and len(key.split('.')) > 10:
        key = key.replace('.model.1.weight', '.conv1d_1.weight')
    elif key.endswith('.model.3.bias') and len(key.split('.')) > 10:
        key = key.replace('.model.3.bias', '.conv1d_2.bias')
    elif key.endswith('.model.3.weight') and len(key.split('.')) > 10:
        key = key.replace('.model.3.weight', '.conv1d_2.weight')

    if "conditioner_blocks.0." in key:
        key = key.replace('conditioner_blocks.0', 'conditioner_blocks')

    if "prime_prior" in key:
        key = key.replace('prime_prior', 'encoder')

    if ".emb." in key and "total" not in key and "absolute" not in key and "relative" not in key:
        key = key.replace('.emb.', '.')

    if key.endswith('k'):  # replace vqvae.X.k with vqvae.X.codebook
        return key.replace('.k', '.codebook')
    if "y_emb." in key:
        return key.replace('y_emb.', 'metadata_embedding.')
    if "x_emb.emb." in key:
        key = key.replace('0.x_emb.emb', 'embed_tokens')
    if "prime_state_ln" in key:
        return key.replace('prime_state_ln', 'encoder.final_layer_norm')
    if ".ln" in key:
        return key.replace('.ln', '.layer_norm')
    if "_ln" in key:
        return key.replace('_ln', '_layer_norm')
    if "prime_state_proj" in key:
        return key.replace('prime_state_proj', 'encoder.proj_in')
    if "prime_x_out" in key:
        return key.replace('prime_x_out', 'encoder.lm_head')
    if "prior.x_out" in key:
        return key.replace('x_out', 'fc_proj_out')
    if "x_emb" in key:
        return key.replace('x_emb', 'embed_tokens')
    return key
def fix_jukebox_keys(state_dict, model_state_dict, key_prefix, mapping):
    new_dict = {}
    import re

    re_encoder_block_conv_in = re.compile(r'encoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).(bias|weight)')
    re_encoder_block_resnet = re.compile(
        r'encoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)')
    re_encoder_block_proj_out = re.compile(r'encoders.(\d*).level_blocks.(\d*).model.(\d*).(bias|weight)')

    re_decoder_block_conv_out = re.compile(r'decoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).(bias|weight)')
    re_decoder_block_resnet = re.compile(
        r'decoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)')
    re_decoder_block_proj_in = re.compile(r'decoders.(\d*).level_blocks.(\d*).model.(\d*).(bias|weight)')

    re_prior_cond_conv_out = re.compile(r'conditioner_blocks.(\d*).cond.model.(\d*).(\d).(bias|weight)')
    re_prior_cond_resnet = re.compile(
        r'conditioner_blocks.(\d*).cond.model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)')
    re_prior_cond_proj_in = re.compile(r'conditioner_blocks.(\d*).cond.model.(\d*).(bias|weight)')

    for original_key, value in state_dict.items():
        # rename vqvae.encoder keys
        if re_encoder_block_conv_in.fullmatch(original_key):
            regex_match = re_encoder_block_conv_in.match(original_key)
            groups = regex_match.groups()
            block_index = int(groups[2]) * 2 + int(groups[3])
            re_new_key = f"""encoders.{groups[0]}.level_blocks.{groups[1]}.downsample_block.{block_index}.{groups[-1]}"""
            key = re_encoder_block_conv_in.sub(re_new_key, original_key)

        elif re_encoder_block_resnet.fullmatch(original_key):
            regex_match = re_encoder_block_resnet.match(original_key)
            groups = regex_match.groups()
            block_index = int(groups[2]) * 2 + int(groups[3])
            conv_index = {'1': 1, '3': 2}[groups[-2]]
            prefix = f"""encoders.{groups[0]}.level_blocks.{groups[1]}.downsample_block.{block_index}."""
            resnet_block = f"""resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}"""
            re_new_key = prefix + resnet_block
            key = re_encoder_block_resnet.sub(re_new_key, original_key)

        elif re_encoder_block_proj_out.fullmatch(original_key):
            regex_match = re_encoder_block_proj_out.match(original_key)
            groups = regex_match.groups()
            re_new_key = f"""encoders.{groups[0]}.level_blocks.{groups[1]}.proj_out.{groups[-1]}"""
            key = re_encoder_block_proj_out.sub(re_new_key, original_key)

        # rename vqvae.decoder keys
        elif re_decoder_block_conv_out.fullmatch(original_key):
            regex_match = re_decoder_block_conv_out.match(original_key)
            groups = regex_match.groups()
            block_index = int(groups[2]) * 2 + int(groups[3]) - 2
            re_new_key = f"""decoders.{groups[0]}.level_blocks.{groups[1]}.upsample_block.{block_index}.{groups[-1]}"""
            key = re_decoder_block_conv_out.sub(re_new_key, original_key)

        elif re_decoder_block_resnet.fullmatch(original_key):
            regex_match = re_decoder_block_resnet.match(original_key)
            groups = regex_match.groups()
            block_index = int(groups[2]) * 2 + int(groups[3]) - 2
            conv_index = {'1': 1, '3': 2}[groups[-2]]
            prefix = f"""decoders.{groups[0]}.level_blocks.{groups[1]}.upsample_block.{block_index}."""
            resnet_block = f"""resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}"""
            re_new_key = prefix + resnet_block
            key = re_decoder_block_resnet.sub(re_new_key, original_key)

        elif re_decoder_block_proj_in.fullmatch(original_key):
            regex_match = re_decoder_block_proj_in.match(original_key)
            groups = regex_match.groups()
            re_new_key = f"""decoders.{groups[0]}.level_blocks.{groups[1]}.proj_in.{groups[-1]}"""
            key = re_decoder_block_proj_in.sub(re_new_key, original_key)

        # rename prior cond.model to upsampler.upsample_block and resnet
        elif re_prior_cond_conv_out.fullmatch(original_key):
            regex_match = re_prior_cond_conv_out.match(original_key)
            groups = regex_match.groups()
            block_index = int(groups[1]) * 2 + int(groups[2]) - 2
            re_new_key = f"""conditioner_blocks.upsampler.upsample_block.{block_index}.{groups[-1]}"""
            key = re_prior_cond_conv_out.sub(re_new_key, original_key)

        elif re_prior_cond_resnet.fullmatch(original_key):
            regex_match = re_prior_cond_resnet.match(original_key)
            groups = regex_match.groups()
            block_index = int(groups[1]) * 2 + int(groups[2]) - 2
            conv_index = {'1': 1, '3': 2}[groups[-2]]
            prefix = f"""conditioner_blocks.upsampler.upsample_block.{block_index}."""
            resnet_block = f"""resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}"""
            re_new_key = prefix + resnet_block
            key = re_prior_cond_resnet.sub(re_new_key, original_key)

        elif re_prior_cond_proj_in.fullmatch(original_key):
            regex_match = re_prior_cond_proj_in.match(original_key)
            groups = regex_match.groups()
            re_new_key = f"""conditioner_blocks.upsampler.proj_in.{groups[-1]}"""
            key = re_prior_cond_proj_in.sub(re_new_key, original_key)

        # keep original key
        else:
            key = original_key

        key = replace_key(key)

        if f"""{key_prefix}.{key}""" not in model_state_dict or key is None:
            print(f"""failed converting {original_key} to {key}, does not match""")

        # handle missmatched shape
        elif value.shape != model_state_dict[f"""{key_prefix}.{key}"""].shape:
            val = model_state_dict[f"""{key_prefix}.{key}"""]
            print(f"""{original_key}-> {key} : \nshape {val.shape} and {value.shape}, do not match""")
            key = original_key

        mapping[key] = original_key
        new_dict[key] = value

    return new_dict
@torch.no_grad()
def convert_openai_checkpoint(model_name=None, pytorch_dump_folder_path=None):
for file in MODEL_MAPPING[model_name]:
if not os.path.isfile(f"""{pytorch_dump_folder_path}/{file.split("/")[-1]}"""):
            r = requests.get(f"""{PREFIX}{file}""", allow_redirects=True)
            os.makedirs(f"""{pytorch_dump_folder_path}/""", exist_ok=True)
            open(f"""{pytorch_dump_folder_path}/{file.split("/")[-1]}""", 'wb').write(r.content)
    model_to_convert = MODEL_MAPPING[model_name.split('/')[-1]]

    config = JukeboxConfig.from_pretrained(model_name)
    model = JukeboxModel(config)

    weight_dict = []
    mapping = {}
    for i, dict_name in enumerate(model_to_convert):
        old_dic = torch.load(f"""{pytorch_dump_folder_path}/{dict_name.split("/")[-1]}""")['model']

        new_dic = {}
        for k in old_dic.keys():
            if k.endswith('.b'):
                new_dic[k.replace('b', 'bias')] = old_dic[k]
            elif k.endswith('.w'):
                new_dic[k.replace('w', 'weight')] = old_dic[k]
            elif "level_2" not in dict_name and "cond.model." in k:
                new_dic[k.replace('.blocks.', '.model.')] = old_dic[k]
            else:
                new_dic[k] = old_dic[k]

        key_prefix = 'vqvae' if i == 0 else f"""priors.{3 - i}"""
        new_dic = fix_jukebox_keys(new_dic, model.state_dict(), key_prefix, mapping)
        weight_dict.append(new_dic)

    vqvae_state_dict = weight_dict.pop(0)
    model.vqvae.load_state_dict(vqvae_state_dict)
    for i in range(len(weight_dict)):
        model.priors[i].load_state_dict(weight_dict[2 - i])

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    with open(f"""{pytorch_dump_folder_path}/mapping.json""", 'w') as txtfile:
        json.dump(mapping, txtfile)

    print(f"""Saving model {model_name} to {pytorch_dump_folder_path}""")
    model.save_pretrained(pytorch_dump_folder_path)
return weight_dict
if __name__ == "__main__":
snake_case__ : int = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--model_name""",
default="""jukebox-5b-lyrics""",
type=str,
help="""Name of the model you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""",
default="""jukebox-5b-lyrics-converted""",
type=str,
help="""Path to the output PyTorch model directory.""",
)
snake_case__ : int = parser.parse_args()
convert_openai_checkpoint(args.model_name, args.pytorch_dump_folder_path)
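# A couple of concrete renames performed by replace_key above, traced through
# its branch order:
assert replace_key('y_emb.weight') == 'metadata_embedding.weight'
assert replace_key('prime_state_ln.weight') == 'encoder.final_layer_norm.weight'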
| 23 |
"""simple docstring"""
import copy
import os
from typing import TYPE_CHECKING, List, Union
if TYPE_CHECKING:
pass
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

ALIGN_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''kakaobrain/align-base''': '''https://huggingface.co/kakaobrain/align-base/resolve/main/config.json''',
}
class AlignTextConfig(PretrainedConfig):
    '''simple docstring'''

    model_type = '''align_text_model'''

    def __init__(self, vocab_size=30522, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=2, initializer_range=0.02, layer_norm_eps=1e-12, pad_token_id=0, position_embedding_type="absolute", use_cache=True, **kwargs, ):
        super().__init__(**kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.pad_token_id = pad_token_id
@classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs):
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        # get the text config dict if we are loading from AlignConfig
        if config_dict.get('''model_type''') == "align":
            config_dict = config_dict['''text_config''']
if "model_type" in config_dict and hasattr(cls , '''model_type''' ) and config_dict["model_type"] != cls.model_type:
logger.warning(
f"""You are using a model of type {config_dict["model_type"]} to instantiate a model of type """
f"""{cls.model_type}. This is not supported for all configurations of models and can yield errors.""" )
        return cls.from_dict(config_dict, **kwargs)
class AlignVisionConfig(PretrainedConfig):
    '''simple docstring'''

    model_type = '''align_vision_model'''

    def __init__(self, num_channels=3, image_size=600, width_coefficient=2.0, depth_coefficient=3.1, depth_divisor=8, kernel_sizes=[3, 3, 5, 3, 5, 5, 3], in_channels=[32, 16, 24, 40, 80, 112, 192], out_channels=[16, 24, 40, 80, 112, 192, 320], depthwise_padding=[], strides=[1, 2, 2, 2, 1, 2, 1], num_block_repeats=[1, 2, 2, 3, 3, 4, 1], expand_ratios=[1, 6, 6, 6, 6, 6, 6], squeeze_expansion_ratio=0.25, hidden_act="swish", hidden_dim=2560, pooling_type="mean", initializer_range=0.02, batch_norm_eps=0.001, batch_norm_momentum=0.99, drop_connect_rate=0.2, **kwargs, ):
        super().__init__(**kwargs)

        self.num_channels = num_channels
        self.image_size = image_size
        self.width_coefficient = width_coefficient
        self.depth_coefficient = depth_coefficient
        self.depth_divisor = depth_divisor
        self.kernel_sizes = kernel_sizes
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.depthwise_padding = depthwise_padding
        self.strides = strides
        self.num_block_repeats = num_block_repeats
        self.expand_ratios = expand_ratios
        self.squeeze_expansion_ratio = squeeze_expansion_ratio
        self.hidden_act = hidden_act
        self.hidden_dim = hidden_dim
        self.pooling_type = pooling_type
        self.initializer_range = initializer_range
        self.batch_norm_eps = batch_norm_eps
        self.batch_norm_momentum = batch_norm_momentum
        self.drop_connect_rate = drop_connect_rate
        self.num_hidden_layers = sum(num_block_repeats) * 4
@classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs):
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        # get the vision config dict if we are loading from AlignConfig
        if config_dict.get('''model_type''') == "align":
            config_dict = config_dict['''vision_config''']
if "model_type" in config_dict and hasattr(cls , '''model_type''' ) and config_dict["model_type"] != cls.model_type:
logger.warning(
f"""You are using a model of type {config_dict["model_type"]} to instantiate a model of type """
f"""{cls.model_type}. This is not supported for all configurations of models and can yield errors.""" )
        return cls.from_dict(config_dict, **kwargs)
class AlignConfig(PretrainedConfig):
    '''simple docstring'''

    model_type = '''align'''
    is_composition = True

    def __init__(self, text_config=None, vision_config=None, projection_dim=640, temperature_init_value=1.0, initializer_range=0.02, **kwargs, ):
        super().__init__(**kwargs)

        if text_config is None:
            text_config = {}
            logger.info('''text_config is None. Initializing the AlignTextConfig with default values.''')

        if vision_config is None:
            vision_config = {}
            logger.info('''vision_config is None. Initializing the AlignVisionConfig with default values.''')

        self.text_config = AlignTextConfig(**text_config)
        self.vision_config = AlignVisionConfig(**vision_config)

        self.projection_dim = projection_dim
        self.temperature_init_value = temperature_init_value
        self.initializer_range = initializer_range
@classmethod
    def from_text_vision_configs(cls, text_config: AlignTextConfig, vision_config: AlignVisionConfig, **kwargs):
        return cls(text_config=text_config.to_dict(), vision_config=vision_config.to_dict(), **kwargs)

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output['''text_config'''] = self.text_config.to_dict()
        output['''vision_config'''] = self.vision_config.to_dict()
        output['''model_type'''] = self.__class__.model_type
        return output
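# Illustrative round trip using the defaults defined above:
demo_config = AlignConfig()
assert demo_config.text_config.vocab_size == 30522
assert demo_config.vision_config.image_size == 600
assert demo_config.to_dict()['model_type'] == 'align'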
| 52 | 0 |
'''simple docstring'''
import glob
import os
import random
from string import ascii_lowercase, digits
import cv2
LABEL_DIR = ''''''
IMAGE_DIR = ''''''
OUTPUT_DIR = ''''''
FLIP_TYPE = 1  # (0 is vertical, 1 is horizontal)
def main() -> None:
    '''simple docstring'''
    img_paths, annos = get_dataset(LABEL_DIR, IMAGE_DIR)
    print('''Processing...''')
    new_images, new_annos, paths = update_image_and_anno(img_paths, annos, FLIP_TYPE)

    for index, image in enumerate(new_images):
        # Get random string code: '7b7ad245cdff75241935e4dd860f3bad'
        letter_code = random_chars(32)
        file_name = paths[index].split(os.sep)[-1].rsplit('''.''', 1)[0]
        file_root = f'''{OUTPUT_DIR}/{file_name}_FLIP_{letter_code}'''
        cv2.imwrite(f'''/{file_root}.jpg''', image, [cv2.IMWRITE_JPEG_QUALITY, 85])
        print(f'''Success {index+1}/{len(new_images)} with {file_name}''')
        annos_list = []
        for anno in new_annos[index]:
            obj = f'''{anno[0]} {anno[1]} {anno[2]} {anno[3]} {anno[4]}'''
            annos_list.append(obj)
with open(f'''/{file_root}.txt''' , '''w''' ) as outfile:
outfile.write('''\n'''.join(line for line in annos_list ) )
def get_dataset(label_dir: str, img_dir: str) -> tuple[list, list]:
    '''simple docstring'''
    img_paths = []
    labels = []
    for label_file in glob.glob(os.path.join(label_dir, '''*.txt''')):
        label_name = label_file.split(os.sep)[-1].rsplit('''.''', 1)[0]
        with open(label_file) as in_file:
            obj_lists = in_file.readlines()
        img_path = os.path.join(img_dir, f'''{label_name}.jpg''')

        boxes = []
        for obj_list in obj_lists:
            obj = obj_list.rstrip('''\n''').split(''' ''')
boxes.append(
[
int(obj[0] ),
float(obj[1] ),
float(obj[2] ),
float(obj[3] ),
float(obj[4] ),
] )
if not boxes:
continue
        img_paths.append(img_path)
        labels.append(boxes)
return img_paths, labels
def update_image_and_anno(img_list: list, anno_list: list, flip_type: int = 1) -> tuple[list, list, list]:
    '''simple docstring'''
    new_annos_lists = []
    path_list = []
    new_imgs_list = []
    for idx in range(len(img_list)):
        new_annos = []
        path = img_list[idx]
        path_list.append(path)
        img_annos = anno_list[idx]
        img = cv2.imread(path)
        if flip_type == 1:
            new_img = cv2.flip(img, flip_type)
            for bbox in img_annos:
                x_center_new = 1 - bbox[1]
                new_annos.append([bbox[0], x_center_new, bbox[2], bbox[3], bbox[4]])
        elif flip_type == 0:
            new_img = cv2.flip(img, flip_type)
            for bbox in img_annos:
                y_center_new = 1 - bbox[2]
                new_annos.append([bbox[0], bbox[1], y_center_new, bbox[3], bbox[4]])
        new_annos_lists.append(new_annos)
        new_imgs_list.append(new_img)
    return new_imgs_list, new_annos_lists, path_list
def random_chars(number_char: int = 32) -> str:
    '''simple docstring'''
    assert number_char > 1, "The number of characters should be greater than 1"
    letter_code = ascii_lowercase + digits
    return "".join(random.choice(letter_code) for _ in range(number_char))
if __name__ == "__main__":
main()
print('''DONE ✅''')
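# The YOLO-style coordinate math used above: a horizontal flip only mirrors the
# normalized x-centre (and a vertical flip the y-centre); class id, width, and
# height are unchanged. Tiny worked example:
demo_bbox = [0, 0.25, 0.5, 0.2, 0.4]  # class, x_centre, y_centre, width, height
assert [demo_bbox[0], 1 - demo_bbox[1], *demo_bbox[2:]] == [0, 0.75, 0.5, 0.2, 0.4]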
| 24 |
"""simple docstring"""
from __future__ import annotations
from random import choice
def random_pivot(lst):
    return choice(lst)


def kth_number(lst: list[int], k: int) -> int:
    pivot = random_pivot(lst)

    # partition based on pivot
    # linear time
    small = [e for e in lst if e < pivot]
    big = [e for e in lst if e > pivot]
# if we get lucky, pivot might be the element we want.
# we can easily see this:
# small (elements smaller than k)
# + pivot (kth element)
# + big (elements larger than k)
    if len(small) == k - 1:
        return pivot
    # pivot is in elements bigger than k
    elif len(small) < k - 1:
        return kth_number(big, k - len(small) - 1)
    # pivot is in elements smaller than k
    else:
        return kth_number(small, k)
if __name__ == "__main__":
import doctest
doctest.testmod()
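# Example: the 3rd smallest element of a list of distinct values. Note that the
# partition above keeps only elements strictly smaller/bigger than the pivot,
# so duplicates of the pivot would be silently dropped.
assert kth_number([2, 1, 3, 4, 5], 3) == 3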
| 52 | 0 |
import inspect
import os
import sys
import unittest
import accelerate
from accelerate.test_utils import execute_subprocess_async, require_tpu
class _UpperCamelCase ( unittest.TestCase ):
'''simple docstring'''
    def setUp(self):
        """simple docstring"""
        mod_file = inspect.getfile(accelerate.test_utils)
        self.test_file_path = os.path.sep.join(mod_file.split(os.path.sep)[:-1] + ["scripts", "test_script.py"])
        self.test_dir = os.path.sep.join(inspect.getfile(self.__class__).split(os.path.sep)[:-1])
@require_tpu
def __UpperCamelCase ( self : Union[str, Any] ) -> Dict:
"""simple docstring"""
SCREAMING_SNAKE_CASE : Tuple = F"\n {self.test_dir}/xla_spawn.py\n --num_cores 8\n {self.test_file_path}\n ".split()
SCREAMING_SNAKE_CASE : int = [sys.executable] + distributed_args
execute_subprocess_async(a , env=os.environ.copy() )
| 25 |
"""simple docstring"""
import argparse
import csv
import logging
import os
import random
import numpy as np
import torch
from torch.utils.data import DataLoader, RandomSampler, SequentialSampler, TensorDataset
from tqdm import tqdm, trange
from transformers import (
CONFIG_NAME,
WEIGHTS_NAME,
AdamW,
OpenAIGPTDoubleHeadsModel,
OpenAIGPTTokenizer,
get_linear_schedule_with_warmup,
)
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''', datefmt='''%m/%d/%Y %H:%M:%S''', level=logging.INFO
)
A = logging.getLogger(__name__)
def __A ( a_ :Union[str, Any] , a_ :Dict) -> Union[str, Any]:
__a : Optional[int] = np.argmax(a_ , axis=1)
return np.sum(outputs == labels)
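# Note: this returns the raw count of correct argmax predictions, not a ratio;
# the evaluation loop in main() divides the accumulated count by
# nb_eval_examples to obtain eval_accuracy.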
def __A ( a_ :Any) -> str:
with open(a_ , encoding='''utf_8''') as f:
__a : List[Any] = csv.reader(a_)
__a : List[str] = []
next(a_) # skip the first line
for line in tqdm(a_):
output.append((''' '''.join(line[1:5]), line[5], line[6], int(line[-1]) - 1))
return output
def __A ( a_ :Dict , a_ :str , a_ :str , a_ :List[Any] , a_ :Tuple , a_ :List[Any]) -> Any:
__a : List[str] = []
for dataset in encoded_datasets:
__a : List[str] = len(a_)
__a : List[str] = np.zeros((n_batch, 2, input_len) , dtype=np.intaa)
__a : Tuple = np.zeros((n_batch, 2) , dtype=np.intaa)
__a : Tuple = np.full((n_batch, 2, input_len) , fill_value=-1_00 , dtype=np.intaa)
__a : Optional[Any] = np.zeros((n_batch,) , dtype=np.intaa)
for (
i,
(story, conta, conta, mc_label),
) in enumerate(a_):
__a : str = [start_token] + story[:cap_length] + [delimiter_token] + conta[:cap_length] + [clf_token]
__a : Tuple = [start_token] + story[:cap_length] + [delimiter_token] + conta[:cap_length] + [clf_token]
__a : Tuple = with_conta
__a : int = with_conta
__a : List[str] = len(a_) - 1
__a : int = len(a_) - 1
__a : Optional[int] = with_conta
__a : Tuple = with_conta
__a : List[Any] = mc_label
__a : Any = (input_ids, mc_token_ids, lm_labels, mc_labels)
tensor_datasets.append(tuple(torch.tensor(a_) for t in all_inputs))
return tensor_datasets
def __A ( ) -> Union[str, Any]:
__a : List[str] = argparse.ArgumentParser()
parser.add_argument('''--model_name''' , type=a_ , default='''openai-gpt''' , help='''pretrained model name''')
parser.add_argument('''--do_train''' , action='''store_true''' , help='''Whether to run training.''')
parser.add_argument('''--do_eval''' , action='''store_true''' , help='''Whether to run eval on the dev set.''')
parser.add_argument(
'''--output_dir''' , default=a_ , type=a_ , required=a_ , help='''The output directory where the model predictions and checkpoints will be written.''' , )
parser.add_argument('''--train_dataset''' , type=a_ , default='''''')
parser.add_argument('''--eval_dataset''' , type=a_ , default='''''')
parser.add_argument('''--seed''' , type=a_ , default=42)
parser.add_argument('''--num_train_epochs''' , type=a_ , default=3)
parser.add_argument('''--train_batch_size''' , type=a_ , default=8)
parser.add_argument('''--eval_batch_size''' , type=a_ , default=16)
parser.add_argument('''--adam_epsilon''' , default=1e-8 , type=a_ , help='''Epsilon for Adam optimizer.''')
parser.add_argument('''--max_grad_norm''' , type=a_ , default=1)
parser.add_argument(
'''--max_steps''' , default=-1 , type=a_ , help=(
'''If > 0: set total number of training steps to perform. Override num_train_epochs.'''
) , )
parser.add_argument(
'''--gradient_accumulation_steps''' , type=a_ , default=1 , help='''Number of updates steps to accumulate before performing a backward/update pass.''' , )
parser.add_argument('''--learning_rate''' , type=a_ , default=6.25e-5)
parser.add_argument('''--warmup_steps''' , default=0 , type=a_ , help='''Linear warmup over warmup_steps.''')
parser.add_argument('''--lr_schedule''' , type=a_ , default='''warmup_linear''')
parser.add_argument('''--weight_decay''' , type=a_ , default=0.0_1)
parser.add_argument('''--lm_coef''' , type=a_ , default=0.9)
parser.add_argument('''--n_valid''' , type=a_ , default=3_74)
parser.add_argument('''--server_ip''' , type=a_ , default='''''' , help='''Can be used for distant debugging.''')
parser.add_argument('''--server_port''' , type=a_ , default='''''' , help='''Can be used for distant debugging.''')
__a : str = parser.parse_args()
print(a_)
if args.server_ip and args.server_port:
# Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script
import ptvsd
print('''Waiting for debugger attach''')
ptvsd.enable_attach(address=(args.server_ip, args.server_port) , redirect_output=a_)
ptvsd.wait_for_attach()
random.seed(args.seed)
np.random.seed(args.seed)
torch.manual_seed(args.seed)
torch.cuda.manual_seed_all(args.seed)
__a : Tuple = torch.device('''cuda''' if torch.cuda.is_available() else '''cpu''')
__a : str = torch.cuda.device_count()
logger.info('''device: {}, n_gpu {}'''.format(a_ , a_))
if not args.do_train and not args.do_eval:
raise ValueError('''At least one of `do_train` or `do_eval` must be True.''')
if not os.path.exists(args.output_dir):
os.makedirs(args.output_dir)
# Load tokenizer and model
# This loading functions also add new tokens and embeddings called `special tokens`
# These new embeddings will be fine-tuned on the RocStories dataset
__a : List[str] = ['''_start_''', '''_delimiter_''', '''_classify_''']
__a : Union[str, Any] = OpenAIGPTTokenizer.from_pretrained(args.model_name)
tokenizer.add_tokens(a_)
__a : Union[str, Any] = tokenizer.convert_tokens_to_ids(a_)
__a : Optional[Any] = OpenAIGPTDoubleHeadsModel.from_pretrained(args.model_name)
model.resize_token_embeddings(len(a_))
model.to(a_)
# Load and encode the datasets
def tokenize_and_encode(a_ :List[Any]):
if isinstance(a_ , a_):
return tokenizer.convert_tokens_to_ids(tokenizer.tokenize(a_))
elif isinstance(a_ , a_):
return obj
return [tokenize_and_encode(a_) for o in obj]
logger.info('''Encoding dataset...''')
__a : Dict = load_rocstories_dataset(args.train_dataset)
__a : int = load_rocstories_dataset(args.eval_dataset)
__a : Optional[int] = (train_dataset, eval_dataset)
__a : List[Any] = tokenize_and_encode(a_)
# Compute the max input length for the Transformer
__a : List[Any] = model.config.n_positions // 2 - 2
__a : int = max(
len(story[:max_length]) + max(len(conta[:max_length]) , len(conta[:max_length])) + 3
for dataset in encoded_datasets
for story, conta, conta, _ in dataset)
__a : Union[str, Any] = min(a_ , model.config.n_positions) # Max size of input for the pre-trained model
# Prepare inputs tensors and dataloaders
__a : Tuple = pre_process_datasets(a_ , a_ , a_ , *a_)
__a , __a : Tuple = tensor_datasets[0], tensor_datasets[1]
__a : List[str] = TensorDataset(*a_)
__a : Optional[Any] = RandomSampler(a_)
__a : str = DataLoader(a_ , sampler=a_ , batch_size=args.train_batch_size)
__a : List[str] = TensorDataset(*a_)
__a : Optional[int] = SequentialSampler(a_)
__a : Optional[Any] = DataLoader(a_ , sampler=a_ , batch_size=args.eval_batch_size)
# Prepare optimizer
if args.do_train:
if args.max_steps > 0:
__a : int = args.max_steps
__a : Optional[int] = args.max_steps // (len(a_) // args.gradient_accumulation_steps) + 1
else:
__a : str = len(a_) // args.gradient_accumulation_steps * args.num_train_epochs
__a : List[Any] = list(model.named_parameters())
__a : Optional[int] = ['''bias''', '''LayerNorm.bias''', '''LayerNorm.weight''']
__a : List[str] = [
{
'''params''': [p for n, p in param_optimizer if not any(nd in n for nd in no_decay)],
'''weight_decay''': args.weight_decay,
},
{'''params''': [p for n, p in param_optimizer if any(nd in n for nd in no_decay)], '''weight_decay''': 0.0},
]
__a : int = AdamW(a_ , lr=args.learning_rate , eps=args.adam_epsilon)
__a : Union[str, Any] = get_linear_schedule_with_warmup(
a_ , num_warmup_steps=args.warmup_steps , num_training_steps=a_)
if args.do_train:
__a , __a , __a : Dict = 0, 0, None
model.train()
for _ in trange(int(args.num_train_epochs) , desc='''Epoch'''):
__a : Dict = 0
__a : Dict = 0
__a : List[str] = tqdm(a_ , desc='''Training''')
for step, batch in enumerate(a_):
__a : Dict = tuple(t.to(a_) for t in batch)
__a , __a , __a , __a : str = batch
__a : List[Any] = model(a_ , mc_token_ids=a_ , lm_labels=a_ , mc_labels=a_)
__a : Optional[Any] = args.lm_coef * losses[0] + losses[1]
loss.backward()
optimizer.step()
scheduler.step()
optimizer.zero_grad()
tr_loss += loss.item()
__a : int = (
loss.item() if exp_average_loss is None else 0.7 * exp_average_loss + 0.3 * loss.item()
)
nb_tr_steps += 1
__a : Tuple = '''Training loss: {:.2e} lr: {:.2e}'''.format(a_ , scheduler.get_lr()[0])
# Save a trained model
if args.do_train:
# Save a trained model, configuration and tokenizer
__a : Dict = model.module if hasattr(a_ , '''module''') else model # Only save the model itself
# If we save using the predefined names, we can load using `from_pretrained`
__a : int = os.path.join(args.output_dir , a_)
__a : str = os.path.join(args.output_dir , a_)
torch.save(model_to_save.state_dict() , a_)
model_to_save.config.to_json_file(a_)
tokenizer.save_vocabulary(args.output_dir)
# Load a trained model and vocabulary that you have fine-tuned
__a : str = OpenAIGPTDoubleHeadsModel.from_pretrained(args.output_dir)
__a : Union[str, Any] = OpenAIGPTTokenizer.from_pretrained(args.output_dir)
model.to(a_)
if args.do_eval:
model.eval()
__a , __a : List[Any] = 0, 0
__a , __a : Union[str, Any] = 0, 0
for batch in tqdm(a_ , desc='''Evaluating'''):
__a : str = tuple(t.to(a_) for t in batch)
__a , __a , __a , __a : List[Any] = batch
with torch.no_grad():
__a , __a , __a , __a : str = model(
a_ , mc_token_ids=a_ , lm_labels=a_ , mc_labels=a_)
__a : List[str] = mc_logits.detach().cpu().numpy()
__a : Optional[Any] = mc_labels.to('''cpu''').numpy()
__a : str = accuracy(a_ , a_)
eval_loss += mc_loss.mean().item()
eval_accuracy += tmp_eval_accuracy
nb_eval_examples += input_ids.size(0)
nb_eval_steps += 1
__a : Tuple = eval_loss / nb_eval_steps
__a : List[str] = eval_accuracy / nb_eval_examples
__a : List[Any] = tr_loss / nb_tr_steps if args.do_train else None
__a : List[str] = {'''eval_loss''': eval_loss, '''eval_accuracy''': eval_accuracy, '''train_loss''': train_loss}
__a : Dict = os.path.join(args.output_dir , '''eval_results.txt''')
with open(a_ , '''w''') as writer:
logger.info('''***** Eval results *****''')
for key in sorted(result.keys()):
logger.info(''' %s = %s''' , a_ , str(result[key]))
writer.write('''%s = %s\n''' % (key, str(result[key])))
if __name__ == "__main__":
main()
| 52 | 0 |
'''simple docstring'''
import inspect
import warnings
from typing import Any, Dict, Optional, Union
from packaging import version
def _a ( *_lowerCamelCase , _lowerCamelCase = None , _lowerCamelCase=True , _lowerCamelCase=2 ) -> Union[str, Any]:
"""simple docstring"""
from .. import __version__
__snake_case : Optional[Any] = take_from
__snake_case : Union[str, Any] = ()
if not isinstance(args[0] , _lowerCamelCase ):
__snake_case : Union[str, Any] = (args,)
for attribute, version_name, message in args:
if version.parse(version.parse(_lowerCamelCase ).base_version ) >= version.parse(_lowerCamelCase ):
raise ValueError(
F'''The deprecation tuple {(attribute, version_name, message)} should be removed since diffusers\''''
F''' version {__version__} is >= {version_name}''' )
__snake_case : Dict = None
if isinstance(_lowerCamelCase , _lowerCamelCase ) and attribute in deprecated_kwargs:
values += (deprecated_kwargs.pop(_lowerCamelCase ),)
__snake_case : List[Any] = F'''The `{attribute}` argument is deprecated and will be removed in version {version_name}.'''
elif hasattr(_lowerCamelCase , _lowerCamelCase ):
values += (getattr(_lowerCamelCase , _lowerCamelCase ),)
__snake_case : str = F'''The `{attribute}` attribute is deprecated and will be removed in version {version_name}.'''
elif deprecated_kwargs is None:
__snake_case : str = F'''`{attribute}` is deprecated and will be removed in version {version_name}.'''
if warning is not None:
__snake_case : List[str] = warning + """ """ if standard_warn else """"""
warnings.warn(warning + message , _lowerCamelCase , stacklevel=_lowerCamelCase )
if isinstance(_lowerCamelCase , _lowerCamelCase ) and len(_lowerCamelCase ) > 0:
__snake_case : Optional[Any] = inspect.getouterframes(inspect.currentframe() )[1]
__snake_case : int = call_frame.filename
__snake_case : Union[str, Any] = call_frame.lineno
__snake_case : Union[str, Any] = call_frame.function
__snake_case , __snake_case : str = next(iter(deprecated_kwargs.items() ) )
raise TypeError(F'''{function} in {filename} line {line_number-1} got an unexpected keyword argument `{key}`''' )
if len(_lowerCamelCase ) == 0:
return
elif len(_lowerCamelCase ) == 1:
return values[0]
return values
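# Minimal usage sketch (illustrative; mirrors the upstream diffusers `deprecate`
# helper that this function implements; the concrete names below are assumptions):
#
#   value = deprecate("old_kwarg", "1.0.0", "Use `new_kwarg` instead.", take_from=kwargs)
#
# This pops `old_kwarg` from kwargs when present, emits a deprecation warning,
# and returns the popped value.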
| 26 |
"""simple docstring"""
import unittest
import numpy as np
from transformers import RobertaConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_flax_available():
from transformers.models.roberta.modeling_flax_roberta import (
FlaxRobertaForCausalLM,
FlaxRobertaForMaskedLM,
FlaxRobertaForMultipleChoice,
FlaxRobertaForQuestionAnswering,
FlaxRobertaForSequenceClassification,
FlaxRobertaForTokenClassification,
FlaxRobertaModel,
)
class __lowercase ( unittest.TestCase ):
'''simple docstring'''
def __init__( self , _UpperCAmelCase , _UpperCAmelCase=13 , _UpperCAmelCase=7 , _UpperCAmelCase=True , _UpperCAmelCase=True , _UpperCAmelCase=True , _UpperCAmelCase=True , _UpperCAmelCase=99 , _UpperCAmelCase=32 , _UpperCAmelCase=5 , _UpperCAmelCase=4 , _UpperCAmelCase=37 , _UpperCAmelCase="gelu" , _UpperCAmelCase=0.1 , _UpperCAmelCase=0.1 , _UpperCAmelCase=512 , _UpperCAmelCase=16 , _UpperCAmelCase=2 , _UpperCAmelCase=0.0_2 , _UpperCAmelCase=4 , ):
__a : Any = parent
__a : Optional[int] = batch_size
__a : str = seq_length
__a : List[str] = is_training
__a : Optional[Any] = use_attention_mask
__a : Optional[Any] = use_token_type_ids
__a : List[str] = use_labels
__a : Union[str, Any] = vocab_size
__a : int = hidden_size
__a : Union[str, Any] = num_hidden_layers
__a : Union[str, Any] = num_attention_heads
__a : Dict = intermediate_size
__a : List[str] = hidden_act
__a : Dict = hidden_dropout_prob
__a : Union[str, Any] = attention_probs_dropout_prob
__a : int = max_position_embeddings
__a : Tuple = type_vocab_size
__a : Optional[int] = type_sequence_label_size
__a : Optional[Any] = initializer_range
__a : Optional[int] = num_choices
def _lowerCamelCase ( self ):
__a : int = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__a : Union[str, Any] = None
if self.use_attention_mask:
__a : Any = random_attention_mask([self.batch_size, self.seq_length] )
__a : Optional[int] = None
if self.use_token_type_ids:
__a : int = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
__a : Any = RobertaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=_UpperCAmelCase , initializer_range=self.initializer_range , )
return config, input_ids, token_type_ids, attention_mask
def _lowerCamelCase ( self ):
__a : Dict = self.prepare_config_and_inputs()
__a , __a , __a , __a : str = config_and_inputs
__a : str = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''attention_mask''': attention_mask}
return config, inputs_dict
def _lowerCamelCase ( self ):
__a : Any = self.prepare_config_and_inputs()
__a , __a , __a , __a : Union[str, Any] = config_and_inputs
__a : Optional[int] = True
__a : str = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] )
__a : List[Any] = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
return (
config,
input_ids,
token_type_ids,
encoder_hidden_states,
encoder_attention_mask,
)
@require_flax
class __lowercase ( _UpperCamelCase , unittest.TestCase ):
'''simple docstring'''
__lowerCAmelCase = True
__lowerCAmelCase = (
(
FlaxRobertaModel,
FlaxRobertaForCausalLM,
FlaxRobertaForMaskedLM,
FlaxRobertaForSequenceClassification,
FlaxRobertaForTokenClassification,
FlaxRobertaForMultipleChoice,
FlaxRobertaForQuestionAnswering,
)
if is_flax_available()
else ()
)
def _lowerCamelCase ( self ):
__a : Dict = FlaxRobertaModelTester(self )
@slow
def _lowerCamelCase ( self ):
for model_class_name in self.all_model_classes:
__a : int = model_class_name.from_pretrained('''roberta-base''' , from_pt=_UpperCAmelCase )
__a : List[str] = model(np.ones((1, 1) ) )
self.assertIsNotNone(_UpperCAmelCase )
| 52 | 0 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available
__A : Optional[Any] = {}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A : Optional[int] = ["MLukeTokenizer"]
if TYPE_CHECKING:
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_mluke import MLukeTokenizer
else:
import sys
__A : List[str] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 27 |
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
A = logging.get_logger(__name__)
A = {
'''facebook/levit-128S''': '''https://huggingface.co/facebook/levit-128S/resolve/main/config.json''',
# See all LeViT models at https://huggingface.co/models?filter=levit
}
class __lowercase ( _UpperCamelCase ):
'''simple docstring'''
__lowerCAmelCase = '''levit'''
def __init__( self , _UpperCAmelCase=224 , _UpperCAmelCase=3 , _UpperCAmelCase=3 , _UpperCAmelCase=2 , _UpperCAmelCase=1 , _UpperCAmelCase=16 , _UpperCAmelCase=[128, 256, 384] , _UpperCAmelCase=[4, 8, 12] , _UpperCAmelCase=[4, 4, 4] , _UpperCAmelCase=[16, 16, 16] , _UpperCAmelCase=0 , _UpperCAmelCase=[2, 2, 2] , _UpperCAmelCase=[2, 2, 2] , _UpperCAmelCase=0.0_2 , **_UpperCAmelCase , ):
super().__init__(**_UpperCAmelCase )
__a : int = image_size
__a : List[Any] = num_channels
__a : Dict = kernel_size
__a : Optional[int] = stride
__a : Optional[int] = padding
__a : Dict = hidden_sizes
__a : int = num_attention_heads
__a : Optional[int] = depths
__a : str = key_dim
__a : Union[str, Any] = drop_path_rate
__a : Optional[Any] = patch_size
__a : Tuple = attention_ratio
__a : int = mlp_ratio
__a : int = initializer_range
__a : int = [
['''Subsample''', key_dim[0], hidden_sizes[0] // key_dim[0], 4, 2, 2],
['''Subsample''', key_dim[0], hidden_sizes[1] // key_dim[0], 4, 2, 2],
]
class __lowercase ( _UpperCamelCase ):
'''simple docstring'''
__lowerCAmelCase = version.parse('''1.11''' )
@property
def _lowerCamelCase ( self ):
return OrderedDict(
[
('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}),
] )
@property
def _lowerCamelCase ( self ):
return 1e-4
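# 1e-4 is the absolute tolerance applied when validating the ONNX export's
# outputs against the original PyTorch model.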
| 52 | 0 |
'''simple docstring'''
from __future__ import annotations
from collections.abc import Callable
from typing import Any, Generic, TypeVar
UpperCamelCase_ = TypeVar("T")
class _a ( Generic[T] ):
'''simple docstring'''
def __init__( self, A, A ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Any | T = None
SCREAMING_SNAKE_CASE : int = len(A )
SCREAMING_SNAKE_CASE : list[T] = [any_type for _ in range(self.N )] + arr
SCREAMING_SNAKE_CASE : int = fnc
self.build()
def UpperCamelCase_ ( self ):
'''simple docstring'''
for p in range(self.N - 1, 0, -1 ):
SCREAMING_SNAKE_CASE : Union[str, Any] = self.fn(self.st[p * 2], self.st[p * 2 + 1] )
def UpperCamelCase_ ( self, A, A ):
'''simple docstring'''
p += self.N
SCREAMING_SNAKE_CASE : str = v
while p > 1:
SCREAMING_SNAKE_CASE : Union[str, Any] = p // 2
SCREAMING_SNAKE_CASE : List[str] = self.fn(self.st[p * 2], self.st[p * 2 + 1] )
def UpperCamelCase_ ( self, A, A ): # noqa: E741
'''simple docstring'''
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : str = l + self.N, r + self.N
SCREAMING_SNAKE_CASE : T | None = None
while l <= r:
if l % 2 == 1:
SCREAMING_SNAKE_CASE : Any = self.st[l] if res is None else self.fn(A, self.st[l] )
if r % 2 == 0:
SCREAMING_SNAKE_CASE : Optional[Any] = self.st[r] if res is None else self.fn(A, self.st[r] )
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : List[str] = (l + 1) // 2, (r - 1) // 2
return res
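# Implementation note: the tree is stored flat in self.st with the N leaves at
# indices [N, 2N) and internal nodes above them; update() climbs parent links
# (p // 2) and query(l, r) folds fn over the inclusive index range [l, r].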
if __name__ == "__main__":
from functools import reduce
UpperCamelCase_ = [1, 1_0, -2, 9, -3, 8, 4, -7, 5, 6, 1_1, -1_2]
UpperCamelCase_ = {
0: 7,
1: 2,
2: 6,
3: -1_4,
4: 5,
5: 4,
6: 7,
7: -1_0,
8: 9,
9: 1_0,
1_0: 1_2,
1_1: 1,
}
UpperCamelCase_ = SegmentTree(test_array, min)
UpperCamelCase_ = SegmentTree(test_array, max)
UpperCamelCase_ = SegmentTree(test_array, lambda a, b: a + b)
def lowercase__( ):
"""simple docstring"""
for i in range(len(__UpperCamelCase ) ):
for j in range(__UpperCamelCase ,len(__UpperCamelCase ) ):
SCREAMING_SNAKE_CASE : Dict = reduce(__UpperCamelCase ,test_array[i : j + 1] )
SCREAMING_SNAKE_CASE : Dict = reduce(__UpperCamelCase ,test_array[i : j + 1] )
SCREAMING_SNAKE_CASE : Dict = reduce(lambda __UpperCamelCase ,__UpperCamelCase : a + b ,test_array[i : j + 1] )
assert min_range == min_segment_tree.query(__UpperCamelCase ,__UpperCamelCase )
assert max_range == max_segment_tree.query(__UpperCamelCase ,__UpperCamelCase )
assert sum_range == sum_segment_tree.query(__UpperCamelCase ,__UpperCamelCase )
test_all_segments()
for index, value in test_updates.items():
UpperCamelCase_ = value
min_segment_tree.update(index, value)
max_segment_tree.update(index, value)
sum_segment_tree.update(index, value)
test_all_segments()
| 28 |
"""simple docstring"""
def __A ( a_ :Tuple , a_ :Union[str, Any] , a_ :int=False) -> List[str]:
if isinstance(a_ , a_) and isinstance(a_ , a_):
__a : List[str] = len(set_a.intersection(a_))
if alternative_union:
__a : List[str] = len(a_) + len(a_)
else:
__a : int = len(set_a.union(a_))
return intersection / union
if isinstance(a_ , (list, tuple)) and isinstance(a_ , (list, tuple)):
__a : Union[str, Any] = [element for element in set_a if element in set_b]
if alternative_union:
__a : Union[str, Any] = len(a_) + len(a_)
return len(a_) / union
else:
__a : List[Any] = set_a + [element for element in set_b if element not in set_a]
return len(a_) / len(a_)
return len(a_) / len(a_)
return None
if __name__ == "__main__":
A = {'''a''', '''b''', '''c''', '''d''', '''e'''}
A = {'''c''', '''d''', '''e''', '''f''', '''h''', '''i'''}
print(jaccard_similarity(set_a, set_b))
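# With the sets above, the intersection is {'c', 'd', 'e'} (3 elements) and the
# union holds 8 distinct elements, so this prints 0.375.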
| 52 | 0 |
"""simple docstring"""
from __future__ import annotations
def lowercase ( lowerCAmelCase__ ):
lowerCamelCase_ = str(lowerCAmelCase__ )
return n == n[::-1]
def lowercase ( lowerCAmelCase__ = 1_000_000 ):
lowerCamelCase_ = 0
for i in range(1 ,lowerCAmelCase__ ):
if is_palindrome(lowerCAmelCase__ ) and is_palindrome(bin(lowerCAmelCase__ ).split('''b''' )[1] ):
total += i
return total
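# Worked example: 585 is counted because it is palindromic both in base 10
# ("585") and in base 2 ("1001001001").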
if __name__ == "__main__":
print(solution(int(str(input().strip()))))
| 29 |
"""simple docstring"""
from __future__ import annotations
import os
from collections.abc import Mapping
A = tuple[int, int]
class __lowercase :
'''simple docstring'''
def __init__( self , _UpperCAmelCase , _UpperCAmelCase ):
__a : set[int] = vertices
__a : dict[EdgeT, int] = {
(min(_UpperCAmelCase ), max(_UpperCAmelCase )): weight for edge, weight in edges.items()
}
def _lowerCamelCase ( self , _UpperCAmelCase , _UpperCAmelCase ):
self.vertices.add(edge[0] )
self.vertices.add(edge[1] )
__a : Dict = weight
def _lowerCamelCase ( self ):
__a : Graph = Graph({min(self.vertices )} , {} )
__a : EdgeT
__a : int
__a : EdgeT
__a : int
while len(subgraph.vertices ) < len(self.vertices ):
__a : Any = max(self.edges.values() ) + 1
for edge, weight in self.edges.items():
if (edge[0] in subgraph.vertices) ^ (edge[1] in subgraph.vertices):
if weight < min_weight:
__a : List[str] = edge
__a : Optional[int] = weight
subgraph.add_edge(_UpperCAmelCase , _UpperCAmelCase )
return subgraph
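# This is the naive O(V * E) formulation of Prim's algorithm: each of the
# V - 1 growth steps rescans every edge for the cheapest one crossing the cut.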
def __A ( a_ :str = "p107_network.txt") -> int:
__a : str = os.path.abspath(os.path.dirname(a_))
__a : str = os.path.join(a_ , a_)
__a : dict[EdgeT, int] = {}
__a : list[str]
__a : int
__a : int
with open(a_) as f:
__a : Optional[int] = f.read().strip().split('''\n''')
__a : Dict = [line.split(''',''') for line in data]
for edgea in range(1 , len(a_)):
for edgea in range(a_):
            if adjacency_matrix[edgea][edgea] != "-":
                __a : Tuple = int(adjacency_matrix[edgea][edgea])
__a : Graph = Graph(set(range(len(a_))) , a_)
__a : Graph = graph.prims_algorithm()
__a : int = sum(graph.edges.values())
__a : int = sum(subgraph.edges.values())
return initial_total - optimal_total
if __name__ == "__main__":
print(F'{solution() = }')
| 52 | 0 |
import inspect
import logging
import os
import random
import shutil
import tempfile
import unittest
import pytest
import torch
from torch import nn
from torch.utils.data import DataLoader, TensorDataset
from accelerate import Accelerator
from accelerate.test_utils import execute_subprocess_async, require_cuda
from accelerate.utils import ProjectConfiguration, set_seed
__a = logging.getLogger(__name__)
def lowerCamelCase__ ( _lowercase=2 , _lowercase=3 , _lowercase=16 , _lowercase = 10 , _lowercase = 2 ):
'''simple docstring'''
def get_dataset(_lowercase ):
UpperCAmelCase_ : Optional[int] = torch.randn(batch_size * n_batches , 1 )
return TensorDataset(_lowercase , a * x + b + 0.1 * torch.randn(batch_size * n_batches , 1 ) )
UpperCAmelCase_ : Tuple = get_dataset(_lowercase )
UpperCAmelCase_ : List[Any] = get_dataset(_lowercase )
UpperCAmelCase_ : Optional[int] = DataLoader(_lowercase , shuffle=_lowercase , batch_size=_lowercase , num_workers=4 )
UpperCAmelCase_ : int = DataLoader(_lowercase , shuffle=_lowercase , batch_size=_lowercase , num_workers=4 )
return (train_dataloader, valid_dataloader)
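# The loaders above serve a synthetic linear-regression task: targets are
# a * x + b plus small Gaussian noise, which DummyModel (y = x * a + b) can fit.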
def lowerCamelCase__ ( _lowercase , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase=None ):
'''simple docstring'''
UpperCAmelCase_ : List[Any] = []
for epoch in range(_lowercase ):
# Train quickly
model.train()
for batch in dataloader:
UpperCAmelCase_, UpperCAmelCase_ : int = batch
UpperCAmelCase_ : List[Any] = model(_lowercase )
UpperCAmelCase_ : Dict = torch.nn.functional.mse_loss(_lowercase , _lowercase )
accelerator.backward(_lowercase )
optimizer.step()
optimizer.zero_grad()
rands.append(random.random() ) # Introduce some randomness
if scheduler is not None:
scheduler.step()
return rands
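# The returned random.random() draws act as a fingerprint of the RNG stream;
# the checkpointing tests below are meant to compare them (alongside model and
# optimizer state) to verify that save/load restores training exactly.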
class __a( nn.Module ):
"""simple docstring"""
def __init__( self ) -> Dict:
super().__init__()
UpperCAmelCase_ : List[str] = nn.Parameter(torch.randn(1 ) )
UpperCAmelCase_ : int = nn.Parameter(torch.randn(1 ) )
def a__ ( self ,_SCREAMING_SNAKE_CASE ) -> Optional[int]:
return x * self.a + self.b
class __a( unittest.TestCase ):
"""simple docstring"""
def a__ ( self ) -> Optional[int]:
with tempfile.TemporaryDirectory() as tmpdir:
set_seed(42 )
UpperCAmelCase_ : Tuple = DummyModel()
UpperCAmelCase_ : str = torch.optim.Adam(params=model.parameters() ,lr=1e-3 )
UpperCAmelCase_, UpperCAmelCase_ : str = dummy_dataloaders()
UpperCAmelCase_ : Any = ProjectConfiguration(total_limit=1 ,project_dir=_SCREAMING_SNAKE_CASE ,automatic_checkpoint_naming=_SCREAMING_SNAKE_CASE )
# Train baseline
UpperCAmelCase_ : str = Accelerator(project_config=_SCREAMING_SNAKE_CASE )
UpperCAmelCase_, UpperCAmelCase_, UpperCAmelCase_, UpperCAmelCase_ : int = accelerator.prepare(
_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE )
# Save initial
accelerator.save_state()
# Save second state
accelerator.save_state()
self.assertEqual(len(os.listdir(accelerator.project_dir ) ) ,1 )
def a__ ( self ) -> Any:
with tempfile.TemporaryDirectory() as tmpdir:
set_seed(42 )
UpperCAmelCase_ : Optional[int] = DummyModel()
UpperCAmelCase_ : Union[str, Any] = torch.optim.Adam(params=model.parameters() ,lr=1e-3 )
UpperCAmelCase_, UpperCAmelCase_ : Optional[Any] = dummy_dataloaders()
# Train baseline
UpperCAmelCase_ : Union[str, Any] = Accelerator()
UpperCAmelCase_, UpperCAmelCase_, UpperCAmelCase_, UpperCAmelCase_ : int = accelerator.prepare(
_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE )
# Save initial
UpperCAmelCase_ : Union[str, Any] = os.path.join(_SCREAMING_SNAKE_CASE ,'''initial''' )
accelerator.save_state(_SCREAMING_SNAKE_CASE )
((UpperCAmelCase_), (UpperCAmelCase_)) : Optional[Any] = model.a.item(), model.b.item()
UpperCAmelCase_ : List[Any] = optimizer.state_dict()
UpperCAmelCase_ : Optional[Any] = train(3 ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE )
((UpperCAmelCase_), (UpperCAmelCase_)) : int = model.a.item(), model.b.item()
UpperCAmelCase_ : Tuple = optimizer.state_dict()
# Train partially
set_seed(42 )
UpperCAmelCase_ : Tuple = DummyModel()
UpperCAmelCase_ : Dict = torch.optim.Adam(params=model.parameters() ,lr=1e-3 )
UpperCAmelCase_, UpperCAmelCase_ : List[Any] = dummy_dataloaders()
UpperCAmelCase_ : List[str] = Accelerator()
UpperCAmelCase_, UpperCAmelCase_, UpperCAmelCase_, UpperCAmelCase_ : Union[str, Any] = accelerator.prepare(
_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE )
accelerator.load_state(_SCREAMING_SNAKE_CASE )
((UpperCAmelCase_), (UpperCAmelCase_)) : Union[str, Any] = model.a.item(), model.b.item()
UpperCAmelCase_ : Any = optimizer.state_dict()
self.assertEqual(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE )
self.assertEqual(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE )
self.assertEqual(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : str = train(2 ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE )
# Save everything
UpperCAmelCase_ : Union[str, Any] = os.path.join(_SCREAMING_SNAKE_CASE ,'''checkpoint''' )
accelerator.save_state(_SCREAMING_SNAKE_CASE )
# Load everything back in and make sure all states work
accelerator.load_state(_SCREAMING_SNAKE_CASE )
test_rands += train(1 ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE )
((UpperCAmelCase_), (UpperCAmelCase_)) : Union[str, Any] = model.a.item(), model.b.item()
UpperCAmelCase_ : Any = optimizer.state_dict()
self.assertEqual(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE )
self.assertEqual(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE )
self.assertEqual(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE )
self.assertEqual(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE )
def a__ ( self ) -> Optional[int]:
with tempfile.TemporaryDirectory() as tmpdir:
set_seed(42 )
UpperCAmelCase_ : str = DummyModel()
UpperCAmelCase_ : List[str] = torch.optim.Adam(params=model.parameters() ,lr=1e-3 )
UpperCAmelCase_, UpperCAmelCase_ : int = dummy_dataloaders()
UpperCAmelCase_ : Union[str, Any] = ProjectConfiguration(automatic_checkpoint_naming=_SCREAMING_SNAKE_CASE )
# Train baseline
UpperCAmelCase_ : Optional[int] = Accelerator(project_dir=_SCREAMING_SNAKE_CASE ,project_config=_SCREAMING_SNAKE_CASE )
UpperCAmelCase_, UpperCAmelCase_, UpperCAmelCase_, UpperCAmelCase_ : str = accelerator.prepare(
_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE )
# Save initial
accelerator.save_state()
((UpperCAmelCase_), (UpperCAmelCase_)) : int = model.a.item(), model.b.item()
UpperCAmelCase_ : List[Any] = optimizer.state_dict()
UpperCAmelCase_ : int = train(3 ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE )
((UpperCAmelCase_), (UpperCAmelCase_)) : Optional[int] = model.a.item(), model.b.item()
UpperCAmelCase_ : List[str] = optimizer.state_dict()
# Train partially
set_seed(42 )
UpperCAmelCase_ : Optional[int] = DummyModel()
UpperCAmelCase_ : str = torch.optim.Adam(params=model.parameters() ,lr=1e-3 )
UpperCAmelCase_, UpperCAmelCase_ : Optional[Any] = dummy_dataloaders()
UpperCAmelCase_ : List[str] = ProjectConfiguration(iteration=1 ,automatic_checkpoint_naming=_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : str = Accelerator(project_dir=_SCREAMING_SNAKE_CASE ,project_config=_SCREAMING_SNAKE_CASE )
UpperCAmelCase_, UpperCAmelCase_, UpperCAmelCase_, UpperCAmelCase_ : Union[str, Any] = accelerator.prepare(
_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE )
accelerator.load_state(os.path.join(_SCREAMING_SNAKE_CASE ,'''checkpoints''' ,'''checkpoint_0''' ) )
((UpperCAmelCase_), (UpperCAmelCase_)) : Dict = model.a.item(), model.b.item()
UpperCAmelCase_ : List[Any] = optimizer.state_dict()
self.assertEqual(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE )
self.assertEqual(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE )
self.assertEqual(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : Dict = train(2 ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE )
# Save everything
accelerator.save_state()
# Load everything back in and make sure all states work
accelerator.load_state(os.path.join(_SCREAMING_SNAKE_CASE ,'''checkpoints''' ,'''checkpoint_1''' ) )
test_rands += train(1 ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE )
((UpperCAmelCase_), (UpperCAmelCase_)) : Optional[int] = model.a.item(), model.b.item()
UpperCAmelCase_ : str = optimizer.state_dict()
self.assertEqual(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE )
self.assertEqual(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE )
self.assertEqual(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE )
self.assertEqual(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE )
def a__ ( self ) -> Optional[int]:
UpperCAmelCase_ : str = torch.tensor([1, 2, 3] )
UpperCAmelCase_ : Any = torch.tensor([2, 3, 4] )
UpperCAmelCase_ : int = DummyModel()
UpperCAmelCase_ : Tuple = torch.optim.Adam(net.parameters() )
UpperCAmelCase_ : Optional[int] = Accelerator()
with self.assertRaises(_SCREAMING_SNAKE_CASE ) as ve:
accelerator.register_for_checkpointing(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : Optional[int] = str(ve.exception )
self.assertTrue('''Item at index 0''' in message )
self.assertTrue('''Item at index 1''' in message )
self.assertFalse('''Item at index 2''' in message )
self.assertFalse('''Item at index 3''' in message )
def a__ ( self ) -> List[Any]:
with tempfile.TemporaryDirectory() as tmpdir:
set_seed(42 )
UpperCAmelCase_ : Any = DummyModel()
UpperCAmelCase_ : Any = torch.optim.Adam(params=model.parameters() ,lr=1e-3 )
UpperCAmelCase_ : Union[str, Any] = torch.optim.lr_scheduler.StepLR(_SCREAMING_SNAKE_CASE ,step_size=1 ,gamma=0.99 )
UpperCAmelCase_, UpperCAmelCase_ : Tuple = dummy_dataloaders()
UpperCAmelCase_ : str = ProjectConfiguration(automatic_checkpoint_naming=_SCREAMING_SNAKE_CASE )
# Train baseline
UpperCAmelCase_ : List[str] = Accelerator(project_dir=_SCREAMING_SNAKE_CASE ,project_config=_SCREAMING_SNAKE_CASE )
UpperCAmelCase_, UpperCAmelCase_, UpperCAmelCase_, UpperCAmelCase_, UpperCAmelCase_ : List[Any] = accelerator.prepare(
_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE )
# Save initial
accelerator.save_state()
UpperCAmelCase_ : int = scheduler.state_dict()
train(3 ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE )
self.assertNotEqual(_SCREAMING_SNAKE_CASE ,scheduler.state_dict() )
# Load everything back in and make sure all states work
accelerator.load_state(os.path.join(_SCREAMING_SNAKE_CASE ,'''checkpoints''' ,'''checkpoint_0''' ) )
self.assertEqual(_SCREAMING_SNAKE_CASE ,scheduler.state_dict() )
def a__ ( self ) -> str:
with tempfile.TemporaryDirectory() as tmpdir:
set_seed(42 )
UpperCAmelCase_ : List[str] = DummyModel()
UpperCAmelCase_ : Dict = ProjectConfiguration(automatic_checkpoint_naming=_SCREAMING_SNAKE_CASE ,total_limit=2 )
# Train baseline
UpperCAmelCase_ : Tuple = Accelerator(project_dir=_SCREAMING_SNAKE_CASE ,project_config=_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : Union[str, Any] = accelerator.prepare(_SCREAMING_SNAKE_CASE )
        # Save 11 states; with total_limit=2 only the two most recent survive:
for _ in range(11 ):
accelerator.save_state()
self.assertTrue(not os.path.exists(os.path.join(_SCREAMING_SNAKE_CASE ,'''checkpoints''' ,'''checkpoint_0''' ) ) )
self.assertTrue(os.path.exists(os.path.join(_SCREAMING_SNAKE_CASE ,'''checkpoints''' ,'''checkpoint_9''' ) ) )
self.assertTrue(os.path.exists(os.path.join(_SCREAMING_SNAKE_CASE ,'''checkpoints''' ,'''checkpoint_10''' ) ) )
@require_cuda
def a__ ( self ) -> Optional[Any]:
UpperCAmelCase_ : List[Any] = ['''torchrun''', f'''--nproc_per_node={torch.cuda.device_count()}''', inspect.getfile(self.__class__ )]
execute_subprocess_async(_SCREAMING_SNAKE_CASE ,env=os.environ.copy() )
if __name__ == "__main__":
__a = '/tmp/accelerate/state_checkpointing'
__a = DummyModel()
__a = torch.optim.Adam(params=model.parameters(), lr=1E-3)
__a = torch.optim.lr_scheduler.StepLR(optimizer, step_size=1, gamma=0.99)
__a ,__a = dummy_dataloaders()
__a = ProjectConfiguration(automatic_checkpoint_naming=True)
# Train baseline
__a = Accelerator(project_dir=savedir, project_config=project_config, mixed_precision='no')
if accelerator.process_index == 0:
if os.path.exists(savedir):
shutil.rmtree(savedir)
os.makedirs(savedir)
__a ,__a ,__a ,__a ,__a = accelerator.prepare(
model, optimizer, train_dataloader, valid_dataloader, scheduler
)
__a ,__a = accelerator.prepare(model, optimizer)
train(3, model, train_dataloader, optimizer, accelerator, scheduler)
    # Check that the initial optimizer is loaded on the GPU
for group in optimizer.param_groups:
__a = group['params'][0].device
break
assert param_device.type == accelerator.device.type
__a = model.cpu()
accelerator.wait_for_everyone()
accelerator.save_state()
accelerator.wait_for_everyone()
# Check CPU state
accelerator.load_state(os.path.join(savedir, 'checkpoints', 'checkpoint_0'), map_location='cpu')
for group in optimizer.param_groups:
__a = group['params'][0].device
break
assert (
param_device.type == torch.device('cpu').type
), F"Loaded optimizer states did not match, expected to be loaded on the CPU but got {param_device}"
# Check device state
model.to(accelerator.device)
accelerator.load_state(os.path.join(savedir, 'checkpoints', 'checkpoint_0'), map_location='on_device')
for group in optimizer.param_groups:
__a = group['params'][0].device
break
assert (
param_device.type == accelerator.device.type
), F"Loaded optimizer states did not match, expected to be loaded on {accelerator.device} but got {param_device}"
# Check error
with pytest.raises(TypeError, match='Unsupported optimizer map location passed'):
accelerator.load_state(os.path.join(savedir, 'checkpoints', 'checkpoint_0'), map_location='invalid')
accelerator.wait_for_everyone()
if accelerator.process_index == 0:
shutil.rmtree(savedir)
accelerator.wait_for_everyone()
| 30 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
A = logging.get_logger(__name__)
A = {
'''microsoft/trocr-base-handwritten''': (
'''https://huggingface.co/microsoft/trocr-base-handwritten/resolve/main/config.json'''
),
# See all TrOCR models at https://huggingface.co/models?filter=trocr
}
class __lowercase ( _UpperCamelCase ):
'''simple docstring'''
__lowerCAmelCase = '''trocr'''
__lowerCAmelCase = ['''past_key_values''']
__lowerCAmelCase = {
'''num_attention_heads''': '''decoder_attention_heads''',
'''hidden_size''': '''d_model''',
'''num_hidden_layers''': '''decoder_layers''',
}
def __init__( self , _UpperCAmelCase=50265 , _UpperCAmelCase=1024 , _UpperCAmelCase=12 , _UpperCAmelCase=16 , _UpperCAmelCase=4096 , _UpperCAmelCase="gelu" , _UpperCAmelCase=512 , _UpperCAmelCase=0.1 , _UpperCAmelCase=0.0 , _UpperCAmelCase=0.0 , _UpperCAmelCase=2 , _UpperCAmelCase=0.0_2 , _UpperCAmelCase=0.0 , _UpperCAmelCase=True , _UpperCAmelCase=False , _UpperCAmelCase=True , _UpperCAmelCase=True , _UpperCAmelCase=1 , _UpperCAmelCase=0 , _UpperCAmelCase=2 , **_UpperCAmelCase , ):
__a : List[str] = vocab_size
__a : Optional[Any] = d_model
__a : Optional[Any] = decoder_layers
__a : Union[str, Any] = decoder_attention_heads
__a : int = decoder_ffn_dim
__a : List[Any] = activation_function
__a : Any = max_position_embeddings
__a : Dict = dropout
__a : List[Any] = attention_dropout
__a : Optional[Any] = activation_dropout
__a : str = init_std
__a : List[str] = decoder_layerdrop
__a : Union[str, Any] = use_cache
__a : Optional[Any] = scale_embedding
__a : List[Any] = use_learned_position_embeddings
__a : Optional[int] = layernorm_embedding
super().__init__(
pad_token_id=_UpperCAmelCase , bos_token_id=_UpperCAmelCase , eos_token_id=_UpperCAmelCase , decoder_start_token_id=_UpperCAmelCase , **_UpperCAmelCase , )
| 52 | 0 |
import argparse
import json
import os
import torch
from transformers.file_utils import has_file
from diffusers import UNetaDConditionModel, UNetaDModel
lowerCamelCase__ : Union[str, Any] = False
lowerCamelCase__ : List[str] = True
lowerCamelCase__ : Optional[Any] = False
if __name__ == "__main__":
lowerCamelCase__ : Optional[Any] = argparse.ArgumentParser()
parser.add_argument(
'--repo_path',
default=None,
type=str,
required=True,
help='The config json file corresponding to the architecture.',
)
parser.add_argument('--dump_path', default=None, type=str, required=True, help='Path to the output model.')
lowerCamelCase__ : Optional[Any] = parser.parse_args()
lowerCamelCase__ : List[Any] = {
'image_size': 'sample_size',
'num_res_blocks': 'layers_per_block',
'block_channels': 'block_out_channels',
'down_blocks': 'down_block_types',
'up_blocks': 'up_block_types',
'downscale_freq_shift': 'freq_shift',
'resnet_num_groups': 'norm_num_groups',
'resnet_act_fn': 'act_fn',
'resnet_eps': 'norm_eps',
'num_head_channels': 'attention_head_dim',
}
lowerCamelCase__ : str = {
'time_steps': 'time_proj',
'mid': 'mid_block',
'downsample_blocks': 'down_blocks',
'upsample_blocks': 'up_blocks',
}
lowerCamelCase__ : str = '' if has_file(args.repo_path, 'config.json') else 'unet'
with open(os.path.join(args.repo_path, subfolder, 'config.json'), 'r', encoding='utf-8') as reader:
lowerCamelCase__ : Optional[int] = reader.read()
lowerCamelCase__ : Optional[int] = json.loads(text)
if do_only_config:
for key in config_parameters_to_change.keys():
config.pop(key, None)
if has_file(args.repo_path, 'config.json'):
lowerCamelCase__ : Any = UNetaDModel(**config)
else:
lowerCamelCase__ : Optional[Any] = UNetaDConditionModel if 'ldm-text2im-large-256' in args.repo_path else UNetaDModel
lowerCamelCase__ : Optional[Any] = class_name(**config)
if do_only_config:
model.save_config(os.path.join(args.repo_path, subfolder))
lowerCamelCase__ : Optional[Any] = dict(model.config)
if do_only_renaming:
for key, value in config_parameters_to_change.items():
if key in config:
lowerCamelCase__ : List[str] = config[key]
del config[key]
lowerCamelCase__ : str = [k.replace('UNetRes', '') for k in config['down_block_types']]
lowerCamelCase__ : List[str] = [k.replace('UNetRes', '') for k in config['up_block_types']]
if do_only_weights:
lowerCamelCase__ : Optional[Any] = torch.load(os.path.join(args.repo_path, subfolder, 'diffusion_pytorch_model.bin'))
lowerCamelCase__ : Dict = {}
for param_key, param_value in state_dict.items():
if param_key.endswith('.op.bias') or param_key.endswith('.op.weight'):
continue
lowerCamelCase__ : int = False
for key, new_key in key_parameters_to_change.items():
if not has_changed and param_key.split('.')[0] == key:
lowerCamelCase__ : Dict = param_value
lowerCamelCase__ : str = True
if not has_changed:
lowerCamelCase__ : Dict = param_value
model.load_state_dict(new_state_dict)
model.save_pretrained(os.path.join(args.repo_path, subfolder))
| 31 |
"""simple docstring"""
import torch
from transformers import CamembertForMaskedLM, CamembertTokenizer
def __A ( a_ :Union[str, Any] , a_ :Union[str, Any] , a_ :Optional[Any] , a_ :Optional[int]=5) -> List[Any]:
# Adapted from https://github.com/pytorch/fairseq/blob/master/fairseq/models/roberta/hub_interface.py
assert masked_input.count('''<mask>''') == 1
__a : Optional[Any] = torch.tensor(tokenizer.encode(a_ , add_special_tokens=a_)).unsqueeze(0) # Batch size 1
__a : Dict = model(a_)[0] # The last hidden-state is the first element of the output tuple
__a : Tuple = (input_ids.squeeze() == tokenizer.mask_token_id).nonzero().item()
__a : Any = logits[0, masked_index, :]
__a : Any = logits.softmax(dim=0)
__a , __a : Optional[Any] = prob.topk(k=a_ , dim=0)
__a : Optional[int] = ''' '''.join(
[tokenizer.convert_ids_to_tokens(indices[i].item()) for i in range(len(a_))])
__a : List[str] = tokenizer.mask_token
__a : Optional[int] = []
for index, predicted_token_bpe in enumerate(topk_predicted_token_bpe.split(''' ''')):
__a : Optional[Any] = predicted_token_bpe.replace('''\u2581''' , ''' ''')
if " {0}".format(a_) in masked_input:
topk_filled_outputs.append(
(
masked_input.replace(''' {0}'''.format(a_) , a_),
values[index].item(),
predicted_token,
))
else:
topk_filled_outputs.append(
(
masked_input.replace(a_ , a_),
values[index].item(),
predicted_token,
))
return topk_filled_outputs
A = CamembertTokenizer.from_pretrained('''camembert-base''')
A = CamembertForMaskedLM.from_pretrained('''camembert-base''')
model.eval()
A = '''Le camembert est <mask> :)'''
print(fill_mask(masked_input, model, tokenizer, topk=3))
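# fill_mask returns a list of (completed_sentence, probability, predicted_token)
# triples for the top-k candidates, so this prints the three most likely
# completions of the masked sentence.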
| 52 | 0 |
import unittest
from transformers.testing_utils import require_bsa
from transformers.utils import is_bsa_available
from ...test_feature_extraction_common import FeatureExtractionSavingTestMixin
if is_bsa_available():
from transformers import MarkupLMFeatureExtractor
class __UpperCamelCase ( unittest.TestCase ):
def __init__( self , _UpperCamelCase ):
_UpperCAmelCase = parent
def UpperCamelCase( self ):
return {}
def A__ ( ) -> Union[str, Any]:
"""simple docstring"""
_UpperCAmelCase = '''<HTML>
<HEAD>
<TITLE>sample document</TITLE>
</HEAD>
<BODY BGCOLOR="FFFFFF">
<HR>
<a href="http://google.com">Goog</a>
<H1>This is one header</H1>
<H2>This is a another Header</H2>
<P>Travel from
<P>
<B>SFO to JFK</B>
<BR>
<B><I>on May 2, 2015 at 2:00 pm. For details go to confirm.com </I></B>
<HR>
<div style="color:#0000FF">
<h3>Traveler <b> name </b> is
<p> John Doe </p>
</div>'''
_UpperCAmelCase = '''
<!DOCTYPE html>
<html>
<body>
<h1>My First Heading</h1>
<p>My first paragraph.</p>
</body>
</html>
'''
return [html_string_a, html_string_a]
@require_bsa
class __UpperCamelCase ( A__ , unittest.TestCase ):
__A : Optional[int] = MarkupLMFeatureExtractor if is_bsa_available() else None
def UpperCamelCase( self ):
_UpperCAmelCase = MarkupLMFeatureExtractionTester(self )
@property
def UpperCamelCase( self ):
return self.feature_extract_tester.prepare_feat_extract_dict()
def UpperCamelCase( self ):
# Initialize feature_extractor
_UpperCAmelCase = self.feature_extraction_class()
# Test not batched input
_UpperCAmelCase = get_html_strings()[0]
_UpperCAmelCase = feature_extractor(_UpperCamelCase )
# fmt: off
_UpperCAmelCase = [['''sample document''', '''Goog''', '''This is one header''', '''This is a another Header''', '''Travel from''', '''SFO to JFK''', '''on May 2, 2015 at 2:00 pm. For details go to confirm.com''', '''Traveler''', '''name''', '''is''', '''John Doe''']]
_UpperCAmelCase = [['''/html/head/title''', '''/html/body/a''', '''/html/body/h1''', '''/html/body/h2''', '''/html/body/p''', '''/html/body/p/p/b[1]''', '''/html/body/p/p/b[2]/i''', '''/html/body/p/p/div/h3''', '''/html/body/p/p/div/h3/b''', '''/html/body/p/p/div/h3''', '''/html/body/p/p/div/h3/p''']]
# fmt: on
self.assertEqual(encoding.nodes , _UpperCamelCase )
self.assertEqual(encoding.xpaths , _UpperCamelCase )
# Test batched
_UpperCAmelCase = get_html_strings()
_UpperCAmelCase = feature_extractor(_UpperCamelCase )
# fmt: off
_UpperCAmelCase = expected_nodes + [['''My First Heading''', '''My first paragraph.''']]
_UpperCAmelCase = expected_xpaths + [['''/html/body/h1''', '''/html/body/p''']]
self.assertEqual(len(encoding.nodes ) , 2 )
self.assertEqual(len(encoding.xpaths ) , 2 )
self.assertEqual(encoding.nodes , _UpperCamelCase )
self.assertEqual(encoding.xpaths , _UpperCamelCase )
| 32 |
"""simple docstring"""
import unittest
from knapsack import greedy_knapsack as kp
class __lowercase ( unittest.TestCase ):
'''simple docstring'''
def _lowerCamelCase ( self ):
__a : Optional[int] = [10, 20, 30, 40, 50, 60]
__a : Union[str, Any] = [2, 4, 6, 8, 10, 12]
__a : List[str] = 100
self.assertEqual(kp.calc_profit(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) , 210 )
def _lowerCamelCase ( self ):
self.assertRaisesRegex(_UpperCAmelCase , '''max_weight must greater than zero.''' )
def _lowerCamelCase ( self ):
self.assertRaisesRegex(_UpperCAmelCase , '''Weight can not be negative.''' )
def _lowerCamelCase ( self ):
self.assertRaisesRegex(_UpperCAmelCase , '''Profit can not be negative.''' )
def _lowerCamelCase ( self ):
self.assertRaisesRegex(_UpperCAmelCase , '''max_weight must greater than zero.''' )
def _lowerCamelCase ( self ):
self.assertRaisesRegex(
_UpperCAmelCase , '''The length of profit and weight must be same.''' )
if __name__ == "__main__":
unittest.main()
| 52 | 0 |
import warnings
from ...utils import logging
from .image_processing_videomae import VideoMAEImageProcessor
lowerCamelCase__ : Union[str, Any] = logging.get_logger(__name__)
class __magic_name__ (snake_case_ ):
'''simple docstring'''
def __init__( self:Any , *_a:List[Any] , **_a:Any ):
warnings.warn(
'''The class VideoMAEFeatureExtractor is deprecated and will be removed in version 5 of Transformers.'''
''' Please use VideoMAEImageProcessor instead.''' , _a , )
super().__init__(*_a , **_a )
| 33 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
A = logging.get_logger(__name__)
A = {}
class __lowercase ( _UpperCamelCase ):
'''simple docstring'''
__lowerCAmelCase = '''llama'''
__lowerCAmelCase = ['''past_key_values''']
def __init__( self , _UpperCAmelCase=32000 , _UpperCAmelCase=4096 , _UpperCAmelCase=11008 , _UpperCAmelCase=32 , _UpperCAmelCase=32 , _UpperCAmelCase=None , _UpperCAmelCase="silu" , _UpperCAmelCase=2048 , _UpperCAmelCase=0.0_2 , _UpperCAmelCase=1e-6 , _UpperCAmelCase=True , _UpperCAmelCase=0 , _UpperCAmelCase=1 , _UpperCAmelCase=2 , _UpperCAmelCase=1 , _UpperCAmelCase=False , _UpperCAmelCase=None , **_UpperCAmelCase , ):
__a : Dict = vocab_size
__a : Union[str, Any] = max_position_embeddings
__a : str = hidden_size
__a : List[str] = intermediate_size
__a : Any = num_hidden_layers
__a : int = num_attention_heads
# for backward compatibility
if num_key_value_heads is None:
__a : Union[str, Any] = num_attention_heads
__a : Optional[int] = num_key_value_heads
__a : Dict = hidden_act
__a : Union[str, Any] = initializer_range
__a : int = rms_norm_eps
__a : Optional[int] = pretraining_tp
__a : Optional[Any] = use_cache
__a : Optional[Any] = rope_scaling
self._rope_scaling_validation()
super().__init__(
pad_token_id=_UpperCAmelCase , bos_token_id=_UpperCAmelCase , eos_token_id=_UpperCAmelCase , tie_word_embeddings=_UpperCAmelCase , **_UpperCAmelCase , )
def _lowerCamelCase ( self ):
if self.rope_scaling is None:
return
if not isinstance(self.rope_scaling , _UpperCAmelCase ) or len(self.rope_scaling ) != 2:
raise ValueError(
                '''`rope_scaling` must be a dictionary with two fields, `type` and `factor`, '''
f"""got {self.rope_scaling}""" )
__a : Tuple = self.rope_scaling.get('''type''' , _UpperCAmelCase )
__a : Optional[int] = self.rope_scaling.get('''factor''' , _UpperCAmelCase )
if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]:
raise ValueError(
f"""`rope_scaling`'s name field must be one of ['linear', 'dynamic'], got {rope_scaling_type}""" )
if rope_scaling_factor is None or not isinstance(_UpperCAmelCase , _UpperCAmelCase ) or rope_scaling_factor <= 1.0:
raise ValueError(f"""`rope_scaling`'s factor field must be an float > 1, got {rope_scaling_factor}""" )
| 52 | 0 |
"""simple docstring"""
import numpy as np
def __snake_case ( _lowercase ):
"""simple docstring"""
return 1 / (1 + np.exp(-vector ))
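# Illustrative values: the function is vectorised over numpy arrays and maps
# 0 to 0.5; on np.array([-1.0, 0.0, 1.0]) it yields roughly [0.2689, 0.5, 0.7311].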
if __name__ == "__main__":
import doctest
doctest.testmod()
| 34 |
"""simple docstring"""
import unittest
import numpy as np
from transformers.testing_utils import is_flaky, require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DonutImageProcessor
class __lowercase ( unittest.TestCase ):
'''simple docstring'''
def __init__( self , _UpperCAmelCase , _UpperCAmelCase=7 , _UpperCAmelCase=3 , _UpperCAmelCase=18 , _UpperCAmelCase=30 , _UpperCAmelCase=400 , _UpperCAmelCase=True , _UpperCAmelCase=None , _UpperCAmelCase=True , _UpperCAmelCase=False , _UpperCAmelCase=True , _UpperCAmelCase=True , _UpperCAmelCase=[0.5, 0.5, 0.5] , _UpperCAmelCase=[0.5, 0.5, 0.5] , ):
__a : int = parent
__a : str = batch_size
__a : List[Any] = num_channels
__a : Union[str, Any] = image_size
__a : List[Any] = min_resolution
__a : str = max_resolution
__a : List[str] = do_resize
__a : Optional[int] = size if size is not None else {'''height''': 18, '''width''': 20}
__a : str = do_thumbnail
__a : str = do_align_axis
__a : Dict = do_pad
__a : Union[str, Any] = do_normalize
__a : List[str] = image_mean
__a : Optional[int] = image_std
def _lowerCamelCase ( self ):
return {
"do_resize": self.do_resize,
"size": self.size,
"do_thumbnail": self.do_thumbnail,
"do_align_long_axis": self.do_align_axis,
"do_pad": self.do_pad,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
}
@require_torch
@require_vision
class __lowercase ( _UpperCamelCase , unittest.TestCase ):
'''simple docstring'''
__lowerCAmelCase = DonutImageProcessor if is_vision_available() else None
def _lowerCamelCase ( self ):
__a : Tuple = DonutImageProcessingTester(self )
@property
def _lowerCamelCase ( self ):
return self.image_processor_tester.prepare_image_processor_dict()
def _lowerCamelCase ( self ):
__a : List[str] = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(_UpperCAmelCase , '''do_resize''' ) )
self.assertTrue(hasattr(_UpperCAmelCase , '''size''' ) )
self.assertTrue(hasattr(_UpperCAmelCase , '''do_thumbnail''' ) )
self.assertTrue(hasattr(_UpperCAmelCase , '''do_align_long_axis''' ) )
self.assertTrue(hasattr(_UpperCAmelCase , '''do_pad''' ) )
self.assertTrue(hasattr(_UpperCAmelCase , '''do_normalize''' ) )
self.assertTrue(hasattr(_UpperCAmelCase , '''image_mean''' ) )
self.assertTrue(hasattr(_UpperCAmelCase , '''image_std''' ) )
def _lowerCamelCase ( self ):
__a : List[str] = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {'''height''': 18, '''width''': 20} )
__a : Tuple = self.image_processing_class.from_dict(self.image_processor_dict , size=42 )
self.assertEqual(image_processor.size , {'''height''': 42, '''width''': 42} )
# Previous config had dimensions in (width, height) order
__a : int = self.image_processing_class.from_dict(self.image_processor_dict , size=(42, 84) )
self.assertEqual(image_processor.size , {'''height''': 84, '''width''': 42} )
def _lowerCamelCase ( self ):
pass
    @is_flaky()
    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )
    @is_flaky()
    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )
    @is_flaky()
    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )
| 52 | 0 |
import importlib.util
import os
import platform
from argparse import ArgumentParser
import huggingface_hub
from .. import __version__ as version
from ..utils import (
is_accelerate_available,
is_flax_available,
is_safetensors_available,
is_tf_available,
is_torch_available,
)
from . import BaseTransformersCLICommand
def info_command_factory(_):
    return EnvironmentCommand()


def download_command_factory(args):
    return EnvironmentCommand(args.accelerate_config_file)
class EnvironmentCommand(BaseTransformersCLICommand):
    @staticmethod
    def register_subcommand(parser: ArgumentParser):
        download_parser = parser.add_parser("env")
        download_parser.set_defaults(func=info_command_factory)
        download_parser.add_argument(
            "--accelerate-config_file",
            default=None,
            help="The accelerate config file to use for the default values in the launching script.",
        )
        download_parser.set_defaults(func=download_command_factory)

    def __init__(self, accelerate_config_file, *args):
        self._accelerate_config_file = accelerate_config_file
    def run(self):
        safetensors_version = "not installed"
        if is_safetensors_available():
            import safetensors

            safetensors_version = safetensors.__version__
        elif importlib.util.find_spec("safetensors") is not None:
            import safetensors

            safetensors_version = f"{safetensors.__version__} but is ignored because of PyTorch version too old."

        accelerate_version = "not installed"
        accelerate_config = accelerate_config_str = "not found"
        if is_accelerate_available():
            import accelerate
            from accelerate.commands.config import default_config_file, load_config_from_file

            accelerate_version = accelerate.__version__
            # Get the default from the config file.
            if self._accelerate_config_file is not None or os.path.isfile(default_config_file):
                accelerate_config = load_config_from_file(self._accelerate_config_file).to_dict()

            accelerate_config_str = (
                "\n".join([f"\t- {prop}: {val}" for prop, val in accelerate_config.items()])
                if isinstance(accelerate_config, dict)
                else f"\t{accelerate_config}"
            )

        pt_version = "not installed"
        pt_cuda_available = "NA"
        if is_torch_available():
            import torch

            pt_version = torch.__version__
            pt_cuda_available = torch.cuda.is_available()

        tf_version = "not installed"
        tf_cuda_available = "NA"
        if is_tf_available():
            import tensorflow as tf

            tf_version = tf.__version__
            try:
                # deprecated in v2.1
                tf_cuda_available = tf.test.is_gpu_available()
            except AttributeError:
                # returns list of devices, convert to bool
                tf_cuda_available = bool(tf.config.list_physical_devices("GPU"))

        flax_version = "not installed"
        jax_version = "not installed"
        jaxlib_version = "not installed"
        jax_backend = "NA"
        if is_flax_available():
            import flax
            import jax
            import jaxlib

            flax_version = flax.__version__
            jax_version = jax.__version__
            jaxlib_version = jaxlib.__version__
            jax_backend = jax.lib.xla_bridge.get_backend().platform

        info = {
            "`transformers` version": version,
            "Platform": platform.platform(),
            "Python version": platform.python_version(),
            "Huggingface_hub version": huggingface_hub.__version__,
            "Safetensors version": f"{safetensors_version}",
            "Accelerate version": f"{accelerate_version}",
            "Accelerate config": f"{accelerate_config_str}",
            "PyTorch version (GPU?)": f"{pt_version} ({pt_cuda_available})",
            "Tensorflow version (GPU?)": f"{tf_version} ({tf_cuda_available})",
            "Flax version (CPU?/GPU?/TPU?)": f"{flax_version} ({jax_backend})",
            "Jax version": f"{jax_version}",
            "JaxLib version": f"{jaxlib_version}",
            "Using GPU in script?": "<fill in>",
            "Using distributed or parallel set-up in script?": "<fill in>",
        }

        print("\nCopy-and-paste the text below in your GitHub issue and FILL OUT the two last points.\n")
        print(self.format_dict(info))

        return info
    @staticmethod
    def format_dict(d):
        return "\n".join([f"- {prop}: {val}" for prop, val in d.items()]) + "\n"
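# Hypothetical manual invocation (not part of the original module): the
# `transformers-cli env` entry point normally builds an ArgumentParser, calls
# EnvironmentCommand.register_subcommand(parser), then runs args.func(args).run().
#
# if __name__ == "__main__":
#     EnvironmentCommand(None).run()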
| 35 |
"""simple docstring"""
from __future__ import annotations
def maximum_non_adjacent_sum(nums: list[int]) -> int:
    if not nums:
        return 0
    max_including = nums[0]
    max_excluding = 0
    for num in nums[1:]:
        max_including, max_excluding = (
            max_excluding + num,
            max(max_including, max_excluding),
        )
    return max(max_including, max_excluding)
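# Illustrative checks (values verified by hand; not in the original file):
#   maximum_non_adjacent_sum([1, 2, 3]) == 4               (take 1 and 3)
#   maximum_non_adjacent_sum([1, 5, 3, 7, 2, 2, 6]) == 18  (take 5, 7 and 6)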
if __name__ == "__main__":
import doctest
doctest.testmod()
| 52 | 0 |
from math import pi
def arc_length(angle: int, radius: int) -> float:
    return 2 * pi * radius * (angle / 360)
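# Sanity check: a 90 degree arc of a radius-10 circle is a quarter of the
# circumference 2 * pi * 10 ~= 62.83, so arc_length(90, 10) ~= 15.71.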
if __name__ == "__main__":
print(arc_length(90, 10))
| 36 |
"""simple docstring"""
import unittest
from transformers import BigBirdTokenizer, BigBirdTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, require_torch, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
SPIECE_UNDERLINE = "▁"

SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece.model")
@require_sentencepiece
@require_tokenizers
class BigBirdTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = BigBirdTokenizer
    rust_tokenizer_class = BigBirdTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True

    def setUp(self):
        super().setUp()

        tokenizer = self.tokenizer_class(SAMPLE_VOCAB, keep_accents=True)
        tokenizer.save_pretrained(self.tmpdirname)
def _lowerCamelCase ( self ):
__a : List[str] = '''<s>'''
__a : Tuple = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(_UpperCAmelCase ) , _UpperCAmelCase )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(_UpperCAmelCase ) , _UpperCAmelCase )
def _lowerCamelCase ( self ):
__a : Union[str, Any] = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , '''<unk>''' )
self.assertEqual(vocab_keys[1] , '''<s>''' )
self.assertEqual(vocab_keys[-1] , '''[MASK]''' )
self.assertEqual(len(_UpperCAmelCase ) , 1004 )
def _lowerCamelCase ( self ):
self.assertEqual(self.get_tokenizer().vocab_size , 1000 )
def _lowerCamelCase ( self ):
if not self.test_rust_tokenizer:
return
__a : Dict = self.get_tokenizer()
__a : Any = self.get_rust_tokenizer()
__a : int = '''I was born in 92000, and this is falsé.'''
__a : Optional[Any] = tokenizer.tokenize(_UpperCAmelCase )
__a : List[str] = rust_tokenizer.tokenize(_UpperCAmelCase )
self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase )
__a : Dict = tokenizer.encode(_UpperCAmelCase , add_special_tokens=_UpperCAmelCase )
__a : Any = rust_tokenizer.encode(_UpperCAmelCase , add_special_tokens=_UpperCAmelCase )
self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase )
__a : Tuple = self.get_rust_tokenizer()
__a : Tuple = tokenizer.encode(_UpperCAmelCase )
__a : List[Any] = rust_tokenizer.encode(_UpperCAmelCase )
self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase )
def _lowerCamelCase ( self ):
__a : Optional[int] = BigBirdTokenizer(_UpperCAmelCase , keep_accents=_UpperCAmelCase )
__a : Optional[int] = tokenizer.tokenize('''This is a test''' )
self.assertListEqual(_UpperCAmelCase , ['''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est'''] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(_UpperCAmelCase ) , [285, 46, 10, 170, 382] , )
__a : Optional[int] = tokenizer.tokenize('''I was born in 92000, and this is falsé.''' )
self.assertListEqual(
_UpperCAmelCase , [
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''9''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''é''',
'''.''',
] , )
__a : Optional[Any] = tokenizer.convert_tokens_to_ids(_UpperCAmelCase )
self.assertListEqual(
_UpperCAmelCase , [8, 21, 84, 55, 24, 19, 7, 0, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 0, 4] , )
__a : Optional[int] = tokenizer.convert_ids_to_tokens(_UpperCAmelCase )
self.assertListEqual(
_UpperCAmelCase , [
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''<unk>''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''<unk>''',
'''.''',
] , )
@cached_property
    def big_tokenizer(self):
        return BigBirdTokenizer.from_pretrained("google/bigbird-roberta-base")
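    # The @slow tests below exercise the real google/bigbird-roberta-base
    # checkpoint end to end rather than the local fixture vocabulary.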
@slow
    def test_tokenization_base_easy_symbols(self):
        symbols = "Hello World!"
        original_tokenizer_encodings = [65, 18536, 2260, 101, 66]

        self.assertListEqual(original_tokenizer_encodings, self.big_tokenizer.encode(symbols))
@slow
def _lowerCamelCase ( self ):
__a : Any = (
'''This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) " [ ] ! : - . Also we will'''
''' add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth'''
)
# fmt: off
__a : Optional[Any] = [65, 871, 419, 358, 946, 991, 2521, 452, 358, 1357, 387, 7751, 3536, 112, 985, 456, 126, 865, 938, 5400, 5734, 458, 1368, 467, 786, 2462, 5246, 1159, 633, 865, 4519, 457, 582, 852, 2557, 427, 916, 508, 405, 34324, 497, 391, 408, 11342, 1244, 385, 100, 938, 985, 456, 574, 362, 12597, 3200, 3129, 1172, 66] # noqa: E231
# fmt: on
self.assertListEqual(_UpperCAmelCase , self.big_tokenizer.encode(_UpperCAmelCase ) )
@require_torch
@slow
def _lowerCamelCase ( self ):
import torch
from transformers import BigBirdConfig, BigBirdModel
# Build sequence
__a : List[Any] = list(self.big_tokenizer.get_vocab().keys() )[:10]
__a : List[str] = ''' '''.join(_UpperCAmelCase )
__a : Tuple = self.big_tokenizer.encode_plus(_UpperCAmelCase , return_tensors='''pt''' , return_token_type_ids=_UpperCAmelCase )
__a : Any = self.big_tokenizer.batch_encode_plus(
[sequence + ''' ''' + sequence] , return_tensors='''pt''' , return_token_type_ids=_UpperCAmelCase )
__a : Optional[Any] = BigBirdConfig(attention_type='''original_full''' )
__a : Tuple = BigBirdModel(_UpperCAmelCase )
assert model.get_input_embeddings().weight.shape[0] >= self.big_tokenizer.vocab_size
with torch.no_grad():
model(**_UpperCAmelCase )
model(**_UpperCAmelCase )
@slow
def _lowerCamelCase ( self ):
__a : Union[str, Any] = BigBirdTokenizer.from_pretrained('''google/bigbird-roberta-base''' )
__a : List[Any] = tokenizer.decode(tokenizer('''Paris is the [MASK].''' ).input_ids )
self.assertTrue(decoded_text == '''[CLS] Paris is the[MASK].[SEP]''' )
@slow
def _lowerCamelCase ( self ):
# fmt: off
__a : Optional[Any] = {'''input_ids''': [[65, 39286, 458, 36335, 2001, 456, 13073, 13266, 455, 113, 7746, 1741, 11157, 391, 13073, 13266, 455, 113, 3967, 35412, 113, 4936, 109, 3870, 2377, 113, 30084, 45720, 458, 134, 17496, 112, 503, 11672, 113, 118, 112, 5665, 13347, 38687, 112, 1496, 31389, 112, 3268, 47264, 134, 962, 112, 16377, 8035, 23130, 430, 12169, 15518, 28592, 458, 146, 41697, 109, 391, 12169, 15518, 16689, 458, 146, 41358, 109, 452, 726, 4034, 111, 763, 35412, 5082, 388, 1903, 111, 9051, 391, 2870, 48918, 1900, 1123, 550, 998, 112, 9586, 15985, 455, 391, 410, 22955, 37636, 114, 66], [65, 448, 17496, 419, 3663, 385, 763, 113, 27533, 2870, 3283, 13043, 1639, 24713, 523, 656, 24013, 18550, 2521, 517, 27014, 21244, 420, 1212, 1465, 391, 927, 4833, 388, 578, 11786, 114, 66, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [65, 484, 2169, 7687, 21932, 18146, 726, 363, 17032, 3391, 114, 66, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=_UpperCAmelCase , model_name='''google/bigbird-roberta-base''' , revision='''215c99f1600e06f83acce68422f2035b2b5c3510''' , )
| 52 | 0 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

GPTSAN_JAPANESE_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "tanreinama/GPTSAN-2.8B-spout_is_uniform": (
        "https://huggingface.co/tanreinama/GPTSAN-2.8B-spout_is_uniform/resolve/main/config.json"
    ),
}
class GPTSanJapaneseConfig(PretrainedConfig):
    model_type = "gptsan-japanese"
    keys_to_ignore_at_inference = [
        "past_key_values",
    ]
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }
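    # GPTSAN-japanese stacks `num_switch_layers` Switch-Transformer (mixture-of-
    # experts) blocks followed by `num_ext_layers` dense "extra" blocks, so
    # `num_layers` below is the sum of the two.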
    def __init__(
        self, vocab_size=36_000, max_position_embeddings=1_280, d_model=1_024, d_ff=8_192,
        d_ext=4_096, d_spout=128, num_switch_layers=10, num_ext_layers=0, num_heads=16,
        num_experts=16, expert_capacity=128, dropout_rate=0.0, layer_norm_epsilon=1e-5,
        router_bias=False, router_jitter_noise=0.0, router_dtype="float32",
        router_ignore_padding_tokens=False, output_hidden_states=False, output_attentions=False,
        initializer_factor=0.002, output_router_logits=False, use_cache=True,
        separator_token_id=35_998, pad_token_id=35_995, eos_token_id=35_999, **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.d_ff = d_ff
        self.d_ext = d_ext
        self.d_spout = d_spout
        self.num_switch_layers = num_switch_layers
        self.num_ext_layers = num_ext_layers
        self.num_layers = num_switch_layers + num_ext_layers
        self.num_heads = num_heads
        self.num_experts = num_experts
        self.expert_capacity = expert_capacity
        self.dropout_rate = dropout_rate
        self.layer_norm_epsilon = layer_norm_epsilon
        self.router_bias = router_bias
        self.router_jitter_noise = router_jitter_noise
        self.router_dtype = router_dtype
        self.router_ignore_padding_tokens = router_ignore_padding_tokens
        self.output_hidden_states = output_hidden_states
        self.output_attentions = output_attentions
        self.initializer_factor = initializer_factor
        self.output_router_logits = output_router_logits
        self.use_cache = use_cache

        super().__init__(
            separator_token_id=separator_token_id, pad_token_id=pad_token_id, eos_token_id=eos_token_id, **kwargs,
        )
| 37 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
logger = logging.get_logger(__name__)

CONVNEXTV2_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/convnextv2-tiny-1k-224": "https://huggingface.co/facebook/convnextv2-tiny-1k-224/resolve/main/config.json",
}
class ConvNextV2Config(BackboneConfigMixin, PretrainedConfig):
    model_type = "convnextv2"

    def __init__(
        self, num_channels=3, patch_size=4, num_stages=4, hidden_sizes=None, depths=None,
        hidden_act="gelu", initializer_range=0.02, layer_norm_eps=1e-12, drop_path_rate=0.0,
        image_size=224, out_features=None, out_indices=None, **kwargs,
    ):
        super().__init__(**kwargs)

        self.num_channels = num_channels
        self.patch_size = patch_size
        self.num_stages = num_stages
        self.hidden_sizes = [96, 192, 384, 768] if hidden_sizes is None else hidden_sizes
        self.depths = [3, 3, 9, 3] if depths is None else depths
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.drop_path_rate = drop_path_rate
        self.image_size = image_size
        self.stage_names = ["stem"] + [f"stage{idx}" for idx in range(1, len(self.depths) + 1)]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names
        )
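# Illustrative usage (not part of the original file):
# config = ConvNextV2Config()
# config.stage_names == ["stem", "stage1", "stage2", "stage3", "stage4"]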
| 52 | 0 |
'''simple docstring'''
import math
import os
import unittest
from transformers import MegatronBertConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_PRETRAINING_MAPPING,
MegatronBertForCausalLM,
MegatronBertForMaskedLM,
MegatronBertForMultipleChoice,
MegatronBertForNextSentencePrediction,
MegatronBertForPreTraining,
MegatronBertForQuestionAnswering,
MegatronBertForSequenceClassification,
MegatronBertForTokenClassification,
MegatronBertModel,
)
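# The tester below drives a tiny randomly-initialized MegatronBERT through every
# task head so the shape checks stay cheap enough to run on CPU.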
class __snake_case :
'''simple docstring'''
def __init__( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=1_3 , __SCREAMING_SNAKE_CASE=7 , __SCREAMING_SNAKE_CASE=True , __SCREAMING_SNAKE_CASE=True , __SCREAMING_SNAKE_CASE=True , __SCREAMING_SNAKE_CASE=True , __SCREAMING_SNAKE_CASE=9_9 , __SCREAMING_SNAKE_CASE=6_4 , __SCREAMING_SNAKE_CASE=3_2 , __SCREAMING_SNAKE_CASE=5 , __SCREAMING_SNAKE_CASE=4 , __SCREAMING_SNAKE_CASE=3_7 , __SCREAMING_SNAKE_CASE="gelu" , __SCREAMING_SNAKE_CASE=0.1 , __SCREAMING_SNAKE_CASE=0.1 , __SCREAMING_SNAKE_CASE=5_1_2 , __SCREAMING_SNAKE_CASE=1_6 , __SCREAMING_SNAKE_CASE=2 , __SCREAMING_SNAKE_CASE=0.02 , __SCREAMING_SNAKE_CASE=3 , __SCREAMING_SNAKE_CASE=4 , __SCREAMING_SNAKE_CASE=None , ):
snake_case__ : Dict = parent
snake_case__ : Union[str, Any] = batch_size
snake_case__ : Any = seq_length
snake_case__ : int = is_training
snake_case__ : List[Any] = use_input_mask
snake_case__ : Dict = use_token_type_ids
snake_case__ : Optional[Any] = use_labels
snake_case__ : Optional[int] = vocab_size
snake_case__ : List[str] = hidden_size
snake_case__ : str = embedding_size
snake_case__ : Tuple = num_hidden_layers
snake_case__ : List[str] = num_attention_heads
snake_case__ : List[Any] = intermediate_size
snake_case__ : List[str] = hidden_act
snake_case__ : List[Any] = hidden_dropout_prob
snake_case__ : str = attention_probs_dropout_prob
snake_case__ : str = max_position_embeddings
snake_case__ : Union[str, Any] = type_vocab_size
snake_case__ : List[str] = type_sequence_label_size
snake_case__ : Union[str, Any] = initializer_range
snake_case__ : Tuple = num_labels
snake_case__ : List[Any] = num_choices
snake_case__ : Union[str, Any] = scope
def __UpperCamelCase ( self ):
snake_case__ : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
snake_case__ : Tuple = None
if self.use_input_mask:
snake_case__ : Tuple = random_attention_mask([self.batch_size, self.seq_length] )
snake_case__ : int = None
if self.use_token_type_ids:
snake_case__ : Dict = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
snake_case__ : List[Any] = None
snake_case__ : str = None
snake_case__ : Optional[int] = None
if self.use_labels:
snake_case__ : List[str] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
snake_case__ : str = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
snake_case__ : Dict = ids_tensor([self.batch_size] , self.num_choices )
snake_case__ : int = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def __UpperCamelCase ( self ):
return MegatronBertConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , embedding_size=self.embedding_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=__SCREAMING_SNAKE_CASE , initializer_range=self.initializer_range , )
def __UpperCamelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
snake_case__ : List[Any] = MegatronBertModel(config=__SCREAMING_SNAKE_CASE )
model.to(__SCREAMING_SNAKE_CASE )
model.eval()
snake_case__ : Optional[int] = model(__SCREAMING_SNAKE_CASE , attention_mask=__SCREAMING_SNAKE_CASE , token_type_ids=__SCREAMING_SNAKE_CASE )
snake_case__ : List[str] = model(__SCREAMING_SNAKE_CASE , token_type_ids=__SCREAMING_SNAKE_CASE )
snake_case__ : Tuple = model(__SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def __UpperCamelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
snake_case__ : str = MegatronBertForMaskedLM(config=__SCREAMING_SNAKE_CASE )
model.to(__SCREAMING_SNAKE_CASE )
model.eval()
snake_case__ : str = model(__SCREAMING_SNAKE_CASE , attention_mask=__SCREAMING_SNAKE_CASE , token_type_ids=__SCREAMING_SNAKE_CASE , labels=__SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def __UpperCamelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
snake_case__ : List[str] = MegatronBertForCausalLM(config=__SCREAMING_SNAKE_CASE )
model.to(__SCREAMING_SNAKE_CASE )
model.eval()
snake_case__ : Optional[int] = model(__SCREAMING_SNAKE_CASE , attention_mask=__SCREAMING_SNAKE_CASE , token_type_ids=__SCREAMING_SNAKE_CASE , labels=__SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def __UpperCamelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
snake_case__ : Optional[int] = MegatronBertForNextSentencePrediction(config=__SCREAMING_SNAKE_CASE )
model.to(__SCREAMING_SNAKE_CASE )
model.eval()
snake_case__ : Any = model(
__SCREAMING_SNAKE_CASE , attention_mask=__SCREAMING_SNAKE_CASE , token_type_ids=__SCREAMING_SNAKE_CASE , labels=__SCREAMING_SNAKE_CASE , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, 2) )
def __UpperCamelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
snake_case__ : Dict = MegatronBertForPreTraining(config=__SCREAMING_SNAKE_CASE )
model.to(__SCREAMING_SNAKE_CASE )
model.eval()
snake_case__ : Optional[int] = model(
__SCREAMING_SNAKE_CASE , attention_mask=__SCREAMING_SNAKE_CASE , token_type_ids=__SCREAMING_SNAKE_CASE , labels=__SCREAMING_SNAKE_CASE , next_sentence_label=__SCREAMING_SNAKE_CASE , )
self.parent.assertEqual(result.prediction_logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
self.parent.assertEqual(result.seq_relationship_logits.shape , (self.batch_size, 2) )
def __UpperCamelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
snake_case__ : Optional[Any] = MegatronBertForQuestionAnswering(config=__SCREAMING_SNAKE_CASE )
model.to(__SCREAMING_SNAKE_CASE )
model.eval()
snake_case__ : Tuple = model(
__SCREAMING_SNAKE_CASE , attention_mask=__SCREAMING_SNAKE_CASE , token_type_ids=__SCREAMING_SNAKE_CASE , start_positions=__SCREAMING_SNAKE_CASE , end_positions=__SCREAMING_SNAKE_CASE , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def __UpperCamelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
snake_case__ : int = self.num_labels
snake_case__ : Optional[int] = MegatronBertForSequenceClassification(__SCREAMING_SNAKE_CASE )
model.to(__SCREAMING_SNAKE_CASE )
model.eval()
snake_case__ : int = model(__SCREAMING_SNAKE_CASE , attention_mask=__SCREAMING_SNAKE_CASE , token_type_ids=__SCREAMING_SNAKE_CASE , labels=__SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def __UpperCamelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
snake_case__ : Optional[Any] = self.num_labels
snake_case__ : str = MegatronBertForTokenClassification(config=__SCREAMING_SNAKE_CASE )
model.to(__SCREAMING_SNAKE_CASE )
model.eval()
snake_case__ : Tuple = model(__SCREAMING_SNAKE_CASE , attention_mask=__SCREAMING_SNAKE_CASE , token_type_ids=__SCREAMING_SNAKE_CASE , labels=__SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def __UpperCamelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
snake_case__ : Any = self.num_choices
snake_case__ : Dict = MegatronBertForMultipleChoice(config=__SCREAMING_SNAKE_CASE )
model.to(__SCREAMING_SNAKE_CASE )
model.eval()
snake_case__ : Dict = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
snake_case__ : str = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
snake_case__ : List[str] = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
snake_case__ : Tuple = model(
__SCREAMING_SNAKE_CASE , attention_mask=__SCREAMING_SNAKE_CASE , token_type_ids=__SCREAMING_SNAKE_CASE , labels=__SCREAMING_SNAKE_CASE , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class __snake_case ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , unittest.TestCase ):
'''simple docstring'''
lowerCamelCase__ = (
(
MegatronBertModel,
MegatronBertForMaskedLM,
MegatronBertForCausalLM,
MegatronBertForMultipleChoice,
MegatronBertForNextSentencePrediction,
MegatronBertForPreTraining,
MegatronBertForQuestionAnswering,
MegatronBertForSequenceClassification,
MegatronBertForTokenClassification,
)
if is_torch_available()
else ()
)
lowerCamelCase__ = (
{
'''feature-extraction''': MegatronBertModel,
'''fill-mask''': MegatronBertForMaskedLM,
'''question-answering''': MegatronBertForQuestionAnswering,
'''text-classification''': MegatronBertForSequenceClassification,
'''text-generation''': MegatronBertForCausalLM,
'''token-classification''': MegatronBertForTokenClassification,
'''zero-shot''': MegatronBertForSequenceClassification,
}
if is_torch_available()
else {}
)
    fx_compatible = True
    # test_resize_embeddings = False
    test_head_masking = False
def __UpperCamelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=False ):
snake_case__ : List[Any] = super()._prepare_for_class(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , return_labels=__SCREAMING_SNAKE_CASE )
if return_labels:
if model_class in get_values(__SCREAMING_SNAKE_CASE ):
snake_case__ : str = torch.zeros(
(self.model_tester.batch_size, self.model_tester.seq_length) , dtype=torch.long , device=__SCREAMING_SNAKE_CASE )
snake_case__ : Tuple = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=__SCREAMING_SNAKE_CASE )
return inputs_dict
def __UpperCamelCase ( self ):
snake_case__ : Tuple = MegatronBertModelTester(self )
snake_case__ : List[str] = ConfigTester(self , config_class=__SCREAMING_SNAKE_CASE , hidden_size=3_7 )
def __UpperCamelCase ( self ):
self.config_tester.run_common_tests()
def __UpperCamelCase ( self ):
snake_case__ : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_model(*__SCREAMING_SNAKE_CASE )
def __UpperCamelCase ( self ):
snake_case__ : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_for_masked_lm(*__SCREAMING_SNAKE_CASE )
def __UpperCamelCase ( self ):
snake_case__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_for_multiple_choice(*__SCREAMING_SNAKE_CASE )
def __UpperCamelCase ( self ):
snake_case__ : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_for_next_sequence_prediction(*__SCREAMING_SNAKE_CASE )
def __UpperCamelCase ( self ):
snake_case__ : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_for_pretraining(*__SCREAMING_SNAKE_CASE )
def __UpperCamelCase ( self ):
snake_case__ : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_for_question_answering(*__SCREAMING_SNAKE_CASE )
def __UpperCamelCase ( self ):
snake_case__ : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_for_sequence_classification(*__SCREAMING_SNAKE_CASE )
def __UpperCamelCase ( self ):
snake_case__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_for_token_classification(*__SCREAMING_SNAKE_CASE )
def _long_tensor(tok_lst):
    return torch.tensor(
        tok_lst,
        dtype=torch.long,
        device=torch_device,
    )


TOLERANCE = 1e-4
@require_torch
@require_sentencepiece
@require_tokenizers
class __snake_case ( unittest.TestCase ):
'''simple docstring'''
@slow
@unittest.skip("""Model is not available.""" )
    def test_inference_no_head(self):
        directory = "nvidia/megatron-bert-uncased-345m"
        if "MYDIR" in os.environ:
            directory = os.path.join(os.environ["MYDIR"], directory)
        model = MegatronBertModel.from_pretrained(directory)
        model.to(torch_device)
        model.half()
        input_ids = _long_tensor([[101, 7110, 1005, 1056, 2023, 11333, 17413, 1029, 102]])
        with torch.no_grad():
            output = model(input_ids)[0]
        expected_shape = torch.Size((1, 9, 1024))
        self.assertEqual(output.shape, expected_shape)
        expected = [-0.6040, -0.2517, -0.1025, 0.3420, -0.6758, -0.0017, -0.1089, -0.1990, 0.5728]
        for ii in range(3):
            for jj in range(3):
                a = output[0, ii, jj]
                b = expected[3 * ii + jj]
                msg = "ii={} jj={} a={} b={}".format(ii, jj, a, b)
                self.assertTrue(math.isclose(a, b, rel_tol=TOLERANCE, abs_tol=TOLERANCE), msg=msg)
| 38 |
"""simple docstring"""
import torch
from diffusers import DDPMScheduler
from .test_schedulers import SchedulerCommonTest
class DDPMSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (DDPMScheduler,)
    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 1000,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
            "variance_type": "fixed_small",
            "clip_sample": True,
        }

        config.update(**kwargs)
        return config
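    # check_over_configs / check_over_forward come from SchedulerCommonTest and
    # re-run a reference scheduler step with the given config overrides.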
    def test_timesteps(self):
        for timesteps in [1, 5, 100, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_betas(self):
        for beta_start, beta_end in zip([0.0001, 0.001, 0.01, 0.1], [0.002, 0.02, 0.2, 2]):
            self.check_over_configs(beta_start=beta_start, beta_end=beta_end)

    def test_schedules(self):
        for schedule in ["linear", "squaredcos_cap_v2"]:
            self.check_over_configs(beta_schedule=schedule)

    def test_variance_type(self):
        for variance in ["fixed_small", "fixed_large", "other"]:
            self.check_over_configs(variance_type=variance)

    def test_clip_sample(self):
        for clip_sample in [True, False]:
            self.check_over_configs(clip_sample=clip_sample)

    def test_thresholding(self):
        self.check_over_configs(thresholding=False)
        for threshold in [0.5, 1.0, 2.0]:
            for prediction_type in ["epsilon", "sample", "v_prediction"]:
                self.check_over_configs(
                    thresholding=True,
                    prediction_type=prediction_type,
                    sample_max_value=threshold,
                )

    def test_prediction_type(self):
        for prediction_type in ["epsilon", "sample", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)

    def test_time_indices(self):
        for t in [0, 500, 999]:
            self.check_over_forward(time_step=t)
    def test_variance(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        assert torch.sum(torch.abs(scheduler._get_variance(0) - 0.0)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(487) - 0.00979)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(999) - 0.02)) < 1e-5
    def test_full_loop_no_noise(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        num_trained_timesteps = len(scheduler)

        model = self.dummy_model()
        sample = self.dummy_sample_deter
        generator = torch.manual_seed(0)

        for t in reversed(range(num_trained_timesteps)):
            # 1. predict noise residual
            residual = model(sample, t)

            # 2. predict previous mean of sample x_t-1
            pred_prev_sample = scheduler.step(residual, t, sample, generator=generator).prev_sample

            # if t > 0:
            #     noise = self.dummy_sample_deter
            #     variance = scheduler.get_variance(t) ** (0.5) * noise
            #
            # sample = pred_prev_sample + variance
            sample = pred_prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 258.9606) < 1e-2
        assert abs(result_mean.item() - 0.3372) < 1e-3
    def test_full_loop_with_v_prediction(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(prediction_type="v_prediction")
        scheduler = scheduler_class(**scheduler_config)

        num_trained_timesteps = len(scheduler)

        model = self.dummy_model()
        sample = self.dummy_sample_deter
        generator = torch.manual_seed(0)

        for t in reversed(range(num_trained_timesteps)):
            # 1. predict noise residual
            residual = model(sample, t)

            # 2. predict previous mean of sample x_t-1
            pred_prev_sample = scheduler.step(residual, t, sample, generator=generator).prev_sample

            # if t > 0:
            #     noise = self.dummy_sample_deter
            #     variance = scheduler.get_variance(t) ** (0.5) * noise
            #
            # sample = pred_prev_sample + variance
            sample = pred_prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 202.0296) < 1e-2
        assert abs(result_mean.item() - 0.2631) < 1e-3
    def test_custom_timesteps(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = [100, 87, 50, 1, 0]

        scheduler.set_timesteps(timesteps=timesteps)

        scheduler_timesteps = scheduler.timesteps

        for i, timestep in enumerate(scheduler_timesteps):
            if i == len(scheduler_timesteps) - 1:
                expected_prev_t = -1
            else:
                expected_prev_t = timesteps[i + 1]

            prev_t = scheduler.previous_timestep(timestep)
            prev_t = prev_t.item()

            self.assertEqual(prev_t, expected_prev_t)

    def test_custom_timesteps_increasing_order(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = [100, 87, 50, 51, 0]

        with self.assertRaises(ValueError, msg="`custom_timesteps` must be in descending order."):
            scheduler.set_timesteps(timesteps=timesteps)

    def test_custom_timesteps_passing_both_num_inference_steps_and_timesteps(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = [100, 87, 50, 1, 0]
        num_inference_steps = len(timesteps)

        with self.assertRaises(ValueError, msg="Can only pass one of `num_inference_steps` or `custom_timesteps`."):
            scheduler.set_timesteps(num_inference_steps=num_inference_steps, timesteps=timesteps)

    def test_custom_timesteps_too_large(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = [scheduler.config.num_train_timesteps]

        with self.assertRaises(
            ValueError,
            msg="`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}}",
        ):
            scheduler.set_timesteps(timesteps=timesteps)
| 52 | 0 |
from math import factorial
def solution(n: int = 20) -> int:
    n = 2 * n  # middle entry of odd rows starting at row 3 is the solution for n = 1,
    # 2, 3,...
    k = n // 2
    return int(factorial(n) / (factorial(k) * factorial(n - k)))
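# Sanity check: for a 2x2 grid there are C(4, 2) = 6 distinct corner-to-corner
# lattice routes, so solution(2) == 6.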
if __name__ == "__main__":
import sys
if len(sys.argv) == 1:
print(solution(20))
else:
try:
        n = int(sys.argv[1])
print(solution(n))
except ValueError:
print('''Invalid entry - please enter a number.''')
| 39 |
"""simple docstring"""
import itertools
import os
import random
import tempfile
import unittest
import numpy as np
from transformers import TvltFeatureExtractor, is_datasets_available
from transformers.testing_utils import check_json_file_has_correct_format, require_torch, require_torchaudio
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
if is_torch_available():
import torch
if is_datasets_available():
from datasets import load_dataset
global_rng = random.Random()


def floats_list(shape, scale=1.0, rng=None, name=None):
    """Creates a random float32 tensor"""
    if rng is None:
        rng = global_rng

    values = []
    for batch_idx in range(shape[0]):
        values.append([])
        for _ in range(shape[1]):
            values[-1].append(rng.random() * scale)

    return values
class TvltFeatureExtractionTester(unittest.TestCase):
    def __init__(
        self, parent, batch_size=7, min_seq_length=400, max_seq_length=2000,
        spectrogram_length=2048, feature_size=128, num_audio_channels=1,
        hop_length=512, chunk_length=30, sampling_rate=44100,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.min_seq_length = min_seq_length
        self.max_seq_length = max_seq_length
        self.seq_length_diff = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
        self.spectrogram_length = spectrogram_length
        self.feature_size = feature_size
        self.num_audio_channels = num_audio_channels
        self.hop_length = hop_length
        self.chunk_length = chunk_length
        self.sampling_rate = sampling_rate
    def prepare_feat_extract_dict(self):
return {
"spectrogram_length": self.spectrogram_length,
"feature_size": self.feature_size,
"num_audio_channels": self.num_audio_channels,
"hop_length": self.hop_length,
"chunk_length": self.chunk_length,
"sampling_rate": self.sampling_rate,
}
    def prepare_inputs_for_common(self, equal_length=False, numpify=False):
        def _flatten(list_of_lists):
            return list(itertools.chain(*list_of_lists))

        if equal_length:
            speech_inputs = [floats_list((self.max_seq_length, self.feature_size)) for _ in range(self.batch_size)]
        else:
            # make sure that inputs increase in size
            speech_inputs = [
                floats_list((x, self.feature_size))
                for x in range(self.min_seq_length, self.max_seq_length, self.seq_length_diff)
            ]
        if numpify:
            speech_inputs = [np.asarray(x) for x in speech_inputs]
        return speech_inputs
@require_torch
@require_torchaudio
class TvltFeatureExtractionTest(SequenceFeatureExtractionTestMixin, unittest.TestCase):
    feature_extraction_class = TvltFeatureExtractor

    def setUp(self):
        self.feat_extract_tester = TvltFeatureExtractionTester(self)
def _lowerCamelCase ( self ):
__a : int = self.feature_extraction_class(**self.feat_extract_dict )
self.assertTrue(hasattr(_UpperCAmelCase , '''spectrogram_length''' ) )
self.assertTrue(hasattr(_UpperCAmelCase , '''feature_size''' ) )
self.assertTrue(hasattr(_UpperCAmelCase , '''num_audio_channels''' ) )
self.assertTrue(hasattr(_UpperCAmelCase , '''hop_length''' ) )
self.assertTrue(hasattr(_UpperCAmelCase , '''chunk_length''' ) )
self.assertTrue(hasattr(_UpperCAmelCase , '''sampling_rate''' ) )
def _lowerCamelCase ( self ):
__a : Union[str, Any] = self.feature_extraction_class(**self.feat_extract_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
__a : List[str] = feat_extract_first.save_pretrained(_UpperCAmelCase )[0]
check_json_file_has_correct_format(_UpperCAmelCase )
__a : Union[str, Any] = self.feature_extraction_class.from_pretrained(_UpperCAmelCase )
__a : Tuple = feat_extract_first.to_dict()
__a : List[Any] = feat_extract_second.to_dict()
__a : int = dict_first.pop('''mel_filters''' )
__a : List[Any] = dict_second.pop('''mel_filters''' )
self.assertTrue(np.allclose(_UpperCAmelCase , _UpperCAmelCase ) )
self.assertEqual(_UpperCAmelCase , _UpperCAmelCase )
def _lowerCamelCase ( self ):
__a : List[str] = self.feature_extraction_class(**self.feat_extract_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
__a : int = os.path.join(_UpperCAmelCase , '''feat_extract.json''' )
feat_extract_first.to_json_file(_UpperCAmelCase )
__a : Optional[Any] = self.feature_extraction_class.from_json_file(_UpperCAmelCase )
__a : Optional[Any] = feat_extract_first.to_dict()
__a : Any = feat_extract_second.to_dict()
__a : Optional[Any] = dict_first.pop('''mel_filters''' )
__a : Dict = dict_second.pop('''mel_filters''' )
self.assertTrue(np.allclose(_UpperCAmelCase , _UpperCAmelCase ) )
self.assertEqual(_UpperCAmelCase , _UpperCAmelCase )
def _lowerCamelCase ( self ):
# Initialize feature_extractor
__a : str = self.feature_extraction_class(**self.feat_extract_dict )
# create three inputs of length 800, 1000, and 1200
__a : Any = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )]
__a : Union[str, Any] = [np.asarray(_UpperCAmelCase ) for speech_input in speech_inputs]
# Test not batched input
__a : List[str] = feature_extractor(np_speech_inputs[0] , return_tensors='''np''' , sampling_rate=44100 ).audio_values
self.assertTrue(encoded_audios.ndim == 4 )
self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size )
self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length )
self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels )
# Test batched
__a : int = feature_extractor(_UpperCAmelCase , return_tensors='''np''' , sampling_rate=44100 ).audio_values
self.assertTrue(encoded_audios.ndim == 4 )
self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size )
self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length )
self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels )
# Test audio masking
__a : List[Any] = feature_extractor(
_UpperCAmelCase , return_tensors='''np''' , sampling_rate=44100 , mask_audio=_UpperCAmelCase ).audio_values
self.assertTrue(encoded_audios.ndim == 4 )
self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size )
self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length )
self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels )
# Test 2-D numpy arrays are batched.
__a : str = [floats_list((1, x) )[0] for x in (800, 800, 800)]
__a : Any = np.asarray(_UpperCAmelCase )
__a : Optional[Any] = feature_extractor(_UpperCAmelCase , return_tensors='''np''' , sampling_rate=44100 ).audio_values
self.assertTrue(encoded_audios.ndim == 4 )
self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size )
self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length )
self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels )
    def _load_datasamples(self, num_samples):
        ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation")
        # automatic decoding with librispeech
        speech_samples = ds.sort("id").select(range(num_samples))[:num_samples]["audio"]

        return [x["array"] for x in speech_samples]
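    # Integration check below compares the extractor's log-mel output for the
    # first LibriSpeech dummy sample against reference values.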
    def test_integration(self):
        input_speech = self._load_datasamples(1)
        feature_extractor = TvltFeatureExtractor()
        audio_values = feature_extractor(input_speech, return_tensors="pt").audio_values

        self.assertEquals(audio_values.shape, (1, 1, 192, 128))

        expected_values = torch.tensor([[-0.3032, -0.2708], [-0.4434, -0.4007]])
        self.assertTrue(torch.allclose(audio_values[0, 0, :2, :2], expected_values, atol=1e-4))
| 52 | 0 |
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from tokenizers import processors
from ...tokenization_utils import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_nllb import NllbTokenizer
else:
    NllbTokenizer = None

logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''facebook/nllb-200-distilled-600M''': (
'''https://huggingface.co/facebook/nllb-200-distilled-600M/resolve/main/sentencepiece.bpe.model'''
),
},
'''tokenizer_file''': {
'''facebook/nllb-200-distilled-600M''': (
'''https://huggingface.co/facebook/nllb-200-distilled-600M/resolve/main/tokenizer.json'''
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'''facebook/nllb-large-en-ro''': 1_024,
'''facebook/nllb-200-distilled-600M''': 1_024,
}
# fmt: off
__UpperCAmelCase = ['''ace_Arab''', '''ace_Latn''', '''acm_Arab''', '''acq_Arab''', '''aeb_Arab''', '''afr_Latn''', '''ajp_Arab''', '''aka_Latn''', '''amh_Ethi''', '''apc_Arab''', '''arb_Arab''', '''ars_Arab''', '''ary_Arab''', '''arz_Arab''', '''asm_Beng''', '''ast_Latn''', '''awa_Deva''', '''ayr_Latn''', '''azb_Arab''', '''azj_Latn''', '''bak_Cyrl''', '''bam_Latn''', '''ban_Latn''', '''bel_Cyrl''', '''bem_Latn''', '''ben_Beng''', '''bho_Deva''', '''bjn_Arab''', '''bjn_Latn''', '''bod_Tibt''', '''bos_Latn''', '''bug_Latn''', '''bul_Cyrl''', '''cat_Latn''', '''ceb_Latn''', '''ces_Latn''', '''cjk_Latn''', '''ckb_Arab''', '''crh_Latn''', '''cym_Latn''', '''dan_Latn''', '''deu_Latn''', '''dik_Latn''', '''dyu_Latn''', '''dzo_Tibt''', '''ell_Grek''', '''eng_Latn''', '''epo_Latn''', '''est_Latn''', '''eus_Latn''', '''ewe_Latn''', '''fao_Latn''', '''pes_Arab''', '''fij_Latn''', '''fin_Latn''', '''fon_Latn''', '''fra_Latn''', '''fur_Latn''', '''fuv_Latn''', '''gla_Latn''', '''gle_Latn''', '''glg_Latn''', '''grn_Latn''', '''guj_Gujr''', '''hat_Latn''', '''hau_Latn''', '''heb_Hebr''', '''hin_Deva''', '''hne_Deva''', '''hrv_Latn''', '''hun_Latn''', '''hye_Armn''', '''ibo_Latn''', '''ilo_Latn''', '''ind_Latn''', '''isl_Latn''', '''ita_Latn''', '''jav_Latn''', '''jpn_Jpan''', '''kab_Latn''', '''kac_Latn''', '''kam_Latn''', '''kan_Knda''', '''kas_Arab''', '''kas_Deva''', '''kat_Geor''', '''knc_Arab''', '''knc_Latn''', '''kaz_Cyrl''', '''kbp_Latn''', '''kea_Latn''', '''khm_Khmr''', '''kik_Latn''', '''kin_Latn''', '''kir_Cyrl''', '''kmb_Latn''', '''kon_Latn''', '''kor_Hang''', '''kmr_Latn''', '''lao_Laoo''', '''lvs_Latn''', '''lij_Latn''', '''lim_Latn''', '''lin_Latn''', '''lit_Latn''', '''lmo_Latn''', '''ltg_Latn''', '''ltz_Latn''', '''lua_Latn''', '''lug_Latn''', '''luo_Latn''', '''lus_Latn''', '''mag_Deva''', '''mai_Deva''', '''mal_Mlym''', '''mar_Deva''', '''min_Latn''', '''mkd_Cyrl''', '''plt_Latn''', '''mlt_Latn''', '''mni_Beng''', '''khk_Cyrl''', '''mos_Latn''', '''mri_Latn''', '''zsm_Latn''', '''mya_Mymr''', '''nld_Latn''', '''nno_Latn''', '''nob_Latn''', '''npi_Deva''', '''nso_Latn''', '''nus_Latn''', '''nya_Latn''', '''oci_Latn''', '''gaz_Latn''', '''ory_Orya''', '''pag_Latn''', '''pan_Guru''', '''pap_Latn''', '''pol_Latn''', '''por_Latn''', '''prs_Arab''', '''pbt_Arab''', '''quy_Latn''', '''ron_Latn''', '''run_Latn''', '''rus_Cyrl''', '''sag_Latn''', '''san_Deva''', '''sat_Beng''', '''scn_Latn''', '''shn_Mymr''', '''sin_Sinh''', '''slk_Latn''', '''slv_Latn''', '''smo_Latn''', '''sna_Latn''', '''snd_Arab''', '''som_Latn''', '''sot_Latn''', '''spa_Latn''', '''als_Latn''', '''srd_Latn''', '''srp_Cyrl''', '''ssw_Latn''', '''sun_Latn''', '''swe_Latn''', '''swh_Latn''', '''szl_Latn''', '''tam_Taml''', '''tat_Cyrl''', '''tel_Telu''', '''tgk_Cyrl''', '''tgl_Latn''', '''tha_Thai''', '''tir_Ethi''', '''taq_Latn''', '''taq_Tfng''', '''tpi_Latn''', '''tsn_Latn''', '''tso_Latn''', '''tuk_Latn''', '''tum_Latn''', '''tur_Latn''', '''twi_Latn''', '''tzm_Tfng''', '''uig_Arab''', '''ukr_Cyrl''', '''umb_Latn''', '''urd_Arab''', '''uzn_Latn''', '''vec_Latn''', '''vie_Latn''', '''war_Latn''', '''wol_Latn''', '''xho_Latn''', '''ydd_Hebr''', '''yor_Latn''', '''yue_Hant''', '''zho_Hans''', '''zho_Hant''', '''zul_Latn''']
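# fmt: on

FAIRSEQ_LANGUAGE_CODES = __UpperCAmelCase  # bind the code list above to the name the class references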
class NllbTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = NllbTokenizer

    prefix_tokens: List[int] = []
    suffix_tokens: List[int] = []
    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        src_lang=None,
        tgt_lang=None,
        additional_special_tokens=None,
        legacy_behaviour=False,
        **kwargs,
    ):
        # Mask token behaves like a normal word, i.e. includes the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token
        self.legacy_behaviour = legacy_behaviour
        super().__init__(
            vocab_file=vocab_file,
            tokenizer_file=tokenizer_file,
            bos_token=bos_token,
            eos_token=eos_token,
            sep_token=sep_token,
            cls_token=cls_token,
            unk_token=unk_token,
            pad_token=pad_token,
            mask_token=mask_token,
            src_lang=src_lang,
            tgt_lang=tgt_lang,
            additional_special_tokens=additional_special_tokens,
            legacy_behaviour=legacy_behaviour,
            **kwargs,
        )

        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True

        _additional_special_tokens = FAIRSEQ_LANGUAGE_CODES.copy()
        if additional_special_tokens is not None:
            # Only add those special tokens if they are not already there.
            _additional_special_tokens.extend(
                [t for t in additional_special_tokens if t not in _additional_special_tokens])
        self.add_special_tokens({"additional_special_tokens": _additional_special_tokens})

        self.lang_code_to_id = {
            lang_code: self.convert_tokens_to_ids(lang_code) for lang_code in FAIRSEQ_LANGUAGE_CODES
        }
        self._src_lang = src_lang if src_lang is not None else "eng_Latn"
        self.cur_lang_code = self.convert_tokens_to_ids(self._src_lang)
        self.tgt_lang = tgt_lang
        self.set_src_lang_special_tokens(self._src_lang)

    @property
    def src_lang(self) -> str:
        return self._src_lang

    @src_lang.setter
    def src_lang(self, new_src_lang: str) -> None:
        self._src_lang = new_src_lang
        self.set_src_lang_special_tokens(self._src_lang)

    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        if token_ids_1 is None:
            return self.prefix_tokens + token_ids_0 + self.suffix_tokens
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return self.prefix_tokens + token_ids_0 + token_ids_1 + self.suffix_tokens

    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    def _build_translation_inputs(self, raw_inputs, return_tensors, src_lang, tgt_lang, **extra_kwargs):
        if src_lang is None or tgt_lang is None:
            raise ValueError("Translation requires a `src_lang` and a `tgt_lang` for this model")
        self.src_lang = src_lang
        inputs = self(raw_inputs, add_special_tokens=True, return_tensors=return_tensors, **extra_kwargs)
        tgt_lang_id = self.convert_tokens_to_ids(tgt_lang)
        inputs["forced_bos_token_id"] = tgt_lang_id
        return inputs

    def prepare_seq2seq_batch(
        self, src_texts, src_lang="eng_Latn", tgt_texts=None, tgt_lang="fra_Latn", **kwargs
    ) -> BatchEncoding:
        self.src_lang = src_lang
        self.tgt_lang = tgt_lang
        return super().prepare_seq2seq_batch(src_texts, tgt_texts, **kwargs)

    def _switch_to_input_mode(self):
        return self.set_src_lang_special_tokens(self.src_lang)

    def _switch_to_target_mode(self):
        return self.set_tgt_lang_special_tokens(self.tgt_lang)

    def set_src_lang_special_tokens(self, src_lang) -> None:
        self.cur_lang_code = self.convert_tokens_to_ids(src_lang)
        if self.legacy_behaviour:
            self.prefix_tokens = []
            self.suffix_tokens = [self.eos_token_id, self.cur_lang_code]
        else:
            self.prefix_tokens = [self.cur_lang_code]
            self.suffix_tokens = [self.eos_token_id]
        prefix_tokens_str = self.convert_ids_to_tokens(self.prefix_tokens)
        suffix_tokens_str = self.convert_ids_to_tokens(self.suffix_tokens)
        self._tokenizer.post_processor = processors.TemplateProcessing(
            single=prefix_tokens_str + ["$A"] + suffix_tokens_str,
            pair=prefix_tokens_str + ["$A", "$B"] + suffix_tokens_str,
            special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str, self.prefix_tokens + self.suffix_tokens)),
        )

    def set_tgt_lang_special_tokens(self, lang) -> None:
        self.cur_lang_code = self.convert_tokens_to_ids(lang)
        if self.legacy_behaviour:
            self.prefix_tokens = []
            self.suffix_tokens = [self.eos_token_id, self.cur_lang_code]
        else:
            self.prefix_tokens = [self.cur_lang_code]
            self.suffix_tokens = [self.eos_token_id]
        prefix_tokens_str = self.convert_ids_to_tokens(self.prefix_tokens)
        suffix_tokens_str = self.convert_ids_to_tokens(self.suffix_tokens)
        self._tokenizer.post_processor = processors.TemplateProcessing(
            single=prefix_tokens_str + ["$A"] + suffix_tokens_str,
            pair=prefix_tokens_str + ["$A", "$B"] + suffix_tokens_str,
            special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str, self.prefix_tokens + self.suffix_tokens)),
        )

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
                "tokenizer.")
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory.")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"])
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        return (out_vocab_file,)
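

# Usage sketch for the class above (assumes the public
# "facebook/nllb-200-distilled-600M" checkpoint; the language codes come from
# FAIRSEQ_LANGUAGE_CODES).
if __name__ == "__main__":
    from transformers import NllbTokenizerFast

    tok = NllbTokenizerFast.from_pretrained(
        "facebook/nllb-200-distilled-600M", src_lang="eng_Latn", tgt_lang="fra_Latn"
    )
    encoded = tok("Hello world")
    # Default (non-legacy) behaviour: the source language code leads, </s> closes.
    print(tok.convert_ids_to_tokens(encoded["input_ids"]))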
| 40 |
"""simple docstring"""
from __future__ import annotations
class __lowercase :
'''simple docstring'''
def __init__( self , _UpperCAmelCase , _UpperCAmelCase ):
__a , __a : List[Any] = text, pattern
__a , __a : Tuple = len(_UpperCAmelCase ), len(_UpperCAmelCase )
def _lowerCamelCase ( self , _UpperCAmelCase ):
for i in range(self.patLen - 1 , -1 , -1 ):
if char == self.pattern[i]:
return i
return -1
def _lowerCamelCase ( self , _UpperCAmelCase ):
for i in range(self.patLen - 1 , -1 , -1 ):
if self.pattern[i] != self.text[current_pos + i]:
return current_pos + i
return -1
def _lowerCamelCase ( self ):
# searches pattern in text and returns index positions
__a : Dict = []
for i in range(self.textLen - self.patLen + 1 ):
__a : List[str] = self.mismatch_in_text(_UpperCAmelCase )
if mismatch_index == -1:
positions.append(_UpperCAmelCase )
else:
__a : Tuple = self.match_in_pattern(self.text[mismatch_index] )
__a : Optional[int] = (
mismatch_index - match_index
) # shifting index lgtm [py/multiple-definition]
return positions
A = '''ABAABA'''
A = '''AB'''
A = BoyerMooreSearch(text, pattern)
A = bms.bad_character_heuristic()
if len(positions) == 0:
print('''No match found''')
else:
print('''Pattern found in following positions: ''')
print(positions)
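
# Illustrative self-check (the brute-force helper below is not part of the
# original module): positions from the heuristic should match a plain scan.
def brute_force_positions(text: str, pattern: str) -> list[int]:
    return [i for i in range(len(text) - len(pattern) + 1) if text.startswith(pattern, i)]


assert BoyerMooreSearch("ABAABA", "AB").bad_character_heuristic() == brute_force_positions("ABAABA", "AB")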
| 52 | 0 |
"""
Solve Coulomb's law for the one unknown among force, the two charges and the
distance: F = k * |q1 * q2| / d^2. Exactly one argument must be 0.
"""
from __future__ import annotations

COULOMBS_CONSTANT = 8.988e9  # units = N * m^2 * C^-2


def coulombs_law(force: float, charge1: float, charge2: float, distance: float) -> dict[str, float]:
    charge_product = abs(charge1 * charge2)
    if (force, charge1, charge2, distance).count(0) != 1:
        raise ValueError("One and only one argument must be 0")
    if distance < 0:
        raise ValueError("Distance cannot be negative")
    if force == 0:
        force = COULOMBS_CONSTANT * charge_product / (distance**2)
        return {"force": force}
    elif charge1 == 0:
        charge1 = abs(force) * (distance**2) / (COULOMBS_CONSTANT * charge2)
        return {"charge1": charge1}
    elif charge2 == 0:
        charge2 = abs(force) * (distance**2) / (COULOMBS_CONSTANT * charge1)
        return {"charge2": charge2}
    elif distance == 0:
        distance = (COULOMBS_CONSTANT * charge_product / abs(force)) ** 0.5
        return {"distance": distance}
    raise ValueError("Exactly one argument must be 0")


if __name__ == "__main__":
    import doctest

    doctest.testmod()
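
# Worked example: two 1 C charges one metre apart feel a force equal to
# Coulomb's constant, about 8.988e9 N.
if __name__ == "__main__":
    print(coulombs_law(force=0, charge1=1, charge2=1, distance=1))  # {'force': 8988000000.0}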
| 41 |
"""simple docstring"""
import webbrowser
from sys import argv
from urllib.parse import parse_qs, quote
import requests
from bsa import BeautifulSoup
from fake_useragent import UserAgent
if __name__ == "__main__":
A = '''%20'''.join(argv[1:]) if len(argv) > 1 else quote(str(input('''Search: ''')))
print('''Googling.....''')
A = F'https://www.google.com/search?q={query}&num=100'
A = requests.get(
url,
headers={'''User-Agent''': str(UserAgent().random)},
)
try:
A = (
BeautifulSoup(res.text, '''html.parser''')
.find('''div''', attrs={'''class''': '''yuRUbf'''})
.find('''a''')
.get('''href''')
)
except AttributeError:
A = parse_qs(
BeautifulSoup(res.text, '''html.parser''')
.find('''div''', attrs={'''class''': '''kCrYT'''})
.find('''a''')
.get('''href''')
)['''url'''][0]
webbrowser.open(link)
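

# The class names "yuRUbf" and "kCrYT" above track Google's current result
# markup and silently break when it changes. A small defensive sketch that
# degrades to None instead of raising (it mirrors the two fallbacks above;
# the selectors remain assumptions about Google's markup):
def first_result_link(html: str):
    """Return the first organic result href found in a results page, or None."""
    soup = BeautifulSoup(html, "html.parser")
    for div_class in ("yuRUbf", "kCrYT"):
        div = soup.find("div", attrs={"class": div_class})
        if div is not None and div.find("a") is not None:
            href = div.find("a").get("href")
            if div_class == "kCrYT":
                return parse_qs(href).get("url", [href])[0]
            return href
    return None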
| 52 | 0 |
'''simple docstring'''
import os
import shutil
import tempfile
import unittest
import numpy as np
from transformers import AutoTokenizer, BarkProcessor
from transformers.testing_utils import require_torch, slow
@require_torch
class BarkProcessorTest(unittest.TestCase):
    def setUp(self):
        self.checkpoint = "ylacombe/bark-small"
        self.tmpdirname = tempfile.mkdtemp()
        self.voice_preset = "en_speaker_1"
        self.input_string = "This is a test string"
        self.speaker_embeddings_dict_path = "speaker_embeddings_path.json"
        self.speaker_embeddings_directory = "speaker_embeddings"

    def get_tokenizer(self, **kwargs):
        return AutoTokenizer.from_pretrained(self.checkpoint, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def test_save_load_pretrained_default(self):
        tokenizer = self.get_tokenizer()
        processor = BarkProcessor(tokenizer=tokenizer)

        processor.save_pretrained(self.tmpdirname)
        processor = BarkProcessor.from_pretrained(self.tmpdirname)

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer.get_vocab())

    @slow
    def test_save_load_pretrained_additional_features(self):
        processor = BarkProcessor.from_pretrained(
            pretrained_processor_name_or_path=self.checkpoint,
            speaker_embeddings_dict_path=self.speaker_embeddings_dict_path,
        )
        processor.save_pretrained(
            self.tmpdirname,
            speaker_embeddings_dict_path=self.speaker_embeddings_dict_path,
            speaker_embeddings_directory=self.speaker_embeddings_directory,
        )

        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")
        processor = BarkProcessor.from_pretrained(
            self.tmpdirname,
            self.speaker_embeddings_dict_path,
            bos_token="(BOS)",
            eos_token="(EOS)",
        )

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())

    def test_speaker_embeddings(self):
        processor = BarkProcessor.from_pretrained(
            pretrained_processor_name_or_path=self.checkpoint,
            speaker_embeddings_dict_path=self.speaker_embeddings_dict_path,
        )

        seq_len = 35
        nb_codebooks_coarse = 2
        nb_codebooks_total = 8
        voice_preset = {
            "semantic_prompt": np.ones(seq_len),
            "coarse_prompt": np.ones((nb_codebooks_coarse, seq_len)),
            "fine_prompt": np.ones((nb_codebooks_total, seq_len)),
        }

        # test providing already loaded voice_preset
        inputs = processor(text=self.input_string, voice_preset=voice_preset)
        processed_voice_preset = inputs["history_prompt"]
        for key in voice_preset:
            self.assertListEqual(voice_preset[key].tolist(), processed_voice_preset.get(key, np.array([])).tolist())

        # test loading voice preset from npz file
        tmpfilename = os.path.join(self.tmpdirname, "file.npz")
        np.savez(tmpfilename, **voice_preset)
        inputs = processor(text=self.input_string, voice_preset=tmpfilename)
        processed_voice_preset = inputs["history_prompt"]
        for key in voice_preset:
            self.assertListEqual(voice_preset[key].tolist(), processed_voice_preset.get(key, np.array([])).tolist())

        # test loading voice preset from the hub
        inputs = processor(text=self.input_string, voice_preset=self.voice_preset)

    def test_tokenizer(self):
        tokenizer = self.get_tokenizer()
        processor = BarkProcessor(tokenizer=tokenizer)

        encoded_processor = processor(text=self.input_string)
        encoded_tok = tokenizer(
            self.input_string,
            padding="max_length",
            max_length=256,
            add_special_tokens=False,
            return_attention_mask=True,
            return_token_type_ids=False,
        )

        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key].squeeze().tolist())
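

# End-to-end sketch of the pipeline these tests cover: turning text plus a named
# voice preset into generation inputs. "suno/bark-small" and "v2/en_speaker_6"
# are assumed public checkpoint/preset names on the Hub.
if __name__ == "__main__":
    processor = BarkProcessor.from_pretrained("suno/bark-small")
    inputs = processor(text="Hello, my dog is cute", voice_preset="v2/en_speaker_6")
    print(inputs["input_ids"].shape, sorted(inputs["history_prompt"].keys()))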
| 42 |
"""simple docstring"""
import inspect
import os
import unittest
from dataclasses import dataclass
import torch
from accelerate import Accelerator, DistributedDataParallelKwargs, GradScalerKwargs
from accelerate.state import AcceleratorState
from accelerate.test_utils import execute_subprocess_async, require_cuda, require_multi_gpu
from accelerate.utils import KwargsHandler
@dataclass
class MockClass(KwargsHandler):
    a: int = 0
    b: bool = False
    c: float = 3.0


class KwargsHandlerTester(unittest.TestCase):
    def test_kwargs_handler(self):
        # If no defaults are changed, `to_kwargs` returns an empty dict.
        self.assertDictEqual(MockClass().to_kwargs(), {})
        self.assertDictEqual(MockClass(a=2).to_kwargs(), {"a": 2})
        self.assertDictEqual(MockClass(a=2, b=True).to_kwargs(), {"a": 2, "b": True})
        self.assertDictEqual(MockClass(a=2, c=2.25).to_kwargs(), {"a": 2, "c": 2.25})

    @require_cuda
    def test_grad_scaler_kwargs(self):
        # If no defaults are changed, `to_kwargs` returns an empty dict.
        scaler_handler = GradScalerKwargs(init_scale=1024, growth_factor=2)
        AcceleratorState._reset_state()
        accelerator = Accelerator(mixed_precision="fp16", kwargs_handlers=[scaler_handler])
        print(accelerator.use_fp16)
        scaler = accelerator.scaler

        # Check the kwargs have been applied
        self.assertEqual(scaler._init_scale, 1024.0)
        self.assertEqual(scaler._growth_factor, 2.0)

        # Check the other values are at the default
        self.assertEqual(scaler._backoff_factor, 0.5)
        self.assertEqual(scaler._growth_interval, 2000)
        self.assertEqual(scaler._enabled, True)

    @require_multi_gpu
    def test_ddp_kwargs(self):
        cmd = ["torchrun", f"--nproc_per_node={torch.cuda.device_count()}", inspect.getfile(self.__class__)]
        execute_subprocess_async(cmd, env=os.environ.copy())


if __name__ == "__main__":
    ddp_scaler = DistributedDataParallelKwargs(bucket_cap_mb=15, find_unused_parameters=True)
    accelerator = Accelerator(kwargs_handlers=[ddp_scaler])
    model = torch.nn.Linear(100, 200)
    model = accelerator.prepare(model)

    # Check the values changed in kwargs
    error_msg = ""
    observed_bucket_cap_map = model.bucket_bytes_cap // (1_024 * 1_024)
    if observed_bucket_cap_map != 15:
        error_msg += f"Kwargs badly passed, should have `15` but found {observed_bucket_cap_map}.\n"
    if model.find_unused_parameters is not True:
        error_msg += f"Kwargs badly passed, should have `True` but found {model.find_unused_parameters}.\n"

    # Check the values of the defaults
    if model.dim != 0:
        error_msg += f"Default value not respected, should have `0` but found {model.dim}.\n"
    if model.broadcast_buffers is not True:
        error_msg += f"Default value not respected, should have `True` but found {model.broadcast_buffers}.\n"
    if model.gradient_as_bucket_view is not False:
        error_msg += f"Default value not respected, should have `False` but found {model.gradient_as_bucket_view}.\n"

    # Raise error at the end to make sure we don't stop at the first failure.
    if len(error_msg) > 0:
        raise ValueError(error_msg)
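
# Minimal illustration of the `to_kwargs` contract exercised above: only fields
# that differ from the dataclass defaults are emitted, so untouched defaults can
# never clobber downstream library defaults.
_demo_handler = MockClass(a=2, c=2.25)
assert _demo_handler.to_kwargs() == {"a": 2, "c": 2.25}  # `b` kept its default, so it is omitted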
| 52 | 0 |
from ...utils import logging
from ..t5.modeling_tf_t5 import TFT5EncoderModel, TFT5ForConditionalGeneration, TFT5Model
from .configuration_mt5 import MT5Config


logger = logging.get_logger(__name__)

_CONFIG_FOR_DOC = "T5Config"


class TFMT5Model(TFT5Model):
    model_type = "mt5"
    config_class = MT5Config


class TFMT5ForConditionalGeneration(TFT5ForConditionalGeneration):
    model_type = "mt5"
    config_class = MT5Config


class TFMT5EncoderModel(TFT5EncoderModel):
    model_type = "mt5"
    config_class = MT5Config
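

# Usage sketch: the wrappers above change nothing but the config class, so they
# load and run exactly like TF-T5. Assumes the public "google/mt5-small"
# checkpoint is available.
if __name__ == "__main__":
    from transformers import AutoTokenizer

    tokenizer = AutoTokenizer.from_pretrained("google/mt5-small")
    model = TFMT5ForConditionalGeneration.from_pretrained("google/mt5-small")
    article = "UN Offizier sagt, dass weiter verhandelt werden muss in Syrien."
    summary = "Weiter Verhandlung in Syrien."
    inputs = tokenizer(article, return_tensors="tf")
    labels = tokenizer(summary, return_tensors="tf").input_ids
    outputs = model(inputs.input_ids, labels=labels)
    print(outputs.loss)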
| 43 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_import_structure = {
    "configuration_tapas": ["TAPAS_PRETRAINED_CONFIG_ARCHIVE_MAP", "TapasConfig"],
    "tokenization_tapas": ["TapasTokenizer"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tapas"] = [
        "TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TapasForMaskedLM",
        "TapasForQuestionAnswering",
        "TapasForSequenceClassification",
        "TapasModel",
        "TapasPreTrainedModel",
        "load_tf_weights_in_tapas",
    ]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_tapas"] = [
        "TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFTapasForMaskedLM",
        "TFTapasForQuestionAnswering",
        "TFTapasForSequenceClassification",
        "TFTapasModel",
        "TFTapasPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_tapas import TAPAS_PRETRAINED_CONFIG_ARCHIVE_MAP, TapasConfig
from .tokenization_tapas import TapasTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tapas import (
TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST,
TapasForMaskedLM,
TapasForQuestionAnswering,
TapasForSequenceClassification,
TapasModel,
TapasPreTrainedModel,
load_tf_weights_in_tapas,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_tapas import (
TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST,
TFTapasForMaskedLM,
TFTapasForQuestionAnswering,
TFTapasForSequenceClassification,
TFTapasModel,
TFTapasPreTrainedModel,
)
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
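

# The `_LazyModule` above defers the heavy torch/TF imports until an attribute
# is first touched. A self-contained sketch of the same idea via PEP 562's
# module `__getattr__` (the helper below is illustrative, not part of
# transformers, and assumes the named submodules are importable):
import importlib
import types


def make_lazy_module(name, import_structure):
    """Build a module object that imports a symbol's submodule on first access."""
    module = types.ModuleType(name)
    attr_to_submodule = {attr: sub for sub, attrs in import_structure.items() for attr in attrs}

    def __getattr__(attr):  # PEP 562: consulted when normal lookup fails
        submodule = attr_to_submodule.get(attr)
        if submodule is None:
            raise AttributeError(f"module {name!r} has no attribute {attr!r}")
        return getattr(importlib.import_module(f"{name}.{submodule}"), attr)

    module.__getattr__ = __getattr__
    return module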
| 52 | 0 |
'''simple docstring'''
from typing import Dict, List, Optional, Union
import numpy as np
from .feature_extraction_utils import BatchFeature, FeatureExtractionMixin
from .utils import PaddingStrategy, TensorType, is_tf_tensor, is_torch_tensor, logging, to_numpy
logger = logging.get_logger(__name__)


class SequenceFeatureExtractor(FeatureExtractionMixin):
    def __init__(self, feature_size: int, sampling_rate: int, padding_value: float, **kwargs):
        self.feature_size = feature_size
        self.sampling_rate = sampling_rate
        self.padding_value = padding_value

        self.padding_side = kwargs.pop("padding_side", "right")
        self.return_attention_mask = kwargs.pop("return_attention_mask", True)

        super().__init__(**kwargs)
def lowerCamelCase_ ( self : Dict,__A : Union[
BatchFeature,
List[BatchFeature],
Dict[str, BatchFeature],
Dict[str, List[BatchFeature]],
List[Dict[str, BatchFeature]],
],__A : Union[bool, str, PaddingStrategy] = True,__A : Optional[int] = None,__A : bool = False,__A : Optional[int] = None,__A : Optional[bool] = None,__A : Optional[Union[str, TensorType]] = None,):
# If we have a list of dicts, let's convert it in a dict of lists
# We do this to allow using this method as a collate_fn function in PyTorch Dataloader
if isinstance(__A,(list, tuple) ) and isinstance(processed_features[0],(dict, BatchFeature) ):
_lowerCamelCase : Optional[Any] = {
key: [example[key] for example in processed_features] for key in processed_features[0].keys()
}
# The model's main input name, usually `input_values`, has be passed for padding
if self.model_input_names[0] not in processed_features:
raise ValueError(
"You should supply an instance of `transformers.BatchFeature` or list of `transformers.BatchFeature`"
f' to this method that includes {self.model_input_names[0]}, but you provided'
f' {list(processed_features.keys() )}' )
_lowerCamelCase : Any = processed_features[self.model_input_names[0]]
_lowerCamelCase : int = (
return_attention_mask if return_attention_mask is not None else self.return_attention_mask
)
if len(__A ) == 0:
if return_attention_mask:
_lowerCamelCase : Tuple = []
return processed_features
# If we have PyTorch/TF tensors or lists as inputs, we cast them as Numpy arrays
# and rebuild them afterwards if no return_tensors is specified
# Note that we lose the specific device the tensor may be on for PyTorch
_lowerCamelCase : Union[str, Any] = required_input[0]
if isinstance(__A,(list, tuple) ):
# first_element might be an empty list/tuple in some edge cases so we grab the first non empty element.
_lowerCamelCase : Optional[int] = 0
while len(required_input[index] ) == 0:
index += 1
if index < len(__A ):
_lowerCamelCase : Optional[Any] = required_input[index][0]
if return_tensors is None:
if is_tf_tensor(__A ):
_lowerCamelCase : Union[str, Any] = "tf"
elif is_torch_tensor(__A ):
_lowerCamelCase : str = "pt"
elif isinstance(__A,(int, float, list, tuple, np.ndarray) ):
_lowerCamelCase : Tuple = "np"
else:
raise ValueError(
f'type of {first_element} unknown: {type(__A )}. '
"Should be one of a python, numpy, pytorch or tensorflow object." )
for key, value in processed_features.items():
if isinstance(value[0],(int, float) ):
_lowerCamelCase : List[str] = to_numpy(__A )
else:
_lowerCamelCase : str = [to_numpy(__A ) for v in value]
# Convert padding_strategy in PaddingStrategy
_lowerCamelCase : int = self._get_padding_strategies(padding=__A,max_length=__A )
_lowerCamelCase : Optional[Any] = processed_features[self.model_input_names[0]]
_lowerCamelCase : Union[str, Any] = len(__A )
if not all(len(__A ) == batch_size for v in processed_features.values() ):
raise ValueError("Some items in the output dictionary have a different batch size than others." )
_lowerCamelCase : Optional[Any] = []
for i in range(__A ):
_lowerCamelCase : Dict = {k: v[i] for k, v in processed_features.items()}
# truncation
_lowerCamelCase : Optional[int] = self._truncate(
__A,max_length=__A,pad_to_multiple_of=__A,truncation=__A,)
truncated_inputs.append(__A )
if padding_strategy == PaddingStrategy.LONGEST:
# make sure that `max_length` cannot be longer than the longest truncated length
_lowerCamelCase : str = max(len(input_slice[self.model_input_names[0]] ) for input_slice in truncated_inputs )
_lowerCamelCase : Any = PaddingStrategy.MAX_LENGTH
_lowerCamelCase : str = {}
for i in range(__A ):
# padding
_lowerCamelCase : Tuple = self._pad(
truncated_inputs[i],max_length=__A,padding_strategy=__A,pad_to_multiple_of=__A,return_attention_mask=__A,)
for key, value in outputs.items():
if key not in batch_outputs:
_lowerCamelCase : List[Any] = []
if value.dtype is np.dtype(np.floataa ):
_lowerCamelCase : List[Any] = value.astype(np.floataa )
batch_outputs[key].append(__A )
return BatchFeature(__A,tensor_type=__A )
def lowerCamelCase_ ( self : List[Any],__A : Union[Dict[str, np.ndarray], BatchFeature],__A : Optional[int] = None,__A : PaddingStrategy = PaddingStrategy.DO_NOT_PAD,__A : Optional[int] = None,__A : Optional[bool] = None,):
_lowerCamelCase : List[str] = processed_features[self.model_input_names[0]]
if padding_strategy == PaddingStrategy.LONGEST:
_lowerCamelCase : List[Any] = len(__A )
if max_length is not None and pad_to_multiple_of is not None and (max_length % pad_to_multiple_of != 0):
_lowerCamelCase : str = ((max_length // pad_to_multiple_of) + 1) * pad_to_multiple_of
_lowerCamelCase : Dict = padding_strategy != PaddingStrategy.DO_NOT_PAD and len(__A ) < max_length
if return_attention_mask and "attention_mask" not in processed_features:
_lowerCamelCase : Tuple = np.ones(len(__A ),dtype=np.intaa )
if needs_to_be_padded:
_lowerCamelCase : List[Any] = max_length - len(__A )
if self.padding_side == "right":
if return_attention_mask:
_lowerCamelCase : Tuple = np.pad(
processed_features["attention_mask"],(0, difference) )
_lowerCamelCase : Dict = ((0, difference), (0, 0)) if self.feature_size > 1 else (0, difference)
_lowerCamelCase : Dict = np.pad(
__A,__A,"constant",constant_values=self.padding_value )
elif self.padding_side == "left":
if return_attention_mask:
_lowerCamelCase : int = np.pad(
processed_features["attention_mask"],(difference, 0) )
_lowerCamelCase : Union[str, Any] = ((difference, 0), (0, 0)) if self.feature_size > 1 else (difference, 0)
_lowerCamelCase : Union[str, Any] = np.pad(
__A,__A,"constant",constant_values=self.padding_value )
else:
raise ValueError("Invalid padding strategy:" + str(self.padding_side ) )
return processed_features
def lowerCamelCase_ ( self : int,__A : Union[Dict[str, np.ndarray], BatchFeature],__A : Optional[int] = None,__A : Optional[int] = None,__A : Optional[bool] = None,):
if not truncation:
return processed_features
elif truncation and max_length is None:
raise ValueError("When setting ``truncation=True``, make sure that ``max_length`` is defined." )
_lowerCamelCase : int = processed_features[self.model_input_names[0]]
# find `max_length` that fits `pad_to_multiple_of`
if max_length is not None and pad_to_multiple_of is not None and (max_length % pad_to_multiple_of != 0):
_lowerCamelCase : List[Any] = ((max_length // pad_to_multiple_of) + 1) * pad_to_multiple_of
_lowerCamelCase : Optional[Any] = len(__A ) > max_length
if needs_to_be_truncated:
_lowerCamelCase : List[str] = processed_features[self.model_input_names[0]][:max_length]
if "attention_mask" in processed_features:
_lowerCamelCase : Optional[int] = processed_features["attention_mask"][:max_length]
return processed_features
def lowerCamelCase_ ( self : int,__A : List[str]=False,__A : Optional[int]=None ):
# Get padding strategy
if padding is not False:
if padding is True:
_lowerCamelCase : Tuple = PaddingStrategy.LONGEST # Default to pad to the longest sequence in the batch
elif not isinstance(__A,__A ):
_lowerCamelCase : Optional[Any] = PaddingStrategy(__A )
elif isinstance(__A,__A ):
_lowerCamelCase : Tuple = padding
else:
_lowerCamelCase : List[Any] = PaddingStrategy.DO_NOT_PAD
# Set max length if needed
if max_length is None:
if padding_strategy == PaddingStrategy.MAX_LENGTH:
raise ValueError(
f'When setting ``padding={PaddingStrategy.MAX_LENGTH}``, make sure that max_length is defined' )
# Test if we have a padding value
if padding_strategy != PaddingStrategy.DO_NOT_PAD and (self.padding_value is None):
raise ValueError(
"Asking to pad but the feature_extractor does not have a padding value. Please select a value to use"
" as `padding_value`. For example: `feature_extractor.padding_value = 0.0`." )
return padding_strategy
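

# A minimal numeric sketch of what `pad` produces in the default right-padding
# case (reusing the `numpy` import at the top of this file): ragged 1-D features
# are padded to a common length with the padding value, and an attention mask
# marks the real positions.
if __name__ == "__main__":
    feats = [np.array([0.1, 0.2, 0.3]), np.array([0.4])]
    max_len = max(len(f) for f in feats)
    padded = np.stack([np.pad(f, (0, max_len - len(f)), constant_values=0.0) for f in feats])
    mask = np.stack([np.pad(np.ones(len(f), dtype=np.int32), (0, max_len - len(f))) for f in feats])
    # padded -> [[0.1, 0.2, 0.3], [0.4, 0.0, 0.0]]; mask -> [[1, 1, 1], [1, 0, 0]]
    print(padded, mask, sep="\n")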
| 44 |
"""simple docstring"""
import os
import re
import sys
import traceback
import warnings
from pathlib import Path
from typing import Dict, Optional, Union
from uuid import uuida
from huggingface_hub import HfFolder, ModelCard, ModelCardData, hf_hub_download, whoami
from huggingface_hub.file_download import REGEX_COMMIT_HASH
from huggingface_hub.utils import (
EntryNotFoundError,
RepositoryNotFoundError,
RevisionNotFoundError,
is_jinja_available,
)
from packaging import version
from requests import HTTPError
from .. import __version__
from .constants import (
DEPRECATED_REVISION_ARGS,
DIFFUSERS_CACHE,
HUGGINGFACE_CO_RESOLVE_ENDPOINT,
SAFETENSORS_WEIGHTS_NAME,
WEIGHTS_NAME,
)
from .import_utils import (
ENV_VARS_TRUE_VALUES,
_flax_version,
_jax_version,
_onnxruntime_version,
_torch_version,
is_flax_available,
is_onnx_available,
is_torch_available,
)
from .logging import get_logger
A = get_logger(__name__)
A = Path(__file__).parent / '''model_card_template.md'''
A = uuida().hex
A = os.getenv('''HF_HUB_OFFLINE''', '''''').upper() in ENV_VARS_TRUE_VALUES
A = os.getenv('''DISABLE_TELEMETRY''', '''''').upper() in ENV_VARS_TRUE_VALUES
A = HUGGINGFACE_CO_RESOLVE_ENDPOINT + '''/api/telemetry/'''
def __A ( a_ :Union[Dict, str, None] = None) -> str:
__a : Union[str, Any] = F"""diffusers/{__version__}; python/{sys.version.split()[0]}; session_id/{SESSION_ID}"""
if DISABLE_TELEMETRY or HF_HUB_OFFLINE:
return ua + "; telemetry/off"
if is_torch_available():
ua += F"""; torch/{_torch_version}"""
if is_flax_available():
ua += F"""; jax/{_jax_version}"""
ua += F"""; flax/{_flax_version}"""
if is_onnx_available():
ua += F"""; onnxruntime/{_onnxruntime_version}"""
# CI will set this value to True
if os.environ.get('''DIFFUSERS_IS_CI''' , '''''').upper() in ENV_VARS_TRUE_VALUES:
ua += "; is_ci/true"
if isinstance(a_ , a_):
ua += "; " + "; ".join(F"""{k}/{v}""" for k, v in user_agent.items())
elif isinstance(a_ , a_):
ua += "; " + user_agent
return ua
def __A ( a_ :str , a_ :Optional[str] = None , a_ :Optional[str] = None) -> Optional[int]:
if token is None:
__a : Any = HfFolder.get_token()
if organization is None:
__a : List[Any] = whoami(a_)['''name''']
return F"""{username}/{model_id}"""
else:
return F"""{organization}/{model_id}"""
def __A ( a_ :Union[str, Any] , a_ :List[str]) -> Optional[Any]:
if not is_jinja_available():
raise ValueError(
'''Modelcard rendering is based on Jinja templates.'''
''' Please make sure to have `jinja` installed before using `create_model_card`.'''
''' To install it, please run `pip install Jinja2`.''')
if hasattr(a_ , '''local_rank''') and args.local_rank not in [-1, 0]:
return
__a : int = args.hub_token if hasattr(a_ , '''hub_token''') else None
__a : Any = get_full_repo_name(a_ , token=a_)
__a : Tuple = ModelCard.from_template(
card_data=ModelCardData( # Card metadata object that will be converted to YAML block
language='''en''' , license='''apache-2.0''' , library_name='''diffusers''' , tags=[] , datasets=args.dataset_name , metrics=[] , ) , template_path=a_ , model_name=a_ , repo_name=a_ , dataset_name=args.dataset_name if hasattr(a_ , '''dataset_name''') else None , learning_rate=args.learning_rate , train_batch_size=args.train_batch_size , eval_batch_size=args.eval_batch_size , gradient_accumulation_steps=(
args.gradient_accumulation_steps if hasattr(a_ , '''gradient_accumulation_steps''') else None
) , adam_betaa=args.adam_betaa if hasattr(a_ , '''adam_beta1''') else None , adam_betaa=args.adam_betaa if hasattr(a_ , '''adam_beta2''') else None , adam_weight_decay=args.adam_weight_decay if hasattr(a_ , '''adam_weight_decay''') else None , adam_epsilon=args.adam_epsilon if hasattr(a_ , '''adam_epsilon''') else None , lr_scheduler=args.lr_scheduler if hasattr(a_ , '''lr_scheduler''') else None , lr_warmup_steps=args.lr_warmup_steps if hasattr(a_ , '''lr_warmup_steps''') else None , ema_inv_gamma=args.ema_inv_gamma if hasattr(a_ , '''ema_inv_gamma''') else None , ema_power=args.ema_power if hasattr(a_ , '''ema_power''') else None , ema_max_decay=args.ema_max_decay if hasattr(a_ , '''ema_max_decay''') else None , mixed_precision=args.mixed_precision , )
__a : List[Any] = os.path.join(args.output_dir , '''README.md''')
model_card.save(a_)
def __A ( a_ :Optional[str] , a_ :Optional[str] = None) -> Union[str, Any]:
if resolved_file is None or commit_hash is not None:
return commit_hash
__a : Any = str(Path(a_).as_posix())
__a : Optional[int] = re.search(R'''snapshots/([^/]+)/''' , a_)
if search is None:
return None
__a : Dict = search.groups()[0]
return commit_hash if REGEX_COMMIT_HASH.match(a_) else None
# Old default cache path, potentially to be migrated.
# This logic was more or less taken from `transformers`, with the following differences:
# - Diffusers doesn't use custom environment variables to specify the cache path.
# - There is no need to migrate the cache format, just move the files to the new location.
A = os.path.expanduser(
os.getenv('''HF_HOME''', os.path.join(os.getenv('''XDG_CACHE_HOME''', '''~/.cache'''), '''huggingface'''))
)
A = os.path.join(hf_cache_home, '''diffusers''')
def __A ( a_ :Optional[str] = None , a_ :Optional[str] = None) -> None:
if new_cache_dir is None:
__a : Dict = DIFFUSERS_CACHE
if old_cache_dir is None:
__a : List[Any] = old_diffusers_cache
__a : Union[str, Any] = Path(a_).expanduser()
__a : Dict = Path(a_).expanduser()
for old_blob_path in old_cache_dir.glob('''**/blobs/*'''):
if old_blob_path.is_file() and not old_blob_path.is_symlink():
__a : List[Any] = new_cache_dir / old_blob_path.relative_to(a_)
new_blob_path.parent.mkdir(parents=a_ , exist_ok=a_)
os.replace(a_ , a_)
try:
os.symlink(a_ , a_)
except OSError:
logger.warning(
'''Could not create symlink between old cache and new cache. If you use an older version of diffusers again, files will be re-downloaded.''')
# At this point, old_cache_dir contains symlinks to the new cache (it can still be used).
A = os.path.join(DIFFUSERS_CACHE, '''version_diffusers_cache.txt''')
if not os.path.isfile(cache_version_file):
A = 0
else:
with open(cache_version_file) as f:
try:
A = int(f.read())
except ValueError:
A = 0
if cache_version < 1:
A = os.path.isdir(old_diffusers_cache) and len(os.listdir(old_diffusers_cache)) > 0
if old_cache_is_not_empty:
logger.warning(
'''The cache for model files in Diffusers v0.14.0 has moved to a new location. Moving your '''
'''existing cached models. This is a one-time operation, you can interrupt it or run it '''
'''later by calling `diffusers.utils.hub_utils.move_cache()`.'''
)
try:
move_cache()
except Exception as e:
A = '''\n'''.join(traceback.format_tb(e.__traceback__))
logger.error(
F'There was a problem when trying to move your cache:\n\n{trace}\n{e.__class__.__name__}: {e}\n\nPlease '
'''file an issue at https://github.com/huggingface/diffusers/issues/new/choose, copy paste this whole '''
'''message and we will do our best to help.'''
)
if cache_version < 1:
try:
os.makedirs(DIFFUSERS_CACHE, exist_ok=True)
with open(cache_version_file, '''w''') as f:
f.write('''1''')
except Exception:
logger.warning(
F'There was a problem when trying to write in your cache folder ({DIFFUSERS_CACHE}). Please, ensure '
'''the directory exists and can be written to.'''
)
def _add_variant(weights_name: str, variant: Optional[str] = None) -> str:
    if variant is not None:
        splits = weights_name.split(".")
        splits = splits[:-1] + [variant] + splits[-1:]
        weights_name = ".".join(splits)

    return weights_name
def __A ( a_ :List[Any] , *,
a_ :Union[str, Any] , a_ :Dict , a_ :Union[str, Any] , a_ :Optional[int] , a_ :str , a_ :Any , a_ :str , a_ :Optional[int] , a_ :str , a_ :Tuple , a_ :List[str]=None , ) -> Dict:
__a : int = str(a_)
if os.path.isfile(a_):
return pretrained_model_name_or_path
elif os.path.isdir(a_):
if os.path.isfile(os.path.join(a_ , a_)):
# Load from a PyTorch checkpoint
__a : Union[str, Any] = os.path.join(a_ , a_)
return model_file
elif subfolder is not None and os.path.isfile(
os.path.join(a_ , a_ , a_)):
__a : Optional[Any] = os.path.join(a_ , a_ , a_)
return model_file
else:
raise EnvironmentError(
F"""Error no file named {weights_name} found in directory {pretrained_model_name_or_path}.""")
else:
# 1. First check if deprecated way of loading from branches is used
if (
revision in DEPRECATED_REVISION_ARGS
and (weights_name == WEIGHTS_NAME or weights_name == SAFETENSORS_WEIGHTS_NAME)
and version.parse(version.parse(a_).base_version) >= version.parse('''0.20.0''')
):
try:
__a : Any = hf_hub_download(
a_ , filename=_add_variant(a_ , a_) , cache_dir=a_ , force_download=a_ , proxies=a_ , resume_download=a_ , local_files_only=a_ , use_auth_token=a_ , user_agent=a_ , subfolder=a_ , revision=revision or commit_hash , )
warnings.warn(
F"""Loading the variant {revision} from {pretrained_model_name_or_path} via `revision='{revision}'` is deprecated. Loading instead from `revision='main'` with `variant={revision}`. Loading model variants via `revision='{revision}'` will be removed in diffusers v1. Please use `variant='{revision}'` instead.""" , a_ , )
return model_file
except: # noqa: E722
warnings.warn(
F"""You are loading the variant {revision} from {pretrained_model_name_or_path} via `revision='{revision}'`. This behavior is deprecated and will be removed in diffusers v1. One should use `variant='{revision}'` instead. However, it appears that {pretrained_model_name_or_path} currently does not have a {_add_variant(a_ , a_)} file in the 'main' branch of {pretrained_model_name_or_path}. \n The Diffusers team and community would be very grateful if you could open an issue: https://github.com/huggingface/diffusers/issues/new with the title '{pretrained_model_name_or_path} is missing {_add_variant(a_ , a_)}' so that the correct variant file can be added.""" , a_ , )
try:
# 2. Load model file as usual
__a : Optional[Any] = hf_hub_download(
a_ , filename=a_ , cache_dir=a_ , force_download=a_ , proxies=a_ , resume_download=a_ , local_files_only=a_ , use_auth_token=a_ , user_agent=a_ , subfolder=a_ , revision=revision or commit_hash , )
return model_file
except RepositoryNotFoundError:
raise EnvironmentError(
F"""{pretrained_model_name_or_path} is not a local folder and is not a valid model identifier """
'''listed on \'https://huggingface.co/models\'\nIf this is a private repository, make sure to pass a '''
'''token having permission to this repo with `use_auth_token` or log in with `huggingface-cli '''
'''login`.''')
except RevisionNotFoundError:
raise EnvironmentError(
F"""{revision} is not a valid git identifier (branch name, tag name or commit id) that exists for """
'''this model name. Check the model page at '''
F"""'https://huggingface.co/{pretrained_model_name_or_path}' for available revisions.""")
except EntryNotFoundError:
raise EnvironmentError(
F"""{pretrained_model_name_or_path} does not appear to have a file named {weights_name}.""")
except HTTPError as err:
raise EnvironmentError(
F"""There was a specific connection error when trying to load {pretrained_model_name_or_path}:\n{err}""")
except ValueError:
raise EnvironmentError(
F"""We couldn't connect to '{HUGGINGFACE_CO_RESOLVE_ENDPOINT}' to load this model, couldn't find it"""
F""" in the cached files and it looks like {pretrained_model_name_or_path} is not the path to a"""
F""" directory containing a file named {weights_name} or"""
''' \nCheckout your internet connection or see how to run the library in'''
''' offline mode at \'https://huggingface.co/docs/diffusers/installation#offline-mode\'.''')
except EnvironmentError:
raise EnvironmentError(
F"""Can't load the model for '{pretrained_model_name_or_path}'. If you were trying to load it from """
'''\'https://huggingface.co/models\', make sure you don\'t have a local directory with the same name. '''
F"""Otherwise, make sure '{pretrained_model_name_or_path}' is the correct path to a directory """
F"""containing a file named {weights_name}""")
| 52 | 0 |
import warnings

from ...utils import logging
from .image_processing_mobilevit import MobileViTImageProcessor


logger = logging.get_logger(__name__)


class MobileViTFeatureExtractor(MobileViTImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class MobileViTFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use MobileViTImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
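

# Sketch of what the shim gives callers: old imports keep working but emit a
# FutureWarning that tests (or users) can surface explicitly.
if __name__ == "__main__":
    with warnings.catch_warnings(record=True) as caught:
        warnings.simplefilter("always")
        extractor = MobileViTFeatureExtractor()  # behaves exactly like MobileViTImageProcessor
    assert any(issubclass(w.category, FutureWarning) for w in caught)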
| 45 |
"""simple docstring"""
import copy
import os
from typing import TYPE_CHECKING, List, Union
if TYPE_CHECKING:
pass
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

ALIGN_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "kakaobrain/align-base": "https://huggingface.co/kakaobrain/align-base/resolve/main/config.json",
}


class AlignTextConfig(PretrainedConfig):
    model_type = "align_text_model"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        position_embedding_type="absolute",
        use_cache=True,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.pad_token_id = pad_token_id

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs):
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        # get the text config dict if we are loading from AlignConfig
        if config_dict.get("model_type") == "align":
            config_dict = config_dict["text_config"]

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors.")

        return cls.from_dict(config_dict, **kwargs)


class AlignVisionConfig(PretrainedConfig):
    model_type = "align_vision_model"

    def __init__(
        self,
        num_channels: int = 3,
        image_size: int = 600,
        width_coefficient: float = 2.0,
        depth_coefficient: float = 3.1,
        depth_divisor: int = 8,
        kernel_sizes: List[int] = [3, 3, 5, 3, 5, 5, 3],
        in_channels: List[int] = [32, 16, 24, 40, 80, 112, 192],
        out_channels: List[int] = [16, 24, 40, 80, 112, 192, 320],
        depthwise_padding: List[int] = [],
        strides: List[int] = [1, 2, 2, 2, 1, 2, 1],
        num_block_repeats: List[int] = [1, 2, 2, 3, 3, 4, 1],
        expand_ratios: List[int] = [1, 6, 6, 6, 6, 6, 6],
        squeeze_expansion_ratio: float = 0.25,
        hidden_act: str = "swish",
        hidden_dim: int = 2560,
        pooling_type: str = "mean",
        initializer_range: float = 0.02,
        batch_norm_eps: float = 0.001,
        batch_norm_momentum: float = 0.99,
        drop_connect_rate: float = 0.2,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.num_channels = num_channels
        self.image_size = image_size
        self.width_coefficient = width_coefficient
        self.depth_coefficient = depth_coefficient
        self.depth_divisor = depth_divisor
        self.kernel_sizes = kernel_sizes
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.depthwise_padding = depthwise_padding
        self.strides = strides
        self.num_block_repeats = num_block_repeats
        self.expand_ratios = expand_ratios
        self.squeeze_expansion_ratio = squeeze_expansion_ratio
        self.hidden_act = hidden_act
        self.hidden_dim = hidden_dim
        self.pooling_type = pooling_type
        self.initializer_range = initializer_range
        self.batch_norm_eps = batch_norm_eps
        self.batch_norm_momentum = batch_norm_momentum
        self.drop_connect_rate = drop_connect_rate
        self.num_hidden_layers = sum(num_block_repeats) * 4

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs):
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        # get the vision config dict if we are loading from AlignConfig
        if config_dict.get("model_type") == "align":
            config_dict = config_dict["vision_config"]

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors.")

        return cls.from_dict(config_dict, **kwargs)


class AlignConfig(PretrainedConfig):
    model_type = "align"
    is_composition = True

    def __init__(
        self,
        text_config=None,
        vision_config=None,
        projection_dim=640,
        temperature_init_value=1.0,
        initializer_range=0.02,
        **kwargs,
    ):
        super().__init__(**kwargs)

        if text_config is None:
            text_config = {}
            logger.info("text_config is None. Initializing the AlignTextConfig with default values.")
        if vision_config is None:
            vision_config = {}
            logger.info("vision_config is None. Initializing the AlignVisionConfig with default values.")

        self.text_config = AlignTextConfig(**text_config)
        self.vision_config = AlignVisionConfig(**vision_config)

        self.projection_dim = projection_dim
        self.temperature_init_value = temperature_init_value
        self.initializer_range = initializer_range

    @classmethod
    def from_text_vision_configs(cls, text_config: AlignTextConfig, vision_config: AlignVisionConfig, **kwargs):
        return cls(text_config=text_config.to_dict(), vision_config=vision_config.to_dict(), **kwargs)

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["text_config"] = self.text_config.to_dict()
        output["vision_config"] = self.vision_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
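

# Composition sketch: AlignConfig nests one config per tower, and sub-configs
# round-trip through `to_dict`.
if __name__ == "__main__":
    text_config = AlignTextConfig(hidden_size=768)
    vision_config = AlignVisionConfig(image_size=600)
    config = AlignConfig.from_text_vision_configs(text_config, vision_config)
    assert config.to_dict()["text_config"]["hidden_size"] == 768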
| 52 | 0 |
"""simple docstring"""
import collections
import inspect
import unittest
from transformers import SwinvaConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import SwinvaForImageClassification, SwinvaForMaskedImageModeling, SwinvaModel
from transformers.models.swinva.modeling_swinva import SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class A_ :
def __init__( self: str ,__lowerCAmelCase: Dict ,__lowerCAmelCase: List[Any]=13 ,__lowerCAmelCase: str=32 ,__lowerCAmelCase: Any=2 ,__lowerCAmelCase: Union[str, Any]=3 ,__lowerCAmelCase: str=16 ,__lowerCAmelCase: Any=[1, 2, 1] ,__lowerCAmelCase: List[Any]=[2, 2, 4] ,__lowerCAmelCase: Union[str, Any]=2 ,__lowerCAmelCase: List[str]=2.0 ,__lowerCAmelCase: Tuple=True ,__lowerCAmelCase: Union[str, Any]=0.0 ,__lowerCAmelCase: Dict=0.0 ,__lowerCAmelCase: Optional[Any]=0.1 ,__lowerCAmelCase: Tuple="gelu" ,__lowerCAmelCase: List[str]=False ,__lowerCAmelCase: Union[str, Any]=True ,__lowerCAmelCase: str=0.02 ,__lowerCAmelCase: Union[str, Any]=1e-5 ,__lowerCAmelCase: Union[str, Any]=True ,__lowerCAmelCase: int=None ,__lowerCAmelCase: Union[str, Any]=True ,__lowerCAmelCase: List[Any]=10 ,__lowerCAmelCase: int=8 ,):
'''simple docstring'''
_lowerCamelCase : Optional[Any] = parent
_lowerCamelCase : str = batch_size
_lowerCamelCase : List[Any] = image_size
_lowerCamelCase : List[str] = patch_size
_lowerCamelCase : str = num_channels
_lowerCamelCase : Optional[int] = embed_dim
_lowerCamelCase : str = depths
_lowerCamelCase : Optional[int] = num_heads
_lowerCamelCase : List[str] = window_size
_lowerCamelCase : Union[str, Any] = mlp_ratio
_lowerCamelCase : Tuple = qkv_bias
_lowerCamelCase : Optional[Any] = hidden_dropout_prob
_lowerCamelCase : str = attention_probs_dropout_prob
_lowerCamelCase : List[str] = drop_path_rate
_lowerCamelCase : Dict = hidden_act
_lowerCamelCase : Optional[int] = use_absolute_embeddings
_lowerCamelCase : Optional[int] = patch_norm
_lowerCamelCase : Union[str, Any] = layer_norm_eps
_lowerCamelCase : Tuple = initializer_range
_lowerCamelCase : Optional[int] = is_training
_lowerCamelCase : int = scope
_lowerCamelCase : Any = use_labels
_lowerCamelCase : int = type_sequence_label_size
_lowerCamelCase : Dict = encoder_stride
def _lowercase ( self: List[Any] ):
'''simple docstring'''
_lowerCamelCase : Dict = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
_lowerCamelCase : List[Any] = None
if self.use_labels:
_lowerCamelCase : Union[str, Any] = ids_tensor([self.batch_size] ,self.type_sequence_label_size )
_lowerCamelCase : Tuple = self.get_config()
return config, pixel_values, labels
def _lowercase ( self: Tuple ):
'''simple docstring'''
return SwinvaConfig(
image_size=self.image_size ,patch_size=self.patch_size ,num_channels=self.num_channels ,embed_dim=self.embed_dim ,depths=self.depths ,num_heads=self.num_heads ,window_size=self.window_size ,mlp_ratio=self.mlp_ratio ,qkv_bias=self.qkv_bias ,hidden_dropout_prob=self.hidden_dropout_prob ,attention_probs_dropout_prob=self.attention_probs_dropout_prob ,drop_path_rate=self.drop_path_rate ,hidden_act=self.hidden_act ,use_absolute_embeddings=self.use_absolute_embeddings ,path_norm=self.patch_norm ,layer_norm_eps=self.layer_norm_eps ,initializer_range=self.initializer_range ,encoder_stride=self.encoder_stride ,)
def _lowercase ( self: Dict ,__lowerCAmelCase: Any ,__lowerCAmelCase: Tuple ,__lowerCAmelCase: List[str] ):
'''simple docstring'''
_lowerCamelCase : Optional[Any] = SwinvaModel(config=__lowerCAmelCase )
model.to(__lowerCAmelCase )
model.eval()
_lowerCamelCase : List[Any] = model(__lowerCAmelCase )
_lowerCamelCase : int = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths ) - 1))
_lowerCamelCase : int = int(config.embed_dim * 2 ** (len(config.depths ) - 1) )
self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, expected_seq_len, expected_dim) )
def _lowercase ( self: List[str] ,__lowerCAmelCase: Optional[Any] ,__lowerCAmelCase: Optional[int] ,__lowerCAmelCase: Union[str, Any] ):
'''simple docstring'''
_lowerCamelCase : Tuple = SwinvaForMaskedImageModeling(config=__lowerCAmelCase )
model.to(__lowerCAmelCase )
model.eval()
_lowerCamelCase : Tuple = model(__lowerCAmelCase )
self.parent.assertEqual(
result.logits.shape ,(self.batch_size, self.num_channels, self.image_size, self.image_size) )
# test greyscale images
_lowerCamelCase : str = 1
_lowerCamelCase : Optional[Any] = SwinvaForMaskedImageModeling(__lowerCAmelCase )
model.to(__lowerCAmelCase )
model.eval()
_lowerCamelCase : Dict = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
_lowerCamelCase : List[Any] = model(__lowerCAmelCase )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, 1, self.image_size, self.image_size) )
def _lowercase ( self: List[Any] ,__lowerCAmelCase: Dict ,__lowerCAmelCase: str ,__lowerCAmelCase: List[Any] ):
'''simple docstring'''
_lowerCamelCase : int = self.type_sequence_label_size
_lowerCamelCase : Tuple = SwinvaForImageClassification(__lowerCAmelCase )
model.to(__lowerCAmelCase )
model.eval()
_lowerCamelCase : Optional[Any] = model(__lowerCAmelCase ,labels=__lowerCAmelCase )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.type_sequence_label_size) )
def _lowercase ( self: Union[str, Any] ):
'''simple docstring'''
_lowerCamelCase : List[str] = self.prepare_config_and_inputs()
_lowerCamelCase, _lowerCamelCase, _lowerCamelCase : List[Any] = config_and_inputs
_lowerCamelCase : Union[str, Any] = {"pixel_values": pixel_values}
return config, inputs_dict
@require_torch
class A_ ( _a , _a , unittest.TestCase ):
lowerCAmelCase__ = (
(SwinvaModel, SwinvaForImageClassification, SwinvaForMaskedImageModeling) if is_torch_available() else ()
)
lowerCAmelCase__ = (
{'feature-extraction': SwinvaModel, 'image-classification': SwinvaForImageClassification}
if is_torch_available()
else {}
)
lowerCAmelCase__ = False
lowerCAmelCase__ = False
lowerCAmelCase__ = False
lowerCAmelCase__ = False
def _lowercase ( self: str ):
'''simple docstring'''
_lowerCamelCase : Optional[int] = SwinvaModelTester(self )
_lowerCamelCase : Dict = ConfigTester(self ,config_class=__lowerCAmelCase ,embed_dim=37 )
def _lowercase ( self: List[str] ):
'''simple docstring'''
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def _lowercase ( self: Any ):
'''simple docstring'''
_lowerCamelCase : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__lowerCAmelCase )
@unittest.skip(reason="Got `CUDA error: misaligned address` with PyTorch 2.0.0." )
def _lowercase ( self: Any ):
'''simple docstring'''
pass
@unittest.skip(reason="Swinv2 does not use inputs_embeds" )
def _lowercase ( self: List[str] ):
'''simple docstring'''
pass
def _lowercase ( self: Optional[Any] ):
'''simple docstring'''
_lowerCamelCase, _lowerCamelCase : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_lowerCamelCase : Union[str, Any] = model_class(__lowerCAmelCase )
self.assertIsInstance(model.get_input_embeddings() ,(nn.Module) )
_lowerCamelCase : int = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(__lowerCAmelCase ,nn.Linear ) )
def _lowercase ( self: Any ):
'''simple docstring'''
_lowerCamelCase, _lowerCamelCase : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_lowerCamelCase : List[str] = model_class(__lowerCAmelCase )
_lowerCamelCase : Optional[Any] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_lowerCamelCase : List[str] = [*signature.parameters.keys()]
_lowerCamelCase : Union[str, Any] = ["pixel_values"]
self.assertListEqual(arg_names[:1] ,__lowerCAmelCase )
def _lowercase ( self: Optional[Any] ):
'''simple docstring'''
_lowerCamelCase, _lowerCamelCase : int = self.model_tester.prepare_config_and_inputs_for_common()
_lowerCamelCase : List[Any] = True
for model_class in self.all_model_classes:
_lowerCamelCase : Optional[int] = True
_lowerCamelCase : Tuple = False
_lowerCamelCase : int = True
_lowerCamelCase : Union[str, Any] = model_class(__lowerCAmelCase )
model.to(__lowerCAmelCase )
model.eval()
with torch.no_grad():
_lowerCamelCase : List[Any] = model(**self._prepare_for_class(__lowerCAmelCase ,__lowerCAmelCase ) )
_lowerCamelCase : int = outputs.attentions
_lowerCamelCase : int = len(self.model_tester.depths )
self.assertEqual(len(__lowerCAmelCase ) ,__lowerCAmelCase )
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
_lowerCamelCase : int = True
_lowerCamelCase : Any = config.window_size**2
_lowerCamelCase : Tuple = model_class(__lowerCAmelCase )
model.to(__lowerCAmelCase )
model.eval()
with torch.no_grad():
_lowerCamelCase : Optional[int] = model(**self._prepare_for_class(__lowerCAmelCase ,__lowerCAmelCase ) )
_lowerCamelCase : Dict = outputs.attentions
self.assertEqual(len(__lowerCAmelCase ) ,__lowerCAmelCase )
self.assertListEqual(
list(attentions[0].shape[-3:] ) ,[self.model_tester.num_heads[0], window_size_squared, window_size_squared] ,)
_lowerCamelCase : Union[str, Any] = len(__lowerCAmelCase )
# Check attention is always last and order is fine
_lowerCamelCase : Tuple = True
_lowerCamelCase : List[Any] = True
_lowerCamelCase : Tuple = model_class(__lowerCAmelCase )
model.to(__lowerCAmelCase )
model.eval()
with torch.no_grad():
_lowerCamelCase : List[Any] = model(**self._prepare_for_class(__lowerCAmelCase ,__lowerCAmelCase ) )
if hasattr(self.model_tester ,"num_hidden_states_types" ):
_lowerCamelCase : Optional[int] = self.model_tester.num_hidden_states_types
else:
# also another +1 for reshaped_hidden_states
_lowerCamelCase : Union[str, Any] = 2
self.assertEqual(out_len + added_hidden_states ,len(__lowerCAmelCase ) )
_lowerCamelCase : Optional[int] = outputs.attentions
self.assertEqual(len(__lowerCAmelCase ) ,__lowerCAmelCase )
self.assertListEqual(
list(self_attentions[0].shape[-3:] ) ,[self.model_tester.num_heads[0], window_size_squared, window_size_squared] ,)
def _lowercase ( self: Tuple ,__lowerCAmelCase: Union[str, Any] ,__lowerCAmelCase: Union[str, Any] ,__lowerCAmelCase: Optional[Any] ,__lowerCAmelCase: int ):
'''simple docstring'''
_lowerCamelCase : List[Any] = model_class(__lowerCAmelCase )
model.to(__lowerCAmelCase )
model.eval()
with torch.no_grad():
_lowerCamelCase : Union[str, Any] = model(**self._prepare_for_class(__lowerCAmelCase ,__lowerCAmelCase ) )
_lowerCamelCase : int = outputs.hidden_states
_lowerCamelCase : Optional[Any] = getattr(
self.model_tester ,"expected_num_hidden_layers" ,len(self.model_tester.depths ) + 1 )
self.assertEqual(len(__lowerCAmelCase ) ,__lowerCAmelCase )
# Swinv2 has a different seq_length
_lowerCamelCase : Union[str, Any] = (
config.patch_size
if isinstance(config.patch_size ,collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
_lowerCamelCase : Optional[Any] = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) ,[num_patches, self.model_tester.embed_dim] ,)
_lowerCamelCase : int = outputs.reshaped_hidden_states
self.assertEqual(len(__lowerCAmelCase ) ,__lowerCAmelCase )
_lowerCamelCase, _lowerCamelCase, _lowerCamelCase, _lowerCamelCase : Tuple = reshaped_hidden_states[0].shape
_lowerCamelCase : Tuple = (
reshaped_hidden_states[0].view(__lowerCAmelCase ,__lowerCAmelCase ,height * width ).permute(0 ,2 ,1 )
)
self.assertListEqual(
list(reshaped_hidden_states.shape[-2:] ) ,[num_patches, self.model_tester.embed_dim] ,)
def _lowercase ( self: List[Any] ):
'''simple docstring'''
_lowerCamelCase, _lowerCamelCase : Dict = self.model_tester.prepare_config_and_inputs_for_common()
_lowerCamelCase : Optional[int] = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size ,collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
for model_class in self.all_model_classes:
_lowerCamelCase : Union[str, Any] = True
self.check_hidden_states_output(__lowerCAmelCase ,__lowerCAmelCase ,__lowerCAmelCase ,__lowerCAmelCase )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
_lowerCamelCase : List[Any] = True
self.check_hidden_states_output(__lowerCAmelCase ,__lowerCAmelCase ,__lowerCAmelCase ,__lowerCAmelCase )
def _lowercase ( self: int ):
'''simple docstring'''
_lowerCamelCase, _lowerCamelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
_lowerCamelCase : Union[str, Any] = 3
_lowerCamelCase : Any = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size ,collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
_lowerCamelCase : Dict = (
config.patch_size
if isinstance(config.patch_size ,collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
_lowerCamelCase : Union[str, Any] = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0])
_lowerCamelCase : List[Any] = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1])
for model_class in self.all_model_classes:
_lowerCamelCase : Optional[int] = True
self.check_hidden_states_output(__lowerCAmelCase ,__lowerCAmelCase ,__lowerCAmelCase ,(padded_height, padded_width) )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
_lowerCamelCase : Tuple = True
self.check_hidden_states_output(__lowerCAmelCase ,__lowerCAmelCase ,__lowerCAmelCase ,(padded_height, padded_width) )
    def test_for_masked_image_modeling(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_image_modeling(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)
    @slow
    def test_model_from_pretrained(self):
        for model_name in SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = Swinv2Model.from_pretrained(model_name)
            self.assertIsNotNone(model)
    def test_initialization(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        configs_no_init = _config_zero_init(config)
        for model_class in self.all_model_classes:
            model = model_class(config=configs_no_init)
            for name, param in model.named_parameters():
                if "embeddings" not in name and "logit_scale" not in name and param.requires_grad:
                    self.assertIn(
                        ((param.data.mean() * 1e9).round() / 1e9).item(),
                        [0.0, 1.0],
                        msg=f"Parameter {name} of model {model_class} seems not properly initialized",
                    )
@require_vision
@require_torch
class Swinv2ModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return (
            AutoImageProcessor.from_pretrained("microsoft/swinv2-tiny-patch4-window8-256")
            if is_vision_available()
            else None
        )

    @slow
    def test_inference_image_classification_head(self):
        model = Swinv2ForImageClassification.from_pretrained("microsoft/swinv2-tiny-patch4-window8-256").to(
            torch_device
        )
        image_processor = self.default_image_processor
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)
        expected_slice = torch.tensor([-0.3947, -0.4306, 0.0026]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
| 46 |
"""simple docstring"""
from __future__ import annotations
from random import choice
def random_pivot(lst):
    """
    Choose a random pivot element from the list.
    """
    return choice(lst)


def kth_number(lst: list[int], k: int) -> int:
    """
    Return the k-th smallest element (1-indexed) of a list of distinct
    integers, using a randomized quickselect.
    """
    pivot = random_pivot(lst)

    # partition based on pivot
    # linear time
    small = [e for e in lst if e < pivot]
    big = [e for e in lst if e > pivot]

    # if we get lucky, pivot might be the element we want.
    # we can easily see this:
    # small (elements smaller than k)
    # + pivot (kth element)
    # + big (elements larger than k)
    if len(small) == k - 1:
        return pivot
    # pivot is in elements bigger than k
    elif len(small) < k - 1:
        return kth_number(big, k - len(small) - 1)
    # pivot is in elements smaller than k
    else:
        return kth_number(small, k)
if __name__ == "__main__":
import doctest
doctest.testmod()
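    # Quick sanity check (illustrative addition, not from the original module):
    # the 3rd smallest element of [2, 1, 3, 4, 5] is 3. Note that kth_number
    # assumes distinct values, since elements equal to the pivot are dropped
    # by the two partition filters above.
    assert kth_number([2, 1, 3, 4, 5], 3) == 3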
| 52 | 0 |
def solution(limit: int = 1_000_000):
    """
    Project Euler 14: return the starting number below `limit` that produces
    the longest Collatz chain. Chain lengths are memoized in `counters`.
    """
    largest_number = 1
    pre_counter = 1
    counters = {1: 1}

    for input1 in range(2, limit):
        counter = 0
        number = input1

        while True:
            if number in counters:
                counter += counters[number]
                break
            if number % 2 == 0:
                number //= 2
                counter += 1
            else:
                number = (3 * number) + 1
                counter += 1

        if input1 not in counters:
            counters[input1] = counter

        if counter > pre_counter:
            largest_number = input1
            pre_counter = counter
    return largest_number
if __name__ == "__main__":
print(solution(int(input().strip())))
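    # Known result (illustrative note): with a limit of 1_000_000 the longest
    # chain starts at 837799. For example, the chain from 13 is
    # 13 -> 40 -> 20 -> 10 -> 5 -> 16 -> 8 -> 4 -> 2 -> 1 (10 terms).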
| 47 |
"""simple docstring"""
import argparse
import csv
import logging
import os
import random
import numpy as np
import torch
from torch.utils.data import DataLoader, RandomSampler, SequentialSampler, TensorDataset
from tqdm import tqdm, trange
from transformers import (
CONFIG_NAME,
WEIGHTS_NAME,
AdamW,
OpenAIGPTDoubleHeadsModel,
OpenAIGPTTokenizer,
get_linear_schedule_with_warmup,
)
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''', datefmt='''%m/%d/%Y %H:%M:%S''', level=logging.INFO
)
logger = logging.getLogger(__name__)
def accuracy(out, labels):
    outputs = np.argmax(out, axis=1)
    return np.sum(outputs == labels)
def load_rocstories_dataset(dataset_path):
    """Output a list of tuples (story, 1st continuation, 2nd continuation, label)."""
    with open(dataset_path, encoding="utf_8") as f:
        dataset = csv.reader(f)
        output = []
        next(dataset)  # skip the first line
        for line in tqdm(dataset):
            output.append((" ".join(line[1:5]), line[5], line[6], int(line[-1]) - 1))
    return output
def pre_process_datasets(encoded_datasets, input_len, cap_length, start_token, delimiter_token, clf_token):
    """Pre-process datasets into tensors of shape (n_batch, 2, input_len) for the double-heads model."""
    tensor_datasets = []
    for dataset in encoded_datasets:
        n_batch = len(dataset)
        input_ids = np.zeros((n_batch, 2, input_len), dtype=np.int64)
        mc_token_ids = np.zeros((n_batch, 2), dtype=np.int64)
        lm_labels = np.full((n_batch, 2, input_len), fill_value=-100, dtype=np.int64)
        mc_labels = np.zeros((n_batch,), dtype=np.int64)
        for (
            i,
            (story, cont1, cont2, mc_label),
        ) in enumerate(dataset):
            with_cont1 = [start_token] + story[:cap_length] + [delimiter_token] + cont1[:cap_length] + [clf_token]
            with_cont2 = [start_token] + story[:cap_length] + [delimiter_token] + cont2[:cap_length] + [clf_token]
            input_ids[i, 0, : len(with_cont1)] = with_cont1
            input_ids[i, 1, : len(with_cont2)] = with_cont2
            mc_token_ids[i, 0] = len(with_cont1) - 1
            mc_token_ids[i, 1] = len(with_cont2) - 1
            lm_labels[i, 0, : len(with_cont1)] = with_cont1
            lm_labels[i, 1, : len(with_cont2)] = with_cont2
            mc_labels[i] = mc_label
        all_inputs = (input_ids, mc_token_ids, lm_labels, mc_labels)
        tensor_datasets.append(tuple(torch.tensor(t) for t in all_inputs))
    return tensor_datasets
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument("--model_name", type=str, default="openai-gpt", help="pretrained model name")
    parser.add_argument("--do_train", action="store_true", help="Whether to run training.")
    parser.add_argument("--do_eval", action="store_true", help="Whether to run eval on the dev set.")
    parser.add_argument(
        "--output_dir",
        default=None,
        type=str,
        required=True,
        help="The output directory where the model predictions and checkpoints will be written.",
    )
    parser.add_argument("--train_dataset", type=str, default="")
    parser.add_argument("--eval_dataset", type=str, default="")
    parser.add_argument("--seed", type=int, default=42)
    parser.add_argument("--num_train_epochs", type=int, default=3)
    parser.add_argument("--train_batch_size", type=int, default=8)
    parser.add_argument("--eval_batch_size", type=int, default=16)
    parser.add_argument("--adam_epsilon", default=1e-8, type=float, help="Epsilon for Adam optimizer.")
    parser.add_argument("--max_grad_norm", type=int, default=1)
    parser.add_argument(
        "--max_steps",
        default=-1,
        type=int,
        help="If > 0: set total number of training steps to perform. Override num_train_epochs.",
    )
    parser.add_argument(
        "--gradient_accumulation_steps",
        type=int,
        default=1,
        help="Number of updates steps to accumulate before performing a backward/update pass.",
    )
    parser.add_argument("--learning_rate", type=float, default=6.25e-5)
    parser.add_argument("--warmup_steps", default=0, type=int, help="Linear warmup over warmup_steps.")
    parser.add_argument("--lr_schedule", type=str, default="warmup_linear")
    parser.add_argument("--weight_decay", type=float, default=0.01)
    parser.add_argument("--lm_coef", type=float, default=0.9)
    parser.add_argument("--n_valid", type=int, default=374)
    parser.add_argument("--server_ip", type=str, default="", help="Can be used for distant debugging.")
    parser.add_argument("--server_port", type=str, default="", help="Can be used for distant debugging.")
    args = parser.parse_args()
    print(args)
if args.server_ip and args.server_port:
# Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script
import ptvsd
print('''Waiting for debugger attach''')
        ptvsd.enable_attach(address=(args.server_ip, args.server_port), redirect_output=True)
ptvsd.wait_for_attach()
random.seed(args.seed)
np.random.seed(args.seed)
torch.manual_seed(args.seed)
torch.cuda.manual_seed_all(args.seed)
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    n_gpu = torch.cuda.device_count()
    logger.info("device: {}, n_gpu {}".format(device, n_gpu))
if not args.do_train and not args.do_eval:
raise ValueError('''At least one of `do_train` or `do_eval` must be True.''')
if not os.path.exists(args.output_dir):
os.makedirs(args.output_dir)
# Load tokenizer and model
# This loading functions also add new tokens and embeddings called `special tokens`
# These new embeddings will be fine-tuned on the RocStories dataset
    special_tokens = ["_start_", "_delimiter_", "_classify_"]
    tokenizer = OpenAIGPTTokenizer.from_pretrained(args.model_name)
    tokenizer.add_tokens(special_tokens)
    special_tokens_ids = tokenizer.convert_tokens_to_ids(special_tokens)
    model = OpenAIGPTDoubleHeadsModel.from_pretrained(args.model_name)
    model.resize_token_embeddings(len(tokenizer))
    model.to(device)
    # Load and encode the datasets
    def tokenize_and_encode(obj):
        """Tokenize and encode a nested object."""
        if isinstance(obj, str):
            return tokenizer.convert_tokens_to_ids(tokenizer.tokenize(obj))
        elif isinstance(obj, int):
            return obj
        return [tokenize_and_encode(o) for o in obj]
logger.info('''Encoding dataset...''')
    train_dataset = load_rocstories_dataset(args.train_dataset)
    eval_dataset = load_rocstories_dataset(args.eval_dataset)
    datasets = (train_dataset, eval_dataset)
    encoded_datasets = tokenize_and_encode(datasets)

    # Compute the max input length for the Transformer
    max_length = model.config.n_positions // 2 - 2
    input_length = max(
        len(story[:max_length]) + max(len(cont1[:max_length]), len(cont2[:max_length])) + 3
        for dataset in encoded_datasets
        for story, cont1, cont2, _ in dataset
    )
    input_length = min(input_length, model.config.n_positions)  # Max size of input for the pre-trained model

    # Prepare inputs tensors and dataloaders
    tensor_datasets = pre_process_datasets(encoded_datasets, input_length, max_length, *special_tokens_ids)
    train_tensor_dataset, eval_tensor_dataset = tensor_datasets[0], tensor_datasets[1]
    train_data = TensorDataset(*train_tensor_dataset)
    train_sampler = RandomSampler(train_data)
    train_dataloader = DataLoader(train_data, sampler=train_sampler, batch_size=args.train_batch_size)
    eval_data = TensorDataset(*eval_tensor_dataset)
    eval_sampler = SequentialSampler(eval_data)
    eval_dataloader = DataLoader(eval_data, sampler=eval_sampler, batch_size=args.eval_batch_size)
# Prepare optimizer
    if args.do_train:
        if args.max_steps > 0:
            t_total = args.max_steps
            args.num_train_epochs = args.max_steps // (len(train_dataloader) // args.gradient_accumulation_steps) + 1
        else:
            t_total = len(train_dataloader) // args.gradient_accumulation_steps * args.num_train_epochs

        param_optimizer = list(model.named_parameters())
        no_decay = ["bias", "LayerNorm.bias", "LayerNorm.weight"]
        optimizer_grouped_parameters = [
            {
                "params": [p for n, p in param_optimizer if not any(nd in n for nd in no_decay)],
                "weight_decay": args.weight_decay,
            },
            {"params": [p for n, p in param_optimizer if any(nd in n for nd in no_decay)], "weight_decay": 0.0},
        ]
        optimizer = AdamW(optimizer_grouped_parameters, lr=args.learning_rate, eps=args.adam_epsilon)
        scheduler = get_linear_schedule_with_warmup(
            optimizer, num_warmup_steps=args.warmup_steps, num_training_steps=t_total
        )
    if args.do_train:
        nb_tr_steps, tr_loss, exp_average_loss = 0, 0, None
        model.train()
        for _ in trange(int(args.num_train_epochs), desc="Epoch"):
            tr_loss = 0
            nb_tr_steps = 0
            tqdm_bar = tqdm(train_dataloader, desc="Training")
            for step, batch in enumerate(tqdm_bar):
                batch = tuple(t.to(device) for t in batch)
                input_ids, mc_token_ids, lm_labels, mc_labels = batch
                losses = model(input_ids, mc_token_ids=mc_token_ids, lm_labels=lm_labels, mc_labels=mc_labels)
                loss = args.lm_coef * losses[0] + losses[1]
                loss.backward()
                optimizer.step()
                scheduler.step()
                optimizer.zero_grad()
                tr_loss += loss.item()
                exp_average_loss = (
                    loss.item() if exp_average_loss is None else 0.7 * exp_average_loss + 0.3 * loss.item()
                )
                nb_tr_steps += 1
                tqdm_bar.desc = "Training loss: {:.2e} lr: {:.2e}".format(exp_average_loss, scheduler.get_lr()[0])
    # Save a trained model
    if args.do_train:
        # Save a trained model, configuration and tokenizer
        model_to_save = model.module if hasattr(model, "module") else model  # Only save the model itself

        # If we save using the predefined names, we can load using `from_pretrained`
        output_model_file = os.path.join(args.output_dir, WEIGHTS_NAME)
        output_config_file = os.path.join(args.output_dir, CONFIG_NAME)

        torch.save(model_to_save.state_dict(), output_model_file)
        model_to_save.config.to_json_file(output_config_file)
        tokenizer.save_vocabulary(args.output_dir)

        # Load a trained model and vocabulary that you have fine-tuned
        model = OpenAIGPTDoubleHeadsModel.from_pretrained(args.output_dir)
        tokenizer = OpenAIGPTTokenizer.from_pretrained(args.output_dir)
        model.to(device)
    if args.do_eval:
        model.eval()
        eval_loss, eval_accuracy = 0, 0
        nb_eval_steps, nb_eval_examples = 0, 0
        for batch in tqdm(eval_dataloader, desc="Evaluating"):
            batch = tuple(t.to(device) for t in batch)
            input_ids, mc_token_ids, lm_labels, mc_labels = batch
            with torch.no_grad():
                _, mc_loss, _, mc_logits = model(
                    input_ids, mc_token_ids=mc_token_ids, lm_labels=lm_labels, mc_labels=mc_labels
                )
            mc_logits = mc_logits.detach().cpu().numpy()
            mc_labels = mc_labels.to("cpu").numpy()
            tmp_eval_accuracy = accuracy(mc_logits, mc_labels)

            eval_loss += mc_loss.mean().item()
            eval_accuracy += tmp_eval_accuracy

            nb_eval_examples += input_ids.size(0)
            nb_eval_steps += 1

        eval_loss = eval_loss / nb_eval_steps
        eval_accuracy = eval_accuracy / nb_eval_examples
        train_loss = tr_loss / nb_tr_steps if args.do_train else None
        result = {"eval_loss": eval_loss, "eval_accuracy": eval_accuracy, "train_loss": train_loss}
        output_eval_file = os.path.join(args.output_dir, "eval_results.txt")
        with open(output_eval_file, "w") as writer:
            logger.info("***** Eval results *****")
            for key in sorted(result.keys()):
                logger.info("  %s = %s", key, str(result[key]))
                writer.write("%s = %s\n" % (key, str(result[key])))
if __name__ == "__main__":
main()
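# Example invocation (illustrative; the script name and dataset paths are
# placeholders for the ROCStories cloze-test CSV files that
# load_rocstories_dataset expects):
#   python run_openai_gpt.py \
#     --model_name openai-gpt \
#     --do_train --do_eval \
#     --train_dataset $ROCSTORIES_DIR/cloze_test_val__spring2016.csv \
#     --eval_dataset $ROCSTORIES_DIR/cloze_test_test__spring2016.csv \
#     --output_dir ../log \
#     --train_batch_size 16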
| 52 | 0 |
'''simple docstring'''
import unittest
from parameterized import parameterized
from transformers import OpenLlamaConfig, is_torch_available, set_seed
from transformers.testing_utils import require_torch, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import OpenLlamaForCausalLM, OpenLlamaForSequenceClassification, OpenLlamaModel
class OpenLlamaModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=False,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()
        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def get_config(self):
        return OpenLlamaConfig(
            vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, is_decoder=False, initializer_range=self.initializer_range, use_stable_embedding=True,
        )
    def create_and_check_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = OpenLlamaModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
    def create_and_check_model_as_decoder(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels,
        encoder_hidden_states, encoder_attention_mask,
    ):
        config.add_cross_attention = True
        model = OpenLlamaModel(config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids, attention_mask=input_mask, encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
        )
        result = model(input_ids, attention_mask=input_mask, encoder_hidden_states=encoder_hidden_states)
        result = model(input_ids, attention_mask=input_mask)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
    def create_and_check_for_causal_lm(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels,
        encoder_hidden_states, encoder_attention_mask,
    ):
        model = OpenLlamaForCausalLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
    def create_and_check_decoder_model_past_large_inputs(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels,
        encoder_hidden_states, encoder_attention_mask,
    ):
        config.is_decoder = True
        config.add_cross_attention = True
        model = OpenLlamaForCausalLM(config=config)
        model.to(torch_device)
        model.eval()

        # first forward pass
        outputs = model(
            input_ids, attention_mask=input_mask, encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask, use_cache=True,
        )
        past_key_values = outputs.past_key_values

        # create hypothetical multiple next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_mask = ids_tensor((self.batch_size, 3), vocab_size=2)

        # append to next input_ids and
        next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)
        next_attention_mask = torch.cat([input_mask, next_mask], dim=-1)

        output_from_no_past = model(
            next_input_ids, attention_mask=next_attention_mask, encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask, output_hidden_states=True,
        )["hidden_states"][0]
        output_from_past = model(
            next_tokens, attention_mask=next_attention_mask, encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask, past_key_values=past_key_values,
            output_hidden_states=True,
        )["hidden_states"][0]

        # select random slice
        random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, :, random_slice_idx].detach()

        self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1])

        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(output_from_no_past_slice, output_from_past_slice, atol=1e-3))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class OpenLlamaModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (OpenLlamaModel, OpenLlamaForCausalLM, OpenLlamaForSequenceClassification) if is_torch_available() else ()
    )
    all_generative_model_classes = (OpenLlamaForCausalLM,) if is_torch_available() else ()
    pipeline_model_mapping = (
        {
            "feature-extraction": OpenLlamaModel,
            "text-classification": OpenLlamaForSequenceClassification,
            "text-generation": OpenLlamaForCausalLM,
            "zero-shot": OpenLlamaForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    test_headmasking = False
    test_pruning = False
    def setUp(self):
        self.model_tester = OpenLlamaModelTester(self)
        self.config_tester = ConfigTester(self, config_class=OpenLlamaConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()
    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_model_various_embeddings(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs)
    def test_open_llama_sequence_classification_model(self):
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        input_ids = input_dict["input_ids"]
        attention_mask = input_ids.ne(1).to(torch_device)
        sequence_labels = ids_tensor([self.model_tester.batch_size], self.model_tester.type_sequence_label_size)
        model = OpenLlamaForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels)
        self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels))
    def test_open_llama_sequence_classification_model_for_single_label(self):
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        config.problem_type = "single_label_classification"
        input_ids = input_dict["input_ids"]
        attention_mask = input_ids.ne(1).to(torch_device)
        sequence_labels = ids_tensor([self.model_tester.batch_size], self.model_tester.type_sequence_label_size)
        model = OpenLlamaForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels)
        self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels))
    def test_open_llama_sequence_classification_model_for_multi_label(self):
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        config.problem_type = "multi_label_classification"
        input_ids = input_dict["input_ids"]
        attention_mask = input_ids.ne(1).to(torch_device)
        sequence_labels = ids_tensor(
            [self.model_tester.batch_size, config.num_labels], self.model_tester.type_sequence_label_size
        ).to(torch.float)
        model = OpenLlamaForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels)
        self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels))
@unittest.skip("Open-Llama buffers include complex numbers, which breaks this test" )
    def test_save_load_fast_init_from_base(self):
        pass
    @parameterized.expand([("linear",), ("dynamic",)])
    def test_model_rope_scaling(self, scaling_type):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        short_input = ids_tensor([1, 10], config.vocab_size)
        long_input = ids_tensor([1, int(config.max_position_embeddings * 1.5)], config.vocab_size)

        set_seed(42)  # Fixed seed at init time so the two models get the same random weights
        original_model = OpenLlamaModel(config)
        original_model.to(torch_device)
        original_model.eval()
        original_short_output = original_model(short_input).last_hidden_state
        original_long_output = original_model(long_input).last_hidden_state

        set_seed(42)  # Fixed seed at init time so the two models get the same random weights
        config.rope_scaling = {"type": scaling_type, "factor": 10.0}
        scaled_model = OpenLlamaModel(config)
        scaled_model.to(torch_device)
        scaled_model.eval()
        scaled_short_output = scaled_model(short_input).last_hidden_state
        scaled_long_output = scaled_model(long_input).last_hidden_state

        # Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original
        # maximum sequence length, so the outputs for the short input should match.
        if scaling_type == "dynamic":
            self.assertTrue(torch.allclose(original_short_output, scaled_short_output, atol=1e-5))
        else:
            self.assertFalse(torch.allclose(original_short_output, scaled_short_output, atol=1e-5))

        # The output should be different for long inputs
        self.assertFalse(torch.allclose(original_long_output, scaled_long_output, atol=1e-5))
| 48 |
"""simple docstring"""
import unittest
import numpy as np
from transformers import RobertaConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_flax_available():
from transformers.models.roberta.modeling_flax_roberta import (
FlaxRobertaForCausalLM,
FlaxRobertaForMaskedLM,
FlaxRobertaForMultipleChoice,
FlaxRobertaForQuestionAnswering,
FlaxRobertaForSequenceClassification,
FlaxRobertaForTokenClassification,
FlaxRobertaModel,
)
class FlaxRobertaModelTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_attention_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_choices=4,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_choices = num_choices
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        attention_mask = None
        if self.use_attention_mask:
            attention_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        config = RobertaConfig(
            vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, is_decoder=False, initializer_range=self.initializer_range,
        )

        return config, input_ids, token_type_ids, attention_mask
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": attention_mask}
        return config, inputs_dict
    def prepare_config_and_inputs_for_decoder(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size])
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)

        return (
            config,
            input_ids,
            token_type_ids,
            encoder_hidden_states,
            encoder_attention_mask,
        )
@require_flax
class FlaxRobertaModelTest(FlaxModelTesterMixin, unittest.TestCase):
    test_head_masking = True

    all_model_classes = (
        (
            FlaxRobertaModel,
            FlaxRobertaForCausalLM,
            FlaxRobertaForMaskedLM,
            FlaxRobertaForSequenceClassification,
            FlaxRobertaForTokenClassification,
            FlaxRobertaForMultipleChoice,
            FlaxRobertaForQuestionAnswering,
        )
        if is_flax_available()
        else ()
    )

    def setUp(self):
        self.model_tester = FlaxRobertaModelTester(self)

    @slow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("roberta-base", from_pt=True)
            outputs = model(np.ones((1, 1)))
            self.assertIsNotNone(outputs)
| 52 | 0 |
"""simple docstring"""
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import torch
import torch.nn as nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, apply_forward_hook
from .modeling_utils import ModelMixin
from .vae import Decoder, DecoderOutput, Encoder, VectorQuantizer
@dataclass
class VQEncoderOutput(BaseOutput):
    """
    Output of the VQModel encoding method.
    """

    latents: torch.FloatTensor


class VQModel(ModelMixin, ConfigMixin):
    @register_to_config
    def __init__(
        self,
        in_channels: int = 3,
        out_channels: int = 3,
        down_block_types: Tuple[str] = ("DownEncoderBlock2D",),
        up_block_types: Tuple[str] = ("UpDecoderBlock2D",),
        block_out_channels: Tuple[int] = (64,),
        layers_per_block: int = 1,
        act_fn: str = "silu",
        latent_channels: int = 3,
        sample_size: int = 32,
        num_vq_embeddings: int = 256,
        norm_num_groups: int = 32,
        vq_embed_dim: Optional[int] = None,
        scaling_factor: float = 0.18215,
        norm_type: str = "group",
    ):
        super().__init__()

        # pass init params to Encoder
        self.encoder = Encoder(
            in_channels=in_channels, out_channels=latent_channels, down_block_types=down_block_types, block_out_channels=block_out_channels, layers_per_block=layers_per_block, act_fn=act_fn, norm_num_groups=norm_num_groups, double_z=False,
        )

        vq_embed_dim = vq_embed_dim if vq_embed_dim is not None else latent_channels
        self.quant_conv = nn.Conv2d(latent_channels, vq_embed_dim, 1)
        self.quantize = VectorQuantizer(num_vq_embeddings, vq_embed_dim, beta=0.25, remap=None, sane_index_shape=False)
        self.post_quant_conv = nn.Conv2d(vq_embed_dim, latent_channels, 1)

        # pass init params to Decoder
        self.decoder = Decoder(
            in_channels=latent_channels, out_channels=out_channels, up_block_types=up_block_types, block_out_channels=block_out_channels, layers_per_block=layers_per_block, act_fn=act_fn, norm_num_groups=norm_num_groups, norm_type=norm_type,
        )

    @apply_forward_hook
    def encode(self, x: torch.FloatTensor, return_dict: bool = True):
        h = self.encoder(x)
        h = self.quant_conv(h)

        if not return_dict:
            return (h,)

        return VQEncoderOutput(latents=h)

    @apply_forward_hook
    def decode(self, h: torch.FloatTensor, force_not_quantize: bool = False, return_dict: bool = True):
        # also go through quantization layer
        if not force_not_quantize:
            quant, emb_loss, info = self.quantize(h)
        else:
            quant = h
        quant2 = self.post_quant_conv(quant)
        dec = self.decoder(quant2, quant if self.config.norm_type == "spatial" else None)

        if not return_dict:
            return (dec,)

        return DecoderOutput(sample=dec)

    def forward(self, sample: torch.FloatTensor, return_dict: bool = True):
        x = sample
        h = self.encode(x).latents
        dec = self.decode(h).sample

        if not return_dict:
            return (dec,)

        return DecoderOutput(sample=dec)
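# Minimal usage sketch (illustrative; the shapes and constructor arguments
# below are assumptions, not from the original file):
#
#     model = VQModel(block_out_channels=(64,), latent_channels=3, num_vq_embeddings=256)
#     sample = torch.randn(1, 3, 32, 32)
#     reconstruction = model(sample).sample  # same spatial shape as `sample`
#
# `encode` returns continuous latents; quantization against the codebook
# happens inside `decode` unless `force_not_quantize=True`.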
| 49 |
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

LEVIT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/levit-128S": "https://huggingface.co/facebook/levit-128S/resolve/main/config.json",
    # See all LeViT models at https://huggingface.co/models?filter=levit
}
class LevitConfig(PretrainedConfig):
    model_type = "levit"

    def __init__(
        self,
        image_size=224,
        num_channels=3,
        kernel_size=3,
        stride=2,
        padding=1,
        patch_size=16,
        hidden_sizes=[128, 256, 384],
        num_attention_heads=[4, 8, 12],
        depths=[4, 4, 4],
        key_dim=[16, 16, 16],
        drop_path_rate=0,
        mlp_ratio=[2, 2, 2],
        attention_ratio=[2, 2, 2],
        initializer_range=0.02,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.image_size = image_size
        self.num_channels = num_channels
        self.kernel_size = kernel_size
        self.stride = stride
        self.padding = padding
        self.hidden_sizes = hidden_sizes
        self.num_attention_heads = num_attention_heads
        self.depths = depths
        self.key_dim = key_dim
        self.drop_path_rate = drop_path_rate
        self.patch_size = patch_size
        self.attention_ratio = attention_ratio
        self.mlp_ratio = mlp_ratio
        self.initializer_range = initializer_range
        self.down_ops = [
            ["Subsample", key_dim[0], hidden_sizes[0] // key_dim[0], 4, 2, 2],
            ["Subsample", key_dim[0], hidden_sizes[1] // key_dim[0], 4, 2, 2],
        ]


class LevitOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4
| 52 | 0 |
'''simple docstring'''
import inspect
import re
from hashlib import sha256
from typing import Dict, List
from .arrow import arrow
from .audiofolder import audiofolder
from .csv import csv
from .imagefolder import imagefolder
from .json import json
from .pandas import pandas
from .parquet import parquet
from .sql import sql # noqa F401
from .text import text
def _hash_python_lines(lines: List[str]) -> str:
    filtered_lines = []
    for line in lines:
        line = re.sub(r"#.*", "", line)  # remove comments
        if line:
            filtered_lines.append(line)
    full_str = "\n".join(filtered_lines)

    # Make a hash from all this code
    full_bytes = full_str.encode("utf-8")
    return sha256(full_bytes).hexdigest()
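# Example behaviour (illustrative note): a line holding only a comment is
# dropped before hashing, so these two calls return the same digest:
#     _hash_python_lines(["x = 1", "# a comment"]) == _hash_python_lines(["x = 1"])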
# get importable module names and hash for caching
_PACKAGED_DATASETS_MODULES = {
'csv': (csv.__name__, _hash_python_lines(inspect.getsource(csv).splitlines())),
'json': (json.__name__, _hash_python_lines(inspect.getsource(json).splitlines())),
'pandas': (pandas.__name__, _hash_python_lines(inspect.getsource(pandas).splitlines())),
'parquet': (parquet.__name__, _hash_python_lines(inspect.getsource(parquet).splitlines())),
'arrow': (arrow.__name__, _hash_python_lines(inspect.getsource(arrow).splitlines())),
'text': (text.__name__, _hash_python_lines(inspect.getsource(text).splitlines())),
'imagefolder': (imagefolder.__name__, _hash_python_lines(inspect.getsource(imagefolder).splitlines())),
'audiofolder': (audiofolder.__name__, _hash_python_lines(inspect.getsource(audiofolder).splitlines())),
}
# Used to infer the module to use based on the data files extensions
_EXTENSION_TO_MODULE = {
'.csv': ('csv', {}),
'.tsv': ('csv', {'sep': '\t'}),
'.json': ('json', {}),
'.jsonl': ('json', {}),
'.parquet': ('parquet', {}),
'.arrow': ('arrow', {}),
'.txt': ('text', {}),
}
_EXTENSION_TO_MODULE.update({ext: ('imagefolder', {}) for ext in imagefolder.ImageFolder.EXTENSIONS})
_EXTENSION_TO_MODULE.update({ext.upper(): ('imagefolder', {}) for ext in imagefolder.ImageFolder.EXTENSIONS})
_EXTENSION_TO_MODULE.update({ext: ('audiofolder', {}) for ext in audiofolder.AudioFolder.EXTENSIONS})
_EXTENSION_TO_MODULE.update({ext.upper(): ('audiofolder', {}) for ext in audiofolder.AudioFolder.EXTENSIONS})
_MODULE_SUPPORTS_METADATA = {"imagefolder", "audiofolder"}
# Used to filter data files based on extensions given a module name
_MODULE_TO_EXTENSIONS: Dict[str, List[str]] = {}
for _ext, (_module, _) in _EXTENSION_TO_MODULE.items():
_MODULE_TO_EXTENSIONS.setdefault(_module, []).append(_ext)
_MODULE_TO_EXTENSIONS["imagefolder"].append('.zip')
_MODULE_TO_EXTENSIONS["audiofolder"].append('.zip')
| 50 |
"""simple docstring"""
def jaccard_similarity(set_a, set_b, alternative_union=False):
    """
    Find the Jaccard similarity between two sets, i.e. the size of the
    intersection divided by the size of the union.
    """
    if isinstance(set_a, set) and isinstance(set_b, set):
        intersection = len(set_a.intersection(set_b))

        if alternative_union:
            union = len(set_a) + len(set_b)
        else:
            union = len(set_a.union(set_b))

        return intersection / union

    if isinstance(set_a, (list, tuple)) and isinstance(set_b, (list, tuple)):
        intersection = [element for element in set_a if element in set_b]

        if alternative_union:
            union = len(set_a) + len(set_b)
            return len(intersection) / union
        else:
            union = set_a + [element for element in set_b if element not in set_a]
            return len(intersection) / len(union)

    return None
if __name__ == "__main__":
    set_a = {"a", "b", "c", "d", "e"}
    set_b = {"c", "d", "e", "f", "h", "i"}
print(jaccard_similarity(set_a, set_b))
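    # Expected output (illustrative note): the sets above share {'c', 'd', 'e'}
    # and their union has 8 elements, so the printed similarity is 3 / 8 = 0.375.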
| 52 | 0 |
'''simple docstring'''
import math
import sys
def minimum_squares_to_represent_a_number(number: int) -> int:
    """
    Count the minimum number of perfect squares that sum to `number`,
    using dynamic programming over all values up to `number`.
    """
    if number != int(number):
        raise ValueError("the value of input must be a natural number")
    if number < 0:
        raise ValueError("the value of input must not be a negative number")
    if number == 0:
        return 1
    answers = [-1] * (number + 1)
    answers[0] = 0
    for i in range(1, number + 1):
        answer = sys.maxsize
        root = int(math.sqrt(i))
        for j in range(1, root + 1):
            current_answer = 1 + answers[i - (j**2)]
            answer = min(answer, current_answer)
        answers[i] = answer
    return answers[number]
if __name__ == "__main__":
import doctest
doctest.testmod()
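    # Quick sanity check (illustrative addition): 12 = 4 + 4 + 4, so three
    # perfect squares suffice and no two-square representation of 12 exists.
    assert minimum_squares_to_represent_a_number(12) == 3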
| 51 |
"""simple docstring"""
from __future__ import annotations
import os
from collections.abc import Mapping
EdgeT = tuple[int, int]
class Graph:
    """Weighted, undirected graph."""

    def __init__(self, vertices: set[int], edges: Mapping[EdgeT, int]) -> None:
        self.vertices: set[int] = vertices
        self.edges: dict[EdgeT, int] = {
            (min(edge), max(edge)): weight for edge, weight in edges.items()
        }

    def add_edge(self, edge: EdgeT, weight: int) -> None:
        """Add a new edge to the graph."""
        self.vertices.add(edge[0])
        self.vertices.add(edge[1])
        self.edges[(min(edge), max(edge))] = weight

    def prims_algorithm(self) -> Graph:
        """Run Prim's algorithm to find the minimum spanning tree."""
        subgraph: Graph = Graph({min(self.vertices)}, {})

        min_edge: EdgeT
        min_weight: int
        edge: EdgeT
        weight: int

        while len(subgraph.vertices) < len(self.vertices):
            min_weight = max(self.edges.values()) + 1
            for edge, weight in self.edges.items():
                if (edge[0] in subgraph.vertices) ^ (edge[1] in subgraph.vertices):
                    if weight < min_weight:
                        min_edge = edge
                        min_weight = weight
            subgraph.add_edge(min_edge, min_weight)
        return subgraph
def solution(filename: str = "p107_network.txt") -> int:
    """
    Find the maximum saving achievable by removing redundant edges from the
    network while keeping it connected (Project Euler 107).
    """
    script_dir: str = os.path.abspath(os.path.dirname(__file__))
    network_file: str = os.path.join(script_dir, filename)
    edges: dict[EdgeT, int] = {}
    data: list[str]
    edge1: int
    edge2: int

    with open(network_file) as f:
        data = f.read().strip().split("\n")

    adjaceny_matrix = [line.split(",") for line in data]
    for edge1 in range(1, len(adjaceny_matrix)):
        for edge2 in range(edge1):
            if adjaceny_matrix[edge1][edge2] != "-":
                edges[(edge2, edge1)] = int(adjaceny_matrix[edge1][edge2])

    graph: Graph = Graph(set(range(len(adjaceny_matrix))), edges)
    subgraph: Graph = graph.prims_algorithm()

    initial_total: int = sum(graph.edges.values())
    optimal_total: int = sum(subgraph.edges.values())
    return initial_total - optimal_total
if __name__ == "__main__":
print(F'{solution() = }')
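    # Known result (illustrative note): with the official Project Euler data
    # file p107_network.txt next to this script, solution() returns 259679.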
| 52 | 0 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

SPEECH_TO_TEXT_2_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/s2t-wav2vec2-large-en-de": (
        "https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/config.json"
    ),
    # See all Speech2Text models at https://huggingface.co/models?filter=speech2text2
}
class Speech2Text2Config(PretrainedConfig):
    model_type = "speech_to_text_2"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "decoder_attention_heads", "hidden_size": "d_model"}

    def __init__(
        self,
        vocab_size=10000,
        decoder_layers=6,
        decoder_ffn_dim=2048,
        decoder_attention_heads=4,
        decoder_layerdrop=0.0,
        use_cache=True,
        activation_function="relu",
        d_model=256,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        decoder_start_token_id=2,
        scale_embedding=True,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        max_target_positions=1024,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = decoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        self.max_target_positions = max_target_positions

        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            decoder_start_token_id=decoder_start_token_id,
            **kwargs,
        )
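# Usage sketch (illustrative; the argument values simply restate the defaults):
#
#     config = Speech2Text2Config(vocab_size=10000, d_model=256, decoder_layers=6)
#
# This is a decoder-only config, typically paired with a speech encoder via a
# SpeechEncoderDecoderModel.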
| 53 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/trocr-base-handwritten": (
        "https://huggingface.co/microsoft/trocr-base-handwritten/resolve/main/config.json"
    ),
    # See all TrOCR models at https://huggingface.co/models?filter=trocr
}
class TrOCRConfig(PretrainedConfig):
    model_type = "trocr"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "num_attention_heads": "decoder_attention_heads",
        "hidden_size": "d_model",
        "num_hidden_layers": "decoder_layers",
    }

    def __init__(
        self,
        vocab_size=50265,
        d_model=1024,
        decoder_layers=12,
        decoder_attention_heads=16,
        decoder_ffn_dim=4096,
        activation_function="gelu",
        max_position_embeddings=512,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        decoder_start_token_id=2,
        init_std=0.02,
        decoder_layerdrop=0.0,
        use_cache=True,
        scale_embedding=False,
        use_learned_position_embeddings=True,
        layernorm_embedding=True,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.activation_function = activation_function
        self.max_position_embeddings = max_position_embeddings
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.init_std = init_std
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.scale_embedding = scale_embedding
        self.use_learned_position_embeddings = use_learned_position_embeddings
        self.layernorm_embedding = layernorm_embedding

        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            decoder_start_token_id=decoder_start_token_id,
            **kwargs,
        )
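# Usage sketch (illustrative; the overridden values are assumptions):
#
#     config = TrOCRConfig(d_model=512, decoder_layers=6, decoder_attention_heads=8)
#
# In the TrOCR setup this decoder config is combined with a vision encoder via
# a VisionEncoderDecoderModel.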
| 52 | 0 |
import argparse
import os
import re
import tensorflow as tf
import torch
from transformers import BertConfig, BertModel
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def load_tfa_weights_in_bert(model, tf_checkpoint_path, config):
    """Load TF2-style weights from a checkpoint into a PyTorch BertModel."""
    tf_path = os.path.abspath(tf_checkpoint_path)
    logger.info(f"Converting TensorFlow checkpoint from {tf_path}")
    # Load weights from TF model
    init_vars = tf.train.list_variables(tf_path)
    names = []
    arrays = []
    layer_depth = []
    for full_name, shape in init_vars:
        # logger.info(f"Loading TF weight {name} with shape {shape}")
        name = full_name.split("/")
        if full_name == "_CHECKPOINTABLE_OBJECT_GRAPH" or name[0] in ["global_step", "save_counter"]:
            logger.info(f"Skipping non-model layer {full_name}")
            continue
        if "optimizer" in full_name:
            logger.info(f"Skipping optimization layer {full_name}")
            continue
        if name[0] == "model":
            # ignore initial 'model'
            name = name[1:]
        # figure out how many levels deep the name is
        depth = 0
        for _name in name:
            if _name.startswith("layer_with_weights"):
                depth += 1
            else:
                break
        layer_depth.append(depth)
        # read data
        array = tf.train.load_variable(tf_path, full_name)
        names.append("/".join(name))
        arrays.append(array)
    logger.info(f"Read a total of {len(arrays):,} layers")
    # Sanity check
    if len(set(layer_depth)) != 1:
        raise ValueError(f"Found layer names with different depths (layer depth {list(set(layer_depth))})")
    layer_depth = list(set(layer_depth))[0]
if layer_depth != 1:
raise ValueError(
"The model contains more than just the embedding/encoder layers. This script does not handle MLM/NSP"
" heads." )
# convert layers
logger.info("Converting weights..." )
    for full_name, array in zip(names, arrays):
        name = full_name.split("/")
        pointer = model
        trace = []
        for i, m_name in enumerate(name):
            if m_name == ".ATTRIBUTES":
                # variable names end with .ATTRIBUTES/VARIABLE_VALUE
                break
            if m_name.startswith("layer_with_weights"):
                layer_num = int(m_name.split("-")[-1])
                if layer_num <= 2:
                    # embedding layers
                    # layer_num 0: word_embeddings
                    # layer_num 1: position_embeddings
                    # layer_num 2: token_type_embeddings
                    continue
                elif layer_num == 3:
                    # embedding LayerNorm
                    trace.extend(["embeddings", "LayerNorm"])
                    pointer = getattr(pointer, "embeddings")
                    pointer = getattr(pointer, "LayerNorm")
                elif layer_num > 3 and layer_num < config.num_hidden_layers + 4:
                    # encoder layers
                    trace.extend(["encoder", "layer", str(layer_num - 4)])
                    pointer = getattr(pointer, "encoder")
                    pointer = getattr(pointer, "layer")
                    pointer = pointer[layer_num - 4]
                elif layer_num == config.num_hidden_layers + 4:
                    # pooler layer
                    trace.extend(["pooler", "dense"])
                    pointer = getattr(pointer, "pooler")
                    pointer = getattr(pointer, "dense")
            elif m_name == "embeddings":
                trace.append("embeddings")
                pointer = getattr(pointer, "embeddings")
                if layer_num == 0:
                    trace.append("word_embeddings")
                    pointer = getattr(pointer, "word_embeddings")
                elif layer_num == 1:
                    trace.append("position_embeddings")
                    pointer = getattr(pointer, "position_embeddings")
                elif layer_num == 2:
                    trace.append("token_type_embeddings")
                    pointer = getattr(pointer, "token_type_embeddings")
                else:
                    raise ValueError(f"Unknown embedding layer with name {full_name}")
                trace.append("weight")
                pointer = getattr(pointer, "weight")
            elif m_name == "_attention_layer":
                # self-attention layer
                trace.extend(["attention", "self"])
                pointer = getattr(pointer, "attention")
                pointer = getattr(pointer, "self")
            elif m_name == "_attention_layer_norm":
                # output attention norm
                trace.extend(["attention", "output", "LayerNorm"])
                pointer = getattr(pointer, "attention")
                pointer = getattr(pointer, "output")
                pointer = getattr(pointer, "LayerNorm")
            elif m_name == "_attention_output_dense":
                # output attention dense
                trace.extend(["attention", "output", "dense"])
                pointer = getattr(pointer, "attention")
                pointer = getattr(pointer, "output")
                pointer = getattr(pointer, "dense")
            elif m_name == "_output_dense":
                # output dense
                trace.extend(["output", "dense"])
                pointer = getattr(pointer, "output")
                pointer = getattr(pointer, "dense")
            elif m_name == "_output_layer_norm":
                # output dense
                trace.extend(["output", "LayerNorm"])
                pointer = getattr(pointer, "output")
                pointer = getattr(pointer, "LayerNorm")
            elif m_name == "_key_dense":
                # attention key
                trace.append("key")
                pointer = getattr(pointer, "key")
            elif m_name == "_query_dense":
                # attention query
                trace.append("query")
                pointer = getattr(pointer, "query")
            elif m_name == "_value_dense":
                # attention value
                trace.append("value")
                pointer = getattr(pointer, "value")
            elif m_name == "_intermediate_dense":
                # attention intermediate dense
                trace.extend(["intermediate", "dense"])
                pointer = getattr(pointer, "intermediate")
                pointer = getattr(pointer, "dense")
            elif m_name == "_output_layer_norm":
                # output layer norm
                trace.append("output")
                pointer = getattr(pointer, "output")
            # weights & biases
            elif m_name in ["bias", "beta"]:
                trace.append("bias")
                pointer = getattr(pointer, "bias")
            elif m_name in ["kernel", "gamma"]:
                trace.append("weight")
                pointer = getattr(pointer, "weight")
            else:
                logger.warning(f"Ignored {m_name}")
        # for certain layers reshape is necessary
        trace = ".".join(trace)
        if re.match(r"(\S+)\.attention\.self\.(key|value|query)\.(bias|weight)", trace) or re.match(
            r"(\S+)\.attention\.output\.dense\.weight", trace
        ):
            array = array.reshape(pointer.data.shape)
        if "kernel" in full_name:
            array = array.transpose()
        if pointer.shape == array.shape:
            pointer.data = torch.from_numpy(array)
        else:
            raise ValueError(
                f"Shape mismatch in layer {full_name}: Model expects shape {pointer.shape} but layer contains shape:"
                f" {array.shape}"
            )
        logger.info(f"Successfully set variable {full_name} to PyTorch layer {trace}")
    return model
def convert_tfa_checkpoint_to_pytorch(tf_checkpoint_path, config_path, pytorch_dump_path):
    # Instantiate model
    logger.info(f"Loading model based on config from {config_path}...")
    config = BertConfig.from_json_file(config_path)
    model = BertModel(config)

    # Load weights from checkpoint
    logger.info(f"Loading weights from checkpoint {tf_checkpoint_path}...")
    load_tfa_weights_in_bert(model, tf_checkpoint_path, config)

    # Save pytorch-model
    logger.info(f"Saving PyTorch model to {pytorch_dump_path}...")
    torch.save(model.state_dict(), pytorch_dump_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
"""--tf_checkpoint_path""", type=str, required=True, help="""Path to the TensorFlow 2.x checkpoint path."""
)
parser.add_argument(
"""--bert_config_file""",
type=str,
required=True,
help="""The config json file corresponding to the BERT model. This specifies the model architecture.""",
)
parser.add_argument(
"""--pytorch_dump_path""",
type=str,
required=True,
help="""Path to the output PyTorch model (must include filename).""",
)
    args = parser.parse_args()
convert_tfa_checkpoint_to_pytorch(args.tf_checkpoint_path, args.bert_config_file, args.pytorch_dump_path)
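# Example invocation (illustrative; the script name and paths are placeholders):
#   python convert_bert_original_tf2_checkpoint_to_pytorch.py \
#     --tf_checkpoint_path ./tf2_model/bert_model.ckpt \
#     --bert_config_file ./tf2_model/bert_config.json \
#     --pytorch_dump_path ./pytorch_model.bin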
| 54 |
"""simple docstring"""
import torch
from transformers import CamembertForMaskedLM, CamembertTokenizer
def fill_mask(masked_input, model, tokenizer, topk=5):
    # Adapted from https://github.com/pytorch/fairseq/blob/master/fairseq/models/roberta/hub_interface.py
    assert masked_input.count("<mask>") == 1
    input_ids = torch.tensor(tokenizer.encode(masked_input, add_special_tokens=True)).unsqueeze(0)  # Batch size 1
    logits = model(input_ids)[0]  # The last hidden-state is the first element of the output tuple
    masked_index = (input_ids.squeeze() == tokenizer.mask_token_id).nonzero().item()
    logits = logits[0, masked_index, :]
    prob = logits.softmax(dim=0)
    values, indices = prob.topk(k=topk, dim=0)
    topk_predicted_token_bpe = " ".join(
        [tokenizer.convert_ids_to_tokens(indices[i].item()) for i in range(len(indices))]
    )
    masked_token = tokenizer.mask_token
    topk_filled_outputs = []
    for index, predicted_token_bpe in enumerate(topk_predicted_token_bpe.split(" ")):
        predicted_token = predicted_token_bpe.replace("\u2581", " ")
        if " {0}".format(masked_token) in masked_input:
            topk_filled_outputs.append(
                (
                    masked_input.replace(" {0}".format(masked_token), predicted_token),
                    values[index].item(),
                    predicted_token,
                )
            )
        else:
            topk_filled_outputs.append(
                (
                    masked_input.replace(masked_token, predicted_token),
                    values[index].item(),
                    predicted_token,
                )
            )
    return topk_filled_outputs
tokenizer = CamembertTokenizer.from_pretrained("camembert-base")
model = CamembertForMaskedLM.from_pretrained("camembert-base")
model.eval()

masked_input = "Le camembert est <mask> :)"
print(fill_mask(masked_input, model, tokenizer, topk=3))
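# Output note (illustrative): each entry of the printed list is a
# (filled_sentence, probability, predicted_token) tuple, so topk=3 yields the
# three most likely completions of the masked sentence.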
| 52 | 0 |
from .constants import (
MODEL_NAME,
OPTIMIZER_NAME,
RNG_STATE_NAME,
SAFE_WEIGHTS_INDEX_NAME,
SAFE_WEIGHTS_NAME,
SCALER_NAME,
SCHEDULER_NAME,
TORCH_LAUNCH_PARAMS,
WEIGHTS_INDEX_NAME,
WEIGHTS_NAME,
)
from .dataclasses import (
BnbQuantizationConfig,
ComputeEnvironment,
CustomDtype,
DeepSpeedPlugin,
DistributedDataParallelKwargs,
DistributedType,
DynamoBackend,
FPaRecipeKwargs,
FullyShardedDataParallelPlugin,
GradientAccumulationPlugin,
GradScalerKwargs,
InitProcessGroupKwargs,
KwargsHandler,
LoggerType,
MegatronLMPlugin,
PrecisionType,
ProjectConfiguration,
RNGType,
SageMakerDistributedType,
TensorInformation,
TorchDynamoPlugin,
)
from .environment import get_int_from_env, parse_choice_from_env, parse_flag_from_env
from .imports import (
get_ccl_version,
    is_4bit_bnb_available,
    is_8bit_bnb_available,
is_aim_available,
is_bfaa_available,
is_bnb_available,
is_botoa_available,
is_ccl_available,
is_comet_ml_available,
is_datasets_available,
is_deepspeed_available,
is_fpa_available,
is_ipex_available,
is_megatron_lm_available,
is_mlflow_available,
is_mps_available,
is_npu_available,
is_rich_available,
is_safetensors_available,
is_sagemaker_available,
is_tensorboard_available,
is_tpu_available,
is_transformers_available,
is_wandb_available,
is_xpu_available,
)
from .modeling import (
check_device_map,
check_tied_parameters_in_config,
check_tied_parameters_on_same_device,
compute_module_sizes,
convert_file_size_to_int,
dtype_byte_size,
find_tied_parameters,
get_balanced_memory,
get_max_layer_size,
get_max_memory,
get_mixed_precision_context_manager,
id_tensor_storage,
infer_auto_device_map,
load_checkpoint_in_model,
load_offloaded_weights,
load_state_dict,
named_module_tensors,
retie_parameters,
set_module_tensor_to_device,
shard_checkpoint,
)
from .offload import (
OffloadedWeightsLoader,
PrefixedDataset,
extract_submodules_state_dict,
load_offloaded_weight,
offload_state_dict,
offload_weight,
save_offload_index,
)
from .operations import (
broadcast,
broadcast_object_list,
concatenate,
convert_outputs_to_fpaa,
convert_to_fpaa,
find_batch_size,
find_device,
gather,
gather_object,
get_data_structure,
honor_type,
initialize_tensors,
is_namedtuple,
is_tensor_information,
is_torch_tensor,
listify,
pad_across_processes,
recursively_apply,
reduce,
send_to_device,
slice_tensors,
)
from .versions import compare_versions, is_torch_version
if is_deepspeed_available():
from .deepspeed import (
DeepSpeedEngineWrapper,
DeepSpeedOptimizerWrapper,
DeepSpeedSchedulerWrapper,
DummyOptim,
DummyScheduler,
HfDeepSpeedConfig,
)
from .bnb import has_abit_bnb_layers, load_and_quantize_model
from .fsdp_utils import load_fsdp_model, load_fsdp_optimizer, save_fsdp_model, save_fsdp_optimizer
from .launch import (
PrepareForLaunch,
_filter_args,
prepare_deepspeed_cmd_env,
prepare_multi_gpu_env,
prepare_sagemager_args_inputs,
prepare_simple_launcher_cmd_env,
prepare_tpu,
)
from .megatron_lm import (
AbstractTrainStep,
BertTrainStep,
GPTTrainStep,
MegatronEngine,
MegatronLMDummyDataLoader,
MegatronLMDummyScheduler,
MegatronLMOptimizerWrapper,
MegatronLMSchedulerWrapper,
TaTrainStep,
avg_losses_across_data_parallel_group,
gather_across_data_parallel_groups,
)
from .megatron_lm import initialize as megatron_lm_initialize
from .megatron_lm import prepare_data_loader as megatron_lm_prepare_data_loader
from .megatron_lm import prepare_model as megatron_lm_prepare_model
from .megatron_lm import prepare_optimizer as megatron_lm_prepare_optimizer
from .megatron_lm import prepare_scheduler as megatron_lm_prepare_scheduler
from .memory import find_executable_batch_size, release_memory
from .other import (
extract_model_from_parallel,
get_pretty_name,
is_port_in_use,
merge_dicts,
patch_environment,
save,
wait_for_everyone,
write_basic_config,
)
from .random import set_seed, synchronize_rng_state, synchronize_rng_states
from .torch_xla import install_xla
from .tqdm import tqdm
from .transformer_engine import convert_model, has_transformer_engine_layers
| 55 |
"""simple docstring"""
import unittest
from knapsack import greedy_knapsack as kp
class TestClass(unittest.TestCase):
    """Test cases for the greedy knapsack problem."""
    def test_sorted(self):
        profit = [10, 20, 30, 40, 50, 60]
        weight = [2, 4, 6, 8, 10, 12]
        max_weight = 100
        self.assertEqual(kp.calc_profit(profit, weight, max_weight), 210)

    def test_negative_max_weight(self):
        self.assertRaisesRegex(ValueError, 'max_weight must greater than zero.')

    def test_negative_weight_value(self):
        self.assertRaisesRegex(ValueError, 'Weight can not be negative.')

    def test_negative_profit_value(self):
        self.assertRaisesRegex(ValueError, 'Profit can not be negative.')

    def test_null_max_weight(self):
        self.assertRaisesRegex(ValueError, 'max_weight must greater than zero.')

    def test_unequal_list_length(self):
        self.assertRaisesRegex(
            IndexError, 'The length of profit and weight must be same.')
if __name__ == "__main__":
unittest.main()
| 52 | 0 |
'''simple docstring'''
import argparse
from pathlib import Path
import torch
from transformers import OPTConfig, OPTModel
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def load_checkpoint(checkpoint_path):
    """Load a metaseq/fairseq checkpoint and rename its weights to the HF OPT layout."""
    sd = torch.load(checkpoint_path, map_location='cpu')
    if 'model' in sd.keys():
        sd = torch.load(checkpoint_path, map_location='cpu')['model']

    # pop unnecessary weights
    keys_to_delete = [
        'decoder.version',
        'decoder.output_projection.weight',
    ]
    for key in keys_to_delete:
        if key in sd:
            sd.pop(key)

    keys_to_rename = {
        'decoder.project_in_dim.weight': 'decoder.project_in.weight',
        'decoder.project_out_dim.weight': 'decoder.project_out.weight',
        'decoder.layer_norm.weight': 'decoder.final_layer_norm.weight',
        'decoder.layer_norm.bias': 'decoder.final_layer_norm.bias',
    }
    for old_key, new_key in keys_to_rename.items():
        if old_key in sd:
            sd[new_key] = sd.pop(old_key)

    keys = list(sd.keys())
    for key in keys:
        if ".qkv_proj." in key:
            value = sd[key]
            # We split QKV in separate Q,K,V
            q_name = key.replace('.qkv_proj.', '.q_proj.')
            k_name = key.replace('.qkv_proj.', '.k_proj.')
            v_name = key.replace('.qkv_proj.', '.v_proj.')
            depth = value.shape[0]
            assert depth % 3 == 0
            # `SequeuceParallelTransformerBlock` stores the fused weight in K, V, Q order despite the naming:
            # https://cs.github.com/facebookresearch/metaseq/blob/51871bd73cd04c038f239ea2a26db1d7f6b37927/metaseq/modules/sequence_parallel_transformer_layer.py#L97
            k, v, q = torch.split(value, depth // 3, dim=0)
            sd[q_name] = q
            sd[k_name] = k
            sd[v_name] = v
            del sd[key]
    return sd
@torch.no_grad()
def convert_opt_checkpoint(checkpoint_path, pytorch_dump_folder_path, config=None):
    """Convert a metaseq OPT checkpoint and save it in Hugging Face format."""
    state_dict = load_checkpoint(checkpoint_path)

    if config is not None:
        config = OPTConfig.from_pretrained(config)
    else:
        config = OPTConfig()

    model = OPTModel(config).half().eval()
    model.load_state_dict(state_dict)

    # Check results
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    model.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--fairseq_path",
type=str,
help=(
"path to fairseq checkpoint in correct format. You can find all checkpoints in the correct format here:"
" https://huggingface.co/models?other=opt_metasq"
),
)
parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument("--hf_config", default=None, type=str, help="Define HF config.")
    args = parser.parse_args()
convert_opt_checkpoint(args.fairseq_path, args.pytorch_dump_folder_path, config=args.hf_config)
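
    # Example invocation (illustrative addition; the paths are placeholders, not
    # from the original source):
    # python convert_opt_checkpoint.py --fairseq_path ./restored.pt \
    #     --pytorch_dump_folder_path ./opt-hf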
| 56 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP = {}
class LlamaConfig(PretrainedConfig):
    model_type = 'llama'
    keys_to_ignore_at_inference = ['past_key_values']

    def __init__(
        self,
        vocab_size=32000,
        hidden_size=4096,
        intermediate_size=11008,
        num_hidden_layers=32,
        num_attention_heads=32,
        num_key_value_heads=None,
        hidden_act='silu',
        max_position_embeddings=2048,
        initializer_range=0.02,
        rms_norm_eps=1e-6,
        use_cache=True,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        pretraining_tp=1,
        tie_word_embeddings=False,
        rope_scaling=None,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        # for backward compatibility
        if num_key_value_heads is None:
            num_key_value_heads = num_attention_heads
        self.num_key_value_heads = num_key_value_heads
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.rms_norm_eps = rms_norm_eps
        self.pretraining_tp = pretraining_tp
        self.use_cache = use_cache
        self.rope_scaling = rope_scaling
        self._rope_scaling_validation()

        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            tie_word_embeddings=tie_word_embeddings,
            **kwargs,
        )

    def _rope_scaling_validation(self):
        """Validate the `rope_scaling` configuration."""
        if self.rope_scaling is None:
            return

        if not isinstance(self.rope_scaling, dict) or len(self.rope_scaling) != 2:
            raise ValueError(
                '`rope_scaling` must be a dictionary with two fields, `type` and `factor`, '
                f'got {self.rope_scaling}')
        rope_scaling_type = self.rope_scaling.get('type', None)
        rope_scaling_factor = self.rope_scaling.get('factor', None)
        if rope_scaling_type is None or rope_scaling_type not in ['linear', 'dynamic']:
            raise ValueError(
                f"`rope_scaling`'s type field must be one of ['linear', 'dynamic'], got {rope_scaling_type}")
        if rope_scaling_factor is None or not isinstance(rope_scaling_factor, float) or rope_scaling_factor <= 1.0:
            raise ValueError(f"`rope_scaling`'s factor field must be a float > 1, got {rope_scaling_factor}")
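
# Illustrative usage (added example, not part of the original module): a config
# enabling linear RoPE scaling; `_rope_scaling_validation` accepts exactly the
# two fields shown here.
#
#     config = LlamaConfig(rope_scaling={"type": "linear", "factor": 2.0})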
| 52 | 0 |
import argparse
import json
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils.deepspeed import DummyOptim, DummyScheduler
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32
def get_dataloaders(accelerator, batch_size=16, model_name='bert-base-cased'):
    tokenizer = AutoTokenizer.from_pretrained(model_name)
    datasets = load_dataset('glue', 'mrpc')

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples['sentence1'], examples['sentence2'], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    tokenized_datasets = datasets.map(
        tokenize_function, batched=True, remove_columns=['idx', 'sentence1', 'sentence2'], load_from_cache_file=False)

    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column('label', 'labels')

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        if accelerator.distributed_type == DistributedType.TPU:
            return tokenizer.pad(examples, padding='max_length', max_length=128, return_tensors='pt')
        return tokenizer.pad(examples, padding='longest', return_tensors='pt')

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets['train'], shuffle=True, collate_fn=collate_fn, batch_size=batch_size)
    eval_dataloader = DataLoader(
        tokenized_datasets['validation'], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE)
    return train_dataloader, eval_dataloader
def training_function(config, args):
    # Initialize accelerator
    accelerator = Accelerator()

    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config['lr']
    num_epochs = int(config['num_epochs'])
    seed = int(config['seed'])
    batch_size = int(config['batch_size'])
    model_name_or_path = args.model_name_or_path

    set_seed(seed)
    train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size, model_name_or_path)

    # Instantiate the model (we build the model here so that the seed also control new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained(model_name_or_path, return_dict=True)

    # Instantiate optimizer
    optimizer_cls = (
        AdamW
        if accelerator.state.deepspeed_plugin is None
        or 'optimizer' not in accelerator.state.deepspeed_plugin.deepspeed_config
        else DummyOptim
    )
    optimizer = optimizer_cls(params=model.parameters(), lr=lr)

    if accelerator.state.deepspeed_plugin is not None:
        gradient_accumulation_steps = accelerator.state.deepspeed_plugin.deepspeed_config[
            'gradient_accumulation_steps'
        ]
    else:
        gradient_accumulation_steps = 1
    max_training_steps = (len(train_dataloader) * num_epochs) // gradient_accumulation_steps

    # Instantiate scheduler
    if (
        accelerator.state.deepspeed_plugin is None
        or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config
    ):
        lr_scheduler = get_linear_schedule_with_warmup(
            optimizer=optimizer, num_warmup_steps=0, num_training_steps=max_training_steps, )
    else:
        lr_scheduler = DummyScheduler(optimizer, total_num_steps=max_training_steps, warmup_num_steps=0)

    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler)

    # We need to keep track of how many total steps we have iterated over
    overall_step = 0
    # We also need to keep track of the starting epoch so files are named properly
    starting_epoch = 0

    # Now we train the model
    metric = evaluate.load('glue', 'mrpc')
    best_performance = 0
    performance_metric = {}
    for epoch in range(starting_epoch, num_epochs):
        model.train()
        for step, batch in enumerate(train_dataloader):
            outputs = model(**batch)
            loss = outputs.loss
            loss = loss / gradient_accumulation_steps
            accelerator.backward(loss)
            if step % gradient_accumulation_steps == 0:
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()
            overall_step += 1

        model.eval()
        samples_seen = 0
        for step, batch in enumerate(eval_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            with torch.no_grad():
                outputs = model(**batch)
            predictions = outputs.logits.argmax(dim=-1)
            # It is slightly faster to call this once, than multiple times
            predictions, references = accelerator.gather(
                (predictions, batch['labels']))  # If we are in a multiprocess environment, the last batch has duplicates
            if accelerator.use_distributed:
                if step == len(eval_dataloader) - 1:
                    predictions = predictions[: len(eval_dataloader.dataset) - samples_seen]
                    references = references[: len(eval_dataloader.dataset) - samples_seen]
                else:
                    samples_seen += references.shape[0]
            metric.add_batch(
                predictions=predictions, references=references, )
        eval_metric = metric.compute()
        # Use accelerator.print to print only on the main process.
        accelerator.print(F'''epoch {epoch}:''', eval_metric)
        performance_metric[F'''epoch-{epoch}'''] = eval_metric['accuracy']
        if best_performance < eval_metric["accuracy"]:
            best_performance = eval_metric['accuracy']

    if args.performance_lower_bound is not None:
        assert (
            args.performance_lower_bound <= best_performance
        ), F'''Best performance metric {best_performance} is lower than the lower bound {args.performance_lower_bound}'''

    accelerator.wait_for_everyone()
    if accelerator.is_main_process:
        with open(os.path.join(args.output_dir, 'all_results.json'), 'w') as f:
            json.dump(performance_metric, f)
def main():
    parser = argparse.ArgumentParser(description='Simple example of training script tracking peak GPU memory usage.')
    parser.add_argument(
        '--model_name_or_path', type=str, default='bert-base-cased', help='Path to pretrained model or model identifier from huggingface.co/models.', required=False, )
    parser.add_argument(
        '--output_dir', type=str, default='.', help='Optional save directory where all checkpoint folders will be stored. Default is the current working directory.', )
    parser.add_argument(
        '--performance_lower_bound', type=float, default=None, help='Optional lower bound for the performance metric. If set, the training will throw error when the performance metric drops below this value.', )
    parser.add_argument(
        '--num_epochs', type=int, default=3, help='Number of train epochs.', )
    args = parser.parse_args()
    config = {'lr': 2e-5, 'num_epochs': args.num_epochs, 'seed': 42, 'batch_size': 16}
    training_function(config, args)
if __name__ == "__main__":
main()
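
# Typical launch command (illustrative addition; the config file name is an
# assumption, not from the original script):
# accelerate launch --config_file deepspeed_config.yaml this_script.py --num_epochs 3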
| 57 |
"""simple docstring"""
import unittest
import numpy as np
from transformers.testing_utils import is_flaky, require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DonutImageProcessor
class DonutImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_thumbnail=True,
        do_align_axis=False,
        do_pad=True,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size if size is not None else {'height': 18, 'width': 20}
        self.do_thumbnail = do_thumbnail
        self.do_align_axis = do_align_axis
        self.do_pad = do_pad
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
    def prepare_image_processor_dict(self):
return {
"do_resize": self.do_resize,
"size": self.size,
"do_thumbnail": self.do_thumbnail,
"do_align_long_axis": self.do_align_axis,
"do_pad": self.do_pad,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
}
@require_torch
@require_vision
class DonutImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = DonutImageProcessor if is_vision_available() else None
    def setUp(self):
        self.image_processor_tester = DonutImageProcessingTester(self)
@property
    def image_processor_dict(self):
return self.image_processor_tester.prepare_image_processor_dict()
    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, 'do_resize'))
        self.assertTrue(hasattr(image_processing, 'size'))
        self.assertTrue(hasattr(image_processing, 'do_thumbnail'))
        self.assertTrue(hasattr(image_processing, 'do_align_long_axis'))
        self.assertTrue(hasattr(image_processing, 'do_pad'))
        self.assertTrue(hasattr(image_processing, 'do_normalize'))
        self.assertTrue(hasattr(image_processing, 'image_mean'))
        self.assertTrue(hasattr(image_processing, 'image_std'))
    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {'height': 18, 'width': 20})

        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42)
        self.assertEqual(image_processor.size, {'height': 42, 'width': 42})

        # Previous config had dimensions in (width, height) order
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=(42, 84))
        self.assertEqual(image_processor.size, {'height': 84, 'width': 42})
    def test_batch_feature(self):
        pass
@is_flaky()
    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors='pt').pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['''height'''],
self.image_processor_tester.size['''width'''],
) , )
        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors='pt').pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['''height'''],
self.image_processor_tester.size['''width'''],
) , )
@is_flaky()
    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors='pt').pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['''height'''],
self.image_processor_tester.size['''width'''],
) , )
        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors='pt').pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['''height'''],
self.image_processor_tester.size['''width'''],
) , )
@is_flaky()
    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors='pt').pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['''height'''],
self.image_processor_tester.size['''width'''],
) , )
        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors='pt').pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['''height'''],
self.image_processor_tester.size['''width'''],
) , )
| 52 | 0 |
"""simple docstring"""
# Logistic Regression from scratch
# In[62]:
# In[63]:
# importing all the required libraries
import numpy as np
from matplotlib import pyplot as plt
from sklearn import datasets
def sigmoid_function(z):
    return 1 / (1 + np.exp(-z))


def cost_function(h, y):
    return (-y * np.log(h) - (1 - y) * np.log(1 - h)).mean()


def log_likelihood(x, y, weights):
    scores = np.dot(x, weights)
    return np.sum(y * scores - np.log(1 + np.exp(scores)))


def logistic_reg(alpha, x, y, max_iterations=70000):
    theta = np.zeros(x.shape[1])

    for iterations in range(max_iterations):
        z = np.dot(x, theta)
        h = sigmoid_function(z)
        gradient = np.dot(x.T, h - y) / y.size
        theta = theta - alpha * gradient  # updating the weights
        z = np.dot(x, theta)
        h = sigmoid_function(z)
        j = cost_function(h, y)
        if iterations % 100 == 0:
            print(f'loss: {j} \t')  # printing the loss after every 100 iterations
    return theta
# In[68]:
if __name__ == "__main__":
    iris = datasets.load_iris()
    x = iris.data[:, :2]
    y = (iris.target != 0) * 1
    alpha = 0.1
    theta = logistic_reg(alpha, x, y, max_iterations=70000)
    print('theta: ', theta)  # printing the theta i.e our weights vector
    def predict_prob(x):
        return sigmoid_function(
            np.dot(x, theta)
        )  # predicting the value of probability from the logistic regression algorithm
    plt.figure(figsize=(10, 6))
    plt.scatter(x[y == 0][:, 0], x[y == 0][:, 1], color='b', label='0')
    plt.scatter(x[y == 1][:, 0], x[y == 1][:, 1], color='r', label='1')
    (x1_min, x1_max) = (x[:, 0].min(), x[:, 0].max())
    (x2_min, x2_max) = (x[:, 1].min(), x[:, 1].max())
    (xx1, xx2) = np.meshgrid(np.linspace(x1_min, x1_max), np.linspace(x2_min, x2_max))
    grid = np.c_[xx1.ravel(), xx2.ravel()]
    probs = predict_prob(grid).reshape(xx1.shape)
    plt.contour(xx1, xx2, probs, [0.5], linewidths=1, colors='black')
    plt.legend()
    plt.show()
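
    # Sanity check (illustrative addition, not from the original file): sklearn's
    # LogisticRegression fitted on the same two features should produce a very
    # similar decision boundary.
    #
    #     from sklearn.linear_model import LogisticRegression
    #     clf = LogisticRegression().fit(x, y)
    #     print(clf.intercept_, clf.coef_)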
| 58 |
"""simple docstring"""
from __future__ import annotations
def maximum_non_adjacent_sum(nums: list[int]) -> int:
    """
    Find the maximum non-adjacent sum of the integers in the nums input list.

    >>> maximum_non_adjacent_sum([1, 2, 3])
    4
    >>> maximum_non_adjacent_sum([1, 5, 3, 7, 2, 2, 6])
    18
    >>> maximum_non_adjacent_sum([-1, -5, -3, -7, -2, -2, -6])
    0
    """
    if not nums:
        return 0
    max_including = nums[0]
    max_excluding = 0
    for num in nums[1:]:
        max_including, max_excluding = (
            max_excluding + num,
            max(max_including, max_excluding),
        )
    return max(max_including, max_excluding)
if __name__ == "__main__":
import doctest
doctest.testmod()
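
    # Illustrative example (added): the best non-adjacent picks from
    # [2, 7, 9, 3, 1] are 2 + 9 + 1 = 12.
    print(maximum_non_adjacent_sum([2, 7, 9, 3, 1]))  # 12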
| 52 | 0 |
import json
import os
import pickle
import shutil
import tempfile
from unittest import TestCase
from unittest.mock import patch
import numpy as np
from datasets import Dataset
from transformers import is_faiss_available
from transformers.models.bart.configuration_bart import BartConfig
from transformers.models.bart.tokenization_bart import BartTokenizer
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES as DPR_VOCAB_FILES_NAMES
from transformers.models.dpr.configuration_dpr import DPRConfig
from transformers.models.dpr.tokenization_dpr import DPRContextEncoderTokenizer, DPRQuestionEncoderTokenizer
from transformers.models.rag.configuration_rag import RagConfig
from transformers.models.rag.retrieval_rag import CustomHFIndex, RagRetriever
from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES as BART_VOCAB_FILES_NAMES
from transformers.testing_utils import require_faiss, require_sentencepiece, require_tokenizers, require_torch
if is_faiss_available():
import faiss
@require_faiss
class RagRetrieverTest(TestCase):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()
        self.retrieval_vector_size = 8
# DPR tok
        vocab_tokens = [
"[UNK]",
"[CLS]",
"[SEP]",
"[PAD]",
"[MASK]",
"want",
"##want",
"##ed",
"wa",
"un",
"runn",
"##ing",
",",
"low",
"lowest",
]
        dpr_tokenizer_path = os.path.join(self.tmpdirname, "dpr_tokenizer")
        os.makedirs(dpr_tokenizer_path, exist_ok=True)
        self.vocab_file = os.path.join(dpr_tokenizer_path, DPR_VOCAB_FILES_NAMES["vocab_file"])
with open(self.vocab_file , "w" , encoding="utf-8") as vocab_writer:
vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))
# BART tok
        vocab_tokens = [
"l",
"o",
"w",
"e",
"r",
"s",
"t",
"i",
"d",
"n",
"\u0120",
"\u0120l",
"\u0120n",
"\u0120lo",
"\u0120low",
"er",
"\u0120lowest",
"\u0120newer",
"\u0120wider",
"<unk>",
]
        vocab_tokens = dict(zip(vocab_tokens, range(len(vocab_tokens))))
        merges = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}

        bart_tokenizer_path = os.path.join(self.tmpdirname, "bart_tokenizer")
        os.makedirs(bart_tokenizer_path, exist_ok=True)
        self.vocab_file = os.path.join(bart_tokenizer_path, BART_VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(bart_tokenizer_path, BART_VOCAB_FILES_NAMES["merges_file"])
with open(self.vocab_file , "w" , encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
with open(self.merges_file , "w" , encoding="utf-8") as fp:
            fp.write("\n".join(merges))
    def get_dpr_tokenizer(self) -> DPRQuestionEncoderTokenizer:
        return DPRQuestionEncoderTokenizer.from_pretrained(os.path.join(self.tmpdirname, "dpr_tokenizer"))
    def get_dpr_ctx_encoder_tokenizer(self) -> DPRContextEncoderTokenizer:
        return DPRContextEncoderTokenizer.from_pretrained(os.path.join(self.tmpdirname, "dpr_tokenizer"))
    def get_bart_tokenizer(self) -> BartTokenizer:
        return BartTokenizer.from_pretrained(os.path.join(self.tmpdirname, "bart_tokenizer"))
    def tearDown(self):
        shutil.rmtree(self.tmpdirname)
    def get_dummy_dataset(self):
        dataset = Dataset.from_dict(
{
"id": ["0", "1"],
"text": ["foo", "bar"],
"title": ["Foo", "Bar"],
"embeddings": [np.ones(self.retrieval_vector_size), 2 * np.ones(self.retrieval_vector_size)],
})
dataset.add_faiss_index("embeddings" , string_factory="Flat" , metric_type=faiss.METRIC_INNER_PRODUCT)
return dataset
    def get_dummy_canonical_hf_index_retriever(self):
        dataset = self.get_dummy_dataset()
        config = RagConfig(
            retrieval_vector_size=self.retrieval_vector_size,
            question_encoder=DPRConfig().to_dict(),
            generator=BartConfig().to_dict(),
        )
        with patch("transformers.models.rag.retrieval_rag.load_dataset") as mock_load_dataset:
            mock_load_dataset.return_value = dataset
            retriever = RagRetriever(
                config,
                question_encoder_tokenizer=self.get_dpr_tokenizer(),
                generator_tokenizer=self.get_bart_tokenizer(),
            )
        return retriever
    def get_dummy_custom_hf_index_retriever(self, from_disk: bool):
        dataset = self.get_dummy_dataset()
        config = RagConfig(
            retrieval_vector_size=self.retrieval_vector_size,
            question_encoder=DPRConfig().to_dict(),
            generator=BartConfig().to_dict(),
            index_name="custom",
        )
        if from_disk:
            config.passages_path = os.path.join(self.tmpdirname, "dataset")
            config.index_path = os.path.join(self.tmpdirname, "index.faiss")
            dataset.get_index("embeddings").save(os.path.join(self.tmpdirname, "index.faiss"))
            dataset.drop_index("embeddings")
            dataset.save_to_disk(os.path.join(self.tmpdirname, "dataset"))
            del dataset
            retriever = RagRetriever(
                config,
                question_encoder_tokenizer=self.get_dpr_tokenizer(),
                generator_tokenizer=self.get_bart_tokenizer(),
            )
        else:
            retriever = RagRetriever(
                config,
                question_encoder_tokenizer=self.get_dpr_tokenizer(),
                generator_tokenizer=self.get_bart_tokenizer(),
                index=CustomHFIndex(config.retrieval_vector_size, dataset),
            )
        return retriever
    def get_dummy_legacy_index_retriever(self):
        dataset = Dataset.from_dict(
            {
                "id": ["0", "1"],
                "text": ["foo", "bar"],
                "title": ["Foo", "Bar"],
                "embeddings": [np.ones(self.retrieval_vector_size + 1), 2 * np.ones(self.retrieval_vector_size + 1)],
            })
        dataset.add_faiss_index("embeddings" , string_factory="Flat" , metric_type=faiss.METRIC_INNER_PRODUCT)

        index_file_name = os.path.join(self.tmpdirname, "hf_bert_base.hnswSQ8_correct_phi_128.c_index")
        dataset.save_faiss_index("embeddings", index_file_name + ".index.dpr")
        pickle.dump(dataset["id"], open(index_file_name + ".index_meta.dpr", "wb"))

        passages_file_name = os.path.join(self.tmpdirname, "psgs_w100.tsv.pkl")
        passages = {sample["id"]: [sample["text"], sample["title"]] for sample in dataset}
        pickle.dump(passages, open(passages_file_name, "wb"))

        config = RagConfig(
            retrieval_vector_size=self.retrieval_vector_size,
            question_encoder=DPRConfig().to_dict(),
            generator=BartConfig().to_dict(),
            index_name="legacy",
            index_path=self.tmpdirname,
        )
        retriever = RagRetriever(
            config, question_encoder_tokenizer=self.get_dpr_tokenizer(), generator_tokenizer=self.get_bart_tokenizer())
        return retriever
    def test_canonical_hf_index_retriever_retrieve(self):
        n_docs = 1
        retriever = self.get_dummy_canonical_hf_index_retriever()
        hidden_states = np.array(
            [np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.float32)
        retrieved_doc_embeds, doc_ids, doc_dicts = retriever.retrieve(hidden_states, n_docs=n_docs)
        self.assertEqual(retrieved_doc_embeds.shape, (2, n_docs, self.retrieval_vector_size))
        self.assertEqual(len(doc_dicts), 2)
        self.assertEqual(sorted(doc_dicts[0]), ["embeddings", "id", "text", "title"])
        self.assertEqual(len(doc_dicts[0]["id"]), n_docs)
        self.assertEqual(doc_dicts[0]["id"][0], "1")  # max inner product is reached with second doc
        self.assertEqual(doc_dicts[1]["id"][0], "0")  # max inner product is reached with first doc
        self.assertListEqual(doc_ids.tolist(), [[1], [0]])
    def test_canonical_hf_index_retriever_save_and_from_pretrained(self):
'''simple docstring'''
lowerCamelCase__: str =self.get_dummy_canonical_hf_index_retriever()
with tempfile.TemporaryDirectory() as tmp_dirname:
with patch("transformers.models.rag.retrieval_rag.load_dataset") as mock_load_dataset:
lowerCamelCase__: List[Any] =self.get_dummy_dataset()
retriever.save_pretrained(UpperCAmelCase_)
lowerCamelCase__: List[str] =RagRetriever.from_pretrained(UpperCAmelCase_)
self.assertIsInstance(UpperCAmelCase_ , UpperCAmelCase_)
lowerCamelCase__: Optional[Any] =np.array(
[np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)] , dtype=np.floataa)
lowerCamelCase__: Any =retriever.retrieve(UpperCAmelCase_ , n_docs=1)
self.assertTrue(out is not None)
    def test_custom_hf_index_retriever_retrieve(self):
'''simple docstring'''
lowerCamelCase__: Optional[int] =1
lowerCamelCase__: Optional[Any] =self.get_dummy_custom_hf_index_retriever(from_disk=UpperCAmelCase_)
lowerCamelCase__: Any =np.array(
[np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)] , dtype=np.floataa)
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__: str =retriever.retrieve(UpperCAmelCase_ , n_docs=UpperCAmelCase_)
self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size))
self.assertEqual(len(UpperCAmelCase_) , 2)
self.assertEqual(sorted(doc_dicts[0]) , ["embeddings", "id", "text", "title"])
self.assertEqual(len(doc_dicts[0]["id"]) , UpperCAmelCase_)
self.assertEqual(doc_dicts[0]["id"][0] , "1") # max inner product is reached with second doc
self.assertEqual(doc_dicts[1]["id"][0] , "0") # max inner product is reached with first doc
self.assertListEqual(doc_ids.tolist() , [[1], [0]])
    def test_custom_hf_index_retriever_save_and_from_pretrained(self):
'''simple docstring'''
lowerCamelCase__: Tuple =self.get_dummy_custom_hf_index_retriever(from_disk=UpperCAmelCase_)
with tempfile.TemporaryDirectory() as tmp_dirname:
retriever.save_pretrained(UpperCAmelCase_)
lowerCamelCase__: List[Any] =RagRetriever.from_pretrained(UpperCAmelCase_)
self.assertIsInstance(UpperCAmelCase_ , UpperCAmelCase_)
lowerCamelCase__: Tuple =np.array(
[np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)] , dtype=np.floataa)
lowerCamelCase__: Any =retriever.retrieve(UpperCAmelCase_ , n_docs=1)
self.assertTrue(out is not None)
    def test_custom_hf_index_retriever_retrieve_from_disk(self):
'''simple docstring'''
lowerCamelCase__: Tuple =1
lowerCamelCase__: Dict =self.get_dummy_custom_hf_index_retriever(from_disk=UpperCAmelCase_)
lowerCamelCase__: str =np.array(
[np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)] , dtype=np.floataa)
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__: Optional[Any] =retriever.retrieve(UpperCAmelCase_ , n_docs=UpperCAmelCase_)
self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size))
self.assertEqual(len(UpperCAmelCase_) , 2)
self.assertEqual(sorted(doc_dicts[0]) , ["embeddings", "id", "text", "title"])
self.assertEqual(len(doc_dicts[0]["id"]) , UpperCAmelCase_)
self.assertEqual(doc_dicts[0]["id"][0] , "1") # max inner product is reached with second doc
self.assertEqual(doc_dicts[1]["id"][0] , "0") # max inner product is reached with first doc
self.assertListEqual(doc_ids.tolist() , [[1], [0]])
    def test_custom_hf_index_retriever_save_and_from_pretrained_from_disk(self):
'''simple docstring'''
lowerCamelCase__: List[str] =self.get_dummy_custom_hf_index_retriever(from_disk=UpperCAmelCase_)
with tempfile.TemporaryDirectory() as tmp_dirname:
retriever.save_pretrained(UpperCAmelCase_)
lowerCamelCase__: Optional[Any] =RagRetriever.from_pretrained(UpperCAmelCase_)
self.assertIsInstance(UpperCAmelCase_ , UpperCAmelCase_)
lowerCamelCase__: List[Any] =np.array(
[np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)] , dtype=np.floataa)
lowerCamelCase__: str =retriever.retrieve(UpperCAmelCase_ , n_docs=1)
self.assertTrue(out is not None)
    def test_legacy_index_retriever_retrieve(self):
'''simple docstring'''
lowerCamelCase__: Any =1
lowerCamelCase__: Optional[int] =self.get_dummy_legacy_index_retriever()
lowerCamelCase__: Any =np.array(
[np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)] , dtype=np.floataa)
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__: Optional[int] =retriever.retrieve(UpperCAmelCase_ , n_docs=UpperCAmelCase_)
self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size))
self.assertEqual(len(UpperCAmelCase_) , 2)
self.assertEqual(sorted(doc_dicts[0]) , ["text", "title"])
self.assertEqual(len(doc_dicts[0]["text"]) , UpperCAmelCase_)
self.assertEqual(doc_dicts[0]["text"][0] , "bar") # max inner product is reached with second doc
self.assertEqual(doc_dicts[1]["text"][0] , "foo") # max inner product is reached with first doc
self.assertListEqual(doc_ids.tolist() , [[1], [0]])
    def test_legacy_hf_index_retriever_save_and_from_pretrained(self):
'''simple docstring'''
lowerCamelCase__: str =self.get_dummy_legacy_index_retriever()
with tempfile.TemporaryDirectory() as tmp_dirname:
retriever.save_pretrained(UpperCAmelCase_)
lowerCamelCase__: Optional[int] =RagRetriever.from_pretrained(UpperCAmelCase_)
self.assertIsInstance(UpperCAmelCase_ , UpperCAmelCase_)
lowerCamelCase__: int =np.array(
[np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)] , dtype=np.floataa)
lowerCamelCase__: Tuple =retriever.retrieve(UpperCAmelCase_ , n_docs=1)
self.assertTrue(out is not None)
@require_torch
@require_tokenizers
@require_sentencepiece
    def test_hf_index_retriever_call(self):
'''simple docstring'''
import torch
lowerCamelCase__: Union[str, Any] =1
lowerCamelCase__: Optional[Any] =self.get_dummy_canonical_hf_index_retriever()
lowerCamelCase__: str =[[5, 7], [10, 11]]
lowerCamelCase__: List[str] =np.array(
[np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)] , dtype=np.floataa)
lowerCamelCase__: Optional[int] =retriever(UpperCAmelCase_ , UpperCAmelCase_ , prefix=retriever.config.generator.prefix , n_docs=UpperCAmelCase_)
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__: Any =(
out["context_input_ids"],
out["context_attention_mask"],
out["retrieved_doc_embeds"],
)
self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size))
self.assertIsInstance(UpperCAmelCase_ , UpperCAmelCase_)
self.assertIsInstance(UpperCAmelCase_ , UpperCAmelCase_)
self.assertIsInstance(UpperCAmelCase_ , np.ndarray)
lowerCamelCase__: Optional[Any] =retriever(
UpperCAmelCase_ , UpperCAmelCase_ , prefix=retriever.config.generator.prefix , n_docs=UpperCAmelCase_ , return_tensors="pt" , )
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__: Optional[Any] =( # noqa: F841
out["context_input_ids"],
out["context_attention_mask"],
out["retrieved_doc_embeds"],
out["doc_ids"],
)
self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size))
self.assertIsInstance(UpperCAmelCase_ , torch.Tensor)
self.assertIsInstance(UpperCAmelCase_ , torch.Tensor)
self.assertIsInstance(UpperCAmelCase_ , torch.Tensor)
@require_torch
@require_tokenizers
@require_sentencepiece
    def test_custom_hf_index_end_to_end_retrieval_call(self):
'''simple docstring'''
lowerCamelCase__: Any =self.get_dpr_ctx_encoder_tokenizer()
lowerCamelCase__: Dict =1
lowerCamelCase__: Tuple =self.get_dummy_custom_hf_index_retriever(from_disk=UpperCAmelCase_)
retriever.set_ctx_encoder_tokenizer(UpperCAmelCase_)
lowerCamelCase__: List[Any] =[[5, 7], [10, 11]]
lowerCamelCase__: Any =np.array(
[np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)] , dtype=np.floataa)
lowerCamelCase__: List[Any] =retriever(UpperCAmelCase_ , UpperCAmelCase_ , prefix=retriever.config.generator.prefix , n_docs=UpperCAmelCase_)
self.assertEqual(
len(UpperCAmelCase_) , 6) # check whether the retriever output consist of 6 attributes including tokenized docs
self.assertEqual(
all(k in out for k in ("tokenized_doc_ids", "tokenized_doc_attention_mask")) , UpperCAmelCase_) # check for doc token related keys in dictionary.
| 59 |
"""simple docstring"""
import unittest
from transformers import BigBirdTokenizer, BigBirdTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, require_torch, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
SPIECE_UNDERLINE = '''▁'''

SAMPLE_VOCAB = get_tests_dir('''fixtures/test_sentencepiece.model''')
@require_sentencepiece
@require_tokenizers
class BigBirdTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = BigBirdTokenizer
    rust_tokenizer_class = BigBirdTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True
    def setUp(self):
        super().setUp()
        tokenizer = self.tokenizer_class(SAMPLE_VOCAB, keep_accents=True)
        tokenizer.save_pretrained(self.tmpdirname)
    def test_convert_token_and_id(self):
        token = '''<s>'''
        token_id = 1
        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)
    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())
        self.assertEqual(vocab_keys[0], '''<unk>''')
        self.assertEqual(vocab_keys[1], '''<s>''')
        self.assertEqual(vocab_keys[-1], '''[MASK]''')
        self.assertEqual(len(vocab_keys), 1004)
    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 1000)
    def test_rust_and_python_full_tokenizers(self):
        if not self.test_rust_tokenizer:
            return
        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()
        sequence = '''I was born in 92000, and this is falsé.'''
        tokens = tokenizer.tokenize(sequence)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)
        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)
        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)
    def test_full_tokenizer(self):
        tokenizer = BigBirdTokenizer(SAMPLE_VOCAB, keep_accents=True)
        tokens = tokenizer.tokenize('''This is a test''')
        self.assertListEqual(tokens, ['''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est'''])
        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens), [285, 46, 10, 170, 382], )
        tokens = tokenizer.tokenize('''I was born in 92000, and this is falsé.''')
        self.assertListEqual(
            tokens, [
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''9''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''é''',
'''.''',
] , )
        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(
            ids, [8, 21, 84, 55, 24, 19, 7, 0, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 0, 4], )
        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens, [
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''<unk>''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''<unk>''',
'''.''',
] , )
    @cached_property
    def big_tokenizer(self):
        return BigBirdTokenizer.from_pretrained('''google/bigbird-roberta-base''')
@slow
    def test_tokenization_base_easy_symbols(self):
        symbols = '''Hello World!'''
        original_tokenizer_encodings = [65, 18536, 2260, 101, 66]
        self.assertListEqual(original_tokenizer_encodings, self.big_tokenizer.encode(symbols))
@slow
    def test_tokenization_base_hard_symbols(self):
        symbols = (
            'This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) " [ ] ! : - . Also we will'
            ' add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth'
        )
# fmt: off
        original_tokenizer_encodings = [65, 871, 419, 358, 946, 991, 2521, 452, 358, 1357, 387, 7751, 3536, 112, 985, 456, 126, 865, 938, 5400, 5734, 458, 1368, 467, 786, 2462, 5246, 1159, 633, 865, 4519, 457, 582, 852, 2557, 427, 916, 508, 405, 34324, 497, 391, 408, 11342, 1244, 385, 100, 938, 985, 456, 574, 362, 12597, 3200, 3129, 1172, 66]  # noqa: E231
# fmt: on
        self.assertListEqual(original_tokenizer_encodings, self.big_tokenizer.encode(symbols))
@require_torch
@slow
    def test_torch_encode_plus_sent_to_model(self):
        import torch

        from transformers import BigBirdConfig, BigBirdModel

        # Build sequence
        first_ten_tokens = list(self.big_tokenizer.get_vocab().keys())[:10]
        sequence = ''' '''.join(first_ten_tokens)
        encoded_sequence = self.big_tokenizer.encode_plus(sequence, return_tensors='''pt''', return_token_type_ids=False)
        batch_encoded_sequence = self.big_tokenizer.batch_encode_plus(
            [sequence + ''' ''' + sequence], return_tensors='''pt''', return_token_type_ids=False)
        config = BigBirdConfig(attention_type='''original_full''')
        model = BigBirdModel(config)
        assert model.get_input_embeddings().weight.shape[0] >= self.big_tokenizer.vocab_size
        with torch.no_grad():
            model(**encoded_sequence)
            model(**batch_encoded_sequence)
@slow
    def test_special_tokens(self):
        tokenizer = BigBirdTokenizer.from_pretrained('''google/bigbird-roberta-base''')
        decoded_text = tokenizer.decode(tokenizer('''Paris is the [MASK].''').input_ids)
        self.assertTrue(decoded_text == '''[CLS] Paris is the[MASK].[SEP]''')
@slow
    def test_tokenizer_integration(self):
# fmt: off
__a : Optional[Any] = {'''input_ids''': [[65, 39286, 458, 36335, 2001, 456, 13073, 13266, 455, 113, 7746, 1741, 11157, 391, 13073, 13266, 455, 113, 3967, 35412, 113, 4936, 109, 3870, 2377, 113, 30084, 45720, 458, 134, 17496, 112, 503, 11672, 113, 118, 112, 5665, 13347, 38687, 112, 1496, 31389, 112, 3268, 47264, 134, 962, 112, 16377, 8035, 23130, 430, 12169, 15518, 28592, 458, 146, 41697, 109, 391, 12169, 15518, 16689, 458, 146, 41358, 109, 452, 726, 4034, 111, 763, 35412, 5082, 388, 1903, 111, 9051, 391, 2870, 48918, 1900, 1123, 550, 998, 112, 9586, 15985, 455, 391, 410, 22955, 37636, 114, 66], [65, 448, 17496, 419, 3663, 385, 763, 113, 27533, 2870, 3283, 13043, 1639, 24713, 523, 656, 24013, 18550, 2521, 517, 27014, 21244, 420, 1212, 1465, 391, 927, 4833, 388, 578, 11786, 114, 66, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [65, 484, 2169, 7687, 21932, 18146, 726, 363, 17032, 3391, 114, 66, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=_UpperCAmelCase , model_name='''google/bigbird-roberta-base''' , revision='''215c99f1600e06f83acce68422f2035b2b5c3510''' , )
| 52 | 0 |
import argparse
import gc
import json
import os
import re
import torch
from huggingface_hub import hf_hub_download
from transformers import AutoModelForCausalLM, AutoTokenizer, PreTrainedTokenizerFast, RwkvConfig
from transformers.modeling_utils import WEIGHTS_INDEX_NAME, shard_checkpoint
NUM_HIDDEN_LAYERS_MAPPING = {
'''169M''': 1_2,
'''430M''': 2_4,
'''1B5''': 2_4,
'''3B''': 3_2,
'''7B''': 3_2,
'''14B''': 4_0,
}
HIDEN_SIZE_MAPPING = {
'''169M''': 7_6_8,
'''430M''': 1_0_2_4,
'''1B5''': 2_0_4_8,
'''3B''': 2_5_6_0,
'''7B''': 4_0_9_6,
'''14B''': 5_1_2_0,
}
def convert_state_dict(state_dict):
    """Rename fairseq-style RWKV keys to the Hugging Face naming scheme."""
    state_dict_keys = list(state_dict.keys())
    for name in state_dict_keys:
        weight = state_dict.pop(name)
        # emb -> embedding
        if name.startswith('''emb.'''):
            name = name.replace('''emb.''', '''embeddings.''')
        # ln_0 -> pre_ln (only present at block 0)
        if name.startswith('''blocks.0.ln0'''):
            name = name.replace('''blocks.0.ln0''', '''blocks.0.pre_ln''')
        # att -> attention
        name = re.sub(R'''blocks\.(\d+)\.att''', R'''blocks.\1.attention''', name)
        # ffn -> feed_forward
        name = re.sub(R'''blocks\.(\d+)\.ffn''', R'''blocks.\1.feed_forward''', name)
        # time_mix_k -> time_mix_key
        if name.endswith('''.time_mix_k'''):
            name = name.replace('''.time_mix_k''', '''.time_mix_key''')
        # time_mix_v -> time_mix_value
        if name.endswith('''.time_mix_v'''):
            name = name.replace('''.time_mix_v''', '''.time_mix_value''')
        # time_mix_r -> time_mix_receptance
        if name.endswith('''.time_mix_r'''):
            name = name.replace('''.time_mix_r''', '''.time_mix_receptance''')
        if name != "head.weight":
            name = '''rwkv.''' + name
        state_dict[name] = weight
    return state_dict
def convert_rmkv_checkpoint_to_hf_format(
    repo_id, checkpoint_file, output_dir, size=None, tokenizer_file=None, push_to_hub=False, model_name=None
):
    # 1. If possible, build the tokenizer.
    if tokenizer_file is None:
        print('''No `--tokenizer_file` provided, we will use the default tokenizer.''')
        vocab_size = 50_277
        tokenizer = AutoTokenizer.from_pretrained('''EleutherAI/gpt-neox-20b''')
    else:
        tokenizer = PreTrainedTokenizerFast(tokenizer_file=tokenizer_file)
        vocab_size = len(tokenizer)
    tokenizer.save_pretrained(output_dir)

    # 2. Build the config
    possible_sizes = list(NUM_HIDDEN_LAYERS_MAPPING.keys())
    if size is None:
        # Try to infer size from the checkpoint name
        for candidate in possible_sizes:
            if candidate in checkpoint_file:
                size = candidate
                break
        if size is None:
            raise ValueError('''Could not infer the size, please provide it with the `--size` argument.''')
    if size not in possible_sizes:
        raise ValueError(f'''`size` should be one of {possible_sizes}, got {size}.''')

    config = RwkvConfig(
        vocab_size=vocab_size, num_hidden_layers=NUM_HIDDEN_LAYERS_MAPPING[size], hidden_size=HIDEN_SIZE_MAPPING[size], )
    config.save_pretrained(output_dir)

    # 3. Download model file then convert state_dict
    model_file = hf_hub_download(repo_id, checkpoint_file)
    state_dict = torch.load(model_file, map_location='''cpu''')
    state_dict = convert_state_dict(state_dict)

    # 4. Split in shards and save
    shards, index = shard_checkpoint(state_dict)
    for shard_file, shard in shards.items():
        torch.save(shard, os.path.join(output_dir, shard_file))

    if index is not None:
        save_index_file = os.path.join(output_dir, WEIGHTS_INDEX_NAME)
        # Save the index as well
        with open(save_index_file, '''w''', encoding='''utf-8''') as f:
            content = json.dumps(index, indent=2, sort_keys=True) + '''\n'''
            f.write(content)

    # 5. Clean up shards (for some reason the files PyTorch saves take the same space as the whole state_dict)
    print(
        '''Cleaning up shards. This may error with an OOM error, if this is the case don\'t worry you still have converted the model.''')
    shard_files = list(shards.keys())

    del state_dict
    del shards
    gc.collect()

    for shard_file in shard_files:
        state_dict = torch.load(os.path.join(output_dir, shard_file))
        torch.save({k: v.cpu().clone() for k, v in state_dict.items()}, os.path.join(output_dir, shard_file))

    del state_dict
    gc.collect()

    if push_to_hub:
        if model_name is None:
            raise ValueError('''Please provide a `model_name` to push the model to the Hub.''')
        model = AutoModelForCausalLM.from_pretrained(output_dir)
        model.push_to_hub(model_name, max_shard_size='''2GB''')
        tokenizer.push_to_hub(model_name)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--repo_id''', default=None, type=str, required=True, help='''Repo ID from which to pull the checkpoint.'''
)
parser.add_argument(
'''--checkpoint_file''', default=None, type=str, required=True, help='''Name of the checkpoint file in the repo.'''
)
parser.add_argument(
'''--output_dir''', default=None, type=str, required=True, help='''Where to save the converted model.'''
)
parser.add_argument(
'''--tokenizer_file''',
default=None,
type=str,
help='''Path to the tokenizer file to use (if not provided, only the model is converted).''',
)
parser.add_argument(
'''--size''',
default=None,
type=str,
help='''Size of the model. Will be inferred from the `checkpoint_file` if not passed.''',
)
parser.add_argument(
'''--push_to_hub''',
action='''store_true''',
help='''Push to the Hub the converted model.''',
)
parser.add_argument(
'''--model_name''',
default=None,
type=str,
help='''Name of the pushed model on the Hub, including the username / organization.''',
)
lowerCAmelCase_ = parser.parse_args()
convert_rmkv_checkpoint_to_hf_format(
args.repo_id,
args.checkpoint_file,
args.output_dir,
size=args.size,
tokenizer_file=args.tokenizer_file,
push_to_hub=args.push_to_hub,
model_name=args.model_name,
)
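# Hedged usage sketch for the CLI above. The repo and file names are
# placeholders, not verified checkpoints; `--size` may be omitted when the
# checkpoint file name contains one of the NUM_HIDDEN_LAYERS_MAPPING keys,
# as the inference loop above shows:
#
#   python convert_rwkv_checkpoint_to_hf.py \
#       --repo_id <user>/<rwkv-repo> \
#       --checkpoint_file <checkpoint>.pth \
#       --output_dir ./rwkv-hf \
#       --tokenizer_file tokenizer.json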
| 60 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
A = logging.get_logger(__name__)
A = {
'''facebook/convnextv2-tiny-1k-224''': '''https://huggingface.co/facebook/convnextv2-tiny-1k-224/resolve/main/config.json''',
}
class __lowercase ( BackboneConfigMixin , PretrainedConfig ):
'''simple docstring'''
    model_type = '''convnextv2'''
    def __init__( self , num_channels=3 , patch_size=4 , num_stages=4 , hidden_sizes=None , depths=None , hidden_act="gelu" , initializer_range=0.0_2 , layer_norm_eps=1e-1_2 , drop_path_rate=0.0 , image_size=224 , out_features=None , out_indices=None , **kwargs , ):
        super().__init__(**kwargs )
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.num_stages = num_stages
        self.hidden_sizes = [96, 192, 384, 768] if hidden_sizes is None else hidden_sizes
        self.depths = [3, 3, 9, 3] if depths is None else depths
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.drop_path_rate = drop_path_rate
        self.image_size = image_size
        self.stage_names = ['''stem'''] + [f"""stage{idx}""" for idx in range(1 , len(self.depths ) + 1 )]
        self._out_features , self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features , out_indices=out_indices , stage_names=self.stage_names )
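# Minimal usage sketch (assumes this config class is exported by transformers
# as ConvNextV2Config; the stage names follow from the defaults set above):
#
# from transformers import ConvNextV2Config
# config = ConvNextV2Config()
# print(config.stage_names)  # ['stem', 'stage1', 'stage2', 'stage3', 'stage4']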
| 52 | 0 |
from __future__ import annotations
import math
import random
from collections.abc import Collection
from typing import overload
class Vector:
"""simple docstring"""
    def __init__( self : int , components : Collection[float] | None = None ) -> None:
        if components is None:
            components = []
        self.__components = list(components )
def __len__( self : Tuple ) -> int:
return len(self.__components )
    def __str__( self : List[Any] ) -> str:
        return "(" + ",".join(map(str , self.__components ) ) + ")"
    def __add__( self : Tuple , other : Vector ) -> Vector:
        size = len(self )
        if size == len(other ):
            result = [self.__components[i] + other.component(i ) for i in range(size )]
            return Vector(result )
        else:
            raise Exception("must have the same size" )
    def __sub__( self : str , other : Vector ) -> Vector:
        size = len(self )
        if size == len(other ):
            result = [self.__components[i] - other.component(i ) for i in range(size )]
            return Vector(result )
        else:  # error case
            raise Exception("must have the same size" )
@overload
def __mul__( self : List[str] , SCREAMING_SNAKE_CASE__ : float ) -> Vector:
...
@overload
def __mul__( self : Dict , SCREAMING_SNAKE_CASE__ : Vector ) -> float:
...
    def __mul__( self : int , other : float | Vector ) -> float | Vector:
        if isinstance(other , (float, int) ):
            ans = [c * other for c in self.__components]
            return Vector(ans )
        elif isinstance(other , Vector ) and len(self ) == len(other ):
            size = len(self )
            ans = [self.__components[i] * other.component(i ) for i in range(size )]
            return sum(ans )
        else:  # error case
            raise Exception("invalid operand!" )
    def copy( self : Optional[int] ) -> Vector:
        return Vector(self.__components )
    def component( self : Union[str, Any] , i : int ) -> float:
        if isinstance(i , int ) and -len(self.__components ) <= i < len(self.__components ):
            return self.__components[i]
else:
raise Exception("index out of range" )
    def change_component( self : str , pos : int , value : float ) -> None:
        assert -len(self.__components ) <= pos < len(self.__components )
        self.__components[pos] = value
    def euclidean_length( self : Tuple ) -> float:
        if len(self.__components ) == 0:
            raise Exception("Vector is empty" )
        squares = [c**2 for c in self.__components]
        return math.sqrt(sum(squares ) )
    def angle( self : Dict , other : Vector , deg : bool = False ) -> float:
        num = self * other
        den = self.euclidean_length() * other.euclidean_length()
        if deg:
            return math.degrees(math.acos(num / den ) )
        else:
            return math.acos(num / den )
def zero_vector( dimension : int ):
    """simple docstring"""
    assert isinstance(dimension , int )
    return Vector([0] * dimension )
def unit_basis_vector( dimension : int , pos : int ):
    """simple docstring"""
    assert isinstance(dimension , int ) and isinstance(pos , int )
    ans = [0] * dimension
    ans[pos] = 1
    return Vector(ans )
def axpy( scalar : float , x : Vector , y : Vector ):
    """simple docstring"""
    assert (
        isinstance(x , Vector )
        and isinstance(y , Vector )
        and (isinstance(scalar , (int, float) ))
    )
    return x * scalar + y
def random_vector( n : int , a : int , b : int ):
    """simple docstring"""
    random.seed(n )
    ans = [random.randint(a , b ) for _ in range(n )]
    return Vector(ans )
class Matrix:
    """simple docstring"""
    def __init__( self : Tuple , matrix : list[list[float]] , w : int , h : int ) -> None:
        self.__matrix = matrix
        self.__width = w
        self.__height = h
    def __str__( self : List[str] ) -> str:
        ans = ""
        for i in range(self.__height ):
            ans += "|"
            for j in range(self.__width ):
                if j < self.__width - 1:
                    ans += str(self.__matrix[i][j] ) + ","
                else:
                    ans += str(self.__matrix[i][j] ) + "|\n"
        return ans
    def __add__( self : Optional[Any] , other : Matrix ) -> Matrix:
        if self.__width == other.width() and self.__height == other.height():
            matrix = []
            for i in range(self.__height ):
                row = [
                    self.__matrix[i][j] + other.component(i , j )
                    for j in range(self.__width )
                ]
                matrix.append(row )
            return Matrix(matrix , self.__width , self.__height )
        else:
            raise Exception("matrices must have the same dimension!" )
    def __sub__( self : Optional[int] , other : Matrix ) -> Matrix:
        if self.__width == other.width() and self.__height == other.height():
            matrix = []
            for i in range(self.__height ):
                row = [
                    self.__matrix[i][j] - other.component(i , j )
                    for j in range(self.__width )
                ]
                matrix.append(row )
            return Matrix(matrix , self.__width , self.__height )
        else:
            raise Exception("matrices must have the same dimension!" )
@overload
def __mul__( self : Optional[int] , SCREAMING_SNAKE_CASE__ : float ) -> Matrix:
...
@overload
def __mul__( self : Tuple , SCREAMING_SNAKE_CASE__ : Vector ) -> Vector:
...
    def __mul__( self : List[Any] , other : float | Vector ) -> Vector | Matrix:
        if isinstance(other , Vector ):  # matrix-vector
            if len(other ) == self.__width:
                ans = zero_vector(self.__height )
                for i in range(self.__height ):
                    summands = [
                        self.__matrix[i][j] * other.component(j )
                        for j in range(self.__width )
                    ]
                    ans.change_component(i , sum(summands ) )
                return ans
            else:
                raise Exception(
                    "vector must have the same size as the "
                    "number of columns of the matrix!" )
        elif isinstance(other , (int, float) ):  # matrix-scalar
            matrix = [
                [self.__matrix[i][j] * other for j in range(self.__width )]
                for i in range(self.__height )
            ]
            return Matrix(matrix , self.__width , self.__height )
return None
    def height( self : Dict ) -> int:
        return self.__height
    def width( self : Dict ) -> int:
        return self.__width
    def component( self : int , x : int , y : int ) -> float:
        if 0 <= x < self.__height and 0 <= y < self.__width:
            return self.__matrix[x][y]
        else:
            raise Exception("component: indices out of bounds" )
    def change_component( self : Optional[int] , x : int , y : int , value : float ) -> None:
        if 0 <= x < self.__height and 0 <= y < self.__width:
            self.__matrix[x][y] = value
        else:
            raise Exception("change_component: indices out of bounds" )
    def minor( self : Union[str, Any] , x : int , y : int ) -> float:
        if self.__height != self.__width:
            raise Exception("Matrix is not square" )
        minor = self.__matrix[:x] + self.__matrix[x + 1 :]
        for i in range(len(minor ) ):
            minor[i] = minor[i][:y] + minor[i][y + 1 :]
        return Matrix(minor , self.__width - 1 , self.__height - 1 ).determinant()
    def cofactor( self : List[Any] , x : int , y : int ) -> float:
        if self.__height != self.__width:
            raise Exception("Matrix is not square" )
        if 0 <= x < self.__height and 0 <= y < self.__width:
            return (-1) ** (x + y) * self.minor(x , y )
        else:
            raise Exception("Indices out of bounds" )
    def determinant( self : Any ) -> float:
if self.__height != self.__width:
raise Exception("Matrix is not square" )
if self.__height < 1:
raise Exception("Matrix has no element" )
elif self.__height == 1:
return self.__matrix[0][0]
elif self.__height == 2:
return (
self.__matrix[0][0] * self.__matrix[1][1]
- self.__matrix[0][1] * self.__matrix[1][0]
)
        else:
            cofactor_prefactors = [
                self.__matrix[0][y] * self.cofactor(0 , y ) for y in range(self.__width )
            ]
            return sum(cofactor_prefactors )
def square_zero_matrix( n : int ):
    """simple docstring"""
    ans = [[0] * n for _ in range(n )]
    return Matrix(ans , n , n )
def random_matrix( width : int , height : int , a : int , b : int ):
    """simple docstring"""
    random.seed(width )
    matrix = [
        [random.randint(a , b ) for _ in range(width )] for _ in range(height )
    ]
    return Matrix(matrix , width , height )
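# Small smoke test for the classes above; a sketch added for illustration, it
# exercises only operations defined in this module.
if __name__ == "__main__":
    v1 = Vector([1, 2, 3] )
    v2 = Vector([4, 5, 6] )
    print(v1 + v2 )  # (5,7,9)
    print(v1 * v2 )  # dot product: 1*4 + 2*5 + 3*6 = 32
    m = Matrix([[1, 2], [3, 4]] , 2 , 2 )
    print(m.determinant() )  # 1*4 - 2*3 = -2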
| 61 |
"""simple docstring"""
import torch
from diffusers import DDPMScheduler
from .test_schedulers import SchedulerCommonTest
class __lowercase ( SchedulerCommonTest ):
'''simple docstring'''
    scheduler_classes = (DDPMScheduler,)
def _lowerCamelCase ( self , **_UpperCAmelCase ):
        config = {
'''num_train_timesteps''': 1000,
'''beta_start''': 0.0_0_0_1,
'''beta_end''': 0.0_2,
'''beta_schedule''': '''linear''',
'''variance_type''': '''fixed_small''',
'''clip_sample''': True,
}
config.update(**_UpperCAmelCase )
return config
def _lowerCamelCase ( self ):
for timesteps in [1, 5, 100, 1000]:
self.check_over_configs(num_train_timesteps=_UpperCAmelCase )
def _lowerCamelCase ( self ):
for beta_start, beta_end in zip([0.0_0_0_1, 0.0_0_1, 0.0_1, 0.1] , [0.0_0_2, 0.0_2, 0.2, 2] ):
self.check_over_configs(beta_start=_UpperCAmelCase , beta_end=_UpperCAmelCase )
def _lowerCamelCase ( self ):
for schedule in ["linear", "squaredcos_cap_v2"]:
self.check_over_configs(beta_schedule=_UpperCAmelCase )
def _lowerCamelCase ( self ):
for variance in ["fixed_small", "fixed_large", "other"]:
self.check_over_configs(variance_type=_UpperCAmelCase )
def _lowerCamelCase ( self ):
for clip_sample in [True, False]:
self.check_over_configs(clip_sample=_UpperCAmelCase )
def _lowerCamelCase ( self ):
self.check_over_configs(thresholding=_UpperCAmelCase )
for threshold in [0.5, 1.0, 2.0]:
for prediction_type in ["epsilon", "sample", "v_prediction"]:
self.check_over_configs(
thresholding=_UpperCAmelCase , prediction_type=_UpperCAmelCase , sample_max_value=_UpperCAmelCase , )
def _lowerCamelCase ( self ):
for prediction_type in ["epsilon", "sample", "v_prediction"]:
self.check_over_configs(prediction_type=_UpperCAmelCase )
def _lowerCamelCase ( self ):
for t in [0, 500, 999]:
self.check_over_forward(time_step=_UpperCAmelCase )
def _lowerCamelCase ( self ):
__a : List[Any] = self.scheduler_classes[0]
__a : Dict = self.get_scheduler_config()
__a : Dict = scheduler_class(**_UpperCAmelCase )
assert torch.sum(torch.abs(scheduler._get_variance(0 ) - 0.0 ) ) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(487 ) - 0.0_0_9_7_9 ) ) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(999 ) - 0.0_2 ) ) < 1e-5
def _lowerCamelCase ( self ):
__a : int = self.scheduler_classes[0]
__a : int = self.get_scheduler_config()
__a : Optional[Any] = scheduler_class(**_UpperCAmelCase )
__a : int = len(_UpperCAmelCase )
__a : List[str] = self.dummy_model()
__a : List[Any] = self.dummy_sample_deter
__a : Union[str, Any] = torch.manual_seed(0 )
for t in reversed(range(_UpperCAmelCase ) ):
# 1. predict noise residual
__a : Optional[int] = model(_UpperCAmelCase , _UpperCAmelCase )
# 2. predict previous mean of sample x_t-1
__a : Dict = scheduler.step(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , generator=_UpperCAmelCase ).prev_sample
# if t > 0:
# noise = self.dummy_sample_deter
# variance = scheduler.get_variance(t) ** (0.5) * noise
#
# sample = pred_prev_sample + variance
__a : List[Any] = pred_prev_sample
__a : int = torch.sum(torch.abs(_UpperCAmelCase ) )
__a : Union[str, Any] = torch.mean(torch.abs(_UpperCAmelCase ) )
assert abs(result_sum.item() - 2_5_8.9_6_0_6 ) < 1e-2
assert abs(result_mean.item() - 0.3_3_7_2 ) < 1e-3
def _lowerCamelCase ( self ):
__a : Dict = self.scheduler_classes[0]
__a : int = self.get_scheduler_config(prediction_type='''v_prediction''' )
__a : int = scheduler_class(**_UpperCAmelCase )
__a : Union[str, Any] = len(_UpperCAmelCase )
__a : List[str] = self.dummy_model()
__a : List[str] = self.dummy_sample_deter
__a : str = torch.manual_seed(0 )
for t in reversed(range(_UpperCAmelCase ) ):
# 1. predict noise residual
__a : Dict = model(_UpperCAmelCase , _UpperCAmelCase )
# 2. predict previous mean of sample x_t-1
__a : Dict = scheduler.step(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , generator=_UpperCAmelCase ).prev_sample
# if t > 0:
# noise = self.dummy_sample_deter
# variance = scheduler.get_variance(t) ** (0.5) * noise
#
# sample = pred_prev_sample + variance
__a : Optional[int] = pred_prev_sample
__a : Optional[int] = torch.sum(torch.abs(_UpperCAmelCase ) )
__a : int = torch.mean(torch.abs(_UpperCAmelCase ) )
assert abs(result_sum.item() - 2_0_2.0_2_9_6 ) < 1e-2
assert abs(result_mean.item() - 0.2_6_3_1 ) < 1e-3
def _lowerCamelCase ( self ):
__a : List[Any] = self.scheduler_classes[0]
__a : Any = self.get_scheduler_config()
__a : str = scheduler_class(**_UpperCAmelCase )
__a : Union[str, Any] = [100, 87, 50, 1, 0]
scheduler.set_timesteps(timesteps=_UpperCAmelCase )
__a : List[Any] = scheduler.timesteps
for i, timestep in enumerate(_UpperCAmelCase ):
if i == len(_UpperCAmelCase ) - 1:
__a : Union[str, Any] = -1
else:
__a : str = timesteps[i + 1]
__a : Dict = scheduler.previous_timestep(_UpperCAmelCase )
__a : str = prev_t.item()
self.assertEqual(_UpperCAmelCase , _UpperCAmelCase )
def _lowerCamelCase ( self ):
__a : Tuple = self.scheduler_classes[0]
__a : Dict = self.get_scheduler_config()
__a : Any = scheduler_class(**_UpperCAmelCase )
__a : Optional[Any] = [100, 87, 50, 51, 0]
with self.assertRaises(_UpperCAmelCase , msg='''`custom_timesteps` must be in descending order.''' ):
scheduler.set_timesteps(timesteps=_UpperCAmelCase )
def _lowerCamelCase ( self ):
__a : List[Any] = self.scheduler_classes[0]
__a : Optional[Any] = self.get_scheduler_config()
__a : Any = scheduler_class(**_UpperCAmelCase )
__a : Union[str, Any] = [100, 87, 50, 1, 0]
__a : Optional[int] = len(_UpperCAmelCase )
with self.assertRaises(_UpperCAmelCase , msg='''Can only pass one of `num_inference_steps` or `custom_timesteps`.''' ):
scheduler.set_timesteps(num_inference_steps=_UpperCAmelCase , timesteps=_UpperCAmelCase )
def _lowerCamelCase ( self ):
__a : Optional[int] = self.scheduler_classes[0]
__a : Optional[Any] = self.get_scheduler_config()
__a : List[str] = scheduler_class(**_UpperCAmelCase )
__a : List[Any] = [scheduler.config.num_train_timesteps]
with self.assertRaises(
            _UpperCAmelCase , msg=f'''`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}''' , ):
scheduler.set_timesteps(timesteps=_UpperCAmelCase )
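# The denoising loop these tests exercise, outside the test harness; a sketch
# that assumes a `model(sample, t)` callable returning the predicted noise:
#
# scheduler = DDPMScheduler(num_train_timesteps=1000)
# sample = torch.randn(1, 3, 32, 32)
# for t in scheduler.timesteps:
#     residual = model(sample, t)
#     sample = scheduler.step(residual, t, sample).prev_sample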
| 52 | 0 |
import os
import tempfile
from functools import partial
from unittest import TestCase
from unittest.mock import patch
import datasets
import datasets.config
from .utils import require_beam
class DummyBeamDataset( datasets.BeamBasedBuilder ):
'''simple docstring'''
    def _info( self : Any ):
        return datasets.DatasetInfo(
            features=datasets.Features({"content": datasets.Value("string" )} ) , supervised_keys=None , )
    def _split_generators( self : Any , dl_manager : Any , pipeline : str ):
        return [datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={"examples": get_test_dummy_examples()} )]
    def _build_pcollection( self : List[Any] , pipeline : Optional[Any] , examples : List[Any] ):
        import apache_beam as beam
        return pipeline | "Load Examples" >> beam.Create(examples )
class NestedBeamDataset( datasets.BeamBasedBuilder ):
'''simple docstring'''
    def _info( self : str ):
        return datasets.DatasetInfo(
            features=datasets.Features({"a": datasets.Sequence({"b": datasets.Value("string" )} )} ) , supervised_keys=None , )
    def _split_generators( self : str , dl_manager : Any , pipeline : Optional[Any] ):
        return [
            datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={"examples": get_test_nested_examples()} )
        ]
    def _build_pcollection( self : int , pipeline : Any , examples : Tuple ):
        import apache_beam as beam
        return pipeline | "Load Examples" >> beam.Create(examples )
def get_test_dummy_examples( ):
"""simple docstring"""
return [(i, {"content": content}) for i, content in enumerate(["foo", "bar", "foobar"] )]
def get_test_nested_examples( ):
"""simple docstring"""
return [(i, {"a": {"b": [content]}}) for i, content in enumerate(["foo", "bar", "foobar"] )]
class SCREAMING_SNAKE_CASE ( TestCase ):
'''simple docstring'''
@require_beam
    def test_download_and_prepare( self : Optional[Any] ):
        expected_num_examples = len(get_test_dummy_examples() )
        with tempfile.TemporaryDirectory() as tmp_cache_dir:
            builder = DummyBeamDataset(cache_dir=tmp_cache_dir , beam_runner="DirectRunner" )
            builder.download_and_prepare()
            self.assertTrue(
                os.path.exists(
                    os.path.join(tmp_cache_dir , builder.name , "default" , "0.0.0" , f'''{builder.name}-train.arrow''' ) ) )
            self.assertDictEqual(builder.info.features , datasets.Features({"content": datasets.Value("string" )} ) )
            dset = builder.as_dataset()
            self.assertEqual(dset["train"].num_rows , expected_num_examples )
            self.assertEqual(dset["train"].info.splits["train"].num_examples , expected_num_examples )
            self.assertDictEqual(dset["train"][0] , get_test_dummy_examples()[0][1] )
            self.assertDictEqual(
                dset["train"][expected_num_examples - 1] , get_test_dummy_examples()[expected_num_examples - 1][1] )
            self.assertTrue(
                os.path.exists(os.path.join(tmp_cache_dir , builder.name , "default" , "0.0.0" , "dataset_info.json" ) ) )
            del dset
@require_beam
    def test_download_and_prepare_sharded( self : List[Any] ):
        import apache_beam as beam
        original_write_parquet = beam.io.parquetio.WriteToParquet
        expected_num_examples = len(get_test_dummy_examples() )
        with tempfile.TemporaryDirectory() as tmp_cache_dir:
            builder = DummyBeamDataset(cache_dir=tmp_cache_dir , beam_runner="DirectRunner" )
            with patch("apache_beam.io.parquetio.WriteToParquet" ) as write_parquet_mock:
                write_parquet_mock.side_effect = partial(original_write_parquet , num_shards=2 )
                builder.download_and_prepare()
                self.assertTrue(
                    os.path.exists(
                        os.path.join(
                            tmp_cache_dir , builder.name , "default" , "0.0.0" , f'''{builder.name}-train-00000-of-00002.arrow''' ) ) )
                self.assertTrue(
                    os.path.exists(
                        os.path.join(
                            tmp_cache_dir , builder.name , "default" , "0.0.0" , f'''{builder.name}-train-00001-of-00002.arrow''' ) ) )
                self.assertDictEqual(builder.info.features , datasets.Features({"content": datasets.Value("string" )} ) )
                dset = builder.as_dataset()
                self.assertEqual(dset["train"].num_rows , expected_num_examples )
                self.assertEqual(dset["train"].info.splits["train"].num_examples , expected_num_examples )
                # Order is not preserved when sharding, so we just check that all the elements are there
                self.assertListEqual(sorted(dset["train"]["content"] ) , sorted(["foo", "bar", "foobar"] ) )
                self.assertTrue(
                    os.path.exists(os.path.join(tmp_cache_dir , builder.name , "default" , "0.0.0" , "dataset_info.json" ) ) )
                del dset
@require_beam
    def test_no_beam_options( self : List[str] ):
        with tempfile.TemporaryDirectory() as tmp_cache_dir:
            builder = DummyBeamDataset(cache_dir=tmp_cache_dir )
            self.assertRaises(datasets.builder.MissingBeamOptions , builder.download_and_prepare )
@require_beam
    def test_nested_features( self : Dict ):
        expected_num_examples = len(get_test_nested_examples() )
        with tempfile.TemporaryDirectory() as tmp_cache_dir:
            builder = NestedBeamDataset(cache_dir=tmp_cache_dir , beam_runner="DirectRunner" )
            builder.download_and_prepare()
            self.assertTrue(
                os.path.exists(
                    os.path.join(tmp_cache_dir , builder.name , "default" , "0.0.0" , f'''{builder.name}-train.arrow''' ) ) )
            self.assertDictEqual(
                builder.info.features , datasets.Features({"a": datasets.Sequence({"b": datasets.Value("string" )} )} ) )
            dset = builder.as_dataset()
            self.assertEqual(dset["train"].num_rows , expected_num_examples )
            self.assertEqual(dset["train"].info.splits["train"].num_examples , expected_num_examples )
            self.assertDictEqual(dset["train"][0] , get_test_nested_examples()[0][1] )
            self.assertDictEqual(
                dset["train"][expected_num_examples - 1] , get_test_nested_examples()[expected_num_examples - 1][1] )
            self.assertTrue(
                os.path.exists(os.path.join(tmp_cache_dir , builder.name , "default" , "0.0.0" , "dataset_info.json" ) ) )
            del dset
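# The minimal flow the tests above cover: a Beam-based builder prepares its
# data only when a runner is supplied (here the local DirectRunner):
#
# builder = DummyBeamDataset(cache_dir="/tmp/beam_cache", beam_runner="DirectRunner")
# builder.download_and_prepare()
# dset = builder.as_dataset()
# print(dset["train"][0])  # {'content': 'foo'}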
| 62 |
"""simple docstring"""
import itertools
import os
import random
import tempfile
import unittest
import numpy as np
from transformers import TvltFeatureExtractor, is_datasets_available
from transformers.testing_utils import check_json_file_has_correct_format, require_torch, require_torchaudio
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
if is_torch_available():
import torch
if is_datasets_available():
from datasets import load_dataset
global_rng = random.Random()
def floats_list( shape :Tuple , scale :Dict=1.0 , rng :str=None , name :List[Any]=None) -> Dict:
    # Creates a batch of random float lists with the given (batch, length) shape
    if rng is None:
        rng = global_rng
    values = []
    for batch_idx in range(shape[0]):
        values.append([])
        for _ in range(shape[1]):
            values[-1].append(rng.random() * scale)
    return values
class TvltFeatureExtractionTester( unittest.TestCase ):
'''simple docstring'''
    def __init__( self , parent , batch_size=7 , min_seq_length=400 , max_seq_length=2000 , spectrogram_length=2048 , feature_size=128 , num_audio_channels=1 , hop_length=512 , chunk_length=30 , sampling_rate=44100 , ):
        self.parent = parent
        self.batch_size = batch_size
        self.min_seq_length = min_seq_length
        self.max_seq_length = max_seq_length
        self.seq_length_diff = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
        self.spectrogram_length = spectrogram_length
        self.feature_size = feature_size
        self.num_audio_channels = num_audio_channels
        self.hop_length = hop_length
        self.chunk_length = chunk_length
        self.sampling_rate = sampling_rate
    def prepare_feat_extract_dict( self ):
return {
"spectrogram_length": self.spectrogram_length,
"feature_size": self.feature_size,
"num_audio_channels": self.num_audio_channels,
"hop_length": self.hop_length,
"chunk_length": self.chunk_length,
"sampling_rate": self.sampling_rate,
}
    def prepare_inputs_for_common( self , equal_length=False , numpify=False ):
        def _flatten(list_of_lists ):
            return list(itertools.chain(*list_of_lists ) )
        if equal_length:
            speech_inputs = [floats_list((self.max_seq_length, self.feature_size) ) for _ in range(self.batch_size )]
        else:
            # make sure that inputs increase in size
            speech_inputs = [
                floats_list((x, self.feature_size) )
                for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff )
            ]
        if numpify:
            speech_inputs = [np.asarray(x ) for x in speech_inputs]
        return speech_inputs
@require_torch
@require_torchaudio
class __lowercase ( SequenceFeatureExtractionTestMixin , unittest.TestCase ):
'''simple docstring'''
    feature_extraction_class = TvltFeatureExtractor
def _lowerCamelCase ( self ):
        self.feat_extract_tester = TvltFeatureExtractionTester(self )
def _lowerCamelCase ( self ):
__a : int = self.feature_extraction_class(**self.feat_extract_dict )
self.assertTrue(hasattr(_UpperCAmelCase , '''spectrogram_length''' ) )
self.assertTrue(hasattr(_UpperCAmelCase , '''feature_size''' ) )
self.assertTrue(hasattr(_UpperCAmelCase , '''num_audio_channels''' ) )
self.assertTrue(hasattr(_UpperCAmelCase , '''hop_length''' ) )
self.assertTrue(hasattr(_UpperCAmelCase , '''chunk_length''' ) )
self.assertTrue(hasattr(_UpperCAmelCase , '''sampling_rate''' ) )
def _lowerCamelCase ( self ):
__a : Union[str, Any] = self.feature_extraction_class(**self.feat_extract_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
__a : List[str] = feat_extract_first.save_pretrained(_UpperCAmelCase )[0]
check_json_file_has_correct_format(_UpperCAmelCase )
__a : Union[str, Any] = self.feature_extraction_class.from_pretrained(_UpperCAmelCase )
__a : Tuple = feat_extract_first.to_dict()
__a : List[Any] = feat_extract_second.to_dict()
__a : int = dict_first.pop('''mel_filters''' )
__a : List[Any] = dict_second.pop('''mel_filters''' )
self.assertTrue(np.allclose(_UpperCAmelCase , _UpperCAmelCase ) )
self.assertEqual(_UpperCAmelCase , _UpperCAmelCase )
def _lowerCamelCase ( self ):
__a : List[str] = self.feature_extraction_class(**self.feat_extract_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
__a : int = os.path.join(_UpperCAmelCase , '''feat_extract.json''' )
feat_extract_first.to_json_file(_UpperCAmelCase )
__a : Optional[Any] = self.feature_extraction_class.from_json_file(_UpperCAmelCase )
__a : Optional[Any] = feat_extract_first.to_dict()
__a : Any = feat_extract_second.to_dict()
__a : Optional[Any] = dict_first.pop('''mel_filters''' )
__a : Dict = dict_second.pop('''mel_filters''' )
self.assertTrue(np.allclose(_UpperCAmelCase , _UpperCAmelCase ) )
self.assertEqual(_UpperCAmelCase , _UpperCAmelCase )
def _lowerCamelCase ( self ):
# Initialize feature_extractor
__a : str = self.feature_extraction_class(**self.feat_extract_dict )
# create three inputs of length 800, 1000, and 1200
__a : Any = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )]
__a : Union[str, Any] = [np.asarray(_UpperCAmelCase ) for speech_input in speech_inputs]
# Test not batched input
__a : List[str] = feature_extractor(np_speech_inputs[0] , return_tensors='''np''' , sampling_rate=44100 ).audio_values
self.assertTrue(encoded_audios.ndim == 4 )
self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size )
self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length )
self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels )
# Test batched
__a : int = feature_extractor(_UpperCAmelCase , return_tensors='''np''' , sampling_rate=44100 ).audio_values
self.assertTrue(encoded_audios.ndim == 4 )
self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size )
self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length )
self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels )
# Test audio masking
__a : List[Any] = feature_extractor(
_UpperCAmelCase , return_tensors='''np''' , sampling_rate=44100 , mask_audio=_UpperCAmelCase ).audio_values
self.assertTrue(encoded_audios.ndim == 4 )
self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size )
self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length )
self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels )
# Test 2-D numpy arrays are batched.
__a : str = [floats_list((1, x) )[0] for x in (800, 800, 800)]
__a : Any = np.asarray(_UpperCAmelCase )
__a : Optional[Any] = feature_extractor(_UpperCAmelCase , return_tensors='''np''' , sampling_rate=44100 ).audio_values
self.assertTrue(encoded_audios.ndim == 4 )
self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size )
self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length )
self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels )
def _lowerCamelCase ( self , _UpperCAmelCase ):
__a : int = load_dataset('''hf-internal-testing/librispeech_asr_dummy''' , '''clean''' , split='''validation''' )
# automatic decoding with librispeech
__a : int = ds.sort('''id''' ).select(range(_UpperCAmelCase ) )[:num_samples]['''audio''']
return [x["array"] for x in speech_samples]
def _lowerCamelCase ( self ):
__a : List[str] = self._load_datasamples(1 )
__a : Tuple = TvltFeatureExtractor()
__a : Optional[Any] = feature_extractor(_UpperCAmelCase , return_tensors='''pt''' ).audio_values
self.assertEquals(audio_values.shape , (1, 1, 192, 128) )
__a : Dict = torch.tensor([[-0.3_0_3_2, -0.2_7_0_8], [-0.4_4_3_4, -0.4_0_0_7]] )
self.assertTrue(torch.allclose(audio_values[0, 0, :2, :2] , _UpperCAmelCase , atol=1e-4 ) )
| 52 | 0 |
import re
from filelock import FileLock
try:
import nltk
a : Optional[int] = True
except (ImportError, ModuleNotFoundError):
a : Union[str, Any] = False
if NLTK_AVAILABLE:
with FileLock(".lock") as lock:
nltk.download("punkt", quiet=True)
def lowerCamelCase__ ( __lowerCamelCase : str ):
    __lowerCamelCase = re.sub("""<n>""" , """""" , __lowerCamelCase )  # remove pegasus newline char
assert NLTK_AVAILABLE, "nltk must be installed to separate newlines between sentences. (pip install nltk)"
return "\n".join(nltk.sent_tokenize(__lowerCamelCase ) )
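# Usage sketch for the splitter above: it drops the Pegasus "<n>" marker and
# rejoins NLTK-detected sentences with real newlines.
#
# print(lowerCamelCase__("First sentence.<n> Second sentence."))
# -> "First sentence.\nSecond sentence."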
| 63 |
"""simple docstring"""
from __future__ import annotations
class BoyerMooreSearch :
'''simple docstring'''
    def __init__( self , text , pattern ):
        self.text, self.pattern = text, pattern
        self.textLen, self.patLen = len(text ), len(pattern )
    def match_in_pattern( self , char ):
        for i in range(self.patLen - 1 , -1 , -1 ):
            if char == self.pattern[i]:
                return i
        return -1
    def mismatch_in_text( self , current_pos ):
        for i in range(self.patLen - 1 , -1 , -1 ):
            if self.pattern[i] != self.text[current_pos + i]:
                return current_pos + i
        return -1
    def bad_character_heuristic( self ):
        # searches pattern in text and returns index positions
        positions = []
        for i in range(self.textLen - self.patLen + 1 ):
            mismatch_index = self.mismatch_in_text(i )
            if mismatch_index == -1:
                positions.append(i )
            else:
                match_index = self.match_in_pattern(self.text[mismatch_index] )
                i = (
                    mismatch_index - match_index
                )  # shifting index lgtm [py/multiple-definition]
        return positions
text = '''ABAABA'''
pattern = '''AB'''
bms = BoyerMooreSearch(text, pattern)
positions = bms.bad_character_heuristic()
if len(positions) == 0:
print('''No match found''')
else:
print('''Pattern found in following positions: ''')
print(positions)
| 52 | 0 |
import torch
from torch import nn
from transformers import CLIPPreTrainedModel, CLIPVisionModel
from ...models.attention import BasicTransformerBlock
from ...utils import logging
lowercase_ : int = logging.get_logger(__name__) # pylint: disable=invalid-name
class PaintByExampleImageEncoder( CLIPPreTrainedModel ):
    def __init__( self , config , proj_size=768 ) -> Union[str, Any]:
        super().__init__(config )
        self.proj_size = proj_size
        self.model = CLIPVisionModel(config )
        self.mapper = PaintByExampleMapper(config )
        self.final_layer_norm = nn.LayerNorm(config.hidden_size )
        self.proj_out = nn.Linear(config.hidden_size , self.proj_size )
        # uncondition for scaling
        self.uncond_vector = nn.Parameter(torch.randn((1, 1, self.proj_size) ) )
    def forward( self , pixel_values , return_uncond_vector=False ) -> Dict:
        clip_output = self.model(pixel_values=pixel_values )
        latent_states = clip_output.pooler_output
        latent_states = self.mapper(latent_states[:, None] )
        latent_states = self.final_layer_norm(latent_states )
        latent_states = self.proj_out(latent_states )
        if return_uncond_vector:
            return latent_states, self.uncond_vector
        return latent_states
class PaintByExampleMapper( nn.Module ):
    def __init__( self , config ) -> int:
        super().__init__()
        num_layers = (config.num_hidden_layers + 1) // 5
        hid_size = config.hidden_size
        num_heads = 1
        self.blocks = nn.ModuleList(
            [
                # attention_bias=True is assumed; the obfuscated source collapsed this value
                BasicTransformerBlock(hid_size , num_heads , hid_size , activation_fn='''gelu''' , attention_bias=True )
                for _ in range(num_layers )
            ] )
    def forward( self , hidden_states ) -> Optional[Any]:
        for block in self.blocks:
            hidden_states = block(hidden_states )
        return hidden_states
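# Shape-level sketch for the mapper above; the config values are the CLIP
# vision defaults (hidden_size=768) and only illustrate tensor shapes:
#
# from transformers import CLIPVisionConfig
# mapper = PaintByExampleMapper(CLIPVisionConfig())
# out = mapper(torch.randn(1, 1, 768))  # hidden size preserved: (1, 1, 768)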
| 64 |
"""simple docstring"""
import webbrowser
from sys import argv
from urllib.parse import parse_qs, quote
import requests
from bs4 import BeautifulSoup
from fake_useragent import UserAgent
if __name__ == "__main__":
    query = '''%20'''.join(argv[1:]) if len(argv) > 1 else quote(str(input('''Search: ''')))
print('''Googling.....''')
    url = F'https://www.google.com/search?q={query}&num=100'
    res = requests.get(
url,
headers={'''User-Agent''': str(UserAgent().random)},
)
try:
        link = (
BeautifulSoup(res.text, '''html.parser''')
.find('''div''', attrs={'''class''': '''yuRUbf'''})
.find('''a''')
.get('''href''')
)
except AttributeError:
        link = parse_qs(
BeautifulSoup(res.text, '''html.parser''')
.find('''div''', attrs={'''class''': '''kCrYT'''})
.find('''a''')
.get('''href''')
)['''url'''][0]
webbrowser.open(link)
| 52 | 0 |
"""simple docstring"""
import unittest
from transformers import (
MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING,
TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING,
TextaTextGenerationPipeline,
pipeline,
)
from transformers.testing_utils import is_pipeline_test, require_tf, require_torch
from transformers.utils import is_torch_available
from .test_pipelines_common import ANY
if is_torch_available():
import torch
@is_pipeline_test
class __lowercase ( unittest.TestCase ):
    model_mapping = MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
    tf_model_mapping = TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
    def get_test_pipeline( self : Union[str, Any] ,model : Dict ,tokenizer : Union[str, Any] ,processor : List[str] ):
        '''simple docstring'''
        generator = TextaTextGenerationPipeline(model=model ,tokenizer=tokenizer )
        return generator, ["Something to write", "Something else"]
    def run_pipeline_test( self : str ,generator : Union[str, Any] ,_ : str ):
        '''simple docstring'''
        outputs = generator("""Something there""" )
        self.assertEqual(outputs ,[{"""generated_text""": ANY(str )}] )
        # These are encoder decoder, they don't just append to incoming string
        self.assertFalse(outputs[0]["""generated_text"""].startswith("""Something there""" ) )
        outputs = generator(["""This is great !""", """Something else"""] ,num_return_sequences=2 ,do_sample=True )
        self.assertEqual(
            outputs ,[
                [{"""generated_text""": ANY(str )}, {"""generated_text""": ANY(str )}],
                [{"""generated_text""": ANY(str )}, {"""generated_text""": ANY(str )}],
            ] ,)
        outputs = generator(
            ["""This is great !""", """Something else"""] ,num_return_sequences=2 ,batch_size=2 ,do_sample=True )
        self.assertEqual(
            outputs ,[
                [{"""generated_text""": ANY(str )}, {"""generated_text""": ANY(str )}],
                [{"""generated_text""": ANY(str )}, {"""generated_text""": ANY(str )}],
            ] ,)
        with self.assertRaises(TypeError ):
            generator(4 )
@require_torch
    def test_small_model_pt( self : str ):
        '''simple docstring'''
        generator = pipeline("""text2text-generation""" ,model="""patrickvonplaten/t5-tiny-random""" ,framework="""pt""" )
        # do_sample=False necessary for reproducibility
        outputs = generator("""Something there""" ,do_sample=False )
        self.assertEqual(outputs ,[{"""generated_text""": """"""}] )
        num_return_sequences = 3
        outputs = generator(
            """Something there""" ,num_return_sequences=num_return_sequences ,num_beams=num_return_sequences ,)
        target_outputs = [
            {"""generated_text""": """Beide Beide Beide Beide Beide Beide Beide Beide Beide"""},
            {"""generated_text""": """Beide Beide Beide Beide Beide Beide Beide Beide"""},
            {"""generated_text""": """"""},
        ]
        self.assertEqual(outputs ,target_outputs )
        outputs = generator("""This is a test""" ,do_sample=True ,num_return_sequences=2 ,return_tensors=True )
        self.assertEqual(
            outputs ,[
                {"""generated_token_ids""": ANY(torch.Tensor )},
                {"""generated_token_ids""": ANY(torch.Tensor )},
            ] ,)
        generator.tokenizer.pad_token_id = generator.model.config.eos_token_id
        generator.tokenizer.pad_token = """<pad>"""
        outputs = generator(
            ["""This is a test""", """This is a second test"""] ,do_sample=True ,num_return_sequences=2 ,batch_size=2 ,return_tensors=True ,)
        self.assertEqual(
            outputs ,[
                [
                    {"""generated_token_ids""": ANY(torch.Tensor )},
                    {"""generated_token_ids""": ANY(torch.Tensor )},
                ],
                [
                    {"""generated_token_ids""": ANY(torch.Tensor )},
                    {"""generated_token_ids""": ANY(torch.Tensor )},
                ],
            ] ,)
@require_tf
    def test_small_model_tf( self : Any ):
        '''simple docstring'''
        generator = pipeline("""text2text-generation""" ,model="""patrickvonplaten/t5-tiny-random""" ,framework="""tf""" )
        # do_sample=False necessary for reproducibility
        outputs = generator("""Something there""" ,do_sample=False )
        self.assertEqual(outputs ,[{"""generated_text""": """"""}] )
| 65 |
"""simple docstring"""
import inspect
import os
import unittest
from dataclasses import dataclass
import torch
from accelerate import Accelerator, DistributedDataParallelKwargs, GradScalerKwargs
from accelerate.state import AcceleratorState
from accelerate.test_utils import execute_subprocess_async, require_cuda, require_multi_gpu
from accelerate.utils import KwargsHandler
@dataclass
class MockClass( KwargsHandler ):
'''simple docstring'''
    a : int = 0
    b : bool = False
    c : float = 3.0
class __lowercase ( unittest.TestCase ):
'''simple docstring'''
def _lowerCamelCase ( self ):
# If no defaults are changed, `to_kwargs` returns an empty dict.
self.assertDictEqual(MockClass().to_kwargs() , {} )
self.assertDictEqual(MockClass(a=2 ).to_kwargs() , {'''a''': 2} )
self.assertDictEqual(MockClass(a=2 , b=_UpperCAmelCase ).to_kwargs() , {'''a''': 2, '''b''': True} )
self.assertDictEqual(MockClass(a=2 , c=2.2_5 ).to_kwargs() , {'''a''': 2, '''c''': 2.2_5} )
@require_cuda
def _lowerCamelCase ( self ):
        # Check that a custom `GradScalerKwargs` handler is applied to the scaler the `Accelerator` creates.
        scaler_handler = GradScalerKwargs(init_scale=1024 , growth_factor=2 )
        AcceleratorState._reset_state()
        accelerator = Accelerator(mixed_precision='''fp16''' , kwargs_handlers=[scaler_handler] )
        print(accelerator.use_fp16 )
        scaler = accelerator.scaler
# Check the kwargs have been applied
self.assertEqual(scaler._init_scale , 1_0_2_4.0 )
self.assertEqual(scaler._growth_factor , 2.0 )
# Check the other values are at the default
self.assertEqual(scaler._backoff_factor , 0.5 )
self.assertEqual(scaler._growth_interval , 2000 )
self.assertEqual(scaler._enabled , _UpperCAmelCase )
@require_multi_gpu
def _lowerCamelCase ( self ):
        cmd = ['''torchrun''', f"""--nproc_per_node={torch.cuda.device_count()}""", inspect.getfile(self.__class__ )]
        execute_subprocess_async(cmd , env=os.environ.copy() )
if __name__ == "__main__":
    ddp_scaler = DistributedDataParallelKwargs(bucket_cap_mb=15, find_unused_parameters=True)
    accelerator = Accelerator(kwargs_handlers=[ddp_scaler])
    model = torch.nn.Linear(100, 200)
    model = accelerator.prepare(model)
    # Check the values changed in kwargs
    error_msg = ''''''
    observed_bucket_cap_map = model.bucket_bytes_cap // (1_024 * 1_024)
if observed_bucket_cap_map != 15:
error_msg += F"Kwargs badly passed, should have `15` but found {observed_bucket_cap_map}.\n"
if model.find_unused_parameters is not True:
error_msg += F"Kwargs badly passed, should have `True` but found {model.find_unused_parameters}.\n"
# Check the values of the defaults
if model.dim != 0:
error_msg += F"Default value not respected, should have `0` but found {model.dim}.\n"
if model.broadcast_buffers is not True:
error_msg += F"Default value not respected, should have `True` but found {model.broadcast_buffers}.\n"
if model.gradient_as_bucket_view is not False:
error_msg += F"Default value not respected, should have `False` but found {model.gradient_as_bucket_view}.\n"
# Raise error at the end to make sure we don't stop at the first failure.
if len(error_msg) > 0:
raise ValueError(error_msg)
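# The handler pattern exercised above, in isolation: kwargs handlers carry
# non-default options into the objects `Accelerator` constructs.
#
# handler = DistributedDataParallelKwargs(find_unused_parameters=True)
# accelerator = Accelerator(kwargs_handlers=[handler])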
| 52 | 0 |
import unittest
from transformers import BertGenerationTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_torch, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
SPIECE_UNDERLINE = "▁"
SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece.model")
@require_sentencepiece
class lowerCAmelCase_ ( __snake_case , unittest.TestCase ):
    tokenizer_class = BertGenerationTokenizer
    test_rust_tokenizer = False
    test_sentencepiece = True
def __a ( self ):
super().setUp()
        tokenizer = BertGenerationTokenizer(SAMPLE_VOCAB , keep_accents=True )
tokenizer.save_pretrained(self.tmpdirname )
def __a ( self ):
_lowercase : str = '<s>'
_lowercase : Dict = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(_lowerCAmelCase ) , _lowerCAmelCase )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(_lowerCAmelCase ) , _lowerCAmelCase )
def __a ( self ):
_lowercase : Optional[int] = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , '<unk>' )
self.assertEqual(vocab_keys[1] , '<s>' )
self.assertEqual(vocab_keys[-1] , '<pad>' )
self.assertEqual(len(_lowerCAmelCase ) , 1_0_0_2 )
def __a ( self ):
self.assertEqual(self.get_tokenizer().vocab_size , 1_0_0_0 )
def __a ( self ):
        tokenizer = BertGenerationTokenizer(SAMPLE_VOCAB , keep_accents=True )
_lowercase : Tuple = tokenizer.tokenize('This is a test' )
self.assertListEqual(_lowerCAmelCase , ['▁This', '▁is', '▁a', '▁t', 'est'] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(_lowerCAmelCase ) , [2_8_5, 4_6, 1_0, 1_7_0, 3_8_2] , )
_lowercase : str = tokenizer.tokenize('I was born in 92000, and this is falsé.' )
self.assertListEqual(
_lowerCAmelCase , [
SPIECE_UNDERLINE + 'I',
SPIECE_UNDERLINE + 'was',
SPIECE_UNDERLINE + 'b',
'or',
'n',
SPIECE_UNDERLINE + 'in',
SPIECE_UNDERLINE + '',
'9',
'2',
'0',
'0',
'0',
',',
SPIECE_UNDERLINE + 'and',
SPIECE_UNDERLINE + 'this',
SPIECE_UNDERLINE + 'is',
SPIECE_UNDERLINE + 'f',
'al',
's',
'é',
'.',
] , )
_lowercase : int = tokenizer.convert_tokens_to_ids(_lowerCAmelCase )
self.assertListEqual(
_lowerCAmelCase , [8, 2_1, 8_4, 5_5, 2_4, 1_9, 7, 0, 6_0_2, 3_4_7, 3_4_7, 3_4_7, 3, 1_2, 6_6, 4_6, 7_2, 8_0, 6, 0, 4] , )
_lowercase : Optional[Any] = tokenizer.convert_ids_to_tokens(_lowerCAmelCase )
self.assertListEqual(
_lowerCAmelCase , [
SPIECE_UNDERLINE + 'I',
SPIECE_UNDERLINE + 'was',
SPIECE_UNDERLINE + 'b',
'or',
'n',
SPIECE_UNDERLINE + 'in',
SPIECE_UNDERLINE + '',
'<unk>',
'2',
'0',
'0',
'0',
',',
SPIECE_UNDERLINE + 'and',
SPIECE_UNDERLINE + 'this',
SPIECE_UNDERLINE + 'is',
SPIECE_UNDERLINE + 'f',
'al',
's',
'<unk>',
'.',
] , )
@cached_property
def __a ( self ):
return BertGenerationTokenizer.from_pretrained('google/bert_for_seq_generation_L-24_bbc_encoder' )
@slow
def __a ( self ):
_lowercase : Union[str, Any] = 'Hello World!'
_lowercase : Dict = [1_8_5_3_6, 2_2_6_0, 1_0_1]
self.assertListEqual(_lowerCAmelCase , self.big_tokenizer.encode(_lowerCAmelCase ) )
@slow
def __a ( self ):
_lowercase : Any = (
'This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) " [ ] ! : - . Also we will'
' add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth'
)
_lowercase : int = [
8_7_1,
4_1_9,
3_5_8,
9_4_6,
9_9_1,
2_5_2_1,
4_5_2,
3_5_8,
1_3_5_7,
3_8_7,
7_7_5_1,
3_5_3_6,
1_1_2,
9_8_5,
4_5_6,
1_2_6,
8_6_5,
9_3_8,
5_4_0_0,
5_7_3_4,
4_5_8,
1_3_6_8,
4_6_7,
7_8_6,
2_4_6_2,
5_2_4_6,
1_1_5_9,
6_3_3,
8_6_5,
4_5_1_9,
4_5_7,
5_8_2,
8_5_2,
2_5_5_7,
4_2_7,
9_1_6,
5_0_8,
4_0_5,
3_4_3_2_4,
4_9_7,
3_9_1,
4_0_8,
1_1_3_4_2,
1_2_4_4,
3_8_5,
1_0_0,
9_3_8,
9_8_5,
4_5_6,
5_7_4,
3_6_2,
1_2_5_9_7,
3_2_0_0,
3_1_2_9,
1_1_7_2,
]
self.assertListEqual(_lowerCAmelCase , self.big_tokenizer.encode(_lowerCAmelCase ) )
@require_torch
@slow
def __a ( self ):
import torch
from transformers import BertGenerationConfig, BertGenerationEncoder
# Build sequence
_lowercase : List[str] = list(self.big_tokenizer.get_vocab().keys() )[:1_0]
_lowercase : Union[str, Any] = ' '.join(_lowerCAmelCase )
_lowercase : int = self.big_tokenizer.encode_plus(_lowerCAmelCase , return_tensors='pt' , return_token_type_ids=_lowerCAmelCase )
_lowercase : Any = self.big_tokenizer.batch_encode_plus(
[sequence + ' ' + sequence] , return_tensors='pt' , return_token_type_ids=_lowerCAmelCase )
_lowercase : Optional[int] = BertGenerationConfig()
_lowercase : Any = BertGenerationEncoder(_lowerCAmelCase )
assert model.get_input_embeddings().weight.shape[0] >= self.big_tokenizer.vocab_size
with torch.no_grad():
model(**_lowerCAmelCase )
model(**_lowerCAmelCase )
@slow
def __a ( self ):
# fmt: off
_lowercase : List[Any] = {'input_ids': [[3_9_2_8_6, 4_5_8, 3_6_3_3_5, 2_0_0_1, 4_5_6, 1_3_0_7_3, 1_3_2_6_6, 4_5_5, 1_1_3, 7_7_4_6, 1_7_4_1, 1_1_1_5_7, 3_9_1, 1_3_0_7_3, 1_3_2_6_6, 4_5_5, 1_1_3, 3_9_6_7, 3_5_4_1_2, 1_1_3, 4_9_3_6, 1_0_9, 3_8_7_0, 2_3_7_7, 1_1_3, 3_0_0_8_4, 4_5_7_2_0, 4_5_8, 1_3_4, 1_7_4_9_6, 1_1_2, 5_0_3, 1_1_6_7_2, 1_1_3, 1_1_8, 1_1_2, 5_6_6_5, 1_3_3_4_7, 3_8_6_8_7, 1_1_2, 1_4_9_6, 3_1_3_8_9, 1_1_2, 3_2_6_8, 4_7_2_6_4, 1_3_4, 9_6_2, 1_1_2, 1_6_3_7_7, 8_0_3_5, 2_3_1_3_0, 4_3_0, 1_2_1_6_9, 1_5_5_1_8, 2_8_5_9_2, 4_5_8, 1_4_6, 4_1_6_9_7, 1_0_9, 3_9_1, 1_2_1_6_9, 1_5_5_1_8, 1_6_6_8_9, 4_5_8, 1_4_6, 4_1_3_5_8, 1_0_9, 4_5_2, 7_2_6, 4_0_3_4, 1_1_1, 7_6_3, 3_5_4_1_2, 5_0_8_2, 3_8_8, 1_9_0_3, 1_1_1, 9_0_5_1, 3_9_1, 2_8_7_0, 4_8_9_1_8, 1_9_0_0, 1_1_2_3, 5_5_0, 9_9_8, 1_1_2, 9_5_8_6, 1_5_9_8_5, 4_5_5, 3_9_1, 4_1_0, 2_2_9_5_5, 3_7_6_3_6, 1_1_4], [4_4_8, 1_7_4_9_6, 4_1_9, 3_6_6_3, 3_8_5, 7_6_3, 1_1_3, 2_7_5_3_3, 2_8_7_0, 3_2_8_3, 1_3_0_4_3, 1_6_3_9, 2_4_7_1_3, 5_2_3, 6_5_6, 2_4_0_1_3, 1_8_5_5_0, 2_5_2_1, 5_1_7, 2_7_0_1_4, 2_1_2_4_4, 4_2_0, 1_2_1_2, 1_4_6_5, 3_9_1, 9_2_7, 4_8_3_3, 3_8_8, 5_7_8, 1_1_7_8_6, 1_1_4, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [4_8_4, 2_1_6_9, 7_6_8_7, 2_1_9_3_2, 1_8_1_4_6, 7_2_6, 3_6_3, 1_7_0_3_2, 3_3_9_1, 1_1_4, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=_lowerCAmelCase , model_name='google/bert_for_seq_generation_L-24_bbc_encoder' , revision='c817d1fd1be2ffa69431227a1fe320544943d4db' , )
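# Round-trip sketch using the public checkpoint referenced above (the ids are
# the ones the test expects for this exact string):
#
# tok = BertGenerationTokenizer.from_pretrained("google/bert_for_seq_generation_L-24_bbc_encoder")
# ids = tok.encode("Hello World!")  # [18536, 2260, 101]
# print(tok.decode(ids))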
| 66 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
A = {
'''configuration_tapas''': ['''TAPAS_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''TapasConfig'''],
'''tokenization_tapas''': ['''TapasTokenizer'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A = [
'''TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TapasForMaskedLM''',
'''TapasForQuestionAnswering''',
'''TapasForSequenceClassification''',
'''TapasModel''',
'''TapasPreTrainedModel''',
'''load_tf_weights_in_tapas''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A = [
'''TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFTapasForMaskedLM''',
'''TFTapasForQuestionAnswering''',
'''TFTapasForSequenceClassification''',
'''TFTapasModel''',
'''TFTapasPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_tapas import TAPAS_PRETRAINED_CONFIG_ARCHIVE_MAP, TapasConfig
from .tokenization_tapas import TapasTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tapas import (
TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST,
TapasForMaskedLM,
TapasForQuestionAnswering,
TapasForSequenceClassification,
TapasModel,
TapasPreTrainedModel,
load_tf_weights_in_tapas,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_tapas import (
TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST,
TFTapasForMaskedLM,
TFTapasForQuestionAnswering,
TFTapasForSequenceClassification,
TFTapasModel,
TFTapasPreTrainedModel,
)
else:
import sys
A = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 52 | 0 |
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers
from ...tokenization_utils_base import BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_gpta import GPTaTokenizer
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
snake_case = logging.get_logger(__name__)
snake_case = {"""vocab_file""": """vocab.json""", """merges_file""": """merges.txt""", """tokenizer_file""": """tokenizer.json"""}
snake_case = {
"""vocab_file""": {
"""gpt2""": """https://huggingface.co/gpt2/resolve/main/vocab.json""",
"""gpt2-medium""": """https://huggingface.co/gpt2-medium/resolve/main/vocab.json""",
"""gpt2-large""": """https://huggingface.co/gpt2-large/resolve/main/vocab.json""",
"""gpt2-xl""": """https://huggingface.co/gpt2-xl/resolve/main/vocab.json""",
"""distilgpt2""": """https://huggingface.co/distilgpt2/resolve/main/vocab.json""",
},
"""merges_file""": {
"""gpt2""": """https://huggingface.co/gpt2/resolve/main/merges.txt""",
"""gpt2-medium""": """https://huggingface.co/gpt2-medium/resolve/main/merges.txt""",
"""gpt2-large""": """https://huggingface.co/gpt2-large/resolve/main/merges.txt""",
"""gpt2-xl""": """https://huggingface.co/gpt2-xl/resolve/main/merges.txt""",
"""distilgpt2""": """https://huggingface.co/distilgpt2/resolve/main/merges.txt""",
},
"""tokenizer_file""": {
"""gpt2""": """https://huggingface.co/gpt2/resolve/main/tokenizer.json""",
"""gpt2-medium""": """https://huggingface.co/gpt2-medium/resolve/main/tokenizer.json""",
"""gpt2-large""": """https://huggingface.co/gpt2-large/resolve/main/tokenizer.json""",
"""gpt2-xl""": """https://huggingface.co/gpt2-xl/resolve/main/tokenizer.json""",
"""distilgpt2""": """https://huggingface.co/distilgpt2/resolve/main/tokenizer.json""",
},
}
snake_case = {
"""gpt2""": 1_0_2_4,
"""gpt2-medium""": 1_0_2_4,
"""gpt2-large""": 1_0_2_4,
"""gpt2-xl""": 1_0_2_4,
"""distilgpt2""": 1_0_2_4,
}
class A_ ( UpperCAmelCase ):
"""simple docstring"""
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ['''input_ids''', '''attention_mask''']
    slow_tokenizer_class = GPTaTokenizer
    def __init__( self : int ,vocab_file : Optional[int]=None ,merges_file : Optional[int]=None ,tokenizer_file : List[str]=None ,unk_token : Any="<|endoftext|>" ,bos_token : int="<|endoftext|>" ,eos_token : List[str]="<|endoftext|>" ,add_prefix_space : str=False ,**kwargs : Union[str, Any] ,) -> int:
        super().__init__(
            vocab_file ,merges_file ,tokenizer_file=tokenizer_file ,unk_token=unk_token ,bos_token=bos_token ,eos_token=eos_token ,add_prefix_space=add_prefix_space ,**kwargs ,)
        self.add_bos_token = kwargs.pop('add_bos_token' ,False )
        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
        if pre_tok_state.get('add_prefix_space' ,add_prefix_space ) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers ,pre_tok_state.pop('type' ) )
            pre_tok_state['add_prefix_space'] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state )
        self.add_prefix_space = add_prefix_space
    def _batch_encode_plus( self : Any ,*args : List[Any] ,**kwargs : Tuple ) -> BatchEncoding:
        is_split_into_words = kwargs.get('is_split_into_words' ,False )
        assert self.add_prefix_space or not is_split_into_words, (
            F"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True """
            "to use it with pretokenized inputs."
        )
        return super()._batch_encode_plus(*args ,**kwargs )
    def _encode_plus( self : int ,*args : Any ,**kwargs : Any ) -> BatchEncoding:
        is_split_into_words = kwargs.get('is_split_into_words' ,False )
        assert self.add_prefix_space or not is_split_into_words, (
            F"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True """
            "to use it with pretokenized inputs."
        )
        return super()._encode_plus(*args ,**kwargs )
    def save_vocabulary( self : int ,save_directory : str ,filename_prefix : Optional[str] = None ) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory ,name=filename_prefix )
        return tuple(files )
    def _build_conversation_input_ids( self : List[Any] ,conversation : "Conversation" ) -> List[int]:
        input_ids = []
        for is_user, text in conversation.iter_texts():
            input_ids.extend(self.encode(text ,add_special_tokens=False ) + [self.eos_token_id] )
        if len(input_ids ) > self.model_max_length:
            input_ids = input_ids[-self.model_max_length :]
        return input_ids
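# Usage sketch for the fast tokenizer above: add_prefix_space=True is required
# for pre-tokenized input, as the asserts in the encode methods enforce.
#
# from transformers import GPT2TokenizerFast
# tok = GPT2TokenizerFast.from_pretrained("gpt2", add_prefix_space=True)
# enc = tok(["Hello", "world"], is_split_into_words=True)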
| 67 |
"""simple docstring"""
import os
import re
import sys
import traceback
import warnings
from pathlib import Path
from typing import Dict, Optional, Union
from uuid import uuida
from huggingface_hub import HfFolder, ModelCard, ModelCardData, hf_hub_download, whoami
from huggingface_hub.file_download import REGEX_COMMIT_HASH
from huggingface_hub.utils import (
EntryNotFoundError,
RepositoryNotFoundError,
RevisionNotFoundError,
is_jinja_available,
)
from packaging import version
from requests import HTTPError
from .. import __version__
from .constants import (
DEPRECATED_REVISION_ARGS,
DIFFUSERS_CACHE,
HUGGINGFACE_CO_RESOLVE_ENDPOINT,
SAFETENSORS_WEIGHTS_NAME,
WEIGHTS_NAME,
)
from .import_utils import (
ENV_VARS_TRUE_VALUES,
_flax_version,
_jax_version,
_onnxruntime_version,
_torch_version,
is_flax_available,
is_onnx_available,
is_torch_available,
)
from .logging import get_logger
A = get_logger(__name__)
A = Path(__file__).parent / '''model_card_template.md'''
A = uuida().hex
A = os.getenv('''HF_HUB_OFFLINE''', '''''').upper() in ENV_VARS_TRUE_VALUES
A = os.getenv('''DISABLE_TELEMETRY''', '''''').upper() in ENV_VARS_TRUE_VALUES
A = HUGGINGFACE_CO_RESOLVE_ENDPOINT + '''/api/telemetry/'''
def http_user_agent(user_agent: Union[Dict, str, None] = None) -> str:
    """Format a user-agent string with basic info about a request."""
    ua = f"diffusers/{__version__}; python/{sys.version.split()[0]}; session_id/{SESSION_ID}"
    if DISABLE_TELEMETRY or HF_HUB_OFFLINE:
        return ua + "; telemetry/off"
    if is_torch_available():
        ua += f"; torch/{_torch_version}"
    if is_flax_available():
        ua += f"; jax/{_jax_version}"
        ua += f"; flax/{_flax_version}"
    if is_onnx_available():
        ua += f"; onnxruntime/{_onnxruntime_version}"
    # CI will set this value to True
    if os.environ.get("DIFFUSERS_IS_CI", "").upper() in ENV_VARS_TRUE_VALUES:
        ua += "; is_ci/true"
    if isinstance(user_agent, dict):
        ua += "; " + "; ".join(f"{k}/{v}" for k, v in user_agent.items())
    elif isinstance(user_agent, str):
        ua += "; " + user_agent
    return ua
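# Illustration (not in the original file): the user-agent string grows with the
# installed backends plus any extra fields supplied by the caller, e.g.
#   http_user_agent({"pipeline_class": "StableDiffusionPipeline"})
#   -> "diffusers/<version>; python/<version>; session_id/<hex>; torch/<version>; pipeline_class/StableDiffusionPipeline"
# (field values shown are placeholders).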
def get_full_repo_name(model_id: str, organization: Optional[str] = None, token: Optional[str] = None) -> str:
    if token is None:
        token = HfFolder.get_token()
    if organization is None:
        username = whoami(token)["name"]
        return f"{username}/{model_id}"
    else:
        return f"{organization}/{model_id}"
def create_model_card(args, model_name):
    if not is_jinja_available():
        raise ValueError(
            "Modelcard rendering is based on Jinja templates."
            " Please make sure to have `jinja` installed before using `create_model_card`."
            " To install it, please run `pip install Jinja2`."
        )

    if hasattr(args, "local_rank") and args.local_rank not in [-1, 0]:
        return

    hub_token = args.hub_token if hasattr(args, "hub_token") else None
    repo_name = get_full_repo_name(model_name, token=hub_token)

    model_card = ModelCard.from_template(
        card_data=ModelCardData(  # Card metadata object that will be converted to YAML block
            language="en",
            license="apache-2.0",
            library_name="diffusers",
            tags=[],
            datasets=args.dataset_name,
            metrics=[],
        ),
        template_path=MODEL_CARD_TEMPLATE_PATH,
        model_name=model_name,
        repo_name=repo_name,
        dataset_name=args.dataset_name if hasattr(args, "dataset_name") else None,
        learning_rate=args.learning_rate,
        train_batch_size=args.train_batch_size,
        eval_batch_size=args.eval_batch_size,
        gradient_accumulation_steps=(
            args.gradient_accumulation_steps if hasattr(args, "gradient_accumulation_steps") else None
        ),
        adam_beta1=args.adam_beta1 if hasattr(args, "adam_beta1") else None,
        adam_beta2=args.adam_beta2 if hasattr(args, "adam_beta2") else None,
        adam_weight_decay=args.adam_weight_decay if hasattr(args, "adam_weight_decay") else None,
        adam_epsilon=args.adam_epsilon if hasattr(args, "adam_epsilon") else None,
        lr_scheduler=args.lr_scheduler if hasattr(args, "lr_scheduler") else None,
        lr_warmup_steps=args.lr_warmup_steps if hasattr(args, "lr_warmup_steps") else None,
        ema_inv_gamma=args.ema_inv_gamma if hasattr(args, "ema_inv_gamma") else None,
        ema_power=args.ema_power if hasattr(args, "ema_power") else None,
        ema_max_decay=args.ema_max_decay if hasattr(args, "ema_max_decay") else None,
        mixed_precision=args.mixed_precision,
    )

    card_path = os.path.join(args.output_dir, "README.md")
    model_card.save(card_path)
def extract_commit_hash(resolved_file: Optional[str], commit_hash: Optional[str] = None) -> Optional[str]:
    """Extract the commit hash from a resolved filename toward a cache file."""
    if resolved_file is None or commit_hash is not None:
        return commit_hash
    resolved_file = str(Path(resolved_file).as_posix())
    search = re.search(r"snapshots/([^/]+)/", resolved_file)
    if search is None:
        return None
    commit_hash = search.groups()[0]
    return commit_hash if REGEX_COMMIT_HASH.match(commit_hash) else None
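# Worked example (hypothetical cache path, for illustration only):
#   extract_commit_hash(
#       "~/.cache/huggingface/diffusers/models--org--repo/snapshots/"
#       "0123456789abcdef0123456789abcdef01234567/unet/config.json"
#   )
#   -> "0123456789abcdef0123456789abcdef01234567"
# Anything after "snapshots/" that is not a 40-character hex string yields None.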
# Old default cache path, potentially to be migrated.
# This logic was more or less taken from `transformers`, with the following differences:
# - Diffusers doesn't use custom environment variables to specify the cache path.
# - There is no need to migrate the cache format, just move the files to the new location.
hf_cache_home = os.path.expanduser(
    os.getenv("HF_HOME", os.path.join(os.getenv("XDG_CACHE_HOME", "~/.cache"), "huggingface"))
)
old_diffusers_cache = os.path.join(hf_cache_home, "diffusers")
def move_cache(old_cache_dir: Optional[str] = None, new_cache_dir: Optional[str] = None) -> None:
    if new_cache_dir is None:
        new_cache_dir = DIFFUSERS_CACHE
    if old_cache_dir is None:
        old_cache_dir = old_diffusers_cache

    old_cache_dir = Path(old_cache_dir).expanduser()
    new_cache_dir = Path(new_cache_dir).expanduser()
    for old_blob_path in old_cache_dir.glob("**/blobs/*"):
        if old_blob_path.is_file() and not old_blob_path.is_symlink():
            new_blob_path = new_cache_dir / old_blob_path.relative_to(old_cache_dir)
            new_blob_path.parent.mkdir(parents=True, exist_ok=True)
            os.replace(old_blob_path, new_blob_path)
            try:
                os.symlink(new_blob_path, old_blob_path)
            except OSError:
                logger.warning(
                    "Could not create symlink between old cache and new cache. If you use an older version of diffusers again, files will be re-downloaded."
                )
# At this point, old_cache_dir contains symlinks to the new cache (it can still be used).
cache_version_file = os.path.join(DIFFUSERS_CACHE, "version_diffusers_cache.txt")
if not os.path.isfile(cache_version_file):
    cache_version = 0
else:
    with open(cache_version_file) as f:
        try:
            cache_version = int(f.read())
        except ValueError:
            cache_version = 0

if cache_version < 1:
    old_cache_is_not_empty = os.path.isdir(old_diffusers_cache) and len(os.listdir(old_diffusers_cache)) > 0
if old_cache_is_not_empty:
logger.warning(
'''The cache for model files in Diffusers v0.14.0 has moved to a new location. Moving your '''
'''existing cached models. This is a one-time operation, you can interrupt it or run it '''
'''later by calling `diffusers.utils.hub_utils.move_cache()`.'''
)
try:
move_cache()
except Exception as e:
            trace = "\n".join(traceback.format_tb(e.__traceback__))
logger.error(
F'There was a problem when trying to move your cache:\n\n{trace}\n{e.__class__.__name__}: {e}\n\nPlease '
'''file an issue at https://github.com/huggingface/diffusers/issues/new/choose, copy paste this whole '''
'''message and we will do our best to help.'''
)
if cache_version < 1:
try:
os.makedirs(DIFFUSERS_CACHE, exist_ok=True)
with open(cache_version_file, '''w''') as f:
f.write('''1''')
except Exception:
logger.warning(
F'There was a problem when trying to write in your cache folder ({DIFFUSERS_CACHE}). Please, ensure '
'''the directory exists and can be written to.'''
)
def _add_variant(weights_name: str, variant: Optional[str] = None) -> str:
    if variant is not None:
        splits = weights_name.split(".")
        splits = splits[:-1] + [variant] + splits[-1:]
        weights_name = ".".join(splits)

    return weights_name
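# Example (illustrative): the variant is inserted just before the extension.
#   _add_variant("diffusion_pytorch_model.bin", "fp16")
#   -> "diffusion_pytorch_model.fp16.bin"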
def _get_model_file(
    pretrained_model_name_or_path,
    *,
    weights_name,
    subfolder,
    cache_dir,
    force_download,
    proxies,
    resume_download,
    local_files_only,
    use_auth_token,
    user_agent,
    revision,
    commit_hash=None,
):
    pretrained_model_name_or_path = str(pretrained_model_name_or_path)
    if os.path.isfile(pretrained_model_name_or_path):
        return pretrained_model_name_or_path
    elif os.path.isdir(pretrained_model_name_or_path):
        if os.path.isfile(os.path.join(pretrained_model_name_or_path, weights_name)):
            # Load from a PyTorch checkpoint
            model_file = os.path.join(pretrained_model_name_or_path, weights_name)
            return model_file
        elif subfolder is not None and os.path.isfile(
            os.path.join(pretrained_model_name_or_path, subfolder, weights_name)
        ):
            model_file = os.path.join(pretrained_model_name_or_path, subfolder, weights_name)
            return model_file
        else:
            raise EnvironmentError(
                f"Error no file named {weights_name} found in directory {pretrained_model_name_or_path}.")
else:
# 1. First check if deprecated way of loading from branches is used
        if (
            revision in DEPRECATED_REVISION_ARGS
            and (weights_name == WEIGHTS_NAME or weights_name == SAFETENSORS_WEIGHTS_NAME)
            and version.parse(version.parse(__version__).base_version) >= version.parse("0.20.0")
        ):
            try:
                model_file = hf_hub_download(
                    pretrained_model_name_or_path, filename=_add_variant(weights_name, revision), cache_dir=cache_dir,
                    force_download=force_download, proxies=proxies, resume_download=resume_download,
                    local_files_only=local_files_only, use_auth_token=use_auth_token, user_agent=user_agent,
                    subfolder=subfolder, revision=revision or commit_hash,
                )
                warnings.warn(
                    f"Loading the variant {revision} from {pretrained_model_name_or_path} via `revision='{revision}'` is deprecated. Loading instead from `revision='main'` with `variant={revision}`. Loading model variants via `revision='{revision}'` will be removed in diffusers v1. Please use `variant='{revision}'` instead.",
                    FutureWarning,
                )
                return model_file
            except:  # noqa: E722
                warnings.warn(
                    f"You are loading the variant {revision} from {pretrained_model_name_or_path} via `revision='{revision}'`. This behavior is deprecated and will be removed in diffusers v1. One should use `variant='{revision}'` instead. However, it appears that {pretrained_model_name_or_path} currently does not have a {_add_variant(weights_name, revision)} file in the 'main' branch of {pretrained_model_name_or_path}. \n The Diffusers team and community would be very grateful if you could open an issue: https://github.com/huggingface/diffusers/issues/new with the title '{pretrained_model_name_or_path} is missing {_add_variant(weights_name, revision)}' so that the correct variant file can be added.",
                    FutureWarning,
                )
        try:
            # 2. Load model file as usual
            model_file = hf_hub_download(
                pretrained_model_name_or_path, filename=weights_name, cache_dir=cache_dir,
                force_download=force_download, proxies=proxies, resume_download=resume_download,
                local_files_only=local_files_only, use_auth_token=use_auth_token, user_agent=user_agent,
                subfolder=subfolder, revision=revision or commit_hash,
            )
            return model_file
return model_file
except RepositoryNotFoundError:
raise EnvironmentError(
F"""{pretrained_model_name_or_path} is not a local folder and is not a valid model identifier """
'''listed on \'https://huggingface.co/models\'\nIf this is a private repository, make sure to pass a '''
'''token having permission to this repo with `use_auth_token` or log in with `huggingface-cli '''
'''login`.''')
except RevisionNotFoundError:
raise EnvironmentError(
F"""{revision} is not a valid git identifier (branch name, tag name or commit id) that exists for """
'''this model name. Check the model page at '''
F"""'https://huggingface.co/{pretrained_model_name_or_path}' for available revisions.""")
except EntryNotFoundError:
raise EnvironmentError(
F"""{pretrained_model_name_or_path} does not appear to have a file named {weights_name}.""")
except HTTPError as err:
raise EnvironmentError(
F"""There was a specific connection error when trying to load {pretrained_model_name_or_path}:\n{err}""")
except ValueError:
raise EnvironmentError(
F"""We couldn't connect to '{HUGGINGFACE_CO_RESOLVE_ENDPOINT}' to load this model, couldn't find it"""
F""" in the cached files and it looks like {pretrained_model_name_or_path} is not the path to a"""
F""" directory containing a file named {weights_name} or"""
''' \nCheckout your internet connection or see how to run the library in'''
''' offline mode at \'https://huggingface.co/docs/diffusers/installation#offline-mode\'.''')
except EnvironmentError:
raise EnvironmentError(
F"""Can't load the model for '{pretrained_model_name_or_path}'. If you were trying to load it from """
'''\'https://huggingface.co/models\', make sure you don\'t have a local directory with the same name. '''
F"""Otherwise, make sure '{pretrained_model_name_or_path}' is the correct path to a directory """
F"""containing a file named {weights_name}""")
import sys
N = (
"73167176531330624919225119674426574742355349194934"
"96983520312774506326239578318016984801869478851843"
"85861560789112949495459501737958331952853208805511"
"12540698747158523863050715693290963295227443043557"
"66896648950445244523161731856403098711121722383113"
"62229893423380308135336276614282806444486645238749"
"30358907296290491560440772390713810515859307960866"
"70172427121883998797908792274921901699720888093776"
"65727333001053367881220235421809751254540594752243"
"52584907711670556013604839586446706324415722155397"
"53697817977846174064955149290862569321978468622482"
"83972241375657056057490261407972968652414535100474"
"82166370484403199890008895243450658541227588666881"
"16427171479924442928230863465674813919123162824586"
"17866458359124566529476545682848912883142607690042"
"24219022671055626321111109370544217506941658960408"
"07198403850962455444362981230987879927244284909188"
"84580156166097919133875499200524063689912560717606"
"05886116467109405077541002256983155200055935729725"
"71636269561882670428252483600823257530420752963450"
)
def str_eval(s: str) -> int:
    """Return the product of the digits in the string `s`."""
    product = 1
    for digit in s:
        product *= int(digit)
    return product


def solution(n: str = N) -> int:
    """Find the greatest product of thirteen adjacent digits in `n`."""
    largest_product = -sys.maxsize - 1
    substr = n[:13]
    cur_index = 13
    while cur_index < len(n) - 13:
        if int(n[cur_index]) >= int(substr[0]):
            substr = substr[1:] + n[cur_index]
            cur_index += 1
        else:
            largest_product = max(largest_product, str_eval(substr))
            substr = n[cur_index : cur_index + 13]
            cur_index += 13
    return largest_product
if __name__ == "__main__":
print(F"""{solution() = }""")
"""simple docstring"""
import copy
import os
from typing import TYPE_CHECKING, List, Union
if TYPE_CHECKING:
pass
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

ALIGN_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "kakaobrain/align-base": "https://huggingface.co/kakaobrain/align-base/resolve/main/config.json",
}
class AlignTextConfig(PretrainedConfig):
    r"""Configuration for the text tower of ALIGN (a BERT-style encoder)."""

    model_type = "align_text_model"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        position_embedding_type="absolute",
        use_cache=True,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.pad_token_id = pad_token_id

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs)
        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)
        # get the text config dict if we are loading from AlignConfig
        if config_dict.get("model_type") == "align":
            config_dict = config_dict["text_config"]
        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )
        return cls.from_dict(config_dict, **kwargs)
class AlignVisionConfig(PretrainedConfig):
    r"""Configuration for the vision tower of ALIGN (an EfficientNet-style encoder)."""

    model_type = "align_vision_model"

    def __init__(
        self,
        num_channels: int = 3,
        image_size: int = 600,
        width_coefficient: float = 2.0,
        depth_coefficient: float = 3.1,
        depth_divisor: int = 8,
        kernel_sizes: List[int] = [3, 3, 5, 3, 5, 5, 3],
        in_channels: List[int] = [32, 16, 24, 40, 80, 112, 192],
        out_channels: List[int] = [16, 24, 40, 80, 112, 192, 320],
        depthwise_padding: List[int] = [],
        strides: List[int] = [1, 2, 2, 2, 1, 2, 1],
        num_block_repeats: List[int] = [1, 2, 2, 3, 3, 4, 1],
        expand_ratios: List[int] = [1, 6, 6, 6, 6, 6, 6],
        squeeze_expansion_ratio: float = 0.25,
        hidden_act: str = "swish",
        hidden_dim: int = 2560,
        pooling_type: str = "mean",
        initializer_range: float = 0.02,
        batch_norm_eps: float = 0.001,
        batch_norm_momentum: float = 0.99,
        drop_connect_rate: float = 0.2,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.num_channels = num_channels
        self.image_size = image_size
        self.width_coefficient = width_coefficient
        self.depth_coefficient = depth_coefficient
        self.depth_divisor = depth_divisor
        self.kernel_sizes = kernel_sizes
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.depthwise_padding = depthwise_padding
        self.strides = strides
        self.num_block_repeats = num_block_repeats
        self.expand_ratios = expand_ratios
        self.squeeze_expansion_ratio = squeeze_expansion_ratio
        self.hidden_act = hidden_act
        self.hidden_dim = hidden_dim
        self.pooling_type = pooling_type
        self.initializer_range = initializer_range
        self.batch_norm_eps = batch_norm_eps
        self.batch_norm_momentum = batch_norm_momentum
        self.drop_connect_rate = drop_connect_rate
        self.num_hidden_layers = sum(num_block_repeats) * 4

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs)
        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)
        # get the vision config dict if we are loading from AlignConfig
        if config_dict.get("model_type") == "align":
            config_dict = config_dict["vision_config"]
        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )
        return cls.from_dict(config_dict, **kwargs)
class AlignConfig(PretrainedConfig):
    r"""Configuration combining an `AlignTextConfig` and an `AlignVisionConfig`."""

    model_type = "align"
    is_composition = True

    def __init__(
        self,
        text_config=None,
        vision_config=None,
        projection_dim=640,
        temperature_init_value=1.0,
        initializer_range=0.02,
        **kwargs,
    ):
        super().__init__(**kwargs)

        if text_config is None:
            text_config = {}
            logger.info("text_config is None. Initializing the AlignTextConfig with default values.")

        if vision_config is None:
            vision_config = {}
            logger.info("vision_config is None. Initializing the AlignVisionConfig with default values.")

        self.text_config = AlignTextConfig(**text_config)
        self.vision_config = AlignVisionConfig(**vision_config)
        self.projection_dim = projection_dim
        self.temperature_init_value = temperature_init_value
        self.initializer_range = initializer_range

    @classmethod
    def from_text_vision_configs(cls, text_config: AlignTextConfig, vision_config: AlignVisionConfig, **kwargs):
        return cls(text_config=text_config.to_dict(), vision_config=vision_config.to_dict(), **kwargs)

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["text_config"] = self.text_config.to_dict()
        output["vision_config"] = self.vision_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
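# Usage sketch (illustrative, not part of the original file): composing an
# ALIGN config from default sub-configs and round-tripping it through a dict.
#
#   text_config = AlignTextConfig()
#   vision_config = AlignVisionConfig()
#   config = AlignConfig.from_text_vision_configs(text_config, vision_config)
#   config.projection_dim            # 640
#   config.to_dict()["model_type"]   # "align"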
'''simple docstring'''
import itertools
import json
import linecache
import os
import pickle
import re
import socket
import string
from collections import Counter
from logging import getLogger
from pathlib import Path
from typing import Callable, Dict, Iterable, List
import git
import torch
from torch.utils.data import Dataset
from transformers import BartTokenizer, RagTokenizer, T5Tokenizer
def encode_line(tokenizer, line, max_length, padding_side, pad_to_max_length=True, return_tensors="pt"):
    extra_kw = {"add_prefix_space": True} if isinstance(tokenizer, BartTokenizer) and not line.startswith(" ") else {}
    tokenizer.padding_side = padding_side
    return tokenizer(
        [line],
        max_length=max_length,
        padding="max_length" if pad_to_max_length else None,
        truncation=True,
        return_tensors=return_tensors,
        add_special_tokens=True,
        **extra_kw,
    )
def trim_batch(input_ids, pad_token_id, attention_mask=None):
    """Remove columns that are populated exclusively by pad_token_id."""
    keep_column_mask = input_ids.ne(pad_token_id).any(dim=0)
    if attention_mask is None:
        return input_ids[:, keep_column_mask]
    else:
        return (input_ids[:, keep_column_mask], attention_mask[:, keep_column_mask])
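# Minimal illustration (not in the original file): columns that are padding
# across the whole batch are dropped. With pad_token_id = 0:
#
#   >>> import torch
#   >>> trim_batch(torch.tensor([[5, 6, 0], [7, 0, 0]]), pad_token_id=0)
#   tensor([[5, 6],
#           [7, 0]])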
class Seq2SeqDataset(Dataset):
    def __init__(
        self,
        tokenizer,
        data_dir,
        max_source_length,
        max_target_length,
        type_path="train",
        n_obs=None,
        src_lang=None,
        tgt_lang=None,
        prefix="",
    ):
        super().__init__()
        self.src_file = Path(data_dir).joinpath(type_path + ".source")
        self.tgt_file = Path(data_dir).joinpath(type_path + ".target")
        self.src_lens = self.get_char_lens(self.src_file)
        self.max_source_length = max_source_length
        self.max_target_length = max_target_length
        assert min(self.src_lens) > 0, f"found empty line in {self.src_file}"
        self.tokenizer = tokenizer
        self.prefix = prefix
        if n_obs is not None:
            self.src_lens = self.src_lens[:n_obs]
        self.src_lang = src_lang
        self.tgt_lang = tgt_lang

    def __len__(self):
        return len(self.src_lens)

    def __getitem__(self, index) -> Dict[str, torch.Tensor]:
        index = index + 1  # linecache starts at 1
        source_line = self.prefix + linecache.getline(str(self.src_file), index).rstrip("\n")
        tgt_line = linecache.getline(str(self.tgt_file), index).rstrip("\n")
        assert source_line, f"empty source line for index {index}"
        assert tgt_line, f"empty tgt line for index {index}"

        # Need to add eos token manually for T5
        if isinstance(self.tokenizer, T5Tokenizer):
            source_line += self.tokenizer.eos_token
            tgt_line += self.tokenizer.eos_token

        # Pad source and target to the right
        source_tokenizer = (
            self.tokenizer.question_encoder if isinstance(self.tokenizer, RagTokenizer) else self.tokenizer
        )
        target_tokenizer = self.tokenizer.generator if isinstance(self.tokenizer, RagTokenizer) else self.tokenizer

        source_inputs = encode_line(source_tokenizer, source_line, self.max_source_length, "right")
        target_inputs = encode_line(target_tokenizer, tgt_line, self.max_target_length, "right")

        source_ids = source_inputs["input_ids"].squeeze()
        target_ids = target_inputs["input_ids"].squeeze()
        src_mask = source_inputs["attention_mask"].squeeze()
        return {
            "input_ids": source_ids,
            "attention_mask": src_mask,
            "decoder_input_ids": target_ids,
        }

    @staticmethod
    def get_char_lens(data_file):
        return [len(x) for x in Path(data_file).open().readlines()]

    def collate_fn(self, batch) -> Dict[str, torch.Tensor]:
        input_ids = torch.stack([x["input_ids"] for x in batch])
        masks = torch.stack([x["attention_mask"] for x in batch])
        target_ids = torch.stack([x["decoder_input_ids"] for x in batch])
        tgt_pad_token_id = (
            self.tokenizer.generator.pad_token_id
            if isinstance(self.tokenizer, RagTokenizer)
            else self.tokenizer.pad_token_id
        )
        src_pad_token_id = (
            self.tokenizer.question_encoder.pad_token_id
            if isinstance(self.tokenizer, RagTokenizer)
            else self.tokenizer.pad_token_id
        )
        y = trim_batch(target_ids, tgt_pad_token_id)
        source_ids, source_mask = trim_batch(input_ids, src_pad_token_id, attention_mask=masks)
        batch = {
            "input_ids": source_ids,
            "attention_mask": source_mask,
            "decoder_input_ids": y,
        }
        return batch
logger = getLogger(__name__)
def flatten_list(summary_ids: List[List]) -> List:
    return list(itertools.chain.from_iterable(summary_ids))


def save_git_info(folder_path: str) -> None:
    """Save git information to output_dir/git_log.json"""
    repo_infos = get_git_info()
    save_json(repo_infos, os.path.join(folder_path, "git_log.json"))


def save_json(content, path, indent=4, **json_dump_kwargs):
    with open(path, "w") as f:
        json.dump(content, f, indent=indent, **json_dump_kwargs)


def load_json(path):
    with open(path) as f:
        return json.load(f)


def get_git_info():
    repo = git.Repo(search_parent_directories=True)
    repo_infos = {
        "repo_id": str(repo),
        "repo_sha": str(repo.head.object.hexsha),
        "repo_branch": str(repo.active_branch),
        "hostname": str(socket.gethostname()),
    }
    return repo_infos


def lmap(f: Callable, x: Iterable) -> List:
    """list(map(f, x))"""
    return list(map(f, x))


def pickle_save(obj, path):
    """pickle.dump(obj, path)"""
    with open(path, "wb") as f:
        return pickle.dump(obj, f)


def normalize_answer(s):
    """Lower text and remove punctuation, articles and extra whitespace."""

    def remove_articles(text):
        return re.sub(r"\b(a|an|the)\b", " ", text)

    def white_space_fix(text):
        return " ".join(text.split())

    def remove_punc(text):
        exclude = set(string.punctuation)
        return "".join(ch for ch in text if ch not in exclude)

    def lower(text):
        return text.lower()

    return white_space_fix(remove_articles(remove_punc(lower(s))))


def f1_score(prediction, ground_truth):
    prediction_tokens = normalize_answer(prediction).split()
    ground_truth_tokens = normalize_answer(ground_truth).split()
    common = Counter(prediction_tokens) & Counter(ground_truth_tokens)
    num_same = sum(common.values())
    if num_same == 0:
        return 0
    precision = 1.0 * num_same / len(prediction_tokens)
    recall = 1.0 * num_same / len(ground_truth_tokens)
    f1 = (2 * precision * recall) / (precision + recall)
    return f1
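# Worked example (not in the original file): "The cat sat" vs "a cat sat down"
# share the tokens {"cat", "sat"} once articles are stripped, so
# precision = 2/2, recall = 2/3 and
#   f1 = 2 * 1.0 * (2/3) / (1.0 + 2/3) = 0.8
#
#   >>> f1_score("The cat sat", "a cat sat down")
#   0.8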
def exact_match_score(prediction, ground_truth):
    return normalize_answer(prediction) == normalize_answer(ground_truth)


def calculate_exact_match(output_lns: List[str], reference_lns: List[str]) -> Dict:
    assert len(output_lns) == len(reference_lns)
    em = 0
    for hypo, pred in zip(output_lns, reference_lns):
        em += exact_match_score(hypo, pred)
    if len(output_lns) > 0:
        em /= len(output_lns)
    return {"em": em}


def is_rag_model(model_prefix):
    return model_prefix.startswith("rag")


def set_extra_model_params(extra_params, hparams, config):
    equivalent_param = {p: p for p in extra_params}
    # T5 models don't have `dropout` param, they have `dropout_rate` instead
    equivalent_param["dropout"] = "dropout_rate"
    for p in extra_params:
        if getattr(hparams, p, None):
            if not hasattr(config, p) and not hasattr(config, equivalent_param[p]):
                logger.info("config doesn't have a `{}` attribute".format(p))
                delattr(hparams, p)
                continue
            set_p = p if hasattr(config, p) else equivalent_param[p]
            setattr(config, set_p, getattr(hparams, p))
            delattr(hparams, p)
    return hparams, config
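# Usage sketch (not in the original file): `set_extra_model_params` moves extra
# hyperparameters onto a model config, mapping `dropout` to T5's `dropout_rate`
# when needed. SimpleNamespace stands in for real hparams/config objects here.
if __name__ == "__main__":
    from types import SimpleNamespace

    hparams = SimpleNamespace(dropout=0.1)
    config = SimpleNamespace(dropout_rate=0.0)
    hparams, config = set_extra_model_params(["dropout"], hparams, config)
    print(config.dropout_rate)  # 0.1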
"""simple docstring"""
from __future__ import annotations
from random import choice
def random_pivot(lst):
    """Choose a random pivot element from the list."""
    return choice(lst)


def kth_number(lst: list[int], k: int) -> int:
    """Return the k-th smallest element of `lst` (1-indexed) via quickselect."""
    pivot = random_pivot(lst)

    # partition based on pivot
    # linear time
    small = [e for e in lst if e < pivot]
    big = [e for e in lst if e > pivot]

    # if we get lucky, pivot might be the element we want.
    # we can easily see this:
    # small (elements smaller than k)
    # + pivot (kth element)
    # + big (elements larger than k)
    if len(small) == k - 1:
        return pivot
    # pivot is in elements bigger than k
    elif len(small) < k - 1:
        return kth_number(big, k - len(small) - 1)
    # pivot is in elements smaller than k
    else:
        return kth_number(small, k)
if __name__ == "__main__":
import doctest
doctest.testmod()
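# Quick check (not in the original file): for a list without duplicate
# elements, quickselect matches a full sort:
#   >>> kth_number([2, 1, 3, 4, 5], 3)
#   3
#   >>> sorted([2, 1, 3, 4, 5])[3 - 1]
#   3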
import random


class Onepad:
    @staticmethod
    def encrypt(text: str) -> tuple[list[int], list[int]]:
        """Encrypt `text` with a random one-time key; returns (cipher, key)."""
        plain = [ord(i) for i in text]
        key = []
        cipher = []
        for i in plain:
            k = random.randint(1, 300)
            c = (i + k) * k
            cipher.append(c)
            key.append(k)
        return cipher, key

    @staticmethod
    def decrypt(cipher: list[int], key: list[int]) -> str:
        """Invert `encrypt` given the cipher values and the matching key."""
        plain = []
        for i in range(len(key)):
            p = int((cipher[i] - (key[i]) ** 2) / key[i])
            plain.append(chr(p))
        return "".join(plain)


if __name__ == "__main__":
    c, k = Onepad().encrypt("Hello")
    print(c, k)
    print(Onepad().decrypt(c, k))
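# Round-trip check (not in the original file): decrypt(encrypt(text)) returns
# the original text, since ((i + k) * k - k**2) / k == i exactly.
if __name__ == "__main__":
    message = "attack at dawn"
    cipher_, key_ = Onepad().encrypt(message)
    assert Onepad().decrypt(cipher_, key_) == message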
"""simple docstring"""
import argparse
import csv
import logging
import os
import random
import numpy as np
import torch
from torch.utils.data import DataLoader, RandomSampler, SequentialSampler, TensorDataset
from tqdm import tqdm, trange
from transformers import (
CONFIG_NAME,
WEIGHTS_NAME,
AdamW,
OpenAIGPTDoubleHeadsModel,
OpenAIGPTTokenizer,
get_linear_schedule_with_warmup,
)
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''', datefmt='''%m/%d/%Y %H:%M:%S''', level=logging.INFO
)
logger = logging.getLogger(__name__)
def accuracy(out, labels):
    outputs = np.argmax(out, axis=1)
    return np.sum(outputs == labels)
def load_rocstories_dataset(dataset_path):
    """Output a list of tuples (story, 1st continuation, 2nd continuation, label)."""
    with open(dataset_path, encoding="utf_8") as f:
        f = csv.reader(f)
        output = []
        next(f)  # skip the first line
        for line in tqdm(f):
            output.append((" ".join(line[1:5]), line[5], line[6], int(line[-1]) - 1))
    return output
def pre_process_datasets(encoded_datasets, input_len, cap_length, start_token, delimiter_token, clf_token):
    """Build (input_ids, mc_token_ids, lm_labels, mc_labels) tensors for each dataset,
    laying out every example as [start] story [delimiter] continuation [classify]."""
    tensor_datasets = []
    for dataset in encoded_datasets:
        n_batch = len(dataset)
        input_ids = np.zeros((n_batch, 2, input_len), dtype=np.int64)
        mc_token_ids = np.zeros((n_batch, 2), dtype=np.int64)
        lm_labels = np.full((n_batch, 2, input_len), fill_value=-100, dtype=np.int64)
        mc_labels = np.zeros((n_batch,), dtype=np.int64)
        for (
            i,
            (story, cont1, cont2, mc_label),
        ) in enumerate(dataset):
            with_cont1 = [start_token] + story[:cap_length] + [delimiter_token] + cont1[:cap_length] + [clf_token]
            with_cont2 = [start_token] + story[:cap_length] + [delimiter_token] + cont2[:cap_length] + [clf_token]
            input_ids[i, 0, : len(with_cont1)] = with_cont1
            input_ids[i, 1, : len(with_cont2)] = with_cont2
            mc_token_ids[i, 0] = len(with_cont1) - 1
            mc_token_ids[i, 1] = len(with_cont2) - 1
            lm_labels[i, 0, : len(with_cont1)] = with_cont1
            lm_labels[i, 1, : len(with_cont2)] = with_cont2
            mc_labels[i] = mc_label
        all_inputs = (input_ids, mc_token_ids, lm_labels, mc_labels)
        tensor_datasets.append(tuple(torch.tensor(t) for t in all_inputs))
    return tensor_datasets
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument("--model_name", type=str, default="openai-gpt", help="pretrained model name")
    parser.add_argument("--do_train", action="store_true", help="Whether to run training.")
    parser.add_argument("--do_eval", action="store_true", help="Whether to run eval on the dev set.")
    parser.add_argument(
        "--output_dir",
        default=None,
        type=str,
        required=True,
        help="The output directory where the model predictions and checkpoints will be written.",
    )
    parser.add_argument("--train_dataset", type=str, default="")
    parser.add_argument("--eval_dataset", type=str, default="")
    parser.add_argument("--seed", type=int, default=42)
    parser.add_argument("--num_train_epochs", type=int, default=3)
    parser.add_argument("--train_batch_size", type=int, default=8)
    parser.add_argument("--eval_batch_size", type=int, default=16)
    parser.add_argument("--adam_epsilon", default=1e-8, type=float, help="Epsilon for Adam optimizer.")
    parser.add_argument("--max_grad_norm", type=int, default=1)
    parser.add_argument(
        "--max_steps",
        default=-1,
        type=int,
        help="If > 0: set total number of training steps to perform. Override num_train_epochs.",
    )
    parser.add_argument(
        "--gradient_accumulation_steps",
        type=int,
        default=1,
        help="Number of updates steps to accumulate before performing a backward/update pass.",
    )
    parser.add_argument("--learning_rate", type=float, default=6.25e-5)
    parser.add_argument("--warmup_steps", default=0, type=int, help="Linear warmup over warmup_steps.")
    parser.add_argument("--lr_schedule", type=str, default="warmup_linear")
    parser.add_argument("--weight_decay", type=float, default=0.01)
    parser.add_argument("--lm_coef", type=float, default=0.9)
    parser.add_argument("--n_valid", type=int, default=374)
    parser.add_argument("--server_ip", type=str, default="", help="Can be used for distant debugging.")
    parser.add_argument("--server_port", type=str, default="", help="Can be used for distant debugging.")
    args = parser.parse_args()
    print(args)

    if args.server_ip and args.server_port:
        # Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script
        import ptvsd

        print("Waiting for debugger attach")
        ptvsd.enable_attach(address=(args.server_ip, args.server_port), redirect_output=True)
        ptvsd.wait_for_attach()

    random.seed(args.seed)
    np.random.seed(args.seed)
    torch.manual_seed(args.seed)
    torch.cuda.manual_seed_all(args.seed)

    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    n_gpu = torch.cuda.device_count()
    logger.info("device: {}, n_gpu {}".format(device, n_gpu))

    if not args.do_train and not args.do_eval:
        raise ValueError("At least one of `do_train` or `do_eval` must be True.")

    if not os.path.exists(args.output_dir):
        os.makedirs(args.output_dir)

    # Load tokenizer and model
    # This loading functions also add new tokens and embeddings called `special tokens`
    # These new embeddings will be fine-tuned on the RocStories dataset
    special_tokens = ["_start_", "_delimiter_", "_classify_"]
    tokenizer = OpenAIGPTTokenizer.from_pretrained(args.model_name)
    tokenizer.add_tokens(special_tokens)
    special_tokens_ids = tokenizer.convert_tokens_to_ids(special_tokens)
    model = OpenAIGPTDoubleHeadsModel.from_pretrained(args.model_name)
    model.resize_token_embeddings(len(tokenizer))
    model.to(device)

    # Load and encode the datasets
    def tokenize_and_encode(obj):
        if isinstance(obj, str):
            return tokenizer.convert_tokens_to_ids(tokenizer.tokenize(obj))
        elif isinstance(obj, int):
            return obj
        return [tokenize_and_encode(o) for o in obj]

    logger.info("Encoding dataset...")
    train_dataset = load_rocstories_dataset(args.train_dataset)
    eval_dataset = load_rocstories_dataset(args.eval_dataset)
    datasets = (train_dataset, eval_dataset)
    encoded_datasets = tokenize_and_encode(datasets)

    # Compute the max input length for the Transformer
    max_length = model.config.n_positions // 2 - 2
    input_length = max(
        len(story[:max_length]) + max(len(cont1[:max_length]), len(cont2[:max_length])) + 3
        for dataset in encoded_datasets
        for story, cont1, cont2, _ in dataset
    )
    input_length = min(input_length, model.config.n_positions)  # Max size of input for the pre-trained model

    # Prepare inputs tensors and dataloaders
    tensor_datasets = pre_process_datasets(encoded_datasets, input_length, max_length, *special_tokens_ids)
    train_tensor_dataset, eval_tensor_dataset = tensor_datasets[0], tensor_datasets[1]

    train_data = TensorDataset(*train_tensor_dataset)
    train_sampler = RandomSampler(train_data)
    train_dataloader = DataLoader(train_data, sampler=train_sampler, batch_size=args.train_batch_size)

    eval_data = TensorDataset(*eval_tensor_dataset)
    eval_sampler = SequentialSampler(eval_data)
    eval_dataloader = DataLoader(eval_data, sampler=eval_sampler, batch_size=args.eval_batch_size)

    # Prepare optimizer
    if args.do_train:
        if args.max_steps > 0:
            t_total = args.max_steps
            args.num_train_epochs = args.max_steps // (len(train_dataloader) // args.gradient_accumulation_steps) + 1
        else:
            t_total = len(train_dataloader) // args.gradient_accumulation_steps * args.num_train_epochs

        param_optimizer = list(model.named_parameters())
        no_decay = ["bias", "LayerNorm.bias", "LayerNorm.weight"]
        optimizer_grouped_parameters = [
            {
                "params": [p for n, p in param_optimizer if not any(nd in n for nd in no_decay)],
                "weight_decay": args.weight_decay,
            },
            {"params": [p for n, p in param_optimizer if any(nd in n for nd in no_decay)], "weight_decay": 0.0},
        ]
        optimizer = AdamW(optimizer_grouped_parameters, lr=args.learning_rate, eps=args.adam_epsilon)
        scheduler = get_linear_schedule_with_warmup(
            optimizer, num_warmup_steps=args.warmup_steps, num_training_steps=t_total
        )

    if args.do_train:
        nb_tr_steps, tr_loss, exp_average_loss = 0, 0, None
        model.train()
        for _ in trange(int(args.num_train_epochs), desc="Epoch"):
            tr_loss = 0
            nb_tr_steps = 0
            tqdm_bar = tqdm(train_dataloader, desc="Training")
            for step, batch in enumerate(tqdm_bar):
                batch = tuple(t.to(device) for t in batch)
                input_ids, mc_token_ids, lm_labels, mc_labels = batch
                losses = model(input_ids, mc_token_ids=mc_token_ids, lm_labels=lm_labels, mc_labels=mc_labels)
                loss = args.lm_coef * losses[0] + losses[1]
                loss.backward()
                optimizer.step()
                scheduler.step()
                optimizer.zero_grad()
                tr_loss += loss.item()
                exp_average_loss = (
                    loss.item() if exp_average_loss is None else 0.7 * exp_average_loss + 0.3 * loss.item()
                )
                nb_tr_steps += 1
                tqdm_bar.desc = "Training loss: {:.2e} lr: {:.2e}".format(exp_average_loss, scheduler.get_lr()[0])

    # Save a trained model
    if args.do_train:
        # Save a trained model, configuration and tokenizer
        model_to_save = model.module if hasattr(model, "module") else model  # Only save the model itself

        # If we save using the predefined names, we can load using `from_pretrained`
        output_model_file = os.path.join(args.output_dir, WEIGHTS_NAME)
        output_config_file = os.path.join(args.output_dir, CONFIG_NAME)

        torch.save(model_to_save.state_dict(), output_model_file)
        model_to_save.config.to_json_file(output_config_file)
        tokenizer.save_vocabulary(args.output_dir)

        # Load a trained model and vocabulary that you have fine-tuned
        model = OpenAIGPTDoubleHeadsModel.from_pretrained(args.output_dir)
        tokenizer = OpenAIGPTTokenizer.from_pretrained(args.output_dir)
        model.to(device)

    if args.do_eval:
        model.eval()
        eval_loss, eval_accuracy = 0, 0
        nb_eval_steps, nb_eval_examples = 0, 0
        for batch in tqdm(eval_dataloader, desc="Evaluating"):
            batch = tuple(t.to(device) for t in batch)
            input_ids, mc_token_ids, lm_labels, mc_labels = batch
            with torch.no_grad():
                _, mc_loss, _, mc_logits = model(
                    input_ids, mc_token_ids=mc_token_ids, lm_labels=lm_labels, mc_labels=mc_labels
                )
            mc_logits = mc_logits.detach().cpu().numpy()
            mc_labels = mc_labels.to("cpu").numpy()
            tmp_eval_accuracy = accuracy(mc_logits, mc_labels)

            eval_loss += mc_loss.mean().item()
            eval_accuracy += tmp_eval_accuracy

            nb_eval_examples += input_ids.size(0)
            nb_eval_steps += 1

        eval_loss = eval_loss / nb_eval_steps
        eval_accuracy = eval_accuracy / nb_eval_examples
        train_loss = tr_loss / nb_tr_steps if args.do_train else None
        result = {"eval_loss": eval_loss, "eval_accuracy": eval_accuracy, "train_loss": train_loss}

        output_eval_file = os.path.join(args.output_dir, "eval_results.txt")
        with open(output_eval_file, "w") as writer:
            logger.info("***** Eval results *****")
            for key in sorted(result.keys()):
                logger.info("  %s = %s", key, str(result[key]))
                writer.write("%s = %s\n" % (key, str(result[key])))
if __name__ == "__main__":
main()
'''simple docstring'''
import argparse
import json
from collections import OrderedDict
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
SegformerConfig,
SegformerForImageClassification,
SegformerForSemanticSegmentation,
SegformerImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def rename_keys(state_dict, encoder_only=False):
    """Rewrite original SegFormer checkpoint keys into the Hugging Face layout."""
    new_state_dict = OrderedDict()
    for key, value in state_dict.items():
        if encoder_only and not key.startswith("head"):
            key = "segformer.encoder." + key
        if key.startswith("backbone"):
            key = key.replace("backbone", "segformer.encoder")
        if "patch_embed" in key:
            # replace for example patch_embed1 by patch_embeddings.0
            idx = key[key.find("patch_embed") + len("patch_embed")]
            key = key.replace(f"patch_embed{idx}", f"patch_embeddings.{int(idx)-1}")
        if "norm" in key:
            key = key.replace("norm", "layer_norm")
        if "segformer.encoder.layer_norm" in key:
            # replace for example layer_norm1 by layer_norm.0
            idx = key[key.find("segformer.encoder.layer_norm") + len("segformer.encoder.layer_norm")]
            key = key.replace(f"layer_norm{idx}", f"layer_norm.{int(idx)-1}")
        if "layer_norm1" in key:
            key = key.replace("layer_norm1", "layer_norm_1")
        if "layer_norm2" in key:
            key = key.replace("layer_norm2", "layer_norm_2")
        if "block" in key:
            # replace for example block1 by block.0
            idx = key[key.find("block") + len("block")]
            key = key.replace(f"block{idx}", f"block.{int(idx)-1}")
        if "attn.q" in key:
            key = key.replace("attn.q", "attention.self.query")
        if "attn.proj" in key:
            key = key.replace("attn.proj", "attention.output.dense")
        if "attn" in key:
            key = key.replace("attn", "attention.self")
        if "fc1" in key:
            key = key.replace("fc1", "dense1")
        if "fc2" in key:
            key = key.replace("fc2", "dense2")
        if "linear_pred" in key:
            key = key.replace("linear_pred", "classifier")
        if "linear_fuse" in key:
            key = key.replace("linear_fuse.conv", "linear_fuse")
            key = key.replace("linear_fuse.bn", "batch_norm")
        if "linear_c" in key:
            # replace for example linear_c4 by linear_c.3
            idx = key[key.find("linear_c") + len("linear_c")]
            key = key.replace(f"linear_c{idx}", f"linear_c.{int(idx)-1}")
        if key.startswith("head"):
            key = key.replace("head", "classifier")
        new_state_dict[key] = value
    return new_state_dict
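# Tiny illustration (not in the original file): how one original SegFormer key
# is rewritten into the Hugging Face checkpoint layout.
#
#   >>> rename_keys({"backbone.patch_embed1.proj.weight": 0})
#   OrderedDict([('segformer.encoder.patch_embeddings.0.proj.weight', 0)])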
def read_in_k_v(state_dict, config):
    for i in range(config.num_encoder_blocks):
        for j in range(config.depths[i]):
            # read in weights + bias of keys and values (which is a single matrix in the original implementation)
            kv_weight = state_dict.pop(f"segformer.encoder.block.{i}.{j}.attention.self.kv.weight")
            kv_bias = state_dict.pop(f"segformer.encoder.block.{i}.{j}.attention.self.kv.bias")
            # next, add keys and values (in that order) to the state dict
            state_dict[f"segformer.encoder.block.{i}.{j}.attention.self.key.weight"] = kv_weight[
                : config.hidden_sizes[i], :
            ]
            state_dict[f"segformer.encoder.block.{i}.{j}.attention.self.key.bias"] = kv_bias[: config.hidden_sizes[i]]
            state_dict[f"segformer.encoder.block.{i}.{j}.attention.self.value.weight"] = kv_weight[
                config.hidden_sizes[i] :, :
            ]
            state_dict[f"segformer.encoder.block.{i}.{j}.attention.self.value.bias"] = kv_bias[
                config.hidden_sizes[i] :
            ]
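# Shape note (not in the original file): the original checkpoint stores K and V
# as one stacked `kv` matrix of shape (2 * hidden_size, hidden_size); the split
# above assigns rows [:hidden_size] to the key projection and rows
# [hidden_size:] to the value projection.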
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    image = Image.open(requests.get(url, stream=True).raw)
    return image
@torch.no_grad()
def convert_segformer_checkpoint(model_name, checkpoint_path, pytorch_dump_folder_path):
    """Copy/paste/tweak the original SegFormer checkpoint into the transformers layout."""
    config = SegformerConfig()
    encoder_only = False
# set attributes based on model_name
UpperCAmelCase_ : Optional[Any] = "huggingface/label-files"
if "segformer" in model_name:
        size = model_name[len("segformer.") : len("segformer.") + 2]
if "ade" in model_name:
            config.num_labels = 150
            filename = "ade20k-id2label.json"
            expected_shape = (1, 150, 128, 128)
elif "city" in model_name:
            config.num_labels = 19
            filename = "cityscapes-id2label.json"
            expected_shape = (1, 19, 128, 128)
else:
raise ValueError(F'''Model {model_name} not supported''' )
elif "mit" in model_name:
        encoder_only = True
        size = model_name[4:6]
        config.num_labels = 1000
        filename = "imagenet-1k-id2label.json"
        expected_shape = (1, 1000)
else:
raise ValueError(F'''Model {model_name} not supported''' )
# set config attributes
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}
if size == "b0":
pass
elif size == "b1":
UpperCAmelCase_ : Optional[Any] = [64, 1_28, 3_20, 5_12]
UpperCAmelCase_ : Dict = 2_56
elif size == "b2":
UpperCAmelCase_ : Tuple = [64, 1_28, 3_20, 5_12]
UpperCAmelCase_ : Union[str, Any] = 7_68
UpperCAmelCase_ : Union[str, Any] = [3, 4, 6, 3]
elif size == "b3":
UpperCAmelCase_ : Union[str, Any] = [64, 1_28, 3_20, 5_12]
UpperCAmelCase_ : int = 7_68
UpperCAmelCase_ : Optional[Any] = [3, 4, 18, 3]
elif size == "b4":
UpperCAmelCase_ : List[Any] = [64, 1_28, 3_20, 5_12]
UpperCAmelCase_ : List[Any] = 7_68
UpperCAmelCase_ : Tuple = [3, 8, 27, 3]
elif size == "b5":
UpperCAmelCase_ : Dict = [64, 1_28, 3_20, 5_12]
UpperCAmelCase_ : Optional[int] = 7_68
UpperCAmelCase_ : List[Any] = [3, 6, 40, 3]
else:
raise ValueError(F'''Size {size} not supported''' )
# load image processor (only resize + normalize)
    image_processor = SegformerImageProcessor(
        image_scale=(512, 512), keep_ratio=False, align=False, do_random_crop=False
    )

    # prepare image
    image = prepare_img()
    pixel_values = image_processor(images=image, return_tensors="pt").pixel_values
logger.info(F'''Converting model {model_name}...''' )
# load original state dict
    if encoder_only:
        state_dict = torch.load(checkpoint_path, map_location=torch.device("cpu"))
    else:
        state_dict = torch.load(checkpoint_path, map_location=torch.device("cpu"))["state_dict"]

    # rename keys
    state_dict = rename_keys(state_dict, encoder_only=encoder_only)
if not encoder_only:
del state_dict["decode_head.conv_seg.weight"]
del state_dict["decode_head.conv_seg.bias"]
# key and value matrices need special treatment
    read_in_k_v(state_dict, config)
# create HuggingFace model and load state dict
    if encoder_only:
        config.reshape_last_stage = False
        model = SegformerForImageClassification(config)
    else:
        model = SegformerForSemanticSegmentation(config)
    model.load_state_dict(state_dict)
    model.eval()
# forward pass
    outputs = model(pixel_values)
    logits = outputs.logits
# set expected_slice based on model name
# ADE20k checkpoints
if model_name == "segformer.b0.512x512.ade.160k":
        expected_slice = torch.tensor(
[
[[-4.6_310, -5.5_232, -6.2_356], [-5.1_921, -6.1_444, -6.5_996], [-5.4_424, -6.2_790, -6.7_574]],
[[-12.1_391, -13.3_122, -13.9_554], [-12.8_732, -13.9_352, -14.3_563], [-12.9_438, -13.8_226, -14.2_513]],
[[-12.5_134, -13.4_686, -14.4_915], [-12.8_669, -14.4_343, -14.7_758], [-13.2_523, -14.5_819, -15.0_694]],
] )
elif model_name == "segformer.b1.512x512.ade.160k":
        expected_slice = torch.tensor(
[
[[-7.5_820, -8.7_231, -8.3_215], [-8.0_600, -10.3_529, -10.0_304], [-7.5_208, -9.4_103, -9.6_239]],
[[-12.6_918, -13.8_994, -13.7_137], [-13.3_196, -15.7_523, -15.4_789], [-12.9_343, -14.8_757, -14.9_689]],
[[-11.1_911, -11.9_421, -11.3_243], [-11.3_342, -13.6_839, -13.3_581], [-10.3_909, -12.1_832, -12.4_858]],
] )
elif model_name == "segformer.b2.512x512.ade.160k":
        expected_slice = torch.tensor(
[
[[-11.8_173, -14.3_850, -16.3_128], [-14.5_648, -16.5_804, -18.6_568], [-14.7_223, -15.7_387, -18.4_218]],
[[-15.7_290, -17.9_171, -19.4_423], [-18.3_105, -19.9_448, -21.4_661], [-17.9_296, -18.6_497, -20.7_910]],
[[-15.0_783, -17.0_336, -18.2_789], [-16.8_771, -18.6_870, -20.1_612], [-16.2_454, -17.1_426, -19.5_055]],
] )
elif model_name == "segformer.b3.512x512.ade.160k":
        expected_slice = torch.tensor(
[
[[-9.0_878, -10.2_081, -10.1_891], [-9.3_144, -10.7_941, -10.9_843], [-9.2_294, -10.3_855, -10.5_704]],
[[-12.2_316, -13.9_068, -13.6_102], [-12.9_161, -14.3_702, -14.3_235], [-12.5_233, -13.7_174, -13.7_932]],
[[-14.6_275, -15.2_490, -14.9_727], [-14.3_400, -15.9_687, -16.2_827], [-14.1_484, -15.4_033, -15.8_937]],
] )
elif model_name == "segformer.b4.512x512.ade.160k":
        expected_slice = torch.tensor(
[
[[-12.3_144, -13.2_447, -14.0_802], [-13.3_614, -14.5_816, -15.6_117], [-13.3_340, -14.4_433, -16.2_219]],
[[-19.2_781, -20.4_128, -20.7_506], [-20.6_153, -21.6_566, -22.0_998], [-19.9_800, -21.0_430, -22.1_494]],
[[-18.8_739, -19.7_804, -21.1_834], [-20.1_233, -21.6_765, -23.2_944], [-20.0_315, -21.2_641, -23.6_944]],
] )
elif model_name == "segformer.b5.640x640.ade.160k":
        expected_slice = torch.tensor(
[
[[-9.5_524, -12.0_835, -11.7_348], [-10.5_229, -13.6_446, -14.5_662], [-9.5_842, -12.8_851, -13.9_414]],
[[-15.3_432, -17.5_323, -17.0_818], [-16.3_330, -18.9_255, -19.2_101], [-15.1_340, -17.7_848, -18.3_971]],
[[-12.6_072, -14.9_486, -14.6_631], [-13.7_629, -17.0_907, -17.7_745], [-12.7_899, -16.1_695, -17.1_671]],
] )
# Cityscapes checkpoints
elif model_name == "segformer.b0.1024x1024.city.160k":
        expected_slice = torch.tensor(
[
[[-11.9_295, -13.4_057, -14.8_106], [-13.3_431, -14.8_179, -15.3_781], [-14.2_836, -15.5_942, -16.1_588]],
[[-11.4_906, -12.8_067, -13.6_564], [-13.1_189, -14.0_500, -14.1_543], [-13.8_748, -14.5_136, -14.8_789]],
[[0.5_374, 0.1_067, -0.4_742], [0.1_141, -0.2_255, -0.7_099], [-0.3_000, -0.5_924, -1.3_105]],
] )
elif model_name == "segformer.b0.512x1024.city.160k":
        expected_slice = torch.tensor(
[
[[-7.8_217, -9.8_767, -10.1_717], [-9.4_438, -10.9_058, -11.4_047], [-9.7_939, -12.3_495, -12.1_079]],
[[-7.1_514, -9.5_336, -10.0_860], [-9.7_776, -11.6_822, -11.8_439], [-10.1_411, -12.7_655, -12.8_972]],
[[0.3_021, 0.0_805, -0.2_310], [-0.0_328, -0.1_605, -0.2_714], [-0.1_408, -0.5_477, -0.6_976]],
] )
elif model_name == "segformer.b0.640x1280.city.160k":
        expected_slice = torch.tensor(
[
[
[-1.1372E01, -1.2787E01, -1.3477E01],
[-1.2536E01, -1.4194E01, -1.4409E01],
[-1.3217E01, -1.4888E01, -1.5327E01],
],
[
[-1.4791E01, -1.7122E01, -1.8277E01],
[-1.7163E01, -1.9192E01, -1.9533E01],
[-1.7897E01, -1.9991E01, -2.0315E01],
],
[
[7.6723E-01, 4.1921E-01, -7.7878E-02],
[4.7772E-01, 9.5557E-03, -2.8082E-01],
[3.6032E-01, -2.4826E-01, -5.1168E-01],
],
] )
elif model_name == "segformer.b0.768x768.city.160k":
        expected_slice = torch.tensor(
[
[[-9.4_959, -11.3_087, -11.7_479], [-11.0_025, -12.6_540, -12.3_319], [-11.4_064, -13.0_487, -12.9_905]],
[[-9.8_905, -11.3_084, -12.0_854], [-11.1_726, -12.7_698, -12.9_583], [-11.5_985, -13.3_278, -14.1_774]],
[[0.2_213, 0.0_192, -0.2_466], [-0.1_731, -0.4_213, -0.4_874], [-0.3_126, -0.6_541, -1.1_389]],
] )
elif model_name == "segformer.b1.1024x1024.city.160k":
        expected_slice = torch.tensor(
[
[[-13.5_748, -13.9_111, -12.6_500], [-14.3_500, -15.3_683, -14.2_328], [-14.7_532, -16.0_424, -15.6_087]],
[[-17.1_651, -15.8_725, -12.9_653], [-17.2_580, -17.3_718, -14.8_223], [-16.6_058, -16.8_783, -16.7_452]],
[[-3.6_456, -3.0_209, -1.4_203], [-3.0_797, -3.1_959, -2.0_000], [-1.8_757, -1.9_217, -1.6_997]],
] )
elif model_name == "segformer.b2.1024x1024.city.160k":
        expected_slice = torch.tensor(
[
[[-16.0_976, -16.4_856, -17.3_962], [-16.6_234, -19.0_342, -19.7_685], [-16.0_900, -18.0_661, -19.1_180]],
[[-18.4_750, -18.8_488, -19.5_074], [-19.4_030, -22.1_570, -22.5_977], [-19.1_191, -20.8_486, -22.3_783]],
[[-4.5_178, -5.5_037, -6.5_109], [-5.0_884, -7.2_174, -8.0_334], [-4.4_156, -5.8_117, -7.2_970]],
] )
elif model_name == "segformer.b3.1024x1024.city.160k":
        expected_slice = torch.tensor(
[
[[-14.2_081, -14.4_732, -14.1_977], [-14.5_867, -16.4_423, -16.6_356], [-13.4_441, -14.9_685, -16.8_696]],
[[-14.4_576, -14.7_073, -15.0_451], [-15.0_816, -17.6_237, -17.9_873], [-14.4_213, -16.0_199, -18.5_992]],
[[-4.7_349, -4.9_588, -5.0_966], [-4.3_210, -6.9_325, -7.2_591], [-3.4_312, -4.7_484, -7.1_917]],
] )
elif model_name == "segformer.b4.1024x1024.city.160k":
        expected_slice = torch.tensor(
[
[[-11.7_737, -11.9_526, -11.3_273], [-13.6_692, -14.4_574, -13.8_878], [-13.8_937, -14.6_924, -15.9_345]],
[[-14.6_706, -14.5_330, -14.1_306], [-16.1_502, -16.8_180, -16.4_269], [-16.8_338, -17.8_939, -20.1_746]],
[[1.0_491, 0.8_289, 1.0_310], [1.1_044, 0.5_219, 0.8_055], [1.0_899, 0.6_926, 0.5_590]],
] )
elif model_name == "segformer.b5.1024x1024.city.160k":
        expected_slice = torch.tensor(
[
[[-12.5_641, -13.4_777, -13.0_684], [-13.9_587, -15.8_983, -16.6_557], [-13.3_109, -15.7_350, -16.3_141]],
[[-14.7_074, -15.4_352, -14.5_944], [-16.6_353, -18.1_663, -18.6_120], [-15.1_702, -18.0_329, -18.1_547]],
[[-1.7_990, -2.0_951, -1.7_784], [-2.6_397, -3.8_245, -3.9_686], [-1.5_264, -2.8_126, -2.9_316]],
] )
else:
        predicted_class_idx = logits.argmax(-1).item()
        print("Predicted class:", model.config.id2label[predicted_class_idx])
# verify logits
if not encoder_only:
assert logits.shape == expected_shape
        assert torch.allclose(logits[0, :3, :3, :3], expected_slice, atol=1e-2)
# finally, save model and image processor
logger.info(F'''Saving PyTorch model and image processor to {pytorch_dump_folder_path}...''' )
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    model.save_pretrained(pytorch_dump_folder_path)
    image_processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
"""--model_name""",
default="""segformer.b0.512x512.ade.160k""",
type=str,
help="""Name of the model you'd like to convert.""",
)
parser.add_argument(
"""--checkpoint_path""", default=None, type=str, help="""Path to the original PyTorch checkpoint (.pth file)."""
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the folder to output PyTorch model."""
)
    args = parser.parse_args()
convert_segformer_checkpoint(args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path)
"""simple docstring"""
import unittest
import numpy as np
from transformers import RobertaConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_flax_available():
from transformers.models.roberta.modeling_flax_roberta import (
FlaxRobertaForCausalLM,
FlaxRobertaForMaskedLM,
FlaxRobertaForMultipleChoice,
FlaxRobertaForQuestionAnswering,
FlaxRobertaForSequenceClassification,
FlaxRobertaForTokenClassification,
FlaxRobertaModel,
)
class FlaxRobertaModelTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_attention_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_choices=4,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_choices = num_choices

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        attention_mask = None
        if self.use_attention_mask:
            attention_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        config = RobertaConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )

        return config, input_ids, token_type_ids, attention_mask

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": attention_mask}
        return config, inputs_dict

    def prepare_config_and_inputs_for_decoder(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size])
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)
        return (
            config,
            input_ids,
            token_type_ids,
            encoder_hidden_states,
            encoder_attention_mask,
        )
@require_flax
class __lowercase ( _UpperCamelCase , unittest.TestCase ):
'''simple docstring'''
__lowerCAmelCase = True
__lowerCAmelCase = (
(
FlaxRobertaModel,
FlaxRobertaForCausalLM,
FlaxRobertaForMaskedLM,
FlaxRobertaForSequenceClassification,
FlaxRobertaForTokenClassification,
FlaxRobertaForMultipleChoice,
FlaxRobertaForQuestionAnswering,
)
if is_flax_available()
else ()
)
def _lowerCamelCase ( self ):
__a : Dict = FlaxRobertaModelTester(self )
@slow
def _lowerCamelCase ( self ):
for model_class_name in self.all_model_classes:
__a : int = model_class_name.from_pretrained('''roberta-base''' , from_pt=_UpperCAmelCase )
__a : List[str] = model(np.ones((1, 1) ) )
self.assertIsNotNone(_UpperCAmelCase )
| 52 | 0 |
'''simple docstring'''
# Author: OMKAR PATHAK, Nwachukwu Chidiebere
# Use a Python dictionary to construct the graph.
from __future__ import annotations
from pprint import pformat
from typing import Generic, TypeVar
_UpperCAmelCase : int = TypeVar('''T''')
class __magic_name__ ( Generic[T] ):
def __init__( self , snake_case_ = True ):
lowercase ={} # dictionary of lists
lowercase =directed
def _A( self , snake_case_ , snake_case_ ):
if not self.directed: # For undirected graphs
# if both source vertex and destination vertex are both present in the
# adjacency list, add destination vertex to source vertex list of adjacent
# vertices and add source vertex to destination vertex list of adjacent
# vertices.
if source_vertex in self.adj_list and destination_vertex in self.adj_list:
self.adj_list[source_vertex].append(snake_case_ )
self.adj_list[destination_vertex].append(snake_case_ )
# if only source vertex is present in adjacency list, add destination vertex
# to source vertex list of adjacent vertices, then create a new vertex with
# destination vertex as key and assign a list containing the source vertex
            # as its first adjacent vertex.
elif source_vertex in self.adj_list:
self.adj_list[source_vertex].append(snake_case_ )
lowercase =[source_vertex]
# if only destination vertex is present in adjacency list, add source vertex
# to destination vertex list of adjacent vertices, then create a new vertex
            # with source vertex as key and assign a list containing the destination
            # vertex as its first adjacent vertex.
elif destination_vertex in self.adj_list:
self.adj_list[destination_vertex].append(snake_case_ )
lowercase =[destination_vertex]
# if both source vertex and destination vertex are not present in adjacency
# list, create a new vertex with source vertex as key and assign a list
# containing the destination vertex as it's first adjacent vertex also
# create a new vertex with destination vertex as key and assign a list
            # containing the source vertex as its first adjacent vertex.
else:
lowercase =[destination_vertex]
lowercase =[source_vertex]
else: # For directed graphs
# if both source vertex and destination vertex are present in adjacency
# list, add destination vertex to source vertex list of adjacent vertices.
if source_vertex in self.adj_list and destination_vertex in self.adj_list:
self.adj_list[source_vertex].append(snake_case_ )
# if only source vertex is present in adjacency list, add destination
# vertex to source vertex list of adjacent vertices and create a new vertex
# with destination vertex as key, which has no adjacent vertex
elif source_vertex in self.adj_list:
self.adj_list[source_vertex].append(snake_case_ )
lowercase =[]
# if only destination vertex is present in adjacency list, create a new
# vertex with source vertex as key and assign a list containing destination
# vertex as first adjacent vertex
elif destination_vertex in self.adj_list:
lowercase =[destination_vertex]
# if both source vertex and destination vertex are not present in adjacency
# list, create a new vertex with source vertex as key and a list containing
            # destination vertex as its first adjacent vertex. Then create a new vertex
# with destination vertex as key, which has no adjacent vertex
else:
lowercase =[destination_vertex]
lowercase =[]
return self
def __repr__( self ):
return pformat(self.adj_list )
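# --- Illustrative sketch (not part of the original file) ---
# The add_edge comments above describe four bookkeeping cases each for the
# undirected and directed variants. Since the renamed identifiers in the class
# no longer line up, this self-contained helper restates the same idea with
# assumed, readable names.
from collections import defaultdict
def build_adjacency_list(edges: list[tuple[str, str]], directed: bool = True) -> dict[str, list[str]]:
    """Collect edges into a dict of adjacency lists, as documented above."""
    adj: dict[str, list[str]] = defaultdict(list)
    for source, destination in edges:
        adj[source].append(destination)
        if not directed:
            adj[destination].append(source)
        elif destination not in adj:
            adj[destination] = []  # register sink-only vertices with an empty list
    return dict(adj)
# Example: an undirected triangle.
# build_adjacency_list([("a", "b"), ("b", "c"), ("c", "a")], directed=False)
# -> {'a': ['b', 'c'], 'b': ['a', 'c'], 'c': ['b', 'a']}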
| 72 |
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
A = logging.get_logger(__name__)
A = {
'''facebook/levit-128S''': '''https://huggingface.co/facebook/levit-128S/resolve/main/config.json''',
# See all LeViT models at https://huggingface.co/models?filter=levit
}
class __lowercase ( _UpperCamelCase ):
'''simple docstring'''
__lowerCAmelCase = '''levit'''
def __init__( self , _UpperCAmelCase=224 , _UpperCAmelCase=3 , _UpperCAmelCase=3 , _UpperCAmelCase=2 , _UpperCAmelCase=1 , _UpperCAmelCase=16 , _UpperCAmelCase=[128, 256, 384] , _UpperCAmelCase=[4, 8, 12] , _UpperCAmelCase=[4, 4, 4] , _UpperCAmelCase=[16, 16, 16] , _UpperCAmelCase=0 , _UpperCAmelCase=[2, 2, 2] , _UpperCAmelCase=[2, 2, 2] , _UpperCAmelCase=0.0_2 , **_UpperCAmelCase , ):
super().__init__(**_UpperCAmelCase )
__a : int = image_size
__a : List[Any] = num_channels
__a : Dict = kernel_size
__a : Optional[int] = stride
__a : Optional[int] = padding
__a : Dict = hidden_sizes
__a : int = num_attention_heads
__a : Optional[int] = depths
__a : str = key_dim
__a : Union[str, Any] = drop_path_rate
__a : Optional[Any] = patch_size
__a : Tuple = attention_ratio
__a : int = mlp_ratio
__a : int = initializer_range
__a : int = [
['''Subsample''', key_dim[0], hidden_sizes[0] // key_dim[0], 4, 2, 2],
['''Subsample''', key_dim[0], hidden_sizes[1] // key_dim[0], 4, 2, 2],
]
class __lowercase ( _UpperCamelCase ):
'''simple docstring'''
__lowerCAmelCase = version.parse('''1.11''' )
@property
def _lowerCamelCase ( self ):
return OrderedDict(
[
('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}),
] )
@property
def _lowerCamelCase ( self ):
return 1e-4
| 52 | 0 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
a_ : Any = logging.get_logger(__name__)
a_ : List[Any] = {'ctrl': 'https://huggingface.co/ctrl/resolve/main/config.json'}
class _snake_case ( A__ ):
_lowercase : Optional[Any] = '''ctrl'''
_lowercase : str = ['''past_key_values''']
_lowercase : Optional[Any] = {
'''max_position_embeddings''': '''n_positions''',
'''hidden_size''': '''n_embd''',
'''num_attention_heads''': '''n_head''',
'''num_hidden_layers''': '''n_layer''',
}
def __init__( self , a=24_6534 , a=256 , a=1280 , a=8192 , a=48 , a=16 , a=0.1 , a=0.1 , a=1E-6 , a=0.02 , a=True , **a , ) -> List[str]:
SCREAMING_SNAKE_CASE = vocab_size
SCREAMING_SNAKE_CASE = n_positions
SCREAMING_SNAKE_CASE = n_embd
SCREAMING_SNAKE_CASE = n_layer
SCREAMING_SNAKE_CASE = n_head
SCREAMING_SNAKE_CASE = dff
SCREAMING_SNAKE_CASE = resid_pdrop
SCREAMING_SNAKE_CASE = embd_pdrop
SCREAMING_SNAKE_CASE = layer_norm_epsilon
SCREAMING_SNAKE_CASE = initializer_range
SCREAMING_SNAKE_CASE = use_cache
super().__init__(**a)
| 73 |
"""simple docstring"""
def __A ( a_ :Tuple , a_ :Union[str, Any] , a_ :int=False) -> List[str]:
if isinstance(a_ , a_) and isinstance(a_ , a_):
__a : List[str] = len(set_a.intersection(a_))
if alternative_union:
__a : List[str] = len(a_) + len(a_)
else:
__a : int = len(set_a.union(a_))
return intersection / union
if isinstance(a_ , (list, tuple)) and isinstance(a_ , (list, tuple)):
__a : Union[str, Any] = [element for element in set_a if element in set_b]
if alternative_union:
__a : Union[str, Any] = len(a_) + len(a_)
return len(a_) / union
else:
__a : List[Any] = set_a + [element for element in set_b if element not in set_a]
return len(a_) / len(a_)
return None
if __name__ == "__main__":
A = {'''a''', '''b''', '''c''', '''d''', '''e'''}
A = {'''c''', '''d''', '''e''', '''f''', '''h''', '''i'''}
print(jaccard_similarity(set_a, set_b))
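# Worked numbers for the demo above (a sketch, assuming the standard Jaccard
# definition): the intersection {'c', 'd', 'e'} has size 3 and the union
# {'a', 'b', 'c', 'd', 'e', 'f', 'h', 'i'} has size 8, so the printed value is
# 3 / 8 = 0.375. With alternative_union=True the denominator would instead be
# len(set_a) + len(set_b) = 5 + 6 = 11, giving 3 / 11.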
| 52 | 0 |
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
if is_tf_available():
import numpy as np
import tensorflow as tf
from transformers import TFXLMRobertaModel
@require_tf
@require_sentencepiece
@require_tokenizers
class __UpperCamelCase ( unittest.TestCase ):
"""simple docstring"""
@slow
def UpperCAmelCase__ ( self : List[Any] ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : Tuple = TFXLMRobertaModel.from_pretrained('''jplu/tf-xlm-roberta-base''' )
__SCREAMING_SNAKE_CASE : int = {
'''input_ids''': tf.convert_to_tensor([[0, 2646, 1_0269, 83, 9_9942, 2]] , dtype=tf.intaa ), # "My dog is cute"
'''attention_mask''': tf.convert_to_tensor([[1, 1, 1, 1, 1, 1]] , dtype=tf.intaa ),
}
__SCREAMING_SNAKE_CASE : Any = model(_A )['''last_hidden_state''']
__SCREAMING_SNAKE_CASE : Dict = tf.TensorShape((1, 6, 768) )
self.assertEqual(output.shape , _A )
# compare the actual values for a slice.
__SCREAMING_SNAKE_CASE : Dict = tf.convert_to_tensor(
[
[
[0.0_68_17_62, 0.10_89_44_51, 0.06_77_25_04],
[-0.06_42_36_68, 0.02_36_66_15, 0.04_32_93_44],
[-0.06_05_72_95, 0.09_97_41_35, -0.00_07_05_84],
]
] , dtype=tf.floataa , )
self.assertTrue(np.allclose(output[:, :3, :3].numpy() , expected_slice.numpy() , atol=1e-4 ) )
| 74 |
"""simple docstring"""
from __future__ import annotations
import os
from collections.abc import Mapping
A = tuple[int, int]
class __lowercase :
'''simple docstring'''
def __init__( self , _UpperCAmelCase , _UpperCAmelCase ):
__a : set[int] = vertices
__a : dict[EdgeT, int] = {
(min(_UpperCAmelCase ), max(_UpperCAmelCase )): weight for edge, weight in edges.items()
}
def _lowerCamelCase ( self , _UpperCAmelCase , _UpperCAmelCase ):
self.vertices.add(edge[0] )
self.vertices.add(edge[1] )
__a : Dict = weight
def _lowerCamelCase ( self ):
__a : Graph = Graph({min(self.vertices )} , {} )
__a : EdgeT
__a : int
__a : EdgeT
__a : int
while len(subgraph.vertices ) < len(self.vertices ):
__a : Any = max(self.edges.values() ) + 1
for edge, weight in self.edges.items():
if (edge[0] in subgraph.vertices) ^ (edge[1] in subgraph.vertices):
if weight < min_weight:
__a : List[str] = edge
__a : Optional[int] = weight
subgraph.add_edge(_UpperCAmelCase , _UpperCAmelCase )
return subgraph
def __A ( a_ :str = "p107_network.txt") -> int:
__a : str = os.path.abspath(os.path.dirname(a_))
__a : str = os.path.join(a_ , a_)
__a : dict[EdgeT, int] = {}
__a : list[str]
__a : int
__a : int
with open(a_) as f:
__a : Optional[int] = f.read().strip().split('''\n''')
__a : Dict = [line.split(''',''') for line in data]
for edgea in range(1 , len(a_)):
for edgea in range(a_):
if adjaceny_matrix[edgea][edgea] != "-":
__a : Tuple = int(adjaceny_matrix[edgea][edgea])
__a : Graph = Graph(set(range(len(a_))) , a_)
__a : Graph = graph.prims_algorithm()
__a : int = sum(graph.edges.values())
__a : int = sum(subgraph.edges.values())
return initial_total - optimal_total
if __name__ == "__main__":
print(F'{solution() = }')
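# --- Illustrative sketch (not part of the original file) ---
# The class above grows a spanning tree greedily: starting from one vertex, it
# repeatedly adds the cheapest edge with exactly one endpoint inside the tree
# (Prim's algorithm). This tiny self-contained version, with assumed names,
# shows the saving that solution() reports as initial_total - optimal_total.
def prim_mst_weight(vertices: set[int], edges: dict[tuple[int, int], int]) -> int:
    """Total edge weight of a minimum spanning tree of a connected graph."""
    in_tree = {next(iter(vertices))}
    total = 0
    while in_tree != vertices:
        # Cheapest edge crossing the tree boundary (exactly one endpoint inside).
        edge, weight = min(
            ((e, w) for e, w in edges.items() if (e[0] in in_tree) ^ (e[1] in in_tree)),
            key=lambda item: item[1],
        )
        in_tree.update(edge)
        total += weight
    return total
# Example: a square with one diagonal. The MST keeps weights 1 + 1 + 2 = 4, so
# the saving over the full weight 1 + 1 + 2 + 3 + 5 = 12 is 8.
# edges = {(0, 1): 1, (1, 2): 1, (2, 3): 2, (0, 3): 3, (0, 2): 5}
# assert sum(edges.values()) - prim_mst_weight({0, 1, 2, 3}, edges) == 8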
| 52 | 0 |
'''simple docstring'''
from collections import deque
from math import floor
from random import random
from time import time
class lowerCamelCase_ :
def __init__( self : Tuple ):
'''simple docstring'''
UpperCAmelCase__ : Dict = {}
def lowercase_ ( self : Optional[int] , _A : Optional[int] , _A : str , _A : List[str]=1 ):
'''simple docstring'''
if self.graph.get(_A ):
if self.graph[u].count([w, v] ) == 0:
self.graph[u].append([w, v] )
else:
UpperCAmelCase__ : Tuple = [[w, v]]
if not self.graph.get(_A ):
UpperCAmelCase__ : List[Any] = []
def lowercase_ ( self : Dict ):
'''simple docstring'''
return list(self.graph )
def lowercase_ ( self : int , _A : List[str] , _A : str ):
'''simple docstring'''
if self.graph.get(_A ):
for _ in self.graph[u]:
if _[1] == v:
self.graph[u].remove(_A )
def lowercase_ ( self : Dict , _A : List[Any]=-2 , _A : Optional[int]=-1 ):
'''simple docstring'''
if s == d:
return []
UpperCAmelCase__ : Union[str, Any] = []
UpperCAmelCase__ : int = []
if s == -2:
UpperCAmelCase__ : Tuple = list(self.graph )[0]
stack.append(_A )
visited.append(_A )
UpperCAmelCase__ : List[Any] = s
while True:
            # check if there are any non-isolated nodes
if len(self.graph[s] ) != 0:
UpperCAmelCase__ : Optional[int] = s
for node in self.graph[s]:
if visited.count(node[1] ) < 1:
if node[1] == d:
visited.append(_A )
return visited
else:
stack.append(node[1] )
visited.append(node[1] )
UpperCAmelCase__ : str = node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
if len(_A ) != 0:
UpperCAmelCase__ : Dict = stack[len(_A ) - 1]
else:
UpperCAmelCase__ : str = ss
            # check if we have reached the starting point
if len(_A ) == 0:
return visited
def lowercase_ ( self : Tuple , _A : int=-1 ):
'''simple docstring'''
if c == -1:
UpperCAmelCase__ : Tuple = floor(random() * 10_000 ) + 10
for i in range(_A ):
# every vertex has max 100 edges
for _ in range(floor(random() * 102 ) + 1 ):
UpperCAmelCase__ : Union[str, Any] = floor(random() * c ) + 1
if n != i:
self.add_pair(_A , _A , 1 )
def lowercase_ ( self : Tuple , _A : List[str]=-2 ):
'''simple docstring'''
UpperCAmelCase__ : int = deque()
UpperCAmelCase__ : Optional[int] = []
if s == -2:
UpperCAmelCase__ : Union[str, Any] = list(self.graph )[0]
d.append(_A )
visited.append(_A )
while d:
UpperCAmelCase__ : Dict = d.popleft()
if len(self.graph[s] ) != 0:
for node in self.graph[s]:
if visited.count(node[1] ) < 1:
d.append(node[1] )
visited.append(node[1] )
return visited
def lowercase_ ( self : int , _A : Union[str, Any] ):
'''simple docstring'''
UpperCAmelCase__ : List[str] = 0
for x in self.graph:
for y in self.graph[x]:
if y[1] == u:
count += 1
return count
def lowercase_ ( self : Dict , _A : Union[str, Any] ):
'''simple docstring'''
return len(self.graph[u] )
def lowercase_ ( self : Dict , _A : Optional[Any]=-2 ):
'''simple docstring'''
UpperCAmelCase__ : Optional[Any] = []
UpperCAmelCase__ : List[Any] = []
if s == -2:
UpperCAmelCase__ : List[str] = list(self.graph )[0]
stack.append(_A )
visited.append(_A )
UpperCAmelCase__ : Any = s
UpperCAmelCase__ : Any = []
while True:
            # check if there are any non-isolated nodes
if len(self.graph[s] ) != 0:
UpperCAmelCase__ : Dict = s
for node in self.graph[s]:
if visited.count(node[1] ) < 1:
stack.append(node[1] )
visited.append(node[1] )
UpperCAmelCase__ : Optional[int] = node[1]
break
# check if all the children are visited
if s == ss:
sorted_nodes.append(stack.pop() )
if len(_A ) != 0:
UpperCAmelCase__ : Union[str, Any] = stack[len(_A ) - 1]
else:
UpperCAmelCase__ : Union[str, Any] = ss
            # check if we have reached the starting point
if len(_A ) == 0:
return sorted_nodes
def lowercase_ ( self : Optional[Any] ):
'''simple docstring'''
UpperCAmelCase__ : Any = []
UpperCAmelCase__ : List[str] = []
UpperCAmelCase__ : Union[str, Any] = list(self.graph )[0]
stack.append(_A )
visited.append(_A )
UpperCAmelCase__ : int = -2
UpperCAmelCase__ : List[str] = []
UpperCAmelCase__ : Optional[int] = s
UpperCAmelCase__ : Any = False
UpperCAmelCase__ : int = set()
while True:
            # check if there are any non-isolated nodes
if len(self.graph[s] ) != 0:
UpperCAmelCase__ : Tuple = s
for node in self.graph[s]:
if (
visited.count(node[1] ) > 0
and node[1] != parent
and indirect_parents.count(node[1] ) > 0
and not on_the_way_back
):
UpperCAmelCase__ : Dict = len(_A ) - 1
while len_stack >= 0:
if stack[len_stack] == node[1]:
anticipating_nodes.add(node[1] )
break
else:
anticipating_nodes.add(stack[len_stack] )
len_stack -= 1
if visited.count(node[1] ) < 1:
stack.append(node[1] )
visited.append(node[1] )
UpperCAmelCase__ : Optional[int] = node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
UpperCAmelCase__ : Union[str, Any] = True
if len(_A ) != 0:
UpperCAmelCase__ : List[str] = stack[len(_A ) - 1]
else:
UpperCAmelCase__ : Optional[Any] = False
indirect_parents.append(_A )
UpperCAmelCase__ : Tuple = s
UpperCAmelCase__ : Tuple = ss
            # check if we have reached the starting point
if len(_A ) == 0:
return list(_A )
def lowercase_ ( self : Union[str, Any] ):
'''simple docstring'''
UpperCAmelCase__ : Optional[Any] = []
UpperCAmelCase__ : Tuple = []
UpperCAmelCase__ : Dict = list(self.graph )[0]
stack.append(_A )
visited.append(_A )
UpperCAmelCase__ : Union[str, Any] = -2
UpperCAmelCase__ : Optional[Any] = []
UpperCAmelCase__ : List[Any] = s
UpperCAmelCase__ : str = False
UpperCAmelCase__ : str = set()
while True:
            # check if there are any non-isolated nodes
if len(self.graph[s] ) != 0:
UpperCAmelCase__ : str = s
for node in self.graph[s]:
if (
visited.count(node[1] ) > 0
and node[1] != parent
and indirect_parents.count(node[1] ) > 0
and not on_the_way_back
):
UpperCAmelCase__ : List[str] = len(_A ) - 1
while len_stack_minus_one >= 0:
if stack[len_stack_minus_one] == node[1]:
anticipating_nodes.add(node[1] )
break
else:
return True
if visited.count(node[1] ) < 1:
stack.append(node[1] )
visited.append(node[1] )
UpperCAmelCase__ : Optional[int] = node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
UpperCAmelCase__ : Union[str, Any] = True
if len(_A ) != 0:
UpperCAmelCase__ : int = stack[len(_A ) - 1]
else:
UpperCAmelCase__ : Dict = False
indirect_parents.append(_A )
UpperCAmelCase__ : int = s
UpperCAmelCase__ : Optional[int] = ss
# check if se have reached the starting point
if len(_A ) == 0:
return False
def lowercase_ ( self : Dict , _A : Any=-2 , _A : List[str]=-1 ):
'''simple docstring'''
UpperCAmelCase__ : Any = time()
self.dfs(_A , _A )
UpperCAmelCase__ : Union[str, Any] = time()
return end - begin
def lowercase_ ( self : Union[str, Any] , _A : Optional[int]=-2 ):
'''simple docstring'''
UpperCAmelCase__ : Dict = time()
self.bfs(_A )
UpperCAmelCase__ : Any = time()
return end - begin
class lowerCamelCase_ :
def __init__( self : List[str] ):
'''simple docstring'''
UpperCAmelCase__ : Optional[int] = {}
def lowercase_ ( self : List[Any] , _A : List[str] , _A : Optional[int] , _A : Any=1 ):
'''simple docstring'''
if self.graph.get(_A ):
            # if there already is an edge
if self.graph[u].count([w, v] ) == 0:
self.graph[u].append([w, v] )
else:
# if u does not exist
UpperCAmelCase__ : Any = [[w, v]]
# add the other way
if self.graph.get(_A ):
            # if there already is an edge
if self.graph[v].count([w, u] ) == 0:
self.graph[v].append([w, u] )
else:
            # if v does not exist
UpperCAmelCase__ : List[str] = [[w, u]]
def lowercase_ ( self : Optional[Any] , _A : Optional[int] , _A : Dict ):
'''simple docstring'''
if self.graph.get(_A ):
for _ in self.graph[u]:
if _[1] == v:
self.graph[u].remove(_A )
# the other way round
if self.graph.get(_A ):
for _ in self.graph[v]:
if _[1] == u:
self.graph[v].remove(_A )
def lowercase_ ( self : Optional[Any] , _A : int=-2 , _A : Optional[int]=-1 ):
'''simple docstring'''
if s == d:
return []
UpperCAmelCase__ : int = []
UpperCAmelCase__ : Dict = []
if s == -2:
UpperCAmelCase__ : Optional[Any] = list(self.graph )[0]
stack.append(_A )
visited.append(_A )
UpperCAmelCase__ : Optional[Any] = s
while True:
            # check if there are any non-isolated nodes
if len(self.graph[s] ) != 0:
UpperCAmelCase__ : Tuple = s
for node in self.graph[s]:
if visited.count(node[1] ) < 1:
if node[1] == d:
visited.append(_A )
return visited
else:
stack.append(node[1] )
visited.append(node[1] )
UpperCAmelCase__ : List[Any] = node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
if len(_A ) != 0:
UpperCAmelCase__ : str = stack[len(_A ) - 1]
else:
UpperCAmelCase__ : Optional[Any] = ss
            # check if we have reached the starting point
if len(_A ) == 0:
return visited
def lowercase_ ( self : str , _A : Tuple=-1 ):
'''simple docstring'''
if c == -1:
UpperCAmelCase__ : str = floor(random() * 10_000 ) + 10
for i in range(_A ):
# every vertex has max 100 edges
for _ in range(floor(random() * 102 ) + 1 ):
UpperCAmelCase__ : List[str] = floor(random() * c ) + 1
if n != i:
self.add_pair(_A , _A , 1 )
def lowercase_ ( self : Tuple , _A : List[str]=-2 ):
'''simple docstring'''
UpperCAmelCase__ : Optional[Any] = deque()
UpperCAmelCase__ : Optional[int] = []
if s == -2:
UpperCAmelCase__ : int = list(self.graph )[0]
d.append(_A )
visited.append(_A )
while d:
UpperCAmelCase__ : Dict = d.popleft()
if len(self.graph[s] ) != 0:
for node in self.graph[s]:
if visited.count(node[1] ) < 1:
d.append(node[1] )
visited.append(node[1] )
return visited
def lowercase_ ( self : Tuple , _A : int ):
'''simple docstring'''
return len(self.graph[u] )
def lowercase_ ( self : str ):
'''simple docstring'''
UpperCAmelCase__ : Dict = []
UpperCAmelCase__ : str = []
UpperCAmelCase__ : Any = list(self.graph )[0]
stack.append(_A )
visited.append(_A )
UpperCAmelCase__ : Union[str, Any] = -2
UpperCAmelCase__ : Union[str, Any] = []
UpperCAmelCase__ : List[Any] = s
UpperCAmelCase__ : Any = False
UpperCAmelCase__ : Any = set()
while True:
            # check if there are any non-isolated nodes
if len(self.graph[s] ) != 0:
UpperCAmelCase__ : int = s
for node in self.graph[s]:
if (
visited.count(node[1] ) > 0
and node[1] != parent
and indirect_parents.count(node[1] ) > 0
and not on_the_way_back
):
UpperCAmelCase__ : Tuple = len(_A ) - 1
while len_stack >= 0:
if stack[len_stack] == node[1]:
anticipating_nodes.add(node[1] )
break
else:
anticipating_nodes.add(stack[len_stack] )
len_stack -= 1
if visited.count(node[1] ) < 1:
stack.append(node[1] )
visited.append(node[1] )
UpperCAmelCase__ : Any = node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
UpperCAmelCase__ : Dict = True
if len(_A ) != 0:
UpperCAmelCase__ : Optional[int] = stack[len(_A ) - 1]
else:
UpperCAmelCase__ : Optional[int] = False
indirect_parents.append(_A )
UpperCAmelCase__ : Optional[int] = s
UpperCAmelCase__ : Dict = ss
# check if se have reached the starting point
if len(_A ) == 0:
return list(_A )
def lowercase_ ( self : str ):
'''simple docstring'''
UpperCAmelCase__ : List[Any] = []
UpperCAmelCase__ : str = []
UpperCAmelCase__ : Any = list(self.graph )[0]
stack.append(_A )
visited.append(_A )
UpperCAmelCase__ : Dict = -2
UpperCAmelCase__ : Optional[int] = []
UpperCAmelCase__ : Optional[Any] = s
UpperCAmelCase__ : Optional[Any] = False
UpperCAmelCase__ : Union[str, Any] = set()
while True:
            # check if there are any non-isolated nodes
if len(self.graph[s] ) != 0:
UpperCAmelCase__ : List[str] = s
for node in self.graph[s]:
if (
visited.count(node[1] ) > 0
and node[1] != parent
and indirect_parents.count(node[1] ) > 0
and not on_the_way_back
):
UpperCAmelCase__ : Union[str, Any] = len(_A ) - 1
while len_stack_minus_one >= 0:
if stack[len_stack_minus_one] == node[1]:
anticipating_nodes.add(node[1] )
break
else:
return True
if visited.count(node[1] ) < 1:
stack.append(node[1] )
visited.append(node[1] )
UpperCAmelCase__ : Any = node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
UpperCAmelCase__ : List[str] = True
if len(_A ) != 0:
UpperCAmelCase__ : Any = stack[len(_A ) - 1]
else:
UpperCAmelCase__ : Optional[Any] = False
indirect_parents.append(_A )
UpperCAmelCase__ : List[str] = s
UpperCAmelCase__ : Optional[Any] = ss
            # check if we have reached the starting point
if len(_A ) == 0:
return False
def lowercase_ ( self : int ):
'''simple docstring'''
return list(self.graph )
def lowercase_ ( self : List[Any] , _A : Any=-2 , _A : List[str]=-1 ):
'''simple docstring'''
UpperCAmelCase__ : Union[str, Any] = time()
self.dfs(_A , _A )
UpperCAmelCase__ : Tuple = time()
return end - begin
def lowercase_ ( self : int , _A : str=-2 ):
'''simple docstring'''
UpperCAmelCase__ : Tuple = time()
self.bfs(_A )
UpperCAmelCase__ : int = time()
return end - begin
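# --- Illustrative sketch (not part of the original file) ---
# Both classes above traverse with an explicit stack or deque instead of
# recursion. This compact, self-contained DFS over a plain dict follows the
# same pattern as the commented loops (the names are assumed, not the file's).
def dfs_order(graph: dict[int, list[int]], start: int) -> list[int]:
    """Vertices in depth-first discovery order, using an explicit stack."""
    stack, visited = [start], []
    while stack:
        node = stack.pop()
        if node not in visited:
            visited.append(node)
            # Push neighbours in reverse so the first-listed one is explored
            # first, matching the usual recursive visiting order.
            stack.extend(reversed(graph.get(node, [])))
    return visited
# Example: dfs_order({0: [1, 2], 1: [3], 2: [], 3: []}, 0) -> [0, 1, 3, 2]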
| 75 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
A = logging.get_logger(__name__)
A = {
'''microsoft/trocr-base-handwritten''': (
'''https://huggingface.co/microsoft/trocr-base-handwritten/resolve/main/config.json'''
),
# See all TrOCR models at https://huggingface.co/models?filter=trocr
}
class __lowercase ( _UpperCamelCase ):
'''simple docstring'''
__lowerCAmelCase = '''trocr'''
__lowerCAmelCase = ['''past_key_values''']
__lowerCAmelCase = {
'''num_attention_heads''': '''decoder_attention_heads''',
'''hidden_size''': '''d_model''',
'''num_hidden_layers''': '''decoder_layers''',
}
def __init__( self , _UpperCAmelCase=50265 , _UpperCAmelCase=1024 , _UpperCAmelCase=12 , _UpperCAmelCase=16 , _UpperCAmelCase=4096 , _UpperCAmelCase="gelu" , _UpperCAmelCase=512 , _UpperCAmelCase=0.1 , _UpperCAmelCase=0.0 , _UpperCAmelCase=0.0 , _UpperCAmelCase=2 , _UpperCAmelCase=0.0_2 , _UpperCAmelCase=0.0 , _UpperCAmelCase=True , _UpperCAmelCase=False , _UpperCAmelCase=True , _UpperCAmelCase=True , _UpperCAmelCase=1 , _UpperCAmelCase=0 , _UpperCAmelCase=2 , **_UpperCAmelCase , ):
__a : List[str] = vocab_size
__a : Optional[Any] = d_model
__a : Optional[Any] = decoder_layers
__a : Union[str, Any] = decoder_attention_heads
__a : int = decoder_ffn_dim
__a : List[Any] = activation_function
__a : Any = max_position_embeddings
__a : Dict = dropout
__a : List[Any] = attention_dropout
__a : Optional[Any] = activation_dropout
__a : str = init_std
__a : List[str] = decoder_layerdrop
__a : Union[str, Any] = use_cache
__a : Optional[Any] = scale_embedding
__a : List[Any] = use_learned_position_embeddings
__a : Optional[int] = layernorm_embedding
super().__init__(
pad_token_id=_UpperCAmelCase , bos_token_id=_UpperCAmelCase , eos_token_id=_UpperCAmelCase , decoder_start_token_id=_UpperCAmelCase , **_UpperCAmelCase , )
| 52 | 0 |
"""simple docstring"""
def __UpperCAmelCase ( __UpperCamelCase = 1_00_00_00 ):
__lowercase : Optional[Any] = limit + 1
__lowercase : str = [0] * limit
for first_term in range(1 , __UpperCamelCase ):
for n in range(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase ):
__lowercase : List[Any] = first_term + n / first_term
            if common_difference % 4: # d must be divisible by 4
continue
else:
common_difference /= 4
if (
first_term > common_difference
and first_term < 4 * common_difference
                ): # since x, y, z are positive integers
                    frequency[n] += 1 # so z > 0 and a > d; also 4d < a
__lowercase : Union[str, Any] = sum(1 for x in frequency[1:limit] if x == 10 )
return count
if __name__ == "__main__":
print(F"{solution() = }")
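# Derivation sketch for the loop above (assuming the usual Project Euler 135
# setup: x > y > z in arithmetic progression, so x = a + d, y = a, z = a - d):
#     x**2 - y**2 - z**2 = (a + d)**2 - a**2 - (a - d)**2 = a * (4 * d - a) = n
# Hence a (= first_term) must divide n, and d = (first_term + n / first_term) / 4
# must be an integer, which is what the "% 4" test checks. The guards
# first_term > d and first_term < 4 * d enforce z > 0 and n > 0 respectively.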
| 76 |
"""simple docstring"""
import torch
from transformers import CamembertForMaskedLM, CamembertTokenizer
def __A ( a_ :Union[str, Any] , a_ :Union[str, Any] , a_ :Optional[Any] , a_ :Optional[int]=5) -> List[Any]:
# Adapted from https://github.com/pytorch/fairseq/blob/master/fairseq/models/roberta/hub_interface.py
assert masked_input.count('''<mask>''') == 1
__a : Optional[Any] = torch.tensor(tokenizer.encode(a_ , add_special_tokens=a_)).unsqueeze(0) # Batch size 1
__a : Dict = model(a_)[0] # The last hidden-state is the first element of the output tuple
__a : Tuple = (input_ids.squeeze() == tokenizer.mask_token_id).nonzero().item()
__a : Any = logits[0, masked_index, :]
__a : Any = logits.softmax(dim=0)
__a , __a : Optional[Any] = prob.topk(k=a_ , dim=0)
__a : Optional[int] = ''' '''.join(
[tokenizer.convert_ids_to_tokens(indices[i].item()) for i in range(len(a_))])
__a : List[str] = tokenizer.mask_token
__a : Optional[int] = []
for index, predicted_token_bpe in enumerate(topk_predicted_token_bpe.split(''' ''')):
__a : Optional[Any] = predicted_token_bpe.replace('''\u2581''' , ''' ''')
if " {0}".format(a_) in masked_input:
topk_filled_outputs.append(
(
masked_input.replace(''' {0}'''.format(a_) , a_),
values[index].item(),
predicted_token,
))
else:
topk_filled_outputs.append(
(
masked_input.replace(a_ , a_),
values[index].item(),
predicted_token,
))
return topk_filled_outputs
A = CamembertTokenizer.from_pretrained('''camembert-base''')
A = CamembertForMaskedLM.from_pretrained('''camembert-base''')
model.eval()
A = '''Le camembert est <mask> :)'''
print(fill_mask(masked_input, model, tokenizer, topk=3))
| 52 | 0 |
"""simple docstring"""
def _UpperCamelCase ( UpperCamelCase ) -> list:
"""simple docstring"""
__UpperCAmelCase : List[Any] = len(UpperCamelCase )
for _ in range(UpperCamelCase ):
for i in range(_ % 2 , arr_size - 1 , 2 ):
if arr[i + 1] < arr[i]:
__UpperCAmelCase , __UpperCAmelCase : Union[str, Any] = arr[i + 1], arr[i]
return arr
if __name__ == "__main__":
A = list(range(10, 0, -1))
print(f'''Original: {arr}. Sorted: {odd_even_transposition(arr)}''')
| 77 |
"""simple docstring"""
import unittest
from knapsack import greedy_knapsack as kp
class __lowercase ( unittest.TestCase ):
'''simple docstring'''
def _lowerCamelCase ( self ):
__a : Optional[int] = [10, 20, 30, 40, 50, 60]
__a : Union[str, Any] = [2, 4, 6, 8, 10, 12]
__a : List[str] = 100
self.assertEqual(kp.calc_profit(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) , 210 )
def _lowerCamelCase ( self ):
self.assertRaisesRegex(_UpperCAmelCase , '''max_weight must greater than zero.''' )
def _lowerCamelCase ( self ):
self.assertRaisesRegex(_UpperCAmelCase , '''Weight can not be negative.''' )
def _lowerCamelCase ( self ):
self.assertRaisesRegex(_UpperCAmelCase , '''Profit can not be negative.''' )
def _lowerCamelCase ( self ):
self.assertRaisesRegex(_UpperCAmelCase , '''max_weight must greater than zero.''' )
def _lowerCamelCase ( self ):
self.assertRaisesRegex(
_UpperCAmelCase , '''The length of profit and weight must be same.''' )
if __name__ == "__main__":
unittest.main()
| 52 | 0 |
'''simple docstring'''
from __future__ import annotations
def lowerCAmelCase_ ( snake_case_ : list[int] , snake_case_ : int , snake_case_ : int , snake_case_ : int ) -> None:
'''simple docstring'''
if (direction == 1 and array[indexa] > array[indexa]) or (
direction == 0 and array[indexa] < array[indexa]
):
UpperCAmelCase_ , UpperCAmelCase_ = array[indexa], array[indexa]
def lowerCAmelCase_ ( snake_case_ : list[int] , snake_case_ : int , snake_case_ : int , snake_case_ : int ) -> None:
'''simple docstring'''
if length > 1:
UpperCAmelCase_ = int(length / 2 )
for i in range(snake_case_ , low + middle ):
comp_and_swap(snake_case_ , snake_case_ , i + middle , snake_case_ )
bitonic_merge(snake_case_ , snake_case_ , snake_case_ , snake_case_ )
bitonic_merge(snake_case_ , low + middle , snake_case_ , snake_case_ )
def lowerCAmelCase_ ( snake_case_ : list[int] , snake_case_ : int , snake_case_ : int , snake_case_ : int ) -> None:
'''simple docstring'''
if length > 1:
UpperCAmelCase_ = int(length / 2 )
bitonic_sort(snake_case_ , snake_case_ , snake_case_ , 1 )
bitonic_sort(snake_case_ , low + middle , snake_case_ , 0 )
bitonic_merge(snake_case_ , snake_case_ , snake_case_ , snake_case_ )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE_: List[Any] =input('Enter numbers separated by a comma:\n').strip()
SCREAMING_SNAKE_CASE_: int =[int(item.strip()) for item in user_input.split(',')]
bitonic_sort(unsorted, 0, len(unsorted), 1)
print('\nSorted array in ascending order is: ', end='')
print(*unsorted, sep=', ')
bitonic_merge(unsorted, 0, len(unsorted), 0)
print('Sorted array in descending order is: ', end='')
print(*unsorted, sep=', ')
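# Note (a standard caveat, assumed rather than stated in the file): bitonic
# sort only fully sorts inputs whose length is a power of two, because each
# level splits the range exactly in half. For other lengths, pad the input
# (e.g. with float("inf") when sorting ascending) and strip the padding after.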
| 78 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
A = logging.get_logger(__name__)
A = {}
class __lowercase ( _UpperCamelCase ):
'''simple docstring'''
__lowerCAmelCase = '''llama'''
__lowerCAmelCase = ['''past_key_values''']
def __init__( self , _UpperCAmelCase=32000 , _UpperCAmelCase=4096 , _UpperCAmelCase=11008 , _UpperCAmelCase=32 , _UpperCAmelCase=32 , _UpperCAmelCase=None , _UpperCAmelCase="silu" , _UpperCAmelCase=2048 , _UpperCAmelCase=0.0_2 , _UpperCAmelCase=1e-6 , _UpperCAmelCase=True , _UpperCAmelCase=0 , _UpperCAmelCase=1 , _UpperCAmelCase=2 , _UpperCAmelCase=1 , _UpperCAmelCase=False , _UpperCAmelCase=None , **_UpperCAmelCase , ):
__a : Dict = vocab_size
__a : Union[str, Any] = max_position_embeddings
__a : str = hidden_size
__a : List[str] = intermediate_size
__a : Any = num_hidden_layers
__a : int = num_attention_heads
# for backward compatibility
if num_key_value_heads is None:
__a : Union[str, Any] = num_attention_heads
__a : Optional[int] = num_key_value_heads
__a : Dict = hidden_act
__a : Union[str, Any] = initializer_range
__a : int = rms_norm_eps
__a : Optional[int] = pretraining_tp
__a : Optional[Any] = use_cache
__a : Optional[Any] = rope_scaling
self._rope_scaling_validation()
super().__init__(
pad_token_id=_UpperCAmelCase , bos_token_id=_UpperCAmelCase , eos_token_id=_UpperCAmelCase , tie_word_embeddings=_UpperCAmelCase , **_UpperCAmelCase , )
def _lowerCamelCase ( self ):
if self.rope_scaling is None:
return
if not isinstance(self.rope_scaling , _UpperCAmelCase ) or len(self.rope_scaling ) != 2:
            raise ValueError(
                '''`rope_scaling` must be a dictionary with two fields, `type` and `factor`, '''
f"""got {self.rope_scaling}""" )
__a : Tuple = self.rope_scaling.get('''type''' , _UpperCAmelCase )
__a : Optional[int] = self.rope_scaling.get('''factor''' , _UpperCAmelCase )
if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]:
            raise ValueError(
                f"""`rope_scaling`'s type field must be one of ['linear', 'dynamic'], got {rope_scaling_type}""" )
if rope_scaling_factor is None or not isinstance(_UpperCAmelCase , _UpperCAmelCase ) or rope_scaling_factor <= 1.0:
            raise ValueError(f"""`rope_scaling`'s factor field must be a float > 1, got {rope_scaling_factor}""" )
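# --- Illustrative sketch (not part of the original file) ---
# The validation above accepts exactly a two-field dict. For example, this
# hypothetical upstream-style call enables linear RoPE scaling by a factor of 2:
#     config = LlamaConfig(rope_scaling={"type": "linear", "factor": 2.0})
# while {"type": "linear"} (missing factor) or {"type": "ntk", "factor": 2.0}
# (unknown type) would trip the ValueErrors raised in the method above.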
| 52 | 0 |
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_DEFAULT_MEAN,
IMAGENET_DEFAULT_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
is_batched,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging
SCREAMING_SNAKE_CASE__ : int = logging.get_logger(__name__)
class UpperCAmelCase_ ( __lowerCamelCase ):
__lowerCamelCase = ['pixel_values']
def __init__( self , _lowerCAmelCase = True , _lowerCAmelCase = None , _lowerCAmelCase = PILImageResampling.BICUBIC , _lowerCAmelCase = True , _lowerCAmelCase = True , _lowerCAmelCase = 1 / 255 , _lowerCAmelCase = None , _lowerCAmelCase = True , _lowerCAmelCase = None , _lowerCAmelCase = None , **_lowerCAmelCase , ):
super().__init__(**_lowerCAmelCase )
UpperCAmelCase__ : Optional[Any] = size if size is not None else {"""height""": 224, """width""": 224}
UpperCAmelCase__ : str = get_size_dict(_lowerCAmelCase )
UpperCAmelCase__ : Union[str, Any] = crop_size if crop_size is not None else {"""height""": 224, """width""": 224}
UpperCAmelCase__ : Any = get_size_dict(_lowerCAmelCase , default_to_square=_lowerCAmelCase , param_name="""crop_size""" )
UpperCAmelCase__ : Optional[int] = do_resize
UpperCAmelCase__ : List[str] = do_rescale
UpperCAmelCase__ : Tuple = do_normalize
UpperCAmelCase__ : Tuple = do_center_crop
UpperCAmelCase__ : List[str] = crop_size
UpperCAmelCase__ : List[Any] = size
UpperCAmelCase__ : Union[str, Any] = resample
UpperCAmelCase__ : Union[str, Any] = rescale_factor
UpperCAmelCase__ : Union[str, Any] = image_mean if image_mean is not None else IMAGENET_DEFAULT_MEAN
UpperCAmelCase__ : List[Any] = image_std if image_std is not None else IMAGENET_DEFAULT_STD
def __UpperCAmelCase ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase = PILImageResampling.BILINEAR , _lowerCAmelCase = None , **_lowerCAmelCase , ):
UpperCAmelCase__ : Union[str, Any] = get_size_dict(_lowerCAmelCase )
if "shortest_edge" in size:
UpperCAmelCase__ : Tuple = get_resize_output_image_size(_lowerCAmelCase , size=size["""shortest_edge"""] , default_to_square=_lowerCAmelCase )
# size = get_resize_output_image_size(image, size["shortest_edge"], size["longest_edge"])
elif "height" in size and "width" in size:
UpperCAmelCase__ : List[Any] = (size["""height"""], size["""width"""])
else:
raise ValueError(f"Size must contain 'height' and 'width' keys or 'shortest_edge' key. Got {size.keys()}" )
return resize(_lowerCAmelCase , size=_lowerCAmelCase , resample=_lowerCAmelCase , data_format=_lowerCAmelCase , **_lowerCAmelCase )
def __UpperCAmelCase ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase = None , **_lowerCAmelCase , ):
UpperCAmelCase__ : str = get_size_dict(_lowerCAmelCase )
if "height" not in size or "width" not in size:
raise ValueError(f"The `size` parameter must contain the keys (height, width). Got {size.keys()}" )
return center_crop(_lowerCAmelCase , size=(size["""height"""], size["""width"""]) , data_format=_lowerCAmelCase , **_lowerCAmelCase )
def __UpperCAmelCase ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase = None , **_lowerCAmelCase ):
return rescale(_lowerCAmelCase , scale=_lowerCAmelCase , data_format=_lowerCAmelCase , **_lowerCAmelCase )
def __UpperCAmelCase ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase = None , **_lowerCAmelCase , ):
return normalize(_lowerCAmelCase , mean=_lowerCAmelCase , std=_lowerCAmelCase , data_format=_lowerCAmelCase , **_lowerCAmelCase )
def __UpperCAmelCase ( self , _lowerCAmelCase , _lowerCAmelCase = None , _lowerCAmelCase = None , _lowerCAmelCase = None , _lowerCAmelCase = None , _lowerCAmelCase = None , _lowerCAmelCase = None , _lowerCAmelCase = None , _lowerCAmelCase = None , _lowerCAmelCase = None , _lowerCAmelCase = None , _lowerCAmelCase = None , _lowerCAmelCase = ChannelDimension.FIRST , **_lowerCAmelCase , ):
UpperCAmelCase__ : Union[str, Any] = do_resize if do_resize is not None else self.do_resize
UpperCAmelCase__ : int = do_rescale if do_rescale is not None else self.do_rescale
UpperCAmelCase__ : Optional[Any] = do_normalize if do_normalize is not None else self.do_normalize
UpperCAmelCase__ : Tuple = do_center_crop if do_center_crop is not None else self.do_center_crop
UpperCAmelCase__ : List[Any] = crop_size if crop_size is not None else self.crop_size
UpperCAmelCase__ : List[str] = get_size_dict(_lowerCAmelCase , param_name="""crop_size""" , default_to_square=_lowerCAmelCase )
UpperCAmelCase__ : str = resample if resample is not None else self.resample
UpperCAmelCase__ : int = rescale_factor if rescale_factor is not None else self.rescale_factor
UpperCAmelCase__ : int = image_mean if image_mean is not None else self.image_mean
UpperCAmelCase__ : List[Any] = image_std if image_std is not None else self.image_std
UpperCAmelCase__ : Optional[Any] = size if size is not None else self.size
UpperCAmelCase__ : List[str] = get_size_dict(_lowerCAmelCase )
if not is_batched(_lowerCAmelCase ):
UpperCAmelCase__ : Optional[int] = [images]
if not valid_images(_lowerCAmelCase ):
raise ValueError(
"""Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, """
"""torch.Tensor, tf.Tensor or jax.ndarray.""" )
if do_resize and size is None:
raise ValueError("""Size must be specified if do_resize is True.""" )
if do_center_crop and crop_size is None:
raise ValueError("""Crop size must be specified if do_center_crop is True.""" )
if do_rescale and rescale_factor is None:
raise ValueError("""Rescale factor must be specified if do_rescale is True.""" )
# All transformations expect numpy arrays.
UpperCAmelCase__ : Tuple = [to_numpy_array(_lowerCAmelCase ) for image in images]
if do_resize:
UpperCAmelCase__ : Any = [self.resize(image=_lowerCAmelCase , size=_lowerCAmelCase , resample=_lowerCAmelCase ) for image in images]
if do_center_crop:
UpperCAmelCase__ : Tuple = [self.center_crop(image=_lowerCAmelCase , size=_lowerCAmelCase ) for image in images]
if do_rescale:
UpperCAmelCase__ : Union[str, Any] = [self.rescale(image=_lowerCAmelCase , scale=_lowerCAmelCase ) for image in images]
if do_normalize:
UpperCAmelCase__ : Optional[int] = [self.normalize(image=_lowerCAmelCase , mean=_lowerCAmelCase , std=_lowerCAmelCase ) for image in images]
UpperCAmelCase__ : Tuple = [to_channel_dimension_format(_lowerCAmelCase , _lowerCAmelCase ) for image in images]
UpperCAmelCase__ : Any = {"""pixel_values""": images}
return BatchFeature(data=_lowerCAmelCase , tensor_type=_lowerCAmelCase )
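# --- Usage sketch (not part of the original file) ---
# preprocess applies its steps in a fixed order: to_numpy_array -> resize ->
# center_crop -> rescale -> normalize -> to_channel_dimension_format. Assuming
# the upstream class and argument names, a minimal call would look like:
#     import numpy as np
#     processor = ImageProcessor(size={"height": 224, "width": 224})
#     batch = processor.preprocess(np.zeros((256, 256, 3), dtype=np.uint8), return_tensors="np")
#     batch["pixel_values"].shape  # (1, 3, 224, 224)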
| 79 |
"""simple docstring"""
import unittest
import numpy as np
from transformers.testing_utils import is_flaky, require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DonutImageProcessor
class __lowercase ( unittest.TestCase ):
'''simple docstring'''
def __init__( self , _UpperCAmelCase , _UpperCAmelCase=7 , _UpperCAmelCase=3 , _UpperCAmelCase=18 , _UpperCAmelCase=30 , _UpperCAmelCase=400 , _UpperCAmelCase=True , _UpperCAmelCase=None , _UpperCAmelCase=True , _UpperCAmelCase=False , _UpperCAmelCase=True , _UpperCAmelCase=True , _UpperCAmelCase=[0.5, 0.5, 0.5] , _UpperCAmelCase=[0.5, 0.5, 0.5] , ):
__a : int = parent
__a : str = batch_size
__a : List[Any] = num_channels
__a : Union[str, Any] = image_size
__a : List[Any] = min_resolution
__a : str = max_resolution
__a : List[str] = do_resize
__a : Optional[int] = size if size is not None else {'''height''': 18, '''width''': 20}
__a : str = do_thumbnail
__a : str = do_align_axis
__a : Dict = do_pad
__a : Union[str, Any] = do_normalize
__a : List[str] = image_mean
__a : Optional[int] = image_std
def _lowerCamelCase ( self ):
return {
"do_resize": self.do_resize,
"size": self.size,
"do_thumbnail": self.do_thumbnail,
"do_align_long_axis": self.do_align_axis,
"do_pad": self.do_pad,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
}
@require_torch
@require_vision
class __lowercase ( _UpperCamelCase , unittest.TestCase ):
'''simple docstring'''
__lowerCAmelCase = DonutImageProcessor if is_vision_available() else None
def _lowerCamelCase ( self ):
__a : Tuple = DonutImageProcessingTester(self )
@property
def _lowerCamelCase ( self ):
return self.image_processor_tester.prepare_image_processor_dict()
def _lowerCamelCase ( self ):
__a : List[str] = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(_UpperCAmelCase , '''do_resize''' ) )
self.assertTrue(hasattr(_UpperCAmelCase , '''size''' ) )
self.assertTrue(hasattr(_UpperCAmelCase , '''do_thumbnail''' ) )
self.assertTrue(hasattr(_UpperCAmelCase , '''do_align_long_axis''' ) )
self.assertTrue(hasattr(_UpperCAmelCase , '''do_pad''' ) )
self.assertTrue(hasattr(_UpperCAmelCase , '''do_normalize''' ) )
self.assertTrue(hasattr(_UpperCAmelCase , '''image_mean''' ) )
self.assertTrue(hasattr(_UpperCAmelCase , '''image_std''' ) )
def _lowerCamelCase ( self ):
__a : List[str] = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {'''height''': 18, '''width''': 20} )
__a : Tuple = self.image_processing_class.from_dict(self.image_processor_dict , size=42 )
self.assertEqual(image_processor.size , {'''height''': 42, '''width''': 42} )
# Previous config had dimensions in (width, height) order
__a : int = self.image_processing_class.from_dict(self.image_processor_dict , size=(42, 84) )
self.assertEqual(image_processor.size , {'''height''': 84, '''width''': 42} )
def _lowerCamelCase ( self ):
pass
@is_flaky()
def _lowerCamelCase ( self ):
# Initialize image_processing
__a : Any = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
__a : Optional[Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=_UpperCAmelCase )
for image in image_inputs:
self.assertIsInstance(_UpperCAmelCase , Image.Image )
# Test not batched input
__a : Optional[Any] = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['''height'''],
self.image_processor_tester.size['''width'''],
) , )
# Test batched
__a : int = image_processing(_UpperCAmelCase , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['''height'''],
self.image_processor_tester.size['''width'''],
) , )
@is_flaky()
def _lowerCamelCase ( self ):
# Initialize image_processing
__a : List[str] = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
__a : List[str] = prepare_image_inputs(self.image_processor_tester , equal_resolution=_UpperCAmelCase , numpify=_UpperCAmelCase )
for image in image_inputs:
self.assertIsInstance(_UpperCAmelCase , np.ndarray )
# Test not batched input
__a : Optional[Any] = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['''height'''],
self.image_processor_tester.size['''width'''],
) , )
# Test batched
__a : str = image_processing(_UpperCAmelCase , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['''height'''],
self.image_processor_tester.size['''width'''],
) , )
@is_flaky()
def _lowerCamelCase ( self ):
# Initialize image_processing
__a : Any = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
__a : str = prepare_image_inputs(self.image_processor_tester , equal_resolution=_UpperCAmelCase , torchify=_UpperCAmelCase )
for image in image_inputs:
self.assertIsInstance(_UpperCAmelCase , torch.Tensor )
# Test not batched input
__a : Optional[int] = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['''height'''],
self.image_processor_tester.size['''width'''],
) , )
# Test batched
__a : List[str] = image_processing(_UpperCAmelCase , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['''height'''],
self.image_processor_tester.size['''width'''],
) , )
| 52 | 0 |
import argparse
import os
from pathlib import Path
import fairseq
import torch
from packaging import version
from torch import nn
from transformers import (
BartConfig,
BartForConditionalGeneration,
BartForSequenceClassification,
BartModel,
BartTokenizer,
)
from transformers.utils import logging
__UpperCamelCase : List[Any] = ["""bart.large""", """bart.large.mnli""", """bart.large.cnn""", """bart_xsum/model.pt"""]
__UpperCamelCase : Optional[int] = {"""bart.large""": BartModel, """bart.large.mnli""": BartForSequenceClassification}
if version.parse(fairseq.__version__) < version.parse("""0.9.0"""):
raise Exception("""requires fairseq >= 0.9.0""")
logging.set_verbosity_info()
__UpperCamelCase : List[str] = logging.get_logger(__name__)
__UpperCamelCase : Tuple = """ Hello world! cécé herlolip"""
__UpperCamelCase : List[Any] = [
("""model.classification_heads.mnli.dense.weight""", """classification_head.dense.weight"""),
("""model.classification_heads.mnli.dense.bias""", """classification_head.dense.bias"""),
("""model.classification_heads.mnli.out_proj.weight""", """classification_head.out_proj.weight"""),
("""model.classification_heads.mnli.out_proj.bias""", """classification_head.out_proj.bias"""),
]
def snake_case ( lowerCamelCase ):
'''simple docstring'''
__lowercase = [
"""encoder.version""",
"""decoder.version""",
"""model.encoder.version""",
"""model.decoder.version""",
"""_float_tensor""",
]
for k in ignore_keys:
state_dict.pop(lowerCamelCase , lowerCamelCase )
def snake_case ( lowerCamelCase , lowerCamelCase , lowerCamelCase ):
'''simple docstring'''
__lowercase = dct.pop(lowerCamelCase )
__lowercase = val
def snake_case ( lowerCamelCase ):
'''simple docstring'''
__lowercase = torch.load(lowerCamelCase , map_location="""cpu""" )
__lowercase = torch.hub.load("""pytorch/fairseq""" , """bart.large.cnn""" ).eval()
hub_interface.model.load_state_dict(sd["""model"""] )
return hub_interface
def snake_case ( lowerCamelCase ):
'''simple docstring'''
__lowercase , __lowercase = emb.weight.shape
__lowercase = nn.Linear(lowerCamelCase , lowerCamelCase , bias=lowerCamelCase )
__lowercase = emb.weight.data
return lin_layer
@torch.no_grad()
def snake_case ( lowerCamelCase , lowerCamelCase , lowerCamelCase=None ):
'''simple docstring'''
if not os.path.exists(lowerCamelCase ):
__lowercase = torch.hub.load("""pytorch/fairseq""" , lowerCamelCase ).eval()
else:
__lowercase = load_xsum_checkpoint(lowerCamelCase )
bart.model.upgrade_state_dict(bart.model.state_dict() )
if hf_checkpoint_name is None:
__lowercase = checkpoint_path.replace(""".""" , """-""" )
__lowercase = BartConfig.from_pretrained(lowerCamelCase )
__lowercase = bart.encode(lowerCamelCase ).unsqueeze(0 )
__lowercase = BartTokenizer.from_pretrained(lowerCamelCase ).encode(lowerCamelCase , return_tensors="""pt""" ).unsqueeze(0 )
if not torch.eq(lowerCamelCase , lowerCamelCase ).all():
raise ValueError(
F'converted tokenizer and pretrained tokenizer returned different output: {tokens} != {tokensa}' )
if checkpoint_path == "bart.large.mnli":
__lowercase = bart.state_dict()
remove_ignore_keys_(lowerCamelCase )
__lowercase = state_dict["""model.decoder.embed_tokens.weight"""]
for src, dest in mnli_rename_keys:
rename_key(lowerCamelCase , lowerCamelCase , lowerCamelCase )
__lowercase = BartForSequenceClassification(lowerCamelCase ).eval()
model.load_state_dict(lowerCamelCase )
__lowercase = bart.predict("""mnli""" , lowerCamelCase , return_logits=lowerCamelCase )
__lowercase = model(lowerCamelCase )[0] # logits
else: # no classification heads to worry about
__lowercase = bart.model.state_dict()
remove_ignore_keys_(lowerCamelCase )
__lowercase = state_dict["""decoder.embed_tokens.weight"""]
__lowercase = bart.extract_features(lowerCamelCase )
if hf_checkpoint_name == "facebook/bart-large":
__lowercase = BartModel(lowerCamelCase ).eval()
model.load_state_dict(lowerCamelCase )
__lowercase = model(lowerCamelCase ).model[0]
else:
__lowercase = BartForConditionalGeneration(lowerCamelCase ).eval() # an existing summarization ckpt
model.model.load_state_dict(lowerCamelCase )
if hasattr(lowerCamelCase , """lm_head""" ):
__lowercase = make_linear_from_emb(model.model.shared )
__lowercase = model.model(lowerCamelCase )[0]
# Check results
if fairseq_output.shape != new_model_outputs.shape:
raise ValueError(
F'`fairseq_output` shape and `new_model_output` shape are different: {fairseq_output.shape=}, {new_model_outputs.shape}' )
if (fairseq_output != new_model_outputs).any().item():
raise ValueError("""Some values in `fairseq_output` are different from `new_model_outputs`""" )
Path(lowerCamelCase ).mkdir(exist_ok=lowerCamelCase )
model.save_pretrained(lowerCamelCase )
if __name__ == "__main__":
__UpperCamelCase : Tuple = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""fairseq_path""", type=str, help="""bart.large, bart.large.cnn or a path to a model.pt on local filesystem."""
)
parser.add_argument("""pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
parser.add_argument(
"""--hf_config""", default=None, type=str, help="""Which huggingface architecture to use: bart-large-xsum"""
)
__UpperCamelCase : Any = parser.parse_args()
convert_bart_checkpoint(args.fairseq_path, args.pytorch_dump_folder_path, hf_checkpoint_name=args.hf_config)
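# --- Usage sketch (not part of the original file; the script filename below is
# an assumption) ---
#     python convert_bart_checkpoint.py bart.large.mnli ./bart-large-mnli
# For a local xsum checkpoint, pass the model.pt path plus a matching config:
#     python convert_bart_checkpoint.py bart_xsum/model.pt ./bart-xsum --hf_config facebook/bart-large-xsum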
| 80 |
"""simple docstring"""
from __future__ import annotations
def __A ( a_ :list[int]) -> int:
if not nums:
return 0
__a : Any = nums[0]
__a : Optional[Any] = 0
for num in nums[1:]:
__a , __a : Optional[Any] = (
max_excluding + num,
max(a_ , a_),
)
return max(a_ , a_)
if __name__ == "__main__":
import doctest
doctest.testmod()
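# Worked trace for the loop above (a sketch; the pair tracked at each step is
# (best sum including the current number, best sum excluding it)):
#     nums = [3, 2, 7, 10]
#     start (3, 0) -> after 2: (2, 3) -> after 7: (10, 3) -> after 10: (13, 10)
# max(13, 10) = 13, i.e. pick 3 and 10 with no two adjacent elements chosen.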
| 52 | 0 |
from transformers import BertTokenizerFast
from .custom_tokenization import CustomTokenizer
class a (_lowerCAmelCase ):
"""simple docstring"""
__UpperCAmelCase : Any = CustomTokenizer
pass
| 81 |
"""simple docstring"""
import unittest
from transformers import BigBirdTokenizer, BigBirdTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, require_torch, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
A = '''▁'''
A = get_tests_dir('''fixtures/test_sentencepiece.model''')
@require_sentencepiece
@require_tokenizers
class __lowercase ( _UpperCamelCase , unittest.TestCase ):
'''simple docstring'''
__lowerCAmelCase = BigBirdTokenizer
__lowerCAmelCase = BigBirdTokenizerFast
__lowerCAmelCase = True
__lowerCAmelCase = True
def _lowerCamelCase ( self ):
super().setUp()
__a : Dict = self.tokenizer_class(_UpperCAmelCase , keep_accents=_UpperCAmelCase )
tokenizer.save_pretrained(self.tmpdirname )
def _lowerCamelCase ( self ):
__a : List[str] = '''<s>'''
__a : Tuple = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(_UpperCAmelCase ) , _UpperCAmelCase )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(_UpperCAmelCase ) , _UpperCAmelCase )
def _lowerCamelCase ( self ):
__a : Union[str, Any] = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , '''<unk>''' )
self.assertEqual(vocab_keys[1] , '''<s>''' )
self.assertEqual(vocab_keys[-1] , '''[MASK]''' )
self.assertEqual(len(_UpperCAmelCase ) , 1004 )
def _lowerCamelCase ( self ):
self.assertEqual(self.get_tokenizer().vocab_size , 1000 )
def _lowerCamelCase ( self ):
if not self.test_rust_tokenizer:
return
__a : Dict = self.get_tokenizer()
__a : Any = self.get_rust_tokenizer()
__a : int = '''I was born in 92000, and this is falsé.'''
__a : Optional[Any] = tokenizer.tokenize(_UpperCAmelCase )
__a : List[str] = rust_tokenizer.tokenize(_UpperCAmelCase )
self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase )
__a : Dict = tokenizer.encode(_UpperCAmelCase , add_special_tokens=_UpperCAmelCase )
__a : Any = rust_tokenizer.encode(_UpperCAmelCase , add_special_tokens=_UpperCAmelCase )
self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase )
__a : Tuple = self.get_rust_tokenizer()
__a : Tuple = tokenizer.encode(_UpperCAmelCase )
__a : List[Any] = rust_tokenizer.encode(_UpperCAmelCase )
self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase )
def _lowerCamelCase ( self ):
__a : Optional[int] = BigBirdTokenizer(_UpperCAmelCase , keep_accents=_UpperCAmelCase )
__a : Optional[int] = tokenizer.tokenize('''This is a test''' )
self.assertListEqual(_UpperCAmelCase , ['''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est'''] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(_UpperCAmelCase ) , [285, 46, 10, 170, 382] , )
__a : Optional[int] = tokenizer.tokenize('''I was born in 92000, and this is falsé.''' )
self.assertListEqual(
_UpperCAmelCase , [
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''9''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''é''',
'''.''',
] , )
__a : Optional[Any] = tokenizer.convert_tokens_to_ids(_UpperCAmelCase )
self.assertListEqual(
_UpperCAmelCase , [8, 21, 84, 55, 24, 19, 7, 0, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 0, 4] , )
__a : Optional[int] = tokenizer.convert_ids_to_tokens(_UpperCAmelCase )
self.assertListEqual(
_UpperCAmelCase , [
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''<unk>''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''<unk>''',
'''.''',
] , )
@cached_property
def _lowerCamelCase ( self ):
return BigBirdTokenizer.from_pretrained('''google/bigbird-roberta-base''' )
@slow
def _lowerCamelCase ( self ):
__a : str = '''Hello World!'''
__a : str = [65, 18536, 2260, 101, 66]
self.assertListEqual(_UpperCAmelCase , self.big_tokenizer.encode(_UpperCAmelCase ) )
@slow
def _lowerCamelCase ( self ):
__a : Any = (
'''This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) " [ ] ! : - . Also we will'''
''' add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth'''
)
# fmt: off
__a : Optional[Any] = [65, 871, 419, 358, 946, 991, 2521, 452, 358, 1357, 387, 7751, 3536, 112, 985, 456, 126, 865, 938, 5400, 5734, 458, 1368, 467, 786, 2462, 5246, 1159, 633, 865, 4519, 457, 582, 852, 2557, 427, 916, 508, 405, 34324, 497, 391, 408, 11342, 1244, 385, 100, 938, 985, 456, 574, 362, 12597, 3200, 3129, 1172, 66] # noqa: E231
# fmt: on
self.assertListEqual(_UpperCAmelCase , self.big_tokenizer.encode(_UpperCAmelCase ) )
@require_torch
@slow
def _lowerCamelCase ( self ):
import torch
from transformers import BigBirdConfig, BigBirdModel
# Build sequence
        first_ten_tokens = list(self.big_tokenizer.get_vocab().keys() )[:10]
        sequence = ''' '''.join(first_ten_tokens )
        encoded_sequence = self.big_tokenizer.encode_plus(sequence , return_tensors='''pt''' , return_token_type_ids=False )
        batch_encoded_sequence = self.big_tokenizer.batch_encode_plus(
            [sequence + ''' ''' + sequence] , return_tensors='''pt''' , return_token_type_ids=False )
        config = BigBirdConfig(attention_type='''original_full''' )
        model = BigBirdModel(config )
        assert model.get_input_embeddings().weight.shape[0] >= self.big_tokenizer.vocab_size
        with torch.no_grad():
            model(**encoded_sequence )
            model(**batch_encoded_sequence )
@slow
def _lowerCamelCase ( self ):
        tokenizer = BigBirdTokenizer.from_pretrained('''google/bigbird-roberta-base''' )
        decoded_text = tokenizer.decode(tokenizer('''Paris is the [MASK].''' ).input_ids )
        self.assertTrue(decoded_text == '''[CLS] Paris is the[MASK].[SEP]''' )
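        # Note on the expected string above: BigBird's decoder does not reinsert the
        # space the tokenizer absorbs before special tokens, hence "the[MASK].[SEP]".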
@slow
    def test_tokenizer_integration( self ):
# fmt: off
__a : Optional[Any] = {'''input_ids''': [[65, 39286, 458, 36335, 2001, 456, 13073, 13266, 455, 113, 7746, 1741, 11157, 391, 13073, 13266, 455, 113, 3967, 35412, 113, 4936, 109, 3870, 2377, 113, 30084, 45720, 458, 134, 17496, 112, 503, 11672, 113, 118, 112, 5665, 13347, 38687, 112, 1496, 31389, 112, 3268, 47264, 134, 962, 112, 16377, 8035, 23130, 430, 12169, 15518, 28592, 458, 146, 41697, 109, 391, 12169, 15518, 16689, 458, 146, 41358, 109, 452, 726, 4034, 111, 763, 35412, 5082, 388, 1903, 111, 9051, 391, 2870, 48918, 1900, 1123, 550, 998, 112, 9586, 15985, 455, 391, 410, 22955, 37636, 114, 66], [65, 448, 17496, 419, 3663, 385, 763, 113, 27533, 2870, 3283, 13043, 1639, 24713, 523, 656, 24013, 18550, 2521, 517, 27014, 21244, 420, 1212, 1465, 391, 927, 4833, 388, 578, 11786, 114, 66, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [65, 484, 2169, 7687, 21932, 18146, 726, 363, 17032, 3391, 114, 66, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=_UpperCAmelCase , model_name='''google/bigbird-roberta-base''' , revision='''215c99f1600e06f83acce68422f2035b2b5c3510''' , )
| 52 | 0 |
"""simple docstring"""
import unittest
from transformers import MraConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_torch_available():
import torch
from transformers import (
MraForMaskedLM,
MraForMultipleChoice,
MraForQuestionAnswering,
MraForSequenceClassification,
MraForTokenClassification,
MraModel,
)
from transformers.models.mra.modeling_mra import MRA_PRETRAINED_MODEL_ARCHIVE_LIST
class MraModelTester :
    '''simple docstring'''
    def __init__( self , parent , batch_size=2 , seq_length=8 , is_training=True , use_input_mask=True , use_token_type_ids=True , use_labels=True , vocab_size=99 , hidden_size=16 , num_hidden_layers=5 , num_attention_heads=2 , intermediate_size=36 , hidden_act="gelu" , hidden_dropout_prob=0.0 , attention_probs_dropout_prob=0.0 , max_position_embeddings=512 , type_vocab_size=16 , type_sequence_label_size=2 , initializer_range=0.02 , num_labels=3 , num_choices=4 , scope=None , ):
        '''simple docstring'''
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
    def prepare_config_and_inputs( self ):
        '''simple docstring'''
        input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length] )
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size] , self.type_sequence_label_size )
            token_labels = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
            choice_labels = ids_tensor([self.batch_size] , self.num_choices )
        config = self.get_config()
        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def get_config( self ):
        '''simple docstring'''
        return MraConfig(
            vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=False , initializer_range=self.initializer_range , )
    def get_pipeline_config( self ):
        '''simple docstring'''
        config = self.get_config()
        config.vocab_size = 300
        return config
    def prepare_config_and_inputs_for_decoder( self ):
        '''simple docstring'''
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = self.prepare_config_and_inputs()
        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] )
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
return (
config,
input_ids,
token_type_ids,
input_mask,
sequence_labels,
token_labels,
choice_labels,
encoder_hidden_states,
encoder_attention_mask,
)
    def create_and_check_model( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        '''simple docstring'''
        model = MraModel(config=config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids , attention_mask=input_mask , token_type_ids=token_type_ids )
        result = model(input_ids , token_type_ids=token_type_ids )
        result = model(input_ids )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
    def create_and_check_model_as_decoder( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels , encoder_hidden_states , encoder_attention_mask , ):
        '''simple docstring'''
        config.add_cross_attention = True
        model = MraModel(config )
        model.to(torch_device )
        model.eval()
        result = model(
            input_ids , attention_mask=input_mask , token_type_ids=token_type_ids , encoder_hidden_states=encoder_hidden_states , encoder_attention_mask=encoder_attention_mask , )
        result = model(
            input_ids , attention_mask=input_mask , token_type_ids=token_type_ids , encoder_hidden_states=encoder_hidden_states , )
        result = model(input_ids , attention_mask=input_mask , token_type_ids=token_type_ids )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
    def create_and_check_for_masked_lm( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        '''simple docstring'''
        model = MraForMaskedLM(config=config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids , attention_mask=input_mask , token_type_ids=token_type_ids , labels=token_labels )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
    def create_and_check_for_question_answering( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        '''simple docstring'''
        model = MraForQuestionAnswering(config=config )
        model.to(torch_device )
        model.eval()
        result = model(
            input_ids , attention_mask=input_mask , token_type_ids=token_type_ids , start_positions=sequence_labels , end_positions=sequence_labels , )
        self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
        self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
    def create_and_check_for_sequence_classification( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        '''simple docstring'''
        config.num_labels = self.num_labels
        model = MraForSequenceClassification(config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids , attention_mask=input_mask , token_type_ids=token_type_ids , labels=sequence_labels )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
    def create_and_check_for_token_classification( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        '''simple docstring'''
        config.num_labels = self.num_labels
        model = MraForTokenClassification(config=config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids , attention_mask=input_mask , token_type_ids=token_type_ids , labels=token_labels )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
    def create_and_check_for_multiple_choice( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        '''simple docstring'''
        config.num_choices = self.num_choices
        model = MraForMultipleChoice(config=config )
        model.to(torch_device )
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
        result = model(
            multiple_choice_inputs_ids , attention_mask=multiple_choice_input_mask , token_type_ids=multiple_choice_token_type_ids , labels=choice_labels , )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
    def prepare_config_and_inputs_for_common( self ):
        '''simple docstring'''
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class MraModelTest( ModelTesterMixin , unittest.TestCase ):
    '''simple docstring'''
    all_model_classes = (
        (
            MraModel,
            MraForMaskedLM,
            MraForMultipleChoice,
            MraForQuestionAnswering,
            MraForSequenceClassification,
            MraForTokenClassification,
        )
        if is_torch_available()
        else ()
    )
    test_pruning = False
    test_headmasking = False
    test_torchscript = False
    has_attentions = False
    all_generative_model_classes = ()
    def setUp( self ):
        '''simple docstring'''
        self.model_tester = MraModelTester(self )
        self.config_tester = ConfigTester(self , config_class=MraConfig , hidden_size=37 )
    def test_config( self ):
        '''simple docstring'''
        self.config_tester.run_common_tests()
    def test_model( self ):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs )
    def test_model_various_embeddings( self ):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs )
    def test_for_masked_lm( self ):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs )
    def test_for_multiple_choice( self ):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs )
    def test_for_question_answering( self ):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs )
    def test_for_sequence_classification( self ):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs )
    def test_for_token_classification( self ):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs )
    @slow
    def test_model_from_pretrained( self ):
        '''simple docstring'''
        for model_name in MRA_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = MraModel.from_pretrained(model_name )
            self.assertIsNotNone(model )
    @unittest.skip(reason="MRA does not output attentions" )
    def test_attention_outputs( self ):
        '''simple docstring'''
        return
@require_torch
class MraModelIntegrationTest( unittest.TestCase ):
    '''simple docstring'''
    @slow
    def test_inference_no_head( self ):
        '''simple docstring'''
        model = MraModel.from_pretrained("uw-madison/mra-base-512-4" )
        input_ids = torch.arange(256 ).unsqueeze(0 )
        with torch.no_grad():
            output = model(input_ids )[0]
        expected_shape = torch.Size((1, 256, 768) )
        self.assertEqual(output.shape , expected_shape )
        expected_slice = torch.tensor(
            [[[-0.0140, 0.0830, -0.0381], [0.1546, 0.1402, 0.0220], [0.1162, 0.0851, 0.0165]]] )
        self.assertTrue(torch.allclose(output[:, :3, :3] , expected_slice , atol=1e-4 ) )
    @slow
    def test_inference_masked_lm( self ):
        '''simple docstring'''
        model = MraForMaskedLM.from_pretrained("uw-madison/mra-base-512-4" )
        input_ids = torch.arange(256 ).unsqueeze(0 )
        with torch.no_grad():
            output = model(input_ids )[0]
        vocab_size = 50265
        expected_shape = torch.Size((1, 256, vocab_size) )
        self.assertEqual(output.shape , expected_shape )
        expected_slice = torch.tensor(
            [[[9.2595, -3.6038, 11.8819], [9.3869, -3.2693, 11.0956], [11.8524, -3.4938, 13.1210]]] )
        self.assertTrue(torch.allclose(output[:, :3, :3] , expected_slice , atol=1e-4 ) )
    @slow
    def test_inference_masked_lm_long_input( self ):
        '''simple docstring'''
        model = MraForMaskedLM.from_pretrained("uw-madison/mra-base-4096-8-d3" )
        input_ids = torch.arange(4096 ).unsqueeze(0 )
        with torch.no_grad():
            output = model(input_ids )[0]
        vocab_size = 50265
        expected_shape = torch.Size((1, 4096, vocab_size) )
        self.assertEqual(output.shape , expected_shape )
        expected_slice = torch.tensor(
            [[[5.4789, -2.3564, 7.5064], [7.9067, -1.3369, 9.9668], [9.0712, -1.8106, 7.0380]]] )
        self.assertTrue(torch.allclose(output[:, :3, :3] , expected_slice , atol=1e-4 ) )
| 82 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
A = logging.get_logger(__name__)
A = {
'''facebook/convnextv2-tiny-1k-224''': '''https://huggingface.co/facebook/convnextv2-tiny-1k-224/resolve/main/config.json''',
}
class ConvNextV2Config( BackboneConfigMixin , PretrainedConfig ):
    '''simple docstring'''
    model_type = '''convnextv2'''
    def __init__( self , num_channels=3 , patch_size=4 , num_stages=4 , hidden_sizes=None , depths=None , hidden_act="gelu" , initializer_range=0.0_2 , layer_norm_eps=1e-1_2 , drop_path_rate=0.0 , image_size=224 , out_features=None , out_indices=None , **kwargs , ):
        super().__init__(**kwargs )
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.num_stages = num_stages
        self.hidden_sizes = [96, 192, 384, 768] if hidden_sizes is None else hidden_sizes
        self.depths = [3, 3, 9, 3] if depths is None else depths
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.drop_path_rate = drop_path_rate
        self.image_size = image_size
        self.stage_names = ['''stem'''] + [f"""stage{idx}""" for idx in range(1 , len(self.depths ) + 1 )]
        self._out_features , self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features , out_indices=out_indices , stage_names=self.stage_names )
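# Hedged usage sketch: with the defaults above, `stage_names` resolves to
# ["stem", "stage1", "stage2", "stage3", "stage4"], and out_features/out_indices
# are aligned by get_aligned_output_features_output_indices, e.g.
#   config = ConvNextV2Config(out_features=["stage4"])
#   config.stage_names[-1]  # -> "stage4"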
| 52 | 0 |
"""simple docstring"""
import math
from typing import Any, Callable, List, Optional, Tuple, Union
import numpy as np
import torch
from ...models import TaFilmDecoder
from ...schedulers import DDPMScheduler
from ...utils import is_onnx_available, logging, randn_tensor
if is_onnx_available():
from ..onnx_utils import OnnxRuntimeModel
from ..pipeline_utils import AudioPipelineOutput, DiffusionPipeline
from .continous_encoder import SpectrogramContEncoder
from .notes_encoder import SpectrogramNotesEncoder
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name
TARGET_FEATURE_LENGTH = 256
class SpectrogramDiffusionPipeline( DiffusionPipeline ):
    _optional_components = ["melgan"]
    def __init__( self , notes_encoder : SpectrogramNotesEncoder , continuous_encoder : SpectrogramContEncoder , decoder : TaFilmDecoder , scheduler : DDPMScheduler , melgan : OnnxRuntimeModel if is_onnx_available() else Any , ):
        """simple docstring"""
        super().__init__()
        # From MELGAN
        self.min_value = math.log(1E-5 )  # Matches MelGAN training.
        self.max_value = 4.0  # Largest value for most examples
        self.n_dims = 1_2_8
        self.register_modules(
            notes_encoder=notes_encoder , continuous_encoder=continuous_encoder , decoder=decoder , scheduler=scheduler , melgan=melgan , )
    def scale_features( self , features , output_range=(-1.0, 1.0) , clip=False ):
        """simple docstring"""
        min_out , max_out = output_range
        if clip:
            features = torch.clip(features , self.min_value , self.max_value )
        # Scale to [0, 1].
        zero_one = (features - self.min_value) / (self.max_value - self.min_value)
        # Scale to [min_out, max_out].
        return zero_one * (max_out - min_out) + min_out
    def scale_to_features( self , outputs , input_range=(-1.0, 1.0) , clip=False ):
        """simple docstring"""
        min_out , max_out = input_range
        outputs = torch.clip(outputs , min_out , max_out ) if clip else outputs
        # Scale to [0, 1].
        zero_one = (outputs - min_out) / (max_out - min_out)
        # Scale to [self.min_value, self.max_value].
        return zero_one * (self.max_value - self.min_value) + self.min_value
    def encode( self , input_tokens , continuous_inputs , continuous_mask ):
        """simple docstring"""
        tokens_mask = input_tokens > 0
        tokens_encoded , tokens_mask = self.notes_encoder(
            encoder_input_tokens=input_tokens , encoder_inputs_mask=tokens_mask )
        continuous_encoded , continuous_mask = self.continuous_encoder(
            encoder_inputs=continuous_inputs , encoder_inputs_mask=continuous_mask )
        return [(tokens_encoded, tokens_mask), (continuous_encoded, continuous_mask)]
    def decode( self , encodings_and_masks , input_tokens , noise_time ):
        """simple docstring"""
        timesteps = noise_time
        if not torch.is_tensor(timesteps ):
            timesteps = torch.tensor([timesteps] , dtype=torch.long , device=input_tokens.device )
        elif torch.is_tensor(timesteps ) and len(timesteps.shape ) == 0:
            timesteps = timesteps[None].to(input_tokens.device )
        # broadcast to batch dimension in a way that's compatible with ONNX/Core ML
        timesteps = timesteps * torch.ones(input_tokens.shape[0] , dtype=timesteps.dtype , device=timesteps.device )
        logits = self.decoder(
            encodings_and_masks=encodings_and_masks , decoder_input_tokens=input_tokens , decoder_noise_time=timesteps )
        return logits
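    # Worked example for the two scalers above (illustrative numbers): with
    # min_value = log(1e-5) ~ -11.5129 and max_value = 4.0, scale_features maps
    # -11.5129 -> -1.0 and 4.0 -> 1.0 for output_range=(-1.0, 1.0), and
    # scale_to_features with input_range=(-1.0, 1.0) is its exact inverse.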
@torch.no_grad()
    def __call__( self , input_tokens : List[List[int]] , generator : Optional[torch.Generator] = None , num_inference_steps : int = 1_0_0 , return_dict : bool = True , output_type : str = "numpy" , callback : Optional[Callable[[int, int, torch.FloatTensor], None]] = None , callback_steps : int = 1 , ):
        """simple docstring"""
        if (callback_steps is None) or (
            callback_steps is not None and (not isinstance(callback_steps , int ) or callback_steps <= 0)
        ):
            raise ValueError(
                f'''`callback_steps` has to be a positive integer but is {callback_steps} of type'''
                f''' {type(callback_steps )}.''' )
        pred_mel = np.zeros([1, TARGET_FEATURE_LENGTH, self.n_dims] , dtype=np.float32 )
        full_pred_mel = np.zeros([1, 0, self.n_dims] , np.float32 )
        ones = torch.ones((1, TARGET_FEATURE_LENGTH) , dtype=bool , device=self.device )
        for i, encoder_input_tokens in enumerate(input_tokens ):
            if i == 0:
                encoder_continuous_inputs = torch.from_numpy(pred_mel[:1].copy() ).to(
                    device=self.device , dtype=self.decoder.dtype )
                # The first chunk has no previous context.
                encoder_continuous_mask = torch.zeros((1, TARGET_FEATURE_LENGTH) , dtype=bool , device=self.device )
            else:
                # The full song pipeline does not feed in a context feature, so the mask
                # will be all 0s after the feature converter. Because we know we're
                # feeding in a full context chunk from the previous prediction, set it
                # to all 1s.
                encoder_continuous_mask = ones
            encoder_continuous_inputs = self.scale_features(
                encoder_continuous_inputs , output_range=[-1.0, 1.0] , clip=True )
            encodings_and_masks = self.encode(
                input_tokens=torch.IntTensor([encoder_input_tokens] ).to(device=self.device ) , continuous_inputs=encoder_continuous_inputs , continuous_mask=encoder_continuous_mask , )
            # Sample encoder_continuous_inputs shaped gaussian noise to begin loop
            x = randn_tensor(
                shape=encoder_continuous_inputs.shape , generator=generator , device=self.device , dtype=self.decoder.dtype , )
            # set step values
            self.scheduler.set_timesteps(num_inference_steps )
            # Denoising diffusion loop
            for j, t in enumerate(self.progress_bar(self.scheduler.timesteps ) ):
                output = self.decode(
                    encodings_and_masks=encodings_and_masks , input_tokens=x , noise_time=t / self.scheduler.config.num_train_timesteps , )
                # Compute previous output: x_t -> x_t-1
                x = self.scheduler.step(output , t , x , generator=generator ).prev_sample
            mel = self.scale_to_features(x , input_range=[-1.0, 1.0] )
            encoder_continuous_inputs = mel[:1]
            pred_mel = mel.cpu().float().numpy()
            full_pred_mel = np.concatenate([full_pred_mel, pred_mel[:1]] , axis=1 )
            # call the callback, if provided
            if callback is not None and i % callback_steps == 0:
                callback(i , full_pred_mel )
            logger.info('''Generated segment''' , i )
        if output_type == "numpy" and not is_onnx_available():
            raise ValueError(
                '''Cannot return output in \'np\' format if ONNX is not available. Make sure to have ONNX installed or set \'output_type\' to \'mel\'.''' )
        elif output_type == "numpy" and self.melgan is None:
            raise ValueError(
                '''Cannot return output in \'np\' format if melgan component is not defined. Make sure to define `self.melgan` or set \'output_type\' to \'mel\'.''' )
        if output_type == "numpy":
            output = self.melgan(input_features=full_pred_mel.astype(np.float32 ) )
        else:
            output = full_pred_mel
        if not return_dict:
            return (output,)
        return AudioPipelineOutput(audios=output )
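# Hedged standalone sketch of the timestep-broadcast trick used in `decode` above:
# `Tensor.expand` does not export cleanly to ONNX/Core ML, so the scalar timestep
# is broadcast by multiplying into a ones tensor instead.
def _broadcast_timestep_sketch():
    t = torch.tensor([37] , dtype=torch.long )           # shape (1,)
    batch_size = 4
    return t * torch.ones(batch_size , dtype=t.dtype )   # shape (4,), every entry 37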
| 83 |
"""simple docstring"""
import torch
from diffusers import DDPMScheduler
from .test_schedulers import SchedulerCommonTest
class DDPMSchedulerTest( SchedulerCommonTest ):
'''simple docstring'''
    scheduler_classes = (DDPMScheduler,)
    def get_scheduler_config( self , **kwargs ):
        config = {
'''num_train_timesteps''': 1000,
'''beta_start''': 0.0_0_0_1,
'''beta_end''': 0.0_2,
'''beta_schedule''': '''linear''',
'''variance_type''': '''fixed_small''',
'''clip_sample''': True,
}
        config.update(**kwargs )
return config
    def test_timesteps( self ):
        for timesteps in [1, 5, 100, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps )
    def test_betas( self ):
        for beta_start, beta_end in zip([0.0_0_0_1, 0.0_0_1, 0.0_1, 0.1] , [0.0_0_2, 0.0_2, 0.2, 2] ):
            self.check_over_configs(beta_start=beta_start , beta_end=beta_end )
    def test_schedules( self ):
        for schedule in ["linear", "squaredcos_cap_v2"]:
            self.check_over_configs(beta_schedule=schedule )
    def test_variance_type( self ):
        for variance in ["fixed_small", "fixed_large", "other"]:
            self.check_over_configs(variance_type=variance )
    def test_clip_sample( self ):
        for clip_sample in [True, False]:
            self.check_over_configs(clip_sample=clip_sample )
    def test_thresholding( self ):
        self.check_over_configs(thresholding=False )
        for threshold in [0.5, 1.0, 2.0]:
            for prediction_type in ["epsilon", "sample", "v_prediction"]:
                self.check_over_configs(
                    thresholding=True , prediction_type=prediction_type , sample_max_value=threshold , )
    def test_prediction_type( self ):
        for prediction_type in ["epsilon", "sample", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type )
    def test_time_indices( self ):
        for t in [0, 500, 999]:
            self.check_over_forward(time_step=t )
    def test_variance( self ):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config )
assert torch.sum(torch.abs(scheduler._get_variance(0 ) - 0.0 ) ) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(487 ) - 0.0_0_9_7_9 ) ) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(999 ) - 0.0_2 ) ) < 1e-5
    def test_full_loop_no_noise( self ):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config )
        num_trained_timesteps = len(scheduler )
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        generator = torch.manual_seed(0 )
        for t in reversed(range(num_trained_timesteps ) ):
            # 1. predict noise residual
            residual = model(sample , t )
            # 2. predict previous mean of sample x_t-1
            pred_prev_sample = scheduler.step(residual , t , sample , generator=generator ).prev_sample
            # if t > 0:
            #     noise = self.dummy_sample_deter
            #     variance = scheduler.get_variance(t) ** (0.5) * noise
            #
            # sample = pred_prev_sample + variance
            sample = pred_prev_sample
        result_sum = torch.sum(torch.abs(sample ) )
        result_mean = torch.mean(torch.abs(sample ) )
        assert abs(result_sum.item() - 2_5_8.9_6_0_6 ) < 1e-2
        assert abs(result_mean.item() - 0.3_3_7_2 ) < 1e-3
    def test_full_loop_with_v_prediction( self ):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(prediction_type='''v_prediction''' )
        scheduler = scheduler_class(**scheduler_config )
        num_trained_timesteps = len(scheduler )
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        generator = torch.manual_seed(0 )
        for t in reversed(range(num_trained_timesteps ) ):
            # 1. predict noise residual
            residual = model(sample , t )
            # 2. predict previous mean of sample x_t-1
            pred_prev_sample = scheduler.step(residual , t , sample , generator=generator ).prev_sample
            # if t > 0:
            #     noise = self.dummy_sample_deter
            #     variance = scheduler.get_variance(t) ** (0.5) * noise
            #
            # sample = pred_prev_sample + variance
            sample = pred_prev_sample
        result_sum = torch.sum(torch.abs(sample ) )
        result_mean = torch.mean(torch.abs(sample ) )
        assert abs(result_sum.item() - 2_0_2.0_2_9_6 ) < 1e-2
        assert abs(result_mean.item() - 0.2_6_3_1 ) < 1e-3
    def test_custom_timesteps( self ):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config )
        timesteps = [100, 87, 50, 1, 0]
        scheduler.set_timesteps(timesteps=timesteps )
        scheduler_timesteps = scheduler.timesteps
        for i, timestep in enumerate(scheduler_timesteps ):
            if i == len(scheduler_timesteps ) - 1:
                expected_prev_t = -1
            else:
                expected_prev_t = timesteps[i + 1]
            prev_t = scheduler.previous_timestep(timestep )
            prev_t = prev_t.item()
            self.assertEqual(prev_t , expected_prev_t )
    def test_custom_timesteps_increasing_order( self ):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config )
        timesteps = [100, 87, 50, 51, 0]
        with self.assertRaises(ValueError , msg='''`custom_timesteps` must be in descending order.''' ):
            scheduler.set_timesteps(timesteps=timesteps )
    def test_custom_timesteps_passing_both_num_inference_steps_and_timesteps( self ):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config )
        timesteps = [100, 87, 50, 1, 0]
        num_inference_steps = len(timesteps )
        with self.assertRaises(ValueError , msg='''Can only pass one of `num_inference_steps` or `custom_timesteps`.''' ):
            scheduler.set_timesteps(num_inference_steps=num_inference_steps , timesteps=timesteps )
    def test_custom_timesteps_too_large( self ):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config )
        timesteps = [scheduler.config.num_train_timesteps]
        with self.assertRaises(
            ValueError , msg=f'''`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}''' , ):
            scheduler.set_timesteps(timesteps=timesteps )
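# Minimal standalone sketch of the reverse-diffusion loop that the two full-loop
# tests above exercise (hedged: the constant-zero "model" is a stand-in for a
# trained noise predictor, not part of the test suite).
def _ddpm_loop_sketch():
    scheduler = DDPMScheduler(num_train_timesteps=10 )
    sample = torch.randn(1 , 3 , 8 , 8 , generator=torch.manual_seed(0 ) )
    for t in reversed(range(10 ) ):
        residual = torch.zeros_like(sample )                         # 1. predict noise residual
        sample = scheduler.step(residual , t , sample ).prev_sample  # 2. x_t -> x_t-1
    return sample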
| 52 | 0 |
import argparse
import json
import os
from pathlib import Path
import requests
import torch
from transformers import JukeboxConfig, JukeboxModel
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
PREFIX = '''https://openaipublic.azureedge.net/jukebox/models/'''
MODEL_MAPPING = {
'''jukebox-1b-lyrics''': [
'''5b/vqvae.pth.tar''',
'''5b/prior_level_0.pth.tar''',
'''5b/prior_level_1.pth.tar''',
'''1b_lyrics/prior_level_2.pth.tar''',
],
'''jukebox-5b-lyrics''': [
'''5b/vqvae.pth.tar''',
'''5b/prior_level_0.pth.tar''',
'''5b/prior_level_1.pth.tar''',
'''5b_lyrics/prior_level_2.pth.tar''',
],
}
def replace_key( key ):
    if key.endswith('.model.1.bias' ) and len(key.split('.' ) ) > 10:
        key = key.replace('.model.1.bias' , '.conv1d_1.bias' )
    elif key.endswith('.model.1.weight' ) and len(key.split('.' ) ) > 10:
        key = key.replace('.model.1.weight' , '.conv1d_1.weight' )
    elif key.endswith('.model.3.bias' ) and len(key.split('.' ) ) > 10:
        key = key.replace('.model.3.bias' , '.conv1d_2.bias' )
    elif key.endswith('.model.3.weight' ) and len(key.split('.' ) ) > 10:
        key = key.replace('.model.3.weight' , '.conv1d_2.weight' )
    if "conditioner_blocks.0." in key:
        key = key.replace('conditioner_blocks.0' , 'conditioner_blocks' )
    if "prime_prior" in key:
        key = key.replace('prime_prior' , 'encoder' )
    if ".emb." in key and "total" not in key and "absolute" not in key and "relative" not in key:
        key = key.replace('.emb.' , '.' )
    if key.endswith('k' ):  # replace vqvae.X.k with vqvae.X.codebook
        return key.replace('.k' , '.codebook' )
    if "y_emb." in key:
        return key.replace('y_emb.' , 'metadata_embedding.' )
    if "x_emb.emb." in key:
        key = key.replace('0.x_emb.emb' , 'embed_tokens' )
    if "prime_state_ln" in key:
        return key.replace('prime_state_ln' , 'encoder.final_layer_norm' )
    if ".ln" in key:
        return key.replace('.ln' , '.layer_norm' )
    if "_ln" in key:
        return key.replace('_ln' , '_layer_norm' )
    if "prime_state_proj" in key:
        return key.replace('prime_state_proj' , 'encoder.proj_in' )
    if "prime_x_out" in key:
        return key.replace('prime_x_out' , 'encoder.lm_head' )
    if "prior.x_out" in key:
        return key.replace('x_out' , 'fc_proj_out' )
    if "x_emb" in key:
        return key.replace('x_emb' , 'embed_tokens' )
    return key
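# Worked examples for replace_key (illustrative key names, not taken from a real
# checkpoint):
#   replace_key('prior.x_out.weight')    -> 'prior.fc_proj_out.weight'
#   replace_key('prime_state_ln.weight') -> 'encoder.final_layer_norm.weight'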
def fix_jukebox_keys( state_dict , model_state_dict , key_prefix , mapping ):
    new_dict = {}
    import re
    re_encoder_block_conv_in = re.compile(r'encoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).(bias|weight)' )
    re_encoder_block_resnet = re.compile(
        r'encoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)' )
    re_encoder_block_proj_out = re.compile(r'encoders.(\d*).level_blocks.(\d*).model.(\d*).(bias|weight)' )
    re_decoder_block_conv_out = re.compile(r'decoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).(bias|weight)' )
    re_decoder_block_resnet = re.compile(
        r'decoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)' )
    re_decoder_block_proj_in = re.compile(r'decoders.(\d*).level_blocks.(\d*).model.(\d*).(bias|weight)' )
    re_prior_cond_conv_out = re.compile(r'conditioner_blocks.(\d*).cond.model.(\d*).(\d).(bias|weight)' )
    re_prior_cond_resnet = re.compile(
        r'conditioner_blocks.(\d*).cond.model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)' )
    re_prior_cond_proj_in = re.compile(r'conditioner_blocks.(\d*).cond.model.(\d*).(bias|weight)' )
    for original_key, value in state_dict.items():
        # rename vqvae.encoder keys
        if re_encoder_block_conv_in.fullmatch(original_key ):
            regex_match = re_encoder_block_conv_in.match(original_key )
            groups = regex_match.groups()
            block_index = int(groups[2] ) * 2 + int(groups[3] )
            re_new_key = F'''encoders.{groups[0]}.level_blocks.{groups[1]}.downsample_block.{block_index}.{groups[-1]}'''
            key = re_encoder_block_conv_in.sub(re_new_key , original_key )
        elif re_encoder_block_resnet.fullmatch(original_key ):
            regex_match = re_encoder_block_resnet.match(original_key )
            groups = regex_match.groups()
            block_index = int(groups[2] ) * 2 + int(groups[3] )
            conv_index = {'1': 1, '3': 2}[groups[-2]]
            prefix = F'''encoders.{groups[0]}.level_blocks.{groups[1]}.downsample_block.{block_index}.'''
            resnet_block = F'''resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}'''
            re_new_key = prefix + resnet_block
            key = re_encoder_block_resnet.sub(re_new_key , original_key )
        elif re_encoder_block_proj_out.fullmatch(original_key ):
            regex_match = re_encoder_block_proj_out.match(original_key )
            groups = regex_match.groups()
            re_new_key = F'''encoders.{groups[0]}.level_blocks.{groups[1]}.proj_out.{groups[-1]}'''
            key = re_encoder_block_proj_out.sub(re_new_key , original_key )
        # rename vqvae.decoder keys
        elif re_decoder_block_conv_out.fullmatch(original_key ):
            regex_match = re_decoder_block_conv_out.match(original_key )
            groups = regex_match.groups()
            block_index = int(groups[2] ) * 2 + int(groups[3] ) - 2
            re_new_key = F'''decoders.{groups[0]}.level_blocks.{groups[1]}.upsample_block.{block_index}.{groups[-1]}'''
            key = re_decoder_block_conv_out.sub(re_new_key , original_key )
        elif re_decoder_block_resnet.fullmatch(original_key ):
            regex_match = re_decoder_block_resnet.match(original_key )
            groups = regex_match.groups()
            block_index = int(groups[2] ) * 2 + int(groups[3] ) - 2
            conv_index = {'1': 1, '3': 2}[groups[-2]]
            prefix = F'''decoders.{groups[0]}.level_blocks.{groups[1]}.upsample_block.{block_index}.'''
            resnet_block = F'''resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}'''
            re_new_key = prefix + resnet_block
            key = re_decoder_block_resnet.sub(re_new_key , original_key )
        elif re_decoder_block_proj_in.fullmatch(original_key ):
            regex_match = re_decoder_block_proj_in.match(original_key )
            groups = regex_match.groups()
            re_new_key = F'''decoders.{groups[0]}.level_blocks.{groups[1]}.proj_in.{groups[-1]}'''
            key = re_decoder_block_proj_in.sub(re_new_key , original_key )
        # rename prior cond.model to upsampler.upsample_block and resnet
        elif re_prior_cond_conv_out.fullmatch(original_key ):
            regex_match = re_prior_cond_conv_out.match(original_key )
            groups = regex_match.groups()
            block_index = int(groups[1] ) * 2 + int(groups[2] ) - 2
            re_new_key = F'''conditioner_blocks.upsampler.upsample_block.{block_index}.{groups[-1]}'''
            key = re_prior_cond_conv_out.sub(re_new_key , original_key )
        elif re_prior_cond_resnet.fullmatch(original_key ):
            regex_match = re_prior_cond_resnet.match(original_key )
            groups = regex_match.groups()
            block_index = int(groups[1] ) * 2 + int(groups[2] ) - 2
            conv_index = {'1': 1, '3': 2}[groups[-2]]
            prefix = F'''conditioner_blocks.upsampler.upsample_block.{block_index}.'''
            resnet_block = F'''resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}'''
            re_new_key = prefix + resnet_block
            key = re_prior_cond_resnet.sub(re_new_key , original_key )
        elif re_prior_cond_proj_in.fullmatch(original_key ):
            regex_match = re_prior_cond_proj_in.match(original_key )
            groups = regex_match.groups()
            re_new_key = F'''conditioner_blocks.upsampler.proj_in.{groups[-1]}'''
            key = re_prior_cond_proj_in.sub(re_new_key , original_key )
        # keep original key
        else:
            key = original_key
        key = replace_key(key )
        if F'''{key_prefix}.{key}''' not in model_state_dict or key is None:
            print(F'''failed converting {original_key} to {key}, does not match''' )
        # handle missmatched shape
        elif value.shape != model_state_dict[F'''{key_prefix}.{key}'''].shape:
            val = model_state_dict[F'''{key_prefix}.{key}''']
            print(F'''{original_key}-> {key} : \nshape {val.shape} and {value.shape}, do not match''' )
            key = original_key
        mapping[key] = original_key
        new_dict[key] = value
    return new_dict
@torch.no_grad()
def convert_openai_checkpoint( model_name=None , pytorch_dump_folder_path=None ):
    for file in MODEL_MAPPING[model_name]:
        if not os.path.isfile(F'''{pytorch_dump_folder_path}/{file.split('/' )[-1]}''' ):
            r = requests.get(F'''{PREFIX}{file}''' , allow_redirects=True )
            os.makedirs(F'''{pytorch_dump_folder_path}/''' , exist_ok=True )
            open(F'''{pytorch_dump_folder_path}/{file.split('/' )[-1]}''' , 'wb' ).write(r.content )
    model_to_convert = MODEL_MAPPING[model_name.split('/' )[-1]]
    config = JukeboxConfig.from_pretrained(model_name )
    model = JukeboxModel(config )
    weight_dict = []
    mapping = {}
    for i, dict_name in enumerate(model_to_convert ):
        old_dic = torch.load(F'''{pytorch_dump_folder_path}/{dict_name.split('/' )[-1]}''' )['model']
        new_dic = {}
        for k in old_dic.keys():
            if k.endswith('.b' ):
                new_dic[k.replace('b' , 'bias' )] = old_dic[k]
            elif k.endswith('.w' ):
                new_dic[k.replace('w' , 'weight' )] = old_dic[k]
            elif "level_2" not in dict_name and "cond.model." in k:
                new_dic[k.replace('.blocks.' , '.model.' )] = old_dic[k]
            else:
                new_dic[k] = old_dic[k]
        key_prefix = 'vqvae' if i == 0 else F'''priors.{3 - i}'''
        new_dic = fix_jukebox_keys(new_dic , model.state_dict() , key_prefix , mapping )
        weight_dict.append(new_dic )
    vqvae_state_dict = weight_dict.pop(0 )
    model.vqvae.load_state_dict(vqvae_state_dict )
    for i in range(len(weight_dict ) ):
        model.priors[i].load_state_dict(weight_dict[2 - i] )
    Path(pytorch_dump_folder_path ).mkdir(exist_ok=True )
    with open(F'''{pytorch_dump_folder_path}/mapping.json''' , 'w' ) as txtfile:
        json.dump(mapping , txtfile )
    print(F'''Saving model {model_name} to {pytorch_dump_folder_path}''' )
    model.save_pretrained(pytorch_dump_folder_path )
    return weight_dict
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--model_name''',
default='''jukebox-5b-lyrics''',
type=str,
help='''Name of the model you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''',
default='''jukebox-5b-lyrics-converted''',
type=str,
help='''Path to the output PyTorch model directory.''',
)
    args = parser.parse_args()
convert_openai_checkpoint(args.model_name, args.pytorch_dump_folder_path)
| 84 |
"""simple docstring"""
import itertools
import os
import random
import tempfile
import unittest
import numpy as np
from transformers import TvltFeatureExtractor, is_datasets_available
from transformers.testing_utils import check_json_file_has_correct_format, require_torch, require_torchaudio
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
if is_torch_available():
import torch
if is_datasets_available():
from datasets import load_dataset
global_rng = random.Random()
def floats_list(shape , scale=1.0 , rng=None , name=None ):
    if rng is None:
        rng = global_rng
    values = []
    for batch_idx in range(shape[0]):
        values.append([])
        for _ in range(shape[1]):
            values[-1].append(rng.random() * scale)
    return values
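# Example (illustrative): floats_list((2, 3)) returns a 2x3 nested list of floats
# drawn uniformly from [0, scale), using the module-level `global_rng` unless an
# explicit `rng` is supplied.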
class TvltFeatureExtractionTester( unittest.TestCase ):
'''simple docstring'''
    def __init__( self , parent , batch_size=7 , min_seq_length=400 , max_seq_length=2000 , spectrogram_length=2048 , feature_size=128 , num_audio_channels=1 , hop_length=512 , chunk_length=30 , sampling_rate=44100 , ):
        self.parent = parent
        self.batch_size = batch_size
        self.min_seq_length = min_seq_length
        self.max_seq_length = max_seq_length
        self.seq_length_diff = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
        self.spectrogram_length = spectrogram_length
        self.feature_size = feature_size
        self.num_audio_channels = num_audio_channels
        self.hop_length = hop_length
        self.chunk_length = chunk_length
        self.sampling_rate = sampling_rate
    def prepare_feat_extract_dict( self ):
return {
"spectrogram_length": self.spectrogram_length,
"feature_size": self.feature_size,
"num_audio_channels": self.num_audio_channels,
"hop_length": self.hop_length,
"chunk_length": self.chunk_length,
"sampling_rate": self.sampling_rate,
}
    def prepare_inputs_for_common( self , equal_length=False , numpify=False ):
        def _flatten(list_of_lists ):
            return list(itertools.chain(*list_of_lists ) )
        if equal_length:
            speech_inputs = [floats_list((self.max_seq_length, self.feature_size) ) for _ in range(self.batch_size )]
        else:
            # make sure that inputs increase in size
            speech_inputs = [
                floats_list((x, self.feature_size) )
                for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff )
            ]
        if numpify:
            speech_inputs = [np.asarray(x ) for x in speech_inputs]
        return speech_inputs
@require_torch
@require_torchaudio
class TvltFeatureExtractionTest( SequenceFeatureExtractionTestMixin , unittest.TestCase ):
'''simple docstring'''
    feature_extraction_class = TvltFeatureExtractor
    def setUp( self ):
        self.feat_extract_tester = TvltFeatureExtractionTester(self )
    def test_feat_extract_properties( self ):
        feature_extractor = self.feature_extraction_class(**self.feat_extract_dict )
        self.assertTrue(hasattr(feature_extractor , '''spectrogram_length''' ) )
        self.assertTrue(hasattr(feature_extractor , '''feature_size''' ) )
        self.assertTrue(hasattr(feature_extractor , '''num_audio_channels''' ) )
        self.assertTrue(hasattr(feature_extractor , '''hop_length''' ) )
        self.assertTrue(hasattr(feature_extractor , '''chunk_length''' ) )
        self.assertTrue(hasattr(feature_extractor , '''sampling_rate''' ) )
    def test_feat_extract_from_and_save_pretrained( self ):
        feat_extract_first = self.feature_extraction_class(**self.feat_extract_dict )
        with tempfile.TemporaryDirectory() as tmpdirname:
            saved_file = feat_extract_first.save_pretrained(tmpdirname )[0]
            check_json_file_has_correct_format(saved_file )
            feat_extract_second = self.feature_extraction_class.from_pretrained(tmpdirname )
        dict_first = feat_extract_first.to_dict()
        dict_second = feat_extract_second.to_dict()
        mel_1 = dict_first.pop('''mel_filters''' )
        mel_2 = dict_second.pop('''mel_filters''' )
        self.assertTrue(np.allclose(mel_1 , mel_2 ) )
        self.assertEqual(dict_first , dict_second )
    def test_feat_extract_to_json_file( self ):
        feat_extract_first = self.feature_extraction_class(**self.feat_extract_dict )
        with tempfile.TemporaryDirectory() as tmpdirname:
            json_file_path = os.path.join(tmpdirname , '''feat_extract.json''' )
            feat_extract_first.to_json_file(json_file_path )
            feat_extract_second = self.feature_extraction_class.from_json_file(json_file_path )
        dict_first = feat_extract_first.to_dict()
        dict_second = feat_extract_second.to_dict()
        mel_1 = dict_first.pop('''mel_filters''' )
        mel_2 = dict_second.pop('''mel_filters''' )
        self.assertTrue(np.allclose(mel_1 , mel_2 ) )
        self.assertEqual(dict_first , dict_second )
    def test_call( self ):
        # Initialize feature_extractor
        feature_extractor = self.feature_extraction_class(**self.feat_extract_dict )
        # create three inputs of length 800, 1000, and 1200
        speech_inputs = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )]
        np_speech_inputs = [np.asarray(speech_input ) for speech_input in speech_inputs]
        # Test not batched input
        encoded_audios = feature_extractor(np_speech_inputs[0] , return_tensors='''np''' , sampling_rate=44100 ).audio_values
        self.assertTrue(encoded_audios.ndim == 4 )
        self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size )
        self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length )
        self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels )
        # Test batched
        encoded_audios = feature_extractor(np_speech_inputs , return_tensors='''np''' , sampling_rate=44100 ).audio_values
        self.assertTrue(encoded_audios.ndim == 4 )
        self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size )
        self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length )
        self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels )
        # Test audio masking
        encoded_audios = feature_extractor(
            np_speech_inputs , return_tensors='''np''' , sampling_rate=44100 , mask_audio=True ).audio_values
        self.assertTrue(encoded_audios.ndim == 4 )
        self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size )
        self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length )
        self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels )
        # Test 2-D numpy arrays are batched.
        speech_inputs = [floats_list((1, x) )[0] for x in (800, 800, 800)]
        np_speech_inputs = np.asarray(speech_inputs )
        encoded_audios = feature_extractor(np_speech_inputs , return_tensors='''np''' , sampling_rate=44100 ).audio_values
        self.assertTrue(encoded_audios.ndim == 4 )
        self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size )
        self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length )
        self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels )
    def _load_datasamples( self , num_samples ):
        ds = load_dataset('''hf-internal-testing/librispeech_asr_dummy''' , '''clean''' , split='''validation''' )
        # automatic decoding with librispeech
        speech_samples = ds.sort('''id''' ).select(range(num_samples ) )[:num_samples]['''audio''']
        return [x["array"] for x in speech_samples]
    def test_integration( self ):
        input_speech = self._load_datasamples(1 )
        feature_extractor = TvltFeatureExtractor()
        audio_values = feature_extractor(input_speech , return_tensors='''pt''' ).audio_values
        self.assertEquals(audio_values.shape , (1, 1, 192, 128) )
        expected_slice = torch.tensor([[-0.3_0_3_2, -0.2_7_0_8], [-0.4_4_3_4, -0.4_0_0_7]] )
        self.assertTrue(torch.allclose(audio_values[0, 0, :2, :2] , expected_slice , atol=1e-4 ) )
| 52 | 0 |
# This code is adapted from OpenAI's release
# https://github.com/openai/human-eval/blob/master/human_eval/execution.py
import contextlib
import faulthandler
import io
import multiprocessing
import os
import platform
import signal
import tempfile
def check_correctness( check_program , timeout , task_id , completion_id ):
    '''simple docstring'''
    manager = multiprocessing.Manager()
    result = manager.list()
    p = multiprocessing.Process(target=unsafe_execute , args=(check_program, result, timeout) )
p.start()
p.join(timeout=timeout + 1 )
if p.is_alive():
p.kill()
if not result:
result.append('timed out' )
return {
"task_id": task_id,
"passed": result[0] == "passed",
"result": result[0],
"completion_id": completion_id,
}
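# Hedged usage sketch for check_correctness above (the program string is made up):
#   result = check_correctness('assert 1 + 1 == 2' , timeout=3.0 , task_id='demo/0' , completion_id=0 )
#   result['passed']  # -> True when the program runs without error within the timeout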
def unsafe_execute( check_program , result , timeout ):
    '''simple docstring'''
    with create_tempdir():
        # These system calls are needed when cleaning up tempdir.
        import os
        import shutil
        rmtree = shutil.rmtree
        rmdir = os.rmdir
        chdir = os.chdir
        # Disable functionalities that can make destructive changes to the test.
        reliability_guard()
        # Run program.
        try:
            exec_globals = {}
            with swallow_io():
                with time_limit(timeout ):
                    exec(check_program , exec_globals )
            result.append('passed' )
        except TimeoutException:
            result.append('timed out' )
        except BaseException as e:
            result.append(f'''failed: {e}''' )
        # Needed for cleaning up.
        shutil.rmtree = rmtree
        os.rmdir = rmdir
        os.chdir = chdir
@contextlib.contextmanager
def time_limit( seconds ):
    '''simple docstring'''
    def signal_handler(signum , frame ):
        raise TimeoutException('Timed out!' )
    signal.setitimer(signal.ITIMER_REAL , seconds )
    signal.signal(signal.SIGALRM , signal_handler )
try:
yield
finally:
signal.setitimer(signal.ITIMER_REAL , 0 )
@contextlib.contextmanager
def swallow_io( ):
    '''simple docstring'''
    stream = WriteOnlyStringIO()
    with contextlib.redirect_stdout(stream ):
        with contextlib.redirect_stderr(stream ):
            with redirect_stdin(stream ):
                yield
@contextlib.contextmanager
def create_tempdir( ):
    '''simple docstring'''
    with tempfile.TemporaryDirectory() as dirname:
        with chdir(dirname ):
yield dirname
class TimeoutException( Exception ):
    pass
class WriteOnlyStringIO( io.StringIO ):
    def read( self , *args , **kwargs ):
        """simple docstring"""
        raise OSError
    def readline( self , *args , **kwargs ):
        """simple docstring"""
        raise OSError
    def readlines( self , *args , **kwargs ):
        """simple docstring"""
        raise OSError
    def readable( self , *args , **kwargs ):
        """simple docstring"""
        return False
class redirect_stdin( contextlib._RedirectStream ):  # type: ignore
    _stream = 'stdin'
@contextlib.contextmanager
def chdir( root ):
    '''simple docstring'''
    if root == ".":
        yield
        return
    cwd = os.getcwd()
    os.chdir(root )
    try:
        yield
    except BaseException as exc:
        raise exc
    finally:
        os.chdir(cwd )
def reliability_guard( maximum_memory_bytes=None ):
'''simple docstring'''
if maximum_memory_bytes is not None:
import resource
resource.setrlimit(resource.RLIMIT_AS , (maximum_memory_bytes, maximum_memory_bytes) )
resource.setrlimit(resource.RLIMIT_DATA , (maximum_memory_bytes, maximum_memory_bytes) )
if not platform.uname().system == "Darwin":
resource.setrlimit(resource.RLIMIT_STACK , (maximum_memory_bytes, maximum_memory_bytes) )
faulthandler.disable()
import builtins
    builtins.exit = None
    builtins.quit = None
    import os
    os.environ['OMP_NUM_THREADS'] = '1'
    os.kill = None
    os.system = None
    os.putenv = None
    os.remove = None
    os.removedirs = None
    os.rmdir = None
    os.fchdir = None
    os.setuid = None
    os.fork = None
    os.forkpty = None
    os.killpg = None
    os.rename = None
    os.renames = None
    os.truncate = None
    os.replace = None
    os.unlink = None
    os.fchmod = None
    os.fchown = None
    os.chmod = None
    os.chown = None
    os.chroot = None
    os.fchdir = None
    os.lchflags = None
    os.lchmod = None
    os.lchown = None
    os.getcwd = None
    os.chdir = None
    import shutil
    shutil.rmtree = None
    shutil.move = None
    shutil.chown = None
    import subprocess
    subprocess.Popen = None  # type: ignore
    __builtins__['help'] = None
    import sys
    sys.modules['ipdb'] = None
    sys.modules['joblib'] = None
    sys.modules['resource'] = None
    sys.modules['psutil'] = None
    sys.modules['tkinter'] = None
| 85 |
"""simple docstring"""
from __future__ import annotations
class BoyerMooreSearch:
    """simple docstring"""

    def __init__(self, text, pattern):
        self.text, self.pattern = text, pattern
        self.textLen, self.patLen = len(text), len(pattern)

    def match_in_pattern(self, char):
        """Return the rightmost index of char in the pattern, or -1 if absent."""
        for i in range(self.patLen - 1, -1, -1):
            if char == self.pattern[i]:
                return i
        return -1

    def mismatch_in_text(self, current_pos):
        """Return the text index of the rightmost mismatch at this alignment, or -1 on a full match."""
        for i in range(self.patLen - 1, -1, -1):
            if self.pattern[i] != self.text[current_pos + i]:
                return current_pos + i
        return -1

    def bad_character_heuristic(self):
        # searches pattern in text and returns index positions
        positions = []
        for i in range(self.textLen - self.patLen + 1):
            mismatch_index = self.mismatch_in_text(i)
            if mismatch_index == -1:
                positions.append(i)
            else:
                match_index = self.match_in_pattern(self.text[mismatch_index])
                i = (
                    mismatch_index - match_index
                )  # shifting index lgtm [py/multiple-definition]
        return positions


text = "ABAABA"
pattern = "AB"
bms = BoyerMooreSearch(text, pattern)
positions = bms.bad_character_heuristic()
if len(positions) == 0:
print('''No match found''')
else:
print('''Pattern found in following positions: ''')
print(positions)
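# A second, slightly longer check (not in the original file) that exercises the
# bad-character shift path: "ABC" occurs in "ABCBAB" only at index 0.
assert BoyerMooreSearch("ABCBAB", "ABC").bad_character_heuristic() == [0]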
| 52 | 0 |
import unittest
from transformers import PegasusConfig, PegasusTokenizer, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor
if is_flax_available():
import os
# The slow tests are often failing with OOM error on GPU
# This makes JAX allocate exactly what is needed on demand, and deallocate memory that is no longer needed
# but will be slower as stated here https://jax.readthedocs.io/en/latest/gpu_memory_allocation.html
    os.environ["XLA_PYTHON_CLIENT_ALLOCATOR"] = "platform"
import jax
import jax.numpy as jnp
import numpy as np
from transformers import FlaxPegasusForConditionalGeneration, FlaxPegasusModel
@require_flax
class FlaxPegasusModelTester:
    """simple docstring"""

    config_cls = PegasusConfig
    config_updates = {}
    hidden_act = "gelu"

    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_labels=False,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=20,
        eos_token_id=2,
        pad_token_id=1,
        bos_token_id=0,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size).clip(3, self.vocab_size)
        eos_tensor = np.expand_dims(np.array([self.eos_token_id] * self.batch_size), 1)
        input_ids = np.concatenate([input_ids, eos_tensor], axis=1)
        decoder_input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        config = self.config_cls(
            vocab_size=self.vocab_size,
            d_model=self.hidden_size,
            encoder_layers=self.num_hidden_layers,
            decoder_layers=self.num_hidden_layers,
            encoder_attention_heads=self.num_attention_heads,
            decoder_attention_heads=self.num_attention_heads,
            encoder_ffn_dim=self.intermediate_size,
            decoder_ffn_dim=self.intermediate_size,
            dropout=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            eos_token_ids=[2],
            bos_token_id=self.bos_token_id,
            pad_token_id=self.pad_token_id,
            decoder_start_token_id=self.pad_token_id,
            **self.config_updates,
        )
        inputs_dict = prepare_pegasus_inputs_dict(config, input_ids, decoder_input_ids)
        return config, inputs_dict
    def check_use_cache_forward(self, model_class_name, config, inputs_dict):
        max_decoder_length = 20
        model = model_class_name(config)

        encoder_outputs = model.encode(inputs_dict["input_ids"])

        decoder_input_ids, decoder_attention_mask = (
            inputs_dict["decoder_input_ids"],
            inputs_dict["decoder_attention_mask"],
        )

        past_key_values = model.init_cache(decoder_input_ids.shape[0], max_decoder_length, encoder_outputs)
        decoder_attention_mask = jnp.ones((decoder_input_ids.shape[0], max_decoder_length), dtype="i4")

        decoder_position_ids = jnp.broadcast_to(
            jnp.arange(decoder_input_ids.shape[-1] - 1)[None, :],
            (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1),
        )
        outputs_cache = model.decode(
            decoder_input_ids[:, :-1],
            encoder_outputs,
            decoder_attention_mask=decoder_attention_mask,
            past_key_values=past_key_values,
            decoder_position_ids=decoder_position_ids,
        )

        decoder_position_ids = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]], dtype="i4")
        outputs_cache_next = model.decode(
            decoder_input_ids[:, -1:],
            encoder_outputs,
            decoder_attention_mask=decoder_attention_mask,
            past_key_values=outputs_cache.past_key_values,
            decoder_position_ids=decoder_position_ids,
        )

        outputs = model.decode(decoder_input_ids, encoder_outputs)

        diff = np.max(np.abs(outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]))
        self.parent.assertTrue(diff < 1e-3, msg=f"Max diff is {diff}")

    def check_use_cache_forward_with_attn_mask(self, model_class_name, config, inputs_dict):
        max_decoder_length = 20
        model = model_class_name(config)

        encoder_outputs = model.encode(inputs_dict["input_ids"])

        decoder_input_ids, decoder_attention_mask = (
            inputs_dict["decoder_input_ids"],
            inputs_dict["decoder_attention_mask"],
        )

        decoder_attention_mask_cache = jnp.concatenate(
            [
                decoder_attention_mask,
                jnp.zeros((decoder_attention_mask.shape[0], max_decoder_length - decoder_attention_mask.shape[1])),
            ],
            axis=-1,
        )

        past_key_values = model.init_cache(decoder_input_ids.shape[0], max_decoder_length, encoder_outputs)
        decoder_position_ids = jnp.broadcast_to(
            jnp.arange(decoder_input_ids.shape[-1] - 1)[None, :],
            (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1),
        )

        outputs_cache = model.decode(
            decoder_input_ids[:, :-1],
            encoder_outputs,
            decoder_attention_mask=decoder_attention_mask_cache,
            past_key_values=past_key_values,
            decoder_position_ids=decoder_position_ids,
        )
        decoder_position_ids = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]], dtype="i4")
        outputs_cache_next = model.decode(
            decoder_input_ids[:, -1:],
            encoder_outputs,
            past_key_values=outputs_cache.past_key_values,
            decoder_attention_mask=decoder_attention_mask_cache,
            decoder_position_ids=decoder_position_ids,
        )

        outputs = model.decode(decoder_input_ids, encoder_outputs, decoder_attention_mask=decoder_attention_mask)

        diff = np.max(np.abs(outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]))
        self.parent.assertTrue(diff < 1e-3, msg=f"Max diff is {diff}")
def prepare_pegasus_inputs_dict(
    config,
    input_ids,
    decoder_input_ids,
    attention_mask=None,
    decoder_attention_mask=None,
):
    if attention_mask is None:
        attention_mask = np.not_equal(input_ids, config.pad_token_id).astype(np.int8)
    if decoder_attention_mask is None:
        decoder_attention_mask = np.concatenate(
            [
                np.ones(decoder_input_ids[:, :1].shape, dtype=np.int8),
                np.not_equal(decoder_input_ids[:, 1:], config.pad_token_id).astype(np.int8),
            ],
            axis=-1,
        )
    return {
        "input_ids": input_ids,
        "decoder_input_ids": decoder_input_ids,
        "attention_mask": attention_mask,
        "decoder_attention_mask": decoder_attention_mask,
    }
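# Tiny illustration of the mask construction above (not part of the original
# file): with pad_token_id = 0 and input_ids = [[5, 6, 0]], the attention mask
# becomes [[1, 1, 0]], while the decoder mask always keeps position 0 visible.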
@require_flax
class FlaxPegasusModelTest(FlaxModelTesterMixin, unittest.TestCase):
    """simple docstring"""

    all_model_classes = (
        (
            FlaxPegasusForConditionalGeneration,
            FlaxPegasusModel,
        )
        if is_flax_available()
        else ()
    )
    all_generative_model_classes = (FlaxPegasusForConditionalGeneration,) if is_flax_available() else ()
    is_encoder_decoder = True
    test_pruning = False
    test_head_masking = False
    test_onnx = False

    def setUp(self):
        self.model_tester = FlaxPegasusModelTester(self)
        self.config_tester = ConfigTester(self, config_class=PegasusConfig)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_use_cache_forward(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            self.model_tester.check_use_cache_forward(model_class, config, inputs_dict)

    def test_use_cache_forward_with_attn_mask(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            self.model_tester.check_use_cache_forward_with_attn_mask(model_class, config, inputs_dict)
    def test_encode(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                prepared_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
                model = model_class(config)

                @jax.jit
                def encode_jitted(input_ids, attention_mask=None, **kwargs):
                    return model.encode(input_ids=input_ids, attention_mask=attention_mask)

                with self.subTest("JIT Enabled"):
                    jitted_outputs = encode_jitted(**prepared_inputs_dict).to_tuple()

                with self.subTest("JIT Disabled"):
                    with jax.disable_jit():
                        outputs = encode_jitted(**prepared_inputs_dict).to_tuple()

                self.assertEqual(len(jitted_outputs), len(outputs))
                for jitted_output, output in zip(jitted_outputs, outputs):
                    self.assertEqual(jitted_output.shape, output.shape)

    def test_decode(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                model = model_class(config)
                encoder_outputs = model.encode(inputs_dict["input_ids"], inputs_dict["attention_mask"])

                prepared_inputs_dict = {
                    "decoder_input_ids": inputs_dict["decoder_input_ids"],
                    "decoder_attention_mask": inputs_dict["decoder_attention_mask"],
                    "encoder_outputs": encoder_outputs,
                }

                @jax.jit
                def decode_jitted(decoder_input_ids, decoder_attention_mask, encoder_outputs):
                    return model.decode(
                        decoder_input_ids=decoder_input_ids,
                        decoder_attention_mask=decoder_attention_mask,
                        encoder_outputs=encoder_outputs,
                    )

                with self.subTest("JIT Enabled"):
                    jitted_outputs = decode_jitted(**prepared_inputs_dict).to_tuple()

                with self.subTest("JIT Disabled"):
                    with jax.disable_jit():
                        outputs = decode_jitted(**prepared_inputs_dict).to_tuple()

                self.assertEqual(len(jitted_outputs), len(outputs))
                for jitted_output, output in zip(jitted_outputs, outputs):
                    self.assertEqual(jitted_output.shape, output.shape)
@slow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("google/pegasus-large", from_pt=True)
            input_ids = np.ones((1, 1))
            outputs = model(input_ids)
            self.assertIsNotNone(outputs)
@slow
    def test_pegasus_xsum_summary(self):
        model = FlaxPegasusForConditionalGeneration.from_pretrained("google/pegasus-xsum")
        tokenizer = PegasusTokenizer.from_pretrained("google/pegasus-xsum")
        src_text = [
" PG&E stated it scheduled the blackouts in response to forecasts for high winds amid dry conditions. The aim is to reduce the risk of wildfires. Nearly 800 thousand customers were scheduled to be affected by the shutoffs which were expected to last through at least midday tomorrow.",
" The London trio are up for best UK act and best album, as well as getting two nominations in the best song category.\"We got told like this morning 'Oh I think you're nominated'\", said Dappy.\"And I was like 'Oh yeah, which one?' And now we've got nominated for four awards. I mean, wow!\"Bandmate Fazer added: \"We thought it's best of us to come down and mingle with everyone and say hello to the cameras. And now we find we've got four nominations.\"The band have two shots at the best song prize, getting the nod for their Tynchy Stryder collaboration Number One, and single Strong Again.Their album Uncle B will also go up against records by the likes of Beyonce and Kanye West.N-Dubz picked up the best newcomer Mobo in 2007, but female member Tulisa said they wouldn't be too disappointed if they didn't win this time around.\"At the end of the day we're grateful to be where we are in our careers.\"If it don't happen then it don't happen - live to fight another day and keep on making albums and hits for the fans.\"Dappy also revealed they could be performing live several times on the night.The group will be doing Number One and also a possible rendition of the War Child single, I Got Soul.The charity song is a re-working of The Killers' All These Things That I've Done and is set to feature artists like Chipmunk, Ironik and Pixie Lott.This year's Mobos will be held outside of London for the first time, in Glasgow on 30 September.N-Dubz said they were looking forward to performing for their Scottish fans and boasted about their recent shows north of the border.\"We just done Edinburgh the other day,\" said Dappy.\"We smashed up an N-Dubz show over there. We done Aberdeen about three or four months ago - we smashed up that show over there! Everywhere we go we smash it up!\" ",
]
        tgt_text = [
"California's largest electricity provider has turned off power to hundreds of thousands of customers.",
"Pop group N-Dubz have revealed they were surprised to get four nominations for this year's Mobo Awards.",
]
        inputs = tokenizer(src_text, return_tensors="np", truncation=True, max_length=512, padding=True)
        translated_tokens = model.generate(**inputs, num_beams=2).sequences
        decoded = tokenizer.batch_decode(translated_tokens, skip_special_tokens=True)
        assert tgt_text == decoded
| 86 |
"""simple docstring"""
import webbrowser
from sys import argv
from urllib.parse import parse_qs, quote
import requests
from bs4 import BeautifulSoup
from fake_useragent import UserAgent
if __name__ == "__main__":
    query = "%20".join(argv[1:]) if len(argv) > 1 else quote(str(input("Search: ")))
    print("Googling.....")
    url = f"https://www.google.com/search?q={query}&num=100"
    res = requests.get(
        url,
        headers={"User-Agent": str(UserAgent().random)},
    )
    try:
        link = (
BeautifulSoup(res.text, '''html.parser''')
.find('''div''', attrs={'''class''': '''yuRUbf'''})
.find('''a''')
.get('''href''')
)
except AttributeError:
        link = parse_qs(
BeautifulSoup(res.text, '''html.parser''')
.find('''div''', attrs={'''class''': '''kCrYT'''})
.find('''a''')
.get('''href''')
)['''url'''][0]
webbrowser.open(link)
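    # Encoding note (illustrative, not part of the original script): quote()
    # percent-encodes the query, e.g. quote("hello world") == "hello%20world",
    # which matches the manual "%20".join(...) used for argv input above.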
| 52 | 0 |
from itertools import count
def solution(min_block_length: int = 50) -> int:
    """simple docstring"""
    fill_count_functions = [1] * min_block_length

    for n in count(min_block_length):
        fill_count_functions.append(1)

        for block_length in range(min_block_length, n + 1):
            for block_start in range(n - block_length):
                fill_count_functions[n] += fill_count_functions[
                    n - block_start - block_length - 1
                ]

            fill_count_functions[n] += 1

        if fill_count_functions[n] > 1_000_000:
            break

    return n
if __name__ == "__main__":
print(F'''{solution() = }''')
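    # Sanity check (not in the original file): Project Euler 115 gives
    # F(3, 29) = 673135 and F(3, 30) = 1089155, so the first n exceeding one
    # million for a minimum block length of 3 should be 30.
    assert solution(3) == 30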
| 87 |
"""simple docstring"""
import inspect
import os
import unittest
from dataclasses import dataclass
import torch
from accelerate import Accelerator, DistributedDataParallelKwargs, GradScalerKwargs
from accelerate.state import AcceleratorState
from accelerate.test_utils import execute_subprocess_async, require_cuda, require_multi_gpu
from accelerate.utils import KwargsHandler
@dataclass
class MockClass(KwargsHandler):
    '''simple docstring'''
    a: int = 0
    b: bool = False
    c: float = 3.0


class KwargsHandlerTester(unittest.TestCase):
    '''simple docstring'''

    def test_kwargs_handler(self):
        # If no defaults are changed, `to_kwargs` returns an empty dict.
        self.assertDictEqual(MockClass().to_kwargs(), {})
        self.assertDictEqual(MockClass(a=2).to_kwargs(), {"a": 2})
        self.assertDictEqual(MockClass(a=2, b=True).to_kwargs(), {"a": 2, "b": True})
        self.assertDictEqual(MockClass(a=2, c=2.25).to_kwargs(), {"a": 2, "c": 2.25})

    @require_cuda
    def test_grad_scaler_kwargs(self):
        # If no defaults are changed, `to_kwargs` returns an empty dict.
        scaler_handler = GradScalerKwargs(init_scale=1024, growth_factor=2)
        AcceleratorState._reset_state()
        accelerator = Accelerator(mixed_precision="fp16", kwargs_handlers=[scaler_handler])
        print(accelerator.use_fp16)
        scaler = accelerator.scaler

        # Check the kwargs have been applied
        self.assertEqual(scaler._init_scale, 1024.0)
        self.assertEqual(scaler._growth_factor, 2.0)

        # Check the other values are at the default
        self.assertEqual(scaler._backoff_factor, 0.5)
        self.assertEqual(scaler._growth_interval, 2000)
        self.assertEqual(scaler._enabled, True)

    @require_multi_gpu
    def test_ddp_kwargs(self):
        cmd = ["torchrun", f"--nproc_per_node={torch.cuda.device_count()}", inspect.getfile(self.__class__)]
        execute_subprocess_async(cmd, env=os.environ.copy())
if __name__ == "__main__":
    ddp_scaler = DistributedDataParallelKwargs(bucket_cap_mb=15, find_unused_parameters=True)
    accelerator = Accelerator(kwargs_handlers=[ddp_scaler])
    model = torch.nn.Linear(100, 200)
    model = accelerator.prepare(model)

    # Check the values changed in kwargs
    error_msg = ""
    observed_bucket_cap_map = model.bucket_bytes_cap // (1024 * 1024)
if observed_bucket_cap_map != 15:
error_msg += F"Kwargs badly passed, should have `15` but found {observed_bucket_cap_map}.\n"
if model.find_unused_parameters is not True:
error_msg += F"Kwargs badly passed, should have `True` but found {model.find_unused_parameters}.\n"
# Check the values of the defaults
if model.dim != 0:
error_msg += F"Default value not respected, should have `0` but found {model.dim}.\n"
if model.broadcast_buffers is not True:
error_msg += F"Default value not respected, should have `True` but found {model.broadcast_buffers}.\n"
if model.gradient_as_bucket_view is not False:
error_msg += F"Default value not respected, should have `False` but found {model.gradient_as_bucket_view}.\n"
# Raise error at the end to make sure we don't stop at the first failure.
if len(error_msg) > 0:
raise ValueError(error_msg)
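# Quick illustration (not part of the original test): `to_kwargs` reports only
# the fields that differ from their declared defaults, so
# MockClass(b=True).to_kwargs() == {"b": True}.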
| 52 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_import_structure = {
    "configuration_vision_encoder_decoder": ["VisionEncoderDecoderConfig", "VisionEncoderDecoderOnnxConfig"]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase = ["""VisionEncoderDecoderModel"""]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase = ["""TFVisionEncoderDecoderModel"""]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase = ["""FlaxVisionEncoderDecoderModel"""]
if TYPE_CHECKING:
from .configuration_vision_encoder_decoder import VisionEncoderDecoderConfig, VisionEncoderDecoderOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vision_encoder_decoder import VisionEncoderDecoderModel
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_vision_encoder_decoder import TFVisionEncoderDecoderModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_vision_encoder_decoder import FlaxVisionEncoderDecoderModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
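# Usage sketch (an assumption, not part of this module): with the _LazyModule
# registered in sys.modules, `from ... import VisionEncoderDecoderModel`
# resolves lazily, so the heavy torch/tf/flax imports only run on first
# attribute access rather than at package import time.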
| 88 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_import_structure = {
    "configuration_tapas": ["TAPAS_PRETRAINED_CONFIG_ARCHIVE_MAP", "TapasConfig"],
    "tokenization_tapas": ["TapasTokenizer"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tapas"] = [
'''TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TapasForMaskedLM''',
'''TapasForQuestionAnswering''',
'''TapasForSequenceClassification''',
'''TapasModel''',
'''TapasPreTrainedModel''',
'''load_tf_weights_in_tapas''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_tapas"] = [
'''TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFTapasForMaskedLM''',
'''TFTapasForQuestionAnswering''',
'''TFTapasForSequenceClassification''',
'''TFTapasModel''',
'''TFTapasPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_tapas import TAPAS_PRETRAINED_CONFIG_ARCHIVE_MAP, TapasConfig
from .tokenization_tapas import TapasTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tapas import (
TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST,
TapasForMaskedLM,
TapasForQuestionAnswering,
TapasForSequenceClassification,
TapasModel,
TapasPreTrainedModel,
load_tf_weights_in_tapas,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_tapas import (
TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST,
TFTapasForMaskedLM,
TFTapasForQuestionAnswering,
TFTapasForSequenceClassification,
TFTapasModel,
TFTapasPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 52 | 0 |
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_barthez import BarthezTokenizer
else:
    BarthezTokenizer = None

logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"moussaKam/mbarthez": "https://huggingface.co/moussaKam/mbarthez/resolve/main/sentencepiece.bpe.model",
"moussaKam/barthez": "https://huggingface.co/moussaKam/barthez/resolve/main/sentencepiece.bpe.model",
"moussaKam/barthez-orangesum-title": (
"https://huggingface.co/moussaKam/barthez-orangesum-title/resolve/main/sentencepiece.bpe.model"
),
},
"tokenizer_file": {
"moussaKam/mbarthez": "https://huggingface.co/moussaKam/mbarthez/resolve/main/tokenizer.json",
"moussaKam/barthez": "https://huggingface.co/moussaKam/barthez/resolve/main/tokenizer.json",
"moussaKam/barthez-orangesum-title": (
"https://huggingface.co/moussaKam/barthez-orangesum-title/resolve/main/tokenizer.json"
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"moussaKam/mbarthez": 1024,
"moussaKam/barthez": 1024,
"moussaKam/barthez-orangesum-title": 1024,
}
SPIECE_UNDERLINE = "▁"
class BarthezTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = BarthezTokenizer

    def __init__(self, vocab_file=None, tokenizer_file=None, bos_token="<s>", eos_token="</s>", sep_token="</s>", cls_token="<s>", unk_token="<unk>", pad_token="<pad>", mask_token="<mask>", **kwargs):
        # The mask token behaves like a normal word, i.e. it includes the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token
        super().__init__(
            vocab_file, tokenizer_file=tokenizer_file, bos_token=bos_token, eos_token=eos_token, unk_token=unk_token, sep_token=sep_token, cls_token=cls_token, pad_token=pad_token, mask_token=mask_token, **kwargs,
        )

        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None) -> List[int]:
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    def save_vocabulary(self, save_directory, filename_prefix=None) -> Tuple[str]:
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
                "tokenizer."
            )

        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)

        return (out_vocab_file,)
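# Layout note (illustrative, not part of the original file): for a sequence
# pair, build_inputs_with_special_tokens produces <s> A </s></s> B </s>, so
# dummy ids [5] and [6] come back as
# [cls_token_id, 5, sep_token_id, sep_token_id, 6, sep_token_id].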
| 89 |
"""simple docstring"""
import os
import re
import sys
import traceback
import warnings
from pathlib import Path
from typing import Dict, Optional, Union
from uuid import uuida
from huggingface_hub import HfFolder, ModelCard, ModelCardData, hf_hub_download, whoami
from huggingface_hub.file_download import REGEX_COMMIT_HASH
from huggingface_hub.utils import (
EntryNotFoundError,
RepositoryNotFoundError,
RevisionNotFoundError,
is_jinja_available,
)
from packaging import version
from requests import HTTPError
from .. import __version__
from .constants import (
DEPRECATED_REVISION_ARGS,
DIFFUSERS_CACHE,
HUGGINGFACE_CO_RESOLVE_ENDPOINT,
SAFETENSORS_WEIGHTS_NAME,
WEIGHTS_NAME,
)
from .import_utils import (
ENV_VARS_TRUE_VALUES,
_flax_version,
_jax_version,
_onnxruntime_version,
_torch_version,
is_flax_available,
is_onnx_available,
is_torch_available,
)
from .logging import get_logger
A = get_logger(__name__)
A = Path(__file__).parent / '''model_card_template.md'''
A = uuida().hex
A = os.getenv('''HF_HUB_OFFLINE''', '''''').upper() in ENV_VARS_TRUE_VALUES
A = os.getenv('''DISABLE_TELEMETRY''', '''''').upper() in ENV_VARS_TRUE_VALUES
A = HUGGINGFACE_CO_RESOLVE_ENDPOINT + '''/api/telemetry/'''
def __A ( a_ :Union[Dict, str, None] = None) -> str:
__a : Union[str, Any] = F"""diffusers/{__version__}; python/{sys.version.split()[0]}; session_id/{SESSION_ID}"""
if DISABLE_TELEMETRY or HF_HUB_OFFLINE:
return ua + "; telemetry/off"
if is_torch_available():
ua += F"""; torch/{_torch_version}"""
if is_flax_available():
ua += F"""; jax/{_jax_version}"""
ua += F"""; flax/{_flax_version}"""
if is_onnx_available():
ua += F"""; onnxruntime/{_onnxruntime_version}"""
# CI will set this value to True
if os.environ.get('''DIFFUSERS_IS_CI''' , '''''').upper() in ENV_VARS_TRUE_VALUES:
ua += "; is_ci/true"
if isinstance(a_ , a_):
ua += "; " + "; ".join(F"""{k}/{v}""" for k, v in user_agent.items())
elif isinstance(a_ , a_):
ua += "; " + user_agent
return ua
def __A ( a_ :str , a_ :Optional[str] = None , a_ :Optional[str] = None) -> Optional[int]:
if token is None:
__a : Any = HfFolder.get_token()
if organization is None:
__a : List[Any] = whoami(a_)['''name''']
return F"""{username}/{model_id}"""
else:
return F"""{organization}/{model_id}"""
def __A ( a_ :Union[str, Any] , a_ :List[str]) -> Optional[Any]:
if not is_jinja_available():
raise ValueError(
'''Modelcard rendering is based on Jinja templates.'''
''' Please make sure to have `jinja` installed before using `create_model_card`.'''
''' To install it, please run `pip install Jinja2`.''')
if hasattr(a_ , '''local_rank''') and args.local_rank not in [-1, 0]:
return
__a : int = args.hub_token if hasattr(a_ , '''hub_token''') else None
__a : Any = get_full_repo_name(a_ , token=a_)
    model_card = ModelCard.from_template(
        card_data=ModelCardData(  # Card metadata object that will be converted to YAML block
            language="en",
            license="apache-2.0",
            library_name="diffusers",
            tags=[],
            datasets=args.dataset_name,
            metrics=[],
        ),
        template_path=MODEL_CARD_TEMPLATE_PATH,
        model_name=model_name,
        repo_name=repo_name,
        dataset_name=args.dataset_name if hasattr(args, "dataset_name") else None,
        learning_rate=args.learning_rate,
        train_batch_size=args.train_batch_size,
        eval_batch_size=args.eval_batch_size,
        gradient_accumulation_steps=(
            args.gradient_accumulation_steps if hasattr(args, "gradient_accumulation_steps") else None
        ),
        adam_beta1=args.adam_beta1 if hasattr(args, "adam_beta1") else None,
        adam_beta2=args.adam_beta2 if hasattr(args, "adam_beta2") else None,
        adam_weight_decay=args.adam_weight_decay if hasattr(args, "adam_weight_decay") else None,
        adam_epsilon=args.adam_epsilon if hasattr(args, "adam_epsilon") else None,
        lr_scheduler=args.lr_scheduler if hasattr(args, "lr_scheduler") else None,
        lr_warmup_steps=args.lr_warmup_steps if hasattr(args, "lr_warmup_steps") else None,
        ema_inv_gamma=args.ema_inv_gamma if hasattr(args, "ema_inv_gamma") else None,
        ema_power=args.ema_power if hasattr(args, "ema_power") else None,
        ema_max_decay=args.ema_max_decay if hasattr(args, "ema_max_decay") else None,
        mixed_precision=args.mixed_precision,
    )

    card_path = os.path.join(args.output_dir, "README.md")
    model_card.save(card_path)
def extract_commit_hash(resolved_file: Optional[str], commit_hash: Optional[str] = None):
    if resolved_file is None or commit_hash is not None:
        return commit_hash
    resolved_file = str(Path(resolved_file).as_posix())
    search = re.search(r"snapshots/([^/]+)/", resolved_file)
    if search is None:
        return None
    commit_hash = search.groups()[0]
    return commit_hash if REGEX_COMMIT_HASH.match(commit_hash) else None
# Old default cache path, potentially to be migrated.
# This logic was more or less taken from `transformers`, with the following differences:
# - Diffusers doesn't use custom environment variables to specify the cache path.
# - There is no need to migrate the cache format, just move the files to the new location.
hf_cache_home = os.path.expanduser(
    os.getenv("HF_HOME", os.path.join(os.getenv("XDG_CACHE_HOME", "~/.cache"), "huggingface"))
)
old_diffusers_cache = os.path.join(hf_cache_home, "diffusers")
def move_cache(old_cache_dir: Optional[str] = None, new_cache_dir: Optional[str] = None) -> None:
    if new_cache_dir is None:
        new_cache_dir = DIFFUSERS_CACHE
    if old_cache_dir is None:
        old_cache_dir = old_diffusers_cache

    old_cache_dir = Path(old_cache_dir).expanduser()
    new_cache_dir = Path(new_cache_dir).expanduser()
    for old_blob_path in old_cache_dir.glob("**/blobs/*"):
        if old_blob_path.is_file() and not old_blob_path.is_symlink():
            new_blob_path = new_cache_dir / old_blob_path.relative_to(old_cache_dir)
            new_blob_path.parent.mkdir(parents=True, exist_ok=True)
            os.replace(old_blob_path, new_blob_path)
            try:
                os.symlink(new_blob_path, old_blob_path)
except OSError:
logger.warning(
'''Could not create symlink between old cache and new cache. If you use an older version of diffusers again, files will be re-downloaded.''')
# At this point, old_cache_dir contains symlinks to the new cache (it can still be used).
cache_version_file = os.path.join(DIFFUSERS_CACHE, "version_diffusers_cache.txt")
if not os.path.isfile(cache_version_file):
    cache_version = 0
else:
    with open(cache_version_file) as f:
        try:
            cache_version = int(f.read())
        except ValueError:
            cache_version = 0

if cache_version < 1:
    old_cache_is_not_empty = os.path.isdir(old_diffusers_cache) and len(os.listdir(old_diffusers_cache)) > 0
if old_cache_is_not_empty:
logger.warning(
'''The cache for model files in Diffusers v0.14.0 has moved to a new location. Moving your '''
'''existing cached models. This is a one-time operation, you can interrupt it or run it '''
'''later by calling `diffusers.utils.hub_utils.move_cache()`.'''
)
try:
move_cache()
except Exception as e:
            trace = "\n".join(traceback.format_tb(e.__traceback__))
logger.error(
F'There was a problem when trying to move your cache:\n\n{trace}\n{e.__class__.__name__}: {e}\n\nPlease '
'''file an issue at https://github.com/huggingface/diffusers/issues/new/choose, copy paste this whole '''
'''message and we will do our best to help.'''
)
if cache_version < 1:
try:
os.makedirs(DIFFUSERS_CACHE, exist_ok=True)
with open(cache_version_file, '''w''') as f:
f.write('''1''')
except Exception:
logger.warning(
F'There was a problem when trying to write in your cache folder ({DIFFUSERS_CACHE}). Please, ensure '
'''the directory exists and can be written to.'''
)
def _add_variant(weights_name: str, variant: Optional[str] = None) -> str:
    if variant is not None:
        splits = weights_name.split(".")
        splits = splits[:-1] + [variant] + splits[-1:]
        weights_name = ".".join(splits)

    return weights_name
def _get_model_file(
    pretrained_model_name_or_path, *,
    weights_name, subfolder, cache_dir, force_download, proxies, resume_download, local_files_only, use_auth_token, user_agent, revision, commit_hash=None,
):
    pretrained_model_name_or_path = str(pretrained_model_name_or_path)
    if os.path.isfile(pretrained_model_name_or_path):
        return pretrained_model_name_or_path
    elif os.path.isdir(pretrained_model_name_or_path):
        if os.path.isfile(os.path.join(pretrained_model_name_or_path, weights_name)):
            # Load from a PyTorch checkpoint
            model_file = os.path.join(pretrained_model_name_or_path, weights_name)
            return model_file
        elif subfolder is not None and os.path.isfile(
            os.path.join(pretrained_model_name_or_path, subfolder, weights_name)
        ):
            model_file = os.path.join(pretrained_model_name_or_path, subfolder, weights_name)
            return model_file
        else:
            raise EnvironmentError(
                f"Error no file named {weights_name} found in directory {pretrained_model_name_or_path}."
            )
    else:
        # 1. First check if deprecated way of loading from branches is used
        if (
            revision in DEPRECATED_REVISION_ARGS
            and (weights_name == WEIGHTS_NAME or weights_name == SAFETENSORS_WEIGHTS_NAME)
            and version.parse(version.parse(__version__).base_version) >= version.parse("0.20.0")
        ):
            try:
                model_file = hf_hub_download(
                    pretrained_model_name_or_path, filename=_add_variant(weights_name, revision), cache_dir=cache_dir, force_download=force_download, proxies=proxies, resume_download=resume_download, local_files_only=local_files_only, use_auth_token=use_auth_token, user_agent=user_agent, subfolder=subfolder, revision=revision or commit_hash, )
                warnings.warn(
                    f"Loading the variant {revision} from {pretrained_model_name_or_path} via `revision='{revision}'` is deprecated. Loading instead from `revision='main'` with `variant={revision}`. Loading model variants via `revision='{revision}'` will be removed in diffusers v1. Please use `variant='{revision}'` instead.", FutureWarning, )
                return model_file
            except:  # noqa: E722
                warnings.warn(
                    f"You are loading the variant {revision} from {pretrained_model_name_or_path} via `revision='{revision}'`. This behavior is deprecated and will be removed in diffusers v1. One should use `variant='{revision}'` instead. However, it appears that {pretrained_model_name_or_path} currently does not have a {_add_variant(weights_name, revision)} file in the 'main' branch of {pretrained_model_name_or_path}. \n The Diffusers team and community would be very grateful if you could open an issue: https://github.com/huggingface/diffusers/issues/new with the title '{pretrained_model_name_or_path} is missing {_add_variant(weights_name, revision)}' so that the correct variant file can be added.", FutureWarning, )
        try:
            # 2. Load model file as usual
            model_file = hf_hub_download(
                pretrained_model_name_or_path, filename=weights_name, cache_dir=cache_dir, force_download=force_download, proxies=proxies, resume_download=resume_download, local_files_only=local_files_only, use_auth_token=use_auth_token, user_agent=user_agent, subfolder=subfolder, revision=revision or commit_hash, )
return model_file
except RepositoryNotFoundError:
raise EnvironmentError(
F"""{pretrained_model_name_or_path} is not a local folder and is not a valid model identifier """
'''listed on \'https://huggingface.co/models\'\nIf this is a private repository, make sure to pass a '''
'''token having permission to this repo with `use_auth_token` or log in with `huggingface-cli '''
'''login`.''')
except RevisionNotFoundError:
raise EnvironmentError(
F"""{revision} is not a valid git identifier (branch name, tag name or commit id) that exists for """
'''this model name. Check the model page at '''
F"""'https://huggingface.co/{pretrained_model_name_or_path}' for available revisions.""")
except EntryNotFoundError:
raise EnvironmentError(
F"""{pretrained_model_name_or_path} does not appear to have a file named {weights_name}.""")
except HTTPError as err:
raise EnvironmentError(
F"""There was a specific connection error when trying to load {pretrained_model_name_or_path}:\n{err}""")
except ValueError:
raise EnvironmentError(
F"""We couldn't connect to '{HUGGINGFACE_CO_RESOLVE_ENDPOINT}' to load this model, couldn't find it"""
F""" in the cached files and it looks like {pretrained_model_name_or_path} is not the path to a"""
F""" directory containing a file named {weights_name} or"""
''' \nCheckout your internet connection or see how to run the library in'''
''' offline mode at \'https://huggingface.co/docs/diffusers/installation#offline-mode\'.''')
except EnvironmentError:
raise EnvironmentError(
F"""Can't load the model for '{pretrained_model_name_or_path}'. If you were trying to load it from """
'''\'https://huggingface.co/models\', make sure you don\'t have a local directory with the same name. '''
F"""Otherwise, make sure '{pretrained_model_name_or_path}' is the correct path to a directory """
F"""containing a file named {weights_name}""")
| 52 | 0 |
'''simple docstring'''
from __future__ import annotations
def depth_first_search(
    possible_board: list[int],
    diagonal_right_collisions: list[int],
    diagonal_left_collisions: list[int],
    boards: list[list[str]],
    n: int,
) -> None:
    # Get the next row in the current board (possible_board) to fill with a queen
    row = len(possible_board)
# If row is equal to the size of the board it means there are a queen in each row in
# the current board (possible_board)
if row == n:
# We convert the variable possible_board that looks like this: [1, 3, 0, 2] to
# this: ['. Q . . ', '. . . Q ', 'Q . . . ', '. . Q . ']
boards.append(['''. ''' * i + '''Q ''' + '''. ''' * (n - 1 - i) for i in possible_board] )
return
# We iterate each column in the row to find all possible results in each row
    for col in range(n):
# We apply that we learned previously. First we check that in the current board
# (possible_board) there are not other same value because if there is it means
# that there are a collision in vertical. Then we apply the two formulas we
# learned before:
#
# 45º: y - x = b or 45: row - col = b
# 135º: y + x = b or row + col = b.
#
# And we verify if the results of this two formulas not exist in their variables
# respectively. (diagonal_right_collisions, diagonal_left_collisions)
#
# If any or these are True it means there is a collision so we continue to the
# next value in the for loop.
if (
col in possible_board
or row - col in diagonal_right_collisions
or row + col in diagonal_left_collisions
):
continue
# If it is False we call dfs function again and we update the inputs
        depth_first_search(
            [*possible_board, col],
            [*diagonal_right_collisions, row - col],
            [*diagonal_left_collisions, row + col],
            boards,
            n,
        )
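# Worked example of the two diagonal tests above (illustrative, not part of the
# original file): queens at (row=1, col=3) and (row=2, col=4) collide on a 45°
# diagonal because 1 - 3 == 2 - 4 == -2, while (1, 3) and (2, 2) collide on a
# 135° diagonal because 1 + 3 == 2 + 2 == 4.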
def n_queens_solution(n: int) -> None:
    boards: list[list[str]] = []
    depth_first_search([], [], [], boards, n)
# Print all the boards
for board in boards:
for column in board:
            print(column)
        print("")

    print(len(boards), "solutions were found.")
if __name__ == "__main__":
import doctest
doctest.testmod()
n_queens_solution(4)
| 90 |
"""simple docstring"""
import copy
import os
from typing import TYPE_CHECKING, List, Union
if TYPE_CHECKING:
pass
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

ALIGN_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "kakaobrain/align-base": "https://huggingface.co/kakaobrain/align-base/resolve/main/config.json",
}
class AlignTextConfig(PretrainedConfig):
    '''simple docstring'''

    model_type = "align_text_model"

    def __init__(
        self, vocab_size=30522, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=2, initializer_range=0.02, layer_norm_eps=1e-12, pad_token_id=0, position_embedding_type="absolute", use_cache=True, **kwargs,
    ):
        super().__init__(**kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.pad_token_id = pad_token_id

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs):
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        # get the text config dict if we are loading from AlignConfig
        if config_dict.get("model_type") == "align":
            config_dict = config_dict["text_config"]

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )

        return cls.from_dict(config_dict, **kwargs)
class AlignVisionConfig(PretrainedConfig):
    '''simple docstring'''

    model_type = "align_vision_model"

    def __init__(
        self, num_channels=3, image_size=600, width_coefficient=2.0, depth_coefficient=3.1, depth_divisor=8, kernel_sizes=[3, 3, 5, 3, 5, 5, 3], in_channels=[32, 16, 24, 40, 80, 112, 192], out_channels=[16, 24, 40, 80, 112, 192, 320], depthwise_padding=[], strides=[1, 2, 2, 2, 1, 2, 1], num_block_repeats=[1, 2, 2, 3, 3, 4, 1], expand_ratios=[1, 6, 6, 6, 6, 6, 6], squeeze_expansion_ratio=0.25, hidden_act="swish", hidden_dim=2560, pooling_type="mean", initializer_range=0.02, batch_norm_eps=0.001, batch_norm_momentum=0.99, drop_connect_rate=0.2, **kwargs,
    ):
        super().__init__(**kwargs)

        self.num_channels = num_channels
        self.image_size = image_size
        self.width_coefficient = width_coefficient
        self.depth_coefficient = depth_coefficient
        self.depth_divisor = depth_divisor
        self.kernel_sizes = kernel_sizes
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.depthwise_padding = depthwise_padding
        self.strides = strides
        self.num_block_repeats = num_block_repeats
        self.expand_ratios = expand_ratios
        self.squeeze_expansion_ratio = squeeze_expansion_ratio
        self.hidden_act = hidden_act
        self.hidden_dim = hidden_dim
        self.pooling_type = pooling_type
        self.initializer_range = initializer_range
        self.batch_norm_eps = batch_norm_eps
        self.batch_norm_momentum = batch_norm_momentum
        self.drop_connect_rate = drop_connect_rate
        self.num_hidden_layers = sum(num_block_repeats) * 4

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs):
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        # get the vision config dict if we are loading from AlignConfig
        if config_dict.get("model_type") == "align":
            config_dict = config_dict["vision_config"]

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )

        return cls.from_dict(config_dict, **kwargs)
class AlignConfig(PretrainedConfig):
    '''simple docstring'''

    model_type = "align"
    is_composition = True

    def __init__(
        self, text_config=None, vision_config=None, projection_dim=640, temperature_init_value=1.0, initializer_range=0.02, **kwargs,
    ):
        super().__init__(**kwargs)

        if text_config is None:
            text_config = {}
            logger.info("text_config is None. Initializing the AlignTextConfig with default values.")

        if vision_config is None:
            vision_config = {}
            logger.info("vision_config is None. Initializing the AlignVisionConfig with default values.")

        self.text_config = AlignTextConfig(**text_config)
        self.vision_config = AlignVisionConfig(**vision_config)

        self.projection_dim = projection_dim
        self.temperature_init_value = temperature_init_value
        self.initializer_range = initializer_range

    @classmethod
    def from_text_vision_configs(cls, text_config: AlignTextConfig, vision_config: AlignVisionConfig, **kwargs):
        return cls(text_config=text_config.to_dict(), vision_config=vision_config.to_dict(), **kwargs)

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["text_config"] = self.text_config.to_dict()
        output["vision_config"] = self.vision_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
| 52 | 0 |
"""simple docstring"""
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

SPIECE_UNDERLINE = "▁"

VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model"}

PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''xlm-roberta-base''': '''https://huggingface.co/xlm-roberta-base/resolve/main/sentencepiece.bpe.model''',
'''xlm-roberta-large''': '''https://huggingface.co/xlm-roberta-large/resolve/main/sentencepiece.bpe.model''',
'''xlm-roberta-large-finetuned-conll02-dutch''': (
'''https://huggingface.co/xlm-roberta-large-finetuned-conll02-dutch/resolve/main/sentencepiece.bpe.model'''
),
'''xlm-roberta-large-finetuned-conll02-spanish''': (
'''https://huggingface.co/xlm-roberta-large-finetuned-conll02-spanish/resolve/main/sentencepiece.bpe.model'''
),
'''xlm-roberta-large-finetuned-conll03-english''': (
'''https://huggingface.co/xlm-roberta-large-finetuned-conll03-english/resolve/main/sentencepiece.bpe.model'''
),
'''xlm-roberta-large-finetuned-conll03-german''': (
'''https://huggingface.co/xlm-roberta-large-finetuned-conll03-german/resolve/main/sentencepiece.bpe.model'''
),
}
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'''xlm-roberta-base''': 5_12,
'''xlm-roberta-large''': 5_12,
'''xlm-roberta-large-finetuned-conll02-dutch''': 5_12,
'''xlm-roberta-large-finetuned-conll02-spanish''': 5_12,
'''xlm-roberta-large-finetuned-conll03-english''': 5_12,
'''xlm-roberta-large-finetuned-conll03-german''': 5_12,
}
class XLMRobertaTokenizer(PreTrainedTokenizer):
    '''simple docstring'''

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(self, vocab_file, bos_token="<s>", eos_token="</s>", sep_token="</s>", cls_token="<s>", unk_token="<unk>", pad_token="<pad>", mask_token="<mask>", sp_model_kwargs: Optional[Dict[str, Any]] = None, **kwargs) -> None:
        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        super().__init__(
            bos_token=bos_token, eos_token=eos_token, unk_token=unk_token, sep_token=sep_token, cls_token=cls_token, pad_token=pad_token, mask_token=mask_token, sp_model_kwargs=self.sp_model_kwargs, **kwargs,
        )

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(str(vocab_file))
        self.vocab_file = vocab_file
# Original fairseq vocab and spm vocab must be "aligned":
# Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9
# -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
# fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's' | '▁de' | '-'
# spm | '<unk>' | '<s>' | '</s>' | ',' | '.' | '▁' | 's' | '▁de' | '-' | '▁a'
# Mimic fairseq token-to-id alignment for the first 4 token
        self.fairseq_tokens_to_ids = {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3}

        # The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
        self.fairseq_offset = 1

        self.fairseq_tokens_to_ids["<mask>"] = len(self.sp_model) + self.fairseq_offset
        self.fairseq_ids_to_tokens = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        state["sp_model_proto"] = self.sp_model.serialized_model_proto()
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.LoadFromSerializedProto(self.sp_model_proto)

    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep

    def get_special_tokens_mask(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True)

        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    @property
    def vocab_size(self):
        return len(self.sp_model) + self.fairseq_offset + 1  # Add the <mask> token

    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def _tokenize(self, text: str) -> List[str]:
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        """Converts a token (str) to an id using the vocab."""
        if token in self.fairseq_tokens_to_ids:
            return self.fairseq_tokens_to_ids[token]
        spm_id = self.sp_model.PieceToId(token)
        # Need to return unknown token if the SP model returned 0
        return spm_id + self.fairseq_offset if spm_id else self.unk_token_id

    def _convert_id_to_token(self, index):
        if index in self.fairseq_ids_to_tokens:
            return self.fairseq_ids_to_tokens[index]
        return self.sp_model.IdToPiece(index - self.fairseq_offset)

    def convert_tokens_to_string(self, tokens):
        out_string = "".join(tokens).replace(SPIECE_UNDERLINE, " ").strip()
        return out_string

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)
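# Concrete offset example (illustrative, not part of the original file): per
# the alignment table above, "," has SentencePiece id 3, so
# _convert_token_to_id returns 3 + fairseq_offset == 4, the fairseq id.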
| 91 |
"""simple docstring"""
from __future__ import annotations
from random import choice
def __A ( a_ :Tuple) -> List[str]:
return choice(a_)
def __A ( a_ :list[int] , a_ :int) -> int:
__a : Optional[int] = random_pivot(a_)
# partition based on pivot
# linear time
__a : Union[str, Any] = [e for e in lst if e < pivot]
__a : Any = [e for e in lst if e > pivot]
# if we get lucky, pivot might be the element we want.
# we can easily see this:
# small (elements smaller than k)
# + pivot (kth element)
# + big (elements larger than k)
if len(a_) == k - 1:
return pivot
# pivot is in elements bigger than k
elif len(a_) < k - 1:
return kth_number(a_ , k - len(a_) - 1)
# pivot is in elements smaller than k
else:
return kth_number(a_ , a_)
if __name__ == "__main__":
import doctest
doctest.testmod()
| 52 | 0 |
'''simple docstring'''
import unittest
from pathlib import Path
from tempfile import TemporaryDirectory
from transformers import AutoConfig, TFAutoModel, is_tensorflow_text_available, is_tf_available
from transformers.models.bert.tokenization_bert import BertTokenizer
from transformers.testing_utils import require_tensorflow_text, require_tf, slow
if is_tf_available():
import tensorflow as tf
if is_tensorflow_text_available():
from transformers.models.bert import TFBertTokenizer
TOKENIZER_CHECKPOINTS = ["bert-base-uncased", "bert-base-cased"]
TINY_MODEL_CHECKPOINT = "hf-internal-testing/tiny-bert-tf-only"
if is_tf_available():
    class ModelToSave(tf.keras.Model):
        def __init__(self, tokenizer):
            super().__init__()
            self.tokenizer = tokenizer
            config = AutoConfig.from_pretrained(TINY_MODEL_CHECKPOINT)
            self.bert = TFAutoModel.from_config(config)

        def call(self, inputs):
            tokenized = self.tokenizer(inputs)
            out = self.bert(**tokenized)
            return out["pooler_output"]
@require_tf
@require_tensorflow_text
class BertTokenizationTest(unittest.TestCase):
    '''simple docstring'''

    def setUp(self):
        super().setUp()

        self.tokenizers = [
            BertTokenizer.from_pretrained(checkpoint) for checkpoint in (TOKENIZER_CHECKPOINTS * 2)
        ]  # repeat for when fast_bert_tokenizer=false
        self.tf_tokenizers = [TFBertTokenizer.from_pretrained(checkpoint) for checkpoint in TOKENIZER_CHECKPOINTS] + [
            TFBertTokenizer.from_pretrained(checkpoint, use_fast_bert_tokenizer=False)
            for checkpoint in TOKENIZER_CHECKPOINTS
        ]
        assert len(self.tokenizers) == len(self.tf_tokenizers)

        self.test_sentences = [
'''This is a straightforward English test sentence.''',
'''This one has some weird characters\rto\nsee\r\nif those\u00E9break things.''',
'''Now we\'re going to add some Chinese: 一 二 三 一二三''',
'''And some much more rare Chinese: 齉 堃 齉堃''',
'''Je vais aussi écrire en français pour tester les accents''',
'''Classical Irish also has some unusual characters, so in they go: Gaelaċ, ꝼ''',
]
        self.paired_sentences = list(zip(self.test_sentences, self.test_sentences[::-1]))

    def test_output_equivalence(self):
        for tokenizer, tf_tokenizer in zip(self.tokenizers, self.tf_tokenizers):
            for test_inputs in (self.test_sentences, self.paired_sentences):
                python_outputs = tokenizer(test_inputs, return_tensors="tf", padding="longest")
                tf_outputs = tf_tokenizer(test_inputs)

                for key in python_outputs.keys():
                    self.assertTrue(tf.reduce_all(python_outputs[key].shape == tf_outputs[key].shape))
                    self.assertTrue(tf.reduce_all(tf.cast(python_outputs[key], tf.int64) == tf_outputs[key]))

    @slow
    def test_different_pairing_styles(self):
        for tf_tokenizer in self.tf_tokenizers:
            merged_outputs = tf_tokenizer(self.paired_sentences)
            separated_outputs = tf_tokenizer(
                text=[sentence[0] for sentence in self.paired_sentences],
                text_pair=[sentence[1] for sentence in self.paired_sentences],
            )
            for key in merged_outputs.keys():
                self.assertTrue(tf.reduce_all(tf.cast(merged_outputs[key], tf.int64) == separated_outputs[key]))

    @slow
    def test_graph_mode(self):
        for tf_tokenizer in self.tf_tokenizers:
            compiled_tokenizer = tf.function(tf_tokenizer)
            for test_inputs in (self.test_sentences, self.paired_sentences):
                test_inputs = tf.constant(test_inputs)
                compiled_outputs = compiled_tokenizer(test_inputs)
                eager_outputs = tf_tokenizer(test_inputs)

                for key in eager_outputs.keys():
                    self.assertTrue(tf.reduce_all(eager_outputs[key] == compiled_outputs[key]))

    @slow
    def test_saved_model(self):
        for tf_tokenizer in self.tf_tokenizers:
            model = ModelToSave(tokenizer=tf_tokenizer)
            test_inputs = tf.convert_to_tensor(self.test_sentences)
            out = model(test_inputs)  # Build model with some sample inputs
            with TemporaryDirectory() as tempdir:
                save_path = Path(tempdir) / "saved.model"
                model.save(save_path)
                loaded_model = tf.keras.models.load_model(save_path)
                loaded_output = loaded_model(test_inputs)
            # We may see small differences because the loaded model is compiled, so we need an epsilon for the test
            self.assertLessEqual(tf.reduce_max(tf.abs(out - loaded_output)), 1e-5)
| 92 |
"""simple docstring"""
import argparse
import csv
import logging
import os
import random
import numpy as np
import torch
from torch.utils.data import DataLoader, RandomSampler, SequentialSampler, TensorDataset
from tqdm import tqdm, trange
from transformers import (
CONFIG_NAME,
WEIGHTS_NAME,
AdamW,
OpenAIGPTDoubleHeadsModel,
OpenAIGPTTokenizer,
get_linear_schedule_with_warmup,
)
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''', datefmt='''%m/%d/%Y %H:%M:%S''', level=logging.INFO
)
logger = logging.getLogger(__name__)
def accuracy(out, labels):
    outputs = np.argmax(out, axis=1)
    return np.sum(outputs == labels)
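
# Added note (illustration): accuracy() counts rows whose argmax matches the
# label, e.g. accuracy(np.array([[0.1, 0.9], [0.8, 0.2]]), np.array([1, 1]))
# returns 1 because only the first row's argmax equals its label.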
def load_rocstories_dataset(dataset_path):
    """Output a list of tuples(story, 1st continuation, 2nd continuation, label)"""
    with open(dataset_path, encoding="utf_8") as f:
        f_csv = csv.reader(f)
        output = []
        next(f_csv)  # skip the first line
        for line in tqdm(f_csv):
            output.append((" ".join(line[1:5]), line[5], line[6], int(line[-1]) - 1))
    return output
def pre_process_datasets(encoded_datasets, input_len, cap_length, start_token, delimiter_token, clf_token):
    """Pre-process datasets containing lists of tuples(story, 1st continuation, 2nd continuation, label)

    To Transformer inputs of shape (n_batch, n_alternative, length) comprising for each batch, continuation:
    input_ids[batch, alternative, :] = [start_token] + story[:cap_length] + [delimiter_token] + cont[:cap_length] + [clf_token]
    """
    tensor_datasets = []
    for dataset in encoded_datasets:
        n_batch = len(dataset)
        input_ids = np.zeros((n_batch, 2, input_len), dtype=np.int64)
        mc_token_ids = np.zeros((n_batch, 2), dtype=np.int64)
        lm_labels = np.full((n_batch, 2, input_len), fill_value=-100, dtype=np.int64)
        mc_labels = np.zeros((n_batch,), dtype=np.int64)
        for (
            i,
            (story, cont1, cont2, mc_label),
        ) in enumerate(dataset):
            with_cont1 = [start_token] + story[:cap_length] + [delimiter_token] + cont1[:cap_length] + [clf_token]
            with_cont2 = [start_token] + story[:cap_length] + [delimiter_token] + cont2[:cap_length] + [clf_token]
            input_ids[i, 0, : len(with_cont1)] = with_cont1
            input_ids[i, 1, : len(with_cont2)] = with_cont2
            mc_token_ids[i, 0] = len(with_cont1) - 1
            mc_token_ids[i, 1] = len(with_cont2) - 1
            lm_labels[i, 0, : len(with_cont1)] = with_cont1
            lm_labels[i, 1, : len(with_cont2)] = with_cont2
            mc_labels[i] = mc_label
        all_inputs = (input_ids, mc_token_ids, lm_labels, mc_labels)
        tensor_datasets.append(tuple(torch.tensor(t) for t in all_inputs))
    return tensor_datasets
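
# Added worked example (illustration, not in the original script): for a toy
#   dataset = [([5, 6], [7], [8, 9], 0)]
#   pre_process_datasets([dataset], 8, 4, 1, 2, 3)
# the returned input_ids tensor has shape (1, 2, 8) with rows
#   [1, 5, 6, 2, 7, 3, 0, 0] and [1, 5, 6, 2, 8, 9, 3, 0],
# and mc_token_ids is [[5, 6]]: the position of the clf_token in each row.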
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument("--model_name", type=str, default="openai-gpt", help="pretrained model name")
    parser.add_argument("--do_train", action="store_true", help="Whether to run training.")
    parser.add_argument("--do_eval", action="store_true", help="Whether to run eval on the dev set.")
    parser.add_argument(
        "--output_dir",
        default=None,
        type=str,
        required=True,
        help="The output directory where the model predictions and checkpoints will be written.",
    )
    parser.add_argument("--train_dataset", type=str, default="")
    parser.add_argument("--eval_dataset", type=str, default="")
    parser.add_argument("--seed", type=int, default=42)
    parser.add_argument("--num_train_epochs", type=int, default=3)
    parser.add_argument("--train_batch_size", type=int, default=8)
    parser.add_argument("--eval_batch_size", type=int, default=16)
    parser.add_argument("--adam_epsilon", default=1e-8, type=float, help="Epsilon for Adam optimizer.")
    parser.add_argument("--max_grad_norm", type=int, default=1)
    parser.add_argument(
        "--max_steps",
        default=-1,
        type=int,
        help="If > 0: set total number of training steps to perform. Override num_train_epochs.",
    )
    parser.add_argument(
        "--gradient_accumulation_steps",
        type=int,
        default=1,
        help="Number of updates steps to accumulate before performing a backward/update pass.",
    )
    parser.add_argument("--learning_rate", type=float, default=6.25e-5)
    parser.add_argument("--warmup_steps", default=0, type=int, help="Linear warmup over warmup_steps.")
    parser.add_argument("--lr_schedule", type=str, default="warmup_linear")
    parser.add_argument("--weight_decay", type=float, default=0.01)
    parser.add_argument("--lm_coef", type=float, default=0.9)
    parser.add_argument("--n_valid", type=int, default=374)
    parser.add_argument("--server_ip", type=str, default="", help="Can be used for distant debugging.")
    parser.add_argument("--server_port", type=str, default="", help="Can be used for distant debugging.")
    args = parser.parse_args()
    print(args)
    if args.server_ip and args.server_port:
        # Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script
        import ptvsd

        print("Waiting for debugger attach")
        ptvsd.enable_attach(address=(args.server_ip, args.server_port), redirect_output=True)
        ptvsd.wait_for_attach()

    random.seed(args.seed)
    np.random.seed(args.seed)
    torch.manual_seed(args.seed)
    torch.cuda.manual_seed_all(args.seed)

    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    n_gpu = torch.cuda.device_count()
    logger.info("device: {}, n_gpu {}".format(device, n_gpu))

    if not args.do_train and not args.do_eval:
        raise ValueError("At least one of `do_train` or `do_eval` must be True.")

    if not os.path.exists(args.output_dir):
        os.makedirs(args.output_dir)

    # Load tokenizer and model
    # This loading functions also add new tokens and embeddings called `special tokens`
    # These new embeddings will be fine-tuned on the RocStories dataset
    special_tokens = ["_start_", "_delimiter_", "_classify_"]
    tokenizer = OpenAIGPTTokenizer.from_pretrained(args.model_name)
    tokenizer.add_tokens(special_tokens)
    special_tokens_ids = tokenizer.convert_tokens_to_ids(special_tokens)
    model = OpenAIGPTDoubleHeadsModel.from_pretrained(args.model_name)
    model.resize_token_embeddings(len(tokenizer))
    model.to(device)
    # Load and encode the datasets
    def tokenize_and_encode(obj):
        """Tokenize and encode a nested object"""
        if isinstance(obj, str):
            return tokenizer.convert_tokens_to_ids(tokenizer.tokenize(obj))
        elif isinstance(obj, int):
            return obj
        return [tokenize_and_encode(o) for o in obj]

    logger.info("Encoding dataset...")
    train_dataset = load_rocstories_dataset(args.train_dataset)
    eval_dataset = load_rocstories_dataset(args.eval_dataset)
    datasets = (train_dataset, eval_dataset)
    encoded_datasets = tokenize_and_encode(datasets)

    # Compute the max input length for the Transformer
    max_length = model.config.n_positions // 2 - 2
    input_length = max(
        len(story[:max_length]) + max(len(cont1[:max_length]), len(cont2[:max_length])) + 3
        for dataset in encoded_datasets
        for story, cont1, cont2, _ in dataset
    )
    input_length = min(input_length, model.config.n_positions)  # Max size of input for the pre-trained model

    # Prepare inputs tensors and dataloaders
    tensor_datasets = pre_process_datasets(encoded_datasets, input_length, max_length, *special_tokens_ids)
    train_tensor_dataset, eval_tensor_dataset = tensor_datasets[0], tensor_datasets[1]

    train_data = TensorDataset(*train_tensor_dataset)
    train_sampler = RandomSampler(train_data)
    train_dataloader = DataLoader(train_data, sampler=train_sampler, batch_size=args.train_batch_size)
    eval_data = TensorDataset(*eval_tensor_dataset)
    eval_sampler = SequentialSampler(eval_data)
    eval_dataloader = DataLoader(eval_data, sampler=eval_sampler, batch_size=args.eval_batch_size)
    # Prepare optimizer
    if args.do_train:
        if args.max_steps > 0:
            t_total = args.max_steps
            args.num_train_epochs = args.max_steps // (len(train_dataloader) // args.gradient_accumulation_steps) + 1
        else:
            t_total = len(train_dataloader) // args.gradient_accumulation_steps * args.num_train_epochs

        param_optimizer = list(model.named_parameters())
        no_decay = ["bias", "LayerNorm.bias", "LayerNorm.weight"]
        optimizer_grouped_parameters = [
            {
                "params": [p for n, p in param_optimizer if not any(nd in n for nd in no_decay)],
                "weight_decay": args.weight_decay,
            },
            {"params": [p for n, p in param_optimizer if any(nd in n for nd in no_decay)], "weight_decay": 0.0},
        ]
        optimizer = AdamW(optimizer_grouped_parameters, lr=args.learning_rate, eps=args.adam_epsilon)
        scheduler = get_linear_schedule_with_warmup(
            optimizer, num_warmup_steps=args.warmup_steps, num_training_steps=t_total
        )
    if args.do_train:
        nb_tr_steps, tr_loss, exp_average_loss = 0, 0, None
        model.train()
        for _ in trange(int(args.num_train_epochs), desc="Epoch"):
            tr_loss = 0
            nb_tr_steps = 0
            tqdm_bar = tqdm(train_dataloader, desc="Training")
            for step, batch in enumerate(tqdm_bar):
                batch = tuple(t.to(device) for t in batch)
                input_ids, mc_token_ids, lm_labels, mc_labels = batch
                losses = model(input_ids, mc_token_ids=mc_token_ids, lm_labels=lm_labels, mc_labels=mc_labels)
                loss = args.lm_coef * losses[0] + losses[1]
                loss.backward()
                optimizer.step()
                scheduler.step()
                optimizer.zero_grad()
                tr_loss += loss.item()
                exp_average_loss = (
                    loss.item() if exp_average_loss is None else 0.7 * exp_average_loss + 0.3 * loss.item()
                )
                nb_tr_steps += 1
                tqdm_bar.desc = "Training loss: {:.2e} lr: {:.2e}".format(exp_average_loss, scheduler.get_lr()[0])
    # Save a trained model
    if args.do_train:
        # Save a trained model, configuration and tokenizer
        model_to_save = model.module if hasattr(model, "module") else model  # Only save the model itself

        # If we save using the predefined names, we can load using `from_pretrained`
        output_model_file = os.path.join(args.output_dir, WEIGHTS_NAME)
        output_config_file = os.path.join(args.output_dir, CONFIG_NAME)

        torch.save(model_to_save.state_dict(), output_model_file)
        model_to_save.config.to_json_file(output_config_file)
        tokenizer.save_vocabulary(args.output_dir)

        # Load a trained model and vocabulary that you have fine-tuned
        model = OpenAIGPTDoubleHeadsModel.from_pretrained(args.output_dir)
        tokenizer = OpenAIGPTTokenizer.from_pretrained(args.output_dir)
        model.to(device)
    if args.do_eval:
        model.eval()
        eval_loss, eval_accuracy = 0, 0
        nb_eval_steps, nb_eval_examples = 0, 0
        for batch in tqdm(eval_dataloader, desc="Evaluating"):
            batch = tuple(t.to(device) for t in batch)
            input_ids, mc_token_ids, lm_labels, mc_labels = batch
            with torch.no_grad():
                _, mc_loss, _, mc_logits = model(
                    input_ids, mc_token_ids=mc_token_ids, lm_labels=lm_labels, mc_labels=mc_labels
                )

            mc_logits = mc_logits.detach().cpu().numpy()
            mc_labels = mc_labels.to("cpu").numpy()
            tmp_eval_accuracy = accuracy(mc_logits, mc_labels)

            eval_loss += mc_loss.mean().item()
            eval_accuracy += tmp_eval_accuracy

            nb_eval_examples += input_ids.size(0)
            nb_eval_steps += 1

        eval_loss = eval_loss / nb_eval_steps
        eval_accuracy = eval_accuracy / nb_eval_examples
        train_loss = tr_loss / nb_tr_steps if args.do_train else None
        result = {"eval_loss": eval_loss, "eval_accuracy": eval_accuracy, "train_loss": train_loss}

        output_eval_file = os.path.join(args.output_dir, "eval_results.txt")
        with open(output_eval_file, "w") as writer:
            logger.info("***** Eval results *****")
            for key in sorted(result.keys()):
                logger.info("  %s = %s", key, str(result[key]))
                writer.write("%s = %s\n" % (key, str(result[key])))
if __name__ == "__main__":
main()
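
# Example invocation (added for illustration; the dataset paths are placeholders):
#   python run_openai_gpt.py \
#     --model_name openai-gpt \
#     --do_train --do_eval \
#     --train_dataset "$ROC_STORIES_DIR/cloze_test_val__spring2016.csv" \
#     --eval_dataset "$ROC_STORIES_DIR/cloze_test_test__spring2016.csv" \
#     --output_dir ../log \
#     --train_batch_size 16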
| 52 | 0 |
"""simple docstring"""
def get_set_bits_count(number: int) -> int:
    """
    Count the number of set bits in a non-negative integer
    using Brian Kernighan's way.

    >>> get_set_bits_count(25)
    3
    >>> get_set_bits_count(0)
    0
    """
    if not isinstance(number, int) or number < 0:
        raise ValueError("Input must be a non-negative integer")

    count = 0
    while number:
        # This way we arrive at next set bit (next 1) instead of looping
        # through each bit and checking for 1s hence the
        # loop won't run 32 times it will only run the number of `1` times
        number &= number - 1
        count += 1
    return count
if __name__ == "__main__":
import doctest
doctest.testmod()
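    # Added sketch (illustration): Kernighan's trick clears the lowest set bit
    # each pass, so 0b1011 -> 0b1010 -> 0b1000 -> 0 takes exactly 3 iterations.
    assert get_set_bits_count(0b1011) == 3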
| 93 |
"""simple docstring"""
import unittest
import numpy as np
from transformers import RobertaConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_flax_available():
from transformers.models.roberta.modeling_flax_roberta import (
FlaxRobertaForCausalLM,
FlaxRobertaForMaskedLM,
FlaxRobertaForMultipleChoice,
FlaxRobertaForQuestionAnswering,
FlaxRobertaForSequenceClassification,
FlaxRobertaForTokenClassification,
FlaxRobertaModel,
)
class FlaxRobertaModelTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_attention_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_choices=4,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_choices = num_choices
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        attention_mask = None
        if self.use_attention_mask:
            attention_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        config = RobertaConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )

        return config, input_ids, token_type_ids, attention_mask

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": attention_mask}
        return config, inputs_dict
    def prepare_config_and_inputs_for_decoder(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size])
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)

        return (
            config,
            input_ids,
            token_type_ids,
            encoder_hidden_states,
            encoder_attention_mask,
        )
@require_flax
class FlaxRobertaModelTest(FlaxModelTesterMixin, unittest.TestCase):
    test_head_masking = True

    all_model_classes = (
(
FlaxRobertaModel,
FlaxRobertaForCausalLM,
FlaxRobertaForMaskedLM,
FlaxRobertaForSequenceClassification,
FlaxRobertaForTokenClassification,
FlaxRobertaForMultipleChoice,
FlaxRobertaForQuestionAnswering,
)
if is_flax_available()
else ()
)
    def setUp(self):
        self.model_tester = FlaxRobertaModelTester(self)

    @slow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("roberta-base", from_pt=True)
            outputs = model(np.ones((1, 1)))
            self.assertIsNotNone(outputs)
| 52 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
'configuration_biogpt': ['BIOGPT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'BioGptConfig'],
'tokenization_biogpt': ['BioGptTokenizer'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_biogpt"] = [
'BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST',
'BioGptForCausalLM',
'BioGptForTokenClassification',
'BioGptForSequenceClassification',
'BioGptModel',
'BioGptPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_biogpt import BIOGPT_PRETRAINED_CONFIG_ARCHIVE_MAP, BioGptConfig
from .tokenization_biogpt import BioGptTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_biogpt import (
BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST,
BioGptForCausalLM,
BioGptForSequenceClassification,
BioGptForTokenClassification,
BioGptModel,
BioGptPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 94 |
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

LEVIT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/levit-128S": "https://huggingface.co/facebook/levit-128S/resolve/main/config.json",
    # See all LeViT models at https://huggingface.co/models?filter=levit
}
class LevitConfig(PretrainedConfig):
    model_type = "levit"

    def __init__(
        self,
        image_size=224,
        num_channels=3,
        kernel_size=3,
        stride=2,
        padding=1,
        patch_size=16,
        hidden_sizes=[128, 256, 384],
        num_attention_heads=[4, 8, 12],
        depths=[4, 4, 4],
        key_dim=[16, 16, 16],
        drop_path_rate=0,
        mlp_ratio=[2, 2, 2],
        attention_ratio=[2, 2, 2],
        initializer_range=0.02,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.image_size = image_size
        self.num_channels = num_channels
        self.kernel_size = kernel_size
        self.stride = stride
        self.padding = padding
        self.hidden_sizes = hidden_sizes
        self.num_attention_heads = num_attention_heads
        self.depths = depths
        self.key_dim = key_dim
        self.drop_path_rate = drop_path_rate
        self.patch_size = patch_size
        self.attention_ratio = attention_ratio
        self.mlp_ratio = mlp_ratio
        self.initializer_range = initializer_range
        self.down_ops = [
            ["Subsample", key_dim[0], hidden_sizes[0] // key_dim[0], 4, 2, 2],
            ["Subsample", key_dim[0], hidden_sizes[1] // key_dim[0], 4, 2, 2],
        ]
class LevitOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4
| 52 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
'''configuration_efficientformer''': [
'''EFFICIENTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''EfficientFormerConfig''',
]
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["image_processing_efficientformer"] = ["EfficientFormerImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_efficientformer"] = [
'''EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''EfficientFormerForImageClassification''',
'''EfficientFormerForImageClassificationWithTeacher''',
'''EfficientFormerModel''',
'''EfficientFormerPreTrainedModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_efficientformer"] = [
'''TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFEfficientFormerForImageClassification''',
'''TFEfficientFormerForImageClassificationWithTeacher''',
'''TFEfficientFormerModel''',
'''TFEfficientFormerPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_efficientformer import EFFICIENTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, EfficientFormerConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_efficientformer import EfficientFormerImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_efficientformer import (
EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
EfficientFormerForImageClassification,
EfficientFormerForImageClassificationWithTeacher,
EfficientFormerModel,
EfficientFormerPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_efficientformer import (
TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TFEfficientFormerForImageClassification,
TFEfficientFormerForImageClassificationWithTeacher,
TFEfficientFormerModel,
TFEfficientFormerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 95 |
"""simple docstring"""
def jaccard_similarity(set_a, set_b, alternative_union=False):
    """
    Finds the jaccard similarity between two sets:
    essentially, its intersection over union.
    """
    if isinstance(set_a, set) and isinstance(set_b, set):
        intersection = len(set_a.intersection(set_b))

        if alternative_union:
            union = len(set_a) + len(set_b)
        else:
            union = len(set_a.union(set_b))

        return intersection / union

    if isinstance(set_a, (list, tuple)) and isinstance(set_b, (list, tuple)):
        intersection = [element for element in set_a if element in set_b]

        if alternative_union:
            union = len(set_a) + len(set_b)
            return len(intersection) / union
        else:
            union = set_a + [element for element in set_b if element not in set_a]
            return len(intersection) / len(union)

    return None
if __name__ == "__main__":
    set_a = {"a", "b", "c", "d", "e"}
    set_b = {"c", "d", "e", "f", "h", "i"}
    print(jaccard_similarity(set_a, set_b))
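    # Added worked example (illustration): the intersection above is
    # {"c", "d", "e"} (3 elements) and the union has 8 elements, so the
    # printed value is 3 / 8 = 0.375; with alternative_union=True the
    # denominator would instead be len(set_a) + len(set_b) = 11.
    assert jaccard_similarity(set_a, set_b) == 0.375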
| 52 | 0 |
"""simple docstring"""
import json
import os
import unittest
from transformers import OpenAIGPTTokenizer, OpenAIGPTTokenizerFast
from transformers.models.openai.tokenization_openai import VOCAB_FILES_NAMES
from transformers.testing_utils import require_ftfy, require_spacy, require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class OpenAIGPTTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = OpenAIGPTTokenizer
    rust_tokenizer_class = OpenAIGPTTokenizerFast
    test_rust_tokenizer = True
    test_seq2seq = False
    def setUp(self):
        super().setUp()

        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = [
            "l",
            "o",
            "w",
            "e",
            "r",
            "s",
            "t",
            "i",
            "d",
            "n",
            "w</w>",
            "r</w>",
            "t</w>",
            "lo",
            "low",
            "er</w>",
            "low</w>",
            "lowest</w>",
            "newer</w>",
            "wider</w>",
            "<unk>",
        ]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "l o", "lo w", "e r</w>", ""]

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w") as fp:
            fp.write(json.dumps(vocab_tokens))
        with open(self.merges_file, "w") as fp:
            fp.write("\n".join(merges))
    def get_input_output_texts(self, tokenizer):
        return "lower newer", "lower newer"

    def test_full_tokenizer(self):
        tokenizer = OpenAIGPTTokenizer(self.vocab_file, self.merges_file)

        text = "lower"
        bpe_tokens = ["low", "er</w>"]
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + ["<unk>"]
        input_bpe_tokens = [14, 15, 20]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)
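
    # Added note (illustration): with the toy merge table above ("l o", "lo w",
    # "e r</w>"), "lower" is split into characters and then merged l+o -> "lo",
    # lo+w -> "low", e+r</w> -> "er</w>", which is exactly the expected
    # ["low", "er</w>"] checked in the test; ids 14, 15, 20 are their positions
    # (plus "<unk>") in the toy vocabulary.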
    def test_padding(self, max_length=15):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                # Simple input
                s = "This is a simple input"
                s2 = ["This is a simple input 1", "This is a simple input 2"]
                p = ("This is a simple input", "This is a pair")
                p2 = [
                    ("This is a simple input 1", "This is a simple input 2"),
                    ("This is a simple pair 1", "This is a simple pair 2"),
                ]

                # Simple input tests
                self.assertRaises(ValueError, tokenizer_r.encode, s, max_length=max_length, padding="max_length")

                # Simple input
                self.assertRaises(ValueError, tokenizer_r.encode_plus, s, max_length=max_length, padding="max_length")

                # Simple input
                self.assertRaises(
                    ValueError,
                    tokenizer_r.batch_encode_plus,
                    s2,
                    max_length=max_length,
                    padding="max_length",
                )

                # Pair input
                self.assertRaises(ValueError, tokenizer_r.encode, p, max_length=max_length, padding="max_length")

                # Pair input
                self.assertRaises(ValueError, tokenizer_r.encode_plus, p, max_length=max_length, padding="max_length")

                # Pair input
                self.assertRaises(
                    ValueError,
                    tokenizer_r.batch_encode_plus,
                    p2,
                    max_length=max_length,
                    padding="max_length",
                )
    def test_padding_different_model_input_name(self):
        pass
@require_ftfy
@require_spacy
@require_tokenizers
class OpenAIGPTTokenizationTestWithSpacy(OpenAIGPTTokenizationTest):
    pass
| 96 |
"""simple docstring"""
from __future__ import annotations
import os
from collections.abc import Mapping
A = tuple[int, int]
class Graph:
    def __init__(self, vertices: set[int], edges: Mapping[EdgeT, int]) -> None:
        self.vertices: set[int] = vertices
        self.edges: dict[EdgeT, int] = {
            (min(edge), max(edge)): weight for edge, weight in edges.items()
        }

    def add_edge(self, edge: EdgeT, weight: int) -> None:
        """Add a new edge to the graph, overwriting any existing weight."""
        self.vertices.add(edge[0])
        self.vertices.add(edge[1])
        self.edges[(min(edge), max(edge))] = weight

    def prims_algorithm(self) -> Graph:
        """Run Prim's algorithm and return a minimum spanning subgraph."""
        subgraph: Graph = Graph({min(self.vertices)}, {})

        min_edge: EdgeT
        min_weight: int
        edge: EdgeT
        weight: int

        while len(subgraph.vertices) < len(self.vertices):
            min_weight = max(self.edges.values()) + 1
            for edge, weight in self.edges.items():
                if (edge[0] in subgraph.vertices) ^ (edge[1] in subgraph.vertices):
                    if weight < min_weight:
                        min_edge = edge
                        min_weight = weight
            subgraph.add_edge(min_edge, min_weight)

        return subgraph
def solution(filename: str = "p107_network.txt") -> int:
    """
    Compute the maximum saving achievable by removing redundant edges from the
    given network while keeping it connected (Project Euler problem 107).
    """
    script_dir: str = os.path.abspath(os.path.dirname(__file__))
    filepath: str = os.path.join(script_dir, filename)

    edges: dict[EdgeT, int] = {}
    data: list[str]

    with open(filepath) as f:
        data = f.read().strip().split("\n")

    adjaceny_matrix = [line.split(",") for line in data]

    for edge1 in range(1, len(adjaceny_matrix)):
        for edge2 in range(edge1):
            if adjaceny_matrix[edge1][edge2] != "-":
                edges[(edge2, edge1)] = int(adjaceny_matrix[edge1][edge2])

    graph: Graph = Graph(set(range(len(adjaceny_matrix))), edges)

    subgraph: Graph = graph.prims_algorithm()

    initial_total: int = sum(graph.edges.values())
    optimal_total: int = sum(subgraph.edges.values())

    return initial_total - optimal_total
if __name__ == "__main__":
print(F'{solution() = }')
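    # Added sketch (illustration): for a 3-vertex triangle, Prim keeps the two
    # cheapest edges (1 + 2 = 3) out of a total weight of 6, saving 3.
    example_graph = Graph({0, 1, 2}, {(0, 1): 1, (1, 2): 2, (0, 2): 3})
    assert sum(example_graph.edges.values()) - sum(example_graph.prims_algorithm().edges.values()) == 3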
| 52 | 0 |
import tempfile
import unittest
from make_student import create_student_by_copying_alternating_layers
from transformers import AutoConfig
from transformers.file_utils import cached_property
from transformers.testing_utils import require_torch
TINY_BART = "sshleifer/bart-tiny-random"
TINY_T5 = "patrickvonplaten/t5-tiny-random"


@require_torch
class MakeStudentTester(unittest.TestCase):
    @cached_property
    def teacher_config(self):
        return AutoConfig.from_pretrained(TINY_BART)

    def test_valid_t5(self):
        student, *_ = create_student_by_copying_alternating_layers(TINY_T5, tempfile.mkdtemp(), e=1, d=1)
        self.assertEqual(student.config.num_hidden_layers, 1)

    def test_asymmetric_t5(self):
        student, *_ = create_student_by_copying_alternating_layers(TINY_T5, tempfile.mkdtemp(), e=1, d=None)

    def test_same_decoder_small_encoder(self):
        student, *_ = create_student_by_copying_alternating_layers(TINY_BART, tempfile.mkdtemp(), e=1, d=None)
        self.assertEqual(student.config.encoder_layers, 1)
        self.assertEqual(student.config.decoder_layers, self.teacher_config.encoder_layers)

    def test_small_enc_small_dec(self):
        student, *_ = create_student_by_copying_alternating_layers(TINY_BART, tempfile.mkdtemp(), e=1, d=1)
        self.assertEqual(student.config.encoder_layers, 1)
        self.assertEqual(student.config.decoder_layers, 1)

    def test_raises_assert(self):
        with self.assertRaises(AssertionError):
            create_student_by_copying_alternating_layers(TINY_BART, tempfile.mkdtemp(), e=None, d=None)
| 97 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/trocr-base-handwritten": (
        "https://huggingface.co/microsoft/trocr-base-handwritten/resolve/main/config.json"
    ),
    # See all TrOCR models at https://huggingface.co/models?filter=trocr
}
class TrOCRConfig(PretrainedConfig):
    model_type = "trocr"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "num_attention_heads": "decoder_attention_heads",
        "hidden_size": "d_model",
        "num_hidden_layers": "decoder_layers",
    }

    def __init__(
        self,
        vocab_size=50265,
        d_model=1024,
        decoder_layers=12,
        decoder_attention_heads=16,
        decoder_ffn_dim=4096,
        activation_function="gelu",
        max_position_embeddings=512,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        decoder_start_token_id=2,
        init_std=0.02,
        decoder_layerdrop=0.0,
        use_cache=True,
        scale_embedding=False,
        use_learned_position_embeddings=True,
        layernorm_embedding=True,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.activation_function = activation_function
        self.max_position_embeddings = max_position_embeddings
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.init_std = init_std
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.scale_embedding = scale_embedding
        self.use_learned_position_embeddings = use_learned_position_embeddings
        self.layernorm_embedding = layernorm_embedding

        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            decoder_start_token_id=decoder_start_token_id,
            **kwargs,
        )
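
# Added usage sketch (illustration): the attribute_map above aliases the common
# config names onto the decoder-specific ones.
#
#     config = TrOCRConfig(d_model=256, decoder_layers=2)
#     assert config.hidden_size == 256  # routed to d_model via attribute_map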
| 52 | 0 |
'''simple docstring'''
import unittest
import numpy as np
import timeout_decorator # noqa
from transformers import BlenderbotSmallConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...generation.test_flax_utils import FlaxGenerationTesterMixin
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor
if is_flax_available():
import os
# The slow tests are often failing with OOM error on GPU
# This makes JAX allocate exactly what is needed on demand, and deallocate memory that is no longer needed
# but will be slower as stated here https://jax.readthedocs.io/en/latest/gpu_memory_allocation.html
    os.environ["XLA_PYTHON_CLIENT_ALLOCATOR"] = "platform"
import jax
import jax.numpy as jnp
from transformers.models.blenderbot_small.modeling_flax_blenderbot_small import (
FlaxBlenderbotSmallForConditionalGeneration,
FlaxBlenderbotSmallModel,
shift_tokens_right,
)
def prepare_blenderbot_inputs_dict(
    config,
    input_ids,
    decoder_input_ids=None,
    attention_mask=None,
    decoder_attention_mask=None,
    head_mask=None,
    decoder_head_mask=None,
    cross_attn_head_mask=None,
):
    if attention_mask is None:
        attention_mask = np.where(input_ids != config.pad_token_id, 1, 0)
    if decoder_attention_mask is None:
        decoder_attention_mask = np.where(decoder_input_ids != config.pad_token_id, 1, 0)
    if head_mask is None:
        head_mask = np.ones((config.encoder_layers, config.encoder_attention_heads))
    if decoder_head_mask is None:
        decoder_head_mask = np.ones((config.decoder_layers, config.decoder_attention_heads))
    if cross_attn_head_mask is None:
        cross_attn_head_mask = np.ones((config.decoder_layers, config.decoder_attention_heads))
    return {
        "input_ids": input_ids,
        "decoder_input_ids": decoder_input_ids,
        "attention_mask": attention_mask,
        "decoder_attention_mask": attention_mask,
    }
class FlaxBlenderbotSmallModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_labels=False,
        vocab_size=99,
        hidden_size=16,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=4,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=32,
        eos_token_id=2,
        pad_token_id=1,
        bos_token_id=0,
        initializer_range=0.02,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
        self.initializer_range = initializer_range
    def prepare_config_and_inputs(self):
        input_ids = np.clip(ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size), 3, self.vocab_size)
        input_ids = np.concatenate((input_ids, 2 * np.ones((self.batch_size, 1), dtype=np.int64)), -1)

        decoder_input_ids = shift_tokens_right(input_ids, 1, 2)

        config = BlenderbotSmallConfig(
            vocab_size=self.vocab_size,
            d_model=self.hidden_size,
            encoder_layers=self.num_hidden_layers,
            decoder_layers=self.num_hidden_layers,
            encoder_attention_heads=self.num_attention_heads,
            decoder_attention_heads=self.num_attention_heads,
            encoder_ffn_dim=self.intermediate_size,
            decoder_ffn_dim=self.intermediate_size,
            dropout=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            eos_token_id=self.eos_token_id,
            bos_token_id=self.bos_token_id,
            pad_token_id=self.pad_token_id,
            initializer_range=self.initializer_range,
            use_cache=False,
        )
        inputs_dict = prepare_blenderbot_inputs_dict(config, input_ids, decoder_input_ids)
        return config, inputs_dict
    def prepare_config_and_inputs_for_common(self):
        config, inputs_dict = self.prepare_config_and_inputs()
        return config, inputs_dict
    def check_use_cache_forward(self, model_class_name, config, inputs_dict):
        max_decoder_length = 20
        model = model_class_name(config)

        encoder_outputs = model.encode(inputs_dict["input_ids"])

        decoder_input_ids, decoder_attention_mask = (
            inputs_dict["decoder_input_ids"],
            inputs_dict["decoder_attention_mask"],
        )

        past_key_values = model.init_cache(decoder_input_ids.shape[0], max_decoder_length, encoder_outputs)
        decoder_attention_mask = jnp.ones((decoder_input_ids.shape[0], max_decoder_length), dtype="i4")

        decoder_position_ids = jnp.broadcast_to(
            jnp.arange(decoder_input_ids.shape[-1] - 1)[None, :],
            (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1),
        )
        outputs_cache = model.decode(
            decoder_input_ids[:, :-1],
            encoder_outputs,
            decoder_attention_mask=decoder_attention_mask,
            past_key_values=past_key_values,
            decoder_position_ids=decoder_position_ids,
        )

        decoder_position_ids = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]], dtype="i4")
        outputs_cache_next = model.decode(
            decoder_input_ids[:, -1:],
            encoder_outputs,
            decoder_attention_mask=decoder_attention_mask,
            past_key_values=outputs_cache.past_key_values,
            decoder_position_ids=decoder_position_ids,
        )

        outputs = model.decode(decoder_input_ids, encoder_outputs)

        diff = np.max(np.abs(outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]))
        self.parent.assertTrue(diff < 1e-3, msg=f"Max diff is {diff}")
    def check_use_cache_forward_with_attn_mask(self, model_class_name, config, inputs_dict):
        max_decoder_length = 20
        model = model_class_name(config)

        encoder_outputs = model.encode(inputs_dict["input_ids"])

        decoder_input_ids, decoder_attention_mask = (
            inputs_dict["decoder_input_ids"],
            inputs_dict["decoder_attention_mask"],
        )

        decoder_attention_mask_cache = jnp.concatenate(
            [
                decoder_attention_mask,
                jnp.zeros((decoder_attention_mask.shape[0], max_decoder_length - decoder_attention_mask.shape[1])),
            ],
            axis=-1,
        )

        past_key_values = model.init_cache(decoder_input_ids.shape[0], max_decoder_length, encoder_outputs)
        decoder_position_ids = jnp.broadcast_to(
            jnp.arange(decoder_input_ids.shape[-1] - 1)[None, :],
            (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1),
        )

        outputs_cache = model.decode(
            decoder_input_ids[:, :-1],
            encoder_outputs,
            decoder_attention_mask=decoder_attention_mask_cache,
            past_key_values=past_key_values,
            decoder_position_ids=decoder_position_ids,
        )
        decoder_position_ids = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]], dtype="i4")
        outputs_cache_next = model.decode(
            decoder_input_ids[:, -1:],
            encoder_outputs,
            past_key_values=outputs_cache.past_key_values,
            decoder_attention_mask=decoder_attention_mask_cache,
            decoder_position_ids=decoder_position_ids,
        )

        outputs = model.decode(decoder_input_ids, encoder_outputs, decoder_attention_mask=decoder_attention_mask)

        diff = np.max(np.abs(outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]))
        self.parent.assertTrue(diff < 1e-3, msg=f"Max diff is {diff}")
@require_flax
class FlaxBlenderbotSmallHeadTests(unittest.TestCase):
    vocab_size = 99
    def _get_config_and_data(self):
        input_ids = np.array(
            [
                [71, 82, 18, 33, 46, 91, 2],
                [68, 34, 26, 58, 30, 82, 2],
                [5, 97, 17, 39, 94, 40, 2],
                [76, 83, 94, 25, 70, 78, 2],
                [87, 59, 41, 35, 48, 66, 2],
                [55, 13, 16, 58, 5, 2, 1],  # note padding
                [64, 27, 31, 51, 12, 75, 2],
                [52, 64, 86, 17, 83, 39, 2],
                [48, 61, 9, 24, 71, 82, 2],
                [26, 1, 60, 48, 22, 13, 2],
                [21, 5, 62, 28, 14, 76, 2],
                [45, 98, 37, 86, 59, 48, 2],
                [70, 70, 50, 9, 28, 0, 2],
            ],
            dtype=np.int64,
        )

        batch_size = input_ids.shape[0]
        config = BlenderbotSmallConfig(
            vocab_size=self.vocab_size,
            d_model=24,
            encoder_layers=2,
            decoder_layers=2,
            encoder_attention_heads=2,
            decoder_attention_heads=2,
            encoder_ffn_dim=32,
            decoder_ffn_dim=32,
            max_position_embeddings=48,
            eos_token_id=2,
            pad_token_id=1,
            bos_token_id=0,
        )
        return config, input_ids, batch_size
    def test_lm_forward(self):
        config, input_ids, batch_size = self._get_config_and_data()
        lm_model = FlaxBlenderbotSmallForConditionalGeneration(config)
        outputs = lm_model(input_ids=input_ids)
        expected_shape = (batch_size, input_ids.shape[1], config.vocab_size)
        self.assertEqual(outputs["logits"].shape, expected_shape)
    def test_lm_uneven_forward(self):
        config = BlenderbotSmallConfig(
            vocab_size=self.vocab_size,
            d_model=14,
            encoder_layers=2,
            decoder_layers=2,
            encoder_attention_heads=2,
            decoder_attention_heads=2,
            encoder_ffn_dim=8,
            decoder_ffn_dim=8,
            max_position_embeddings=48,
        )
        lm_model = FlaxBlenderbotSmallForConditionalGeneration(config)
        context = np.array([[71, 82, 18, 33, 46, 91, 2], [68, 34, 26, 58, 30, 2, 1]], dtype=np.int64)
        summary = np.array([[82, 71, 82, 18, 2], [58, 68, 2, 1, 1]], dtype=np.int64)
        outputs = lm_model(input_ids=context, decoder_input_ids=summary)
        expected_shape = (*summary.shape, config.vocab_size)
        self.assertEqual(outputs["logits"].shape, expected_shape)
    def test_shift_tokens_right(self):
        input_ids = np.array([[71, 82, 18, 33, 2, 1, 1], [68, 34, 26, 58, 30, 82, 2]], dtype=np.int64)
        shifted = shift_tokens_right(input_ids, 1, 2)
        n_pad_before = np.equal(input_ids, 1).astype(np.float32).sum()
        n_pad_after = np.equal(shifted, 1).astype(np.float32).sum()
        self.assertEqual(shifted.shape, input_ids.shape)
        self.assertEqual(n_pad_after, n_pad_before - 1)
        self.assertTrue(np.equal(shifted[:, 0], 2).all())
@require_flax
class FlaxBlenderbotSmallModelTest(FlaxModelTesterMixin, unittest.TestCase, FlaxGenerationTesterMixin):
    is_encoder_decoder = True
    all_model_classes = (
        (
            FlaxBlenderbotSmallModel,
            FlaxBlenderbotSmallForConditionalGeneration,
        )
        if is_flax_available()
        else ()
    )
    all_generative_model_classes = (FlaxBlenderbotSmallForConditionalGeneration,) if is_flax_available() else ()
    def setUp(self):
        self.model_tester = FlaxBlenderbotSmallModelTester(self)
    def test_use_cache_forward(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs()
        for model_class in self.all_model_classes:
            self.model_tester.check_use_cache_forward(model_class, config, inputs_dict)

    def test_use_cache_forward_with_attn_mask(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs()
        for model_class in self.all_model_classes:
            self.model_tester.check_use_cache_forward_with_attn_mask(model_class, config, inputs_dict)
    def test_encode(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                prepared_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
                model = model_class(config)

                @jax.jit
                def encode_jitted(input_ids, attention_mask=None, **kwargs):
                    return model.encode(input_ids=input_ids, attention_mask=attention_mask)

                with self.subTest("JIT Enabled"):
                    jitted_outputs = encode_jitted(**prepared_inputs_dict).to_tuple()

                with self.subTest("JIT Disabled"):
                    with jax.disable_jit():
                        outputs = encode_jitted(**prepared_inputs_dict).to_tuple()

                self.assertEqual(len(outputs), len(jitted_outputs))
                for jitted_output, output in zip(jitted_outputs, outputs):
                    self.assertEqual(jitted_output.shape, output.shape)

    def test_decode(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                model = model_class(config)
                encoder_outputs = model.encode(inputs_dict["input_ids"], inputs_dict["attention_mask"])

                prepared_inputs_dict = {
                    "decoder_input_ids": inputs_dict["decoder_input_ids"],
                    "decoder_attention_mask": inputs_dict["decoder_attention_mask"],
                    "encoder_outputs": encoder_outputs,
                }

                @jax.jit
                def decode_jitted(decoder_input_ids, decoder_attention_mask, encoder_outputs):
                    return model.decode(
                        decoder_input_ids=decoder_input_ids,
                        decoder_attention_mask=decoder_attention_mask,
                        encoder_outputs=encoder_outputs,
                    )

                with self.subTest("JIT Enabled"):
                    jitted_outputs = decode_jitted(**prepared_inputs_dict).to_tuple()

                with self.subTest("JIT Disabled"):
                    with jax.disable_jit():
                        outputs = decode_jitted(**prepared_inputs_dict).to_tuple()

                self.assertEqual(len(outputs), len(jitted_outputs))
                for jitted_output, output in zip(jitted_outputs, outputs):
                    self.assertEqual(jitted_output.shape, output.shape)
@slow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("facebook/blenderbot_small-90M")
            # FlaxBlenderbotForSequenceClassification expects eos token in input_ids
            input_ids = np.ones((1, 1)) * model.config.eos_token_id
            outputs = model(input_ids)
            self.assertIsNotNone(outputs)
| 98 |
"""simple docstring"""
import torch
from transformers import CamembertForMaskedLM, CamembertTokenizer
def fill_mask(masked_input, model, tokenizer, topk=5):
    # Adapted from https://github.com/pytorch/fairseq/blob/master/fairseq/models/roberta/hub_interface.py
    assert masked_input.count("<mask>") == 1
    input_ids = torch.tensor(tokenizer.encode(masked_input, add_special_tokens=True)).unsqueeze(0)  # Batch size 1
    logits = model(input_ids)[0]  # The last hidden-state is the first element of the output tuple
    masked_index = (input_ids.squeeze() == tokenizer.mask_token_id).nonzero().item()
    logits = logits[0, masked_index, :]
    prob = logits.softmax(dim=0)
    values, indices = prob.topk(k=topk, dim=0)
    topk_predicted_token_bpe = " ".join(
        [tokenizer.convert_ids_to_tokens(indices[i].item()) for i in range(len(indices))]
    )
    masked_token = tokenizer.mask_token
    topk_filled_outputs = []
    for index, predicted_token_bpe in enumerate(topk_predicted_token_bpe.split(" ")):
        predicted_token = predicted_token_bpe.replace("\u2581", " ")
        if " {0}".format(masked_token) in masked_input:
            topk_filled_outputs.append(
                (
                    masked_input.replace(" {0}".format(masked_token), predicted_token),
                    values[index].item(),
                    predicted_token,
                )
            )
        else:
            topk_filled_outputs.append(
                (
                    masked_input.replace(masked_token, predicted_token),
                    values[index].item(),
                    predicted_token,
                )
            )
    return topk_filled_outputs
tokenizer = CamembertTokenizer.from_pretrained("camembert-base")
model = CamembertForMaskedLM.from_pretrained("camembert-base")
model.eval()

masked_input = "Le camembert est <mask> :)"
print(fill_mask(masked_input, model, tokenizer, topk=3))
| 52 | 0 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

SWINV2_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/swinv2-tiny-patch4-window8-256": (
        "https://huggingface.co/microsoft/swinv2-tiny-patch4-window8-256/resolve/main/config.json"
    ),
}
class Swinv2Config(PretrainedConfig):
    model_type = "swinv2"

    attribute_map = {
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }

    def __init__(
        self,
        image_size=224,
        patch_size=4,
        num_channels=3,
        embed_dim=96,
        depths=[2, 2, 6, 2],
        num_heads=[3, 6, 12, 24],
        window_size=7,
        mlp_ratio=4.0,
        qkv_bias=True,
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        drop_path_rate=0.1,
        hidden_act="gelu",
        use_absolute_embeddings=False,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        encoder_stride=32,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_layers = len(depths)
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.encoder_stride = encoder_stride
        # we set the hidden_size attribute in order to make Swinv2 work with VisionEncoderDecoderModel
        # this indicates the channel dimension after the last stage of the model
        self.hidden_size = int(embed_dim * 2 ** (len(depths) - 1))
        self.pretrained_window_sizes = (0, 0, 0, 0)
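
# Added note (illustration): with the defaults above (embed_dim=96, four stages),
# the derived channel dimension after the last stage is 96 * 2**3 == 768:
#
#     config = Swinv2Config()
#     assert config.hidden_size == 768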
| 99 |
"""simple docstring"""
import unittest
from knapsack import greedy_knapsack as kp
class TestClass(unittest.TestCase):
    def test_sorted(self):
        profit = [10, 20, 30, 40, 50, 60]
        weight = [2, 4, 6, 8, 10, 12]
        max_weight = 100
        self.assertEqual(kp.calc_profit(profit, weight, max_weight), 210)

    def test_negative_max_weight(self):
        self.assertRaisesRegex(ValueError, "max_weight must greater than zero.")

    def test_negative_weight_value(self):
        self.assertRaisesRegex(ValueError, "Weight can not be negative.")

    def test_negative_profit_value(self):
        self.assertRaisesRegex(ValueError, "Profit can not be negative.")

    def test_null_max_weight(self):
        self.assertRaisesRegex(ValueError, "max_weight must greater than zero.")

    def test_unequal_list_length(self):
        self.assertRaisesRegex(IndexError, "The length of profit and weight must be same.")
if __name__ == "__main__":
unittest.main()
| 52 | 0 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {"configuration_xglm": ["XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP", "XGLMConfig"]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_xglm"] = ["XGLMTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_xglm_fast"] = ["XGLMTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_xglm"] = [
"""XGLM_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""XGLMForCausalLM""",
"""XGLMModel""",
"""XGLMPreTrainedModel""",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_xglm"] = [
"""FlaxXGLMForCausalLM""",
"""FlaxXGLMModel""",
"""FlaxXGLMPreTrainedModel""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_xglm"] = [
"""TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFXGLMForCausalLM""",
"""TFXGLMModel""",
"""TFXGLMPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_xglm import XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP, XGLMConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xglm import XGLMTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xglm_fast import XGLMTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xglm import XGLM_PRETRAINED_MODEL_ARCHIVE_LIST, XGLMForCausalLM, XGLMModel, XGLMPreTrainedModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_xglm import FlaxXGLMForCausalLM, FlaxXGLMModel, FlaxXGLMPreTrainedModel
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xglm import (
TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXGLMForCausalLM,
TFXGLMModel,
TFXGLMPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
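
# Added note (illustration): the _LazyModule registration above makes every
# symbol listed in _import_structure importable from this package while
# deferring the heavy torch/flax/tf imports until first attribute access, e.g.:
#
#     from transformers.models.xglm import XGLMConfig  # triggers the lazy import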
| 100 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP = {}
class LlamaConfig(PretrainedConfig):
    model_type = "llama"
    keys_to_ignore_at_inference = ["past_key_values"]

    def __init__(
        self,
        vocab_size=32000,
        hidden_size=4096,
        intermediate_size=11008,
        num_hidden_layers=32,
        num_attention_heads=32,
        num_key_value_heads=None,
        hidden_act="silu",
        max_position_embeddings=2048,
        initializer_range=0.02,
        rms_norm_eps=1e-6,
        use_cache=True,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        pretraining_tp=1,
        tie_word_embeddings=False,
        rope_scaling=None,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads

        # for backward compatibility
        if num_key_value_heads is None:
            num_key_value_heads = num_attention_heads

        self.num_key_value_heads = num_key_value_heads
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.rms_norm_eps = rms_norm_eps
        self.pretraining_tp = pretraining_tp
        self.use_cache = use_cache
        self.rope_scaling = rope_scaling
        self._rope_scaling_validation()

        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            tie_word_embeddings=tie_word_embeddings,
            **kwargs,
        )
    def _rope_scaling_validation(self):
        """Validate the `rope_scaling` configuration."""
        if self.rope_scaling is None:
            return

        if not isinstance(self.rope_scaling, dict) or len(self.rope_scaling) != 2:
            raise ValueError(
                "`rope_scaling` must be a dictionary with two fields, `type` and `factor`, "
                f"got {self.rope_scaling}"
            )
        rope_scaling_type = self.rope_scaling.get("type", None)
        rope_scaling_factor = self.rope_scaling.get("factor", None)
        if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]:
            raise ValueError(
                f"`rope_scaling`'s type field must be one of ['linear', 'dynamic'], got {rope_scaling_type}"
            )
        if rope_scaling_factor is None or not isinstance(rope_scaling_factor, float) or rope_scaling_factor <= 1.0:
            raise ValueError(f"`rope_scaling`'s factor field must be a float > 1, got {rope_scaling_factor}")
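
# Added usage sketch (illustration): a well-formed `rope_scaling` dict passes
# the validation above, while an unknown type raises ValueError.
#
#     config = LlamaConfig(rope_scaling={"type": "linear", "factor": 2.0})
#     LlamaConfig(rope_scaling={"type": "xpos", "factor": 2.0})  # -> ValueError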
| 52 | 0 |